/*
 * Contains CPU specific errata definitions
 *
 * Copyright (C) 2014 ARM Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/arm-smccc.h>
#include <linux/psci.h>
#include <linux/types.h>
#include <linux/cpu.h>
#include <asm/cpu.h>
#include <asm/cputype.h>
#include <asm/cpufeature.h>

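/*
 * Capability matchers based on the booting CPU's MIDR. These must be
 * called with SCOPE_LOCAL_CPU and with preemption disabled, as checked by
 * the WARN_ON()s below: they compare the local MIDR against a single
 * range or a range list attached to the capability entry.
 */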
static bool __maybe_unused
is_affected_midr_range(const struct arm64_cpu_capabilities *entry, int scope)
{
	u32 midr = read_cpuid_id();

	WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible());
	return is_midr_in_range(midr, &entry->midr_range);
}

static bool __maybe_unused
is_affected_midr_range_list(const struct arm64_cpu_capabilities *entry,
			    int scope)
{
	WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible());
	return is_midr_in_range_list(read_cpuid_id(), entry->midr_range_list);
}

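/*
 * Qualcomm Kryo matcher: compare only the implementer, architecture and
 * top nibble of the part number against entry->midr_range.model, ignoring
 * the variant/revision fields and the low bits of the part number.
 */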
static bool __maybe_unused
is_kryo_midr(const struct arm64_cpu_capabilities *entry, int scope)
{
	u32 model;

	WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible());

	model = read_cpuid_id();
	model &= MIDR_IMPLEMENTOR_MASK | (0xf00 << MIDR_PARTNUM_SHIFT) |
		 MIDR_ARCHITECTURE_MASK;

	return model == entry->midr_range.model;
}

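/*
 * Matcher shared by the "mismatched cache line size" and "mismatched
 * cache type" capabilities: compare this CPU's CTR_EL0 against the
 * system-wide safe value. The line-size capability checks only the
 * minimum line size fields, while the cache-type capability checks the
 * remaining strictly-matched fields and skips the line sizes.
 */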
static bool
has_mismatched_cache_type(const struct arm64_cpu_capabilities *entry,
			  int scope)
{
	u64 mask = CTR_CACHE_MINLINE_MASK;

	/* Skip matching the min line sizes for cache type check */
	if (entry->capability == ARM64_MISMATCHED_CACHE_TYPE)
		mask ^= arm64_ftr_reg_ctrel0.strict_mask;

	WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible());
	return (read_cpuid_cachetype() & mask) !=
	       (arm64_ftr_reg_ctrel0.sys_val & mask);
}

static void
cpu_enable_trap_ctr_access(const struct arm64_cpu_capabilities *__unused)
{
	/* Clear SCTLR_EL1.UCT */
	config_sctlr_el1(SCTLR_EL1_UCT, 0);
}

#include <asm/mmu_context.h>
#include <asm/cacheflush.h>

DEFINE_PER_CPU_READ_MOSTLY(struct bp_hardening_data, bp_hardening_data);

#ifdef CONFIG_KVM
extern char __smccc_workaround_1_smc_start[];
extern char __smccc_workaround_1_smc_end[];

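/*
 * Copy the SMCCC workaround sequence to the start of each of the sixteen
 * 128-byte vector entries in the given 2K hyp vector slot, then flush the
 * I-cache so the patched vectors are visible to instruction fetch.
 */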
static void __copy_hyp_vect_bpi(int slot, const char *hyp_vecs_start,
				const char *hyp_vecs_end)
{
	void *dst = lm_alias(__bp_harden_hyp_vecs_start + slot * SZ_2K);
	int i;

	for (i = 0; i < SZ_2K; i += 0x80)
		memcpy(dst + i, hyp_vecs_start, hyp_vecs_end - hyp_vecs_start);

	flush_icache_range((uintptr_t)dst, (uintptr_t)dst + SZ_2K);
}

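/*
 * Register the branch predictor hardening callback for this CPU. Hyp
 * vector slots are shared between CPUs using the same callback: reuse an
 * existing slot if one matches, otherwise allocate and patch a new one
 * under bp_lock, and BUG() if the slots are exhausted.
 */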
static void install_bp_hardening_cb(bp_hardening_cb_t fn,
				    const char *hyp_vecs_start,
				    const char *hyp_vecs_end)
{
	static int last_slot = -1;
	static DEFINE_SPINLOCK(bp_lock);
	int cpu, slot = -1;

	spin_lock(&bp_lock);
	for_each_possible_cpu(cpu) {
		if (per_cpu(bp_hardening_data.fn, cpu) == fn) {
			slot = per_cpu(bp_hardening_data.hyp_vectors_slot, cpu);
			break;
		}
	}

	if (slot == -1) {
		last_slot++;
		BUG_ON(((__bp_harden_hyp_vecs_end - __bp_harden_hyp_vecs_start)
			/ SZ_2K) <= last_slot);
		slot = last_slot;
		__copy_hyp_vect_bpi(slot, hyp_vecs_start, hyp_vecs_end);
	}

	__this_cpu_write(bp_hardening_data.hyp_vectors_slot, slot);
	__this_cpu_write(bp_hardening_data.fn, fn);
	spin_unlock(&bp_lock);
}
#else
#define __smccc_workaround_1_smc_start		NULL
#define __smccc_workaround_1_smc_end		NULL

static void install_bp_hardening_cb(bp_hardening_cb_t fn,
				      const char *hyp_vecs_start,
				      const char *hyp_vecs_end)
{
	__this_cpu_write(bp_hardening_data.fn, fn);
}
#endif	/* CONFIG_KVM */

#include <uapi/linux/psci.h>
#include <linux/arm-smccc.h>
#include <linux/psci.h>

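/*
 * Branch predictor invalidation callbacks: issue the firmware
 * ARM_SMCCC_ARCH_WORKAROUND_1 call over the SMC or HVC conduit, or, on
 * Qualcomm Falkor parts, run a local link-stack sanitization loop instead.
 */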
static void call_smc_arch_workaround_1(void)
{
	arm_smccc_1_1_smc(ARM_SMCCC_ARCH_WORKAROUND_1, NULL);
}

static void call_hvc_arch_workaround_1(void)
{
	arm_smccc_1_1_hvc(ARM_SMCCC_ARCH_WORKAROUND_1, NULL);
}

static void qcom_link_stack_sanitization(void)
{
	u64 tmp;

	asm volatile("mov	%0, x30		\n"
		     ".rept	16		\n"
		     "bl	. + 4		\n"
		     ".endr			\n"
		     "mov	x30, %0		\n"
		     : "=&r" (tmp));
}

static bool __nospectre_v2;
static int __init parse_nospectre_v2(char *str)
{
	__nospectre_v2 = true;
	return 0;
}
early_param("nospectre_v2", parse_nospectre_v2);

/*
 * Probe firmware for ARM_SMCCC_ARCH_WORKAROUND_1 support and pick a
 * hardening callback for this CPU. Return values:
 *
 * -1: No workaround
 *  0: No workaround required
 *  1: Workaround installed
 */
static int detect_harden_bp_fw(void)
{
	bp_hardening_cb_t cb;
	void *smccc_start, *smccc_end;
	struct arm_smccc_res res;
	u32 midr = read_cpuid_id();

	if (psci_ops.smccc_version == SMCCC_VERSION_1_0)
		return -1;

	switch (psci_ops.conduit) {
	case PSCI_CONDUIT_HVC:
		arm_smccc_1_1_hvc(ARM_SMCCC_ARCH_FEATURES_FUNC_ID,
				  ARM_SMCCC_ARCH_WORKAROUND_1, &res);
		switch ((int)res.a0) {
		case 1:
			/* Firmware says we're just fine */
			return 0;
		case 0:
			cb = call_hvc_arch_workaround_1;
			/* This is a guest, no need to patch KVM vectors */
			smccc_start = NULL;
			smccc_end = NULL;
			break;
		default:
			return -1;
		}
		break;

	case PSCI_CONDUIT_SMC:
		arm_smccc_1_1_smc(ARM_SMCCC_ARCH_FEATURES_FUNC_ID,
				  ARM_SMCCC_ARCH_WORKAROUND_1, &res);
		switch ((int)res.a0) {
		case 1:
			/* Firmware says we're just fine */
			return 0;
		case 0:
			cb = call_smc_arch_workaround_1;
			smccc_start = __smccc_workaround_1_smc_start;
			smccc_end = __smccc_workaround_1_smc_end;
			break;
		default:
			return -1;
		}
		break;

	default:
		return -1;
	}

	if (((midr & MIDR_CPU_MODEL_MASK) == MIDR_QCOM_FALKOR) ||
	    ((midr & MIDR_CPU_MODEL_MASK) == MIDR_QCOM_FALKOR_V1))
		cb = qcom_link_stack_sanitization;

	if (IS_ENABLED(CONFIG_HARDEN_BRANCH_PREDICTOR))
		install_bp_hardening_cb(cb, smccc_start, smccc_end);

	return 1;
}

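/*
 * Per-CPU flag indicating that the ARCH_WORKAROUND_2 mitigation must be
 * applied around EL1 entry/exit on this CPU. It is only set below when
 * the kernel-managed SSBD state (ARM64_SSBD_KERNEL) is in use and the
 * mitigation is actually required.
 */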
DEFINE_PER_CPU_READ_MOSTLY(u64, arm64_ssbd_callback_required);

int ssbd_state __read_mostly = ARM64_SSBD_KERNEL;
static bool __ssb_safe = true;

static const struct ssbd_options {
	const char	*str;
	int		state;
} ssbd_options[] = {
	{ "force-on",	ARM64_SSBD_FORCE_ENABLE, },
	{ "force-off",	ARM64_SSBD_FORCE_DISABLE, },
	{ "kernel",	ARM64_SSBD_KERNEL, },
};

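/*
 * Parse the "ssbd=" kernel command line parameter, e.g. "ssbd=force-on",
 * "ssbd=force-off" or "ssbd=kernel", and set ssbd_state accordingly.
 */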
static int __init ssbd_cfg(char *buf)
{
	int i;

	if (!buf || !buf[0])
		return -EINVAL;

	for (i = 0; i < ARRAY_SIZE(ssbd_options); i++) {
		int len = strlen(ssbd_options[i].str);

		if (strncmp(buf, ssbd_options[i].str, len))
			continue;

		ssbd_state = ssbd_options[i].state;
		return 0;
	}

	return -EINVAL;
}
early_param("ssbd", ssbd_cfg);

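/*
 * Alternatives callback: rewrite the single patched instruction as an HVC
 * or an SMC to match the SMCCC conduit reported by firmware, or leave it
 * untouched if no conduit is available.
 */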
void __init arm64_update_smccc_conduit(struct alt_instr *alt,
				       __le32 *origptr, __le32 *updptr,
				       int nr_inst)
{
	u32 insn;

	BUG_ON(nr_inst != 1);

	switch (psci_ops.conduit) {
	case PSCI_CONDUIT_HVC:
		insn = aarch64_insn_get_hvc_value();
		break;
	case PSCI_CONDUIT_SMC:
		insn = aarch64_insn_get_smc_value();
		break;
	default:
		return;
	}

	*updptr = cpu_to_le32(insn);
}

void __init arm64_enable_wa2_handling(struct alt_instr *alt,
				      __le32 *origptr, __le32 *updptr,
				      int nr_inst)
{
	BUG_ON(nr_inst != 1);
	/*
	 * Only allow mitigation on EL1 entry/exit and guest
	 * ARCH_WORKAROUND_2 handling if the SSBD state allows it to
	 * be flipped.
	 */
	if (arm64_get_ssbd_state() == ARM64_SSBD_KERNEL)
		*updptr = cpu_to_le32(aarch64_insn_gen_nop());
}

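/*
 * Enable or disable the SSBD mitigation on this CPU. Prefer the SSBS
 * PSTATE bit when the CPU implements it (SSBS set means speculation is
 * permitted, hence the inverted argument); otherwise issue the firmware
 * ARCH_WORKAROUND_2 call over the PSCI conduit.
 */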
void arm64_set_ssbd_mitigation(bool state)
{
	if (!IS_ENABLED(CONFIG_ARM64_SSBD)) {
		pr_info_once("SSBD disabled by kernel configuration\n");
		return;
	}

	if (this_cpu_has_cap(ARM64_SSBS)) {
		if (state)
			asm volatile(SET_PSTATE_SSBS(0));
		else
			asm volatile(SET_PSTATE_SSBS(1));
		return;
	}

	switch (psci_ops.conduit) {
	case PSCI_CONDUIT_HVC:
		arm_smccc_1_1_hvc(ARM_SMCCC_ARCH_WORKAROUND_2, state, NULL);
		break;

	case PSCI_CONDUIT_SMC:
		arm_smccc_1_1_smc(ARM_SMCCC_ARCH_WORKAROUND_2, state, NULL);
		break;

	default:
		WARN_ON_ONCE(1);
		break;
	}
}

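/*
 * Capability matcher for ARM64_SSBD: decide whether this CPU needs the
 * firmware-based Speculative Store Bypass Disable workaround. CPUs with
 * SSBS handle it through PSTATE, CPUs on the entry's known-safe midr list
 * are unaffected, and everything else is probed via ARCH_WORKAROUND_2.
 * The result, combined with ssbd_state, also drives __ssb_safe for the
 * sysfs vulnerability reporting below.
 */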
static bool has_ssbd_mitigation(const struct arm64_cpu_capabilities *entry,
				    int scope)
{
	struct arm_smccc_res res;
	bool required = true;
	s32 val;
	bool this_cpu_safe = false;

	WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible());

	if (cpu_mitigations_off())
		ssbd_state = ARM64_SSBD_FORCE_DISABLE;

	/* delay setting __ssb_safe until we get a firmware response */
	if (is_midr_in_range_list(read_cpuid_id(), entry->midr_range_list))
		this_cpu_safe = true;

	if (this_cpu_has_cap(ARM64_SSBS)) {
		if (!this_cpu_safe)
			__ssb_safe = false;
		required = false;
		goto out_printmsg;
	}

	if (psci_ops.smccc_version == SMCCC_VERSION_1_0) {
		ssbd_state = ARM64_SSBD_UNKNOWN;
		if (!this_cpu_safe)
			__ssb_safe = false;
		return false;
	}

	switch (psci_ops.conduit) {
	case PSCI_CONDUIT_HVC:
		arm_smccc_1_1_hvc(ARM_SMCCC_ARCH_FEATURES_FUNC_ID,
				  ARM_SMCCC_ARCH_WORKAROUND_2, &res);
		break;

	case PSCI_CONDUIT_SMC:
		arm_smccc_1_1_smc(ARM_SMCCC_ARCH_FEATURES_FUNC_ID,
				  ARM_SMCCC_ARCH_WORKAROUND_2, &res);
		break;

	default:
		ssbd_state = ARM64_SSBD_UNKNOWN;
		if (!this_cpu_safe)
			__ssb_safe = false;
		return false;
	}

	val = (s32)res.a0;

	switch (val) {
	case SMCCC_RET_NOT_SUPPORTED:
		ssbd_state = ARM64_SSBD_UNKNOWN;
		if (!this_cpu_safe)
			__ssb_safe = false;
		return false;

	/* machines with mixed mitigation requirements must not return this */
	case SMCCC_RET_NOT_REQUIRED:
		pr_info_once("%s mitigation not required\n", entry->desc);
		ssbd_state = ARM64_SSBD_MITIGATED;
		return false;

	case SMCCC_RET_SUCCESS:
		__ssb_safe = false;
		required = true;
		break;

	case 1:	/* Mitigation not required on this CPU */
		required = false;
		break;

	default:
		WARN_ON(1);
		if (!this_cpu_safe)
			__ssb_safe = false;
		return false;
	}

	switch (ssbd_state) {
	case ARM64_SSBD_FORCE_DISABLE:
		arm64_set_ssbd_mitigation(false);
		required = false;
		break;

	case ARM64_SSBD_KERNEL:
		if (required) {
			__this_cpu_write(arm64_ssbd_callback_required, 1);
			arm64_set_ssbd_mitigation(true);
		}
		break;

	case ARM64_SSBD_FORCE_ENABLE:
		arm64_set_ssbd_mitigation(true);
		required = true;
		break;

	default:
		WARN_ON(1);
		break;
	}

out_printmsg:
	switch (ssbd_state) {
	case ARM64_SSBD_FORCE_DISABLE:
		pr_info_once("%s disabled from command-line\n", entry->desc);
		break;

	case ARM64_SSBD_FORCE_ENABLE:
		pr_info_once("%s forced from command-line\n", entry->desc);
		break;
	}

	return required;
}

/* known invulnerable cores */
static const struct midr_range arm64_ssb_cpus[] = {
	MIDR_ALL_VERSIONS(MIDR_CORTEX_A35),
	MIDR_ALL_VERSIONS(MIDR_CORTEX_A53),
	MIDR_ALL_VERSIONS(MIDR_CORTEX_A55),
	{},
};

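/*
 * Helper macros describing which CPUs a capability entry applies to; used
 * by the arm64_errata[] table below. They fill in the .matches callback
 * and a MIDR range (or range list), and the ERRATA_* variants also mark
 * the capability as a local-CPU erratum. For example,
 * ERRATA_MIDR_REV_RANGE(MIDR_CORTEX_A53, 0, 0, 2) covers Cortex-A53
 * r0p0..r0p2.
 */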
#define CAP_MIDR_RANGE(model, v_min, r_min, v_max, r_max)	\
	.matches = is_affected_midr_range,			\
	.midr_range = MIDR_RANGE(model, v_min, r_min, v_max, r_max)

#define CAP_MIDR_ALL_VERSIONS(model)					\
	.matches = is_affected_midr_range,				\
	.midr_range = MIDR_ALL_VERSIONS(model)

#define MIDR_FIXED(rev, revidr_mask) \
	.fixed_revs = (struct arm64_midr_revidr[]){{ (rev), (revidr_mask) }, {}}

#define ERRATA_MIDR_RANGE(model, v_min, r_min, v_max, r_max)		\
	.type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,				\
	CAP_MIDR_RANGE(model, v_min, r_min, v_max, r_max)

#define CAP_MIDR_RANGE_LIST(list)				\
	.matches = is_affected_midr_range_list,			\
	.midr_range_list = list

/* Errata affecting a range of revisions of a given model variant */
#define ERRATA_MIDR_REV_RANGE(m, var, r_min, r_max)	 \
	ERRATA_MIDR_RANGE(m, var, r_min, var, r_max)

/* Errata affecting a single variant/revision of a model */
#define ERRATA_MIDR_REV(model, var, rev)	\
	ERRATA_MIDR_RANGE(model, var, rev, var, rev)

/* Errata affecting all variants/revisions of a given model */
#define ERRATA_MIDR_ALL_VERSIONS(model)				\
	.type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,			\
	CAP_MIDR_ALL_VERSIONS(model)

/* Errata affecting a list of midr ranges, with the same workaround */
#define ERRATA_MIDR_RANGE_LIST(midr_list)			\
	.type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,			\
	CAP_MIDR_RANGE_LIST(midr_list)

/* Track overall mitigation state. We are only mitigated if all cores are ok */
static bool __hardenbp_enab = true;
static bool __spectrev2_safe = true;

/*
 * List of CPUs that do not need any Spectre-v2 mitigation at all.
 */
static const struct midr_range spectre_v2_safe_list[] = {
	MIDR_ALL_VERSIONS(MIDR_CORTEX_A35),
	MIDR_ALL_VERSIONS(MIDR_CORTEX_A53),
	MIDR_ALL_VERSIONS(MIDR_CORTEX_A55),
	{ /* sentinel */ }
};

/*
 * Track overall bp hardening for all heterogeneous cores in the machine.
 * We are only considered "safe" if all booted cores are known safe.
 */
static bool __maybe_unused
check_branch_predictor(const struct arm64_cpu_capabilities *entry, int scope)
{
	int need_wa;

	WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible());

	/* If the CPU has CSV2 set, we're safe */
	if (cpuid_feature_extract_unsigned_field(read_cpuid(ID_AA64PFR0_EL1),
						 ID_AA64PFR0_CSV2_SHIFT))
		return false;

	/* Alternatively, we have a list of unaffected CPUs */
	if (is_midr_in_range_list(read_cpuid_id(), spectre_v2_safe_list))
		return false;

	/* Fallback to firmware detection */
	need_wa = detect_harden_bp_fw();
	if (!need_wa)
		return false;

	__spectrev2_safe = false;

	if (!IS_ENABLED(CONFIG_HARDEN_BRANCH_PREDICTOR)) {
		pr_warn_once("spectrev2 mitigation disabled by kernel configuration\n");
		__hardenbp_enab = false;
		return false;
	}

	/* forced off */
	if (__nospectre_v2 || cpu_mitigations_off()) {
		pr_info_once("spectrev2 mitigation disabled by command line option\n");
		__hardenbp_enab = false;
		return false;
	}

	if (need_wa < 0) {
		pr_warn_once("ARM_SMCCC_ARCH_WORKAROUND_1 missing from firmware\n");
		__hardenbp_enab = false;
	}

	return (need_wa > 0);
}

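/*
 * Table of CPU errata and mitigation capabilities built into this kernel.
 * Each entry names the erratum, the capability it sets, the affected MIDR
 * range(s) and, where needed, a cpu_enable hook run on matching CPUs.
 */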
const struct arm64_cpu_capabilities arm64_errata[] = {
#if	defined(CONFIG_ARM64_ERRATUM_826319) || \
	defined(CONFIG_ARM64_ERRATUM_827319) || \
	defined(CONFIG_ARM64_ERRATUM_824069)
	{
	/* Cortex-A53 r0p[012] */
		.desc = "ARM errata 826319, 827319, 824069",
		.capability = ARM64_WORKAROUND_CLEAN_CACHE,
		ERRATA_MIDR_REV_RANGE(MIDR_CORTEX_A53, 0, 0, 2),
		.cpu_enable = cpu_enable_cache_maint_trap,
	},
#endif
#ifdef CONFIG_ARM64_ERRATUM_819472
	{
	/* Cortex-A53 r0p[01] */
		.desc = "ARM errata 819472",
		.capability = ARM64_WORKAROUND_CLEAN_CACHE,
		ERRATA_MIDR_REV_RANGE(MIDR_CORTEX_A53, 0, 0, 1),
		.cpu_enable = cpu_enable_cache_maint_trap,
	},
#endif
#ifdef CONFIG_ARM64_ERRATUM_832075
	{
	/* Cortex-A57 r0p0 - r1p2 */
		.desc = "ARM erratum 832075",
		.capability = ARM64_WORKAROUND_DEVICE_LOAD_ACQUIRE,
		ERRATA_MIDR_RANGE(MIDR_CORTEX_A57,
				  0, 0,
				  1, 2),
	},
#endif
#ifdef CONFIG_ARM64_ERRATUM_834220
	{
	/* Cortex-A57 r0p0 - r1p2 */
		.desc = "ARM erratum 834220",
		.capability = ARM64_WORKAROUND_834220,
		ERRATA_MIDR_RANGE(MIDR_CORTEX_A57,
				  0, 0,
				  1, 2),
	},
#endif
#ifdef CONFIG_ARM64_ERRATUM_845719
	{
	/* Cortex-A53 r0p[01234] */
		.desc = "ARM erratum 845719",
		.capability = ARM64_WORKAROUND_845719,
		ERRATA_MIDR_REV_RANGE(MIDR_CORTEX_A53, 0, 0, 4),
	},
#endif
#ifdef CONFIG_CAVIUM_ERRATUM_23154
	{
	/* Cavium ThunderX, pass 1.x */
		.desc = "Cavium erratum 23154",
		.capability = ARM64_WORKAROUND_CAVIUM_23154,
		ERRATA_MIDR_REV_RANGE(MIDR_THUNDERX, 0, 0, 1),
	},
#endif
#ifdef CONFIG_CAVIUM_ERRATUM_27456
	{
	/* Cavium ThunderX, T88 pass 1.x - 2.1 */
		.desc = "Cavium erratum 27456",
		.capability = ARM64_WORKAROUND_CAVIUM_27456,
		ERRATA_MIDR_RANGE(MIDR_THUNDERX,
				  0, 0,
				  1, 1),
	},
	{
	/* Cavium ThunderX, T81 pass 1.0 */
		.desc = "Cavium erratum 27456",
		.capability = ARM64_WORKAROUND_CAVIUM_27456,
		ERRATA_MIDR_REV(MIDR_THUNDERX_81XX, 0, 0),
	},
#endif
#ifdef CONFIG_CAVIUM_ERRATUM_30115
	{
	/* Cavium ThunderX, T88 pass 1.x - 2.2 */
		.desc = "Cavium erratum 30115",
		.capability = ARM64_WORKAROUND_CAVIUM_30115,
		ERRATA_MIDR_RANGE(MIDR_THUNDERX,
				      0, 0,
				      1, 2),
	},
	{
	/* Cavium ThunderX, T81 pass 1.0 - 1.2 */
		.desc = "Cavium erratum 30115",
		.capability = ARM64_WORKAROUND_CAVIUM_30115,
		ERRATA_MIDR_REV_RANGE(MIDR_THUNDERX_81XX, 0, 0, 2),
	},
	{
	/* Cavium ThunderX, T83 pass 1.0 */
		.desc = "Cavium erratum 30115",
		.capability = ARM64_WORKAROUND_CAVIUM_30115,
		ERRATA_MIDR_REV(MIDR_THUNDERX_83XX, 0, 0),
	},
#endif
	{
		.desc = "Mismatched cache line size",
		.capability = ARM64_MISMATCHED_CACHE_LINE_SIZE,
		.matches = has_mismatched_cache_type,
		.type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,
		.cpu_enable = cpu_enable_trap_ctr_access,
	},
	{
		.desc = "Mismatched cache type",
		.capability = ARM64_MISMATCHED_CACHE_TYPE,
		.matches = has_mismatched_cache_type,
		.type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,
		.cpu_enable = cpu_enable_trap_ctr_access,
	},
#ifdef CONFIG_QCOM_FALKOR_ERRATUM_1003
	{
		.desc = "Qualcomm Technologies Falkor erratum 1003",
		.capability = ARM64_WORKAROUND_QCOM_FALKOR_E1003,
		ERRATA_MIDR_REV(MIDR_QCOM_FALKOR_V1, 0, 0),
	},
	{
		.desc = "Qualcomm Technologies Kryo erratum 1003",
		.capability = ARM64_WORKAROUND_QCOM_FALKOR_E1003,
		.type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,
		.midr_range.model = MIDR_QCOM_KRYO,
		.matches = is_kryo_midr,
	},
#endif
#ifdef CONFIG_QCOM_FALKOR_ERRATUM_1009
	{
		.desc = "Qualcomm Technologies Falkor erratum 1009",
		.capability = ARM64_WORKAROUND_REPEAT_TLBI,
		ERRATA_MIDR_REV(MIDR_QCOM_FALKOR_V1, 0, 0),
	},
#endif
#ifdef CONFIG_ARM64_ERRATUM_858921
	{
	/* Cortex-A73 all versions */
		.desc = "ARM erratum 858921",
		.capability = ARM64_WORKAROUND_858921,
		ERRATA_MIDR_ALL_VERSIONS(MIDR_CORTEX_A73),
	},
#endif
	{
		.capability = ARM64_HARDEN_BRANCH_PREDICTOR,
		.type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,
		.matches = check_branch_predictor,
	},
	{
		.desc = "Speculative Store Bypass Disable",
		.type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,
		.capability = ARM64_SSBD,
		.matches = has_ssbd_mitigation,
		.midr_range_list = arm64_ssb_cpus,
	},
	{
	}
};

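/*
 * sysfs handlers backing /sys/devices/system/cpu/vulnerabilities/,
 * reporting the Spectre v1, Spectre v2 and Speculative Store Bypass
 * mitigation state gathered above.
 */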
ssize_t cpu_show_spectre_v1(struct device *dev, struct device_attribute *attr,
			    char *buf)
{
	return sprintf(buf, "Mitigation: __user pointer sanitization\n");
}

ssize_t cpu_show_spectre_v2(struct device *dev, struct device_attribute *attr,
		char *buf)
{
	if (__spectrev2_safe)
		return sprintf(buf, "Not affected\n");

	if (__hardenbp_enab)
		return sprintf(buf, "Mitigation: Branch predictor hardening\n");

	return sprintf(buf, "Vulnerable\n");
}

ssize_t cpu_show_spec_store_bypass(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	if (__ssb_safe)
		return sprintf(buf, "Not affected\n");

	switch (ssbd_state) {
	case ARM64_SSBD_KERNEL:
	case ARM64_SSBD_FORCE_ENABLE:
		if (IS_ENABLED(CONFIG_ARM64_SSBD))
			return sprintf(buf,
			    "Mitigation: Speculative Store Bypass disabled via prctl\n");
	}

	return sprintf(buf, "Vulnerable\n");
}