1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * Contains CPU specific errata definitions
4  *
5  * Copyright (C) 2014 ARM Ltd.
6  */
7 
8 #include <linux/arm-smccc.h>
9 #include <linux/psci.h>
10 #include <linux/types.h>
11 #include <linux/cpu.h>
12 #include <asm/cpu.h>
13 #include <asm/cputype.h>
14 #include <asm/cpufeature.h>
15 #include <asm/smp_plat.h>
16 #include <asm/vectors.h>
17 
18 static bool __maybe_unused
19 is_affected_midr_range(const struct arm64_cpu_capabilities *entry, int scope)
20 {
21 	const struct arm64_midr_revidr *fix;
22 	u32 midr = read_cpuid_id(), revidr;
23 
24 	WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible());
25 	if (!is_midr_in_range(midr, &entry->midr_range))
26 		return false;
27 
28 	midr &= MIDR_REVISION_MASK | MIDR_VARIANT_MASK;
29 	revidr = read_cpuid(REVIDR_EL1);
30 	for (fix = entry->fixed_revs; fix && fix->revidr_mask; fix++)
31 		if (midr == fix->midr_rv && (revidr & fix->revidr_mask))
32 			return false;
33 
34 	return true;
35 }
36 
37 static bool __maybe_unused
38 is_affected_midr_range_list(const struct arm64_cpu_capabilities *entry,
39 			    int scope)
40 {
41 	WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible());
42 	return is_midr_in_range_list(read_cpuid_id(), entry->midr_range_list);
43 }
44 
45 static bool __maybe_unused
46 is_kryo_midr(const struct arm64_cpu_capabilities *entry, int scope)
47 {
48 	u32 model;
49 
50 	WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible());
51 
52 	model = read_cpuid_id();
53 	model &= MIDR_IMPLEMENTOR_MASK | (0xf00 << MIDR_PARTNUM_SHIFT) |
54 		 MIDR_ARCHITECTURE_MASK;
55 
56 	return model == entry->midr_range.model;
57 }
58 
59 static bool
60 has_mismatched_cache_type(const struct arm64_cpu_capabilities *entry,
61 			  int scope)
62 {
63 	u64 mask = arm64_ftr_reg_ctrel0.strict_mask;
64 	u64 sys = arm64_ftr_reg_ctrel0.sys_val & mask;
65 	u64 ctr_raw, ctr_real;
66 
67 	WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible());
68 
69 	/*
70 	 * We want to make sure that all the CPUs in the system expose
71 	 * a consistent CTR_EL0 to make sure that applications behave
72 	 * correctly with migration.
73 	 *
74 	 * If a CPU has CTR_EL0.IDC but does not advertise it via CTR_EL0 :
75 	 *
76 	 * 1) It is safe if the system doesn't support IDC, as CPU anyway
77 	 *    reports IDC = 0, consistent with the rest.
78 	 *
79 	 * 2) If the system has IDC, it is still safe as we trap CTR_EL0
80 	 *    access on this CPU via the ARM64_HAS_CACHE_IDC capability.
81 	 *
82 	 * So, we need to make sure either the raw CTR_EL0 or the effective
83 	 * CTR_EL0 matches the system's copy to allow a secondary CPU to boot.
84 	 */
85 	ctr_raw = read_cpuid_cachetype() & mask;
86 	ctr_real = read_cpuid_effective_cachetype() & mask;
87 
88 	return (ctr_real != sys) && (ctr_raw != sys);
89 }
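/*
 * Editorial note (not part of the upstream file): with the check above, a
 * late secondary CPU whose raw CTR_EL0 differs from the sanitised system
 * value is still allowed to boot as long as its effective CTR_EL0 (with IDC
 * folded in) matches, because the ARM64_HAS_CACHE_IDC trap hides the raw
 * register from userspace. Only a CPU where both the raw and the effective
 * values diverge from the system copy is reported as mismatched.
 */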
90 
91 static void
92 cpu_enable_trap_ctr_access(const struct arm64_cpu_capabilities *cap)
93 {
94 	u64 mask = arm64_ftr_reg_ctrel0.strict_mask;
95 	bool enable_uct_trap = false;
96 
97 	/* Trap CTR_EL0 access on this CPU, only if it has a mismatch */
98 	if ((read_cpuid_cachetype() & mask) !=
99 	    (arm64_ftr_reg_ctrel0.sys_val & mask))
100 		enable_uct_trap = true;
101 
102 	/* ... or if the system is affected by an erratum */
103 	if (cap->capability == ARM64_WORKAROUND_1542419)
104 		enable_uct_trap = true;
105 
106 	if (enable_uct_trap)
107 		sysreg_clear_set(sctlr_el1, SCTLR_EL1_UCT, 0);
108 }
109 
110 atomic_t arm64_el2_vector_last_slot = ATOMIC_INIT(-1);
111 
112 #include <asm/mmu_context.h>
113 #include <asm/cacheflush.h>
114 
115 DEFINE_PER_CPU_READ_MOSTLY(struct bp_hardening_data, bp_hardening_data);
116 
117 #ifdef CONFIG_KVM_INDIRECT_VECTORS
118 extern char __smccc_workaround_1_smc_start[];
119 extern char __smccc_workaround_1_smc_end[];
120 extern char __smccc_workaround_3_smc_start[];
121 extern char __smccc_workaround_3_smc_end[];
122 extern char __spectre_bhb_loop_k8_start[];
123 extern char __spectre_bhb_loop_k8_end[];
124 extern char __spectre_bhb_loop_k24_start[];
125 extern char __spectre_bhb_loop_k24_end[];
126 extern char __spectre_bhb_loop_k32_start[];
127 extern char __spectre_bhb_loop_k32_end[];
128 extern char __spectre_bhb_clearbhb_start[];
129 extern char __spectre_bhb_clearbhb_end[];
130 
131 static void __copy_hyp_vect_bpi(int slot, const char *hyp_vecs_start,
132 				const char *hyp_vecs_end)
133 {
134 	void *dst = lm_alias(__bp_harden_hyp_vecs_start + slot * SZ_2K);
135 	int i;
136 
137 	for (i = 0; i < SZ_2K; i += 0x80)
138 		memcpy(dst + i, hyp_vecs_start, hyp_vecs_end - hyp_vecs_start);
139 
140 	__flush_icache_range((uintptr_t)dst, (uintptr_t)dst + SZ_2K);
141 }
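/*
 * Editorial note: each EL2 vector slot is SZ_2K bytes and holds 16 vector
 * entries of 0x80 bytes each, so the loop above stamps the (short) hardening
 * template at the start of every entry in the slot before invalidating the
 * I-cache for the whole range.
 */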
142 
143 static DEFINE_RAW_SPINLOCK(bp_lock);
144 static void install_bp_hardening_cb(bp_hardening_cb_t fn,
145 				    const char *hyp_vecs_start,
146 				    const char *hyp_vecs_end)
147 {
148 	int cpu, slot = -1;
149 
150 	/*
151 	 * detect_harden_bp_fw() passes NULL for the hyp_vecs start/end if
152 	 * we're a guest. Skip the hyp-vectors work.
153 	 */
154 	if (!hyp_vecs_start) {
155 		__this_cpu_write(bp_hardening_data.fn, fn);
156 		return;
157 	}
158 
159 	raw_spin_lock(&bp_lock);
160 	for_each_possible_cpu(cpu) {
161 		if (per_cpu(bp_hardening_data.fn, cpu) == fn) {
162 			slot = per_cpu(bp_hardening_data.hyp_vectors_slot, cpu);
163 			break;
164 		}
165 	}
166 
167 	if (slot == -1) {
168 		slot = atomic_inc_return(&arm64_el2_vector_last_slot);
169 		BUG_ON(slot >= BP_HARDEN_EL2_SLOTS);
170 		__copy_hyp_vect_bpi(slot, hyp_vecs_start, hyp_vecs_end);
171 	}
172 
173 	if (fn != __this_cpu_read(bp_hardening_data.fn)) {
174 		__this_cpu_write(bp_hardening_data.hyp_vectors_slot, slot);
175 		__this_cpu_write(bp_hardening_data.fn, fn);
176 		__this_cpu_write(bp_hardening_data.template_start,
177 				 hyp_vecs_start);
178 	}
179 	raw_spin_unlock(&bp_lock);
180 }
181 #else
182 #define __smccc_workaround_1_smc_start		NULL
183 #define __smccc_workaround_1_smc_end		NULL
184 
185 static void install_bp_hardening_cb(bp_hardening_cb_t fn,
186 				      const char *hyp_vecs_start,
187 				      const char *hyp_vecs_end)
188 {
189 	__this_cpu_write(bp_hardening_data.fn, fn);
190 }
191 #endif	/* CONFIG_KVM_INDIRECT_VECTORS */
192 
193 #include <uapi/linux/psci.h>
194 #include <linux/arm-smccc.h>
195 #include <linux/psci.h>
196 
197 static void call_smc_arch_workaround_1(void)
198 {
199 	arm_smccc_1_1_smc(ARM_SMCCC_ARCH_WORKAROUND_1, NULL);
200 }
201 
202 static void call_hvc_arch_workaround_1(void)
203 {
204 	arm_smccc_1_1_hvc(ARM_SMCCC_ARCH_WORKAROUND_1, NULL);
205 }
206 
207 static void qcom_link_stack_sanitization(void)
208 {
209 	u64 tmp;
210 
211 	asm volatile("mov	%0, x30		\n"
212 		     ".rept	16		\n"
213 		     "bl	. + 4		\n"
214 		     ".endr			\n"
215 		     "mov	x30, %0		\n"
216 		     : "=&r" (tmp));
217 }
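/*
 * Editorial note (hedged): the inline asm above issues 16 dummy "bl . + 4"
 * calls to overwrite the return-address/link-stack predictor with benign
 * entries, then restores the saved x30. detect_harden_bp_fw() below installs
 * this routine as the hardening callback on Qualcomm Falkor parts instead of
 * the generic SMCCC ARCH_WORKAROUND_1 call.
 */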
218 
219 static bool __nospectre_v2;
220 static int __init parse_nospectre_v2(char *str)
221 {
222 	__nospectre_v2 = true;
223 	return 0;
224 }
225 early_param("nospectre_v2", parse_nospectre_v2);
226 
227 /*
228  * -1: No workaround
229  *  0: No workaround required
230  *  1: Workaround installed
231  */
232 static int detect_harden_bp_fw(void)
233 {
234 	bp_hardening_cb_t cb;
235 	void *smccc_start, *smccc_end;
236 	struct arm_smccc_res res;
237 	u32 midr = read_cpuid_id();
238 
239 	if (psci_ops.smccc_version == SMCCC_VERSION_1_0)
240 		return -1;
241 
242 	switch (psci_ops.conduit) {
243 	case PSCI_CONDUIT_HVC:
244 		arm_smccc_1_1_hvc(ARM_SMCCC_ARCH_FEATURES_FUNC_ID,
245 				  ARM_SMCCC_ARCH_WORKAROUND_1, &res);
246 		switch ((int)res.a0) {
247 		case 1:
248 			/* Firmware says we're just fine */
249 			return 0;
250 		case 0:
251 			cb = call_hvc_arch_workaround_1;
252 			/* This is a guest, no need to patch KVM vectors */
253 			smccc_start = NULL;
254 			smccc_end = NULL;
255 			break;
256 		default:
257 			return -1;
258 		}
259 		break;
260 
261 	case PSCI_CONDUIT_SMC:
262 		arm_smccc_1_1_smc(ARM_SMCCC_ARCH_FEATURES_FUNC_ID,
263 				  ARM_SMCCC_ARCH_WORKAROUND_1, &res);
264 		switch ((int)res.a0) {
265 		case 1:
266 			/* Firmware says we're just fine */
267 			return 0;
268 		case 0:
269 			cb = call_smc_arch_workaround_1;
270 			smccc_start = __smccc_workaround_1_smc_start;
271 			smccc_end = __smccc_workaround_1_smc_end;
272 			break;
273 		default:
274 			return -1;
275 		}
276 		break;
277 
278 	default:
279 		return -1;
280 	}
281 
282 	if (((midr & MIDR_CPU_MODEL_MASK) == MIDR_QCOM_FALKOR) ||
283 	    ((midr & MIDR_CPU_MODEL_MASK) == MIDR_QCOM_FALKOR_V1))
284 		cb = qcom_link_stack_sanitization;
285 
286 	if (IS_ENABLED(CONFIG_HARDEN_BRANCH_PREDICTOR))
287 		install_bp_hardening_cb(cb, smccc_start, smccc_end);
288 
289 	return 1;
290 }
291 
292 DEFINE_PER_CPU_READ_MOSTLY(u64, arm64_ssbd_callback_required);
293 
294 int ssbd_state __read_mostly = ARM64_SSBD_KERNEL;
295 static bool __ssb_safe = true;
296 
297 static const struct ssbd_options {
298 	const char	*str;
299 	int		state;
300 } ssbd_options[] = {
301 	{ "force-on",	ARM64_SSBD_FORCE_ENABLE, },
302 	{ "force-off",	ARM64_SSBD_FORCE_DISABLE, },
303 	{ "kernel",	ARM64_SSBD_KERNEL, },
304 };
305 
306 static int __init ssbd_cfg(char *buf)
307 {
308 	int i;
309 
310 	if (!buf || !buf[0])
311 		return -EINVAL;
312 
313 	for (i = 0; i < ARRAY_SIZE(ssbd_options); i++) {
314 		int len = strlen(ssbd_options[i].str);
315 
316 		if (strncmp(buf, ssbd_options[i].str, len))
317 			continue;
318 
319 		ssbd_state = ssbd_options[i].state;
320 		return 0;
321 	}
322 
323 	return -EINVAL;
324 }
325 early_param("ssbd", ssbd_cfg);
326 
327 void __init arm64_update_smccc_conduit(struct alt_instr *alt,
328 				       __le32 *origptr, __le32 *updptr,
329 				       int nr_inst)
330 {
331 	u32 insn;
332 
333 	BUG_ON(nr_inst != 1);
334 
335 	switch (psci_ops.conduit) {
336 	case PSCI_CONDUIT_HVC:
337 		insn = aarch64_insn_get_hvc_value();
338 		break;
339 	case PSCI_CONDUIT_SMC:
340 		insn = aarch64_insn_get_smc_value();
341 		break;
342 	default:
343 		return;
344 	}
345 
346 	*updptr = cpu_to_le32(insn);
347 }
348 
349 void __init arm64_enable_wa2_handling(struct alt_instr *alt,
350 				      __le32 *origptr, __le32 *updptr,
351 				      int nr_inst)
352 {
353 	BUG_ON(nr_inst != 1);
354 	/*
355 	 * Only allow mitigation on EL1 entry/exit and guest
356 	 * ARCH_WORKAROUND_2 handling if the SSBD state allows it to
357 	 * be flipped.
358 	 */
359 	if (arm64_get_ssbd_state() == ARM64_SSBD_KERNEL)
360 		*updptr = cpu_to_le32(aarch64_insn_gen_nop());
361 }
362 
363 void arm64_set_ssbd_mitigation(bool state)
364 {
365 	if (!IS_ENABLED(CONFIG_ARM64_SSBD)) {
366 		pr_info_once("SSBD disabled by kernel configuration\n");
367 		return;
368 	}
369 
370 	if (this_cpu_has_cap(ARM64_SSBS)) {
371 		if (state)
372 			asm volatile(SET_PSTATE_SSBS(0));
373 		else
374 			asm volatile(SET_PSTATE_SSBS(1));
375 		return;
376 	}
377 
378 	switch (psci_ops.conduit) {
379 	case PSCI_CONDUIT_HVC:
380 		arm_smccc_1_1_hvc(ARM_SMCCC_ARCH_WORKAROUND_2, state, NULL);
381 		break;
382 
383 	case PSCI_CONDUIT_SMC:
384 		arm_smccc_1_1_smc(ARM_SMCCC_ARCH_WORKAROUND_2, state, NULL);
385 		break;
386 
387 	default:
388 		WARN_ON_ONCE(1);
389 		break;
390 	}
391 }
392 
393 static bool has_ssbd_mitigation(const struct arm64_cpu_capabilities *entry,
394 				    int scope)
395 {
396 	struct arm_smccc_res res;
397 	bool required = true;
398 	s32 val;
399 	bool this_cpu_safe = false;
400 
401 	WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible());
402 
403 	if (cpu_mitigations_off())
404 		ssbd_state = ARM64_SSBD_FORCE_DISABLE;
405 
406 	/* delay setting __ssb_safe until we get a firmware response */
407 	if (is_midr_in_range_list(read_cpuid_id(), entry->midr_range_list))
408 		this_cpu_safe = true;
409 
410 	if (this_cpu_has_cap(ARM64_SSBS)) {
411 		if (!this_cpu_safe)
412 			__ssb_safe = false;
413 		required = false;
414 		goto out_printmsg;
415 	}
416 
417 	if (psci_ops.smccc_version == SMCCC_VERSION_1_0) {
418 		ssbd_state = ARM64_SSBD_UNKNOWN;
419 		if (!this_cpu_safe)
420 			__ssb_safe = false;
421 		return false;
422 	}
423 
424 	switch (psci_ops.conduit) {
425 	case PSCI_CONDUIT_HVC:
426 		arm_smccc_1_1_hvc(ARM_SMCCC_ARCH_FEATURES_FUNC_ID,
427 				  ARM_SMCCC_ARCH_WORKAROUND_2, &res);
428 		break;
429 
430 	case PSCI_CONDUIT_SMC:
431 		arm_smccc_1_1_smc(ARM_SMCCC_ARCH_FEATURES_FUNC_ID,
432 				  ARM_SMCCC_ARCH_WORKAROUND_2, &res);
433 		break;
434 
435 	default:
436 		ssbd_state = ARM64_SSBD_UNKNOWN;
437 		if (!this_cpu_safe)
438 			__ssb_safe = false;
439 		return false;
440 	}
441 
442 	val = (s32)res.a0;
443 
444 	switch (val) {
445 	case SMCCC_RET_NOT_SUPPORTED:
446 		ssbd_state = ARM64_SSBD_UNKNOWN;
447 		if (!this_cpu_safe)
448 			__ssb_safe = false;
449 		return false;
450 
451 	/* machines with mixed mitigation requirements must not return this */
452 	case SMCCC_RET_NOT_REQUIRED:
453 		pr_info_once("%s mitigation not required\n", entry->desc);
454 		ssbd_state = ARM64_SSBD_MITIGATED;
455 		return false;
456 
457 	case SMCCC_RET_SUCCESS:
458 		__ssb_safe = false;
459 		required = true;
460 		break;
461 
462 	case 1:	/* Mitigation not required on this CPU */
463 		required = false;
464 		break;
465 
466 	default:
467 		WARN_ON(1);
468 		if (!this_cpu_safe)
469 			__ssb_safe = false;
470 		return false;
471 	}
472 
473 	switch (ssbd_state) {
474 	case ARM64_SSBD_FORCE_DISABLE:
475 		arm64_set_ssbd_mitigation(false);
476 		required = false;
477 		break;
478 
479 	case ARM64_SSBD_KERNEL:
480 		if (required) {
481 			__this_cpu_write(arm64_ssbd_callback_required, 1);
482 			arm64_set_ssbd_mitigation(true);
483 		}
484 		break;
485 
486 	case ARM64_SSBD_FORCE_ENABLE:
487 		arm64_set_ssbd_mitigation(true);
488 		required = true;
489 		break;
490 
491 	default:
492 		WARN_ON(1);
493 		break;
494 	}
495 
496 out_printmsg:
497 	switch (ssbd_state) {
498 	case ARM64_SSBD_FORCE_DISABLE:
499 		pr_info_once("%s disabled from command-line\n", entry->desc);
500 		break;
501 
502 	case ARM64_SSBD_FORCE_ENABLE:
503 		pr_info_once("%s forced from command-line\n", entry->desc);
504 		break;
505 	}
506 
507 	return required;
508 }
509 
510 static void cpu_enable_ssbd_mitigation(const struct arm64_cpu_capabilities *cap)
511 {
512 	if (ssbd_state != ARM64_SSBD_FORCE_DISABLE)
513 		cap->matches(cap, SCOPE_LOCAL_CPU);
514 }
515 
516 /* known invulnerable cores */
517 static const struct midr_range arm64_ssb_cpus[] = {
518 	MIDR_ALL_VERSIONS(MIDR_CORTEX_A35),
519 	MIDR_ALL_VERSIONS(MIDR_CORTEX_A53),
520 	MIDR_ALL_VERSIONS(MIDR_CORTEX_A55),
521 	MIDR_ALL_VERSIONS(MIDR_BRAHMA_B53),
522 	{},
523 };
524 
525 #ifdef CONFIG_ARM64_ERRATUM_1463225
526 DEFINE_PER_CPU(int, __in_cortex_a76_erratum_1463225_wa);
527 
528 static bool
529 has_cortex_a76_erratum_1463225(const struct arm64_cpu_capabilities *entry,
530 			       int scope)
531 {
532 	u32 midr = read_cpuid_id();
533 	/* Cortex-A76 r0p0 - r3p1 */
534 	struct midr_range range = MIDR_RANGE(MIDR_CORTEX_A76, 0, 0, 3, 1);
535 
536 	WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible());
537 	return is_midr_in_range(midr, &range) && is_kernel_in_hyp_mode();
538 }
539 #endif
540 
541 static void __maybe_unused
542 cpu_enable_cache_maint_trap(const struct arm64_cpu_capabilities *__unused)
543 {
544 	sysreg_clear_set(sctlr_el1, SCTLR_EL1_UCI, 0);
545 }
546 
547 #define CAP_MIDR_RANGE(model, v_min, r_min, v_max, r_max)	\
548 	.matches = is_affected_midr_range,			\
549 	.midr_range = MIDR_RANGE(model, v_min, r_min, v_max, r_max)
550 
551 #define CAP_MIDR_ALL_VERSIONS(model)					\
552 	.matches = is_affected_midr_range,				\
553 	.midr_range = MIDR_ALL_VERSIONS(model)
554 
555 #define MIDR_FIXED(rev, revidr_mask) \
556 	.fixed_revs = (struct arm64_midr_revidr[]){{ (rev), (revidr_mask) }, {}}
557 
558 #define ERRATA_MIDR_RANGE(model, v_min, r_min, v_max, r_max)		\
559 	.type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,				\
560 	CAP_MIDR_RANGE(model, v_min, r_min, v_max, r_max)
561 
562 #define CAP_MIDR_RANGE_LIST(list)				\
563 	.matches = is_affected_midr_range_list,			\
564 	.midr_range_list = list
565 
566 /* Errata affecting a range of revisions of a given model variant */
567 #define ERRATA_MIDR_REV_RANGE(m, var, r_min, r_max)	 \
568 	ERRATA_MIDR_RANGE(m, var, r_min, var, r_max)
569 
570 /* Errata affecting a single variant/revision of a model */
571 #define ERRATA_MIDR_REV(model, var, rev)	\
572 	ERRATA_MIDR_RANGE(model, var, rev, var, rev)
573 
574 /* Errata affecting all variants/revisions of a given model */
575 #define ERRATA_MIDR_ALL_VERSIONS(model)				\
576 	.type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,			\
577 	CAP_MIDR_ALL_VERSIONS(model)
578 
579 /* Errata affecting a list of midr ranges, with same workaround */
580 #define ERRATA_MIDR_RANGE_LIST(midr_list)			\
581 	.type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,			\
582 	CAP_MIDR_RANGE_LIST(midr_list)
583 
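/*
 * Illustrative example (editorial, not an entry in this file): a hypothetical
 * erratum affecting Cortex-A53 r0p0..r0p4 could be described with the helpers
 * above as:
 *
 *	{
 *		.desc = "Example erratum",
 *		.capability = ARM64_WORKAROUND_EXAMPLE,  (hypothetical cap)
 *		ERRATA_MIDR_REV_RANGE(MIDR_CORTEX_A53, 0, 0, 4),
 *	},
 *
 * i.e. the macro fills in .type, .matches and .midr_range so the entries in
 * arm64_errata[] below stay compact.
 */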
584 /* Track overall mitigation state. We are only mitigated if all cores are ok */
585 static bool __hardenbp_enab = true;
586 static bool __spectrev2_safe = true;
587 
588 int get_spectre_v2_workaround_state(void)
589 {
590 	if (__spectrev2_safe)
591 		return ARM64_BP_HARDEN_NOT_REQUIRED;
592 
593 	if (!__hardenbp_enab)
594 		return ARM64_BP_HARDEN_UNKNOWN;
595 
596 	return ARM64_BP_HARDEN_WA_NEEDED;
597 }
598 
599 /*
600  * List of CPUs that do not need any Spectre-v2 mitigation at all.
601  */
602 static const struct midr_range spectre_v2_safe_list[] = {
603 	MIDR_ALL_VERSIONS(MIDR_CORTEX_A35),
604 	MIDR_ALL_VERSIONS(MIDR_CORTEX_A53),
605 	MIDR_ALL_VERSIONS(MIDR_CORTEX_A55),
606 	MIDR_ALL_VERSIONS(MIDR_BRAHMA_B53),
607 	MIDR_ALL_VERSIONS(MIDR_HISI_TSV110),
608 	{ /* sentinel */ }
609 };
610 
611 /*
612  * Track overall bp hardening for all heterogeneous cores in the machine.
613  * We are only considered "safe" if all booted cores are known safe.
614  */
615 static bool __maybe_unused
616 check_branch_predictor(const struct arm64_cpu_capabilities *entry, int scope)
617 {
618 	int need_wa;
619 
620 	WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible());
621 
622 	/* If the CPU has CSV2 set, we're safe */
623 	if (cpuid_feature_extract_unsigned_field(read_cpuid(ID_AA64PFR0_EL1),
624 						 ID_AA64PFR0_CSV2_SHIFT))
625 		return false;
626 
627 	/* Alternatively, we have a list of unaffected CPUs */
628 	if (is_midr_in_range_list(read_cpuid_id(), spectre_v2_safe_list))
629 		return false;
630 
631 	/* Fallback to firmware detection */
632 	need_wa = detect_harden_bp_fw();
633 	if (!need_wa)
634 		return false;
635 
636 	__spectrev2_safe = false;
637 
638 	if (!IS_ENABLED(CONFIG_HARDEN_BRANCH_PREDICTOR)) {
639 		pr_warn_once("spectrev2 mitigation disabled by kernel configuration\n");
640 		__hardenbp_enab = false;
641 		return false;
642 	}
643 
644 	/* forced off */
645 	if (__nospectre_v2 || cpu_mitigations_off()) {
646 		pr_info_once("spectrev2 mitigation disabled by command line option\n");
647 		__hardenbp_enab = false;
648 		return false;
649 	}
650 
651 	if (need_wa < 0) {
652 		pr_warn_once("ARM_SMCCC_ARCH_WORKAROUND_1 missing from firmware\n");
653 		__hardenbp_enab = false;
654 	}
655 
656 	return (need_wa > 0);
657 }
658 
659 static void
660 cpu_enable_branch_predictor_hardening(const struct arm64_cpu_capabilities *cap)
661 {
662 	cap->matches(cap, SCOPE_LOCAL_CPU);
663 }
664 
665 static const __maybe_unused struct midr_range tx2_family_cpus[] = {
666 	MIDR_ALL_VERSIONS(MIDR_BRCM_VULCAN),
667 	MIDR_ALL_VERSIONS(MIDR_CAVIUM_THUNDERX2),
668 	{},
669 };
670 
671 static bool __maybe_unused
672 needs_tx2_tvm_workaround(const struct arm64_cpu_capabilities *entry,
673 			 int scope)
674 {
675 	int i;
676 
677 	if (!is_affected_midr_range_list(entry, scope) ||
678 	    !is_hyp_mode_available())
679 		return false;
680 
681 	for_each_possible_cpu(i) {
682 		if (MPIDR_AFFINITY_LEVEL(cpu_logical_map(i), 0) != 0)
683 			return true;
684 	}
685 
686 	return false;
687 }
688 
689 static bool __maybe_unused
690 has_neoverse_n1_erratum_1542419(const struct arm64_cpu_capabilities *entry,
691 				int scope)
692 {
693 	u32 midr = read_cpuid_id();
694 	bool has_dic = read_cpuid_cachetype() & BIT(CTR_DIC_SHIFT);
695 	const struct midr_range range = MIDR_ALL_VERSIONS(MIDR_NEOVERSE_N1);
696 
697 	WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible());
698 	return is_midr_in_range(midr, &range) && has_dic;
699 }
700 
701 #ifdef CONFIG_HARDEN_EL2_VECTORS
702 
703 static const struct midr_range arm64_harden_el2_vectors[] = {
704 	MIDR_ALL_VERSIONS(MIDR_CORTEX_A57),
705 	MIDR_ALL_VERSIONS(MIDR_CORTEX_A72),
706 	{},
707 };
708 
709 #endif
710 
711 #ifdef CONFIG_ARM64_WORKAROUND_REPEAT_TLBI
712 static const struct arm64_cpu_capabilities arm64_repeat_tlbi_list[] = {
713 #ifdef CONFIG_QCOM_FALKOR_ERRATUM_1009
714 	{
715 		ERRATA_MIDR_REV(MIDR_QCOM_FALKOR_V1, 0, 0)
716 	},
717 	{
718 		.midr_range.model = MIDR_QCOM_KRYO,
719 		.matches = is_kryo_midr,
720 	},
721 #endif
722 #ifdef CONFIG_ARM64_ERRATUM_1286807
723 	{
724 		ERRATA_MIDR_RANGE(MIDR_CORTEX_A76, 0, 0, 3, 0),
725 	},
726 #endif
727 	{},
728 };
729 #endif
730 
731 #ifdef CONFIG_CAVIUM_ERRATUM_27456
732 const struct midr_range cavium_erratum_27456_cpus[] = {
733 	/* Cavium ThunderX, T88 pass 1.x - 2.1 */
734 	MIDR_RANGE(MIDR_THUNDERX, 0, 0, 1, 1),
735 	/* Cavium ThunderX, T81 pass 1.0 */
736 	MIDR_REV(MIDR_THUNDERX_81XX, 0, 0),
737 	{},
738 };
739 #endif
740 
741 #ifdef CONFIG_CAVIUM_ERRATUM_30115
742 static const struct midr_range cavium_erratum_30115_cpus[] = {
743 	/* Cavium ThunderX, T88 pass 1.x - 2.2 */
744 	MIDR_RANGE(MIDR_THUNDERX, 0, 0, 1, 2),
745 	/* Cavium ThunderX, T81 pass 1.0 - 1.2 */
746 	MIDR_REV_RANGE(MIDR_THUNDERX_81XX, 0, 0, 2),
747 	/* Cavium ThunderX, T83 pass 1.0 */
748 	MIDR_REV(MIDR_THUNDERX_83XX, 0, 0),
749 	{},
750 };
751 #endif
752 
753 #ifdef CONFIG_QCOM_FALKOR_ERRATUM_1003
754 static const struct arm64_cpu_capabilities qcom_erratum_1003_list[] = {
755 	{
756 		ERRATA_MIDR_REV(MIDR_QCOM_FALKOR_V1, 0, 0),
757 	},
758 	{
759 		.midr_range.model = MIDR_QCOM_KRYO,
760 		.matches = is_kryo_midr,
761 	},
762 	{},
763 };
764 #endif
765 
766 #ifdef CONFIG_ARM64_WORKAROUND_CLEAN_CACHE
767 static const struct midr_range workaround_clean_cache[] = {
768 #if	defined(CONFIG_ARM64_ERRATUM_826319) || \
769 	defined(CONFIG_ARM64_ERRATUM_827319) || \
770 	defined(CONFIG_ARM64_ERRATUM_824069)
771 	/* Cortex-A53 r0p[012]: ARM errata 826319, 827319, 824069 */
772 	MIDR_REV_RANGE(MIDR_CORTEX_A53, 0, 0, 2),
773 #endif
774 #ifdef	CONFIG_ARM64_ERRATUM_819472
775 	/* Cortex-A53 r0p[01] : ARM errata 819472 */
776 	MIDR_REV_RANGE(MIDR_CORTEX_A53, 0, 0, 1),
777 #endif
778 	{},
779 };
780 #endif
781 
782 #ifdef CONFIG_ARM64_ERRATUM_1418040
783 /*
784  * - 1188873 affects r0p0 to r2p0
785  * - 1418040 affects r0p0 to r3p1
786  */
787 static const struct midr_range erratum_1418040_list[] = {
788 	/* Cortex-A76 r0p0 to r3p1 */
789 	MIDR_RANGE(MIDR_CORTEX_A76, 0, 0, 3, 1),
790 	/* Neoverse-N1 r0p0 to r3p1 */
791 	MIDR_RANGE(MIDR_NEOVERSE_N1, 0, 0, 3, 1),
792 	{},
793 };
794 #endif
795 
796 #ifdef CONFIG_ARM64_ERRATUM_845719
797 static const struct midr_range erratum_845719_list[] = {
798 	/* Cortex-A53 r0p[01234] */
799 	MIDR_REV_RANGE(MIDR_CORTEX_A53, 0, 0, 4),
800 	/* Brahma-B53 r0p[0] */
801 	MIDR_REV(MIDR_BRAHMA_B53, 0, 0),
802 	{},
803 };
804 #endif
805 
806 #ifdef CONFIG_ARM64_ERRATUM_843419
807 static const struct arm64_cpu_capabilities erratum_843419_list[] = {
808 	{
809 		/* Cortex-A53 r0p[01234] */
810 		.matches = is_affected_midr_range,
811 		ERRATA_MIDR_REV_RANGE(MIDR_CORTEX_A53, 0, 0, 4),
812 		MIDR_FIXED(0x4, BIT(8)),
813 	},
814 	{
815 		/* Brahma-B53 r0p[0] */
816 		.matches = is_affected_midr_range,
817 		ERRATA_MIDR_REV(MIDR_BRAHMA_B53, 0, 0),
818 	},
819 	{},
820 };
821 #endif
822 
823 #ifdef CONFIG_ARM64_ERRATUM_1742098
824 static struct midr_range broken_aarch32_aes[] = {
825 	MIDR_RANGE(MIDR_CORTEX_A57, 0, 1, 0xf, 0xf),
826 	MIDR_ALL_VERSIONS(MIDR_CORTEX_A72),
827 	{},
828 };
829 #endif
830 
831 const struct arm64_cpu_capabilities arm64_errata[] = {
832 #ifdef CONFIG_ARM64_WORKAROUND_CLEAN_CACHE
833 	{
834 		.desc = "ARM errata 826319, 827319, 824069, 819472",
835 		.capability = ARM64_WORKAROUND_CLEAN_CACHE,
836 		ERRATA_MIDR_RANGE_LIST(workaround_clean_cache),
837 		.cpu_enable = cpu_enable_cache_maint_trap,
838 	},
839 #endif
840 #ifdef CONFIG_ARM64_ERRATUM_832075
841 	{
842 	/* Cortex-A57 r0p0 - r1p2 */
843 		.desc = "ARM erratum 832075",
844 		.capability = ARM64_WORKAROUND_DEVICE_LOAD_ACQUIRE,
845 		ERRATA_MIDR_RANGE(MIDR_CORTEX_A57,
846 				  0, 0,
847 				  1, 2),
848 	},
849 #endif
850 #ifdef CONFIG_ARM64_ERRATUM_834220
851 	{
852 	/* Cortex-A57 r0p0 - r1p2 */
853 		.desc = "ARM erratum 834220",
854 		.capability = ARM64_WORKAROUND_834220,
855 		ERRATA_MIDR_RANGE(MIDR_CORTEX_A57,
856 				  0, 0,
857 				  1, 2),
858 	},
859 #endif
860 #ifdef CONFIG_ARM64_ERRATUM_843419
861 	{
862 		.desc = "ARM erratum 843419",
863 		.capability = ARM64_WORKAROUND_843419,
864 		.type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,
865 		.matches = cpucap_multi_entry_cap_matches,
866 		.match_list = erratum_843419_list,
867 	},
868 #endif
869 #ifdef CONFIG_ARM64_ERRATUM_845719
870 	{
871 		.desc = "ARM erratum 845719",
872 		.capability = ARM64_WORKAROUND_845719,
873 		ERRATA_MIDR_RANGE_LIST(erratum_845719_list),
874 	},
875 #endif
876 #ifdef CONFIG_CAVIUM_ERRATUM_23154
877 	{
878 	/* Cavium ThunderX, pass 1.x */
879 		.desc = "Cavium erratum 23154",
880 		.capability = ARM64_WORKAROUND_CAVIUM_23154,
881 		ERRATA_MIDR_REV_RANGE(MIDR_THUNDERX, 0, 0, 1),
882 	},
883 #endif
884 #ifdef CONFIG_CAVIUM_ERRATUM_27456
885 	{
886 		.desc = "Cavium erratum 27456",
887 		.capability = ARM64_WORKAROUND_CAVIUM_27456,
888 		ERRATA_MIDR_RANGE_LIST(cavium_erratum_27456_cpus),
889 	},
890 #endif
891 #ifdef CONFIG_CAVIUM_ERRATUM_30115
892 	{
893 		.desc = "Cavium erratum 30115",
894 		.capability = ARM64_WORKAROUND_CAVIUM_30115,
895 		ERRATA_MIDR_RANGE_LIST(cavium_erratum_30115_cpus),
896 	},
897 #endif
898 	{
899 		.desc = "Mismatched cache type (CTR_EL0)",
900 		.capability = ARM64_MISMATCHED_CACHE_TYPE,
901 		.matches = has_mismatched_cache_type,
902 		.type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,
903 		.cpu_enable = cpu_enable_trap_ctr_access,
904 	},
905 #ifdef CONFIG_QCOM_FALKOR_ERRATUM_1003
906 	{
907 		.desc = "Qualcomm Technologies Falkor/Kryo erratum 1003",
908 		.capability = ARM64_WORKAROUND_QCOM_FALKOR_E1003,
909 		.type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,
910 		.matches = cpucap_multi_entry_cap_matches,
911 		.match_list = qcom_erratum_1003_list,
912 	},
913 #endif
914 #ifdef CONFIG_ARM64_WORKAROUND_REPEAT_TLBI
915 	{
916 		.desc = "Qualcomm erratum 1009, ARM erratum 1286807",
917 		.capability = ARM64_WORKAROUND_REPEAT_TLBI,
918 		.type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,
919 		.matches = cpucap_multi_entry_cap_matches,
920 		.match_list = arm64_repeat_tlbi_list,
921 	},
922 #endif
923 #ifdef CONFIG_ARM64_ERRATUM_858921
924 	{
925 	/* Cortex-A73 all versions */
926 		.desc = "ARM erratum 858921",
927 		.capability = ARM64_WORKAROUND_858921,
928 		ERRATA_MIDR_ALL_VERSIONS(MIDR_CORTEX_A73),
929 	},
930 #endif
931 	{
932 		.desc = "Branch predictor hardening",
933 		.capability = ARM64_HARDEN_BRANCH_PREDICTOR,
934 		.type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,
935 		.matches = check_branch_predictor,
936 		.cpu_enable = cpu_enable_branch_predictor_hardening,
937 	},
938 #ifdef CONFIG_HARDEN_EL2_VECTORS
939 	{
940 		.desc = "EL2 vector hardening",
941 		.capability = ARM64_HARDEN_EL2_VECTORS,
942 		ERRATA_MIDR_RANGE_LIST(arm64_harden_el2_vectors),
943 	},
944 #endif
945 	{
946 		.desc = "Speculative Store Bypass Disable",
947 		.capability = ARM64_SSBD,
948 		.type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,
949 		.matches = has_ssbd_mitigation,
950 		.cpu_enable = cpu_enable_ssbd_mitigation,
951 		.midr_range_list = arm64_ssb_cpus,
952 	},
953 	{
954 		.desc = "Spectre-BHB",
955 		.capability = ARM64_SPECTRE_BHB,
956 		.type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,
957 		.matches = is_spectre_bhb_affected,
958 		.cpu_enable = spectre_bhb_enable_mitigation,
959 	},
960 #ifdef CONFIG_ARM64_ERRATUM_1418040
961 	{
962 		.desc = "ARM erratum 1418040",
963 		.capability = ARM64_WORKAROUND_1418040,
964 		ERRATA_MIDR_RANGE_LIST(erratum_1418040_list),
965 		/*
966 		 * We need to allow affected CPUs to come in late, but
967 		 * also need the non-affected CPUs to be able to come
968 		 * in at any point in time. Wonderful.
969 		 */
970 		.type = ARM64_CPUCAP_WEAK_LOCAL_CPU_FEATURE,
971 	},
972 #endif
973 #ifdef CONFIG_ARM64_ERRATUM_1165522
974 	{
975 		/* Cortex-A76 r0p0 to r2p0 */
976 		.desc = "ARM erratum 1165522",
977 		.capability = ARM64_WORKAROUND_1165522,
978 		ERRATA_MIDR_RANGE(MIDR_CORTEX_A76, 0, 0, 2, 0),
979 	},
980 #endif
981 #ifdef CONFIG_ARM64_ERRATUM_1463225
982 	{
983 		.desc = "ARM erratum 1463225",
984 		.capability = ARM64_WORKAROUND_1463225,
985 		.type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,
986 		.matches = has_cortex_a76_erratum_1463225,
987 	},
988 #endif
989 #ifdef CONFIG_CAVIUM_TX2_ERRATUM_219
990 	{
991 		.desc = "Cavium ThunderX2 erratum 219 (KVM guest sysreg trapping)",
992 		.capability = ARM64_WORKAROUND_CAVIUM_TX2_219_TVM,
993 		ERRATA_MIDR_RANGE_LIST(tx2_family_cpus),
994 		.matches = needs_tx2_tvm_workaround,
995 	},
996 	{
997 		.desc = "Cavium ThunderX2 erratum 219 (PRFM removal)",
998 		.capability = ARM64_WORKAROUND_CAVIUM_TX2_219_PRFM,
999 		ERRATA_MIDR_RANGE_LIST(tx2_family_cpus),
1000 	},
1001 #endif
1002 #ifdef CONFIG_ARM64_ERRATUM_1542419
1003 	{
1004 		/* we depend on the firmware portion for correctness */
1005 		.desc = "ARM erratum 1542419 (kernel portion)",
1006 		.capability = ARM64_WORKAROUND_1542419,
1007 		.type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,
1008 		.matches = has_neoverse_n1_erratum_1542419,
1009 		.cpu_enable = cpu_enable_trap_ctr_access,
1010 	},
1011 #endif
1012 #ifdef CONFIG_ARM64_ERRATUM_1742098
1013 	{
1014 		.desc = "ARM erratum 1742098",
1015 		.capability = ARM64_WORKAROUND_1742098,
1016 		CAP_MIDR_RANGE_LIST(broken_aarch32_aes),
1017 		.type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,
1018 	},
1019 #endif
1020 	{
1021 	}
1022 };
1023 
1024 ssize_t cpu_show_spectre_v1(struct device *dev, struct device_attribute *attr,
1025 			    char *buf)
1026 {
1027 	return sprintf(buf, "Mitigation: __user pointer sanitization\n");
1028 }
1029 
1030 static const char *get_bhb_affected_string(enum mitigation_state bhb_state)
1031 {
1032 	switch (bhb_state) {
1033 	case SPECTRE_UNAFFECTED:
1034 		return "";
1035 	default:
1036 	case SPECTRE_VULNERABLE:
1037 		return ", but not BHB";
1038 	case SPECTRE_MITIGATED:
1039 		return ", BHB";
1040 	}
1041 }
1042 
1043 ssize_t cpu_show_spectre_v2(struct device *dev, struct device_attribute *attr,
1044 		char *buf)
1045 {
1046 	enum mitigation_state bhb_state = arm64_get_spectre_bhb_state();
1047 	const char *bhb_str = get_bhb_affected_string(bhb_state);
1048 	const char *v2_str = "Branch predictor hardening";
1049 
1050 	switch (get_spectre_v2_workaround_state()) {
1051 	case ARM64_BP_HARDEN_NOT_REQUIRED:
1052 		if (bhb_state == SPECTRE_UNAFFECTED)
1053 			return sprintf(buf, "Not affected\n");
1054 
1055 		/*
1056 		 * Platforms affected by Spectre-BHB can't report
1057 		 * "Not affected" for Spectre-v2.
1058 		 */
1059 		v2_str = "CSV2";
1060 		fallthrough;
1061 	case ARM64_BP_HARDEN_WA_NEEDED:
1062 		return sprintf(buf, "Mitigation: %s%s\n", v2_str, bhb_str);
1063 	case ARM64_BP_HARDEN_UNKNOWN:
1064 		fallthrough;
1065 	default:
1066 		return sprintf(buf, "Vulnerable\n");
1067 	}
1068 }
1069 
1070 ssize_t cpu_show_spec_store_bypass(struct device *dev,
1071 		struct device_attribute *attr, char *buf)
1072 {
1073 	if (__ssb_safe)
1074 		return sprintf(buf, "Not affected\n");
1075 
1076 	switch (ssbd_state) {
1077 	case ARM64_SSBD_KERNEL:
1078 	case ARM64_SSBD_FORCE_ENABLE:
1079 		if (IS_ENABLED(CONFIG_ARM64_SSBD))
1080 			return sprintf(buf,
1081 			    "Mitigation: Speculative Store Bypass disabled via prctl\n");
1082 	}
1083 
1084 	return sprintf(buf, "Vulnerable\n");
1085 }
1086 
1087 /*
1088  * We try to ensure that the mitigation state can never change as the result of
1089  * onlining a late CPU.
1090  */
1091 static void update_mitigation_state(enum mitigation_state *oldp,
1092 				    enum mitigation_state new)
1093 {
1094 	enum mitigation_state state;
1095 
1096 	do {
1097 		state = READ_ONCE(*oldp);
1098 		if (new <= state)
1099 			break;
1100 	} while (cmpxchg_relaxed(oldp, state, new) != state);
1101 }
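/*
 * Editorial note: the cmpxchg loop above is a one-way ratchet; *oldp is only
 * updated when the new state compares greater than the recorded one, so a
 * late-onlined CPU may move the system-wide state in one direction but can
 * never move it back.
 */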
1102 
1103 /*
1104  * Spectre BHB.
1105  *
1106  * A CPU is either:
1107  * - Mitigated by a branchy loop a CPU specific number of times, and listed
1108  *   in our "loop mitigated list".
1109  * - Mitigated in software by the firmware Spectre v2 call.
1110  * - Has the ClearBHB instruction to perform the mitigation.
1111  * - Has the 'Exception Clears Branch History Buffer' (ECBHB) feature, so no
1112  *   software mitigation in the vectors is needed.
1113  * - Has CSV2.3, so is unaffected.
1114  */
1115 static enum mitigation_state spectre_bhb_state;
1116 
1117 enum mitigation_state arm64_get_spectre_bhb_state(void)
1118 {
1119 	return spectre_bhb_state;
1120 }
1121 
1122 /*
1123  * This must be called with SCOPE_LOCAL_CPU for each type of CPU, before any
1124  * SCOPE_SYSTEM call will give the right answer.
1125  */
1126 u8 spectre_bhb_loop_affected(int scope)
1127 {
1128 	u8 k = 0;
1129 	static u8 max_bhb_k;
1130 
1131 	if (scope == SCOPE_LOCAL_CPU) {
1132 		static const struct midr_range spectre_bhb_k32_list[] = {
1133 			MIDR_ALL_VERSIONS(MIDR_CORTEX_A78),
1134 			MIDR_ALL_VERSIONS(MIDR_CORTEX_A78C),
1135 			MIDR_ALL_VERSIONS(MIDR_CORTEX_X1),
1136 			MIDR_ALL_VERSIONS(MIDR_CORTEX_A710),
1137 			MIDR_ALL_VERSIONS(MIDR_CORTEX_X2),
1138 			MIDR_ALL_VERSIONS(MIDR_NEOVERSE_N2),
1139 			MIDR_ALL_VERSIONS(MIDR_NEOVERSE_V1),
1140 			{},
1141 		};
1142 		static const struct midr_range spectre_bhb_k24_list[] = {
1143 			MIDR_ALL_VERSIONS(MIDR_CORTEX_A76),
1144 			MIDR_ALL_VERSIONS(MIDR_CORTEX_A77),
1145 			MIDR_ALL_VERSIONS(MIDR_NEOVERSE_N1),
1146 			{},
1147 		};
1148 		static const struct midr_range spectre_bhb_k11_list[] = {
1149 			MIDR_ALL_VERSIONS(MIDR_AMPERE1),
1150 			{},
1151 		};
1152 		static const struct midr_range spectre_bhb_k8_list[] = {
1153 			MIDR_ALL_VERSIONS(MIDR_CORTEX_A72),
1154 			MIDR_ALL_VERSIONS(MIDR_CORTEX_A57),
1155 			{},
1156 		};
1157 
1158 		if (is_midr_in_range_list(read_cpuid_id(), spectre_bhb_k32_list))
1159 			k = 32;
1160 		else if (is_midr_in_range_list(read_cpuid_id(), spectre_bhb_k24_list))
1161 			k = 24;
1162 		else if (is_midr_in_range_list(read_cpuid_id(), spectre_bhb_k11_list))
1163 			k = 11;
1164 		else if (is_midr_in_range_list(read_cpuid_id(), spectre_bhb_k8_list))
1165 			k =  8;
1166 
1167 		max_bhb_k = max(max_bhb_k, k);
1168 	} else {
1169 		k = max_bhb_k;
1170 	}
1171 
1172 	return k;
1173 }
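/*
 * Editorial note: with SCOPE_LOCAL_CPU the value returned above is the loop
 * count for the calling CPU only (e.g. 24 for Cortex-A76, 32 for Cortex-A78),
 * while max_bhb_k accumulates the largest value seen so far; a subsequent
 * SCOPE_SYSTEM query returns that maximum so that a single vector covers
 * every booted CPU type.
 */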
1174 
1175 static enum mitigation_state spectre_bhb_get_cpu_fw_mitigation_state(void)
1176 {
1177 	int ret;
1178 	struct arm_smccc_res res;
1179 
1180 	if (psci_ops.smccc_version == SMCCC_VERSION_1_0)
1181 		return SPECTRE_VULNERABLE;
1182 
1183 	switch (psci_ops.conduit) {
1184 	case PSCI_CONDUIT_HVC:
1185 		arm_smccc_1_1_hvc(ARM_SMCCC_ARCH_FEATURES_FUNC_ID,
1186 				  ARM_SMCCC_ARCH_WORKAROUND_3, &res);
1187 		break;
1188 
1189 	case PSCI_CONDUIT_SMC:
1190 		arm_smccc_1_1_smc(ARM_SMCCC_ARCH_FEATURES_FUNC_ID,
1191 				  ARM_SMCCC_ARCH_WORKAROUND_3, &res);
1192 		break;
1193 
1194 	default:
1195 		return SPECTRE_VULNERABLE;
1196 	}
1197 
1198 	ret = res.a0;
1199 	switch (ret) {
1200 	case SMCCC_RET_SUCCESS:
1201 		return SPECTRE_MITIGATED;
1202 	case SMCCC_ARCH_WORKAROUND_RET_UNAFFECTED:
1203 		return SPECTRE_UNAFFECTED;
1204 	default:
1205 		fallthrough;
1206 	case SMCCC_RET_NOT_SUPPORTED:
1207 		return SPECTRE_VULNERABLE;
1208 	}
1209 }
1210 
1211 static bool is_spectre_bhb_fw_affected(int scope)
1212 {
1213 	static bool system_affected;
1214 	enum mitigation_state fw_state;
1215 	bool has_smccc = (psci_ops.smccc_version >= SMCCC_VERSION_1_1);
1216 	static const struct midr_range spectre_bhb_firmware_mitigated_list[] = {
1217 		MIDR_ALL_VERSIONS(MIDR_CORTEX_A73),
1218 		MIDR_ALL_VERSIONS(MIDR_CORTEX_A75),
1219 		{},
1220 	};
1221 	bool cpu_in_list = is_midr_in_range_list(read_cpuid_id(),
1222 					 spectre_bhb_firmware_mitigated_list);
1223 
1224 	if (scope != SCOPE_LOCAL_CPU)
1225 		return system_affected;
1226 
1227 	fw_state = spectre_bhb_get_cpu_fw_mitigation_state();
1228 	if (cpu_in_list || (has_smccc && fw_state == SPECTRE_MITIGATED)) {
1229 		system_affected = true;
1230 		return true;
1231 	}
1232 
1233 	return false;
1234 }
1235 
1236 static bool supports_ecbhb(int scope)
1237 {
1238 	u64 mmfr1;
1239 
1240 	if (scope == SCOPE_LOCAL_CPU)
1241 		mmfr1 = read_sysreg_s(SYS_ID_AA64MMFR1_EL1);
1242 	else
1243 		mmfr1 = read_sanitised_ftr_reg(SYS_ID_AA64MMFR1_EL1);
1244 
1245 	return cpuid_feature_extract_unsigned_field(mmfr1,
1246 						    ID_AA64MMFR1_ECBHB_SHIFT);
1247 }
1248 
1249 bool is_spectre_bhb_affected(const struct arm64_cpu_capabilities *entry,
1250 			     int scope)
1251 {
1252 	WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible());
1253 
1254 	if (supports_csv2p3(scope))
1255 		return false;
1256 
1257 	if (supports_clearbhb(scope))
1258 		return true;
1259 
1260 	if (spectre_bhb_loop_affected(scope))
1261 		return true;
1262 
1263 	if (is_spectre_bhb_fw_affected(scope))
1264 		return true;
1265 
1266 	return false;
1267 }
1268 
1269 static void this_cpu_set_vectors(enum arm64_bp_harden_el1_vectors slot)
1270 {
1271 	const char *v = arm64_get_bp_hardening_vector(slot);
1272 
1273 	if (slot < 0)
1274 		return;
1275 
1276 	__this_cpu_write(this_cpu_vector, v);
1277 
1278 	/*
1279 	 * When KPTI is in use, the vectors are switched when exiting to
1280 	 * user-space.
1281 	 */
1282 	if (arm64_kernel_unmapped_at_el0())
1283 		return;
1284 
1285 	write_sysreg(v, vbar_el1);
1286 	isb();
1287 }
1288 
1289 #ifdef CONFIG_KVM_INDIRECT_VECTORS
1290 static const char *kvm_bhb_get_vecs_end(const char *start)
1291 {
1292 	if (start == __smccc_workaround_3_smc_start)
1293 		return __smccc_workaround_3_smc_end;
1294 	else if (start == __spectre_bhb_loop_k8_start)
1295 		return __spectre_bhb_loop_k8_end;
1296 	else if (start == __spectre_bhb_loop_k24_start)
1297 		return __spectre_bhb_loop_k24_end;
1298 	else if (start == __spectre_bhb_loop_k32_start)
1299 		return __spectre_bhb_loop_k32_end;
1300 	else if (start == __spectre_bhb_clearbhb_start)
1301 		return __spectre_bhb_clearbhb_end;
1302 
1303 	return NULL;
1304 }
1305 
1306 static void kvm_setup_bhb_slot(const char *hyp_vecs_start)
1307 {
1308 	int cpu, slot = -1;
1309 	const char *hyp_vecs_end;
1310 
1311 	if (!IS_ENABLED(CONFIG_KVM) || !is_hyp_mode_available())
1312 		return;
1313 
1314 	hyp_vecs_end = kvm_bhb_get_vecs_end(hyp_vecs_start);
1315 	if (WARN_ON_ONCE(!hyp_vecs_start || !hyp_vecs_end))
1316 		return;
1317 
1318 	raw_spin_lock(&bp_lock);
1319 	for_each_possible_cpu(cpu) {
1320 		if (per_cpu(bp_hardening_data.template_start, cpu) == hyp_vecs_start) {
1321 			slot = per_cpu(bp_hardening_data.hyp_vectors_slot, cpu);
1322 			break;
1323 		}
1324 	}
1325 
1326 	if (slot == -1) {
1327 		slot = atomic_inc_return(&arm64_el2_vector_last_slot);
1328 		BUG_ON(slot >= BP_HARDEN_EL2_SLOTS);
1329 		__copy_hyp_vect_bpi(slot, hyp_vecs_start, hyp_vecs_end);
1330 	}
1331 
1332 	if (hyp_vecs_start != __this_cpu_read(bp_hardening_data.template_start)) {
1333 		__this_cpu_write(bp_hardening_data.hyp_vectors_slot, slot);
1334 		__this_cpu_write(bp_hardening_data.template_start,
1335 				 hyp_vecs_start);
1336 	}
1337 	raw_spin_unlock(&bp_lock);
1338 }
1339 #else
1340 #define __smccc_workaround_3_smc_start NULL
1341 #define __spectre_bhb_loop_k8_start NULL
1342 #define __spectre_bhb_loop_k24_start NULL
1343 #define __spectre_bhb_loop_k32_start NULL
1344 #define __spectre_bhb_clearbhb_start NULL
1345 
1346 static void kvm_setup_bhb_slot(const char *hyp_vecs_start) { }
1347 #endif
1348 
1349 void spectre_bhb_enable_mitigation(const struct arm64_cpu_capabilities *entry)
1350 {
1351 	enum mitigation_state fw_state, state = SPECTRE_VULNERABLE;
1352 
1353 	if (!is_spectre_bhb_affected(entry, SCOPE_LOCAL_CPU))
1354 		return;
1355 
1356 	if (get_spectre_v2_workaround_state() == ARM64_BP_HARDEN_UNKNOWN) {
1357 		/* No point mitigating Spectre-BHB alone. */
1358 	} else if (!IS_ENABLED(CONFIG_MITIGATE_SPECTRE_BRANCH_HISTORY)) {
1359 		pr_info_once("spectre-bhb mitigation disabled by compile time option\n");
1360 	} else if (cpu_mitigations_off()) {
1361 		pr_info_once("spectre-bhb mitigation disabled by command line option\n");
1362 	} else if (supports_ecbhb(SCOPE_LOCAL_CPU)) {
1363 		state = SPECTRE_MITIGATED;
1364 	} else if (supports_clearbhb(SCOPE_LOCAL_CPU)) {
1365 		kvm_setup_bhb_slot(__spectre_bhb_clearbhb_start);
1366 		this_cpu_set_vectors(EL1_VECTOR_BHB_CLEAR_INSN);
1367 
1368 		state = SPECTRE_MITIGATED;
1369 	} else if (spectre_bhb_loop_affected(SCOPE_LOCAL_CPU)) {
1370 		switch (spectre_bhb_loop_affected(SCOPE_SYSTEM)) {
1371 		case 8:
1372 			/*
1373 			 * A57/A72-r0 will already have selected the
1374 			 * spectre-indirect vector, which is sufficient
1375 			 * for BHB too.
1376 			 */
1377 			if (!__this_cpu_read(bp_hardening_data.fn))
1378 				kvm_setup_bhb_slot(__spectre_bhb_loop_k8_start);
1379 			break;
1380 		case 24:
1381 			kvm_setup_bhb_slot(__spectre_bhb_loop_k24_start);
1382 			break;
1383 		case 32:
1384 			kvm_setup_bhb_slot(__spectre_bhb_loop_k32_start);
1385 			break;
1386 		default:
1387 			WARN_ON_ONCE(1);
1388 		}
1389 		this_cpu_set_vectors(EL1_VECTOR_BHB_LOOP);
1390 
1391 		state = SPECTRE_MITIGATED;
1392 	} else if (is_spectre_bhb_fw_affected(SCOPE_LOCAL_CPU)) {
1393 		fw_state = spectre_bhb_get_cpu_fw_mitigation_state();
1394 		if (fw_state == SPECTRE_MITIGATED) {
1395 			kvm_setup_bhb_slot(__smccc_workaround_3_smc_start);
1396 			this_cpu_set_vectors(EL1_VECTOR_BHB_FW);
1397 
1398 			/*
1399 			 * With WA3 in the vectors, the WA1 calls can be
1400 			 * removed.
1401 			 */
1402 			__this_cpu_write(bp_hardening_data.fn, NULL);
1403 
1404 			state = SPECTRE_MITIGATED;
1405 		}
1406 	}
1407 
1408 	update_mitigation_state(&spectre_bhb_state, state);
1409 }
1410 
1411 /* Patched to correct the immediate */
1412 void noinstr spectre_bhb_patch_loop_iter(struct alt_instr *alt,
1413 				   __le32 *origptr, __le32 *updptr, int nr_inst)
1414 {
1415 	u8 rd;
1416 	u32 insn;
1417 	u16 loop_count = spectre_bhb_loop_affected(SCOPE_SYSTEM);
1418 
1419 	BUG_ON(nr_inst != 1); /* MOV -> MOV */
1420 
1421 	if (!IS_ENABLED(CONFIG_MITIGATE_SPECTRE_BRANCH_HISTORY))
1422 		return;
1423 
1424 	insn = le32_to_cpu(*origptr);
1425 	rd = aarch64_insn_decode_register(AARCH64_INSN_REGTYPE_RD, insn);
1426 	insn = aarch64_insn_gen_movewide(rd, loop_count, 0,
1427 					 AARCH64_INSN_VARIANT_64BIT,
1428 					 AARCH64_INSN_MOVEWIDE_ZERO);
1429 	*updptr++ = cpu_to_le32(insn);
1430 }
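/*
 * Editorial note: the alternatives framework invokes the callback above at
 * patch time to rewrite the MOV immediate used by the loop-based BHB vectors
 * with the system-wide count from spectre_bhb_loop_affected(SCOPE_SYSTEM),
 * so one kernel image serves CPUs that need 8, 24 or 32 iterations.
 */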
1431