• Home
  • Line#
  • Scopes#
  • Navigate#
  • Raw
  • Download
1 // SPDX-License-Identifier: GPL-2.0
2 #include <linux/arm-smccc.h>
3 #include <linux/kernel.h>
4 #include <linux/psci.h>
5 #include <linux/smp.h>
6 
7 #include <asm/cp15.h>
8 #include <asm/cputype.h>
9 #include <asm/proc-fns.h>
10 #include <asm/spectre.h>
11 #include <asm/system_misc.h>
12 
#ifdef CONFIG_ARM_PSCI
/*
 * Ask the firmware (via the SMCCC 1.1 conduit) whether it implements
 * the Spectre v2 workaround for the calling CPU.
 *
 * Returns:
 *   SPECTRE_MITIGATED  - ARM_SMCCC_ARCH_WORKAROUND_1 is implemented
 *   SPECTRE_UNAFFECTED - firmware reports this CPU needs no workaround
 *   SPECTRE_VULNERABLE - any other/error result from firmware
 */
static int __maybe_unused spectre_v2_get_cpu_fw_mitigation_state(void)
{
	struct arm_smccc_res res;

	arm_smccc_1_1_invoke(ARM_SMCCC_ARCH_FEATURES_FUNC_ID,
			     ARM_SMCCC_ARCH_WORKAROUND_1, &res);

	/* a0 is a signed return code; negative values mean "not supported". */
	switch ((int)res.a0) {
	case SMCCC_RET_SUCCESS:
		return SPECTRE_MITIGATED;

	case SMCCC_ARCH_WORKAROUND_RET_UNAFFECTED:
		return SPECTRE_UNAFFECTED;

	default:
		return SPECTRE_VULNERABLE;
	}
}
#else
/* No PSCI means no firmware call interface: assume vulnerable. */
static int __maybe_unused spectre_v2_get_cpu_fw_mitigation_state(void)
{
	return SPECTRE_VULNERABLE;
}
#endif
38 
#ifdef CONFIG_HARDEN_BRANCH_PREDICTOR
/* Per-CPU hook invoked to invalidate branch prediction state; installed
 * by spectre_v2_install_workaround() below. */
DEFINE_PER_CPU(harden_branch_predictor_fn_t, harden_branch_predictor_fn);

/* Alternative cpu_do_switch_mm implementations defined in assembly
 * elsewhere — NOTE(review): names suggest they additionally run
 * ICIALLU/BPIALL or trap to firmware via SMC/HVC; confirm in proc-v7
 * assembly sources. */
extern void cpu_v7_iciallu_switch_mm(phys_addr_t pgd_phys, struct mm_struct *mm);
extern void cpu_v7_bpiall_switch_mm(phys_addr_t pgd_phys, struct mm_struct *mm);
extern void cpu_v7_smc_switch_mm(phys_addr_t pgd_phys, struct mm_struct *mm);
extern void cpu_v7_hvc_switch_mm(phys_addr_t pgd_phys, struct mm_struct *mm);
46 
/* Invalidate the entire branch predictor array (CP15 BPIALL). */
static void harden_branch_predictor_bpiall(void)
{
	write_sysreg(0, BPIALL);
}
51 
/* Invalidate the entire instruction cache (CP15 ICIALLU); used as the
 * hardening hook on parts selecting SPECTRE_V2_METHOD_ICIALLU. */
static void harden_branch_predictor_iciallu(void)
{
	write_sysreg(0, ICIALLU);
}
56 
/* Invoke the firmware's Spectre v2 workaround through the SMC conduit. */
static void __maybe_unused call_smc_arch_workaround_1(void)
{
	arm_smccc_1_1_smc(ARM_SMCCC_ARCH_WORKAROUND_1, NULL);
}
61 
/* Invoke the hypervisor's Spectre v2 workaround through the HVC conduit. */
static void __maybe_unused call_hvc_arch_workaround_1(void)
{
	arm_smccc_1_1_hvc(ARM_SMCCC_ARCH_WORKAROUND_1, NULL);
}
66 
spectre_v2_install_workaround(unsigned int method)67 static unsigned int spectre_v2_install_workaround(unsigned int method)
68 {
69 	const char *spectre_v2_method = NULL;
70 	int cpu = smp_processor_id();
71 
72 	if (per_cpu(harden_branch_predictor_fn, cpu))
73 		return SPECTRE_MITIGATED;
74 
75 	switch (method) {
76 	case SPECTRE_V2_METHOD_BPIALL:
77 		per_cpu(harden_branch_predictor_fn, cpu) =
78 			harden_branch_predictor_bpiall;
79 		spectre_v2_method = "BPIALL";
80 		break;
81 
82 	case SPECTRE_V2_METHOD_ICIALLU:
83 		per_cpu(harden_branch_predictor_fn, cpu) =
84 			harden_branch_predictor_iciallu;
85 		spectre_v2_method = "ICIALLU";
86 		break;
87 
88 	case SPECTRE_V2_METHOD_HVC:
89 		per_cpu(harden_branch_predictor_fn, cpu) =
90 			call_hvc_arch_workaround_1;
91 		cpu_do_switch_mm = cpu_v7_hvc_switch_mm;
92 		spectre_v2_method = "hypervisor";
93 		break;
94 
95 	case SPECTRE_V2_METHOD_SMC:
96 		per_cpu(harden_branch_predictor_fn, cpu) =
97 			call_smc_arch_workaround_1;
98 		cpu_do_switch_mm = cpu_v7_smc_switch_mm;
99 		spectre_v2_method = "firmware";
100 		break;
101 	}
102 
103 	if (spectre_v2_method)
104 		pr_info("CPU%u: Spectre v2: using %s workaround\n",
105 			smp_processor_id(), spectre_v2_method);
106 
107 	return SPECTRE_MITIGATED;
108 }
#else
/* Branch predictor hardening compiled out: report vulnerable once. */
static unsigned int spectre_v2_install_workaround(unsigned int method)
{
	pr_info_once("Spectre V2: workarounds disabled by configuration\n");

	return SPECTRE_VULNERABLE;
}
#endif
117 
/*
 * Classify this CPU by part number, select the appropriate Spectre v2
 * mitigation method, install it and record the resulting state.
 */
static void cpu_v7_spectre_v2_init(void)
{
	unsigned int state, method = 0;

	switch (read_cpuid_part()) {
	/* These parts can flush the BP with BPIALL. */
	case ARM_CPU_PART_CORTEX_A8:
	case ARM_CPU_PART_CORTEX_A9:
	case ARM_CPU_PART_CORTEX_A12:
	case ARM_CPU_PART_CORTEX_A17:
	case ARM_CPU_PART_CORTEX_A73:
	case ARM_CPU_PART_CORTEX_A75:
		state = SPECTRE_MITIGATED;
		method = SPECTRE_V2_METHOD_BPIALL;
		break;

	/* These parts need a full I-cache invalidate instead. */
	case ARM_CPU_PART_CORTEX_A15:
	case ARM_CPU_PART_BRAHMA_B15:
		state = SPECTRE_MITIGATED;
		method = SPECTRE_V2_METHOD_ICIALLU;
		break;

	case ARM_CPU_PART_BRAHMA_B53:
		/* Requires no workaround */
		state = SPECTRE_UNAFFECTED;
		break;

	default:
		/* Other ARM CPUs require no workaround */
		if (read_cpuid_implementor() == ARM_CPU_IMP_ARM) {
			state = SPECTRE_UNAFFECTED;
			break;
		}
		/* fallthrough */
	/* Cortex A57/A72 require firmware workaround */
	case ARM_CPU_PART_CORTEX_A57:
	case ARM_CPU_PART_CORTEX_A72: {
		struct arm_smccc_res res;

		/* Firmware must actually implement the workaround ... */
		state = spectre_v2_get_cpu_fw_mitigation_state();
		if (state != SPECTRE_MITIGATED)
			break;

		/* ... and speak SMCCC 1.1 or later — NOTE(review):
		 * presumably 1.0 lacks the calling-convention guarantees
		 * needed for ARCH_WORKAROUND_1; confirm against the SMCCC
		 * specification. */
		if (psci_ops.smccc_version == SMCCC_VERSION_1_0)
			break;

		/* Probe the conduit and pick the matching call method. */
		switch (psci_ops.conduit) {
		case PSCI_CONDUIT_HVC:
			arm_smccc_1_1_hvc(ARM_SMCCC_ARCH_FEATURES_FUNC_ID,
					  ARM_SMCCC_ARCH_WORKAROUND_1, &res);
			if ((int)res.a0 != 0)
				break;
			method = SPECTRE_V2_METHOD_HVC;
			break;

		case PSCI_CONDUIT_SMC:
			arm_smccc_1_1_smc(ARM_SMCCC_ARCH_FEATURES_FUNC_ID,
					  ARM_SMCCC_ARCH_WORKAROUND_1, &res);
			if ((int)res.a0 != 0)
				break;
			method = SPECTRE_V2_METHOD_SMC;
			break;

		default:
			state = SPECTRE_VULNERABLE;
			break;
		}
	}
	}

	if (state == SPECTRE_MITIGATED)
		state = spectre_v2_install_workaround(method);

	spectre_v2_update_state(state, method);
}
192 
#ifdef CONFIG_HARDEN_BRANCH_HISTORY
/* System-wide BHB method; 0 until the first CPU installs a workaround. */
static int spectre_bhb_method;
195 
spectre_bhb_method_name(int method)196 static const char *spectre_bhb_method_name(int method)
197 {
198 	switch (method) {
199 	case SPECTRE_V2_METHOD_LOOP8:
200 		return "loop";
201 
202 	case SPECTRE_V2_METHOD_BPIALL:
203 		return "BPIALL";
204 
205 	default:
206 		return "unknown";
207 	}
208 }
209 
/*
 * Enable @method as the system-wide Spectre BHB workaround.
 *
 * All CPUs must agree on a single method because the exception vectors
 * are patched globally (spectre_bhb_update_vectors()): a later CPU
 * asking for a different method is reported and leaves the system
 * vulnerable.  Returns SPECTRE_MITIGATED on success.
 */
static int spectre_bhb_install_workaround(int method)
{
	if (spectre_bhb_method != method) {
		if (spectre_bhb_method) {
			pr_err("CPU%u: Spectre BHB: method disagreement, system vulnerable\n",
			       smp_processor_id());

			return SPECTRE_VULNERABLE;
		}

		if (spectre_bhb_update_vectors(method) == SPECTRE_VULNERABLE)
			return SPECTRE_VULNERABLE;

		spectre_bhb_method = method;

		pr_info("CPU%u: Spectre BHB: enabling %s workaround for all CPUs\n",
			smp_processor_id(), spectre_bhb_method_name(method));
	}

	return SPECTRE_MITIGATED;
}
#else
/* Branch history hardening compiled out: report vulnerable. */
static int spectre_bhb_install_workaround(int method)
{
	return SPECTRE_VULNERABLE;
}
#endif
237 
cpu_v7_spectre_bhb_init(void)238 static void cpu_v7_spectre_bhb_init(void)
239 {
240 	unsigned int state, method = 0;
241 
242 	switch (read_cpuid_part()) {
243 	case ARM_CPU_PART_CORTEX_A15:
244 	case ARM_CPU_PART_BRAHMA_B15:
245 	case ARM_CPU_PART_CORTEX_A57:
246 	case ARM_CPU_PART_CORTEX_A72:
247 		state = SPECTRE_MITIGATED;
248 		method = SPECTRE_V2_METHOD_LOOP8;
249 		break;
250 
251 	case ARM_CPU_PART_CORTEX_A73:
252 	case ARM_CPU_PART_CORTEX_A75:
253 		state = SPECTRE_MITIGATED;
254 		method = SPECTRE_V2_METHOD_BPIALL;
255 		break;
256 
257 	default:
258 		state = SPECTRE_UNAFFECTED;
259 		break;
260 	}
261 
262 	if (state == SPECTRE_MITIGATED)
263 		state = spectre_bhb_install_workaround(method);
264 
265 	spectre_v2_update_state(state, method);
266 }
267 
/*
 * Check that every bit in @mask is set in the auxiliary control
 * register (ACTLR).  If not, print @msg (once, tracked via *warned)
 * and return false; return true when all bits are set.
 */
static __maybe_unused bool cpu_v7_check_auxcr_set(bool *warned,
						  u32 mask, const char *msg)
{
	u32 aux_cr;

	/* Read ACTLR: CP15 c1, c0, opcode2 1. */
	asm("mrc p15, 0, %0, c1, c0, 1" : "=r" (aux_cr));

	if ((aux_cr & mask) != mask) {
		if (!*warned)
			pr_err("CPU%u: %s", smp_processor_id(), msg);
		*warned = true;
		return false;
	}
	return true;
}
283 
/* Tracks whether the IBE warning was already printed on this CPU. */
static DEFINE_PER_CPU(bool, spectre_warned);

/*
 * Return true when branch predictor hardening is configured AND the
 * IBE bit (@bit) is set in ACTLR; otherwise warn once per CPU and
 * return false.
 */
static bool check_spectre_auxcr(bool *warned, u32 bit)
{
	return IS_ENABLED(CONFIG_HARDEN_BRANCH_PREDICTOR) &&
		cpu_v7_check_auxcr_set(warned, bit,
				       "Spectre v2: firmware did not set auxiliary control register IBE bit, system vulnerable\n");
}
292 
/* Cortex-A8 bug init: IBE is ACTLR bit 6 on this part. */
void cpu_v7_ca8_ibe(void)
{
	if (check_spectre_auxcr(this_cpu_ptr(&spectre_warned), BIT(6)))
		cpu_v7_spectre_v2_init();
}
298 
/* Cortex-A15 bug init: IBE is ACTLR bit 0; BHB init runs regardless. */
void cpu_v7_ca15_ibe(void)
{
	if (check_spectre_auxcr(this_cpu_ptr(&spectre_warned), BIT(0)))
		cpu_v7_spectre_v2_init();
	cpu_v7_spectre_bhb_init();
}
305 
/* Generic v7 bug init: run both Spectre v2 and Spectre BHB setup. */
void cpu_v7_bugs_init(void)
{
	cpu_v7_spectre_v2_init();
	cpu_v7_spectre_bhb_init();
}
311