/*
 * PowerNV setup code.
 *
 * Copyright 2011 IBM Corp.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#undef DEBUG

#include <linux/cpu.h>
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/tty.h>
#include <linux/reboot.h>
#include <linux/init.h>
#include <linux/console.h>
#include <linux/delay.h>
#include <linux/irq.h>
#include <linux/seq_file.h>
#include <linux/of.h>
#include <linux/of_fdt.h>
#include <linux/interrupt.h>
#include <linux/bug.h>
#include <linux/pci.h>
#include <linux/cpufreq.h>

#include <asm/machdep.h>
#include <asm/firmware.h>
#include <asm/xics.h>
#include <asm/xive.h>
#include <asm/opal.h>
#include <asm/kexec.h>
#include <asm/smp.h>
#include <asm/setup.h>
#include <asm/security_features.h>

#include "powernv.h"

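/*
 * fw_feature_is() below looks for a child node of ibm,opal/fw-features named
 * after the feature and checks whether it carries the given boolean property
 * ("enabled" or "disabled"). An illustrative (not verbatim) device tree
 * fragment as exposed by firmware might look like:
 *
 *	ibm,opal {
 *		fw-features {
 *			fw-l1d-thread-split {
 *				enabled;
 *			};
 *			speculation-policy-favor-security {
 *				disabled;
 *			};
 *		};
 *	};
 */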
static bool fw_feature_is(const char *state, const char *name,
			  struct device_node *fw_features)
{
	struct device_node *np;
	bool rc = false;

	np = of_get_child_by_name(fw_features, name);
	if (np) {
		rc = of_property_read_bool(np, state);
		of_node_put(np);
	}

	return rc;
}

static void init_fw_feat_flags(struct device_node *np)
{
	if (fw_feature_is("enabled", "inst-spec-barrier-ori31,31,0", np))
		security_ftr_set(SEC_FTR_SPEC_BAR_ORI31);

	if (fw_feature_is("enabled", "fw-bcctrl-serialized", np))
		security_ftr_set(SEC_FTR_BCCTRL_SERIALISED);

	if (fw_feature_is("enabled", "inst-l1d-flush-ori30,30,0", np))
		security_ftr_set(SEC_FTR_L1D_FLUSH_ORI30);

	if (fw_feature_is("enabled", "inst-l1d-flush-trig2", np))
		security_ftr_set(SEC_FTR_L1D_FLUSH_TRIG2);

	if (fw_feature_is("enabled", "fw-l1d-thread-split", np))
		security_ftr_set(SEC_FTR_L1D_THREAD_PRIV);

	if (fw_feature_is("enabled", "fw-count-cache-disabled", np))
		security_ftr_set(SEC_FTR_COUNT_CACHE_DISABLED);

	if (fw_feature_is("enabled", "fw-count-cache-flush-bcctr2,0,0", np))
		security_ftr_set(SEC_FTR_BCCTR_FLUSH_ASSIST);

	if (fw_feature_is("enabled", "needs-count-cache-flush-on-context-switch", np))
		security_ftr_set(SEC_FTR_FLUSH_COUNT_CACHE);

	/*
	 * The features below are enabled by default, so we instead look to see
	 * if firmware has *disabled* them, and clear them if so.
	 */
	if (fw_feature_is("disabled", "speculation-policy-favor-security", np))
		security_ftr_clear(SEC_FTR_FAVOUR_SECURITY);

	if (fw_feature_is("disabled", "needs-l1d-flush-msr-pr-0-to-1", np))
		security_ftr_clear(SEC_FTR_L1D_FLUSH_PR);

	if (fw_feature_is("disabled", "needs-l1d-flush-msr-hv-1-to-0", np))
		security_ftr_clear(SEC_FTR_L1D_FLUSH_HV);

	if (fw_feature_is("disabled", "needs-spec-barrier-for-bound-checks", np))
		security_ftr_clear(SEC_FTR_BNDS_CHK_SPEC_BAR);
}

static void pnv_setup_rfi_flush(void)
{
	struct device_node *np, *fw_features;
	enum l1d_flush_type type;
	bool enable;

	/* Default to fallback in case fw-features are not available */
	type = L1D_FLUSH_FALLBACK;

	np = of_find_node_by_name(NULL, "ibm,opal");
	fw_features = of_get_child_by_name(np, "fw-features");
	of_node_put(np);

	if (fw_features) {
		init_fw_feat_flags(fw_features);
		of_node_put(fw_features);

		if (security_ftr_enabled(SEC_FTR_L1D_FLUSH_TRIG2))
			type = L1D_FLUSH_MTTRIG;

		if (security_ftr_enabled(SEC_FTR_L1D_FLUSH_ORI30))
			type = L1D_FLUSH_ORI;
	}

	enable = security_ftr_enabled(SEC_FTR_FAVOUR_SECURITY) &&
		 (security_ftr_enabled(SEC_FTR_L1D_FLUSH_PR) ||
		  security_ftr_enabled(SEC_FTR_L1D_FLUSH_HV));

	setup_rfi_flush(type, enable);
	setup_count_cache_flush();
}
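/*
 * Note the ordering above: if firmware advertises both the trig2 and the
 * ori30 L1D flush instructions, the ori30 variant wins because it is
 * checked last. The fallback flush is only used when neither instruction
 * is advertised or the fw-features node is missing entirely.
 */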

static void __init pnv_setup_arch(void)
{
	set_arch_panic_timeout(10, ARCH_PANIC_TIMEOUT);

	pnv_setup_rfi_flush();
	setup_stf_barrier();

	/* Initialize SMP */
	pnv_smp_init();

	/* Setup PCI */
	pnv_pci_init();

	/* Setup RTC and NVRAM callbacks */
	if (firmware_has_feature(FW_FEATURE_OPAL))
		opal_nvram_init();

	/* Enable NAP mode */
	powersave_nap = 1;

	/* XXX PMCS */
}

static void __init pnv_init(void)
{
	/*
	 * Initialize the LPC bus now so that legacy serial
	 * ports can be found on it
	 */
	opal_lpc_init();

#ifdef CONFIG_HVC_OPAL
	if (firmware_has_feature(FW_FEATURE_OPAL))
		hvc_opal_init_early();
	else
#endif
		add_preferred_console("hvc", 0, NULL);
}

static void __init pnv_init_IRQ(void)
{
	/* Try using a XIVE if available, otherwise use a XICS */
	if (!xive_native_init())
		xics_init();

	WARN_ON(!ppc_md.get_irq);
}

static void pnv_show_cpuinfo(struct seq_file *m)
{
	struct device_node *root;
	const char *model = "";

	root = of_find_node_by_path("/");
	if (root)
		model = of_get_property(root, "model", NULL);
	seq_printf(m, "machine\t\t: PowerNV %s\n", model);
	if (firmware_has_feature(FW_FEATURE_OPAL))
		seq_printf(m, "firmware\t: OPAL\n");
	else
		seq_printf(m, "firmware\t: BML\n");
	of_node_put(root);
	if (radix_enabled())
		seq_printf(m, "MMU\t\t: Radix\n");
	else
		seq_printf(m, "MMU\t\t: Hash\n");
}

static void pnv_prepare_going_down(void)
{
	/*
	 * Disable all notifiers from OPAL, we can't
	 * service interrupts anymore anyway
	 */
	opal_event_shutdown();

	/* Soft disable interrupts */
	local_irq_disable();

	/*
	 * Return secondary CPUs to firmware if a flash update
	 * is pending, otherwise we will get all sorts of error
	 * messages about CPUs being stuck etc. This will also
	 * have the side effect of hard disabling interrupts, so
	 * past this point the kernel is effectively dead.
	 */
	opal_flash_term_callback();
}

static void __noreturn pnv_restart(char *cmd)
{
	long rc = OPAL_BUSY;

	pnv_prepare_going_down();

	while (rc == OPAL_BUSY || rc == OPAL_BUSY_EVENT) {
		rc = opal_cec_reboot();
		if (rc == OPAL_BUSY_EVENT)
			opal_poll_events(NULL);
		else
			mdelay(10);
	}
	for (;;)
		opal_poll_events(NULL);
}
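/*
 * pnv_restart() above and pnv_power_off() below share the same pattern:
 * rc starts as OPAL_BUSY so the loop is entered, the OPAL call is retried
 * while firmware reports it is busy, and once the request is accepted we
 * simply poll OPAL events forever while waiting for the platform to act.
 */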

static void __noreturn pnv_power_off(void)
{
	long rc = OPAL_BUSY;

	pnv_prepare_going_down();

	while (rc == OPAL_BUSY || rc == OPAL_BUSY_EVENT) {
		rc = opal_cec_power_down(0);
		if (rc == OPAL_BUSY_EVENT)
			opal_poll_events(NULL);
		else
			mdelay(10);
	}
	for (;;)
		opal_poll_events(NULL);
}

static void __noreturn pnv_halt(void)
{
	pnv_power_off();
}

static void pnv_progress(char *s, unsigned short hex)
{
}

static void pnv_shutdown(void)
{
	/* Let the PCI code clear up IODA tables */
	pnv_pci_shutdown();

	/*
	 * Stop OPAL activity: Unregister all OPAL interrupts so they
	 * don't fire up while we kexec and make sure all potentially
	 * DMA'ing ops are complete (such as dump retrieval).
	 */
	opal_shutdown();
}

#ifdef CONFIG_KEXEC_CORE
static void pnv_kexec_wait_secondaries_down(void)
{
	int my_cpu, i, notified = -1;

	my_cpu = get_cpu();

	for_each_online_cpu(i) {
		uint8_t status;
		int64_t rc, timeout = 1000;

		if (i == my_cpu)
			continue;

		for (;;) {
			rc = opal_query_cpu_status(get_hard_smp_processor_id(i),
						   &status);
			if (rc != OPAL_SUCCESS || status != OPAL_THREAD_STARTED)
				break;
			barrier();
			if (i != notified) {
				printk(KERN_INFO "kexec: waiting for cpu %d "
				       "(physical %d) to enter OPAL\n",
				       i, paca[i].hw_cpu_id);
				notified = i;
			}

			/*
			 * On crash secondaries might be unreachable or hung,
			 * so timeout if we've waited too long
			 */
			mdelay(1);
			if (timeout-- == 0) {
				printk(KERN_ERR "kexec: timed out waiting for "
				       "cpu %d (physical %d) to enter OPAL\n",
				       i, paca[i].hw_cpu_id);
				break;
			}
		}
	}
}
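/*
 * With the 1ms mdelay() per iteration and a starting budget of 1000, the
 * loop above waits roughly one second per CPU before giving up on it; this
 * matters mainly on the crash kexec path, where secondaries may be hung.
 */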

static void pnv_kexec_cpu_down(int crash_shutdown, int secondary)
{
	u64 reinit_flags;

	if (xive_enabled())
		xive_kexec_teardown_cpu(secondary);
	else
		xics_kexec_teardown_cpu(secondary);

	/* On OPAL, we return all CPUs to firmware */
	if (!firmware_has_feature(FW_FEATURE_OPAL))
		return;

	if (secondary) {
		/* Return secondary CPUs to firmware on OPAL v3 */
		mb();
		get_paca()->kexec_state = KEXEC_STATE_REAL_MODE;
		mb();

		/* Return the CPU to OPAL */
		opal_return_cpu();
	} else {
		/* Primary waits for the secondaries to have reached OPAL */
		pnv_kexec_wait_secondaries_down();

		/* Switch XIVE back to emulation mode */
		if (xive_enabled())
			xive_shutdown();

		/*
		 * We might be running as little-endian - now that interrupts
		 * are disabled, reset the HILE bit to big-endian so we don't
		 * take interrupts in the wrong endian later.
		 *
		 * We reinit to enable both radix and hash on P9 to ensure
		 * the mode used by the next kernel is always supported.
		 */
		reinit_flags = OPAL_REINIT_CPUS_HILE_BE;
		if (cpu_has_feature(CPU_FTR_ARCH_300))
			reinit_flags |= OPAL_REINIT_CPUS_MMU_RADIX |
				OPAL_REINIT_CPUS_MMU_HASH;
		opal_reinit_cpus(reinit_flags);
	}
}
#endif /* CONFIG_KEXEC_CORE */

#ifdef CONFIG_MEMORY_HOTPLUG_SPARSE
static unsigned long pnv_memory_block_size(void)
{
	/*
	 * We map the kernel linear region with 1GB large pages on radix. For
	 * memory hot unplug to work our memory block size must be at least
	 * this size.
	 */
	if (radix_enabled())
		return 1UL * 1024 * 1024 * 1024;
	else
		return 256UL * 1024 * 1024;
}
#endif

static void __init pnv_setup_machdep_opal(void)
{
	ppc_md.get_boot_time = opal_get_boot_time;
	ppc_md.restart = pnv_restart;
	pm_power_off = pnv_power_off;
	ppc_md.halt = pnv_halt;
	ppc_md.machine_check_exception = opal_machine_check;
	ppc_md.mce_check_early_recovery = opal_mce_check_early_recovery;
	ppc_md.hmi_exception_early = opal_hmi_exception_early;
	ppc_md.handle_hmi_exception = opal_handle_hmi_exception;
}

static int __init pnv_probe(void)
{
	if (!of_machine_is_compatible("ibm,powernv"))
		return 0;

	if (firmware_has_feature(FW_FEATURE_OPAL))
		pnv_setup_machdep_opal();

	pr_debug("PowerNV detected !\n");

	pnv_init();

	return 1;
}

/*
 * Returns the cpu frequency for 'cpu' in Hz. This is used by
 * /proc/cpuinfo
 */
static unsigned long pnv_get_proc_freq(unsigned int cpu)
{
	unsigned long ret_freq;

	ret_freq = cpufreq_get(cpu) * 1000ul;

	/*
	 * If the backend cpufreq driver does not exist,
	 * then fall back to the old way of reporting the clock rate.
	 */
	if (!ret_freq)
		ret_freq = ppc_proc_freq;
	return ret_freq;
}
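/*
 * Note: cpufreq_get() reports the frequency in kHz, hence the multiply by
 * 1000 above to get Hz. When no cpufreq driver is bound to the CPU it
 * returns 0, so we fall back to ppc_proc_freq, the boot-time processor
 * frequency (typically taken from the device tree).
 */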

define_machine(powernv) {
	.name			= "PowerNV",
	.probe			= pnv_probe,
	.setup_arch		= pnv_setup_arch,
	.init_IRQ		= pnv_init_IRQ,
	.show_cpuinfo		= pnv_show_cpuinfo,
	.get_proc_freq		= pnv_get_proc_freq,
	.progress		= pnv_progress,
	.machine_shutdown	= pnv_shutdown,
	.power_save		= NULL,
	.calibrate_decr		= generic_calibrate_decr,
#ifdef CONFIG_KEXEC_CORE
	.kexec_cpu_down		= pnv_kexec_cpu_down,
#endif
#ifdef CONFIG_MEMORY_HOTPLUG_SPARSE
	.memory_block_size	= pnv_memory_block_size,
#endif
};