// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright IBM Corp. 2007, 2009
 * Author(s): Hongjie Yang <hongjie@us.ibm.com>,
 *	      Heiko Carstens <heiko.carstens@de.ibm.com>
 */

#define KMSG_COMPONENT "setup"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <linux/compiler.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/ctype.h>
#include <linux/lockdep.h>
#include <linux/extable.h>
#include <linux/pfn.h>
#include <linux/uaccess.h>
#include <linux/kernel.h>
#include <asm/diag.h>
#include <asm/ebcdic.h>
#include <asm/ipl.h>
#include <asm/lowcore.h>
#include <asm/processor.h>
#include <asm/sections.h>
#include <asm/setup.h>
#include <asm/sysinfo.h>
#include <asm/cpcmd.h>
#include <asm/sclp.h>
#include <asm/facility.h>
#include <asm/boot_data.h>
#include <asm/switch_to.h>
#include "entry.h"

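/*
 * Make sure the TOD clock is running: if it is not, try to set it to the
 * Unix epoch and mirror that value into tod_clock_base and the lowcore.
 * If even that fails there is nothing sensible left to do, so stop in a
 * disabled wait.
 */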
static void __init reset_tod_clock(void)
{
	u64 time;

	if (store_tod_clock(&time) == 0)
		return;
	/* TOD clock not running. Set the clock to Unix Epoch. */
	if (set_tod_clock(TOD_UNIX_EPOCH) != 0 || store_tod_clock(&time) != 0)
		disabled_wait();

	memset(tod_clock_base, 0, 16);
	*(__u64 *) &tod_clock_base[1] = TOD_UNIX_EPOCH;
	S390_lowcore.last_update_clock = TOD_UNIX_EPOCH;
}

/*
 * Initialize storage key for kernel pages
 */
static noinline __init void init_kernel_storage_key(void)
{
#if PAGE_DEFAULT_KEY
	unsigned long end_pfn, init_pfn;

	end_pfn = PFN_UP(__pa(_end));

	for (init_pfn = 0; init_pfn < end_pfn; init_pfn++)
		page_set_storage_key(init_pfn << PAGE_SHIFT,
				     PAGE_DEFAULT_KEY, 0);
#endif
}

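/*
 * Scratch page for the stsi() queries below; like all __initdata it is
 * released once the init phase is over.
 */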
static __initdata char sysinfo_page[PAGE_SIZE] __aligned(PAGE_SIZE);

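/*
 * Figure out what we are running on: LPAR if the configuration level
 * reported by stsi(0.0.0) is at most 2, otherwise inspect the control
 * program identifier from SYSIB 3.2.2. The byte strings below are the
 * EBCDIC encodings of "KVM" and "z/VM".
 */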
static noinline __init void detect_machine_type(void)
{
	struct sysinfo_3_2_2 *vmms = (struct sysinfo_3_2_2 *)&sysinfo_page;

	/* Check current-configuration-level */
	if (stsi(NULL, 0, 0, 0) <= 2) {
		S390_lowcore.machine_flags |= MACHINE_FLAG_LPAR;
		return;
	}
	/* Get virtual-machine cpu information. */
	if (stsi(vmms, 3, 2, 2) || !vmms->count)
		return;

	/* Detect known hypervisors */
	if (!memcmp(vmms->vm[0].cpi, "\xd2\xe5\xd4", 3))
		S390_lowcore.machine_flags |= MACHINE_FLAG_KVM;
	else if (!memcmp(vmms->vm[0].cpi, "\xa9\x61\xe5\xd4", 4))
		S390_lowcore.machine_flags |= MACHINE_FLAG_VM;
}

/*
 * Remove leading, trailing and double whitespace,
 * e.g. "  a   b  " becomes "a b".
 */
static inline void strim_all(char *str)
{
	char *s;

	s = strim(str);
	if (s != str)
		memmove(str, s, strlen(s) + 1);	/* include the terminating NUL */
	while (*str) {
		if (!isspace(*str++))
			continue;
		if (isspace(*str)) {
			s = skip_spaces(str);
			memmove(str, s, strlen(s) + 1);
		}
	}
}

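/*
 * Build the architecture description string, roughly
 * "<manufacturer> <type> <model> <model capacity> (<hypervisor>)",
 * from SYSIB 1.1.1 and, if available, the control program identifier
 * from SYSIB 3.2.2. It is printed as part of dump_stack() output.
 */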
static noinline __init void setup_arch_string(void)
{
	struct sysinfo_1_1_1 *mach = (struct sysinfo_1_1_1 *)&sysinfo_page;
	struct sysinfo_3_2_2 *vm = (struct sysinfo_3_2_2 *)&sysinfo_page;
	char mstr[80], hvstr[17];

	if (stsi(mach, 1, 1, 1))
		return;
	EBCASC(mach->manufacturer, sizeof(mach->manufacturer));
	EBCASC(mach->type, sizeof(mach->type));
	EBCASC(mach->model, sizeof(mach->model));
	EBCASC(mach->model_capacity, sizeof(mach->model_capacity));
	sprintf(mstr, "%-16.16s %-4.4s %-16.16s %-16.16s",
		mach->manufacturer, mach->type,
		mach->model, mach->model_capacity);
	strim_all(mstr);
	if (stsi(vm, 3, 2, 2) == 0 && vm->count) {
		EBCASC(vm->vm[0].cpi, sizeof(vm->vm[0].cpi));
		sprintf(hvstr, "%-16.16s", vm->vm[0].cpi);
		strim_all(hvstr);
	} else {
		sprintf(hvstr, "%s",
			MACHINE_IS_LPAR ? "LPAR" :
			MACHINE_IS_VM ? "z/VM" :
			MACHINE_IS_KVM ? "KVM" : "unknown");
	}
	dump_stack_set_arch_desc("%s (%s)", mstr, hvstr);
}

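/*
 * If the configuration-topology facility (bit 11) is installed, probe
 * the largest selector (MNEST) that stsi(15.1.x) still accepts, counting
 * down from 6, and remember it for the topology code.
 */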
static __init void setup_topology(void)
{
	int max_mnest;

	if (!test_facility(11))
		return;
	S390_lowcore.machine_flags |= MACHINE_FLAG_TOPOLOGY;
	for (max_mnest = 6; max_mnest > 1; max_mnest--) {
		if (stsi(&sysinfo_page, 15, 1, max_mnest) == 0)
			break;
	}
	topology_max_mnest = max_mnest;
}

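/*
 * Program check handler used during early setup: if the failing
 * instruction has an exception table entry, rewrite the old program PSW
 * so that execution resumes at the fixup, temporarily lifting low
 * address protection to be allowed to store into the lowcore. Anything
 * without a fixup is fatal this early.
 */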
static void early_pgm_check_handler(void)
{
	const struct exception_table_entry *fixup;
	unsigned long cr0, cr0_new;
	unsigned long addr;

	addr = S390_lowcore.program_old_psw.addr;
	fixup = s390_search_extables(addr);
	if (!fixup)
		disabled_wait();
	/* Disable low address protection before storing into lowcore. */
	__ctl_store(cr0, 0, 0);
	cr0_new = cr0 & ~(1UL << 28);
	__ctl_load(cr0_new, 0, 0);
	S390_lowcore.program_old_psw.addr = extable_fixup(fixup);
	__ctl_load(cr0, 0, 0);
}

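/*
 * Install temporary external and program check new PSWs that branch to
 * the base assembler handlers, and hook up early_pgm_check_handler() so
 * that exception table fixups work during the facility probing below.
 */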
static noinline __init void setup_lowcore_early(void)
{
	psw_t psw;

	psw.mask = PSW_MASK_BASE | PSW_DEFAULT_KEY | PSW_MASK_EA | PSW_MASK_BA;
	if (IS_ENABLED(CONFIG_KASAN))
		psw.mask |= PSW_MASK_DAT;
	psw.addr = (unsigned long) s390_base_ext_handler;
	S390_lowcore.external_new_psw = psw;
	psw.addr = (unsigned long) s390_base_pgm_handler;
	S390_lowcore.program_new_psw = psw;
	s390_base_pgm_handler_fn = early_pgm_check_handler;
	S390_lowcore.preempt_count = INIT_PREEMPT_COUNT;
}

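/*
 * Set up the alternative facility list, which is a copy of the normal
 * stfle list with facility bit 82 removed unless CONFIG_KERNEL_NOBP is
 * enabled.
 */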
static noinline __init void setup_facility_list(void)
{
	memcpy(S390_lowcore.alt_stfle_fac_list,
	       S390_lowcore.stfle_fac_list,
	       sizeof(S390_lowcore.alt_stfle_fac_list));
	if (!IS_ENABLED(CONFIG_KERNEL_NOBP))
		__clear_facility(82, S390_lowcore.alt_stfle_fac_list);
}

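/*
 * Probe for diagnose 0x9c (directed yield) support by issuing it against
 * our own CPU address. If the diagnose is not available, the resulting
 * program check is caught via the exception table and rc keeps its
 * -EOPNOTSUPP preset, so the machine flag stays clear.
 */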
static __init void detect_diag9c(void)
{
	unsigned int cpu_address;
	int rc;

	cpu_address = stap();
	diag_stat_inc(DIAG_STAT_X09C);
	asm volatile(
		"	diag	%2,0,0x9c\n"
		"0:	la	%0,0\n"
		"1:\n"
		EX_TABLE(0b,1b)
		: "=d" (rc) : "0" (-EOPNOTSUPP), "d" (cpu_address) : "cc");
	if (!rc)
		S390_lowcore.machine_flags |= MACHINE_FLAG_DIAG9C;
}

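/*
 * Translate installed facility bits into MACHINE_FLAG_* bits, and set
 * the matching control register 0 bits for the facilities that need to
 * be enabled explicitly.
 */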
static __init void detect_machine_facilities(void)
{
	if (test_facility(8)) {
		S390_lowcore.machine_flags |= MACHINE_FLAG_EDAT1;
		__ctl_set_bit(0, 23);
	}
	if (test_facility(78))
		S390_lowcore.machine_flags |= MACHINE_FLAG_EDAT2;
	if (test_facility(3))
		S390_lowcore.machine_flags |= MACHINE_FLAG_IDTE;
	if (test_facility(50) && test_facility(73)) {
		S390_lowcore.machine_flags |= MACHINE_FLAG_TE;
		__ctl_set_bit(0, 55);
	}
	if (test_facility(51))
		S390_lowcore.machine_flags |= MACHINE_FLAG_TLB_LC;
	if (test_facility(129)) {
		S390_lowcore.machine_flags |= MACHINE_FLAG_VX;
		__ctl_set_bit(0, 17);
	}
	if (test_facility(130) && !noexec_disabled) {
		S390_lowcore.machine_flags |= MACHINE_FLAG_NX;
		__ctl_set_bit(0, 20);
	}
	if (test_facility(133))
		S390_lowcore.machine_flags |= MACHINE_FLAG_GS;
	if (test_facility(139) && (tod_clock_base[1] & 0x80)) {
		/* Enabled signed clock comparator comparisons */
		S390_lowcore.machine_flags |= MACHINE_FLAG_SCC;
		clock_comparator_max = -1ULL >> 1;
		__ctl_set_bit(0, 53);
	}
	if (IS_ENABLED(CONFIG_PCI) && test_facility(153)) {
		S390_lowcore.machine_flags |= MACHINE_FLAG_PCI_MIO;
		/* the control bit is set during PCI initialization */
	}
}

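/*
 * Preserve the boot CPU's vector registers in the save area used by the
 * CONFIG_CRASH_DUMP code, so a crash dump can include their content.
 */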
static inline void save_vector_registers(void)
{
#ifdef CONFIG_CRASH_DUMP
	if (test_facility(129))
		save_vx_regs(boot_cpu_vector_save_area);
#endif
}

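/*
 * Turn on low address protection and the external interrupt submask
 * bits for emergency signal and external call, which are used for
 * inter-CPU signalling.
 */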
static inline void setup_control_registers(void)
{
	unsigned long reg;

	__ctl_store(reg, 0, 0);
	reg |= CR0_LOW_ADDRESS_PROTECTION;
	reg |= CR0_EMERGENCY_SIGNAL_SUBMASK;
	reg |= CR0_EXTERNAL_CALL_SUBMASK;
	__ctl_load(reg, 0, 0);
}

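/* Put all access registers into a defined (zero) state. */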
static inline void setup_access_registers(void)
{
	unsigned int acrs[NUM_ACRS] = { 0 };

	restore_access_regs(acrs);
}

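/*
 * "novx" kernel parameter: pretend the vector extension facility is not
 * installed and keep its control register 0 enablement bit cleared.
 */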
static int __init disable_vector_extension(char *str)
{
	S390_lowcore.machine_flags &= ~MACHINE_FLAG_VX;
	__ctl_clear_bit(0, 17);
	return 0;
}
early_param("novx", disable_vector_extension);

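/*
 * The kernel command line is handed over from the early boot code via
 * the __bootdata mechanism; copy it into the common boot_command_line
 * buffer.
 */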
char __bootdata(early_command_line)[COMMAND_LINE_SIZE];
static void __init setup_boot_command_line(void)
{
	/* copy arch command line */
	strlcpy(boot_command_line, early_command_line, ARCH_COMMAND_LINE_SIZE);
}

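/*
 * Booting a plain vmlinux ELF image does not work: it lacks the entry
 * point marker (EP_STRING at EP_OFFSET) that the bootable image formats
 * carry, so detect that case and stop with an explanatory message
 * instead of failing obscurely later.
 */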
static void __init check_image_bootable(void)
{
	if (!memcmp(EP_STRING, (void *)EP_OFFSET, strlen(EP_STRING)))
		return;

	sclp_early_printk("Linux kernel boot failure: An attempt to boot a vmlinux ELF image failed.\n");
	sclp_early_printk("This image does not contain all parts necessary for starting up. Use\n");
	sclp_early_printk("bzImage or arch/s390/boot/compressed/vmlinux instead.\n");
	disabled_wait();
}

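/*
 * Early machine setup, called once from the startup assembler code.
 * Runs the detection and setup steps above in order, with lockdep kept
 * off for the duration since the kernel is not far enough along for it.
 */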
void __init startup_init(void)
{
	reset_tod_clock();
	check_image_bootable();
	time_early_init();
	init_kernel_storage_key();
	lockdep_off();
	setup_lowcore_early();
	setup_facility_list();
	detect_machine_type();
	setup_arch_string();
	setup_boot_command_line();
	detect_diag9c();
	detect_machine_facilities();
	save_vector_registers();
	setup_topology();
	sclp_early_detect();
	setup_control_registers();
	setup_access_registers();
	lockdep_on();
}