/*
 * arch/s390/kernel/setup.c
 *
 * S390 version
 * Copyright (C) IBM Corp. 1999,2012
 * Author(s): Hartmut Penner (hp@de.ibm.com),
 *            Martin Schwidefsky (schwidefsky@de.ibm.com)
 *
 * Derived from "arch/i386/kernel/setup.c"
 * Copyright (C) 1995, Linus Torvalds
 */

/*
 * This file handles the architecture-dependent parts of initialization
 */

#define KMSG_COMPONENT "setup"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <linux/errno.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/memblock.h>
#include <linux/mm.h>
#include <linux/stddef.h>
#include <linux/unistd.h>
#include <linux/ptrace.h>
#include <linux/user.h>
#include <linux/tty.h>
#include <linux/ioport.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/initrd.h>
#include <linux/bootmem.h>
#include <linux/root_dev.h>
#include <linux/console.h>
#include <linux/kernel_stat.h>
#include <linux/device.h>
#include <linux/notifier.h>
#include <linux/pfn.h>
#include <linux/ctype.h>
#include <linux/reboot.h>
#include <linux/topology.h>
#include <linux/ftrace.h>
#include <linux/kexec.h>
#include <linux/crash_dump.h>
#include <linux/memory.h>
#include <linux/compat.h>

#include <asm/ipl.h>
#include <asm/uaccess.h>
#include <asm/facility.h>
#include <asm/smp.h>
#include <asm/mmu_context.h>
#include <asm/cpcmd.h>
#include <asm/lowcore.h>
#include <asm/irq.h>
#include <asm/page.h>
#include <asm/ptrace.h>
#include <asm/sections.h>
#include <asm/ebcdic.h>
#include <asm/kvm_virtio.h>
#include <asm/diag.h>
#include <asm/os_info.h>
#include "entry.h"

long psw_kernel_bits	= PSW_DEFAULT_KEY | PSW_MASK_BASE | PSW_ASC_PRIMARY |
			  PSW_MASK_EA | PSW_MASK_BA;
long psw_user_bits	= PSW_MASK_DAT | PSW_MASK_IO | PSW_MASK_EXT |
			  PSW_DEFAULT_KEY | PSW_MASK_BASE | PSW_MASK_MCHECK |
			  PSW_MASK_PSTATE | PSW_ASC_HOME;

/*
 * User copy operations.
 */
struct uaccess_ops uaccess;
EXPORT_SYMBOL(uaccess);

/*
 * Machine setup.
 */
unsigned int console_mode = 0;
EXPORT_SYMBOL(console_mode);

unsigned int console_devno = -1;
EXPORT_SYMBOL(console_devno);

unsigned int console_irq = -1;
EXPORT_SYMBOL(console_irq);

unsigned long elf_hwcap = 0;
char elf_platform[ELF_PLATFORM_SIZE];

struct mem_chunk __initdata memory_chunk[MEMORY_CHUNKS];

int __initdata memory_end_set;
unsigned long __initdata memory_end;

unsigned long VMALLOC_START;
EXPORT_SYMBOL(VMALLOC_START);

unsigned long VMALLOC_END;
EXPORT_SYMBOL(VMALLOC_END);

struct page *vmemmap;
EXPORT_SYMBOL(vmemmap);

/* An array with a pointer to the lowcore of every CPU. */
struct _lowcore *lowcore_ptr[NR_CPUS];
EXPORT_SYMBOL(lowcore_ptr);

/*
 * This is set up by the setup-routine at boot-time.
 * For S390 we need to find out what we have to set up
 * using address 0x10400 ...
 */

#include <asm/setup.h>

/*
 * condev= and conmode= setup parameters.
 */

static int __init condev_setup(char *str)
{
	int vdev;

	vdev = simple_strtoul(str, &str, 0);
	if (vdev >= 0 && vdev < 65536) {
		console_devno = vdev;
		console_irq = -1;
	}
	return 1;
}

__setup("condev=", condev_setup);

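/*
 * Tell the console layer which console device to prefer, depending on
 * the machine and console type detected at boot.
 */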
static void __init set_preferred_console(void)
{
	if (MACHINE_IS_KVM)
		add_preferred_console("hvc", 0, NULL);
	else if (CONSOLE_IS_3215 || CONSOLE_IS_SCLP)
		add_preferred_console("ttyS", 0, NULL);
	else if (CONSOLE_IS_3270)
		add_preferred_console("tty3270", 0, NULL);
}

static int __init conmode_setup(char *str)
{
#if defined(CONFIG_SCLP_CONSOLE) || defined(CONFIG_SCLP_VT220_CONSOLE)
	if (strncmp(str, "hwc", 4) == 0 || strncmp(str, "sclp", 5) == 0)
		SET_CONSOLE_SCLP;
#endif
#if defined(CONFIG_TN3215_CONSOLE)
	if (strncmp(str, "3215", 5) == 0)
		SET_CONSOLE_3215;
#endif
#if defined(CONFIG_TN3270_CONSOLE)
	if (strncmp(str, "3270", 5) == 0)
		SET_CONSOLE_3270;
#endif
	set_preferred_console();
	return 1;
}

__setup("conmode=", conmode_setup);

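/*
 * Choose a default console mode when no conmode= parameter was given.
 * Under z/VM the console device and terminal mode are queried with CP
 * QUERY commands; otherwise the SCLP console is used if configured.
 */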
static void __init conmode_default(void)
{
	char query_buffer[1024];
	char *ptr;

	if (MACHINE_IS_VM) {
		cpcmd("QUERY CONSOLE", query_buffer, 1024, NULL);
		console_devno = simple_strtoul(query_buffer + 5, NULL, 16);
		ptr = strstr(query_buffer, "SUBCHANNEL =");
		console_irq = simple_strtoul(ptr + 13, NULL, 16);
		cpcmd("QUERY TERM", query_buffer, 1024, NULL);
		ptr = strstr(query_buffer, "CONMODE");
		/*
		 * Set the conmode to 3215 so that the device recognition
		 * will set the cu_type of the console to 3215. If the
		 * conmode is 3270 and we don't set it back then both
		 * 3215 and the 3270 driver will try to access the console
		 * device (3215 as console and 3270 as normal tty).
		 */
		cpcmd("TERM CONMODE 3215", NULL, 0, NULL);
		if (ptr == NULL) {
#if defined(CONFIG_SCLP_CONSOLE) || defined(CONFIG_SCLP_VT220_CONSOLE)
			SET_CONSOLE_SCLP;
#endif
			return;
		}
		if (strncmp(ptr + 8, "3270", 4) == 0) {
#if defined(CONFIG_TN3270_CONSOLE)
			SET_CONSOLE_3270;
#elif defined(CONFIG_TN3215_CONSOLE)
			SET_CONSOLE_3215;
#elif defined(CONFIG_SCLP_CONSOLE) || defined(CONFIG_SCLP_VT220_CONSOLE)
			SET_CONSOLE_SCLP;
#endif
		} else if (strncmp(ptr + 8, "3215", 4) == 0) {
#if defined(CONFIG_TN3215_CONSOLE)
			SET_CONSOLE_3215;
#elif defined(CONFIG_TN3270_CONSOLE)
			SET_CONSOLE_3270;
#elif defined(CONFIG_SCLP_CONSOLE) || defined(CONFIG_SCLP_VT220_CONSOLE)
			SET_CONSOLE_SCLP;
#endif
		}
	} else {
#if defined(CONFIG_SCLP_CONSOLE) || defined(CONFIG_SCLP_VT220_CONSOLE)
		SET_CONSOLE_SCLP;
#endif
	}
}

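/*
 * For a zfcp dump kernel, restrict the common I/O layer to the dump
 * device and the console device and quiet down the console.
 */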
#ifdef CONFIG_ZFCPDUMP
static void __init setup_zfcpdump(unsigned int console_devno)
{
	static char str[41];

	if (ipl_info.type != IPL_TYPE_FCP_DUMP)
		return;
	if (OLDMEM_BASE)
		return;
	if (console_devno != -1)
		sprintf(str, " cio_ignore=all,!0.0.%04x,!0.0.%04x",
			ipl_info.data.fcp.dev_id.devno, console_devno);
	else
		sprintf(str, " cio_ignore=all,!0.0.%04x",
			ipl_info.data.fcp.dev_id.devno);
	strcat(boot_command_line, str);
	console_loglevel = 2;
}
#else
static inline void setup_zfcpdump(unsigned int console_devno) {}
#endif /* CONFIG_ZFCPDUMP */

/*
 * Reboot, halt and power_off stubs. They just call _machine_restart,
 * _machine_halt or _machine_power_off.
 */

void machine_restart(char *command)
{
	if ((!in_interrupt() && !in_atomic()) || oops_in_progress)
		/*
		 * Only unblank the console if we are called in enabled
		 * context or a bust_spinlocks cleared the way for us.
		 */
		console_unblank();
	_machine_restart(command);
}

void machine_halt(void)
{
	if (!in_interrupt() || oops_in_progress)
		/*
		 * Only unblank the console if we are called in enabled
		 * context or a bust_spinlocks cleared the way for us.
		 */
		console_unblank();
	_machine_halt();
}

void machine_power_off(void)
{
	if (!in_interrupt() || oops_in_progress)
		/*
		 * Only unblank the console if we are called in enabled
		 * context or a bust_spinlocks cleared the way for us.
		 */
		console_unblank();
	_machine_power_off();
}

/*
 * Dummy power off function.
 */
void (*pm_power_off)(void) = machine_power_off;

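/* mem= kernel parameter: limit the amount of memory used by the kernel. */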
static int __init early_parse_mem(char *p)
{
	memory_end = memparse(p, &p);
	memory_end_set = 1;
	return 0;
}
early_param("mem", early_parse_mem);

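/* vmalloc= kernel parameter: set the size of the vmalloc area. */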
static int __init parse_vmalloc(char *arg)
{
	if (!arg)
		return -EINVAL;
	VMALLOC_END = (memparse(arg, &arg) + PAGE_SIZE - 1) & PAGE_MASK;
	return 0;
}
early_param("vmalloc", parse_vmalloc);

unsigned int user_mode = HOME_SPACE_MODE;
EXPORT_SYMBOL_GPL(user_mode);

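/*
 * Switch the kernel to the home address space and user space to the
 * primary address space, and select the matching user copy operations:
 * mvcos based if the facility is available, page table walk otherwise.
 * Returns 1 if mvcos is used, 0 otherwise.
 */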
static int set_amode_primary(void)
{
	psw_kernel_bits = (psw_kernel_bits & ~PSW_MASK_ASC) | PSW_ASC_HOME;
	psw_user_bits = (psw_user_bits & ~PSW_MASK_ASC) | PSW_ASC_PRIMARY;
#ifdef CONFIG_COMPAT
	psw32_user_bits =
		(psw32_user_bits & ~PSW32_MASK_ASC) | PSW32_ASC_PRIMARY;
#endif

	if (MACHINE_HAS_MVCOS) {
		memcpy(&uaccess, &uaccess_mvcos_switch, sizeof(uaccess));
		return 1;
	} else {
		memcpy(&uaccess, &uaccess_pt, sizeof(uaccess));
		return 0;
	}
}

/*
 * Switch kernel/user addressing modes?
 */
static int __init early_parse_switch_amode(char *p)
{
	user_mode = PRIMARY_SPACE_MODE;
	return 0;
}
early_param("switch_amode", early_parse_switch_amode);

static int __init early_parse_user_mode(char *p)
{
	if (p && strcmp(p, "primary") == 0)
		user_mode = PRIMARY_SPACE_MODE;
	else if (!p || strcmp(p, "home") == 0)
		user_mode = HOME_SPACE_MODE;
	else
		return 1;
	return 0;
}
early_param("user_mode", early_parse_user_mode);

static void setup_addressing_mode(void)
{
	if (user_mode == PRIMARY_SPACE_MODE) {
		if (set_amode_primary())
			pr_info("Address spaces switched, "
				"mvcos available\n");
		else
			pr_info("Address spaces switched, "
				"mvcos not available\n");
	}
}

void *restart_stack __attribute__((__section__(".data")));

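/*
 * Allocate and initialize the lowcore (prefix page) of the boot CPU:
 * set up the new PSWs, the kernel, async, panic and restart stacks and
 * carry over the accounting timers from the current lowcore.
 */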
static void __init setup_lowcore(void)
{
	struct _lowcore *lc;

	/*
	 * Setup lowcore for boot cpu
	 */
	BUILD_BUG_ON(sizeof(struct _lowcore) != LC_PAGES * 4096);
	lc = __alloc_bootmem_low(LC_PAGES * PAGE_SIZE, LC_PAGES * PAGE_SIZE, 0);
	lc->restart_psw.mask = psw_kernel_bits;
	lc->restart_psw.addr =
		PSW_ADDR_AMODE | (unsigned long) restart_int_handler;
	lc->external_new_psw.mask = psw_kernel_bits |
		PSW_MASK_DAT | PSW_MASK_MCHECK;
	lc->external_new_psw.addr =
		PSW_ADDR_AMODE | (unsigned long) ext_int_handler;
	lc->svc_new_psw.mask = psw_kernel_bits |
		PSW_MASK_DAT | PSW_MASK_IO | PSW_MASK_EXT | PSW_MASK_MCHECK;
	lc->svc_new_psw.addr = PSW_ADDR_AMODE | (unsigned long) system_call;
	lc->program_new_psw.mask = psw_kernel_bits |
		PSW_MASK_DAT | PSW_MASK_MCHECK;
	lc->program_new_psw.addr =
		PSW_ADDR_AMODE | (unsigned long) pgm_check_handler;
	lc->mcck_new_psw.mask = psw_kernel_bits;
	lc->mcck_new_psw.addr =
		PSW_ADDR_AMODE | (unsigned long) mcck_int_handler;
	lc->io_new_psw.mask = psw_kernel_bits |
		PSW_MASK_DAT | PSW_MASK_MCHECK;
	lc->io_new_psw.addr = PSW_ADDR_AMODE | (unsigned long) io_int_handler;
	lc->clock_comparator = -1ULL;
	lc->kernel_stack = ((unsigned long) &init_thread_union) + THREAD_SIZE;
	lc->async_stack = (unsigned long)
		__alloc_bootmem(ASYNC_SIZE, ASYNC_SIZE, 0) + ASYNC_SIZE;
	lc->panic_stack = (unsigned long)
		__alloc_bootmem(PAGE_SIZE, PAGE_SIZE, 0) + PAGE_SIZE;
	lc->current_task = (unsigned long) init_thread_union.thread_info.task;
	lc->thread_info = (unsigned long) &init_thread_union;
	lc->machine_flags = S390_lowcore.machine_flags;
	lc->stfl_fac_list = S390_lowcore.stfl_fac_list;
	memcpy(lc->stfle_fac_list, S390_lowcore.stfle_fac_list,
	       MAX_FACILITY_BIT/8);
#ifndef CONFIG_64BIT
	if (MACHINE_HAS_IEEE) {
		lc->extended_save_area_addr = (__u32)
			__alloc_bootmem_low(PAGE_SIZE, PAGE_SIZE, 0);
		/* enable extended save area */
		__ctl_set_bit(14, 29);
	}
#else
	lc->vdso_per_cpu_data = (unsigned long) &lc->paste[0];
#endif
	lc->sync_enter_timer = S390_lowcore.sync_enter_timer;
	lc->async_enter_timer = S390_lowcore.async_enter_timer;
	lc->exit_timer = S390_lowcore.exit_timer;
	lc->user_timer = S390_lowcore.user_timer;
	lc->system_timer = S390_lowcore.system_timer;
	lc->steal_timer = S390_lowcore.steal_timer;
	lc->last_update_timer = S390_lowcore.last_update_timer;
	lc->last_update_clock = S390_lowcore.last_update_clock;
	lc->ftrace_func = S390_lowcore.ftrace_func;

	restart_stack = __alloc_bootmem(ASYNC_SIZE, ASYNC_SIZE, 0);
	restart_stack += ASYNC_SIZE;

	/*
	 * Set up PSW restart to call ipl.c:do_restart(). Copy the relevant
	 * restart data to the absolute zero lowcore. This is necessary if
	 * PSW restart is done on an offline CPU that has lowcore zero.
	 */
	lc->restart_stack = (unsigned long) restart_stack;
	lc->restart_fn = (unsigned long) do_restart;
	lc->restart_data = 0;
	lc->restart_source = -1UL;
	memcpy(&S390_lowcore.restart_stack, &lc->restart_stack,
	       4*sizeof(unsigned long));
	copy_to_absolute_zero(&S390_lowcore.restart_psw,
			      &lc->restart_psw, sizeof(psw_t));

	set_prefix((u32)(unsigned long) lc);
	lowcore_ptr[0] = lc;
}

static struct resource code_resource = {
	.name = "Kernel code",
	.flags = IORESOURCE_BUSY | IORESOURCE_MEM,
};

static struct resource data_resource = {
	.name = "Kernel data",
	.flags = IORESOURCE_BUSY | IORESOURCE_MEM,
};

static struct resource bss_resource = {
	.name = "Kernel bss",
	.flags = IORESOURCE_BUSY | IORESOURCE_MEM,
};

static struct resource __initdata *standard_resources[] = {
	&code_resource,
	&data_resource,
	&bss_resource,
};

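/*
 * Register one iomem resource per usable memory chunk ("System RAM" or
 * "System ROM") and nest the kernel code, data and bss resources into
 * the chunks they fall into.
 */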
static void __init setup_resources(void)
{
	struct resource *res, *std_res, *sub_res;
	int i, j;

	code_resource.start = (unsigned long) &_text;
	code_resource.end = (unsigned long) &_etext - 1;
	data_resource.start = (unsigned long) &_etext;
	data_resource.end = (unsigned long) &_edata - 1;
	bss_resource.start = (unsigned long) &__bss_start;
	bss_resource.end = (unsigned long) &__bss_stop - 1;

	for (i = 0; i < MEMORY_CHUNKS; i++) {
		if (!memory_chunk[i].size)
			continue;
		if (memory_chunk[i].type == CHUNK_OLDMEM ||
		    memory_chunk[i].type == CHUNK_CRASHK)
			continue;
		res = alloc_bootmem_low(sizeof(*res));
		res->flags = IORESOURCE_BUSY | IORESOURCE_MEM;
		switch (memory_chunk[i].type) {
		case CHUNK_READ_WRITE:
		case CHUNK_CRASHK:
			res->name = "System RAM";
			break;
		case CHUNK_READ_ONLY:
			res->name = "System ROM";
			res->flags |= IORESOURCE_READONLY;
			break;
		default:
			res->name = "reserved";
		}
		res->start = memory_chunk[i].addr;
		res->end = res->start + memory_chunk[i].size - 1;
		request_resource(&iomem_resource, res);

		for (j = 0; j < ARRAY_SIZE(standard_resources); j++) {
			std_res = standard_resources[j];
			if (std_res->start < res->start ||
			    std_res->start > res->end)
				continue;
			if (std_res->end > res->end) {
				sub_res = alloc_bootmem_low(sizeof(*sub_res));
				*sub_res = *std_res;
				sub_res->end = res->end;
				std_res->start = res->end + 1;
				request_resource(res, sub_res);
			} else {
				request_resource(res, std_res);
			}
		}
	}
}

unsigned long real_memory_size;
EXPORT_SYMBOL_GPL(real_memory_size);

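/*
 * Align the memory chunks to MAX_ORDER, choose the kernel address space
 * size (2, 3 or 4 level page tables), place the vmalloc area and the
 * vmemmap array, and clamp memory_end accordingly.
 */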
static void __init setup_memory_end(void)
{
	unsigned long vmax, vmalloc_size, tmp;
	int i;

#ifdef CONFIG_ZFCPDUMP
	if (ipl_info.type == IPL_TYPE_FCP_DUMP && !OLDMEM_BASE) {
		memory_end = ZFCPDUMP_HSA_SIZE;
		memory_end_set = 1;
	}
#endif
	real_memory_size = 0;
	memory_end &= PAGE_MASK;

	/*
	 * Make sure all chunks are MAX_ORDER aligned so we don't need the
	 * extra checks that HOLES_IN_ZONE would require.
	 */
	for (i = 0; i < MEMORY_CHUNKS; i++) {
		unsigned long start, end;
		struct mem_chunk *chunk;
		unsigned long align;

		chunk = &memory_chunk[i];
		align = 1UL << (MAX_ORDER + PAGE_SHIFT - 1);
		start = (chunk->addr + align - 1) & ~(align - 1);
		end = (chunk->addr + chunk->size) & ~(align - 1);
		if (start >= end)
			memset(chunk, 0, sizeof(*chunk));
		else {
			chunk->addr = start;
			chunk->size = end - start;
		}
		real_memory_size = max(real_memory_size,
				       chunk->addr + chunk->size);
	}

	/* Choose kernel address space layout: 2, 3, or 4 levels. */
#ifdef CONFIG_64BIT
	vmalloc_size = VMALLOC_END ?: 128UL << 30;
	tmp = (memory_end ?: real_memory_size) / PAGE_SIZE;
	tmp = tmp * (sizeof(struct page) + PAGE_SIZE) + vmalloc_size;
	if (tmp <= (1UL << 42))
		vmax = 1UL << 42;	/* 3-level kernel page table */
	else
		vmax = 1UL << 53;	/* 4-level kernel page table */
#else
	vmalloc_size = VMALLOC_END ?: 96UL << 20;
	vmax = 1UL << 31;		/* 2-level kernel page table */
#endif
	/* vmalloc area is at the end of the kernel address space. */
	VMALLOC_END = vmax;
	VMALLOC_START = vmax - vmalloc_size;

	/* Split remaining virtual space between 1:1 mapping & vmemmap array */
	tmp = VMALLOC_START / (PAGE_SIZE + sizeof(struct page));
	tmp = VMALLOC_START - tmp * sizeof(struct page);
	tmp &= ~((vmax >> 11) - 1);	/* align to page table level */
	tmp = min(tmp, 1UL << MAX_PHYSMEM_BITS);
	vmemmap = (struct page *) tmp;

	/* Take care that memory_end is set and <= vmemmap */
	memory_end = min(memory_end ?: real_memory_size, tmp);

	/* Fixup memory chunk array to fit into 0..memory_end */
	for (i = 0; i < MEMORY_CHUNKS; i++) {
		struct mem_chunk *chunk = &memory_chunk[i];

		if (chunk->addr >= memory_end) {
			memset(chunk, 0, sizeof(*chunk));
			continue;
		}
		if (chunk->addr + chunk->size > memory_end)
			chunk->size = memory_end - chunk->addr;
	}
}

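/*
 * Pass the address of the vmcoreinfo note to a possible kdump kernel
 * via the absolute zero lowcore.
 */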
static void __init setup_vmcoreinfo(void)
{
#ifdef CONFIG_KEXEC
	unsigned long ptr = paddr_vmcoreinfo_note();

	copy_to_absolute_zero(&S390_lowcore.vmcore_info, &ptr, sizeof(ptr));
#endif
}

#ifdef CONFIG_CRASH_DUMP

/*
 * Find suitable location for crashkernel memory
 */
static unsigned long __init find_crash_base(unsigned long crash_size,
					    char **msg)
{
	unsigned long crash_base;
	struct mem_chunk *chunk;
	int i;

	if (memory_chunk[0].size < crash_size) {
		*msg = "first memory chunk must be at least crashkernel size";
		return 0;
	}
	if (OLDMEM_BASE && crash_size == OLDMEM_SIZE)
		return OLDMEM_BASE;

	for (i = MEMORY_CHUNKS - 1; i >= 0; i--) {
		chunk = &memory_chunk[i];
		if (chunk->size == 0)
			continue;
		if (chunk->type != CHUNK_READ_WRITE)
			continue;
		if (chunk->size < crash_size)
			continue;
		crash_base = (chunk->addr + chunk->size) - crash_size;
		if (crash_base < crash_size)
			continue;
		if (crash_base < ZFCPDUMP_HSA_SIZE_MAX)
			continue;
		if (crash_base < (unsigned long) INITRD_START + INITRD_SIZE)
			continue;
		return crash_base;
	}
	*msg = "no suitable area found";
	return 0;
}

/*
 * Check if crash_base and crash_size is valid
 */
static int __init verify_crash_base(unsigned long crash_base,
				    unsigned long crash_size,
				    char **msg)
{
	struct mem_chunk *chunk;
	int i;

	/*
	 * Because we do the swap to zero, we must have at least 'crash_size'
	 * bytes of free space before crash_base
	 */
	if (crash_size > crash_base) {
		*msg = "crashkernel offset must be greater than size";
		return -EINVAL;
	}

	/* First memory chunk must be at least crash_size */
	if (memory_chunk[0].size < crash_size) {
		*msg = "first memory chunk must be at least crashkernel size";
		return -EINVAL;
	}
	/* Check if we fit into the respective memory chunk */
	for (i = 0; i < MEMORY_CHUNKS; i++) {
		chunk = &memory_chunk[i];
		if (chunk->size == 0)
			continue;
		if (crash_base < chunk->addr)
			continue;
		if (crash_base >= chunk->addr + chunk->size)
			continue;
		/* we have found the memory chunk */
		if (crash_base + crash_size > chunk->addr + chunk->size) {
			*msg = "selected memory chunk is too small for "
				"crashkernel memory";
			return -EINVAL;
		}
		return 0;
	}
	*msg = "invalid memory range specified";
	return -EINVAL;
}

/*
 * Reserve kdump memory by creating a memory hole in the mem_chunk array
 */
static void __init reserve_kdump_bootmem(unsigned long addr, unsigned long size,
					 int type)
{
	create_mem_hole(memory_chunk, addr, size, type);
}

/*
 * When kdump is enabled, we have to ensure that no memory from
 * the area [0 - crashkernel memory size] and
 * [crashk_res.start - crashk_res.end] is set offline.
 */
static int kdump_mem_notifier(struct notifier_block *nb,
			      unsigned long action, void *data)
{
	struct memory_notify *arg = data;

	if (arg->start_pfn < PFN_DOWN(resource_size(&crashk_res)))
		return NOTIFY_BAD;
	if (arg->start_pfn > PFN_DOWN(crashk_res.end))
		return NOTIFY_OK;
	if (arg->start_pfn + arg->nr_pages - 1 < PFN_DOWN(crashk_res.start))
		return NOTIFY_OK;
	return NOTIFY_BAD;
}

static struct notifier_block kdump_mem_nb = {
	.notifier_call = kdump_mem_notifier,
};

#endif

/*
 * Make sure that oldmem, where the dump is stored, is protected
 */
static void reserve_oldmem(void)
{
#ifdef CONFIG_CRASH_DUMP
	if (!OLDMEM_BASE)
		return;

	reserve_kdump_bootmem(OLDMEM_BASE, OLDMEM_SIZE, CHUNK_OLDMEM);
	reserve_kdump_bootmem(OLDMEM_SIZE, memory_end - OLDMEM_SIZE,
			      CHUNK_OLDMEM);
	if (OLDMEM_BASE + OLDMEM_SIZE == real_memory_size)
		saved_max_pfn = PFN_DOWN(OLDMEM_BASE) - 1;
	else
		saved_max_pfn = PFN_DOWN(real_memory_size) - 1;
#endif
}

/*
 * Reserve memory for kdump kernel to be loaded with kexec
 */
static void __init reserve_crashkernel(void)
{
#ifdef CONFIG_CRASH_DUMP
	unsigned long long crash_base, crash_size;
	char *msg = NULL;
	int rc;

	rc = parse_crashkernel(boot_command_line, memory_end, &crash_size,
			       &crash_base);
	if (rc || crash_size == 0)
		return;
	crash_base = ALIGN(crash_base, KEXEC_CRASH_MEM_ALIGN);
	crash_size = ALIGN(crash_size, KEXEC_CRASH_MEM_ALIGN);
	if (register_memory_notifier(&kdump_mem_nb))
		return;
	if (!crash_base)
		crash_base = find_crash_base(crash_size, &msg);
	if (!crash_base) {
		pr_info("crashkernel reservation failed: %s\n", msg);
		unregister_memory_notifier(&kdump_mem_nb);
		return;
	}
	if (verify_crash_base(crash_base, crash_size, &msg)) {
		pr_info("crashkernel reservation failed: %s\n", msg);
		unregister_memory_notifier(&kdump_mem_nb);
		return;
	}
	if (!OLDMEM_BASE && MACHINE_IS_VM)
		diag10_range(PFN_DOWN(crash_base), PFN_DOWN(crash_size));
	crashk_res.start = crash_base;
	crashk_res.end = crash_base + crash_size - 1;
	insert_resource(&iomem_resource, &crashk_res);
	reserve_kdump_bootmem(crash_base, crash_size, CHUNK_CRASHK);
	pr_info("Reserving %lluMB of memory at %lluMB "
		"for crashkernel (System RAM: %luMB)\n",
		crash_size >> 20, crash_base >> 20, memory_end >> 20);
	os_info_crashkernel_add(crash_base, crash_size);
#endif
}

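/*
 * Initialize the bootmem allocator and register the usable memory
 * chunks with it: move the initrd out of the way of the bootmem bitmap
 * if necessary, then reserve the lowcore, kernel image, bootmem bitmap,
 * crashkernel area and initrd.
 */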
static void __init setup_memory(void)
{
	unsigned long bootmap_size;
	unsigned long start_pfn, end_pfn;
	int i;

	/*
	 * partially used pages are not usable - thus
	 * we are rounding upwards:
	 */
	start_pfn = PFN_UP(__pa(&_end));
	end_pfn = max_pfn = PFN_DOWN(memory_end);

#ifdef CONFIG_BLK_DEV_INITRD
	/*
	 * Move the initrd in case the bitmap of the bootmem allocator
	 * would overwrite it.
	 */

	if (INITRD_START && INITRD_SIZE) {
		unsigned long bmap_size;
		unsigned long start;

		bmap_size = bootmem_bootmap_pages(end_pfn - start_pfn + 1);
		bmap_size = PFN_PHYS(bmap_size);

		if (PFN_PHYS(start_pfn) + bmap_size > INITRD_START) {
			start = PFN_PHYS(start_pfn) + bmap_size + PAGE_SIZE;

#ifdef CONFIG_CRASH_DUMP
			if (OLDMEM_BASE) {
				/* Move initrd behind kdump oldmem */
				if (start + INITRD_SIZE > OLDMEM_BASE &&
				    start < OLDMEM_BASE + OLDMEM_SIZE)
					start = OLDMEM_BASE + OLDMEM_SIZE;
			}
#endif
			if (start + INITRD_SIZE > memory_end) {
				pr_err("initrd extends beyond end of "
				       "memory (0x%08lx > 0x%08lx) "
				       "disabling initrd\n",
				       start + INITRD_SIZE, memory_end);
				INITRD_START = INITRD_SIZE = 0;
			} else {
				pr_info("Moving initrd (0x%08lx -> "
					"0x%08lx, size: %ld)\n",
					INITRD_START, start, INITRD_SIZE);
				memmove((void *) start, (void *) INITRD_START,
					INITRD_SIZE);
				INITRD_START = start;
			}
		}
	}
#endif

	/*
	 * Initialize the boot-time allocator
	 */
	bootmap_size = init_bootmem(start_pfn, end_pfn);

	/*
	 * Register RAM areas with the bootmem allocator.
	 */

	for (i = 0; i < MEMORY_CHUNKS && memory_chunk[i].size > 0; i++) {
		unsigned long start_chunk, end_chunk, pfn;

		if (memory_chunk[i].type != CHUNK_READ_WRITE &&
		    memory_chunk[i].type != CHUNK_CRASHK)
			continue;
		start_chunk = PFN_DOWN(memory_chunk[i].addr);
		end_chunk = start_chunk + PFN_DOWN(memory_chunk[i].size);
		end_chunk = min(end_chunk, end_pfn);
		if (start_chunk >= end_chunk)
			continue;
		memblock_add_node(PFN_PHYS(start_chunk),
				  PFN_PHYS(end_chunk - start_chunk), 0);
		pfn = max(start_chunk, start_pfn);
		for (; pfn < end_chunk; pfn++)
			page_set_storage_key(PFN_PHYS(pfn),
					     PAGE_DEFAULT_KEY, 0);
	}

	psw_set_key(PAGE_DEFAULT_KEY);

	free_bootmem_with_active_regions(0, max_pfn);

	/*
	 * Reserve memory used for lowcore/command line/kernel image.
	 */
	reserve_bootmem(0, (unsigned long)_ehead, BOOTMEM_DEFAULT);
	reserve_bootmem((unsigned long)_stext,
			PFN_PHYS(start_pfn) - (unsigned long)_stext,
			BOOTMEM_DEFAULT);
	/*
	 * Reserve the bootmem bitmap itself as well. We do this in two
	 * steps (first step was init_bootmem()) because this catches
	 * the (very unlikely) case of us accidentally initializing the
	 * bootmem allocator with an invalid RAM area.
	 */
	reserve_bootmem(start_pfn << PAGE_SHIFT, bootmap_size,
			BOOTMEM_DEFAULT);

#ifdef CONFIG_CRASH_DUMP
	if (crashk_res.start)
		reserve_bootmem(crashk_res.start,
				crashk_res.end - crashk_res.start + 1,
				BOOTMEM_DEFAULT);
	if (is_kdump_kernel())
		reserve_bootmem(elfcorehdr_addr - OLDMEM_BASE,
				PAGE_ALIGN(elfcorehdr_size), BOOTMEM_DEFAULT);
#endif
#ifdef CONFIG_BLK_DEV_INITRD
	if (INITRD_START && INITRD_SIZE) {
		if (INITRD_START + INITRD_SIZE <= memory_end) {
			reserve_bootmem(INITRD_START, INITRD_SIZE,
					BOOTMEM_DEFAULT);
			initrd_start = INITRD_START;
			initrd_end = initrd_start + INITRD_SIZE;
		} else {
			pr_err("initrd extends beyond end of "
			       "memory (0x%08lx > 0x%08lx) "
			       "disabling initrd\n",
			       initrd_start + INITRD_SIZE, memory_end);
			initrd_start = initrd_end = 0;
		}
	}
#endif
}

/*
 * Setup hardware capabilities.
 */
static void __init setup_hwcaps(void)
{
	static const int stfl_bits[6] = { 0, 2, 7, 17, 19, 21 };
	struct cpuid cpu_id;
	int i;

	/*
	 * The store-facility-list bits, as found in the Principles of
	 * Operation, are numbered with bit 1UL<<31 as number 0 and
	 * bit 1UL<<0 as number 31.
	 * Bit 0: instructions named N3, "backported" to esa-mode
	 * Bit 2: z/Architecture mode is active
	 * Bit 7: the store-facility-list-extended facility is installed
	 * Bit 17: the message-security assist is installed
	 * Bit 19: the long-displacement facility is installed
	 * Bit 21: the extended-immediate facility is installed
	 * Bit 22: extended-translation facility 3 is installed
	 * Bit 30: extended-translation facility 3 enhancement facility
	 * These get translated to:
	 * HWCAP_S390_ESAN3 bit 0, HWCAP_S390_ZARCH bit 1,
	 * HWCAP_S390_STFLE bit 2, HWCAP_S390_MSA bit 3,
	 * HWCAP_S390_LDISP bit 4, HWCAP_S390_EIMM bit 5 and
	 * HWCAP_S390_ETF3EH bit 8 (22 && 30).
	 */
	for (i = 0; i < 6; i++)
		if (test_facility(stfl_bits[i]))
			elf_hwcap |= 1UL << i;

	if (test_facility(22) && test_facility(30))
		elf_hwcap |= HWCAP_S390_ETF3EH;

	/*
	 * Check for additional facilities with store-facility-list-extended.
	 * stfle stores doublewords (8 byte) with bit 1ULL<<63 as bit 0
	 * and 1ULL<<0 as bit 63. Bits 0-31 contain the same information
	 * as stored by stfl, bits 32-xxx contain additional facilities.
	 * How many facility words are stored depends on the number of
	 * doublewords passed to the instruction. The additional facilities
	 * are:
	 * Bit 42: decimal floating point facility is installed
	 * Bit 44: perform floating point operation facility is installed
	 * translated to:
	 * HWCAP_S390_DFP bit 6 (42 && 44).
	 */
	if ((elf_hwcap & (1UL << 2)) && test_facility(42) && test_facility(44))
		elf_hwcap |= HWCAP_S390_DFP;

	/*
	 * Huge page support HWCAP_S390_HPAGE is bit 7.
	 */
	if (MACHINE_HAS_HPAGE)
		elf_hwcap |= HWCAP_S390_HPAGE;

	/*
	 * 64-bit register support for 31-bit processes
	 * HWCAP_S390_HIGH_GPRS is bit 9.
	 */
	elf_hwcap |= HWCAP_S390_HIGH_GPRS;

	get_cpu_id(&cpu_id);
	switch (cpu_id.machine) {
	case 0x9672:
#if !defined(CONFIG_64BIT)
	default:	/* Use "g5" as default for 31 bit kernels. */
#endif
		strcpy(elf_platform, "g5");
		break;
	case 0x2064:
	case 0x2066:
#if defined(CONFIG_64BIT)
	default:	/* Use "z900" as default for 64 bit kernels. */
#endif
		strcpy(elf_platform, "z900");
		break;
	case 0x2084:
	case 0x2086:
		strcpy(elf_platform, "z990");
		break;
	case 0x2094:
	case 0x2096:
		strcpy(elf_platform, "z9-109");
		break;
	case 0x2097:
	case 0x2098:
		strcpy(elf_platform, "z10");
		break;
	case 0x2817:
	case 0x2818:
		strcpy(elf_platform, "z196");
		break;
	}
}

/*
 * Setup function called from init/main.c just after the banner
 * was printed.
 */

void __init setup_arch(char **cmdline_p)
{
	/*
	 * print what head.S has found out about the machine
	 */
#ifndef CONFIG_64BIT
	if (MACHINE_IS_VM)
		pr_info("Linux is running as a z/VM "
			"guest operating system in 31-bit mode\n");
	else if (MACHINE_IS_LPAR)
		pr_info("Linux is running natively in 31-bit mode\n");
	if (MACHINE_HAS_IEEE)
		pr_info("The hardware system has IEEE compatible "
			"floating point units\n");
	else
		pr_info("The hardware system has no IEEE compatible "
			"floating point units\n");
#else /* CONFIG_64BIT */
	if (MACHINE_IS_VM)
		pr_info("Linux is running as a z/VM "
			"guest operating system in 64-bit mode\n");
	else if (MACHINE_IS_KVM)
		pr_info("Linux is running under KVM in 64-bit mode\n");
	else if (MACHINE_IS_LPAR)
		pr_info("Linux is running natively in 64-bit mode\n");
#endif /* CONFIG_64BIT */

	/* Have one command line that is parsed and saved in /proc/cmdline */
	/* boot_command_line has been already set up in early.c */
	*cmdline_p = boot_command_line;

	ROOT_DEV = Root_RAM0;

	init_mm.start_code = PAGE_OFFSET;
	init_mm.end_code = (unsigned long) &_etext;
	init_mm.end_data = (unsigned long) &_edata;
	init_mm.brk = (unsigned long) &_end;

	if (MACHINE_HAS_MVCOS)
		memcpy(&uaccess, &uaccess_mvcos, sizeof(uaccess));
	else
		memcpy(&uaccess, &uaccess_std, sizeof(uaccess));

	parse_early_param();

	os_info_init();
	setup_ipl();
	setup_memory_end();
	setup_addressing_mode();
	reserve_oldmem();
	reserve_crashkernel();
	setup_memory();
	setup_resources();
	setup_vmcoreinfo();
	setup_lowcore();

	cpu_init();
	s390_init_cpu_topology();

	/*
	 * Setup capabilities (ELF_HWCAP & ELF_PLATFORM).
	 */
	setup_hwcaps();

	/*
	 * Create kernel page tables and switch to virtual addressing.
	 */
	paging_init();

	/* Setup default console */
	conmode_default();
	set_preferred_console();

	/* Setup zfcpdump support */
	setup_zfcpdump(console_devno);
}