1 // SPDX-License-Identifier: GPL-2.0
2 /*
3 * linux/arch/alpha/kernel/setup.c
4 *
5 * Copyright (C) 1995 Linus Torvalds
6 */
7
8 /* 2.3.x bootmem, 1999 Andrea Arcangeli <andrea@suse.de> */
9
10 /*
11 * Bootup setup stuff.
12 */
13
14 #include <linux/sched.h>
15 #include <linux/kernel.h>
16 #include <linux/mm.h>
17 #include <linux/stddef.h>
18 #include <linux/unistd.h>
19 #include <linux/ptrace.h>
20 #include <linux/slab.h>
21 #include <linux/user.h>
22 #include <linux/screen_info.h>
23 #include <linux/delay.h>
24 #include <linux/mc146818rtc.h>
25 #include <linux/console.h>
26 #include <linux/cpu.h>
27 #include <linux/errno.h>
28 #include <linux/init.h>
29 #include <linux/string.h>
30 #include <linux/ioport.h>
31 #include <linux/platform_device.h>
32 #include <linux/memblock.h>
33 #include <linux/pci.h>
34 #include <linux/seq_file.h>
35 #include <linux/root_dev.h>
36 #include <linux/initrd.h>
37 #include <linux/eisa.h>
38 #include <linux/pfn.h>
39 #ifdef CONFIG_MAGIC_SYSRQ
40 #include <linux/sysrq.h>
41 #include <linux/reboot.h>
42 #endif
43 #include <linux/notifier.h>
44 #include <asm/setup.h>
45 #include <asm/io.h>
46 #include <linux/log2.h>
47 #include <linux/export.h>
48
49 extern struct atomic_notifier_head panic_notifier_list;
50 static int alpha_panic_event(struct notifier_block *, unsigned long, void *);
51 static struct notifier_block alpha_panic_block = {
52 alpha_panic_event,
53 NULL,
54 INT_MAX /* try to do it first */
55 };
56
57 #include <linux/uaccess.h>
58 #include <asm/hwrpb.h>
59 #include <asm/dma.h>
60 #include <asm/mmu_context.h>
61 #include <asm/console.h>
62
63 #include "proto.h"
64 #include "pci_impl.h"
65
66
67 struct hwrpb_struct *hwrpb;
68 EXPORT_SYMBOL(hwrpb);
69 unsigned long srm_hae;
70
71 int alpha_l1i_cacheshape;
72 int alpha_l1d_cacheshape;
73 int alpha_l2_cacheshape;
74 int alpha_l3_cacheshape;
75
76 #ifdef CONFIG_VERBOSE_MCHECK
77 /* 0=minimum, 1=verbose, 2=all */
78 /* These can be overridden via the command line, e.g. "verbose_mcheck=2". */
79 unsigned long alpha_verbose_mcheck = CONFIG_VERBOSE_MCHECK_ON;
80 #endif
81
82 #ifdef CONFIG_NUMA
83 struct cpumask node_to_cpumask_map[MAX_NUMNODES] __read_mostly;
84 EXPORT_SYMBOL(node_to_cpumask_map);
85 #endif
86
87 /* Which processor we booted from. */
88 int boot_cpuid;
89
90 /*
91 * Using SRM callbacks for initial console output. This works from
92 * setup_arch() time through the end of time_init(), as those places
93 * are under our (Alpha) control.
94  *
95 * "srmcons" specified in the boot command arguments allows us to
96 * see kernel messages during the period of time before the true
97 * console device is "registered" during console_init().
98 * As of this version (2.5.59), console_init() will call
99 * disable_early_printk() as the last action before initializing
100 * the console drivers. That's the last possible time srmcons can be
101 * unregistered without interfering with console behavior.
102 *
103  * By default, OFF; set it with a boot command argument of "srmcons" or
104 * "console=srm". The meaning of these two args is:
105 * "srmcons" - early callback prints
106 * "console=srm" - full callback based console, including early prints
107 */
108 int srmcons_output = 0;
109
110 /* Enforce a memory size limit; useful for testing. By default, none. */
111 unsigned long mem_size_limit = 0;
112
113 /* Set AGP GART window size (0 means disabled). */
114 unsigned long alpha_agpgart_size = DEFAULT_AGP_APER_SIZE;
115
116 #ifdef CONFIG_ALPHA_GENERIC
117 struct alpha_machine_vector alpha_mv;
118 EXPORT_SYMBOL(alpha_mv);
119 #endif
120
121 #ifndef alpha_using_srm
122 int alpha_using_srm;
123 EXPORT_SYMBOL(alpha_using_srm);
124 #endif
125
126 #ifndef alpha_using_qemu
127 int alpha_using_qemu;
128 #endif
129
130 static struct alpha_machine_vector *get_sysvec(unsigned long, unsigned long,
131 unsigned long);
132 static struct alpha_machine_vector *get_sysvec_byname(const char *);
133 static void get_sysnames(unsigned long, unsigned long, unsigned long,
134 char **, char **);
135 static void determine_cpu_caches (unsigned int);
136
137 static char __initdata command_line[COMMAND_LINE_SIZE];
138
139 /*
140  * The format of "screen_info" is strange; it is inherited from early
141  * i386 setup code. This is just enough to make the console
142 * code think we're on a VGA color display.
143 */
144
145 struct screen_info screen_info = {
146 .orig_x = 0,
147 .orig_y = 25,
148 .orig_video_cols = 80,
149 .orig_video_lines = 25,
150 .orig_video_isVGA = 1,
151 .orig_video_points = 16
152 };
153
154 EXPORT_SYMBOL(screen_info);
155
156 /*
157 * The direct map I/O window, if any. This should be the same
158 * for all busses, since it's used by virt_to_bus.
159 */
160
161 unsigned long __direct_map_base;
162 unsigned long __direct_map_size;
163 EXPORT_SYMBOL(__direct_map_base);
164 EXPORT_SYMBOL(__direct_map_size);
165
166 /*
167 * Declare all of the machine vectors.
168 */
169
170 /* GCC 2.7.2 (on alpha at least) is lame. It does not support either
171 __attribute__((weak)) or #pragma weak. Bypass it and talk directly
172 to the assembler. */
173
174 #define WEAK(X) \
175 extern struct alpha_machine_vector X; \
176 asm(".weak "#X)
177
178 WEAK(alcor_mv);
179 WEAK(alphabook1_mv);
180 WEAK(avanti_mv);
181 WEAK(cabriolet_mv);
182 WEAK(clipper_mv);
183 WEAK(dp264_mv);
184 WEAK(eb164_mv);
185 WEAK(eb64p_mv);
186 WEAK(eb66_mv);
187 WEAK(eb66p_mv);
188 WEAK(eiger_mv);
189 WEAK(jensen_mv);
190 WEAK(lx164_mv);
191 WEAK(lynx_mv);
192 WEAK(marvel_ev7_mv);
193 WEAK(miata_mv);
194 WEAK(mikasa_mv);
195 WEAK(mikasa_primo_mv);
196 WEAK(monet_mv);
197 WEAK(nautilus_mv);
198 WEAK(noname_mv);
199 WEAK(noritake_mv);
200 WEAK(noritake_primo_mv);
201 WEAK(p2k_mv);
202 WEAK(pc164_mv);
203 WEAK(privateer_mv);
204 WEAK(rawhide_mv);
205 WEAK(ruffian_mv);
206 WEAK(rx164_mv);
207 WEAK(sable_mv);
208 WEAK(sable_gamma_mv);
209 WEAK(shark_mv);
210 WEAK(sx164_mv);
211 WEAK(takara_mv);
212 WEAK(titan_mv);
213 WEAK(webbrick_mv);
214 WEAK(wildfire_mv);
215 WEAK(xl_mv);
216 WEAK(xlt_mv);
217
218 #undef WEAK
219
220 /*
221 * I/O resources inherited from PeeCees. Except for perhaps the
222 * turbochannel alphas, everyone has these on some sort of SuperIO chip.
223 *
224 * ??? If this becomes less standard, move the struct out into the
225 * machine vector.
226 */
227
228 static void __init
229 reserve_std_resources(void)
230 {
231 static struct resource standard_io_resources[] = {
232 { .name = "rtc", .start = -1, .end = -1 },
233 { .name = "dma1", .start = 0x00, .end = 0x1f },
234 { .name = "pic1", .start = 0x20, .end = 0x3f },
235 { .name = "timer", .start = 0x40, .end = 0x5f },
236 { .name = "keyboard", .start = 0x60, .end = 0x6f },
237 { .name = "dma page reg", .start = 0x80, .end = 0x8f },
238 { .name = "pic2", .start = 0xa0, .end = 0xbf },
239 { .name = "dma2", .start = 0xc0, .end = 0xdf },
240 };
241
242 struct resource *io = &ioport_resource;
243 size_t i;
244
245 if (hose_head) {
246 struct pci_controller *hose;
247 for (hose = hose_head; hose; hose = hose->next)
248 if (hose->index == 0) {
249 io = hose->io_space;
250 break;
251 }
252 }
253
254 /* Fix up for the Jensen's queer RTC placement. */
255 standard_io_resources[0].start = RTC_PORT(0);
256 standard_io_resources[0].end = RTC_PORT(0) + 0x0f;
257
258 for (i = 0; i < ARRAY_SIZE(standard_io_resources); ++i)
259 request_resource(io, standard_io_resources+i);
260 }
261
262 #define PFN_MAX PFN_DOWN(0x80000000)
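/*
 * Walk the memory cluster descriptors that the console firmware provides
 * in the HWRPB memory data descriptor table (MDDT).
 */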
263 #define for_each_mem_cluster(memdesc, _cluster, i) \
264 for ((_cluster) = (memdesc)->cluster, (i) = 0; \
265 (i) < (memdesc)->numclusters; (i)++, (_cluster)++)
266
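/*
 * Parse a size argument of the form <number>[KkMmGg] (e.g. "mem=512M")
 * and return the corresponding limit as a page frame number; used for
 * both the "mem=" and "gartsize=" options.
 */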
267 static unsigned long __init
268 get_mem_size_limit(char *s)
269 {
270 unsigned long end = 0;
271 char *from = s;
272
273 end = simple_strtoul(from, &from, 0);
274 if ( *from == 'K' || *from == 'k' ) {
275 end = end << 10;
276 from++;
277 } else if ( *from == 'M' || *from == 'm' ) {
278 end = end << 20;
279 from++;
280 } else if ( *from == 'G' || *from == 'g' ) {
281 end = end << 30;
282 from++;
283 }
284 return end >> PAGE_SHIFT; /* Return the PFN of the limit. */
285 }
286
287 #ifdef CONFIG_BLK_DEV_INITRD
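/*
 * Relocate an initrd that the bootloader left above the usable memory
 * limit down into allocatable memory; if that fails, the initrd is
 * disabled by the caller.
 */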
288 void * __init
289 move_initrd(unsigned long mem_limit)
290 {
291 void *start;
292 unsigned long size;
293
294 size = initrd_end - initrd_start;
295 start = memblock_alloc(PAGE_ALIGN(size), PAGE_SIZE);
296 if (!start || __pa(start) + size > mem_limit) {
297 initrd_start = initrd_end = 0;
298 return NULL;
299 }
300 memmove(start, (void *)initrd_start, size);
301 initrd_start = (unsigned long)start;
302 initrd_end = initrd_start + size;
303 printk("initrd moved to %p\n", start);
304 return start;
305 }
306 #endif
307
308 #ifndef CONFIG_DISCONTIGMEM
309 static void __init
310 setup_memory(void *kernel_end)
311 {
312 struct memclust_struct * cluster;
313 struct memdesc_struct * memdesc;
314 unsigned long kernel_size;
315 unsigned long i;
316
317 /* Find free clusters, and init and free the bootmem accordingly. */
318 memdesc = (struct memdesc_struct *)
319 (hwrpb->mddt_offset + (unsigned long) hwrpb);
320
321 for_each_mem_cluster(memdesc, cluster, i) {
322 unsigned long end;
323
324 printk("memcluster %lu, usage %01lx, start %8lu, end %8lu\n",
325 i, cluster->usage, cluster->start_pfn,
326 cluster->start_pfn + cluster->numpages);
327
328 end = cluster->start_pfn + cluster->numpages;
329 if (end > max_low_pfn)
330 max_low_pfn = end;
331
332 memblock_add(PFN_PHYS(cluster->start_pfn),
333 cluster->numpages << PAGE_SHIFT);
334
335 /* Bit 0 is console/PALcode reserved. Bit 1 is
336 non-volatile memory -- we might want to mark
337 this for later. */
338 if (cluster->usage & 3)
339 memblock_reserve(PFN_PHYS(cluster->start_pfn),
340 cluster->numpages << PAGE_SHIFT);
341 }
342
343 /*
344 * Except for the NUMA systems (wildfire, marvel) all of the
345 * Alpha systems we run on support 32GB of memory or less.
346 * Since the NUMA systems introduce large holes in memory addressing,
347 * we can get into a situation where there is not enough contiguous
348 * memory for the memory map.
349 *
350 * Limit memory to the first 32GB to limit the NUMA systems to
351 * memory on their first node (wildfire) or 2 (marvel) to avoid
352 * not being able to produce the memory map. In order to access
353 * all of the memory on the NUMA systems, build with discontiguous
354 * memory support.
355 *
356 * If the user specified a memory limit, let that memory limit stand.
357 */
358 if (!mem_size_limit)
359 mem_size_limit = (32ul * 1024 * 1024 * 1024) >> PAGE_SHIFT;
360
361 if (mem_size_limit && max_low_pfn >= mem_size_limit)
362 {
363 printk("setup: forcing memory size to %ldK (from %ldK).\n",
364 mem_size_limit << (PAGE_SHIFT - 10),
365 max_low_pfn << (PAGE_SHIFT - 10));
366 max_low_pfn = mem_size_limit;
367 }
368
369 /* Reserve the kernel memory. */
370 kernel_size = virt_to_phys(kernel_end) - KERNEL_START_PHYS;
371 memblock_reserve(KERNEL_START_PHYS, kernel_size);
372
373 #ifdef CONFIG_BLK_DEV_INITRD
374 initrd_start = INITRD_START;
375 if (initrd_start) {
376 initrd_end = initrd_start+INITRD_SIZE;
377 printk("Initial ramdisk at: 0x%p (%lu bytes)\n",
378 (void *) initrd_start, INITRD_SIZE);
379
380 if ((void *)initrd_end > phys_to_virt(PFN_PHYS(max_low_pfn))) {
381 if (!move_initrd(PFN_PHYS(max_low_pfn)))
382 printk("initrd extends beyond end of memory "
383 "(0x%08lx > 0x%p)\ndisabling initrd\n",
384 initrd_end,
385 phys_to_virt(PFN_PHYS(max_low_pfn)));
386 } else {
387 memblock_reserve(virt_to_phys((void *)initrd_start),
388 INITRD_SIZE);
389 }
390 }
391 #endif /* CONFIG_BLK_DEV_INITRD */
392 }
393 #else
394 extern void setup_memory(void *);
395 #endif /* !CONFIG_DISCONTIGMEM */
396
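/*
 * Report whether a PFN lies in a memory cluster that is usable as RAM,
 * i.e. not marked console/PALcode-reserved or non-volatile in the MDDT.
 */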
397 int __init
398 page_is_ram(unsigned long pfn)
399 {
400 struct memclust_struct * cluster;
401 struct memdesc_struct * memdesc;
402 unsigned long i;
403
404 memdesc = (struct memdesc_struct *)
405 (hwrpb->mddt_offset + (unsigned long) hwrpb);
406 for_each_mem_cluster(memdesc, cluster, i)
407 {
408 if (pfn >= cluster->start_pfn &&
409 pfn < cluster->start_pfn + cluster->numpages) {
410 return (cluster->usage & 3) ? 0 : 1;
411 }
412 }
413
414 return 0;
415 }
416
417 static int __init
418 register_cpus(void)
419 {
420 int i;
421
422 for_each_possible_cpu(i) {
423 struct cpu *p = kzalloc(sizeof(*p), GFP_KERNEL);
424 if (!p)
425 return -ENOMEM;
426 register_cpu(p, i);
427 }
428 return 0;
429 }
430
431 arch_initcall(register_cpus);
432
433 #ifdef CONFIG_MAGIC_SYSRQ
434 static void sysrq_reboot_handler(int unused)
435 {
436 machine_halt();
437 }
438
439 static const struct sysrq_key_op srm_sysrq_reboot_op = {
440 .handler = sysrq_reboot_handler,
441 .help_msg = "reboot(b)",
442 .action_msg = "Resetting",
443 .enable_mask = SYSRQ_ENABLE_BOOT,
444 };
445 #endif
446
447 void __init
448 setup_arch(char **cmdline_p)
449 {
450 extern char _end[];
451
452 struct alpha_machine_vector *vec = NULL;
453 struct percpu_struct *cpu;
454 char *type_name, *var_name, *p;
455 void *kernel_end = _end; /* end of kernel */
456 char *args = command_line;
457
458 hwrpb = (struct hwrpb_struct*) __va(INIT_HWRPB->phys_addr);
459 boot_cpuid = hard_smp_processor_id();
460
461 /*
462 * Pre-process the system type to make sure it will be valid.
463 *
464  * This may restore real CABRIO and EB66+ family names, i.e.
465 * EB64+ and EB66.
466 *
467 * Oh, and "white box" AS800 (aka DIGITAL Server 3000 series)
468 * and AS1200 (DIGITAL Server 5000 series) have the type as
469 * the negative of the real one.
470 */
471 if ((long)hwrpb->sys_type < 0) {
472 hwrpb->sys_type = -((long)hwrpb->sys_type);
473 hwrpb_update_checksum(hwrpb);
474 }
475
476 /* Register a call for panic conditions. */
477 atomic_notifier_chain_register(&panic_notifier_list,
478 &alpha_panic_block);
479
480 #ifndef alpha_using_srm
481 /* Assume that we've booted from SRM if we haven't booted from MILO.
482 Detect the latter by looking for "MILO" in the system serial number. */
483 alpha_using_srm = !str_has_prefix((const char *)hwrpb->ssn, "MILO");
484 #endif
485 #ifndef alpha_using_qemu
486 /* Similarly, look for QEMU. */
487 alpha_using_qemu = strstr((const char *)hwrpb->ssn, "QEMU") != 0;
488 #endif
489
490 /* If we are using SRM, we want to allow callbacks
491 as early as possible, so do this NOW, and then
492 they should work immediately thereafter.
493 */
494 kernel_end = callback_init(kernel_end);
495
496 /*
497 * Locate the command line.
498 */
499 /* Hack for Jensen... since we're restricted to 8 or 16 chars for
500 boot flags depending on the boot mode, we need some shorthand.
501 This should do for installation. */
502 if (strcmp(COMMAND_LINE, "INSTALL") == 0) {
503 strlcpy(command_line, "root=/dev/fd0 load_ramdisk=1", sizeof command_line);
504 } else {
505 strlcpy(command_line, COMMAND_LINE, sizeof command_line);
506 }
507 strcpy(boot_command_line, command_line);
508 *cmdline_p = command_line;
509
510 /*
511 * Process command-line arguments.
512 */
513 while ((p = strsep(&args, " \t")) != NULL) {
514 if (!*p) continue;
515 if (strncmp(p, "alpha_mv=", 9) == 0) {
516 vec = get_sysvec_byname(p+9);
517 continue;
518 }
519 if (strncmp(p, "cycle=", 6) == 0) {
520 est_cycle_freq = simple_strtol(p+6, NULL, 0);
521 continue;
522 }
523 if (strncmp(p, "mem=", 4) == 0) {
524 mem_size_limit = get_mem_size_limit(p+4);
525 continue;
526 }
527 if (strncmp(p, "srmcons", 7) == 0) {
528 srmcons_output |= 1;
529 continue;
530 }
531 if (strncmp(p, "console=srm", 11) == 0) {
532 srmcons_output |= 2;
533 continue;
534 }
535 if (strncmp(p, "gartsize=", 9) == 0) {
536 alpha_agpgart_size =
537 get_mem_size_limit(p+9) << PAGE_SHIFT;
538 continue;
539 }
540 #ifdef CONFIG_VERBOSE_MCHECK
541 if (strncmp(p, "verbose_mcheck=", 15) == 0) {
542 alpha_verbose_mcheck = simple_strtol(p+15, NULL, 0);
543 continue;
544 }
545 #endif
546 }
547
548 /* Replace the command line, now that we've killed it with strsep. */
549 strcpy(command_line, boot_command_line);
550
551 /* If we want SRM console printk echoing early, do it now. */
552 if (alpha_using_srm && srmcons_output) {
553 register_srm_console();
554
555 /*
556 * If "console=srm" was specified, clear the srmcons_output
557  * flag now so that time.c won't call unregister_srm_console().
558 */
559 if (srmcons_output & 2)
560 srmcons_output = 0;
561 }
562
563 #ifdef CONFIG_MAGIC_SYSRQ
564 /* If we're using SRM, make sysrq-b halt back to the prom,
565 not auto-reboot. */
566 if (alpha_using_srm) {
567 unregister_sysrq_key('b', __sysrq_reboot_op);
568 register_sysrq_key('b', &srm_sysrq_reboot_op);
569 }
570 #endif
571
572 /*
573 * Identify and reconfigure for the current system.
574 */
575 cpu = (struct percpu_struct*)((char*)hwrpb + hwrpb->processor_offset);
576
577 get_sysnames(hwrpb->sys_type, hwrpb->sys_variation,
578 cpu->type, &type_name, &var_name);
579 if (*var_name == '0')
580 var_name = "";
581
582 if (!vec) {
583 vec = get_sysvec(hwrpb->sys_type, hwrpb->sys_variation,
584 cpu->type);
585 }
586
587 if (!vec) {
588 panic("Unsupported system type: %s%s%s (%ld %ld)\n",
589 type_name, (*var_name ? " variation " : ""), var_name,
590 hwrpb->sys_type, hwrpb->sys_variation);
591 }
592 if (vec != &alpha_mv) {
593 alpha_mv = *vec;
594 }
595
596 printk("Booting "
597 #ifdef CONFIG_ALPHA_GENERIC
598 "GENERIC "
599 #endif
600 "on %s%s%s using machine vector %s from %s\n",
601 type_name, (*var_name ? " variation " : ""),
602 var_name, alpha_mv.vector_name,
603 (alpha_using_srm ? "SRM" : "MILO"));
604
605 printk("Major Options: "
606 #ifdef CONFIG_SMP
607 "SMP "
608 #endif
609 #ifdef CONFIG_ALPHA_EV56
610 "EV56 "
611 #endif
612 #ifdef CONFIG_ALPHA_EV67
613 "EV67 "
614 #endif
615 #ifdef CONFIG_ALPHA_LEGACY_START_ADDRESS
616 "LEGACY_START "
617 #endif
618 #ifdef CONFIG_VERBOSE_MCHECK
619 "VERBOSE_MCHECK "
620 #endif
621
622 #ifdef CONFIG_DISCONTIGMEM
623 "DISCONTIGMEM "
624 #ifdef CONFIG_NUMA
625 "NUMA "
626 #endif
627 #endif
628
629 #ifdef CONFIG_DEBUG_SPINLOCK
630 "DEBUG_SPINLOCK "
631 #endif
632 #ifdef CONFIG_MAGIC_SYSRQ
633 "MAGIC_SYSRQ "
634 #endif
635 "\n");
636
637 printk("Command line: %s\n", command_line);
638
639 /*
640 * Sync up the HAE.
641 * Save the SRM's current value for restoration.
642 */
643 srm_hae = *alpha_mv.hae_register;
644 __set_hae(alpha_mv.hae_cache);
645
646 /* Reset enable correctable error reports. */
647 wrmces(0x7);
648
649 /* Find our memory. */
650 setup_memory(kernel_end);
651 memblock_set_bottom_up(true);
652
653 /* First guess at cpu cache sizes. Do this before init_arch. */
654 determine_cpu_caches(cpu->type);
655
656 /* Initialize the machine. Usually has to do with setting up
657 DMA windows and the like. */
658 if (alpha_mv.init_arch)
659 alpha_mv.init_arch();
660
661 /* Reserve standard resources. */
662 reserve_std_resources();
663
664 /*
665 * Give us a default console. TGA users will see nothing until
666 * chr_dev_init is called, rather late in the boot sequence.
667 */
668
669 #ifdef CONFIG_VT
670 #if defined(CONFIG_VGA_CONSOLE)
671 conswitchp = &vga_con;
672 #endif
673 #endif
674
675 /* Default root filesystem to sda2. */
676 ROOT_DEV = Root_SDA2;
677
678 #ifdef CONFIG_EISA
679 /* FIXME: only set this when we actually have EISA in this box? */
680 EISA_bus = 1;
681 #endif
682
683 /*
684 * Check ASN in HWRPB for validity, report if bad.
685 * FIXME: how was this failing? Should we trust it instead,
686 * and copy the value into alpha_mv.max_asn?
687 */
688
689 if (hwrpb->max_asn != MAX_ASN) {
690 printk("Max ASN from HWRPB is bad (0x%lx)\n", hwrpb->max_asn);
691 }
692
693 /*
694 * Identify the flock of penguins.
695 */
696
697 #ifdef CONFIG_SMP
698 setup_smp();
699 #endif
700 paging_init();
701 }
702
703 static char sys_unknown[] = "Unknown";
704 static char systype_names[][16] = {
705 "0",
706 "ADU", "Cobra", "Ruby", "Flamingo", "Mannequin", "Jensen",
707 "Pelican", "Morgan", "Sable", "Medulla", "Noname",
708 "Turbolaser", "Avanti", "Mustang", "Alcor", "Tradewind",
709 "Mikasa", "EB64", "EB66", "EB64+", "AlphaBook1",
710 "Rawhide", "K2", "Lynx", "XL", "EB164", "Noritake",
711 "Cortex", "29", "Miata", "XXM", "Takara", "Yukon",
712 "Tsunami", "Wildfire", "CUSCO", "Eiger", "Titan", "Marvel"
713 };
714
715 static char unofficial_names[][8] = {"100", "Ruffian"};
716
717 static char api_names[][16] = {"200", "Nautilus"};
718
719 static char eb164_names[][8] = {"EB164", "PC164", "LX164", "SX164", "RX164"};
720 static int eb164_indices[] = {0,0,0,1,1,1,1,1,2,2,2,2,3,3,3,3,4};
721
722 static char alcor_names[][16] = {"Alcor", "Maverick", "Bret"};
723 static int alcor_indices[] = {0,0,0,1,1,1,0,0,0,0,0,0,2,2,2,2,2,2};
724
725 static char eb64p_names[][16] = {"EB64+", "Cabriolet", "AlphaPCI64"};
726 static int eb64p_indices[] = {0,0,1,2};
727
728 static char eb66_names[][8] = {"EB66", "EB66+"};
729 static int eb66_indices[] = {0,0,1};
730
731 static char marvel_names[][16] = {
732 "Marvel/EV7"
733 };
734 static int marvel_indices[] = { 0 };
735
736 static char rawhide_names[][16] = {
737 "Dodge", "Wrangler", "Durango", "Tincup", "DaVinci"
738 };
739 static int rawhide_indices[] = {0,0,0,1,1,2,2,3,3,4,4};
740
741 static char titan_names[][16] = {
742 "DEFAULT", "Privateer", "Falcon", "Granite"
743 };
744 static int titan_indices[] = {0,1,2,2,3};
745
746 static char tsunami_names[][16] = {
747 "0", "DP264", "Warhol", "Windjammer", "Monet", "Clipper",
748 "Goldrush", "Webbrick", "Catamaran", "Brisbane", "Melbourne",
749 "Flying Clipper", "Shark"
750 };
751 static int tsunami_indices[] = {0,1,2,3,4,5,6,7,8,9,10,11,12};
752
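/*
 * Map the HWRPB system type / variation / CPU triple to a machine vector:
 * first by system type, then, for families with multiple members, by the
 * member ID encoded in the variation field, with the CPU type breaking any
 * remaining ties.
 */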
753 static struct alpha_machine_vector * __init
754 get_sysvec(unsigned long type, unsigned long variation, unsigned long cpu)
755 {
756 static struct alpha_machine_vector *systype_vecs[] __initdata =
757 {
758 NULL, /* 0 */
759 NULL, /* ADU */
760 NULL, /* Cobra */
761 NULL, /* Ruby */
762 NULL, /* Flamingo */
763 NULL, /* Mannequin */
764 &jensen_mv,
765 NULL, /* Pelican */
766 NULL, /* Morgan */
767 NULL, /* Sable -- see below. */
768 NULL, /* Medulla */
769 &noname_mv,
770 NULL, /* Turbolaser */
771 &avanti_mv,
772 NULL, /* Mustang */
773 NULL, /* Alcor, Bret, Maverick. HWRPB inaccurate? */
774 NULL, /* Tradewind */
775 NULL, /* Mikasa -- see below. */
776 NULL, /* EB64 */
777 NULL, /* EB66 -- see variation. */
778 NULL, /* EB64+ -- see variation. */
779 &alphabook1_mv,
780 &rawhide_mv,
781 NULL, /* K2 */
782 &lynx_mv, /* Lynx */
783 &xl_mv,
784 NULL, /* EB164 -- see variation. */
785 NULL, /* Noritake -- see below. */
786 NULL, /* Cortex */
787 NULL, /* 29 */
788 &miata_mv,
789 NULL, /* XXM */
790 &takara_mv,
791 NULL, /* Yukon */
792 NULL, /* Tsunami -- see variation. */
793 &wildfire_mv, /* Wildfire */
794 NULL, /* CUSCO */
795 &eiger_mv, /* Eiger */
796 NULL, /* Titan */
797 NULL, /* Marvel */
798 };
799
800 static struct alpha_machine_vector *unofficial_vecs[] __initdata =
801 {
802 NULL, /* 100 */
803 &ruffian_mv,
804 };
805
806 static struct alpha_machine_vector *api_vecs[] __initdata =
807 {
808 NULL, /* 200 */
809 &nautilus_mv,
810 };
811
812 static struct alpha_machine_vector *alcor_vecs[] __initdata =
813 {
814 &alcor_mv, &xlt_mv, &xlt_mv
815 };
816
817 static struct alpha_machine_vector *eb164_vecs[] __initdata =
818 {
819 &eb164_mv, &pc164_mv, &lx164_mv, &sx164_mv, &rx164_mv
820 };
821
822 static struct alpha_machine_vector *eb64p_vecs[] __initdata =
823 {
824 &eb64p_mv,
825 &cabriolet_mv,
826 &cabriolet_mv /* AlphaPCI64 */
827 };
828
829 static struct alpha_machine_vector *eb66_vecs[] __initdata =
830 {
831 &eb66_mv,
832 &eb66p_mv
833 };
834
835 static struct alpha_machine_vector *marvel_vecs[] __initdata =
836 {
837 &marvel_ev7_mv,
838 };
839
840 static struct alpha_machine_vector *titan_vecs[] __initdata =
841 {
842 &titan_mv, /* default */
843 &privateer_mv, /* privateer */
844 &titan_mv, /* falcon */
845 &privateer_mv, /* granite */
846 };
847
848 static struct alpha_machine_vector *tsunami_vecs[] __initdata =
849 {
850 NULL,
851 &dp264_mv, /* dp264 */
852 &dp264_mv, /* warhol */
853 &dp264_mv, /* windjammer */
854 &monet_mv, /* monet */
855 &clipper_mv, /* clipper */
856 &dp264_mv, /* goldrush */
857 &webbrick_mv, /* webbrick */
858 &dp264_mv, /* catamaran */
859 NULL, /* brisbane? */
860 NULL, /* melbourne? */
861 NULL, /* flying clipper? */
862 &shark_mv, /* shark */
863 };
864
865 /* ??? Do we need to distinguish between Rawhides? */
866
867 struct alpha_machine_vector *vec;
868
869 /* Search the system tables first... */
870 vec = NULL;
871 if (type < ARRAY_SIZE(systype_vecs)) {
872 vec = systype_vecs[type];
873 } else if ((type > ST_API_BIAS) &&
874 (type - ST_API_BIAS) < ARRAY_SIZE(api_vecs)) {
875 vec = api_vecs[type - ST_API_BIAS];
876 } else if ((type > ST_UNOFFICIAL_BIAS) &&
877 (type - ST_UNOFFICIAL_BIAS) < ARRAY_SIZE(unofficial_vecs)) {
878 vec = unofficial_vecs[type - ST_UNOFFICIAL_BIAS];
879 }
880
881 /* If we've not found one, try for a variation. */
882
883 if (!vec) {
884 /* Member ID is a bit-field. */
885 unsigned long member = (variation >> 10) & 0x3f;
886
887 cpu &= 0xffffffff; /* make it usable */
888
889 switch (type) {
890 case ST_DEC_ALCOR:
891 if (member < ARRAY_SIZE(alcor_indices))
892 vec = alcor_vecs[alcor_indices[member]];
893 break;
894 case ST_DEC_EB164:
895 if (member < ARRAY_SIZE(eb164_indices))
896 vec = eb164_vecs[eb164_indices[member]];
897 /* PC164 may show as EB164 variation with EV56 CPU,
898 but, since no true EB164 had anything but EV5... */
899 if (vec == &eb164_mv && cpu == EV56_CPU)
900 vec = &pc164_mv;
901 break;
902 case ST_DEC_EB64P:
903 if (member < ARRAY_SIZE(eb64p_indices))
904 vec = eb64p_vecs[eb64p_indices[member]];
905 break;
906 case ST_DEC_EB66:
907 if (member < ARRAY_SIZE(eb66_indices))
908 vec = eb66_vecs[eb66_indices[member]];
909 break;
910 case ST_DEC_MARVEL:
911 if (member < ARRAY_SIZE(marvel_indices))
912 vec = marvel_vecs[marvel_indices[member]];
913 break;
914 case ST_DEC_TITAN:
915 vec = titan_vecs[0]; /* default */
916 if (member < ARRAY_SIZE(titan_indices))
917 vec = titan_vecs[titan_indices[member]];
918 break;
919 case ST_DEC_TSUNAMI:
920 if (member < ARRAY_SIZE(tsunami_indices))
921 vec = tsunami_vecs[tsunami_indices[member]];
922 break;
923 case ST_DEC_1000:
924 if (cpu == EV5_CPU || cpu == EV56_CPU)
925 vec = &mikasa_primo_mv;
926 else
927 vec = &mikasa_mv;
928 break;
929 case ST_DEC_NORITAKE:
930 if (cpu == EV5_CPU || cpu == EV56_CPU)
931 vec = &noritake_primo_mv;
932 else
933 vec = &noritake_mv;
934 break;
935 case ST_DEC_2100_A500:
936 if (cpu == EV5_CPU || cpu == EV56_CPU)
937 vec = &sable_gamma_mv;
938 else
939 vec = &sable_mv;
940 break;
941 }
942 }
943 return vec;
944 }
945
946 static struct alpha_machine_vector * __init
947 get_sysvec_byname(const char *name)
948 {
949 static struct alpha_machine_vector *all_vecs[] __initdata =
950 {
951 &alcor_mv,
952 &alphabook1_mv,
953 &avanti_mv,
954 &cabriolet_mv,
955 &clipper_mv,
956 &dp264_mv,
957 &eb164_mv,
958 &eb64p_mv,
959 &eb66_mv,
960 &eb66p_mv,
961 &eiger_mv,
962 &jensen_mv,
963 &lx164_mv,
964 &lynx_mv,
965 &miata_mv,
966 &mikasa_mv,
967 &mikasa_primo_mv,
968 &monet_mv,
969 &nautilus_mv,
970 &noname_mv,
971 &noritake_mv,
972 &noritake_primo_mv,
973 &p2k_mv,
974 &pc164_mv,
975 &privateer_mv,
976 &rawhide_mv,
977 &ruffian_mv,
978 &rx164_mv,
979 &sable_mv,
980 &sable_gamma_mv,
981 &shark_mv,
982 &sx164_mv,
983 &takara_mv,
984 &webbrick_mv,
985 &wildfire_mv,
986 &xl_mv,
987 &xlt_mv
988 };
989
990 size_t i;
991
992 for (i = 0; i < ARRAY_SIZE(all_vecs); ++i) {
993 struct alpha_machine_vector *mv = all_vecs[i];
994 if (strcasecmp(mv->vector_name, name) == 0)
995 return mv;
996 }
997 return NULL;
998 }
999
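/*
 * Produce printable family and variation names for the given HWRPB system
 * type/variation/cpu, using the same member-ID decoding as get_sysvec().
 */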
1000 static void
1001 get_sysnames(unsigned long type, unsigned long variation, unsigned long cpu,
1002 char **type_name, char **variation_name)
1003 {
1004 unsigned long member;
1005
1006 /* If not in the tables, make it UNKNOWN,
1007 else set type name to family */
1008 if (type < ARRAY_SIZE(systype_names)) {
1009 *type_name = systype_names[type];
1010 } else if ((type > ST_API_BIAS) &&
1011 (type - ST_API_BIAS) < ARRAY_SIZE(api_names)) {
1012 *type_name = api_names[type - ST_API_BIAS];
1013 } else if ((type > ST_UNOFFICIAL_BIAS) &&
1014 (type - ST_UNOFFICIAL_BIAS) < ARRAY_SIZE(unofficial_names)) {
1015 *type_name = unofficial_names[type - ST_UNOFFICIAL_BIAS];
1016 } else {
1017 *type_name = sys_unknown;
1018 *variation_name = sys_unknown;
1019 return;
1020 }
1021
1022 /* Set variation to "0"; if variation is zero, done. */
1023 *variation_name = systype_names[0];
1024 if (variation == 0) {
1025 return;
1026 }
1027
1028 member = (variation >> 10) & 0x3f; /* member ID is a bit-field */
1029
1030 cpu &= 0xffffffff; /* make it usable */
1031
1032 switch (type) { /* select by family */
1033 default: /* default to variation "0" for now */
1034 break;
1035 case ST_DEC_EB164:
1036 if (member >= ARRAY_SIZE(eb164_indices))
1037 break;
1038 *variation_name = eb164_names[eb164_indices[member]];
1039 /* PC164 may show as EB164 variation, but with EV56 CPU,
1040 so, since no true EB164 had anything but EV5... */
1041 if (eb164_indices[member] == 0 && cpu == EV56_CPU)
1042 *variation_name = eb164_names[1]; /* make it PC164 */
1043 break;
1044 case ST_DEC_ALCOR:
1045 if (member < ARRAY_SIZE(alcor_indices))
1046 *variation_name = alcor_names[alcor_indices[member]];
1047 break;
1048 case ST_DEC_EB64P:
1049 if (member < ARRAY_SIZE(eb64p_indices))
1050 *variation_name = eb64p_names[eb64p_indices[member]];
1051 break;
1052 case ST_DEC_EB66:
1053 if (member < ARRAY_SIZE(eb66_indices))
1054 *variation_name = eb66_names[eb66_indices[member]];
1055 break;
1056 case ST_DEC_MARVEL:
1057 if (member < ARRAY_SIZE(marvel_indices))
1058 *variation_name = marvel_names[marvel_indices[member]];
1059 break;
1060 case ST_DEC_RAWHIDE:
1061 if (member < ARRAY_SIZE(rawhide_indices))
1062 *variation_name = rawhide_names[rawhide_indices[member]];
1063 break;
1064 case ST_DEC_TITAN:
1065 *variation_name = titan_names[0]; /* default */
1066 if (member < ARRAY_SIZE(titan_indices))
1067 *variation_name = titan_names[titan_indices[member]];
1068 break;
1069 case ST_DEC_TSUNAMI:
1070 if (member < ARRAY_SIZE(tsunami_indices))
1071 *variation_name = tsunami_names[tsunami_indices[member]];
1072 break;
1073 }
1074 }
1075
1076 /*
1077 * A change was made to the HWRPB via an ECO and the following code
1078 * tracks a part of the ECO. In HWRPB versions less than 5, the ECO
1079 * was not implemented in the console firmware. If it's revision 5 or
1080 * greater we can get the name of the platform as an ASCII string from
1081 * the HWRPB. That's what this function does. It checks the revision
1082 * level and if the string is in the HWRPB it returns the address of
1083 * the string--a pointer to the name of the platform.
1084 *
1085 * Returns:
1086  * - Pointer to an ASCII string if it's in the HWRPB
1087 * - Pointer to a blank string if the data is not in the HWRPB.
1088 */
1089
1090 static char *
1091 platform_string(void)
1092 {
1093 struct dsr_struct *dsr;
1094 static char unk_system_string[] = "N/A";
1095
1096 /* Go to the console for the string pointer.
1097 * If the rpb_vers is not 5 or greater the rpb
1098 * is old and does not have this data in it.
1099 */
1100 if (hwrpb->revision < 5)
1101 return (unk_system_string);
1102 else {
1103 /* The Dynamic System Recognition struct
1104 * has the system platform name starting
1105 * after the character count of the string.
1106 */
1107 dsr = ((struct dsr_struct *)
1108 ((char *)hwrpb + hwrpb->dsr_offset));
1109 return ((char *)dsr + (dsr->sysname_off +
1110 sizeof(long)));
1111 }
1112 }
1113
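/*
 * Count per-CPU slots whose HWRPB flags have all of the bits in 0x1cc set,
 * which appears to select processors that are present, available, and have
 * valid PALcode loaded (per the console per-CPU state flag layout).
 */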
1114 static int
1115 get_nr_processors(struct percpu_struct *cpubase, unsigned long num)
1116 {
1117 struct percpu_struct *cpu;
1118 unsigned long i;
1119 int count = 0;
1120
1121 for (i = 0; i < num; i++) {
1122 cpu = (struct percpu_struct *)
1123 ((char *)cpubase + i*hwrpb->processor_size);
1124 if ((cpu->flags & 0x1cc) == 0x1cc)
1125 count++;
1126 }
1127 return count;
1128 }
1129
1130 static void
1131 show_cache_size (struct seq_file *f, const char *which, int shape)
1132 {
1133 if (shape == -1)
1134 seq_printf (f, "%s\t\t: n/a\n", which);
1135 else if (shape == 0)
1136 seq_printf (f, "%s\t\t: unknown\n", which);
1137 else
1138 seq_printf (f, "%s\t\t: %dK, %d-way, %db line\n",
1139 which, shape >> 10, shape & 15,
1140 1 << ((shape >> 4) & 15));
1141 }
1142
1143 static int
1144 show_cpuinfo(struct seq_file *f, void *slot)
1145 {
1146 extern struct unaligned_stat {
1147 unsigned long count, va, pc;
1148 } unaligned[2];
1149
1150 static char cpu_names[][8] = {
1151 "EV3", "EV4", "Simulate", "LCA4", "EV5", "EV45", "EV56",
1152 "EV6", "PCA56", "PCA57", "EV67", "EV68CB", "EV68AL",
1153 "EV68CX", "EV7", "EV79", "EV69"
1154 };
1155
1156 struct percpu_struct *cpu = slot;
1157 unsigned int cpu_index;
1158 char *cpu_name;
1159 char *systype_name;
1160 char *sysvariation_name;
1161 int nr_processors;
1162 unsigned long timer_freq;
1163
1164 cpu_index = (unsigned) (cpu->type - 1);
1165 cpu_name = "Unknown";
1166 if (cpu_index < ARRAY_SIZE(cpu_names))
1167 cpu_name = cpu_names[cpu_index];
1168
1169 get_sysnames(hwrpb->sys_type, hwrpb->sys_variation,
1170 cpu->type, &systype_name, &sysvariation_name);
1171
1172 nr_processors = get_nr_processors(cpu, hwrpb->nr_processors);
1173
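/* The HWRPB reports the interval timer interrupt frequency scaled by 4096,
   so (100 * intr_freq) / 4096 yields the rate in units of 1/100 Hz for the
   two-decimal-place printout below. */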
1174 #if CONFIG_HZ == 1024 || CONFIG_HZ == 1200
1175 timer_freq = (100UL * hwrpb->intr_freq) / 4096;
1176 #else
1177 timer_freq = 100UL * CONFIG_HZ;
1178 #endif
1179
1180 seq_printf(f, "cpu\t\t\t: Alpha\n"
1181 "cpu model\t\t: %s\n"
1182 "cpu variation\t\t: %ld\n"
1183 "cpu revision\t\t: %ld\n"
1184 "cpu serial number\t: %s\n"
1185 "system type\t\t: %s\n"
1186 "system variation\t: %s\n"
1187 "system revision\t\t: %ld\n"
1188 "system serial number\t: %s\n"
1189 "cycle frequency [Hz]\t: %lu %s\n"
1190 "timer frequency [Hz]\t: %lu.%02lu\n"
1191 "page size [bytes]\t: %ld\n"
1192 "phys. address bits\t: %ld\n"
1193 "max. addr. space #\t: %ld\n"
1194 "BogoMIPS\t\t: %lu.%02lu\n"
1195 "kernel unaligned acc\t: %ld (pc=%lx,va=%lx)\n"
1196 "user unaligned acc\t: %ld (pc=%lx,va=%lx)\n"
1197 "platform string\t\t: %s\n"
1198 "cpus detected\t\t: %d\n",
1199 cpu_name, cpu->variation, cpu->revision,
1200 (char*)cpu->serial_no,
1201 systype_name, sysvariation_name, hwrpb->sys_revision,
1202 (char*)hwrpb->ssn,
1203 est_cycle_freq ? : hwrpb->cycle_freq,
1204 est_cycle_freq ? "est." : "",
1205 timer_freq / 100, timer_freq % 100,
1206 hwrpb->pagesize,
1207 hwrpb->pa_bits,
1208 hwrpb->max_asn,
1209 loops_per_jiffy / (500000/HZ),
1210 (loops_per_jiffy / (5000/HZ)) % 100,
1211 unaligned[0].count, unaligned[0].pc, unaligned[0].va,
1212 unaligned[1].count, unaligned[1].pc, unaligned[1].va,
1213 platform_string(), nr_processors);
1214
1215 #ifdef CONFIG_SMP
1216 seq_printf(f, "cpus active\t\t: %u\n"
1217 "cpu active mask\t\t: %016lx\n",
1218 num_online_cpus(), cpumask_bits(cpu_possible_mask)[0]);
1219 #endif
1220
1221 show_cache_size (f, "L1 Icache", alpha_l1i_cacheshape);
1222 show_cache_size (f, "L1 Dcache", alpha_l1d_cacheshape);
1223 show_cache_size (f, "L2 cache", alpha_l2_cacheshape);
1224 show_cache_size (f, "L3 cache", alpha_l3_cacheshape);
1225
1226 return 0;
1227 }
1228
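/*
 * Time a chain of dependent loads covering 'size' bytes at the given stride,
 * using the rpcc cycle counter, and return the average cycles per load.
 */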
1229 static int __init
1230 read_mem_block(int *addr, int stride, int size)
1231 {
1232 long nloads = size / stride, cnt, tmp;
1233
1234 __asm__ __volatile__(
1235 " rpcc %0\n"
1236 "1: ldl %3,0(%2)\n"
1237 " subq %1,1,%1\n"
1238 /* Next two XORs introduce an explicit data dependency between
1239 consecutive loads in the loop, which will give us true load
1240 latency. */
1241 " xor %3,%2,%2\n"
1242 " xor %3,%2,%2\n"
1243 " addq %2,%4,%2\n"
1244 " bne %1,1b\n"
1245 " rpcc %3\n"
1246 " subl %3,%0,%0\n"
1247 : "=&r" (cnt), "=&r" (nloads), "=&r" (addr), "=&r" (tmp)
1248 : "r" (stride), "1" (nloads), "2" (addr));
1249
1250 return cnt / (size / stride);
1251 }
1252
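/*
 * Encode a cache shape: the total size in bytes with the low eight bits
 * replaced by log2(line size) in bits 4..7 and the associativity in bits
 * 0..3; show_cache_size() above is the matching decoder.
 */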
1253 #define CSHAPE(totalsize, linesize, assoc) \
1254 ((totalsize & ~0xff) | (linesize << 4) | assoc)
1255
1256 /* ??? EV5 supports up to 64M, but did the systems with more than
1257 16M of BCACHE ever exist? */
1258 #define MAX_BCACHE_SIZE 16*1024*1024
1259
1260 /* Note that the offchip caches are direct mapped on all Alphas. */
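/*
 * Probe for an external cache by timing dependent loads over regions of
 * doubling size; once the average load latency more than doubles, the
 * previous region size is taken as the cache size.
 */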
1261 static int __init
1262 external_cache_probe(int minsize, int width)
1263 {
1264 int cycles, prev_cycles = 1000000;
1265 int stride = 1 << width;
1266 long size = minsize, maxsize = MAX_BCACHE_SIZE * 2;
1267
1268 if (maxsize > (max_low_pfn + 1) << PAGE_SHIFT)
1269 maxsize = 1 << (ilog2(max_low_pfn + 1) + PAGE_SHIFT);
1270
1271 /* Get the first block cached. */
1272 read_mem_block(__va(0), stride, size);
1273
1274 while (size < maxsize) {
1275 /* Get an average load latency in cycles. */
1276 cycles = read_mem_block(__va(0), stride, size);
1277 if (cycles > prev_cycles * 2) {
1278 /* Fine, we exceed the cache. */
1279 printk("%ldK Bcache detected; load hit latency %d "
1280 "cycles, load miss latency %d cycles\n",
1281 size >> 11, prev_cycles, cycles);
1282 return CSHAPE(size >> 1, width, 1);
1283 }
1284 /* Try to get the next block cached. */
1285 read_mem_block(__va(size), stride, size);
1286 prev_cycles = cycles;
1287 size <<= 1;
1288 }
1289 return -1; /* No BCACHE found. */
1290 }
1291
1292 static void __init
1293 determine_cpu_caches (unsigned int cpu_type)
1294 {
1295 int L1I, L1D, L2, L3;
1296
1297 switch (cpu_type) {
1298 case EV4_CPU:
1299 case EV45_CPU:
1300 {
1301 if (cpu_type == EV4_CPU)
1302 L1I = CSHAPE(8*1024, 5, 1);
1303 else
1304 L1I = CSHAPE(16*1024, 5, 1);
1305 L1D = L1I;
1306 L3 = -1;
1307
1308 /* BIU_CTL is a write-only Abox register. PALcode has a
1309 shadow copy, and may be available from some versions
1310 of the CSERVE PALcall. If we can get it, then
1311
1312 unsigned long biu_ctl, size;
1313 size = 128*1024 * (1 << ((biu_ctl >> 28) & 7));
1314 L2 = CSHAPE (size, 5, 1);
1315
1316 Unfortunately, we can't rely on that.
1317 */
1318 L2 = external_cache_probe(128*1024, 5);
1319 break;
1320 }
1321
1322 case LCA4_CPU:
1323 {
1324 unsigned long car, size;
1325
1326 L1I = L1D = CSHAPE(8*1024, 5, 1);
1327 L3 = -1;
1328
1329 car = *(vuip) phys_to_virt (0x120000078UL);
1330 size = 64*1024 * (1 << ((car >> 5) & 7));
1331 /* No typo -- 8 byte cacheline size. Whodathunk. */
1332 L2 = (car & 1 ? CSHAPE (size, 3, 1) : -1);
1333 break;
1334 }
1335
1336 case EV5_CPU:
1337 case EV56_CPU:
1338 {
1339 unsigned long sc_ctl, width;
1340
1341 L1I = L1D = CSHAPE(8*1024, 5, 1);
1342
1343 /* Check the line size of the Scache. */
1344 sc_ctl = *(vulp) phys_to_virt (0xfffff000a8UL);
1345 width = sc_ctl & 0x1000 ? 6 : 5;
1346 L2 = CSHAPE (96*1024, width, 3);
1347
1348 /* BC_CONTROL and BC_CONFIG are write-only IPRs. PALcode
1349 has a shadow copy, and may be available from some versions
1350 of the CSERVE PALcall. If we can get it, then
1351
1352 unsigned long bc_control, bc_config, size;
1353 size = 1024*1024 * (1 << ((bc_config & 7) - 1));
1354 L3 = (bc_control & 1 ? CSHAPE (size, width, 1) : -1);
1355
1356 Unfortunately, we can't rely on that.
1357 */
1358 L3 = external_cache_probe(1024*1024, width);
1359 break;
1360 }
1361
1362 case PCA56_CPU:
1363 case PCA57_CPU:
1364 {
1365 if (cpu_type == PCA56_CPU) {
1366 L1I = CSHAPE(16*1024, 6, 1);
1367 L1D = CSHAPE(8*1024, 5, 1);
1368 } else {
1369 L1I = CSHAPE(32*1024, 6, 2);
1370 L1D = CSHAPE(16*1024, 5, 1);
1371 }
1372 L3 = -1;
1373
1374 #if 0
1375 unsigned long cbox_config, size;
1376
1377 cbox_config = *(vulp) phys_to_virt (0xfffff00008UL);
1378 size = 512*1024 * (1 << ((cbox_config >> 12) & 3));
1379
1380 L2 = ((cbox_config >> 31) & 1 ? CSHAPE (size, 6, 1) : -1);
1381 #else
1382 L2 = external_cache_probe(512*1024, 6);
1383 #endif
1384 break;
1385 }
1386
1387 case EV6_CPU:
1388 case EV67_CPU:
1389 case EV68CB_CPU:
1390 case EV68AL_CPU:
1391 case EV68CX_CPU:
1392 case EV69_CPU:
1393 L1I = L1D = CSHAPE(64*1024, 6, 2);
1394 L2 = external_cache_probe(1024*1024, 6);
1395 L3 = -1;
1396 break;
1397
1398 case EV7_CPU:
1399 case EV79_CPU:
1400 L1I = L1D = CSHAPE(64*1024, 6, 2);
1401 L2 = CSHAPE(7*1024*1024/4, 6, 7);
1402 L3 = -1;
1403 break;
1404
1405 default:
1406 /* Nothing known about this cpu type. */
1407 L1I = L1D = L2 = L3 = 0;
1408 break;
1409 }
1410
1411 alpha_l1i_cacheshape = L1I;
1412 alpha_l1d_cacheshape = L1D;
1413 alpha_l2_cacheshape = L2;
1414 alpha_l3_cacheshape = L3;
1415 }
1416
1417 /*
1418 * We show only CPU #0 info.
1419 */
1420 static void *
1421 c_start(struct seq_file *f, loff_t *pos)
1422 {
1423 return *pos ? NULL : (char *)hwrpb + hwrpb->processor_offset;
1424 }
1425
1426 static void *
1427 c_next(struct seq_file *f, void *v, loff_t *pos)
1428 {
1429 (*pos)++;
1430 return NULL;
1431 }
1432
1433 static void
1434 c_stop(struct seq_file *f, void *v)
1435 {
1436 }
1437
1438 const struct seq_operations cpuinfo_op = {
1439 .start = c_start,
1440 .next = c_next,
1441 .stop = c_stop,
1442 .show = show_cpuinfo,
1443 };
1444
1445
1446 static int
1447 alpha_panic_event(struct notifier_block *this, unsigned long event, void *ptr)
1448 {
1449 #if 1
1450 /* FIXME FIXME FIXME */
1451 /* If we are using SRM and serial console, just hard halt here. */
1452 if (alpha_using_srm && srmcons_output)
1453 __halt();
1454 #endif
1455 return NOTIFY_DONE;
1456 }
1457
1458 static __init int add_pcspkr(void)
1459 {
1460 struct platform_device *pd;
1461 int ret;
1462
1463 pd = platform_device_alloc("pcspkr", -1);
1464 if (!pd)
1465 return -ENOMEM;
1466
1467 ret = platform_device_add(pd);
1468 if (ret)
1469 platform_device_put(pd);
1470
1471 return ret;
1472 }
1473 device_initcall(add_pcspkr);
1474