/*
 * Copyright (C) 2005-2012 Imagination Technologies Ltd.
 *
 * This file contains the architecture-dependent parts of system setup.
 *
 */

#include <linux/export.h>
#include <linux/bootmem.h>
#include <linux/console.h>
#include <linux/cpu.h>
#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/fs.h>
#include <linux/genhd.h>
#include <linux/init.h>
#include <linux/initrd.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/memblock.h>
#include <linux/mm.h>
#include <linux/of_fdt.h>
#include <linux/pfn.h>
#include <linux/root_dev.h>
#include <linux/sched.h>
#include <linux/seq_file.h>
#include <linux/start_kernel.h>
#include <linux/string.h>

#include <asm/cachepart.h>
#include <asm/clock.h>
#include <asm/core_reg.h>
#include <asm/cpu.h>
#include <asm/da.h>
#include <asm/highmem.h>
#include <asm/hwthread.h>
#include <asm/l2cache.h>
#include <asm/mach/arch.h>
#include <asm/metag_mem.h>
#include <asm/metag_regs.h>
#include <asm/mmu.h>
#include <asm/mmzone.h>
#include <asm/processor.h>
#include <asm/prom.h>
#include <asm/sections.h>
#include <asm/setup.h>
#include <asm/traps.h>

/* Priv protect as many registers as possible. */
#define DEFAULT_PRIV	(TXPRIVEXT_COPRO_BITS		| \
			 TXPRIVEXT_TXTRIGGER_BIT	| \
			 TXPRIVEXT_TXGBLCREG_BIT	| \
			 TXPRIVEXT_ILOCK_BIT		| \
			 TXPRIVEXT_TXITACCYC_BIT	| \
			 TXPRIVEXT_TXDIVTIME_BIT	| \
			 TXPRIVEXT_TXAMAREGX_BIT	| \
			 TXPRIVEXT_TXTIMERI_BIT		| \
			 TXPRIVEXT_TXSTATUS_BIT		| \
			 TXPRIVEXT_TXDISABLE_BIT)

/* Meta2 specific bits. */
#ifdef CONFIG_METAG_META12
#define META2_PRIV	0
#else
#define META2_PRIV	(TXPRIVEXT_TXTIMER_BIT		| \
			 TXPRIVEXT_TRACE_BIT)
#endif

/* Unaligned access checking bits. */
#ifdef CONFIG_METAG_UNALIGNED
#define UNALIGNED_PRIV	TXPRIVEXT_ALIGNREW_BIT
#else
#define UNALIGNED_PRIV	0
#endif

#define PRIV_BITS	(DEFAULT_PRIV			| \
			 META2_PRIV			| \
			 UNALIGNED_PRIV)

/*
 * Protect access to:
 * 0x06000000-0x07ffffff Direct mapped region
 * 0x05000000-0x05ffffff MMU table region (Meta1)
 * 0x04400000-0x047fffff Cache flush region
 * 0x84000000-0x87ffffff Core cache memory region (Meta2)
 *
 * Allow access to:
 * 0x80000000-0x81ffffff Core code memory region (Meta2)
 */
#ifdef CONFIG_METAG_META12
#define PRIVSYSR_BITS	TXPRIVSYSR_ALL_BITS
#else
#define PRIVSYSR_BITS	(TXPRIVSYSR_ALL_BITS & ~TXPRIVSYSR_CORECODE_BIT)
#endif

/* Protect all 0x02xxxxxx and 0x048xxxxx. */
#define PIOREG_BITS	0xffffffff

/*
 * Protect all 0x04000xx0 (system events)
 * except write combiner flush and write fence (system events 4 and 5).
 */
#define PSYREG_BITS	0xfffffffb
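
/* All of the above masks are written to the protection registers in setup_priv(). */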

extern char _heap_start[];

#ifdef CONFIG_METAG_BUILTIN_DTB
extern u32 __dtb_start[];
#endif

#ifdef CONFIG_DA_CONSOLE
/* Our early channel based console driver */
extern struct console dash_console;
#endif

struct machine_desc *machine_desc __initdata;

/*
 * Map a Linux CPU number to a hardware thread ID.
 * In SMP this will be set up with the correct mapping at startup; in UP this
 * will map to the HW thread on which we are running.
 */
u8 cpu_2_hwthread_id[NR_CPUS] __read_mostly = {
	[0 ... NR_CPUS-1] = BAD_HWTHREAD_ID
};
EXPORT_SYMBOL_GPL(cpu_2_hwthread_id);

/*
 * Map a hardware thread ID to a Linux CPU number.
 * In SMP this will be fleshed out with the correct CPU ID for a particular
 * hardware thread. In UP this will be initialised with the boot CPU ID.
 */
u8 hwthread_id_2_cpu[4] __read_mostly = {
	[0 ... 3] = BAD_CPU_ID
};

/* The relative offset of the MMU mapped memory (from ldlk or bootloader)
 * to the real physical memory.  This is needed as we have to use the
 * physical addresses in the MMU tables (pte entries), and not the virtual
 * addresses.
 * This variable is used in the __pa() and __va() macros, and should
 * probably only be used via them.
 */
unsigned int meta_memoffset;
EXPORT_SYMBOL(meta_memoffset);

static char __initdata *original_cmd_line;

DEFINE_PER_CPU(PTBI, pTBI);

/*
 * Mappings are specified as "CPU_ID:HWTHREAD_ID", e.g.
 *
 *	"hwthread_map=0:1,1:2,2:3,3:0"
 *
 *	Linux CPU ID	HWTHREAD_ID
 *	---------------------------
 *	    0		      1
 *	    1		      2
 *	    2		      3
 *	    3		      0
 */
static int __init parse_hwthread_map(char *p)
{
	int cpu;

	while (*p) {
		cpu = (*p++) - '0';
		if (cpu < 0 || cpu > 9)
			goto err_cpu;

		p++;		/* skip colon */
		cpu_2_hwthread_id[cpu] = (*p++) - '0';
		if (cpu_2_hwthread_id[cpu] >= 4)
			goto err_thread;
		hwthread_id_2_cpu[cpu_2_hwthread_id[cpu]] = cpu;

		if (*p == ',')
			p++;		/* skip comma */
	}

	return 0;
err_cpu:
	pr_err("%s: hwthread_map cpu argument out of range\n", __func__);
	return -EINVAL;
err_thread:
	pr_err("%s: hwthread_map thread argument out of range\n", __func__);
	return -EINVAL;
}
early_param("hwthread_map", parse_hwthread_map);

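/*
 * dump_machine_table() - Print the name and compatible list of every machine
 * descriptor built into this kernel, then halt the processor.
 */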
void __init dump_machine_table(void)
{
	struct machine_desc *p;
	const char **compat;

	pr_info("Available machine support:\n\tNAME\t\tCOMPATIBLE LIST\n");
	for_each_machine_desc(p) {
		pr_info("\t%s\t[", p->name);
		for (compat = p->dt_compat; compat && *compat; ++compat)
			printk(" '%s'", *compat);
		printk(" ]\n");
	}

	pr_info("\nPlease check your kernel config and/or bootloader.\n");

	hard_processor_halt(HALT_PANIC);
}

#ifdef CONFIG_METAG_HALT_ON_PANIC
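/* Halt the core (HALT_PANIC) when a kernel panic is reported. */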
static int metag_panic_event(struct notifier_block *this, unsigned long event,
			     void *ptr)
{
	hard_processor_halt(HALT_PANIC);
	return NOTIFY_DONE;
}

static struct notifier_block metag_panic_block = {
	.notifier_call = metag_panic_event,
};
#endif

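/*
 * setup_arch() - Architecture-specific boot-time setup.
 *
 * Probes the caches and DA, picks a machine description (from the boot
 * device tree, the built-in DTB, or the default), checks that the kernel is
 * mapped at PAGE_OFFSET with a privileged PTE, locates the TBI heap segment
 * to size the bootmem region, and then sets up paging, privilege protection,
 * the CPU/hwthread mapping and the unflattened device tree.
 */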
void __init setup_arch(char **cmdline_p)
{
	unsigned long start_pfn;
	unsigned long text_start = (unsigned long)(&_stext);
	unsigned long cpu = smp_processor_id();
	unsigned long heap_start, heap_end;
	unsigned long start_pte;
	PTBI _pTBI;
	PTBISEG p_heap;
	int heap_id, i;

	metag_cache_probe();

	metag_da_probe();
#ifdef CONFIG_DA_CONSOLE
	if (metag_da_enabled()) {
		/* An early channel based console driver */
		register_console(&dash_console);
		add_preferred_console("ttyDA", 1, NULL);
	}
#endif

	/* try interpreting the argument as a device tree */
	machine_desc = setup_machine_fdt(original_cmd_line);
	/* if it doesn't look like a device tree it must be a command line */
	if (!machine_desc) {
#ifdef CONFIG_METAG_BUILTIN_DTB
		/* try the embedded device tree */
		machine_desc = setup_machine_fdt(__dtb_start);
		if (!machine_desc)
			panic("Invalid embedded device tree.");
#else
		/* use the default machine description */
		machine_desc = default_machine_desc();
#endif
#ifndef CONFIG_CMDLINE_FORCE
		/* append the bootloader cmdline to any builtin fdt cmdline */
		if (boot_command_line[0] && original_cmd_line[0])
			strlcat(boot_command_line, " ", COMMAND_LINE_SIZE);
		strlcat(boot_command_line, original_cmd_line,
			COMMAND_LINE_SIZE);
#endif
	}
	setup_meta_clocks(machine_desc->clocks);

	*cmdline_p = boot_command_line;
	parse_early_param();

	/*
	 * Make sure we don't alias in dcache or icache
	 */
	check_for_cache_aliasing(cpu);

#ifdef CONFIG_METAG_HALT_ON_PANIC
	atomic_notifier_chain_register(&panic_notifier_list,
				       &metag_panic_block);
#endif

#ifdef CONFIG_DUMMY_CONSOLE
	conswitchp = &dummy_con;
#endif

	if (!(__core_reg_get(TXSTATUS) & TXSTATUS_PSTAT_BIT))
		panic("Privilege must be enabled for this thread.");

	_pTBI = __TBI(TBID_ISTAT_BIT);

	per_cpu(pTBI, cpu) = _pTBI;

	if (!per_cpu(pTBI, cpu))
		panic("No TBI found!");

	/*
	 * Initialize all interrupt vectors to our copy of __TBIUnExpXXX,
	 * rather than the version from the bootloader. This makes call
	 * stacks easier to understand and may allow us to unmap the
	 * bootloader at some point.
	 *
	 * We need to keep the LWK handler that TBI installed in order to
	 * be able to do inter-thread comms.
	 */
	for (i = 0; i <= TBID_SIGNUM_MAX; i++)
		if (i != TBID_SIGNUM_LWK)
			_pTBI->fnSigs[i] = __TBIUnExpXXX;

	/* A Meta requirement is that the kernel is loaded (virtually)
	 * at the PAGE_OFFSET.
	 */
	if (PAGE_OFFSET != text_start)
		panic("Kernel not loaded at PAGE_OFFSET (%#x) but at %#lx.",
		      PAGE_OFFSET, text_start);

	start_pte = mmu_read_second_level_page(text_start);

	/*
	 * Kernel pages should have the PRIV bit set by the bootloader.
	 */
	if (!(start_pte & _PAGE_KERNEL))
		panic("kernel pte does not have PRIV set");

	/*
	 * See __pa and __va in include/asm/page.h.
	 * This value is negative when running in local space but the
	 * calculations work anyway.
	 */
	meta_memoffset = text_start - (start_pte & PAGE_MASK);

	/* Now let's look at the heap space */
	heap_id = (__TBIThreadId() & TBID_THREAD_BITS)
		+ TBID_SEG(0, TBID_SEGSCOPE_LOCAL, TBID_SEGTYPE_HEAP);

	p_heap = __TBIFindSeg(NULL, heap_id);

	if (!p_heap)
		panic("Could not find heap from TBI!");

	/* The heap begins at the first full page after the kernel data. */
	heap_start = (unsigned long) &_heap_start;

	/* The heap ends at the end of the heap segment specified with
	 * ldlk.
	 */
	if (is_global_space(text_start)) {
		pr_debug("WARNING: running in global space!\n");
		heap_end = (unsigned long)p_heap->pGAddr + p_heap->Bytes;
	} else {
		heap_end = (unsigned long)p_heap->pLAddr + p_heap->Bytes;
	}

	ROOT_DEV = Root_RAM0;

	/* init_mm is the mm struct used for the first task.  It is then
	 * cloned for all other tasks spawned from that task.
	 *
	 * Note - we are using the virtual addresses here.
	 */
	init_mm.start_code = (unsigned long)(&_stext);
	init_mm.end_code = (unsigned long)(&_etext);
	init_mm.end_data = (unsigned long)(&_edata);
	init_mm.brk = (unsigned long)heap_start;

	min_low_pfn = PFN_UP(__pa(text_start));
	max_low_pfn = PFN_DOWN(__pa(heap_end));

	pfn_base = min_low_pfn;

	/* Round max_pfn up to a 4Mb boundary. The free_bootmem_node()
	 * call later makes sure to keep the rounded up pages marked reserved.
	 */
	max_pfn = max_low_pfn + ((1 << MAX_ORDER) - 1);
	max_pfn &= ~((1 << MAX_ORDER) - 1);

	start_pfn = PFN_UP(__pa(heap_start));

	if (min_low_pfn & ((1 << MAX_ORDER) - 1)) {
		/* Theoretically, we could expand the space that the
		 * bootmem allocator covers - much as we do for the
		 * 'high' address, and then tell the bootmem system
		 * that the lowest chunk is 'not available'.  Right
		 * now it is just much easier to constrain the
		 * user to always MAX_ORDER align their kernel space.
		 */

		panic("Kernel must be %d byte aligned, currently at %#lx.",
		      1 << (MAX_ORDER + PAGE_SHIFT),
		      min_low_pfn << PAGE_SHIFT);
	}

#ifdef CONFIG_HIGHMEM
	highstart_pfn = highend_pfn = max_pfn;
	high_memory = (void *) __va(PFN_PHYS(highstart_pfn));
#else
	high_memory = (void *)__va(PFN_PHYS(max_pfn));
#endif

	paging_init(heap_end);

	setup_priv();

	/* Set up the boot CPU's mapping. The rest will be set up below. */
	cpu_2_hwthread_id[smp_processor_id()] = hard_processor_id();
	hwthread_id_2_cpu[hard_processor_id()] = smp_processor_id();

	/* Copy device tree blob into non-init memory before unflattening */
	copy_fdt();
	unflatten_device_tree();

#ifdef CONFIG_SMP
	smp_init_cpus();
#endif

	if (machine_desc->init_early)
		machine_desc->init_early();
}


static int __init customize_machine(void)
{
	/* customizes platform devices, or adds new ones */
	if (machine_desc->init_machine)
		machine_desc->init_machine();
	return 0;
}
arch_initcall(customize_machine);

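/* Run the machine's init_late() hook at late_initcall time. */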
static int __init init_machine_late(void)
{
	if (machine_desc->init_late)
		machine_desc->init_late();
	return 0;
}
late_initcall(init_machine_late);

#ifdef CONFIG_PROC_FS
/*
 *	Get CPU information for use by the procfs.
 */
static const char *get_cpu_capabilities(unsigned int txenable)
{
#ifdef CONFIG_METAG_META21
	/* See CORE_ID in META HTP.GP TRM - Architecture Overview 2.1.238 */
	int coreid = metag_in32(METAC_CORE_ID);
	unsigned int dsp_type = (coreid >> 3) & 7;
	unsigned int fpu_type = (coreid >> 7) & 3;

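	/* Switch on a combined key: bits 0-2 are the DSP type, bits 3-4 the FPU type. */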
	switch (dsp_type | fpu_type << 3) {
	case (0x00): return "EDSP";
	case (0x01): return "DSP";
	case (0x08): return "EDSP+LFPU";
	case (0x09): return "DSP+LFPU";
	case (0x10): return "EDSP+FPU";
	case (0x11): return "DSP+FPU";
	}
	return "UNKNOWN";

#else
	if (!(txenable & TXENABLE_CLASS_BITS))
		return "DSP";
	else
		return "";
#endif
}

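/*
 * show_cpuinfo() - Emit the /proc/cpuinfo record(s): core revision, clock
 * frequency, BogoMIPS, calibration loops and DSP/FPU capabilities, plus L2
 * cache details when an L2 cache is present. With SMP one record is printed
 * per online CPU; otherwise only the current hardware thread is reported.
 */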
static int show_cpuinfo(struct seq_file *m, void *v)
{
	const char *cpu;
	unsigned int txenable, thread_id, major, minor;
	unsigned long clockfreq = get_coreclock();
#ifdef CONFIG_SMP
	int i;
	unsigned long lpj;
#endif

	cpu = "META";

	txenable = __core_reg_get(TXENABLE);
	major = (txenable & TXENABLE_MAJOR_REV_BITS) >> TXENABLE_MAJOR_REV_S;
	minor = (txenable & TXENABLE_MINOR_REV_BITS) >> TXENABLE_MINOR_REV_S;
	thread_id = (txenable >> 8) & 0x3;

#ifdef CONFIG_SMP
	for_each_online_cpu(i) {
		lpj = per_cpu(cpu_data, i).loops_per_jiffy;
		txenable = core_reg_read(TXUCT_ID, TXENABLE_REGNUM,
							cpu_2_hwthread_id[i]);

		seq_printf(m, "CPU:\t\t%s %d.%d (thread %d)\n"
			      "Clocking:\t%lu.%1luMHz\n"
			      "BogoMips:\t%lu.%02lu\n"
			      "Calibration:\t%lu loops\n"
			      "Capabilities:\t%s\n\n",
			      cpu, major, minor, i,
			      clockfreq / 1000000, (clockfreq / 100000) % 10,
			      lpj / (500000 / HZ), (lpj / (5000 / HZ)) % 100,
			      lpj,
			      get_cpu_capabilities(txenable));
	}
#else
	seq_printf(m, "CPU:\t\t%s %d.%d (thread %d)\n"
		   "Clocking:\t%lu.%1luMHz\n"
		   "BogoMips:\t%lu.%02lu\n"
		   "Calibration:\t%lu loops\n"
		   "Capabilities:\t%s\n",
		   cpu, major, minor, thread_id,
		   clockfreq / 1000000, (clockfreq / 100000) % 10,
		   loops_per_jiffy / (500000 / HZ),
		   (loops_per_jiffy / (5000 / HZ)) % 100,
		   loops_per_jiffy,
		   get_cpu_capabilities(txenable));
#endif /* CONFIG_SMP */

#ifdef CONFIG_METAG_L2C
	if (meta_l2c_is_present()) {
		seq_printf(m, "L2 cache:\t%s\n"
			      "L2 cache size:\t%d KB\n",
			      meta_l2c_is_enabled() ? "enabled" : "disabled",
			      meta_l2c_size() >> 10);
	}
#endif
	return 0;
}

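/*
 * Trivial seq_file iterator: c_start() returns non-NULL only for position
 * zero, so show_cpuinfo() produces the whole output in a single call.
 */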
static void *c_start(struct seq_file *m, loff_t *pos)
{
	return (void *)(*pos == 0);
}

static void *c_next(struct seq_file *m, void *v, loff_t *pos)
{
	return NULL;
}

static void c_stop(struct seq_file *m, void *v)
{
}

const struct seq_operations cpuinfo_op = {
	.start = c_start,
	.next  = c_next,
	.stop  = c_stop,
	.show  = show_cpuinfo,
};
#endif /* CONFIG_PROC_FS */

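/*
 * metag_start_kernel() - Early C entry point for the boot hardware thread.
 * Resets TXTIMER, clears .bss, records the bootloader argument string for
 * setup_arch() and hands control to the generic start_kernel().
 */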
void __init metag_start_kernel(char *args)
{
	/* Zero the timer register so timestamps are from the point at
	 * which the kernel started running.
	 */
	__core_reg_set(TXTIMER, 0);

	/* Clear the bss. */
	memset(__bss_start, 0,
	       (unsigned long)__bss_stop - (unsigned long)__bss_start);

	/* Remember where these are for use in setup_arch */
	original_cmd_line = args;

	current_thread_info()->cpu = hard_processor_id();

	start_kernel();
}

/**
 * setup_priv() - Set up privilege protection registers.
 *
 * Set up privilege protection registers such as TXPRIVEXT to prevent userland
 * from touching our precious registers and sensitive memory areas.
 */
void setup_priv(void)
{
	unsigned int offset = hard_processor_id() << TXPRIVREG_STRIDE_S;

	__core_reg_set(TXPRIVEXT, PRIV_BITS);

	metag_out32(PRIVSYSR_BITS, T0PRIVSYSR + offset);
	metag_out32(PIOREG_BITS,   T0PIOREG   + offset);
	metag_out32(PSYREG_BITS,   T0PSYREG   + offset);
}

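/* Return the TBI block pointer recorded for @cpu by setup_arch(). */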
PTBI pTBI_get(unsigned int cpu)
{
	return per_cpu(pTBI, cpu);
}
EXPORT_SYMBOL(pTBI_get);

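/*
 * Build-time capability string, exported to userspace through the
 * /proc/sys/kernel/capabilities sysctl registered below.
 */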
#if defined(CONFIG_METAG_DSP) && defined(CONFIG_METAG_FPU)
char capabilites[] = "dsp fpu";
#elif defined(CONFIG_METAG_DSP)
char capabilites[] = "dsp";
#elif defined(CONFIG_METAG_FPU)
char capabilites[] = "fpu";
#else
char capabilites[] = "";
#endif

static struct ctl_table caps_kern_table[] = {
	{
		.procname	= "capabilities",
		.data		= capabilites,
		.maxlen		= sizeof(capabilites),
		.mode		= 0444,
		.proc_handler	= proc_dostring,
	},
	{}
};

static struct ctl_table caps_root_table[] = {
	{
		.procname	= "kernel",
		.mode		= 0555,
		.child		= caps_kern_table,
	},
	{}
};

static int __init capabilities_register_sysctl(void)
{
	struct ctl_table_header *caps_table_header;

	caps_table_header = register_sysctl_table(caps_root_table);
	if (!caps_table_header) {
		pr_err("Unable to register CAPABILITIES sysctl\n");
		return -ENOMEM;
	}

	return 0;
}

core_initcall(capabilities_register_sysctl);