/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2004-2007 Cavium Networks
 * Copyright (C) 2008 Wind River Systems
 */
#include <linux/init.h>
#include <linux/console.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/irq.h>
#include <linux/serial.h>
#include <linux/types.h>
#include <linux/string.h>	/* for memset */
#include <linux/tty.h>
#include <linux/time.h>
#include <linux/platform_device.h>
#include <linux/serial_core.h>
#include <linux/serial_8250.h>

#include <asm/processor.h>
#include <asm/reboot.h>
#include <asm/smp-ops.h>
#include <asm/system.h>
#include <asm/irq_cpu.h>
#include <asm/mipsregs.h>
#include <asm/bootinfo.h>
#include <asm/sections.h>
#include <asm/time.h>

#include <asm/octeon/octeon.h>

#ifdef CONFIG_CAVIUM_DECODE_RSL
extern void cvmx_interrupt_rsl_decode(void);
extern int __cvmx_interrupt_ecc_report_single_bit_errors;
extern void cvmx_interrupt_rsl_enable(void);
#endif

extern struct plat_smp_ops octeon_smp_ops;

#ifdef CONFIG_PCI
extern void pci_console_init(const char *arg);
#endif

#ifdef CONFIG_CAVIUM_RESERVE32
extern uint64_t octeon_reserve32_memory;
#endif
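/* Upper bound on memory Linux will use (default 512MB, 64MB in the
 * simulator); a MEM=/mem= boot argument overrides it. */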
static unsigned long long MAX_MEMORY = 512ull << 20;

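/* Boot descriptor passed by the bootloader in register $a3 (fw_arg3). */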
struct octeon_boot_descriptor *octeon_boot_desc_ptr;

struct cvmx_bootinfo *octeon_bootinfo;
EXPORT_SYMBOL(octeon_bootinfo);

#ifdef CONFIG_CAVIUM_RESERVE32
uint64_t octeon_reserve32_memory;
EXPORT_SYMBOL(octeon_reserve32_memory);
#endif

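/* UART (0 or 1) used for the kernel console; see octeon_get_boot_uart(). */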
static int octeon_uart;

extern asmlinkage void handle_int(void);
extern asmlinkage void plat_irq_dispatch(void);

/**
 * Return non-zero if we are currently running in the Octeon simulator
 *
 * Returns non-zero in the simulator, zero on real hardware.
 */
int octeon_is_simulation(void)
{
	return octeon_bootinfo->board_type == CVMX_BOARD_TYPE_SIM;
}
EXPORT_SYMBOL(octeon_is_simulation);

/**
 * Return true if Octeon is in PCI Host mode. This means
 * Linux can control the PCI bus.
 *
 * Returns non-zero if Octeon is in host mode.
 */
int octeon_is_pci_host(void)
{
#ifdef CONFIG_PCI
	return octeon_bootinfo->config_flags & CVMX_BOOTINFO_CFG_FLAG_PCI_HOST;
#else
	return 0;
#endif
}

/**
 * Get the clock rate of Octeon
 *
 * Returns clock rate in Hz
 */
uint64_t octeon_get_clock_rate(void)
{
	if (octeon_is_simulation())
		octeon_bootinfo->eclock_hz = 6000000;
	return octeon_bootinfo->eclock_hz;
}
EXPORT_SYMBOL(octeon_get_clock_rate);

/**
 * Write to the LCD display connected to the bootbus. This display
 * exists on most Cavium evaluation boards. If it doesn't exist, then
 * this function doesn't do anything.
 *
 * @s: String to write
 */
void octeon_write_lcd(const char *s)
{
	if (octeon_bootinfo->led_display_base_addr) {
		void __iomem *lcd_address =
			ioremap_nocache(octeon_bootinfo->led_display_base_addr,
					8);
		int i;
		for (i = 0; i < 8; i++, s++) {
			if (*s)
				iowrite8(*s, lcd_address + i);
			else
				iowrite8(' ', lcd_address + i);
		}
		iounmap(lcd_address);
	}
}

/**
 * Return the console uart passed by the bootloader
 *
 * Returns uart (0 or 1)
 */
int octeon_get_boot_uart(void)
{
	int uart;
#ifdef CONFIG_CAVIUM_OCTEON_2ND_KERNEL
	uart = 1;
#else
	uart = (octeon_boot_desc_ptr->flags & OCTEON_BL_FLAG_CONSOLE_UART1) ?
		1 : 0;
#endif
	return uart;
}

/**
 * Get the coremask Linux was booted on.
 *
 * Returns core mask
 */
int octeon_get_boot_coremask(void)
{
	return octeon_boot_desc_ptr->core_mask;
}

/**
 * Check the hardware BIST results for a CPU
 */
void octeon_check_cpu_bist(void)
{
	const int coreid = cvmx_get_core_num();
	unsigned long long mask;
	unsigned long long bist_val;

	/* Check BIST results for COP0 registers */
	mask = 0x1f00000000ull;
	bist_val = read_octeon_c0_icacheerr();
	if (bist_val & mask)
		pr_err("Core%d BIST Failure: CacheErr(icache) = 0x%llx\n",
		       coreid, bist_val);

	bist_val = read_octeon_c0_dcacheerr();
	if (bist_val & 1)
		pr_err("Core%d L1 Dcache parity error: "
		       "CacheErr(dcache) = 0x%llx\n",
		       coreid, bist_val);

	mask = 0xfc00000000000000ull;
	bist_val = read_c0_cvmmemctl();
	if (bist_val & mask)
		pr_err("Core%d BIST Failure: COP0_CVM_MEM_CTL = 0x%llx\n",
		       coreid, bist_val);

	write_octeon_c0_dcacheerr(0);
}

#ifdef CONFIG_CAVIUM_RESERVE32_USE_WIRED_TLB
/**
 * Called on every core to set up the wired TLB entry needed
 * if CONFIG_CAVIUM_RESERVE32_USE_WIRED_TLB is set.
 *
 */
static void octeon_hal_setup_per_cpu_reserved32(void *unused)
{
	/*
	 * The config has selected to wire the reserve32 memory for all
	 * userspace applications. We need to put a wired TLB entry in for each
	 * 512MB of reserve32 memory. We only handle double 256MB pages here,
	 * so reserve32 must be a multiple of 512MB.
	 */
	uint32_t size = CONFIG_CAVIUM_RESERVE32;
	uint32_t entrylo0 =
		0x7 | ((octeon_reserve32_memory & ((1ul << 40) - 1)) >> 6);
	uint32_t entrylo1 = entrylo0 + (256 << 14);
	uint32_t entryhi = (0x80000000UL - (CONFIG_CAVIUM_RESERVE32 << 20));
	while (size >= 512) {
#if 0
		pr_info("CPU%d: Adding double wired TLB entry for 0x%lx\n",
			smp_processor_id(), entryhi);
#endif
		add_wired_entry(entrylo0, entrylo1, entryhi, PM_256M);
		entrylo0 += 512 << 14;
		entrylo1 += 512 << 14;
		entryhi += 512 << 20;
		size -= 512;
	}
}
#endif /* CONFIG_CAVIUM_RESERVE32_USE_WIRED_TLB */

/**
 * Called to release the named block which was used to make sure
 * that nobody used the memory for something else during
 * init. Now we'll free it so userspace apps can use this
 * memory region with bootmem_alloc.
 *
 * This function is called only once from prom_free_prom_memory().
 */
void octeon_hal_setup_reserved32(void)
{
#ifdef CONFIG_CAVIUM_RESERVE32_USE_WIRED_TLB
	on_each_cpu(octeon_hal_setup_per_cpu_reserved32, NULL, 0, 1);
#endif
}

/**
 * Reboot Octeon
 *
 * @command: Command to pass to the bootloader. Currently ignored.
 */
static void octeon_restart(char *command)
{
	/* Disable all watchdogs before soft reset. They don't get cleared */
#ifdef CONFIG_SMP
	int cpu;
	for_each_online_cpu(cpu)
		cvmx_write_csr(CVMX_CIU_WDOGX(cpu_logical_map(cpu)), 0);
#else
	cvmx_write_csr(CVMX_CIU_WDOGX(cvmx_get_core_num()), 0);
#endif

	mb();
	while (1)
		cvmx_write_csr(CVMX_CIU_SOFT_RST, 1);
}


/**
 * Permanently stop a core.
 *
 * @arg: Ignored.
 */
static void octeon_kill_core(void *arg)
{
	mb();
	if (octeon_is_simulation()) {
		/* The simulator needs the watchdog to stop for dead cores */
		cvmx_write_csr(CVMX_CIU_WDOGX(cvmx_get_core_num()), 0);
		/* A break instruction causes the simulator to stop a core */
		asm volatile ("sync\nbreak");
	}
}


/**
 * Halt the system
 */
static void octeon_halt(void)
{
	smp_call_function(octeon_kill_core, NULL, 0);

	switch (octeon_bootinfo->board_type) {
	case CVMX_BOARD_TYPE_NAO38:
		/* Driving a 1 to GPIO 12 shuts off this board */
		cvmx_write_csr(CVMX_GPIO_BIT_CFGX(12), 1);
		cvmx_write_csr(CVMX_GPIO_TX_SET, 0x1000);
		break;
	default:
		octeon_write_lcd("PowerOff");
		break;
	}

	octeon_kill_core(NULL);
}

#if 0
/**
 * Platform time init specifics.
 */
void __init plat_time_init(void)
{
	/* Nothing special here, but we are required to have one */
}

#endif

/**
 * Handle all the error condition interrupts that might occur.
 *
 */
#ifdef CONFIG_CAVIUM_DECODE_RSL
static irqreturn_t octeon_rlm_interrupt(int cpl, void *dev_id)
{
	cvmx_interrupt_rsl_decode();
	return IRQ_HANDLED;
}
#endif

/**
 * Return a string representing the system type
 *
 * Returns the board name and chip model as a string
 */
const char *octeon_board_type_string(void)
{
	static char name[80];
	sprintf(name, "%s (%s)",
		cvmx_board_type_to_string(octeon_bootinfo->board_type),
		octeon_model_get_string(read_c0_prid()));
	return name;
}

const char *get_system_type(void)
	__attribute__ ((alias("octeon_board_type_string")));

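/**
 * Configure core-local control state: CVMMEMCTL (write buffer and
 * CVMSEG behaviour), the performance counter interrupt line, and
 * default FAU/POW hardware timeouts.
 */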
void octeon_user_io_init(void)
{
	union octeon_cvmemctl cvmmemctl;
	union cvmx_iob_fau_timeout fau_timeout;
	union cvmx_pow_nw_tim nm_tim;
	uint64_t cvmctl;

	/* Get the current settings for CP0_CVMMEMCTL_REG */
	cvmmemctl.u64 = read_c0_cvmmemctl();
	/* R/W If set, marked write-buffer entries time out the same
	 * as other entries; if clear, marked write-buffer entries
	 * use the maximum timeout. */
	cvmmemctl.s.dismarkwblongto = 1;
	/* R/W If set, a merged store does not clear the write-buffer
	 * entry timeout state. */
	cvmmemctl.s.dismrgclrwbto = 0;
	/* R/W Two bits that are the MSBs of the resultant CVMSEG LM
	 * word location for an IOBDMA. The other 8 bits come from the
	 * SCRADDR field of the IOBDMA. */
	cvmmemctl.s.iobdmascrmsb = 0;
	/* R/W If set, SYNCWS and SYNCS only order marked stores; if
	 * clear, SYNCWS and SYNCS only order unmarked
	 * stores. SYNCWSMARKED has no effect when DISSYNCWS is
	 * set. */
	cvmmemctl.s.syncwsmarked = 0;
	/* R/W If set, SYNCWS acts as SYNCW and SYNCS acts as SYNC. */
	cvmmemctl.s.dissyncws = 0;
	/* R/W If set, no stall happens on write buffer full. */
	if (OCTEON_IS_MODEL(OCTEON_CN38XX_PASS2))
		cvmmemctl.s.diswbfst = 1;
	else
		cvmmemctl.s.diswbfst = 0;
	/* R/W If set (and SX set), supervisor-level loads/stores can
	 * use XKPHYS addresses with VA<48>==0 */
	cvmmemctl.s.xkmemenas = 0;

	/* R/W If set (and UX set), user-level loads/stores can use
	 * XKPHYS addresses with VA<48>==0 */
	cvmmemctl.s.xkmemenau = 0;

	/* R/W If set (and SX set), supervisor-level loads/stores can
	 * use XKPHYS addresses with VA<48>==1 */
	cvmmemctl.s.xkioenas = 0;

	/* R/W If set (and UX set), user-level loads/stores can use
	 * XKPHYS addresses with VA<48>==1 */
	cvmmemctl.s.xkioenau = 0;

	/* R/W If set, all stores act as SYNCW (NOMERGE must be set
	 * when this is set) RW, reset to 0. */
	cvmmemctl.s.allsyncw = 0;

	/* R/W If set, no stores merge, and all stores reach the
	 * coherent bus in order. */
	cvmmemctl.s.nomerge = 0;
	/* R/W Selects the bit in the counter used for DID time-outs:
	 * 0 = 2^31, 1 = 2^30, 2 = 2^29, 3 = 2^14. Actual time-out is
	 * between 1x and 2x this interval. For example, with
	 * DIDTTO=3, expiration interval is between 16K and 32K. */
	cvmmemctl.s.didtto = 0;
	/* R/W If set, the (mem) CSR clock never turns off. */
	cvmmemctl.s.csrckalwys = 0;
	/* R/W If set, mclk never turns off. */
	cvmmemctl.s.mclkalwys = 0;
	/* R/W Selects the bit in the counter used for write buffer
	 * flush time-outs: (WBFLT+11) is the bit position in an
	 * internal counter used to determine expiration. The write
	 * buffer expires between 1x and 2x this interval. For
	 * example, with WBFLT = 0, a write buffer expires between 2K
	 * and 4K cycles after the write buffer entry is allocated. */
	cvmmemctl.s.wbfltime = 0;
	/* R/W If set, do not put Istream in the L2 cache. */
	cvmmemctl.s.istrnol2 = 0;
	/* R/W The write buffer threshold. */
	cvmmemctl.s.wbthresh = 10;
	/* R/W If set, CVMSEG is available for loads/stores in
	 * kernel/debug mode. */
#if CONFIG_CAVIUM_OCTEON_CVMSEG_SIZE > 0
	cvmmemctl.s.cvmsegenak = 1;
#else
	cvmmemctl.s.cvmsegenak = 0;
#endif
	/* R/W If set, CVMSEG is available for loads/stores in
	 * supervisor mode. */
	cvmmemctl.s.cvmsegenas = 0;
	/* R/W If set, CVMSEG is available for loads/stores in user
	 * mode. */
	cvmmemctl.s.cvmsegenau = 0;
	/* R/W Size of local memory in cache blocks, 54 (6912 bytes)
	 * is max legal value. */
	cvmmemctl.s.lmemsz = CONFIG_CAVIUM_OCTEON_CVMSEG_SIZE;


	if (smp_processor_id() == 0)
		pr_notice("CVMSEG size: %d cache lines (%d bytes)\n",
			  CONFIG_CAVIUM_OCTEON_CVMSEG_SIZE,
			  CONFIG_CAVIUM_OCTEON_CVMSEG_SIZE * 128);

	write_c0_cvmmemctl(cvmmemctl.u64);

	/* Move the performance counter interrupts to IRQ 6 */
	cvmctl = read_c0_cvmctl();
	cvmctl &= ~(7 << 7);
	cvmctl |= 6 << 7;
	write_c0_cvmctl(cvmctl);

	/* Set a default for the hardware timeouts */
	fau_timeout.u64 = 0;
	fau_timeout.s.tout_val = 0xfff;
	/* Disable tagwait FAU timeout */
	fau_timeout.s.tout_enb = 0;
	cvmx_write_csr(CVMX_IOB_FAU_TIMEOUT, fau_timeout.u64);

	nm_tim.u64 = 0;
	/* 4096 cycles */
	nm_tim.s.nw_tim = 3;
	cvmx_write_csr(CVMX_POW_NW_TIM, nm_tim.u64);

	write_octeon_c0_icacheerr(0);
	write_c0_derraddr1(0);
}

/**
 * Early entry point for arch setup
 */
void __init prom_init(void)
{
	struct cvmx_sysinfo *sysinfo;
	const int coreid = cvmx_get_core_num();
	int i;
	int argc;
	struct uart_port octeon_port;
#ifdef CONFIG_CAVIUM_RESERVE32
	int64_t addr = -1;
#endif
	/*
	 * The bootloader passes a pointer to the boot descriptor in
	 * $a3, this is available as fw_arg3.
	 */
	octeon_boot_desc_ptr = (struct octeon_boot_descriptor *)fw_arg3;
	octeon_bootinfo =
		cvmx_phys_to_ptr(octeon_boot_desc_ptr->cvmx_desc_vaddr);
	cvmx_bootmem_init(cvmx_phys_to_ptr(octeon_bootinfo->phy_mem_desc_addr));

	/*
	 * Only enable the LED controller if we're running on a CN38XX, CN58XX,
	 * or CN56XX. The CN30XX and CN31XX don't have an LED controller.
	 */
	if (!octeon_is_simulation() &&
	    octeon_has_feature(OCTEON_FEATURE_LED_CONTROLLER)) {
		cvmx_write_csr(CVMX_LED_EN, 0);
		cvmx_write_csr(CVMX_LED_PRT, 0);
		cvmx_write_csr(CVMX_LED_DBG, 0);
		cvmx_write_csr(CVMX_LED_PRT_FMT, 0);
		cvmx_write_csr(CVMX_LED_UDD_CNTX(0), 32);
		cvmx_write_csr(CVMX_LED_UDD_CNTX(1), 32);
		cvmx_write_csr(CVMX_LED_UDD_DATX(0), 0);
		cvmx_write_csr(CVMX_LED_UDD_DATX(1), 0);
		cvmx_write_csr(CVMX_LED_EN, 1);
	}
#ifdef CONFIG_CAVIUM_RESERVE32
	/*
	 * We need to temporarily allocate all memory in the reserve32
	 * region. This makes sure the kernel doesn't allocate this
	 * memory when it is getting memory from the
	 * bootloader. Later, after the memory allocations are
	 * complete, the reserve32 will be freed.
	 */
#ifdef CONFIG_CAVIUM_RESERVE32_USE_WIRED_TLB
	if (CONFIG_CAVIUM_RESERVE32 & 0x1ff)
		pr_err("CAVIUM_RESERVE32 isn't a multiple of 512MB. "
		       "This is required if CAVIUM_RESERVE32_USE_WIRED_TLB "
		       "is set\n");
	else
		addr = cvmx_bootmem_phy_named_block_alloc(CONFIG_CAVIUM_RESERVE32 << 20,
							  0, 0, 512 << 20,
							  "CAVIUM_RESERVE32", 0);
#else
	/*
	 * Allocate memory for RESERVED32 aligned on 2MB boundary. This
	 * is in case we later use hugetlb entries with it.
	 */
	addr = cvmx_bootmem_phy_named_block_alloc(CONFIG_CAVIUM_RESERVE32 << 20,
						  0, 0, 2 << 20,
						  "CAVIUM_RESERVE32", 0);
#endif
	if (addr < 0)
		pr_err("Failed to allocate CAVIUM_RESERVE32 memory area\n");
	else
		octeon_reserve32_memory = addr;
#endif

#ifdef CONFIG_CAVIUM_OCTEON_LOCK_L2
	if (cvmx_read_csr(CVMX_L2D_FUS3) & (3ull << 34)) {
		pr_info("Skipping L2 locking due to reduced L2 cache size\n");
	} else {
		uint32_t ebase = read_c0_ebase() & 0x3ffff000;
#ifdef CONFIG_CAVIUM_OCTEON_LOCK_L2_TLB
		/* TLB refill */
		cvmx_l2c_lock_mem_region(ebase, 0x100);
#endif
#ifdef CONFIG_CAVIUM_OCTEON_LOCK_L2_EXCEPTION
		/* General exception */
		cvmx_l2c_lock_mem_region(ebase + 0x180, 0x80);
#endif
#ifdef CONFIG_CAVIUM_OCTEON_LOCK_L2_LOW_LEVEL_INTERRUPT
		/* Interrupt handler */
		cvmx_l2c_lock_mem_region(ebase + 0x200, 0x80);
#endif
#ifdef CONFIG_CAVIUM_OCTEON_LOCK_L2_INTERRUPT
		cvmx_l2c_lock_mem_region(__pa_symbol(handle_int), 0x100);
		cvmx_l2c_lock_mem_region(__pa_symbol(plat_irq_dispatch), 0x80);
#endif
#ifdef CONFIG_CAVIUM_OCTEON_LOCK_L2_MEMCPY
		cvmx_l2c_lock_mem_region(__pa_symbol(memcpy), 0x480);
#endif
	}
#endif

	sysinfo = cvmx_sysinfo_get();
	memset(sysinfo, 0, sizeof(*sysinfo));
	sysinfo->system_dram_size = octeon_bootinfo->dram_size << 20;
	sysinfo->phy_mem_desc_ptr =
		cvmx_phys_to_ptr(octeon_bootinfo->phy_mem_desc_addr);
	sysinfo->core_mask = octeon_bootinfo->core_mask;
	sysinfo->exception_base_addr = octeon_bootinfo->exception_base_addr;
	sysinfo->cpu_clock_hz = octeon_bootinfo->eclock_hz;
	sysinfo->dram_data_rate_hz = octeon_bootinfo->dclock_hz * 2;
	sysinfo->board_type = octeon_bootinfo->board_type;
	sysinfo->board_rev_major = octeon_bootinfo->board_rev_major;
	sysinfo->board_rev_minor = octeon_bootinfo->board_rev_minor;
	memcpy(sysinfo->mac_addr_base, octeon_bootinfo->mac_addr_base,
	       sizeof(sysinfo->mac_addr_base));
	sysinfo->mac_addr_count = octeon_bootinfo->mac_addr_count;
	memcpy(sysinfo->board_serial_number,
	       octeon_bootinfo->board_serial_number,
	       sizeof(sysinfo->board_serial_number));
	sysinfo->compact_flash_common_base_addr =
		octeon_bootinfo->compact_flash_common_base_addr;
	sysinfo->compact_flash_attribute_base_addr =
		octeon_bootinfo->compact_flash_attribute_base_addr;
	sysinfo->led_display_base_addr = octeon_bootinfo->led_display_base_addr;
	sysinfo->dfa_ref_clock_hz = octeon_bootinfo->dfa_ref_clock_hz;
	sysinfo->bootloader_config_flags = octeon_bootinfo->config_flags;


	octeon_check_cpu_bist();

	octeon_uart = octeon_get_boot_uart();

	/*
	 * Disable all CIU interrupts. The ones we need will be
	 * enabled later. Read the SUM register so we know the write
	 * completed.
	 */
	cvmx_write_csr(CVMX_CIU_INTX_EN0((coreid * 2)), 0);
	cvmx_write_csr(CVMX_CIU_INTX_EN0((coreid * 2 + 1)), 0);
	cvmx_write_csr(CVMX_CIU_INTX_EN1((coreid * 2)), 0);
	cvmx_write_csr(CVMX_CIU_INTX_EN1((coreid * 2 + 1)), 0);
	cvmx_read_csr(CVMX_CIU_INTX_SUM0((coreid * 2)));

#ifdef CONFIG_SMP
	octeon_write_lcd("LinuxSMP");
#else
	octeon_write_lcd("Linux");
#endif

#ifdef CONFIG_CAVIUM_GDB
	/*
	 * When debugging the linux kernel, force the cores to enter
	 * the debug exception handler to break in.
	 */
	if (octeon_get_boot_debug_flag()) {
		cvmx_write_csr(CVMX_CIU_DINT, 1 << cvmx_get_core_num());
		cvmx_read_csr(CVMX_CIU_DINT);
	}
#endif

	/*
	 * BIST should always be enabled when doing a soft reset. L2
	 * cache locking for instance is not cleared unless BIST is
	 * enabled. Unfortunately, due to chip erratum G-200 for
	 * CN38XX and CN31XX, BIST must be disabled on these parts.
	 */
	if (OCTEON_IS_MODEL(OCTEON_CN38XX_PASS2) ||
	    OCTEON_IS_MODEL(OCTEON_CN31XX))
		cvmx_write_csr(CVMX_CIU_SOFT_BIST, 0);
	else
		cvmx_write_csr(CVMX_CIU_SOFT_BIST, 1);

	/* Default to 64MB in the simulator to speed things up */
	if (octeon_is_simulation())
		MAX_MEMORY = 64ull << 20;

	arcs_cmdline[0] = 0;
	argc = octeon_boot_desc_ptr->argc;
	for (i = 0; i < argc; i++) {
		const char *arg =
			cvmx_phys_to_ptr(octeon_boot_desc_ptr->argv[i]);
		if ((strncmp(arg, "MEM=", 4) == 0) ||
		    (strncmp(arg, "mem=", 4) == 0)) {
			sscanf(arg + 4, "%llu", &MAX_MEMORY);
			MAX_MEMORY <<= 20;
			if (MAX_MEMORY == 0)
				MAX_MEMORY = 32ull << 30;
		} else if (strcmp(arg, "ecc_verbose") == 0) {
#ifdef CONFIG_CAVIUM_REPORT_SINGLE_BIT_ECC
			__cvmx_interrupt_ecc_report_single_bit_errors = 1;
			pr_notice("Reporting of single bit ECC errors is "
				  "turned on\n");
#endif
		} else if (strlen(arcs_cmdline) + strlen(arg) + 1 <
			   sizeof(arcs_cmdline) - 1) {
			strcat(arcs_cmdline, " ");
			strcat(arcs_cmdline, arg);
		}
	}

	if (strstr(arcs_cmdline, "console=") == NULL) {
#ifdef CONFIG_GDB_CONSOLE
		strcat(arcs_cmdline, " console=gdb");
#else
#ifdef CONFIG_CAVIUM_OCTEON_2ND_KERNEL
		strcat(arcs_cmdline, " console=ttyS0,115200");
#else
		if (octeon_uart == 1)
			strcat(arcs_cmdline, " console=ttyS1,115200");
		else
			strcat(arcs_cmdline, " console=ttyS0,115200");
#endif
#endif
	}

	if (octeon_is_simulation()) {
		/*
		 * The simulator uses an mtdram device pre-filled with
		 * the filesystem. Also specify the calibration delay
		 * to avoid calculating it every time.
		 */
		strcat(arcs_cmdline, " rw root=1f00"
		       " lpj=60176 slram=root,0x40000000,+1073741824");
	}

	mips_hpt_frequency = octeon_get_clock_rate();

	octeon_init_cvmcount();

	_machine_restart = octeon_restart;
	_machine_halt = octeon_halt;

	memset(&octeon_port, 0, sizeof(octeon_port));
	/*
	 * For early_serial_setup we don't set the port type or
	 * UPF_FIXED_TYPE.
	 */
	octeon_port.flags = ASYNC_SKIP_TEST | UPF_SHARE_IRQ;
	octeon_port.iotype = UPIO_MEM;
	/* I/O addresses are every 8 bytes */
	octeon_port.regshift = 3;
	/* Clock rate of the chip */
	octeon_port.uartclk = mips_hpt_frequency;
	octeon_port.fifosize = 64;
	octeon_port.mapbase = 0x0001180000000800ull + (1024 * octeon_uart);
	octeon_port.membase = cvmx_phys_to_ptr(octeon_port.mapbase);
	octeon_port.serial_in = octeon_serial_in;
	octeon_port.serial_out = octeon_serial_out;
#ifdef CONFIG_CAVIUM_OCTEON_2ND_KERNEL
	octeon_port.line = 0;
#else
	octeon_port.line = octeon_uart;
#endif
	octeon_port.irq = 42 + octeon_uart;
	early_serial_setup(&octeon_port);

	octeon_user_io_init();
	register_smp_ops(&octeon_smp_ops);
}

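/**
 * Build the kernel's boot memory map by carving regions out of the
 * bootloader's bootmem allocator, up to the MAX_MEMORY limit.
 */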
void __init plat_mem_setup(void)
{
	uint64_t mem_alloc_size;
	uint64_t total;
	int64_t memory;

	total = 0;

	/* First add the init memory we will be returning. */
	memory = __pa_symbol(&__init_begin) & PAGE_MASK;
	mem_alloc_size = (__pa_symbol(&__init_end) & PAGE_MASK) - memory;
	if (mem_alloc_size > 0) {
		add_memory_region(memory, mem_alloc_size, BOOT_MEM_RAM);
		total += mem_alloc_size;
	}

	/*
	 * The MIPS memory init uses the first memory location for
	 * some memory vectors. When SPARSEMEM is in use, it doesn't
	 * verify that the size is big enough for the final
	 * vectors. Making the smallest chunk 4MB seems to be enough
	 * to work consistently.
	 */
	mem_alloc_size = 4 << 20;
	if (mem_alloc_size > MAX_MEMORY)
		mem_alloc_size = MAX_MEMORY;

	/*
	 * When allocating memory, we want incrementing addresses from
	 * bootmem_alloc so the code in add_memory_region can merge
	 * regions next to each other.
	 */
	cvmx_bootmem_lock();
	while ((boot_mem_map.nr_map < BOOT_MEM_MAP_MAX)
	       && (total < MAX_MEMORY)) {
#if defined(CONFIG_64BIT) || defined(CONFIG_64BIT_PHYS_ADDR)
		memory = cvmx_bootmem_phy_alloc(mem_alloc_size,
						__pa_symbol(&__init_end), -1,
						0x100000,
						CVMX_BOOTMEM_FLAG_NO_LOCKING);
#elif defined(CONFIG_HIGHMEM)
		memory = cvmx_bootmem_phy_alloc(mem_alloc_size, 0, 1ull << 31,
						0x100000,
						CVMX_BOOTMEM_FLAG_NO_LOCKING);
#else
		memory = cvmx_bootmem_phy_alloc(mem_alloc_size, 0, 512 << 20,
						0x100000,
						CVMX_BOOTMEM_FLAG_NO_LOCKING);
#endif
		if (memory >= 0) {
			/*
			 * This function automatically merges address
			 * regions next to each other if they are
			 * received in incrementing order.
			 */
			add_memory_region(memory, mem_alloc_size, BOOT_MEM_RAM);
			total += mem_alloc_size;
		} else {
			break;
		}
	}
	cvmx_bootmem_unlock();

#ifdef CONFIG_CAVIUM_RESERVE32
	/*
	 * Now that we've allocated the kernel memory it is safe to
	 * free the reserved region. We free it here so that builtin
	 * drivers can use the memory.
	 */
	if (octeon_reserve32_memory)
		cvmx_bootmem_free_named("CAVIUM_RESERVE32");
#endif /* CONFIG_CAVIUM_RESERVE32 */

	if (total == 0)
		panic("Unable to allocate memory from "
		      "cvmx_bootmem_phy_alloc\n");
}


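/**
 * Write one character to the boot console UART, spinning until the
 * transmit FIFO has room.
 */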
int prom_putchar(char c)
{
	uint64_t lsrval;

	/* Spin until there is room */
	do {
		lsrval = cvmx_read_csr(CVMX_MIO_UARTX_LSR(octeon_uart));
	} while ((lsrval & 0x20) == 0);

	/* Write the byte */
	cvmx_write_csr(CVMX_MIO_UARTX_THR(octeon_uart), c);
	return 1;
}

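/**
 * Late boot cleanup: install the RML/RSL error interrupt handler when
 * CONFIG_CAVIUM_DECODE_RSL is enabled, then release the reserve32
 * named block once TLB setup is complete.
 */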
void prom_free_prom_memory(void)
{
#ifdef CONFIG_CAVIUM_DECODE_RSL
	cvmx_interrupt_rsl_enable();

	/* Add an interrupt handler for general failures. */
	if (request_irq(OCTEON_IRQ_RML, octeon_rlm_interrupt, IRQF_SHARED,
			"RML/RSL", octeon_rlm_interrupt)) {
		panic("Unable to request_irq(OCTEON_IRQ_RML)\n");
	}
#endif

	/*
	 * This call is here so that it is performed after any TLB
	 * initializations. It needs to be after these in case the
	 * CONFIG_CAVIUM_RESERVE32_USE_WIRED_TLB option is set.
	 */
	octeon_hal_setup_reserved32();
}

static struct octeon_cf_data octeon_cf_data;

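/**
 * Register the octeon-cf (CompactFlash) platform device when the
 * bootloader reports one, deriving its resources from the bootbus
 * MIO_BOOT_REG_CFG chip-select windows.
 */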
static int __init octeon_cf_device_init(void)
{
	union cvmx_mio_boot_reg_cfgx mio_boot_reg_cfg;
	unsigned long base_ptr, region_base, region_size;
	struct platform_device *pd;
	struct resource cf_resources[3];
	unsigned int num_resources;
	int i;
	int ret = 0;

	/* Setup octeon-cf platform device if present. */
	base_ptr = 0;
	if (octeon_bootinfo->major_version == 1
	    && octeon_bootinfo->minor_version >= 1) {
		if (octeon_bootinfo->compact_flash_common_base_addr)
			base_ptr =
				octeon_bootinfo->compact_flash_common_base_addr;
	} else {
		base_ptr = 0x1d000800;
	}

	if (!base_ptr)
		return ret;

	/* Find CS0 region. */
	for (i = 0; i < 8; i++) {
		mio_boot_reg_cfg.u64 = cvmx_read_csr(CVMX_MIO_BOOT_REG_CFGX(i));
		region_base = mio_boot_reg_cfg.s.base << 16;
		region_size = (mio_boot_reg_cfg.s.size + 1) << 16;
		if (mio_boot_reg_cfg.s.en && base_ptr >= region_base
		    && base_ptr < region_base + region_size)
			break;
	}
	if (i >= 7) {
		/* i and i + 1 are CS0 and CS1, both must be less than 8. */
		goto out;
	}
	octeon_cf_data.base_region = i;
	octeon_cf_data.is16bit = mio_boot_reg_cfg.s.width;
	octeon_cf_data.base_region_bias = base_ptr - region_base;
	memset(cf_resources, 0, sizeof(cf_resources));
	num_resources = 0;
	cf_resources[num_resources].flags = IORESOURCE_MEM;
	cf_resources[num_resources].start = region_base;
	cf_resources[num_resources].end = region_base + region_size - 1;
	num_resources++;


	if (!(base_ptr & 0xfffful)) {
		/*
		 * Boot loader signals availability of DMA (true_ide
		 * mode) by setting low order bits of base_ptr to
		 * zero.
		 */

		/* Assume that CS1 immediately follows. */
		mio_boot_reg_cfg.u64 =
			cvmx_read_csr(CVMX_MIO_BOOT_REG_CFGX(i + 1));
		region_base = mio_boot_reg_cfg.s.base << 16;
		region_size = (mio_boot_reg_cfg.s.size + 1) << 16;
		if (!mio_boot_reg_cfg.s.en)
			goto out;

		cf_resources[num_resources].flags = IORESOURCE_MEM;
		cf_resources[num_resources].start = region_base;
		cf_resources[num_resources].end = region_base + region_size - 1;
		num_resources++;

		octeon_cf_data.dma_engine = 0;
		cf_resources[num_resources].flags = IORESOURCE_IRQ;
		cf_resources[num_resources].start = OCTEON_IRQ_BOOTDMA;
		cf_resources[num_resources].end = OCTEON_IRQ_BOOTDMA;
		num_resources++;
	} else {
		octeon_cf_data.dma_engine = -1;
	}

	pd = platform_device_alloc("pata_octeon_cf", -1);
	if (!pd) {
		ret = -ENOMEM;
		goto out;
	}
	pd->dev.platform_data = &octeon_cf_data;

	ret = platform_device_add_resources(pd, cf_resources, num_resources);
	if (ret)
		goto fail;

	ret = platform_device_add(pd);
	if (ret)
		goto fail;

	return ret;
fail:
	platform_device_put(pd);
out:
	return ret;
}
device_initcall(octeon_cf_device_init);