/*
 * Copyright 2010 Tilera Corporation. All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation, version 2.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 * NON INFRINGEMENT.  See the GNU General Public License for
 * more details.
 *
 * TILE startup code.
 */

#include <linux/linkage.h>
#include <linux/init.h>
#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/thread_info.h>
#include <asm/processor.h>
#include <asm/asm-offsets.h>
#include <hv/hypervisor.h>
#include <arch/chip.h>
#include <arch/spr_def.h>

/*
 * This module contains the entry code for kernel images. It performs the
 * minimal setup needed to call the generic C routines.
 */

	__HEAD
ENTRY(_start)
	/* Notify the hypervisor of what version of the API we want */
	{
	  movei r1, TILE_CHIP
	  movei r2, TILE_CHIP_REV
	}
	{
	  moveli r0, _HV_VERSION_OLD_HV_INIT
	  jal _hv_init
	}
	/* Get a reasonable default ASID in r0 */
	{
	  move r0, zero
	  jal _hv_inquire_asid
	}
	/* Install the default page table */
	{
	  moveli r6, lo16(swapper_pgprot - PAGE_OFFSET)
	  move r4, r0	/* use starting ASID of range for this page table */
	}
	{
	  moveli r0, lo16(swapper_pg_dir - PAGE_OFFSET)
	  auli r6, r6, ha16(swapper_pgprot - PAGE_OFFSET)
	}
	{
	  lw r2, r6
	  addi r6, r6, 4
	}
	{
	  lw r3, r6
	  auli r0, r0, ha16(swapper_pg_dir - PAGE_OFFSET)
	}
	{
	  finv r6
	  move r1, zero	/* high 32 bits of CPA is zero */
	}
	{
	  moveli lr, lo16(1f)
	  moveli r5, CTX_PAGE_FLAG
	}
	{
	  auli lr, lr, ha16(1f)
	  j _hv_install_context
	}
1:

	/* Get our processor number and save it away in SAVE_K_0. */
	jal _hv_inquire_topology
	mulll_uu r4, r1, r2	/* r1 == y, r2 == width */
	add r4, r4, r0		/* r0 == x, so r4 == cpu == y*width + x */

#ifdef CONFIG_SMP
	/*
	 * Load up our per-cpu offset.  When the first (master) tile
	 * boots, this value is still zero, so we will load boot_pc
	 * with start_kernel, and boot_sp at the top of init_stack.
	 * The master tile initializes the per-cpu offset array, so that
	 * when subsequent (secondary) tiles boot, they will instead load
	 * from their per-cpu versions of boot_sp and boot_pc.
	 */
	moveli r5, lo16(__per_cpu_offset)
	auli r5, r5, ha16(__per_cpu_offset)
	s2a r5, r4, r5
	lw r5, r5
	bnz r5, 1f

	/*
	 * Save the width and height to the smp_topology variable
	 * for later use.
	 */
	moveli r0, lo16(smp_topology + HV_TOPOLOGY_WIDTH_OFFSET)
	auli r0, r0, ha16(smp_topology + HV_TOPOLOGY_WIDTH_OFFSET)
	{
	  sw r0, r2
	  addi r0, r0, (HV_TOPOLOGY_HEIGHT_OFFSET - HV_TOPOLOGY_WIDTH_OFFSET)
	}
	sw r0, r3
1:
#else
	move r5, zero
#endif

	/* Load and go with the correct pc and sp. */
	{
	  addli r1, r5, lo16(boot_sp)
	  addli r0, r5, lo16(boot_pc)
	}
	{
	  auli r1, r1, ha16(boot_sp)
	  auli r0, r0, ha16(boot_pc)
	}
	lw r0, r0
	lw sp, r1
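	/*
	 * Note: sp and the cpu number can share one SPR below only
	 * because the kernel stack top is sufficiently aligned that
	 * the low bits of sp are zero, leaving room for the cpu
	 * number; SYSTEM_SAVE_K_0 thus encodes both "ksp0" and "cpu"
	 * for later code to mask back apart.
	 */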
	or r4, sp, r4
	mtspr SPR_SYSTEM_SAVE_K_0, r4	/* save ksp0 + cpu */
	{
	  move lr, zero	/* stop backtraces in the called function */
	  jr r0
	}
	ENDPROC(_start)

__PAGE_ALIGNED_BSS
	.align PAGE_SIZE
ENTRY(empty_zero_page)
	.fill PAGE_SIZE,1,0
	END(empty_zero_page)

.macro PTE va, cpa, bits1, no_org=0
.ifeq \no_org
.org swapper_pg_dir + PGD_INDEX(\va) * HV_PTE_SIZE
.endif
.word HV_PTE_PAGE | HV_PTE_DIRTY | HV_PTE_PRESENT | HV_PTE_ACCESSED | \
      (HV_PTE_MODE_CACHE_NO_L3 << HV_PTE_INDEX_MODE)
.word (\bits1) | (HV_CPA_TO_PTFN(\cpa) << (HV_PTE_INDEX_PTFN - 32))
.endm

__PAGE_ALIGNED_DATA
	.align PAGE_SIZE
ENTRY(swapper_pg_dir)
	/*
	 * All data pages from PAGE_OFFSET to MEM_USER_INTRPT are mapped as
	 * VA = PA + PAGE_OFFSET.  We remap things with more precise access
	 * permissions and more respect for size of RAM later.
	 */
	.set addr, 0
	.rept (MEM_USER_INTRPT - PAGE_OFFSET) >> PGDIR_SHIFT
	PTE addr + PAGE_OFFSET, addr, (1 << (HV_PTE_INDEX_READABLE - 32)) | \
				      (1 << (HV_PTE_INDEX_WRITABLE - 32))
	.set addr, addr + PGDIR_SIZE
	.endr

	/* The true text VAs are mapped as VA = PA + MEM_SV_START */
	PTE MEM_SV_START, 0, (1 << (HV_PTE_INDEX_READABLE - 32)) | \
			     (1 << (HV_PTE_INDEX_EXECUTABLE - 32))
	.org swapper_pg_dir + PGDIR_SIZE
	END(swapper_pg_dir)

	/*
	 * Isolate swapper_pgprot to its own cache line, since each cpu
	 * starting up will read it using VA-is-PA and local homing.
	 * This would otherwise likely conflict with other data on the cache
	 * line, once we have set its permanent home in the page tables.
	 */
	__INITDATA
	.align CHIP_L2_LINE_SIZE()
ENTRY(swapper_pgprot)
	PTE 0, 0, (1 << (HV_PTE_INDEX_READABLE - 32)) | \
		  (1 << (HV_PTE_INDEX_WRITABLE - 32)), 1
	.align CHIP_L2_LINE_SIZE()
	END(swapper_pgprot)
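	/*
	 * Note on the PTE macro above: a hypervisor PTE is 64 bits,
	 * emitted here as two 32-bit words, which is why bit indexes
	 * of 32 and up appear with "- 32" in the second .word.
	 * swapper_pgprot is itself such a PTE: _start loads its two
	 * words into r2/r3 and passes them, along with the physical
	 * address of swapper_pg_dir and the boot ASID, to
	 * _hv_install_context.
	 */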