/* arch/sparc64/kernel/ktlb.S: Kernel mapping TLB miss handling.
 *
 * Copyright (C) 1995, 1997, 2005, 2008 David S. Miller <davem@davemloft.net>
 * Copyright (C) 1996 Eddie C. Dost (ecd@brainaid.de)
 * Copyright (C) 1996 Miguel de Icaza (miguel@nuclecu.unam.mx)
 * Copyright (C) 1996,98,99 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
 */

#include <asm/head.h>
#include <asm/asi.h>
#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/tsb.h>

	.text
	.align		32

kvmap_itlb:
	/* g6: TAG TARGET */
	mov		TLB_TAG_ACCESS, %g4
	ldxa		[%g4] ASI_IMMU, %g4

	/* sun4v_itlb_miss branches here with the missing virtual
	 * address already loaded into %g4
	 */
kvmap_itlb_4v:

	/* Catch kernel NULL pointer calls. */
	sethi		%hi(PAGE_SIZE), %g5
	cmp		%g4, %g5
	blu,pn		%xcc, kvmap_itlb_longpath
	 nop

	KERN_TSB_LOOKUP_TL1(%g4, %g6, %g5, %g1, %g2, %g3, kvmap_itlb_load)

kvmap_itlb_tsb_miss:
	sethi		%hi(LOW_OBP_ADDRESS), %g5
	cmp		%g4, %g5
	blu,pn		%xcc, kvmap_itlb_vmalloc_addr
	 mov		0x1, %g5
	sllx		%g5, 32, %g5
	cmp		%g4, %g5
	blu,pn		%xcc, kvmap_itlb_obp
	 nop

kvmap_itlb_vmalloc_addr:
	KERN_PGTABLE_WALK(%g4, %g5, %g2, kvmap_itlb_longpath)

	TSB_LOCK_TAG(%g1, %g2, %g7)

	/* Load and check PTE. */
	ldxa		[%g5] ASI_PHYS_USE_EC, %g5
	mov		1, %g7
	sllx		%g7, TSB_TAG_INVALID_BIT, %g7
	brgez,a,pn	%g5, kvmap_itlb_longpath
	 TSB_STORE(%g1, %g7)

	TSB_WRITE(%g1, %g5, %g6)

	/* fallthrough to TLB load */

kvmap_itlb_load:

661:	stxa		%g5, [%g0] ASI_ITLB_DATA_IN
	retry
	.section	.sun4v_2insn_patch, "ax"
	.word		661b
	nop
	nop
	.previous

	/* For sun4v the ASI_ITLB_DATA_IN store and the retry
	 * instruction get nop'd out and we get here to branch
	 * to the sun4v tlb load code.  The registers are setup
	 * as follows:
	 *
	 * %g4: vaddr
	 * %g5: PTE
	 * %g6: TAG
	 *
	 * The sun4v TLB load wants the PTE in %g3 so we fix that
	 * up here.
	 */
	ba,pt		%xcc, sun4v_itlb_load
	 mov		%g5, %g3

kvmap_itlb_longpath:

661:	rdpr	%pstate, %g5
	wrpr	%g5, PSTATE_AG | PSTATE_MG, %pstate
	.section .sun4v_2insn_patch, "ax"
	.word	661b
	SET_GL(1)
	nop
	.previous

	rdpr	%tpc, %g5
	ba,pt	%xcc, sparc64_realfault_common
	 mov	FAULT_CODE_ITLB, %g4

kvmap_itlb_obp:
	OBP_TRANS_LOOKUP(%g4, %g5, %g2, %g3, kvmap_itlb_longpath)

	TSB_LOCK_TAG(%g1, %g2, %g7)

	TSB_WRITE(%g1, %g5, %g6)

	ba,pt		%xcc, kvmap_itlb_load
	 nop

kvmap_dtlb_obp:
	OBP_TRANS_LOOKUP(%g4, %g5, %g2, %g3, kvmap_dtlb_longpath)

	TSB_LOCK_TAG(%g1, %g2, %g7)

	TSB_WRITE(%g1, %g5, %g6)

	ba,pt		%xcc, kvmap_dtlb_load
	 nop

	.align		32
kvmap_dtlb_tsb4m_load:
	TSB_LOCK_TAG(%g1, %g2, %g7)
	TSB_WRITE(%g1, %g5, %g6)
	ba,pt		%xcc, kvmap_dtlb_load
	 nop

kvmap_dtlb:
	/* %g6: TAG TARGET */
	mov		TLB_TAG_ACCESS, %g4
	ldxa		[%g4] ASI_DMMU, %g4

	/* sun4v_dtlb_miss branches here with the missing virtual
	 * address already loaded into %g4
	 */
kvmap_dtlb_4v:
	brgez,pn	%g4, kvmap_dtlb_nonlinear
	 nop

#ifdef CONFIG_DEBUG_PAGEALLOC
	/* Index through the base page size TSB even for linear
	 * mappings when using page allocation debugging.
	 */
	KERN_TSB_LOOKUP_TL1(%g4, %g6, %g5, %g1, %g2, %g3, kvmap_dtlb_load)
#else
	/* Correct TAG_TARGET is already in %g6, check 4mb TSB. */
	KERN_TSB4M_LOOKUP_TL1(%g6, %g5, %g1, %g2, %g3, kvmap_dtlb_load)
#endif
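	/* Overview of the linear mapping miss handling below: the
	 * PAGE_OFFSET bits are shifted out of the vaddr and the
	 * resulting physical address is range checked, then tested
	 * against sparc64_valid_addr_bitmap (one bit per 4MB chunk)
	 * so that references into holes in the physical address
	 * space take the long path.  kpte_linear_bitmap (one bit per
	 * 256MB chunk) then selects kern_linear_pte_xor[1] when its
	 * bit is set and kern_linear_pte_xor[0] otherwise, and the
	 * chosen value is xor'd with the vaddr to form the PTE
	 * written into the 4MB TSB.
	 */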
	/* TSB entry address left in %g1, lookup linear PTE.
	 * Must preserve %g1 and %g6 (TAG).
	 */
kvmap_dtlb_tsb4m_miss:
	/* Clear the PAGE_OFFSET top virtual bits, shift
	 * down to get PFN, and make sure PFN is in range.
	 */
	sllx		%g4, 21, %g5

	/* Check to see if we know about valid memory at the 4MB
	 * chunk this physical address will reside within.
	 */
	srlx		%g5, 21 + 41, %g2
	brnz,pn		%g2, kvmap_dtlb_longpath
	 nop

	/* This unconditional branch and delay-slot nop gets patched
	 * by the sethi sequence once the bitmap is properly setup.
	 */
	.globl		valid_addr_bitmap_insn
valid_addr_bitmap_insn:
	ba,pt		%xcc, 2f
	 nop
	.subsection	2
	.globl		valid_addr_bitmap_patch
valid_addr_bitmap_patch:
	sethi		%hi(sparc64_valid_addr_bitmap), %g7
	or		%g7, %lo(sparc64_valid_addr_bitmap), %g7
	.previous

	srlx		%g5, 21 + 22, %g2
	srlx		%g2, 6, %g5
	and		%g2, 63, %g2
	sllx		%g5, 3, %g5
	ldx		[%g7 + %g5], %g5
	mov		1, %g7
	sllx		%g7, %g2, %g7
	andcc		%g5, %g7, %g0
	be,pn		%xcc, kvmap_dtlb_longpath

2:	 sethi		%hi(kpte_linear_bitmap), %g2
	or		%g2, %lo(kpte_linear_bitmap), %g2

	/* Get the 256MB physical address index. */
	sllx		%g4, 21, %g5
	mov		1, %g7
	srlx		%g5, 21 + 28, %g5

	/* Don't try this at home kids... this depends upon srlx
	 * only taking the low 6 bits of the shift count in %g5.
	 */
	sllx		%g7, %g5, %g7

	/* Divide by 64 to get the offset into the bitmask. */
	srlx		%g5, 6, %g5
	sllx		%g5, 3, %g5

	/* kern_linear_pte_xor[((mask & bit) ? 1 : 0)] */
	ldx		[%g2 + %g5], %g2
	andcc		%g2, %g7, %g0
	sethi		%hi(kern_linear_pte_xor), %g5
	or		%g5, %lo(kern_linear_pte_xor), %g5
	bne,a,pt	%xcc, 1f
	 add		%g5, 8, %g5

1:	ldx		[%g5], %g2

	.globl		kvmap_linear_patch
kvmap_linear_patch:
	ba,pt		%xcc, kvmap_dtlb_tsb4m_load
	 xor		%g2, %g4, %g5

kvmap_dtlb_vmalloc_addr:
	KERN_PGTABLE_WALK(%g4, %g5, %g2, kvmap_dtlb_longpath)

	TSB_LOCK_TAG(%g1, %g2, %g7)

	/* Load and check PTE. */
	ldxa		[%g5] ASI_PHYS_USE_EC, %g5
	mov		1, %g7
	sllx		%g7, TSB_TAG_INVALID_BIT, %g7
	brgez,a,pn	%g5, kvmap_dtlb_longpath
	 TSB_STORE(%g1, %g7)

	TSB_WRITE(%g1, %g5, %g6)

	/* fallthrough to TLB load */

kvmap_dtlb_load:

661:	stxa		%g5, [%g0] ASI_DTLB_DATA_IN	! Reload TLB
	retry
	.section	.sun4v_2insn_patch, "ax"
	.word		661b
	nop
	nop
	.previous

	/* For sun4v the ASI_DTLB_DATA_IN store and the retry
	 * instruction get nop'd out and we get here to branch
	 * to the sun4v tlb load code.  The registers are setup
	 * as follows:
	 *
	 * %g4: vaddr
	 * %g5: PTE
	 * %g6: TAG
	 *
	 * The sun4v TLB load wants the PTE in %g3 so we fix that
	 * up here.
	 */
	ba,pt		%xcc, sun4v_dtlb_load
	 mov		%g5, %g3

#ifdef CONFIG_SPARSEMEM_VMEMMAP
kvmap_vmemmap:
	sub		%g4, %g5, %g5
	srlx		%g5, 22, %g5
	sethi		%hi(vmemmap_table), %g1
	sllx		%g5, 3, %g5
	or		%g1, %lo(vmemmap_table), %g1
	ba,pt		%xcc, kvmap_dtlb_load
	 ldx		[%g1 + %g5], %g5
#endif

kvmap_dtlb_nonlinear:
	/* Catch kernel NULL pointer derefs. */
	sethi		%hi(PAGE_SIZE), %g5
	cmp		%g4, %g5
	bleu,pn		%xcc, kvmap_dtlb_longpath
	 nop
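	/* For CONFIG_SPARSEMEM_VMEMMAP kernels, vaddrs at or above
	 * VMEMMAP_BASE are resolved by kvmap_vmemmap above: the
	 * offset from VMEMMAP_BASE is divided by 4MB and used to
	 * index vmemmap_table, which holds one precomputed PTE for
	 * each 4MB of the vmemmap region.
	 */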
#ifdef CONFIG_SPARSEMEM_VMEMMAP
	/* Do not use the TSB for vmemmap. */
	mov		(VMEMMAP_BASE >> 40), %g5
	sllx		%g5, 40, %g5
	cmp		%g4, %g5
	bgeu,pn		%xcc, kvmap_vmemmap
	 nop
#endif

	KERN_TSB_LOOKUP_TL1(%g4, %g6, %g5, %g1, %g2, %g3, kvmap_dtlb_load)

kvmap_dtlb_tsbmiss:
	sethi		%hi(MODULES_VADDR), %g5
	cmp		%g4, %g5
	blu,pn		%xcc, kvmap_dtlb_longpath
	 mov		(VMALLOC_END >> 40), %g5
	sllx		%g5, 40, %g5
	cmp		%g4, %g5
	bgeu,pn		%xcc, kvmap_dtlb_longpath
	 nop

kvmap_check_obp:
	sethi		%hi(LOW_OBP_ADDRESS), %g5
	cmp		%g4, %g5
	blu,pn		%xcc, kvmap_dtlb_vmalloc_addr
	 mov		0x1, %g5
	sllx		%g5, 32, %g5
	cmp		%g4, %g5
	blu,pn		%xcc, kvmap_dtlb_obp
	 nop
	ba,pt		%xcc, kvmap_dtlb_vmalloc_addr
	 nop

kvmap_dtlb_longpath:

661:	rdpr	%pstate, %g5
	wrpr	%g5, PSTATE_AG | PSTATE_MG, %pstate
	.section .sun4v_2insn_patch, "ax"
	.word	661b
	SET_GL(1)
	ldxa	[%g0] ASI_SCRATCHPAD, %g5
	.previous

	rdpr	%tl, %g3
	cmp	%g3, 1

661:	mov	TLB_TAG_ACCESS, %g4
	ldxa	[%g4] ASI_DMMU, %g5
	.section .sun4v_2insn_patch, "ax"
	.word	661b
	ldx	[%g5 + HV_FAULT_D_ADDR_OFFSET], %g5
	nop
	.previous

	be,pt	%xcc, sparc64_realfault_common
	 mov	FAULT_CODE_DTLB, %g4
	ba,pt	%xcc, winfix_trampoline
	 nop