/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * OpenRISC head.S
 *
 * Linux architectural port borrowing liberally from similar works of
 * others.  All original copyrights apply as per the original source
 * declaration.
 *
 * Modifications for the OpenRISC architecture:
 * Copyright (C) 2003 Matjaz Breskvar <phoenix@bsemi.com>
 * Copyright (C) 2010-2011 Jonas Bonn <jonas@southpole.se>
 */

#include <linux/linkage.h>
#include <linux/threads.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/serial_reg.h>
#include <linux/pgtable.h>
#include <asm/processor.h>
#include <asm/page.h>
#include <asm/mmu.h>
#include <asm/thread_info.h>
#include <asm/cache.h>
#include <asm/spr_defs.h>
#include <asm/asm-offsets.h>
#include <linux/of_fdt.h>

/* Translate the kernel virtual address in rs to its physical alias in rd
 * (used while the MMU is off).  Only hi(-KERNELBASE) is added; this
 * relies on lo(-KERNELBASE) being zero for the configured kernel base. */
#define tophys(rd,rs)				\
	l.movhi	rd,hi(-KERNELBASE)		;\
	l.add	rd,rd,rs

/* Zero a GPR; l.movhi gpr,0 clears the whole 32-bit register. */
#define CLEAR_GPR(gpr)				\
	l.movhi	gpr,0x0

/* Load a 32-bit symbol address (or constant) into a GPR. */
#define LOAD_SYMBOL_2_GPR(gpr,symbol)		\
	l.movhi	gpr,hi(symbol)			;\
	l.ori	gpr,gpr,lo(symbol)


#define UART_BASE_ADD      0x90000000

/* SR image installed via ESR before rfe-ing into C exception handlers:
 * MMUs and caches enabled, supervisor mode, IRQs/timer still masked. */
#define EXCEPTION_SR  (SPR_SR_DME | SPR_SR_IME | SPR_SR_DCE | SPR_SR_ICE | SPR_SR_SM)
/* As EXCEPTION_SR, but with external interrupts and tick timer enabled. */
#define SYSCALL_SR  (SPR_SR_DME | SPR_SR_IME | SPR_SR_DCE | SPR_SR_ICE | SPR_SR_IEE | SPR_SR_TEE | SPR_SR_SM)

/* ============================================[ tmp store locations ]=== */

/* SPR address of shadow-GPR scratch slot x (shadow bank above the
 * architectural GPR file). */
#define SPR_SHADOW_GPR(x)	((x) + SPR_GPR_BASE + 32)

/*
 * emergency_print temporary stores
 *
 * NOTE(review): r4/r5/r6 are parked in shadow slots 14/15/16 --
 * presumably so they do not collide with shadow slots 2..6 used by the
 * EXCEPTION_STORE_GPRx macros below; r7/r8/r9 use their own numbers.
 */
#ifdef CONFIG_OPENRISC_HAVE_SHADOW_GPRS
#define EMERGENCY_PRINT_STORE_GPR4	l.mtspr r0,r4,SPR_SHADOW_GPR(14)
#define EMERGENCY_PRINT_LOAD_GPR4	l.mfspr r4,r0,SPR_SHADOW_GPR(14)

#define EMERGENCY_PRINT_STORE_GPR5	l.mtspr r0,r5,SPR_SHADOW_GPR(15)
#define EMERGENCY_PRINT_LOAD_GPR5	l.mfspr r5,r0,SPR_SHADOW_GPR(15)

#define EMERGENCY_PRINT_STORE_GPR6	l.mtspr r0,r6,SPR_SHADOW_GPR(16)
#define EMERGENCY_PRINT_LOAD_GPR6	l.mfspr r6,r0,SPR_SHADOW_GPR(16)

#define EMERGENCY_PRINT_STORE_GPR7	l.mtspr r0,r7,SPR_SHADOW_GPR(7)
#define EMERGENCY_PRINT_LOAD_GPR7	l.mfspr r7,r0,SPR_SHADOW_GPR(7)

#define EMERGENCY_PRINT_STORE_GPR8	l.mtspr r0,r8,SPR_SHADOW_GPR(8)
#define EMERGENCY_PRINT_LOAD_GPR8	l.mfspr r8,r0,SPR_SHADOW_GPR(8)

#define EMERGENCY_PRINT_STORE_GPR9	l.mtspr r0,r9,SPR_SHADOW_GPR(9)
#define EMERGENCY_PRINT_LOAD_GPR9	l.mfspr r9,r0,SPR_SHADOW_GPR(9)

#else /* !CONFIG_OPENRISC_HAVE_SHADOW_GPRS */
/* No shadow register file: spill to fixed scratch words in low memory. */
#define EMERGENCY_PRINT_STORE_GPR4	l.sw    0x20(r0),r4
#define EMERGENCY_PRINT_LOAD_GPR4	l.lwz   r4,0x20(r0)

#define EMERGENCY_PRINT_STORE_GPR5	l.sw    0x24(r0),r5
#define EMERGENCY_PRINT_LOAD_GPR5	l.lwz   r5,0x24(r0)

#define EMERGENCY_PRINT_STORE_GPR6	l.sw    0x28(r0),r6
#define EMERGENCY_PRINT_LOAD_GPR6	l.lwz   r6,0x28(r0)

#define EMERGENCY_PRINT_STORE_GPR7	l.sw    0x2c(r0),r7
#define EMERGENCY_PRINT_LOAD_GPR7	l.lwz   r7,0x2c(r0)

#define EMERGENCY_PRINT_STORE_GPR8	l.sw    0x30(r0),r8
#define EMERGENCY_PRINT_LOAD_GPR8	l.lwz   r8,0x30(r0)

#define EMERGENCY_PRINT_STORE_GPR9	l.sw    0x34(r0),r9
#define EMERGENCY_PRINT_LOAD_GPR9	l.lwz   r9,0x34(r0)

#endif

/*
 * TLB miss handlers temporary stores
 */
#ifdef CONFIG_OPENRISC_HAVE_SHADOW_GPRS
#define EXCEPTION_STORE_GPR2		l.mtspr r0,r2,SPR_SHADOW_GPR(2)
#define EXCEPTION_LOAD_GPR2		l.mfspr r2,r0,SPR_SHADOW_GPR(2)

#define EXCEPTION_STORE_GPR3		l.mtspr r0,r3,SPR_SHADOW_GPR(3)
#define EXCEPTION_LOAD_GPR3		l.mfspr r3,r0,SPR_SHADOW_GPR(3)

#define EXCEPTION_STORE_GPR4		l.mtspr r0,r4,SPR_SHADOW_GPR(4)
#define EXCEPTION_LOAD_GPR4		l.mfspr r4,r0,SPR_SHADOW_GPR(4)

#define EXCEPTION_STORE_GPR5		l.mtspr r0,r5,SPR_SHADOW_GPR(5)
#define EXCEPTION_LOAD_GPR5		l.mfspr r5,r0,SPR_SHADOW_GPR(5)

#define EXCEPTION_STORE_GPR6		l.mtspr r0,r6,SPR_SHADOW_GPR(6)
#define EXCEPTION_LOAD_GPR6		l.mfspr r6,r0,SPR_SHADOW_GPR(6)

#else /* !CONFIG_OPENRISC_HAVE_SHADOW_GPRS */
#define EXCEPTION_STORE_GPR2		l.sw    0x64(r0),r2
#define EXCEPTION_LOAD_GPR2		l.lwz   r2,0x64(r0)

#define EXCEPTION_STORE_GPR3		l.sw    0x68(r0),r3
#define EXCEPTION_LOAD_GPR3		l.lwz   r3,0x68(r0)

#define EXCEPTION_STORE_GPR4		l.sw    0x6c(r0),r4
#define EXCEPTION_LOAD_GPR4		l.lwz   r4,0x6c(r0)

#define EXCEPTION_STORE_GPR5		l.sw    0x70(r0),r5
#define EXCEPTION_LOAD_GPR5		l.lwz   r5,0x70(r0)

#define EXCEPTION_STORE_GPR6		l.sw    0x74(r0),r6
#define EXCEPTION_LOAD_GPR6		l.lwz   r6,0x74(r0)

#endif

/*
 * EXCEPTION_HANDLE temporary stores
 */

#ifdef CONFIG_OPENRISC_HAVE_SHADOW_GPRS
#define EXCEPTION_T_STORE_GPR30		l.mtspr r0,r30,SPR_SHADOW_GPR(30)
#define EXCEPTION_T_LOAD_GPR30(reg)	l.mfspr reg,r0,SPR_SHADOW_GPR(30)

#define EXCEPTION_T_STORE_GPR10		l.mtspr r0,r10,SPR_SHADOW_GPR(10)
#define EXCEPTION_T_LOAD_GPR10(reg)	l.mfspr reg,r0,SPR_SHADOW_GPR(10)

#define EXCEPTION_T_STORE_SP		l.mtspr r0,r1,SPR_SHADOW_GPR(1)
#define EXCEPTION_T_LOAD_SP(reg)	l.mfspr reg,r0,SPR_SHADOW_GPR(1)

#else /* !CONFIG_OPENRISC_HAVE_SHADOW_GPRS */
#define EXCEPTION_T_STORE_GPR30		l.sw    0x78(r0),r30
#define EXCEPTION_T_LOAD_GPR30(reg)	l.lwz   reg,0x78(r0)

#define EXCEPTION_T_STORE_GPR10		l.sw    0x7c(r0),r10
#define EXCEPTION_T_LOAD_GPR10(reg)	l.lwz   reg,0x7c(r0)

#define EXCEPTION_T_STORE_SP		l.sw    0x80(r0),r1
#define EXCEPTION_T_LOAD_SP(reg)	l.lwz   reg,0x80(r0)
#endif

/* =========================================================[ macros ]=== */

/* Load this CPU's current_pgd pointer into reg; t1 is scratch.
 * On SMP, current_pgd is an array indexed by SPR_COREID (word-sized
 * entries, hence the << 2).  Dereferenced through its physical alias
 * since this runs with the MMU off. */
#ifdef CONFIG_SMP
#define GET_CURRENT_PGD(reg,t1)					\
	LOAD_SYMBOL_2_GPR(reg,current_pgd)			;\
	l.mfspr	t1,r0,SPR_COREID				;\
	l.slli	t1,t1,2						;\
	l.add	reg,reg,t1					;\
	tophys  (t1,reg)					;\
	l.lwz   reg,0(t1)
#else
#define GET_CURRENT_PGD(reg,t1)					\
	LOAD_SYMBOL_2_GPR(reg,current_pgd)			;\
	tophys  (t1,reg)					;\
	l.lwz   reg,0(t1)
#endif

/* Load r10 from current_thread_info_set - clobbers r1 and r30 */
#ifdef CONFIG_SMP
#define GET_CURRENT_THREAD_INFO					\
	LOAD_SYMBOL_2_GPR(r1,current_thread_info_set)		;\
	tophys  (r30,r1)					;\
	l.mfspr	r10,r0,SPR_COREID				;\
	l.slli	r10,r10,2					;\
	l.add	r30,r30,r10					;\
	/* r10: current_thread_info  */				;\
	l.lwz   r10,0(r30)
#else
#define GET_CURRENT_THREAD_INFO					\
	LOAD_SYMBOL_2_GPR(r1,current_thread_info_set)		;\
	tophys  (r30,r1)					;\
	/* r10: current_thread_info  */				;\
	l.lwz   r10,0(r30)
#endif

/*
 * DSCR: this is a common hook for handling exceptions. it will save
 *       the needed registers, set up stack and pointer to current
 *       then jump to the handler while enabling MMU
 *
 * PRMS: handler	- a function to jump to. it has to save the
 *			remaining registers to kernel stack, call
 *			appropriate arch-independent exception handler
 *			and finally jump to ret_from_except
 *
 * PREQ: unchanged state from the time exception happened
 *
 * POST: SAVED the following registers original value
 *	       to the new created exception frame pointed to by r1
 *
 *	 r1  - ksp	pointing to the new (exception) frame
 *	 r4  - EEAR     exception EA
 *	 r10 - current	pointing to current_thread_info struct
 *	 r12 - syscall  0, since we didn't come from syscall
 *	 r30 - handler	address of the handler we'll jump to
 *
 *	 handler has to save remaining registers to the exception
 *	 ksp frame *before* tainting them!
 *
 * NOTE: this function is not reentrant per se. reentrancy is guaranteed
 *	 by processor disabling all exceptions/interrupts when exception
 *	 occurs.
 *
 * OPTM: no need to make it so wasteful to extract ksp when in user mode
 */

#define EXCEPTION_HANDLE(handler)				\
	EXCEPTION_T_STORE_GPR30					;\
	l.mfspr r30,r0,SPR_ESR_BASE				;\
	l.andi  r30,r30,SPR_SR_SM				;\
	l.sfeqi r30,0						;\
	EXCEPTION_T_STORE_GPR10					;\
	l.bnf   2f			/* kernel_mode */	;\
	EXCEPTION_T_STORE_SP		/* delay slot */	;\
1: /* user_mode:   */						;\
	GET_CURRENT_THREAD_INFO					;\
	tophys  (r30,r10)					;\
	l.lwz   r1,(TI_KSP)(r30)				;\
	/* fall through */					;\
2: /* kernel_mode: */						;\
	/* create new stack frame, save only needed gprs */	;\
	/* r1: KSP, r10: current, r4: EEAR, r31: __pa(KSP) */	;\
	/* r12:	temp, syscall indicator */			;\
	l.addi  r1,r1,-(INT_FRAME_SIZE)				;\
	/* r1 is KSP, r30 is __pa(KSP) */			;\
	tophys  (r30,r1)					;\
	l.sw    PT_GPR12(r30),r12				;\
	/* r4 use for tmp before EA */				;\
	l.mfspr r12,r0,SPR_EPCR_BASE				;\
	l.sw    PT_PC(r30),r12					;\
	l.mfspr r12,r0,SPR_ESR_BASE				;\
	l.sw    PT_SR(r30),r12					;\
	/* save r30 */						;\
	EXCEPTION_T_LOAD_GPR30(r12)				;\
	l.sw    PT_GPR30(r30),r12				;\
	/* save r10 as was prior to exception */		;\
	EXCEPTION_T_LOAD_GPR10(r12)				;\
	l.sw    PT_GPR10(r30),r12				;\
	/* save PT_SP as was prior to exception */		;\
	EXCEPTION_T_LOAD_SP(r12)				;\
	l.sw    PT_SP(r30),r12					;\
	/* save exception r4, set r4 = EA */			;\
	l.sw    PT_GPR4(r30),r4					;\
	l.mfspr r4,r0,SPR_EEAR_BASE				;\
	/* r12 == 1 if we come from syscall */			;\
	CLEAR_GPR(r12)						;\
	/* ----- turn on MMU ----- */				;\
	/* Carry DSX into exception SR */			;\
	l.mfspr r30,r0,SPR_SR					;\
	l.andi  r30,r30,SPR_SR_DSX				;\
	l.ori   r30,r30,(EXCEPTION_SR)				;\
	l.mtspr r0,r30,SPR_ESR_BASE				;\
	/* r30:	EA address of handler */			;\
	LOAD_SYMBOL_2_GPR(r30,handler)				;\
	l.mtspr r0,r30,SPR_EPCR_BASE				;\
	l.rfe

/*
 * this doesn't work
 *
 *
 * #ifdef CONFIG_JUMP_UPON_UNHANDLED_EXCEPTION
 * #define UNHANDLED_EXCEPTION(handler)				\
 *	l.ori   r3,r0,0x1					;\
 *	l.mtspr r0,r3,SPR_SR					;\
 *	l.movhi r3,hi(0xf0000100)				;\
 *	l.ori   r3,r3,lo(0xf0000100)				;\
 *	l.jr    r3						;\
 *	l.nop   1
 *
 * #endif
 */

/* DSCR: this is the same as EXCEPTION_HANDLE(), we are just
 *       a bit more careful (if we have a PT_SP or current pointer
 *       corruption) and set them up from 'current_set'
 *
 */
#define UNHANDLED_EXCEPTION(handler)				\
	EXCEPTION_T_STORE_GPR30					;\
	EXCEPTION_T_STORE_GPR10					;\
	EXCEPTION_T_STORE_SP					;\
	/* temporary store r3, r9 into r1, r10 */		;\
	l.addi  r1,r3,0x0					;\
	l.addi  r10,r9,0x0					;\
	/* the string referenced by r3 must be low enough */	;\
	l.jal   _emergency_print				;\
	l.ori   r3,r0,lo(_string_unhandled_exception)		;\
	l.mfspr r3,r0,SPR_NPC					;\
	l.jal   _emergency_print_nr				;\
	l.andi  r3,r3,0x1f00					;\
	/* the string referenced by r3 must be low enough */	;\
	l.jal   _emergency_print				;\
	l.ori   r3,r0,lo(_string_epc_prefix)			;\
	l.jal   _emergency_print_nr				;\
	l.mfspr r3,r0,SPR_EPCR_BASE				;\
	l.jal   _emergency_print				;\
	l.ori   r3,r0,lo(_string_nl)				;\
	/* end of printing */					;\
	l.addi  r3,r1,0x0					;\
	l.addi  r9,r10,0x0					;\
	/* extract current, ksp from current_set */		;\
	LOAD_SYMBOL_2_GPR(r1,_unhandled_stack_top)		;\
	LOAD_SYMBOL_2_GPR(r10,init_thread_union)		;\
	/* create new stack frame, save only needed gprs */	;\
	/* r1: KSP, r10: current, r31: __pa(KSP) */		;\
	/* r12:	temp, syscall indicator, r13 temp */		;\
	l.addi  r1,r1,-(INT_FRAME_SIZE)				;\
	/* r1 is KSP, r30 is __pa(KSP) */			;\
	tophys  (r30,r1)					;\
	l.sw    PT_GPR12(r30),r12				;\
	l.mfspr r12,r0,SPR_EPCR_BASE				;\
	l.sw    PT_PC(r30),r12					;\
	l.mfspr r12,r0,SPR_ESR_BASE				;\
	l.sw    PT_SR(r30),r12					;\
	/* save r31 */						;\
	EXCEPTION_T_LOAD_GPR30(r12)				;\
	l.sw    PT_GPR30(r30),r12				;\
	/* save r10 as was prior to exception */		;\
	EXCEPTION_T_LOAD_GPR10(r12)				;\
	l.sw    PT_GPR10(r30),r12				;\
	/* save PT_SP as was prior to exception */		;\
	EXCEPTION_T_LOAD_SP(r12)				;\
	l.sw    PT_SP(r30),r12					;\
	l.sw    PT_GPR13(r30),r13				;\
	/* --> */						;\
	/* save exception r4, set r4 = EA */			;\
	l.sw    PT_GPR4(r30),r4					;\
	l.mfspr r4,r0,SPR_EEAR_BASE				;\
	/* r12 == 1 if we come from syscall */			;\
	CLEAR_GPR(r12)						;\
	/* ----- play a MMU trick ----- */			;\
	l.ori   r30,r0,(EXCEPTION_SR)				;\
	l.mtspr r0,r30,SPR_ESR_BASE				;\
	/* r31:	EA address of handler */			;\
	LOAD_SYMBOL_2_GPR(r30,handler)				;\
	l.mtspr r0,r30,SPR_EPCR_BASE				;\
	l.rfe

/* =====================================================[ exceptions] === */

/* ---[ 0x100: RESET exception ]----------------------------------------- */
	.org 0x100
	/* Jump to .init code at _start which lives in the .head section
	 * and will be discarded after boot.
	 */
	LOAD_SYMBOL_2_GPR(r15, _start)
	tophys	(r13,r15)			/* MMU disabled */
	l.jr	r13
	l.nop

/* ---[ 0x200: BUS exception ]------------------------------------------- */
	.org 0x200
_dispatch_bus_fault:
	EXCEPTION_HANDLE(_bus_fault_handler)

/* ---[ 0x300: Data Page Fault exception ]------------------------------- */
	.org 0x300
_dispatch_do_dpage_fault:
//	totally disable timer interrupt
//	l.mtspr	r0,r0,SPR_TTMR
//	DEBUG_TLB_PROBE(0x300)
//	EXCEPTION_DEBUG_VALUE_ER_ENABLED(0x300)
	EXCEPTION_HANDLE(_data_page_fault_handler)

/* ---[ 0x400: Insn Page Fault exception ]------------------------------- */
	.org 0x400
_dispatch_do_ipage_fault:
//	totally disable timer interrupt
//	l.mtspr	r0,r0,SPR_TTMR
//	DEBUG_TLB_PROBE(0x400)
//	EXCEPTION_DEBUG_VALUE_ER_ENABLED(0x400)
	EXCEPTION_HANDLE(_insn_page_fault_handler)

/* ---[ 0x500: Timer exception ]----------------------------------------- */
	.org 0x500
	EXCEPTION_HANDLE(_timer_handler)

/* ---[ 0x600: Alignment exception ]-------------------------------------- */
	.org 0x600
	EXCEPTION_HANDLE(_alignment_handler)

/* ---[ 0x700: Illegal insn exception
 ]---------------------------------- */
	.org 0x700
	EXCEPTION_HANDLE(_illegal_instruction_handler)

/* ---[ 0x800: External interrupt exception ]---------------------------- */
	.org 0x800
	EXCEPTION_HANDLE(_external_irq_handler)

/* ---[ 0x900: DTLB miss exception ]------------------------------------- */
	.org 0x900
	l.j	boot_dtlb_miss_handler
	l.nop

/* ---[ 0xa00: ITLB miss exception ]------------------------------------- */
	.org 0xa00
	l.j	boot_itlb_miss_handler
	l.nop

/* ---[ 0xb00: Range exception ]----------------------------------------- */
	.org 0xb00
	UNHANDLED_EXCEPTION(_vector_0xb00)

/* ---[ 0xc00: Syscall exception ]--------------------------------------- */
	.org 0xc00
	EXCEPTION_HANDLE(_sys_call_handler)

/* ---[ 0xd00: Trap exception ]------------------------------------------ */
	.org 0xd00
	UNHANDLED_EXCEPTION(_vector_0xd00)

/* ---[ 0xe00: Trap exception ]------------------------------------------ */
	.org 0xe00
//	UNHANDLED_EXCEPTION(_vector_0xe00)
	EXCEPTION_HANDLE(_trap_handler)

/* ---[ 0xf00: Reserved exception ]-------------------------------------- */
	.org 0xf00
	UNHANDLED_EXCEPTION(_vector_0xf00)

/* ---[ 0x1000: Reserved exception ]------------------------------------- */
	.org 0x1000
	UNHANDLED_EXCEPTION(_vector_0x1000)

/* ---[ 0x1100: Reserved exception ]------------------------------------- */
	.org 0x1100
	UNHANDLED_EXCEPTION(_vector_0x1100)

/* ---[ 0x1200: Reserved exception ]------------------------------------- */
	.org 0x1200
	UNHANDLED_EXCEPTION(_vector_0x1200)

/* ---[ 0x1300: Reserved exception ]------------------------------------- */
	.org 0x1300
	UNHANDLED_EXCEPTION(_vector_0x1300)

/* ---[ 0x1400: Reserved exception ]------------------------------------- */
	.org 0x1400
	UNHANDLED_EXCEPTION(_vector_0x1400)

/* ---[ 0x1500: Reserved exception ]------------------------------------- */
	.org 0x1500
	UNHANDLED_EXCEPTION(_vector_0x1500)

/* ---[ 0x1600: Reserved exception ]------------------------------------- */
	.org 0x1600
	UNHANDLED_EXCEPTION(_vector_0x1600)

/* ---[ 0x1700: Reserved exception ]------------------------------------- */
	.org 0x1700
	UNHANDLED_EXCEPTION(_vector_0x1700)

/* ---[ 0x1800: Reserved exception ]------------------------------------- */
	.org 0x1800
	UNHANDLED_EXCEPTION(_vector_0x1800)

/* ---[ 0x1900: Reserved exception ]------------------------------------- */
	.org 0x1900
	UNHANDLED_EXCEPTION(_vector_0x1900)

/* ---[ 0x1a00: Reserved exception ]------------------------------------- */
	.org 0x1a00
	UNHANDLED_EXCEPTION(_vector_0x1a00)

/* ---[ 0x1b00: Reserved exception ]------------------------------------- */
	.org 0x1b00
	UNHANDLED_EXCEPTION(_vector_0x1b00)

/* ---[ 0x1c00: Reserved exception ]------------------------------------- */
	.org 0x1c00
	UNHANDLED_EXCEPTION(_vector_0x1c00)

/* ---[ 0x1d00: Reserved exception ]------------------------------------- */
	.org 0x1d00
	UNHANDLED_EXCEPTION(_vector_0x1d00)

/* ---[ 0x1e00: Reserved exception ]------------------------------------- */
	.org 0x1e00
	UNHANDLED_EXCEPTION(_vector_0x1e00)

/* ---[ 0x1f00: Reserved exception ]------------------------------------- */
	.org 0x1f00
	UNHANDLED_EXCEPTION(_vector_0x1f00)

	.org 0x2000
/* ===================================================[ kernel start ]=== */

/*    .text*/

/* This early stuff belongs in HEAD, but some of the functions below definitely
 * don't...
 */

	__HEAD
	.global _start
_start:
	/* Init r0 to zero as per spec */
	CLEAR_GPR(r0)

	/* save kernel parameters */
	l.or	r25,r0,r3	/* pointer to fdt */

	/*
	 * ensure a deterministic start
	 */

	l.ori	r3,r0,0x1
	l.mtspr	r0,r3,SPR_SR

	/*
	 * Start the TTCR as early as possible, so that the RNG can make use of
	 * measurements of boot time from the earliest opportunity. Especially
	 * important is that the TTCR does not return zero by the time we reach
	 * rand_initialize().
	 */
	l.movhi	r3,hi(SPR_TTMR_CR)
	l.mtspr	r0,r3,SPR_TTMR

	/* Clear every GPR except r25, which still holds the fdt pointer. */
	CLEAR_GPR(r1)
	CLEAR_GPR(r2)
	CLEAR_GPR(r3)
	CLEAR_GPR(r4)
	CLEAR_GPR(r5)
	CLEAR_GPR(r6)
	CLEAR_GPR(r7)
	CLEAR_GPR(r8)
	CLEAR_GPR(r9)
	CLEAR_GPR(r10)
	CLEAR_GPR(r11)
	CLEAR_GPR(r12)
	CLEAR_GPR(r13)
	CLEAR_GPR(r14)
	CLEAR_GPR(r15)
	CLEAR_GPR(r16)
	CLEAR_GPR(r17)
	CLEAR_GPR(r18)
	CLEAR_GPR(r19)
	CLEAR_GPR(r20)
	CLEAR_GPR(r21)
	CLEAR_GPR(r22)
	CLEAR_GPR(r23)
	CLEAR_GPR(r24)
	CLEAR_GPR(r26)
	CLEAR_GPR(r27)
	CLEAR_GPR(r28)
	CLEAR_GPR(r29)
	CLEAR_GPR(r30)
	CLEAR_GPR(r31)

#ifdef CONFIG_SMP
	/* Only core 0 boots the kernel; others park in secondary_wait. */
	l.mfspr	r26,r0,SPR_COREID
	l.sfeq	r26,r0
	l.bnf	secondary_wait
	l.nop
#endif
	/*
	 * set up initial ksp and current
	 */
	/* setup kernel stack */
	LOAD_SYMBOL_2_GPR(r1,init_thread_union + THREAD_SIZE)
	LOAD_SYMBOL_2_GPR(r10,init_thread_union)	// setup current
	tophys	(r31,r10)
	l.sw	TI_KSP(r31), r1

	l.ori	r4,r0,0x0


	/*
	 * .data contains initialized data,
	 * .bss contains uninitialized data - clear it up
	 */
clear_bss:
	LOAD_SYMBOL_2_GPR(r24, __bss_start)
	LOAD_SYMBOL_2_GPR(r26, _end)
	tophys(r28,r24)
	tophys(r30,r26)
	CLEAR_GPR(r24)
	CLEAR_GPR(r26)
1:
	l.sw    (0)(r28),r0
	l.sfltu r28,r30
	l.bf    1b
	l.addi  r28,r28,4

enable_ic:
	l.jal	_ic_enable
	l.nop

enable_dc:
	l.jal	_dc_enable
	l.nop

flush_tlb:
	l.jal	_flush_tlb
	l.nop

/* The MMU needs to be enabled before or1k_early_setup is called */

enable_mmu:
	/*
	 * enable dmmu & immu
	 * SR[5] = 0, SR[6] = 0, 6th and 7th bit of SR set to 0
	 */
	l.mfspr	r30,r0,SPR_SR
	l.movhi	r28,hi(SPR_SR_DME | SPR_SR_IME)
	l.ori	r28,r28,lo(SPR_SR_DME | SPR_SR_IME)
	l.or	r30,r30,r28
	l.mtspr	r0,r30,SPR_SR
	l.nop
	l.nop
	l.nop
	l.nop
	l.nop
	l.nop
	l.nop
	l.nop
	l.nop
	l.nop
	l.nop
	l.nop
	l.nop
	l.nop
	l.nop
	l.nop

	// reset the simulation counters
	l.nop	5

	/* check fdt header magic word */
	l.lwz	r3,0(r25)	/* load magic from fdt into r3 */
	l.movhi	r4,hi(OF_DT_HEADER)
	l.ori	r4,r4,lo(OF_DT_HEADER)
	l.sfeq	r3,r4
	l.bf	_fdt_found
	l.nop
	/* magic number mismatch, set fdt pointer to null */
	l.or	r25,r0,r0
_fdt_found:
	/* pass fdt pointer to or1k_early_setup in r3 */
	l.or	r3,r0,r25
	LOAD_SYMBOL_2_GPR(r24, or1k_early_setup)
	l.jalr	r24
	l.nop

clear_regs:
	/*
	 * clear all GPRS to increase determinism
	 * (r1 = kernel stack and r10 = current are kept live)
	 */
	CLEAR_GPR(r2)
	CLEAR_GPR(r3)
	CLEAR_GPR(r4)
	CLEAR_GPR(r5)
	CLEAR_GPR(r6)
	CLEAR_GPR(r7)
	CLEAR_GPR(r8)
	CLEAR_GPR(r9)
	CLEAR_GPR(r11)
	CLEAR_GPR(r12)
	CLEAR_GPR(r13)
	CLEAR_GPR(r14)
	CLEAR_GPR(r15)
	CLEAR_GPR(r16)
	CLEAR_GPR(r17)
	CLEAR_GPR(r18)
	CLEAR_GPR(r19)
	CLEAR_GPR(r20)
	CLEAR_GPR(r21)
	CLEAR_GPR(r22)
	CLEAR_GPR(r23)
	CLEAR_GPR(r24)
	CLEAR_GPR(r25)
	CLEAR_GPR(r26)
	CLEAR_GPR(r27)
	CLEAR_GPR(r28)
	CLEAR_GPR(r29)
	CLEAR_GPR(r30)
	CLEAR_GPR(r31)

jump_start_kernel:
	/*
	 * jump to kernel entry (start_kernel)
	 */
	LOAD_SYMBOL_2_GPR(r30, start_kernel)
	l.jr	r30
	l.nop

_flush_tlb:
	/*
	 *  I N V A L I D A T E   T L B   e n t r i e s
	 */
	LOAD_SYMBOL_2_GPR(r5,SPR_DTLBMR_BASE(0))
	LOAD_SYMBOL_2_GPR(r6,SPR_ITLBMR_BASE(0))
	l.addi	r7,r0,128 /* Maximum number of sets */
1:
	l.mtspr	r5,r0,0x0
	l.mtspr	r6,r0,0x0

	l.addi	r5,r5,1
	l.addi	r6,r6,1
	l.sfeq	r7,r0
	l.bnf	1b
	l.addi	r7,r7,-1

	l.jr	r9
	l.nop

#ifdef CONFIG_SMP
secondary_wait:
	/* Doze the cpu until we are asked to run */
	/* If we dont have power management skip doze */
	l.mfspr	r25,r0,SPR_UPR
	l.andi	r25,r25,SPR_UPR_PMP
	l.sfeq	r25,r0
	l.bf	secondary_check_release
	l.nop

	/* Setup special secondary exception handler */
	LOAD_SYMBOL_2_GPR(r3, _secondary_evbar)
	tophys(r25,r3)
	l.mtspr	r0,r25,SPR_EVBAR

	/* Enable Interrupts */
	l.mfspr	r25,r0,SPR_SR
	l.ori	r25,r25,SPR_SR_IEE
	l.mtspr	r0,r25,SPR_SR

	/* Unmask interrupts */
	l.mfspr	r25,r0,SPR_PICMR
	l.ori	r25,r25,0xffff
	l.mtspr	r0,r25,SPR_PICMR

	/* Doze */
	l.mfspr	r25,r0,SPR_PMR
	LOAD_SYMBOL_2_GPR(r3, SPR_PMR_DME)
	l.or	r25,r25,r3
	l.mtspr	r0,r25,SPR_PMR

	/* Wakeup - Restore exception handler */
	l.mtspr	r0,r0,SPR_EVBAR

secondary_check_release:
	/*
	 * Check if we actually got the release signal, if not go-back to
	 * sleep.
	 */
	l.mfspr	r25,r0,SPR_COREID
	LOAD_SYMBOL_2_GPR(r3, secondary_release)
	tophys(r4, r3)
	l.lwz	r3,0(r4)
	l.sfeq	r25,r3
	l.bnf	secondary_wait
	l.nop
	/* fall through to secondary_init */

secondary_init:
	/*
	 * set up initial ksp and current
	 */
	LOAD_SYMBOL_2_GPR(r10, secondary_thread_info)
	tophys	(r30,r10)
	l.lwz	r10,0(r30)
	l.addi	r1,r10,THREAD_SIZE
	tophys	(r30,r10)
	l.sw	TI_KSP(r30),r1

	l.jal	_ic_enable
	l.nop

	l.jal	_dc_enable
	l.nop

	l.jal	_flush_tlb
	l.nop

	/*
	 * enable dmmu & immu
	 */
	l.mfspr	r30,r0,SPR_SR
	l.movhi	r28,hi(SPR_SR_DME | SPR_SR_IME)
	l.ori	r28,r28,lo(SPR_SR_DME | SPR_SR_IME)
	l.or	r30,r30,r28
	/*
	 * This is a bit tricky, we need to switch over from physical addresses
	 * to virtual addresses on the fly.
	 * To do that, we first set up ESR with the IME and DME bits set.
	 * Then EPCR is set to secondary_start and then a l.rfe is issued to
	 * "jump" to that.
	 */
	l.mtspr	r0,r30,SPR_ESR_BASE
	LOAD_SYMBOL_2_GPR(r30, secondary_start)
	l.mtspr	r0,r30,SPR_EPCR_BASE
	l.rfe

secondary_start:
	LOAD_SYMBOL_2_GPR(r30, secondary_start_kernel)
	l.jr	r30
	l.nop

#endif

/* ========================================[ cache ]=== */

	/* alignment here so we don't change memory offsets with
	 * memory controller defined
	 */
	.align 0x2000

_ic_enable:
	/* Check if IC present and skip enabling otherwise */
	l.mfspr	r24,r0,SPR_UPR
	l.andi	r26,r24,SPR_UPR_ICP
	l.sfeq	r26,r0
	l.bf	9f
	l.nop

	/* Disable IC */
	l.mfspr	r6,r0,SPR_SR
	l.addi	r5,r0,-1
	l.xori	r5,r5,SPR_SR_ICE
	l.and	r5,r6,r5
	l.mtspr	r0,r5,SPR_SR

	/* Establish cache block size
	   If BS=0, 16;
	   If BS=1, 32;
	   r14 contain block size
	*/
	l.mfspr	r24,r0,SPR_ICCFGR
	l.andi	r26,r24,SPR_ICCFGR_CBS
	l.srli	r28,r26,7
	l.ori	r30,r0,16
	l.sll	r14,r30,r28

	/* Establish number of cache sets
	   r16 contains number of cache sets
	   r28 contains log(# of cache sets)
	*/
	l.andi	r26,r24,SPR_ICCFGR_NCS
	l.srli	r28,r26,3
	l.ori	r30,r0,1
	l.sll	r16,r30,r28

	/* Invalidate IC */
	l.addi	r6,r0,0
	l.sll	r5,r14,r28
//	l.mul	r5,r14,r16
//	l.trap	1
//	l.addi	r5,r0,IC_SIZE
1:
	l.mtspr	r0,r6,SPR_ICBIR
	l.sfne	r6,r5
	l.bf	1b
	l.add	r6,r6,r14
//	l.addi	r6,r6,IC_LINE

	/* Enable IC */
	l.mfspr	r6,r0,SPR_SR
	l.ori	r6,r6,SPR_SR_ICE
	l.mtspr	r0,r6,SPR_SR
	l.nop
	l.nop
	l.nop
	l.nop
	l.nop
	l.nop
	l.nop
	l.nop
	l.nop
	l.nop
9:
	l.jr	r9
	l.nop

_dc_enable:
	/* Check if DC present and skip enabling otherwise */
	l.mfspr	r24,r0,SPR_UPR
	l.andi	r26,r24,SPR_UPR_DCP
	l.sfeq	r26,r0
	l.bf	9f
	l.nop

	/* Disable DC */
	l.mfspr	r6,r0,SPR_SR
	l.addi	r5,r0,-1
	l.xori	r5,r5,SPR_SR_DCE
	l.and	r5,r6,r5
	l.mtspr	r0,r5,SPR_SR

	/* Establish cache block size
	   If BS=0, 16;
	   If BS=1, 32;
	   r14 contain block size
	*/
	l.mfspr	r24,r0,SPR_DCCFGR
	l.andi	r26,r24,SPR_DCCFGR_CBS
	l.srli	r28,r26,7
	l.ori	r30,r0,16
	l.sll	r14,r30,r28

	/* Establish number of cache sets
	   r16 contains number of cache sets
	   r28 contains log(# of cache sets)
	*/
	l.andi	r26,r24,SPR_DCCFGR_NCS
	l.srli	r28,r26,3
	l.ori	r30,r0,1
	l.sll	r16,r30,r28

	/* Invalidate DC */
	l.addi	r6,r0,0
	l.sll	r5,r14,r28
1:
	l.mtspr	r0,r6,SPR_DCBIR
	l.sfne	r6,r5
	l.bf	1b
	l.add	r6,r6,r14

	/* Enable DC */
	l.mfspr	r6,r0,SPR_SR
	l.ori	r6,r6,SPR_SR_DCE
	l.mtspr	r0,r6,SPR_SR
9:
	l.jr	r9
	l.nop

/* ===============================================[ page table masks ]=== */

#define DTLB_UP_CONVERT_MASK  0x3fa
#define ITLB_UP_CONVERT_MASK  0x3a

/* for SMP we'd have (this is a bit subtle, CC must be always set
 * for SMP, but since we have _PAGE_PRESENT bit always defined
 * we can just modify the mask)
 */
#define DTLB_SMP_CONVERT_MASK  0x3fb
#define ITLB_SMP_CONVERT_MASK  0x3b

/* ---[ boot dtlb miss handler ]----------------------------------------- */

boot_dtlb_miss_handler:

/* mask for DTLB_MR register: - (0) sets V (valid) bit,
 *                            - (31-12) sets bits belonging to VPN (31-12)
 */
#define DTLB_MR_MASK 0xfffff001

/* mask for DTLB_TR register: - (2) sets CI (cache inhibit) bit,
 *                            - (4) sets A (access) bit,
 *                            - (5) sets D (dirty) bit,
 *                            - (8) sets SRE (superuser read) bit
 *                            - (9) sets SWE (superuser write) bit
 *                            - (31-12) sets bits belonging to VPN (31-12)
 */
#define DTLB_TR_MASK 0xfffff332

/* These are for masking out the VPN/PPN value from the MR/TR registers...
 * it's not the same as the PFN */
#define VPN_MASK 0xfffff000
#define PPN_MASK 0xfffff000


	EXCEPTION_STORE_GPR6

#if 0
	l.mfspr	r6,r0,SPR_ESR_BASE	   //
	l.andi	r6,r6,SPR_SR_SM            // are we in kernel mode ?
	l.sfeqi	r6,0                       // r6 == 0x1 --> SM
	l.bf	exit_with_no_dtranslation  //
	l.nop
#endif

	/* this could be optimized by moving storing of
	 * non r6 registers here, and jumping r6 restore
	 * if not in supervisor mode
	 */

	EXCEPTION_STORE_GPR2
	EXCEPTION_STORE_GPR3
	EXCEPTION_STORE_GPR4
	EXCEPTION_STORE_GPR5

	l.mfspr	r4,r0,SPR_EEAR_BASE        // get the offending EA

immediate_translation:
	CLEAR_GPR(r6)

	l.srli	r3,r4,0xd                  // r3 <- r4 / 8192 (sets are relative to page size (8Kb) NOT VPN size (4Kb)

	l.mfspr	r6, r0, SPR_DMMUCFGR
	l.andi	r6, r6, SPR_DMMUCFGR_NTS
	l.srli	r6, r6, SPR_DMMUCFGR_NTS_OFF
	l.ori	r5, r0, 0x1
	l.sll	r5, r5, r6                 // r5 = number DMMU sets
	l.addi	r6, r5, -1                 // r6 = nsets mask
	l.and	r2, r3, r6                 // r2 <- r3 % NSETS_MASK

	l.or	r6,r6,r4                   // r6 <- r4
	l.ori	r6,r6,~(VPN_MASK)          // r6 <- VPN :VPN .xfff - clear up lo(r6) to 0x**** *fff
	l.movhi	r5,hi(DTLB_MR_MASK)        // r5 <- ffff:0000.x000
	l.ori	r5,r5,lo(DTLB_MR_MASK)     // r5 <- ffff:1111.x001 - apply DTLB_MR_MASK
	l.and	r5,r5,r6                   // r5 <- VPN :VPN .x001 - we have DTLBMR entry
	l.mtspr	r2,r5,SPR_DTLBMR_BASE(0)   // set DTLBMR

	/* set up DTLB with no translation for EA <= 0xbfffffff */
	LOAD_SYMBOL_2_GPR(r6,0xbfffffff)
	l.sfgeu	r6,r4                      // flag if r6 >= r4 (if 0xbfffffff >= EA)
	l.bf	1f                         // goto out
	l.and	r3,r4,r4                   // delay slot :: 24 <- r4 (if flag==1)

	tophys(r3,r4)                      // r3 <- PA
1:
	l.ori	r3,r3,~(PPN_MASK)          // r3 <- PPN :PPN .xfff - clear up lo(r6) to 0x**** *fff
	l.movhi	r5,hi(DTLB_TR_MASK)        // r5 <- ffff:0000.x000
	l.ori	r5,r5,lo(DTLB_TR_MASK)     // r5 <- ffff:1111.x330 - apply DTLB_MR_MASK
	l.and	r5,r5,r3                   // r5 <- PPN :PPN .x330 - we have DTLBTR entry
	l.mtspr	r2,r5,SPR_DTLBTR_BASE(0)   // set DTLBTR

	EXCEPTION_LOAD_GPR6
	EXCEPTION_LOAD_GPR5
	EXCEPTION_LOAD_GPR4
	EXCEPTION_LOAD_GPR3
	EXCEPTION_LOAD_GPR2

	l.rfe                              // SR <- ESR, PC <- EPC

exit_with_no_dtranslation:
	/* EA out of memory or not in supervisor mode */
	EXCEPTION_LOAD_GPR6
	EXCEPTION_LOAD_GPR4
	/* NOTE(review): no explicit delay-slot instruction follows this
	 * l.j -- the next emitted instruction executes in its delay slot;
	 * confirm this is intentional (compare exit_with_no_itranslation,
	 * which has an l.nop). */
	l.j	_dispatch_bus_fault

/* ---[ boot itlb miss handler ]----------------------------------------- */

boot_itlb_miss_handler:

/* mask for ITLB_MR register: - sets V (valid) bit,
 *                            - sets bits belonging to VPN (15-12)
 */
#define ITLB_MR_MASK 0xfffff001

/* mask for ITLB_TR register: - sets A (access) bit,
 *                            - sets SXE (superuser execute) bit
 *                            - sets bits belonging to VPN (15-12)
 */
#define ITLB_TR_MASK 0xfffff050

/*
#define VPN_MASK 0xffffe000
#define PPN_MASK 0xffffe000
*/



	EXCEPTION_STORE_GPR2
	EXCEPTION_STORE_GPR3
	EXCEPTION_STORE_GPR4
	EXCEPTION_STORE_GPR5
	EXCEPTION_STORE_GPR6

#if 0
	l.mfspr	r6,r0,SPR_ESR_BASE         //
	l.andi	r6,r6,SPR_SR_SM            // are we in kernel mode ?
	l.sfeqi	r6,0                       // r6 == 0x1 --> SM
	l.bf	exit_with_no_itranslation
	l.nop
#endif


	l.mfspr	r4,r0,SPR_EEAR_BASE        // get the offending EA

earlyearly:
	CLEAR_GPR(r6)

	l.srli	r3,r4,0xd                  // r3 <- r4 / 8192 (sets are relative to page size (8Kb) NOT VPN size (4Kb)

	l.mfspr	r6, r0, SPR_IMMUCFGR
	l.andi	r6, r6, SPR_IMMUCFGR_NTS
	l.srli	r6, r6, SPR_IMMUCFGR_NTS_OFF
	l.ori	r5, r0, 0x1
	l.sll	r5, r5, r6                 // r5 = number IMMU sets from IMMUCFGR
	l.addi	r6, r5, -1                 // r6 = nsets mask
	l.and	r2, r3, r6                 // r2 <- r3 % NSETS_MASK

	l.or	r6,r6,r4                   // r6 <- r4
	l.ori	r6,r6,~(VPN_MASK)          // r6 <- VPN :VPN .xfff - clear up lo(r6) to 0x**** *fff
	l.movhi	r5,hi(ITLB_MR_MASK)        // r5 <- ffff:0000.x000
	l.ori	r5,r5,lo(ITLB_MR_MASK)     // r5 <- ffff:1111.x001 - apply ITLB_MR_MASK
	l.and	r5,r5,r6                   // r5 <- VPN :VPN .x001 - we have ITLBMR entry
	l.mtspr	r2,r5,SPR_ITLBMR_BASE(0)   // set ITLBMR

	/*
	 * set up ITLB with no translation for EA <= 0x0fffffff
	 *
	 * we need this for head.S mapping (EA = PA). if we move all functions
	 * which run with mmu enabled into entry.S, we might be able to eliminate this.
	 *
	 */
	LOAD_SYMBOL_2_GPR(r6,0x0fffffff)
	l.sfgeu	r6,r4                      // flag if r6 >= r4 (if 0xb0ffffff >= EA)
	l.bf	1f                         // goto out
	l.and	r3,r4,r4                   // delay slot :: 24 <- r4 (if flag==1)

	tophys(r3,r4)                      // r3 <- PA
1:
	l.ori	r3,r3,~(PPN_MASK)          // r3 <- PPN :PPN .xfff - clear up lo(r6) to 0x**** *fff
	l.movhi	r5,hi(ITLB_TR_MASK)        // r5 <- ffff:0000.x000
	l.ori	r5,r5,lo(ITLB_TR_MASK)     // r5 <- ffff:1111.x050 - apply ITLB_MR_MASK
	l.and	r5,r5,r3                   // r5 <- PPN :PPN .x050 - we have ITLBTR entry
	l.mtspr	r2,r5,SPR_ITLBTR_BASE(0)   // set ITLBTR

	EXCEPTION_LOAD_GPR6
	EXCEPTION_LOAD_GPR5
	EXCEPTION_LOAD_GPR4
	EXCEPTION_LOAD_GPR3
	EXCEPTION_LOAD_GPR2

	l.rfe                              // SR <- ESR, PC <- EPC

exit_with_no_itranslation:
	EXCEPTION_LOAD_GPR4
	EXCEPTION_LOAD_GPR6
	l.j	_dispatch_bus_fault
	l.nop

/* ====================================================================== */
/*
 * Stuff below here shouldn't go into .head section... maybe this stuff
 * can be moved to entry.S ???
 */

/* ==============================================[ DTLB miss handler ]=== */

/*
 * Comments:
 *   Exception handlers are entered with MMU off so the following handler
 *   needs to use physical addressing
 *
 */

	.text
/*
 * dtlb_miss_handler: hardware DTLB refill.
 *
 * Walks the two-level page table for the faulting data address (EEAR),
 * and on success programs one DTLB set (DTLBMR/DTLBTR) and returns with
 * l.rfe.  If the pmd is empty or the pte is not present, falls through
 * to the generic page-fault path via EXCEPTION_HANDLE.
 *
 * Runs with the MMU off, so all page-table accesses go through tophys().
 * r2-r4 are preserved via the EXCEPTION_STORE/LOAD macros (shadow GPRs
 * or fixed low-memory slots, depending on config).
 */
ENTRY(dtlb_miss_handler)
	EXCEPTION_STORE_GPR2
	EXCEPTION_STORE_GPR3
	EXCEPTION_STORE_GPR4
	/*
	 * get EA of the miss
	 */
	l.mfspr	r2,r0,SPR_EEAR_BASE
	/*
	 * pmd = (pmd_t *)(current_pgd + pgd_index(daddr));
	 */
	GET_CURRENT_PGD(r3,r4)		// r3 is current_pgd, r4 is temp
	l.srli	r4,r2,0x18		// >> PAGE_SHIFT + (PAGE_SHIFT - 2)
	l.slli	r4,r4,0x2		// to get address << 2
	l.add	r3,r4,r3		// r4 is pgd_index(daddr)
	/*
	 * if (pmd_none(*pmd))
	 *	goto pmd_none:
	 */
	tophys	(r4,r3)
	l.lwz	r3,0x0(r4)		// get *pmd value
	l.sfne	r3,r0
	l.bnf	d_pmd_none
	l.addi	r3,r0,0xffffe000	// delay slot: r3 = PAGE_MASK

d_pmd_good:
	/*
	 * pte = *pte_offset(pmd, daddr);
	 */
	l.lwz	r4,0x0(r4)		// get **pmd value
	l.and	r4,r4,r3		// & PAGE_MASK
	l.srli	r2,r2,0xd		// >> PAGE_SHIFT, r2 == EEAR
	l.andi	r3,r2,0x7ff		// (1UL << PAGE_SHIFT - 2) - 1
	l.slli	r3,r3,0x2		// to get address << 2
	l.add	r3,r3,r4
	l.lwz	r3,0x0(r3)		// this is pte at last
	/*
	 * if (!pte_present(pte))
	 */
	l.andi	r4,r3,0x1
	l.sfne	r4,r0			// is pte present
	l.bnf	d_pte_not_present
	l.addi	r4,r0,0xffffe3fa	// delay slot: PAGE_MASK | DTLB_UP_CONVERT_MASK
	/*
	 * fill DTLB TR register
	 */
	l.and	r4,r3,r4		// apply the mask
	// Determine number of DMMU sets
	l.mfspr	r2, r0, SPR_DMMUCFGR
	l.andi	r2, r2, SPR_DMMUCFGR_NTS
	l.srli	r2, r2, SPR_DMMUCFGR_NTS_OFF
	l.ori	r3, r0, 0x1
	l.sll	r3, r3, r2		// r3 = number DMMU sets from DMMUCFGR
	l.addi	r2, r3, -1		// r2 = nsets mask
	l.mfspr	r3, r0, SPR_EEAR_BASE
	l.srli	r3, r3, 0xd		// >> PAGE_SHIFT
	l.and	r2, r3, r2		// calc set index: & (NUM_TLB_ENTRIES-1)
	l.mtspr	r2,r4,SPR_DTLBTR_BASE(0)
	/*
	 * fill DTLB MR register
	 */
	l.slli	r3, r3, 0xd		/* << PAGE_SHIFT => EA & PAGE_MASK */
	l.ori	r4,r3,0x1		// set hardware valid bit: DTLB_MR entry
	l.mtspr	r2,r4,SPR_DTLBMR_BASE(0)

	EXCEPTION_LOAD_GPR2
	EXCEPTION_LOAD_GPR3
	EXCEPTION_LOAD_GPR4
	l.rfe
d_pmd_none:
d_pte_not_present:
	EXCEPTION_LOAD_GPR2
	EXCEPTION_LOAD_GPR3
	EXCEPTION_LOAD_GPR4
	EXCEPTION_HANDLE(_dtlb_miss_page_fault_handler)

/* ==============================================[ ITLB miss handler ]=== */
/*
 * itlb_miss_handler: hardware ITLB refill.
 *
 * Same page-table walk as the DTLB handler above, but fills one ITLB
 * set (ITLBMR/ITLBTR) for the faulting instruction address.  Pages
 * without execute/access permission bits take the itlb_tr_fill path
 * directly; see the __PHX__ fixme below about the (currently disabled)
 * executable-flag workaround.
 */
ENTRY(itlb_miss_handler)
	EXCEPTION_STORE_GPR2
	EXCEPTION_STORE_GPR3
	EXCEPTION_STORE_GPR4
	/*
	 * get EA of the miss
	 */
	l.mfspr	r2,r0,SPR_EEAR_BASE

	/*
	 * pmd = (pmd_t *)(current_pgd + pgd_index(daddr));
	 *
	 */
	GET_CURRENT_PGD(r3,r4)		// r3 is current_pgd, r4 is temp
	l.srli	r4,r2,0x18		// >> PAGE_SHIFT + (PAGE_SHIFT - 2)
	l.slli	r4,r4,0x2		// to get address << 2
	l.add	r3,r4,r3		// r4 is pgd_index(daddr)
	/*
	 * if (pmd_none(*pmd))
	 *	goto pmd_none:
	 */
	tophys	(r4,r3)
	l.lwz	r3,0x0(r4)		// get *pmd value
	l.sfne	r3,r0
	l.bnf	i_pmd_none
	l.addi	r3,r0,0xffffe000	// delay slot: r3 = PAGE_MASK

i_pmd_good:
	/*
	 * pte = *pte_offset(pmd, iaddr);
	 *
	 */
	l.lwz	r4,0x0(r4)		// get **pmd value
	l.and	r4,r4,r3		// & PAGE_MASK
	l.srli	r2,r2,0xd		// >> PAGE_SHIFT, r2 == EEAR
	l.andi	r3,r2,0x7ff		// (1UL << PAGE_SHIFT - 2) - 1
	l.slli	r3,r3,0x2		// to get address << 2
	l.add	r3,r3,r4
	l.lwz	r3,0x0(r3)		// this is pte at last
	/*
	 * if (!pte_present(pte))
	 *
	 */
	l.andi	r4,r3,0x1
	l.sfne	r4,r0			// is pte present
	l.bnf	i_pte_not_present
	l.addi	r4,r0,0xffffe03a	// delay slot: PAGE_MASK | ITLB_UP_CONVERT_MASK
	/*
	 * fill ITLB TR register
	 */
	l.and	r4,r3,r4		// apply the mask
	l.andi	r3,r3,0x7c0		// _PAGE_EXEC | _PAGE_SRE | _PAGE_SWE | _PAGE_URE | _PAGE_UWE
	l.sfeq	r3,r0
	l.bf	itlb_tr_fill //_workaround	// workaround path disabled; see fixme below
	// Determine number of IMMU sets (delay slot + fall-through)
	l.mfspr	r2, r0, SPR_IMMUCFGR
	l.andi	r2, r2, SPR_IMMUCFGR_NTS
	l.srli	r2, r2, SPR_IMMUCFGR_NTS_OFF
	l.ori	r3, r0, 0x1
	l.sll	r3, r3, r2		// r3 = number IMMU sets from IMMUCFGR
	l.addi	r2, r3, -1		// r2 = nsets mask
	l.mfspr	r3, r0, SPR_EEAR_BASE
	l.srli	r3, r3, 0xd		// >> PAGE_SHIFT
	l.and	r2, r3, r2		// calc set index: & (NUM_TLB_ENTRIES-1)

/*
 * __PHX__ :: fixme
 * we should not just blindly set executable flags,
 * but it does help with ping. the clean way would be to find out
 * (and fix it) why stack doesn't have execution permissions
 */

itlb_tr_fill_workaround:
	l.ori	r4,r4,0xc0		// | (SPR_ITLBTR_UXE | ITLBTR_SXE)
itlb_tr_fill:
	l.mtspr	r2,r4,SPR_ITLBTR_BASE(0)
	/*
	 * fill ITLB MR register
	 */
	l.slli	r3, r3, 0xd		/* << PAGE_SHIFT => EA & PAGE_MASK */
	l.ori	r4,r3,0x1		// set hardware valid bit: ITLB_MR entry
	l.mtspr	r2,r4,SPR_ITLBMR_BASE(0)

	EXCEPTION_LOAD_GPR2
	EXCEPTION_LOAD_GPR3
	EXCEPTION_LOAD_GPR4
	l.rfe

i_pmd_none:
i_pte_not_present:
	EXCEPTION_LOAD_GPR2
	EXCEPTION_LOAD_GPR3
	EXCEPTION_LOAD_GPR4
	EXCEPTION_HANDLE(_itlb_miss_page_fault_handler)

/* ==============================================[ boot tlb handlers ]=== */


/* =================================================[ debugging aids ]=== */

	// Scratch buffer the IMMU workaround copies patched instructions into.
	.align 64
_immu_trampoline:
	.space 64
_immu_trampoline_top:

// Byte offsets of the six instruction slots inside _immu_trampoline.
#define TRAMP_SLOT_0		(0x0)
#define TRAMP_SLOT_1		(0x4)
#define TRAMP_SLOT_2		(0x8)
#define TRAMP_SLOT_3		(0xc)
#define TRAMP_SLOT_4		(0x10)
#define TRAMP_SLOT_5		(0x14)
#define TRAMP_FRAME_SIZE	(0x18)

/*
 * _immu_trampoline_workaround: re-execute the two instructions around an
 * exception address (EEA-4 and EEA) out of the _immu_trampoline buffer.
 *
 * Copies the two instructions into the trampoline, rewrites any jump or
 * branch among them so its PC-relative offset (or link register value)
 * stays correct at the new location, pads the remaining slots with l.nop
 * (0x15000000), points EPCR at the trampoline, and invalidates the
 * icache lines covering it.
 *
 * In:    r2 = EEA (effective exception address)
 * Out:   EPCR set to _immu_trampoline
 * Uses:  r3-r6, r14, r21, r23; returns via r9
 */
ENTRY(_immu_trampoline_workaround)
	// r2 EEA
	// r6 is physical EEA
	tophys(r6,r2)

	LOAD_SYMBOL_2_GPR(r5,_immu_trampoline)
	tophys	(r3,r5)			// r3 is trampoline (physical)

	// pre-fill slots 0,1,4,5 with l.nop (0x15000000)
	LOAD_SYMBOL_2_GPR(r4,0x15000000)
	l.sw	TRAMP_SLOT_0(r3),r4
	l.sw	TRAMP_SLOT_1(r3),r4
	l.sw	TRAMP_SLOT_4(r3),r4
	l.sw	TRAMP_SLOT_5(r3),r4

	// EPC = EEA - 0x4
	l.lwz	r4,0x0(r6)		// load op @ EEA + 0x0 (fc address)
	l.sw	TRAMP_SLOT_3(r3),r4	// store it to _immu_trampoline_data
	l.lwz	r4,-0x4(r6)		// load op @ EEA - 0x4 (f8 address)
	l.sw	TRAMP_SLOT_2(r3),r4	// store it to _immu_trampoline_data

	// dispatch on the opcode (top 6 bits) of the instruction at EEA-4
	l.srli	r5,r4,26		// check opcode for write access
	l.sfeqi	r5,0			// l.j
	l.bf	0f
	l.sfeqi	r5,0x11			// l.jr
	l.bf	1f
	l.sfeqi	r5,1			// l.jal
	l.bf	2f
	l.sfeqi	r5,0x12			// l.jalr
	l.bf	3f
	l.sfeqi	r5,3			// l.bnf
	l.bf	4f
	l.sfeqi	r5,4			// l.bf
	l.bf	5f
99:
	l.nop
	l.j	99b			// should never happen
	l.nop	1

	// r2 is EEA
	// r3 is trampoline address (physical)
	// r4 is instruction
	// r6 is physical(EEA)
	//
	// r5

2:	// l.jal

	/* 19 20 aa aa	l.movhi r9,0xaaaa
	 * a9 29 bb bb	l.ori	r9,0xbbbb
	 *
	 * where 0xaaaabbbb is EEA + 0x4 shifted right 2
	 */

	l.addi	r6,r2,0x4		// this is 0xaaaabbbb

	// l.movhi r9,0xaaaa
	l.ori	r5,r0,0x1920		// 0x1920 == l.movhi r9
	l.sh	(TRAMP_SLOT_0+0x0)(r3),r5
	l.srli	r5,r6,16
	l.sh	(TRAMP_SLOT_0+0x2)(r3),r5

	// l.ori r9,0xbbbb
	l.ori	r5,r0,0xa929		// 0xa929 == l.ori r9
	l.sh	(TRAMP_SLOT_1+0x0)(r3),r5
	l.andi	r5,r6,0xffff
	l.sh	(TRAMP_SLOT_1+0x2)(r3),r5

	/* fall through, need to set up new jump offset */


0:	// l.j
	l.slli	r6,r4,6			// original offset shifted left 6 - 2
//	l.srli	r6,r6,6			// original offset shifted right 2

	l.slli	r4,r2,4			// old jump position: EEA shifted left 4
//	l.srli	r4,r4,6			// old jump position: shifted right 2

	l.addi	r5,r3,0xc		// new jump position (physical)
	l.slli	r5,r5,4			// new jump position: shifted left 4

	// calculate new jump offset
	// new_off = old_off + (old_jump - new_jump)

	l.sub	r5,r4,r5		// old_jump - new_jump
	l.add	r5,r6,r5		// orig_off + (old_jump - new_jump)
	l.srli	r5,r5,6			// new offset shifted right 2

	// r5 is new jump offset
	// l.j has opcode 0x0...
	l.sw	TRAMP_SLOT_2(r3),r5	// write it back

	l.j	trampoline_out
	l.nop

/* ----------------------------- */

3:	// l.jalr

	/* 19 20 aa aa	l.movhi r9,0xaaaa
	 * a9 29 bb bb	l.ori	r9,0xbbbb
	 *
	 * where 0xaaaabbbb is EEA + 0x4 shifted right 2
	 */

	l.addi	r6,r2,0x4		// this is 0xaaaabbbb

	// l.movhi r9,0xaaaa
	l.ori	r5,r0,0x1920		// 0x1920 == l.movhi r9
	l.sh	(TRAMP_SLOT_0+0x0)(r3),r5
	l.srli	r5,r6,16
	l.sh	(TRAMP_SLOT_0+0x2)(r3),r5

	// l.ori r9,0xbbbb
	l.ori	r5,r0,0xa929		// 0xa929 == l.ori r9
	l.sh	(TRAMP_SLOT_1+0x0)(r3),r5
	l.andi	r5,r6,0xffff
	l.sh	(TRAMP_SLOT_1+0x2)(r3),r5

	l.lhz	r5,(TRAMP_SLOT_2+0x0)(r3)	// load hi part of jump instruction
	l.andi	r5,r5,0x3ff		// clear out opcode part
	l.ori	r5,r5,0x4400		// opcode changed from l.jalr -> l.jr
	l.sh	(TRAMP_SLOT_2+0x0)(r3),r5	// write it back

	/* fall through */

1:	// l.jr
	l.j	trampoline_out
	l.nop

/* ----------------------------- */

4:	// l.bnf
5:	// l.bf
	l.slli	r6,r4,6			// original offset shifted left 6 - 2
//	l.srli	r6,r6,6			// original offset shifted right 2

	l.slli	r4,r2,4			// old jump position: EEA shifted left 4
//	l.srli	r4,r4,6			// old jump position: shifted right 2

	l.addi	r5,r3,0xc		// new jump position (physical)
	l.slli	r5,r5,4			// new jump position: shifted left 4

	// calculate new jump offset
	// new_off = old_off + (old_jump - new_jump)

	l.add	r6,r6,r4		// (orig_off + old_jump)
	l.sub	r6,r6,r5		// (orig_off + old_jump) - new_jump
	l.srli	r6,r6,6			// new offset shifted right 2

	// r6 is new jump offset
	l.lwz	r4,(TRAMP_SLOT_2+0x0)(r3)	// load jump instruction
	l.srli	r4,r4,16
	l.andi	r4,r4,0xfc00		// get opcode part
	l.slli	r4,r4,16
	l.or	r6,r4,r6		// l.b(n)f new offset
	l.sw	TRAMP_SLOT_2(r3),r6	// write it back

	/* we need to add l.j to EEA + 0x8 */
	tophys	(r4,r2)			// may not be needed (due to shifts down)
	l.addi	r4,r4,(0x8 - 0x8)	// jump target = r2 + 0x8 (compensate for 0x8)
					// jump position = r5 + 0x8 (0x8 compensated)
	l.sub	r4,r4,r5		// jump offset = target - new_position + 0x8

	l.slli	r4,r4,4			// the amount of info in immediate of jump
	l.srli	r4,r4,6			// jump instruction with offset
	l.sw	TRAMP_SLOT_4(r3),r4	// write it to 4th slot

	/* fall through */

trampoline_out:
	// set up new EPC to point to our trampoline code
	LOAD_SYMBOL_2_GPR(r5,_immu_trampoline)
	l.mtspr	r0,r5,SPR_EPCR_BASE

	// immu_trampoline is (4x) CACHE_LINE aligned
	// and only 6 instructions long,
	// so we need to invalidate only 2 lines

	/* Establish cache block size
	   If BS=0, 16;
	   If BS=1, 32;
	   r14 contains block size
	*/
	l.mfspr	r21,r0,SPR_ICCFGR
	l.andi	r21,r21,SPR_ICCFGR_CBS
	l.srli	r21,r21,7
	l.ori	r23,r0,16
	l.sll	r14,r23,r21

	// invalidate the two icache lines covering the trampoline
	l.mtspr	r0,r5,SPR_ICBIR
	l.add	r5,r5,r14
	l.mtspr	r0,r5,SPR_ICBIR

	l.jr	r9
	l.nop


/*
 * DSCR: prints a string referenced by r3.
 *
 * PRMS: r3             - address of the first character of null
 *                        terminated string to be printed
 *
 * PREQ: UART at UART_BASE_ADD has to be initialized
 *
 * POST: caller should be aware that r3, r9 are changed
 */
ENTRY(_emergency_print)
	EMERGENCY_PRINT_STORE_GPR4
	EMERGENCY_PRINT_STORE_GPR5
	EMERGENCY_PRINT_STORE_GPR6
	EMERGENCY_PRINT_STORE_GPR7
2:
	l.lbz	r7,0(r3)		// r7 = next character
	l.sfeq	r7,r0			// NUL terminator?
	l.bf	9f
	l.nop

// putc:
	l.movhi	r4,hi(UART_BASE_ADD)

	// busy-wait for THRE (0x20) in the line status register (offset 5)
	l.addi	r6,r0,0x20
1:	l.lbz	r5,5(r4)
	l.andi	r5,r5,0x20
	l.sfeq	r5,r6
	l.bnf	1b
	l.nop

	l.sb	0(r4),r7		// write char to the TX holding register

	// wait for both THRE|TEMT (0x60) — transmitter completely idle
	l.addi	r6,r0,0x60
1:	l.lbz	r5,5(r4)
	l.andi	r5,r5,0x60
	l.sfeq	r5,r6
	l.bnf	1b
	l.nop

	/* next character */
	l.j	2b
	l.addi	r3,r3,0x1		// delay slot: advance string pointer

9:
	EMERGENCY_PRINT_LOAD_GPR7
	EMERGENCY_PRINT_LOAD_GPR6
	EMERGENCY_PRINT_LOAD_GPR5
	EMERGENCY_PRINT_LOAD_GPR4
	l.jr	r9
	l.nop

/*
 * _emergency_print_nr: print the 32-bit value in r3 as hex, with
 * leading zeros suppressed (a lone 0 is still printed).
 *
 * PREQ: UART at UART_BASE_ADD has to be initialized
 * POST: r3, r9 are changed; r4-r8 saved/restored via shadow slots
 */
ENTRY(_emergency_print_nr)
	EMERGENCY_PRINT_STORE_GPR4
	EMERGENCY_PRINT_STORE_GPR5
	EMERGENCY_PRINT_STORE_GPR6
	EMERGENCY_PRINT_STORE_GPR7
	EMERGENCY_PRINT_STORE_GPR8

	l.addi	r8,r0,32		// shift register (bits remaining)

1:	/* remove leading zeros */
	l.addi	r8,r8,-0x4
	l.srl	r7,r3,r8
	l.andi	r7,r7,0xf		// r7 = current nibble

	/* don't skip the last zero if number == 0x0 */
	l.sfeqi	r8,0x4
	l.bf	2f
	l.nop

	l.sfeq	r7,r0
	l.bf	1b
	l.nop

2:
	l.srl	r7,r3,r8

	l.andi	r7,r7,0xf
	l.sflts	r8,r0			// all nibbles printed?
	l.bf	9f

	l.sfgtui r7,0x9			// nibble > 9 -> needs 'a'..'f'
	l.bnf	8f
	l.nop
	l.addi	r7,r7,0x27		// 0x27 + 0x30 below = 'a' - 10

8:
	l.addi	r7,r7,0x30		// + '0'
// putc:
	l.movhi	r4,hi(UART_BASE_ADD)

	// busy-wait for THRE (0x20) in the line status register (offset 5)
	l.addi	r6,r0,0x20
1:	l.lbz	r5,5(r4)
	l.andi	r5,r5,0x20
	l.sfeq	r5,r6
	l.bnf	1b
	l.nop

	l.sb	0(r4),r7		// write char to the TX holding register

	// wait for both THRE|TEMT (0x60) — transmitter completely idle
	l.addi	r6,r0,0x60
1:	l.lbz	r5,5(r4)
	l.andi	r5,r5,0x60
	l.sfeq	r5,r6
	l.bnf	1b
	l.nop

	/* next character */
	l.j	2b
	l.addi	r8,r8,-0x4		// delay slot: next nibble

9:
	EMERGENCY_PRINT_LOAD_GPR8
	EMERGENCY_PRINT_LOAD_GPR7
	EMERGENCY_PRINT_LOAD_GPR6
	EMERGENCY_PRINT_LOAD_GPR5
	EMERGENCY_PRINT_LOAD_GPR4
	l.jr	r9
	l.nop


/*
 * This should be used for debugging only.
 * It messes up the Linux early serial output
 * somehow, so use it sparingly and essentially
 * only if you need to debug something that goes wrong
 * before Linux gets the early serial going.
 *
 * Furthermore, you'll have to make sure you set the
 * UART_DIVISOR correctly according to the system
 * clock rate.
 *
 *
 */


#define SYS_CLK            20000000
//#define SYS_CLK            1843200
#define OR32_CONSOLE_BAUD  115200
#define UART_DIVISOR       SYS_CLK/(16*OR32_CONSOLE_BAUD)

/*
 * _early_uart_init: minimal 8250-style UART setup for the emergency
 * print helpers above: enable/clear FIFOs, mask interrupts, program
 * 8N1 and the baud divisor.  Clobbers r3-r5; returns via r9.
 */
ENTRY(_early_uart_init)
	l.movhi	r3,hi(UART_BASE_ADD)

	l.addi	r4,r0,0x7
	l.sb	0x2(r3),r4		// FCR: enable + clear RX/TX FIFOs

	l.addi	r4,r0,0x0
	l.sb	0x1(r3),r4		// IER: all interrupts off

	l.addi	r4,r0,0x3
	l.sb	0x3(r3),r4		// LCR: 8 data bits, no parity, 1 stop

	l.lbz	r5,3(r3)
	l.ori	r4,r5,0x80		// set DLAB to access divisor latches
	l.sb	0x3(r3),r4
	l.addi	r4,r0,((UART_DIVISOR>>8) & 0x000000ff)
	l.sb	UART_DLM(r3),r4
	l.addi	r4,r0,((UART_DIVISOR) & 0x000000ff)
	l.sb	UART_DLL(r3),r4
	l.sb	0x3(r3),r5		// restore LCR (clears DLAB)

	l.jr	r9
	l.nop

	// Secondary-CPU exception vector area: every vector (spaced through
	// the 0x800 offset) falls into the stub below.
	.align	0x1000
	.global _secondary_evbar
_secondary_evbar:

	.space 0x800
	/* Just disable interrupts and Return */
	l.ori	r3,r0,SPR_SR_SM
	l.mtspr	r0,r3,SPR_ESR_BASE
	l.rfe


	.section .rodata
_string_unhandled_exception:
	.string "\n\rRunarunaround: Unhandled exception 0x\0"

_string_epc_prefix:
	.string ": EPC=0x\0"

_string_nl:
	.string "\n\r\0"


/* ========================================[ page aligned structures ]=== */

/*
 * .data section should be page aligned
 *	(look into arch/openrisc/kernel/vmlinux.lds.S)
 */
	.section .data,"aw"
	.align	8192
	.global	empty_zero_page
empty_zero_page:
	.space	8192

	.global	swapper_pg_dir
swapper_pg_dir:
	.space	8192

	.global	_unhandled_stack
_unhandled_stack:
	.space	8192
_unhandled_stack_top:

/* ============================================================[ EOF ]=== */