Lines matching refs: r6
59 #define EMERGENCY_PRINT_STORE_GPR6 l.sw 0x28(r0),r6
60 #define EMERGENCY_PRINT_LOAD_GPR6 l.lwz r6,0x28(r0)
90 #define EXCEPTION_STORE_GPR6 l.sw 0x74(r0),r6
91 #define EXCEPTION_LOAD_GPR6 l.lwz r6,0x74(r0)
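
These macro pairs park r6 in a fixed scratch word in low physical memory (0x28 for the emergency-print path, 0x74 for exception entry), since no usable stack exists yet at those points. A one-line C model of each macro, purely illustrative, with r6 standing in for the register:

    *(volatile unsigned long *)0x74 = r6;    /* EXCEPTION_STORE_GPR6 */
    r6 = *(volatile unsigned long *)0x74;    /* EXCEPTION_LOAD_GPR6 */
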
459 CLEAR_GPR(r6)
526 LOAD_SYMBOL_2_GPR(r6,SPR_ITLBMR_BASE(0))
530 l.mtspr r6,r0,0x0
533 l.addi r6,r6,1
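
Lines 526-533 invalidate the ITLB by zeroing each match register; note that in l.mtspr r6,r0,0x0 the SPR index itself comes from r6, so incrementing r6 steps through the sets. A minimal C model, assuming a hypothetical mtspr() helper; the loop bound (the number of ITLB sets) lives in a register that never touches r6 and so is absent from this listing:

    for (unsigned spr = SPR_ITLBMR_BASE(0); spr < SPR_ITLBMR_BASE(0) + nsets; spr++)
        mtspr(spr, 0);    /* match register = 0 -> entry invalid */
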
595 CLEAR_GPR(r6)
645 l.mfspr r6,r0,SPR_SR // current SR
648 l.and r5,r6,r5 // r5 <- SR with ICE cleared (r5 preloaded with ~SPR_SR_ICE)
672 l.addi r6,r0,0 // block address := 0
678 l.mtspr r0,r6,SPR_ICBIR // invalidate the block at address r6
679 l.sfne r6,r5 // loop until r6 reaches the cache size in r5
681 l.add r6,r6,r14 // step by the block size in r14
685 l.mfspr r6,r0,SPR_SR // re-read SR
686 l.ori r6,r6,SPR_SR_ICE // set the enable bit
687 l.mtspr r0,r6,SPR_SR // instruction cache on
711 l.mfspr r6,r0,SPR_SR // current SR
714 l.and r5,r6,r5 // r5 <- SR with DCE cleared (r5 preloaded with ~SPR_SR_DCE)
738 l.addi r6,r0,0 // block address := 0
741 l.mtspr r0,r6,SPR_DCBIR // invalidate the block at address r6
742 l.sfne r6,r5 // loop until r6 reaches the cache size in r5
744 l.add r6,r6,r14 // step by the block size in r14
747 l.mfspr r6,r0,SPR_SR // re-read SR
748 l.ori r6,r6,SPR_SR_DCE // set the enable bit
749 l.mtspr r0,r6,SPR_SR // data cache on
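
The two windows above (672-687 for the instruction cache, 738-749 for the data cache) are the same invalidate-then-enable sequence: r6 walks block addresses from 0 to the cache size in r5 in steps of the block size in r14, writing each address to the invalidate register, after which the enable bit is OR'd into SR. A C sketch of both, assuming hypothetical mtspr()/mfspr() helpers, with block_size and cache_size standing in for r14 and r5 (which are set up outside this listing):

    for (unsigned long addr = 0; addr != cache_size; addr += block_size)
        mtspr(SPR_ICBIR, addr);                  /* SPR_DCBIR for the data cache */
    mtspr(SPR_SR, mfspr(SPR_SR) | SPR_SR_ICE);   /* SPR_SR_DCE for the data cache */
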
798 l.mfspr r6,r0,SPR_ESR_BASE // saved SR at exception entry
799 l.andi r6,r6,SPR_SR_SM // are we in kernel mode?
800 l.sfeqi r6,0 // flag set if r6 == 0 (user mode); r6 == 0x1 --> SM
818 CLEAR_GPR(r6)
822 l.mfspr r6, r0, SPR_DMMUCFGR
823 l.andi r6, r6, SPR_DMMUCFGR_NTS // isolate the NTS field
824 l.srli r6, r6, SPR_DMMUCFGR_NTS_OFF // r6 = log2(number of sets)
826 l.sll r5, r5, r6 // r5 = number of DMMU sets
827 l.addi r6, r5, -1 // r6 = nsets mask
828 l.and r2, r3, r6 // r2 <- r3 & (nsets - 1), i.e. r3 % nsets
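
Lines 822-828 turn the NTS field, which encodes log2 of the number of DMMU sets, into a set index for the faulting address. In C, assuming r5 was preloaded with 1 and r3 with the page index (EA divided by the page size) by instructions outside this r6 listing:

    unsigned nts   = (mfspr(SPR_DMMUCFGR) & SPR_DMMUCFGR_NTS) >> SPR_DMMUCFGR_NTS_OFF;
    unsigned nsets = 1u << nts;                   /* NTS = log2(number of sets) */
    unsigned set   = page_index & (nsets - 1);    /* r2 <- r3 & (nsets - 1) */
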
830 l.or r6,r6,r4 // r6 <- r4 (plus leftover mask bits, overwritten next)
831 l.ori r6,r6,~(VPN_MASK) // r6 <- VPN:VPN.xfff - set the bits below VPN_MASK to 1
834 l.and r5,r5,r6 // r5 <- VPN:VPN.x001 - the finished DTLBMR entry
838 LOAD_SYMBOL_2_GPR(r6,0xbfffffff)
839 l.sfgeu r6,r4 // flag if r6 >= r4 (if 0xbfffffff >= EA)
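
Lines 830-839 then assemble the DTLBMR value: the EA's bits below VPN_MASK are forced to 1 and the result is AND'ed with a constant mask kept in r5 (loaded outside this listing), leaving the VPN plus the mask's low flag bits; the final compare against 0xbfffffff classifies the EA by region so the (unlisted) branch can pick that region's translation flags. The entry computation in C, with DTLB_MR_MASK standing in for the constant in r5:

    unsigned long dtlbmr = (ea | ~VPN_MASK) & DTLB_MR_MASK;
    /* equivalently: (ea & VPN_MASK) | (DTLB_MR_MASK & ~VPN_MASK) */
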
894 l.mfspr r6,r0,SPR_ESR_BASE // saved SR at exception entry
895 l.andi r6,r6,SPR_SR_SM // are we in kernel mode?
896 l.sfeqi r6,0 // flag set if r6 == 0 (user mode); r6 == 0x1 --> SM
905 CLEAR_GPR(r6)
909 l.mfspr r6, r0, SPR_IMMUCFGR
910 l.andi r6, r6, SPR_IMMUCFGR_NTS // isolate the NTS field
911 l.srli r6, r6, SPR_IMMUCFGR_NTS_OFF // r6 = log2(number of sets)
913 l.sll r5, r5, r6 // r5 = number of IMMU sets from IMMUCFGR
914 l.addi r6, r5, -1 // r6 = nsets mask
915 l.and r2, r3, r6 // r2 <- r3 & (nsets - 1), i.e. r3 % nsets
917 l.or r6,r6,r4 // r6 <- r4 (plus leftover mask bits, overwritten next)
918 l.ori r6,r6,~(VPN_MASK) // r6 <- VPN:VPN.xfff - set the bits below VPN_MASK to 1
921 l.and r5,r5,r6 // r5 <- VPN:VPN.x001 - the finished ITLBMR entry
931 LOAD_SYMBOL_2_GPR(r6,0x0fffffff)
932 l.sfgeu r6,r4 // flag if r6 >= r4 (if 0x0fffffff >= EA)
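
The ITLB path (894-932) repeats the DTLB computation with the IMMU registers (SPR_IMMUCFGR, the ITLBMR entry) and a different region bound, 0x0fffffff instead of 0xbfffffff; the C sketches after lines 828 and 839 carry over with only those substitutions.
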
1033 l.mfspr r6, r0, SPR_DMMUCFGR
1034 l.andi r6, r6, SPR_DMMUCFGR_NTS // isolate the NTS field
1035 l.srli r6, r6, SPR_DMMUCFGR_NTS_OFF // r6 = log2(number of sets)
1037 l.sll r3, r3, r6 // r3 = number of DMMU sets (from DMMUCFGR)
1038 l.addi r6, r3, -1 // r6 = nsets mask
1039 l.and r5, r5, r6 // set index: & (NUM_TLB_ENTRIES-1)
1145 l.mfspr r6, r0, SPR_IMMUCFGR
1146 l.andi r6, r6, SPR_IMMUCFGR_NTS // isolate the NTS field
1147 l.srli r6, r6, SPR_IMMUCFGR_NTS_OFF // r6 = log2(number of sets)
1149 l.sll r3, r3, r6 // r3 = number of IMMU sets (from IMMUCFGR)
1150 l.addi r6, r3, -1 // r6 = nsets mask
1151 l.and r5, r5, r6 // set index: & (NUM_TLB_ENTRIES-1)
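
Lines 1033-1039 and 1145-1151 recompute the same set index inside the TLB-miss handlers: nsets = 1 << NTS, then the page index is AND'ed with (nsets - 1), exactly as in the sketch after line 828.
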
1219 tophys(r6,r2)
1231 l.lwz r4,0x0(r6) // load op @ EEA + 0x0 (fc address)
1233 l.lwz r4,-0x4(r6) // load op @ EEA - 0x4 (f8 address)
1269 l.addi r6,r2,0x4 // this is 0xaaaabbbb (split into 16-bit halves below)
1274 l.srli r5,r6,16 // hi half, the 0xaaaa part
1280 l.andi r5,r6,0xffff // lo half, the 0xbbbb part
1287 l.slli r6,r4,6 // original offset field shifted left 6 (== byte offset << 4)
1300 l.add r5,r6,r5 // orig_off + (old_jump - new_jump)
1320 l.addi r6,r2,0x4 // this is 0xaaaabbbb (split into 16-bit halves below)
1325 l.srli r5,r6,16 // hi half, the 0xaaaa part
1331 l.andi r5,r6,0xffff // lo half, the 0xbbbb part
1349 l.slli r6,r4,6 // original offset field shifted left 6 (== byte offset << 4)
1361 l.add r6,r6,r4 // (orig_off + old_jump)
1362 l.sub r6,r6,r5 // (orig_off + old_jump) - new_jump
1363 l.srli r6,r6,6 // scale back: new offset field (byte offset >> 2)
1370 l.or r6,r4,r6 // l.b(n)f new offset
1371 l.sw TRAMP_SLOT_2(r3),r6 // write it back
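
Lines 1269-1371 repair a PC-relative jump or branch whose instruction word is copied into a trampoline slot. The 26-bit offset field counts words, so shifting the instruction left by 6 both drops the opcode and scales the offset to match addresses shifted left by 4 (a byte offset is the word offset << 2, hence word << 6 == byte << 4). A sketch of the arithmetic only, since the real code reuses registers differently; insn is the original word, old_pc/new_pc its original and trampoline addresses:

    #include <stdint.h>

    /* recompute the 26-bit word offset of a relocated l.j/l.bf-class insn */
    static uint32_t patch_jump(uint32_t insn, uint32_t old_pc, uint32_t new_pc)
    {
        uint32_t scaled = (insn << 6) + (old_pc << 4) - (new_pc << 4);
        return (insn & 0xfc000000u) | (scaled >> 6);    /* opcode | new offset */
    }

All three terms carry the offset at the same scale, so the 26-bit field wraps correctly with no explicit sign extension.
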
1437 l.addi r6,r0,0x20
1440 l.sfeq r5,r6
1446 l.addi r6,r0,0x60
1449 l.sfeq r5,r6
1505 l.addi r6,r0,0x20
1508 l.sfeq r5,r6
1514 l.addi r6,r0,0x60
1517 l.sfeq r5,r6