Searched refs:R13 (Results 1 – 20 of 20) sorted by relevance
63 memd(R0 + #_PT_R1312) = R13:12; \
69 R13 = lc1; } \
78 { memd(R0 + #_PT_LC1SA1) = R13:12; \
107 memd(R0 + #_PT_R1312) = R13:12; \
114 R13:12 = C3:2; } \
119 memd(R0 + #_PT_LC1SA1) = R13:12; \
143 { R13:12 = memd(R0 + #_PT_LC1SA1); \
150 lc1 = R13; } \
157 { R13:12 = memd(R0 + #_PT_R1312); \
176 R13:12 = memd(R0 + #_PT_LC1SA1); \
[all …]
28 [R13 >> 3] = HOST_R13,
68 case R13: in putreg()
145 case R13: in getreg()
68 DEFINE_LONGS(HOST_R13, R13); in foo()
190 GETREG(R13, r13); in copy_sc_from_user()
277 PUTREG(R13, r13); in copy_sc_to_user()
30 #define R13 16 macro
22 COPY(R12); COPY(R13); COPY(R14); COPY(R15); in get_regs_from_mc()
58 #define R13 16 macro
209 ld r12,STK_PARAM(R13)(r1) /* put arg9 in R12 */
249 ld r12,STK_PARAM(R13)(r1) /* put arg9 in R12 */
143 movq %r13, R13(%rsp)
178 movq R13(%rsp), %r13
441 CFI_REL_OFFSET r13, R13+\offset
497 movq_cfi r13, R13+16
523 movq_cfi r13, R13+8
870 movq_cfi_restore R13+8, r13
1602 movq_cfi r13, R13+8
243 movem [$sp+], $r13 ; Registers R0-R13.
417 subq 14*4, $sp ; Make room for R0-R13.
418 movem $r13, [$sp] ; Push R0-R13.
497 move.d [$acr], $r13 ; Restore R13
314 R12, R13, SP, ACR, enumerator
238 PPC_STL r5, VCPU_GPR(R13)(r4)
344 PPC_STL r3, VCPU_GPR(R13)(r11)
373 PPC_STL r13, VCPU_GPR(R13)(r11)
678 PPC_LL r13, VCPU_GPR(R13)(r4)
225 stw r13, VCPU_GPR(R13)(r4)
438 lwz r13, VCPU_GPR(R13)(r4)
563 ld r13, VCPU_GPR(R13)(r4)
634 std r3, VCPU_GPR(R13)(r9)
155 #define data70 R13:12 /* lo 8 bytes of non-aligned transfer */
385 R12, R13, SP, PC, enumerator
998 st8.spill [r17]=r20,PT(R13)-PT(R1) // save original r1
1003 .mem.offset 8,0; st8.spill [r17]=r13,PT(R15)-PT(R13) // save r13
1176 ld8.fill r13=[r17],PT(R2)-PT(R13) //load r13
540 cd: BSWAP RBP/EBP/R13/R13D