/arch/sh/kernel/
  head_64.S
    186  movi MMUIR_FIRST, r21
    189  putcfg r21, 0, ZERO /* Clear MMUIR[n].PTEH.V */
    190  addi r21, MMUIR_STEP, r21
    191  bne r21, r22, tr1
    195  movi MMUDR_FIRST, r21
    198  putcfg r21, 0, ZERO /* Clear MMUDR[n].PTEH.V */
    199  addi r21, MMUDR_STEP, r21
    200  bne r21, r22, tr1
    203  movi MMUIR_FIRST, r21
    206  putcfg r21, 1, r22 /* Set MMUIR[0].PTEL */
    [all …]

/arch/sh/boot/compressed/
  head_64.S
    68  movi ITLB_FIXED, r21
    70  1: putcfg r21, 0, r63 /* Clear MMUIR[n].PTEH.V */
    71  addi r21, TLB_STEP, r21
    72  bne r21, r22, tr1
    76  movi DTLB_FIXED, r21
    78  1: putcfg r21, 0, r63 /* Clear MMUDR[n].PTEH.V */
    79  addi r21, TLB_STEP, r21
    80  bne r21, r22, tr1
    83  movi ITLB_FIXED, r21
    85  putcfg r21, 1, r22 /* Set MMUIR[0].PTEL */
    [all …]

/arch/sh/lib64/
  sdivsi3.S
    13  shari r25, 58, r21 /* extract 5(6) bit index (s2.4 with hole -1..1) */
    16  ldx.ub r20, r21, r19 /* u0.8 */
    18  shlli r21, 1, r21
    20  ldx.w r20, r21, r21 /* s2.14 */
    23  sub r21, r19, r19 /* some 11 bit inverse in s1.14 */
    24  muls.l r19, r19, r21 /* u0.28 */
    27  muls.l r25, r21, r18 /* s2.58 */
    41  xor r21, r0, r21 /* You could also use the constant 1 << 27. */
    42  add r21, r25, r21
    43  sub r21, r19, r21
    [all …]

  udivdi3.S
    9  movi 0xffffffffffffbaf1,r21 /* .l shift count 17. */
    10  sub r21,r5,r1
    35  mshalds.l r1,r21,r1
    42  shlri r2,22,r21
    43  mulu.l r21,r1,r21
    46  shlrd r21,r0,r21
    47  mulu.l r21,r3,r5
    48  add r8,r21,r8
    49  mcmpgt.l r21,r63,r21 // See Note 1
    51  mshfhi.l r63,r21,r21
    [all …]

  udivsi3.S
    16  sub r20,r25,r21
    17  mmulfx.w r21,r21,r19
    18  mshflo.w r21,r63,r21
    23  msub.w r21,r19,r19
    30  addi r19,-2,r21
    31  mulu.l r4,r21,r18
    33  shlli r21,15,r21
    36  mmacnfx.wl r25,r19,r21
    40  mulu.l r25,r21,r19
    49  mulu.l r25,r21,r19

  strcpy.S
    35  sub r3, r2, r21
    36  addi r21, 8, r20
    37  ldx.q r0, r21, r5
    87  ldx.q r0, r21, r5

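The annotations in the sdivsi3.S excerpt (u0.8, s1.14, u0.28, s2.58) are fixed-point formats: these SHmedia helpers divide by multiplying with a progressively refined reciprocal estimate instead of a hardware divide. As a rough illustration of that reciprocal-multiply shape only (a plain C sketch with a made-up helper name, not the table-driven, multiply-accumulate algorithm actually used in udivsi3.S):

    #include <assert.h>
    #include <stdint.h>

    /* Illustrative sketch only: unsigned 32-bit division via a 32.32
     * fixed-point reciprocal estimate plus a small correction loop.  The
     * real udivsi3.S builds its estimate from a lookup table and the
     * multiply-accumulate instructions shown above; this sketch borrows a
     * 64-bit divide just to obtain the estimate. */
    static uint32_t udiv_via_reciprocal(uint32_t n, uint32_t d)
    {
        assert(d != 0);

        /* floor(2^32 / d): a fixed-point approximation of 1/d. */
        uint64_t recip = ((uint64_t)1 << 32) / d;

        /* Quotient estimate: high 32 bits of n * (1/d).  It may undershoot
         * floor(n / d) by a step or two but never overshoots. */
        uint32_t q = (uint32_t)(((uint64_t)n * recip) >> 32);

        /* Make up the missing steps, if any. */
        while (((uint64_t)q + 1) * d <= n)
            q++;

        return q;
    }

The real routine earns its keep by building the initial estimate from the lookup table and mmacnfx.wl seen above rather than the 64-bit divide used here for brevity.
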
/arch/ia64/lib/
  flush.S
    29  mov r21=1
    36  shl r21=r21,r20 // r21: stride size of the i-cache(s)
    52  add r24=r21,r24 // we flush "stride size" bytes per iteration
    81  mov r21=1
    89  shl r21=r21,r20 // r21: stride size of the i-cache(s)
    107  add r24=r21,r24 // we flush "stride size" bytes per iteration

  ip_fast_csum.S
    43  (p7) ld4 r21=[r15],8
    50  add r20=r20,r21
    98  ld4 r21=[in1],4
    108  add r16=r20,r21

  memcpy_mck.S
    41  #define src_pre_l2 r21
    172  and r21=-8,tmp
    178  add src0=src0,r21 // setting up src pointer
    179  add dst0=dst0,r21 // setting up dest pointer
    294  shr.u r21=in2,7 // this much cache line
    299  cmp.lt p7,p8=1,r21
    300  add cnt=-1,r21
    362  (p6) or r21=r28,r27
    392  EX(.ex_handler, (p6) st8 [dst1]=r21,8) // more than 8 byte to copy
    512  shrp r21=r22,r38,shift; /* speculative work */ \
    [all …]

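The flush.S comments above already state the loop's contract: r21 holds the i-cache stride (a power of two, built as 1 << r20), and each iteration flushes "stride size" bytes. A hedged C sketch of that loop shape (function and placeholder names are illustrative, not the kernel's):

    /* Placeholder for the IA-64 fc.i (flush instruction cache) operation;
     * illustrative only. */
    static void flush_one_line(unsigned long addr)
    {
        (void)addr;
    }

    /* Sketch of the flush.S structure: walk [start, end) in i-cache
     * stride-size steps, flushing one line per iteration.  Assumes stride
     * is a power of two, as in the excerpt. */
    static void flush_icache_range_sketch(unsigned long start,
                                          unsigned long end,
                                          unsigned long stride)
    {
        unsigned long addr;

        for (addr = start & ~(stride - 1); addr < end; addr += stride)
            flush_one_line(addr);
    }
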
/arch/parisc/hpux/
  gate.S
    32  ldw -52(%r30), %r21 ;! 5th argument
    66  STREG %r21, TASK_PT_GR21(%r1) /* 5th argument */
    86  stw %r21, -52(%r30) ;! 5th argument
    91  ldil L%hpux_call_table, %r21
    92  ldo R%hpux_call_table(%r21), %r21
    95  LDREGX %r22(%r21), %r21
    97  be 0(%sr7,%r21)

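Lines 91-97 of the gate.S excerpt are a classic table dispatch: form the address of hpux_call_table, index it with the syscall number, and branch through the resulting pointer. A hedged C rendering of that pattern (the typedef and the dispatch() helper are illustrative; only the table name comes from the excerpt, and the real entries are assembly entry points, not C functions with this prototype):

    /* Illustrative syscall-handler signature. */
    typedef long (*syscall_fn)(long, long, long, long, long);

    extern syscall_fn hpux_call_table[];    /* table name taken from gate.S */

    static long dispatch(long nr, long a0, long a1, long a2, long a3, long a4)
    {
        /* LDREGX indexes the table; be branches through the pointer. */
        return hpux_call_table[nr](a0, a1, a2, a3, a4);
    }
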
/arch/parisc/kernel/
  pacache.S
    81  LDREG ITLB_SID_STRIDE(%r1), %r21
    94  add %r21, %r20, %r20 /* increment space */
    117  add %r21, %r20, %r20 /* increment space */
    124  LDREG DTLB_SID_STRIDE(%r1), %r21
    137  add %r21, %r20, %r20 /* increment space */
    160  add %r21, %r20, %r20 /* increment space */
    301  ldd 16(%r25), %r21
    308  std %r21, 16(%r26)
    311  ldd 48(%r25), %r21
    318  std %r21, 48(%r26)
    [all …]

  syscall.S
    110  depdi 0, 31, 32, %r21
    152  STREG %r21, TASK_PT_GR21(%r1)
    179  stw %r21, -56(%r30) /* 6th argument */
    309  LDREG TASK_PT_GR21(%r1), %r21
    416  LDREGX %r20(%sr2,r28), %r21 /* Scratch use of r21 */
    419  be,n 0(%sr2,%r21)
    422  ldo -ENOSYS(%r0),%r21 /* set errno */
    515  mfctl %cr27, %r21 /* Get current thread register */
    516  cmpb,<>,n %r21, %r28, cas_lock /* Called recursive? */
    518  ldo -EDEADLOCK(%r0), %r21
    [all …]

/arch/ia64/kvm/
  optvfault.S
    27  add r16=VMM_VPD_BASE_OFFSET,r21; \
    50  adds r29 = VMM_VCPU_VSA_BASE_OFFSET,r21
    123  add r18=VMM_VCPU_ITC_OFS_OFFSET, r21
    124  add r16=VMM_VCPU_LAST_ITC_OFFSET,r21
    157  add r27=VMM_VCPU_VRR0_OFFSET,r21
    188  add r27=VMM_VCPU_VRR0_OFFSET,r21
    234  adds r16=VMM_VCPU_MODE_FLAGS_OFFSET,r21
    235  (p6) adds r17=VMM_VCPU_META_SAVED_RR0_OFFSET,r21
    264  add r22=VMM_VCPU_MODE_FLAGS_OFFSET,r21
    296  add r26=VMM_VCPU_META_RR0_OFFSET,r21
    [all …]

  vmm_ivt.S
    371  .mem.offset 8,0; st8.spill [r3]=r21,16
    430  (p6) add r29 = VMM_VCPU_SAVED_GP_OFFSET,r21
    606  adds r16 = VMM_VCPU_SAVED_GP_OFFSET,r21
    609  adds r17 = VMM_VCPU_GP_OFFSET, r21
    628  adds r16 = VMM_VCPU_SAVED_GP_OFFSET,r21
    633  adds r16 = VMM_VCPU_CAUSE_OFFSET,r21
    634  adds r17 = VMM_VCPU_OPCODE_OFFSET,r21
    642  adds r18=VMM_VPD_BASE_OFFSET,r21
    885  adds r16 = VMM_VCPU_CAUSE_OFFSET,r21
    886  adds r17 = VMM_VCPU_OPCODE_OFFSET,r21
    [all …]

  kvm_minstate.h
    56  add r25 = VMM_VPD_BASE_OFFSET, r21; \
    69  #define KVM_MINSTATE_GET_CURRENT(reg) mov reg=r21
    164  mov r13 = r21; /* establish `current' */ \
    215  .mem.offset 8,0; st8.spill [r3] = r21,16; \

/arch/ia64/kernel/
  ivt.S
    122  shl r21=r16,3 // shift bit 60 into sign bit
    125  shr.u r22=r21,3
    145  (p6) shr.u r21=r21,PGDIR_SHIFT+PAGE_SHIFT
    146  (p7) shr.u r21=r21,PGDIR_SHIFT+PAGE_SHIFT-3
    150  cmp.eq p7,p6=0,r21 // unused address bits all zeroes?
    176  dep r21=r19,r20,3,(PAGE_SHIFT-3) // r21=pte_offset(pmd,addr)
    178  (p7) ld8 r18=[r21] // read *pte
    225  ld8 r25=[r21] // read *pte again
    343  MOV_FROM_IPSR(p0, r21)
    358  extr.u r23=r21,IA64_PSR_CPL0_BIT,2 // extract psr.cpl
    [all …]

  entry.S
    199  adds r21=IA64_TASK_THREAD_KSP_OFFSET,in0
    213  ld8 sp=[r21] // load kernel stack pointer of new task
    308  mov r21=b0
    322  st8 [r14]=r21,SW(B1)-SW(B0) // save b0
    329  mov r21=ar.lc // I-unit
    339  st8 [r15]=r21 // save ar.lc
    367  mov r21=pr
    370  st8 [r3]=r21 // save predicate registers
    399  ld8 r21=[r2],16 // restore b0
    434  mov b0=r21
    [all …]

  fsys.S
    248  add r21 = IA64_CLKSRC_MMIO_OFFSET,r20
    265  ld8 r30 = [r21] // clocksource->mmio_ptr
    328  mov r21 = r8
    347  (p14) shr.u r21 = r2, 4
    350  EX(.fail_efault, st8 [r23] = r21)
    601  mov r21=ar.fpsr
    720  ld8 r21=[r17] // cumulated utime
    727  add r21=r21,r18 // sum utime
    730  st8 [r17]=r21 // update utime

  mca_asm.S
    68  ld4 r21=[r17],4 // r21=ptce_stride[0]
    88  add r18=r21,r18
    856  movl r21=PAGE_KERNEL // page properties
    860  or r21=r20,r21 // construct PA | page properties
    867  itr.d dtr[r20]=r21
    1030  movl r21=PAGE_KERNEL // page properties
    1033  or r21=r20,r21 // construct PA | page properties
    1043  itr.d dtr[r20]=r21

/arch/powerpc/kvm/
  booke_interrupts.S
    141  stw r21, VCPU_GPR(r21)(r4)
    219  lwz r21, VCPU_GPR(r21)(r4)
    249  stw r21, VCPU_GPR(r21)(r4)
    269  lwz r21, HOST_NV_GPR(r21)(r1)
    310  stw r21, HOST_NV_GPR(r21)(r1)
    330  lwz r21, VCPU_GPR(r21)(r4)

/arch/powerpc/boot/
  ppc_asm.h
    50  #define r21 21    (macro definition)

/arch/parisc/include/asm/
  asmregs.h
    39  arg5: .reg r21
    67  r21: .reg %r21

/arch/alpha/include/asm/
  ptrace.h
    31  unsigned long r21;    (struct member)

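The r21 member shown here is one of the saved general-purpose-register slots in Alpha's struct pt_regs, so code holding a pt_regs pointer reads it like any other field. A minimal sketch (the helper name is made up; the field is the one listed above):

    #include <asm/ptrace.h>    /* Alpha struct pt_regs, which declares r21 */

    /* Illustrative helper: the value r21 held when the trap or interrupt
     * that produced this pt_regs frame was taken. */
    static inline unsigned long saved_r21(const struct pt_regs *regs)
    {
        return regs->r21;
    }
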
/arch/powerpc/kernel/
  misc.S
    80  PPC_STL r21,12*SZL(r3)
    106  PPC_LL r21,12*SZL(r3)

/arch/powerpc/lib/
  copyuser_64.S
    415  std r21,-112(1)
    423  21: ld r21,512(4)
    437  33: std r21,520(3)
    455  51: ld r21,528(4)
    474  69: std r21,520(3)
    503  ld r21,-112(1)
    522  ld r21,-112(1)