/arch/ia64/lib/

ip_fast_csum.S:
    42  (p7) ld4 r20=[in0],8
    50  add r20=r20,r21
    53  add r20=r20,r22
    55  add r20=r20,r24
    57  shr.u ret0=r20,16  // now need to add the carry
    58  zxt2 r20=r20
    60  add r20=ret0,r20
    62  shr.u ret0=r20,16  // add carry again
    63  zxt2 r20=r20
    65  add r20=ret0,r20
    [all …]

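These ip_fast_csum.S hits are the classic fold of a 32-bit partial sum into a 16-bit one's-complement checksum: the upper halfword is added back into the lower one twice, so the carry produced by the first add is absorbed by the second. A minimal C sketch of that folding step, in the spirit of the kernel's generic csum_fold():

    #include <stdint.h>

    /* Fold a 32-bit partial sum to 16 bits, mirroring the
     * shr.u/zxt2/add pairs above; the one's-complement inversion
     * happens when the checksum is finally returned. */
    static uint16_t fold(uint32_t sum)
    {
        sum = (sum >> 16) + (sum & 0xffff);  /* add the carry */
        sum = (sum >> 16) + (sum & 0xffff);  /* add carry again */
        return (uint16_t)~sum;
    }
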
flush.S:
    31  ld8 r20=[r3]  // r20: stride shift
    34  shr.u r23=in0,r20  // start / (stride size)
    35  shr.u r22=r22,r20  // (last byte address) / (stride size)
    36  shl r21=r21,r20  // r21: stride size of the i-cache(s)
    39  shl r24=r23,r20  // r24: addresses for "fc.i" =
    84  ld8 r20=[r3]  // r20: stride shift
    87  shr.u r23=in0,r20  // start / (stride size)
    88  shr.u r22=r22,r20  // (last byte address) / (stride size)
    89  shl r21=r21,r20  // r21: stride size of the i-cache(s)
    92  shl r24=r23,r20  // r24: addresses for "fc" =

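flush.S derives the first cache-line address by shifting the start address right and back left by the stride shift (truncating to a line boundary), then walks to the end address issuing one fc.i (or fc) per line. A hedged C rendering of the loop shape; fc_i() is a stand-in for the ia64 flush-cache instruction, not a real kernel helper:

    /* Flush every i-cache line covering [start, end); stride_shift
     * is log2 of the line size, as loaded from [r3] above (sketch). */
    extern void fc_i(unsigned long addr);  /* stand-in for "fc.i" */

    static void flush_lines(unsigned long start, unsigned long end,
                            unsigned long stride_shift)
    {
        unsigned long addr = (start >> stride_shift) << stride_shift;

        for (; addr < end; addr += 1UL << stride_shift)
            fc_i(addr);  /* flush one line */
    }
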
/arch/ia64/xen/

xenivt.S:
    40  movl r20=XSI_PSR_I_ADDR
    42  ld8 r20=[r20]
    44  adds r20=-1,r20  // vcpu_info->evtchn_upcall_pending
    46  ld1 r20=[r20]
    48  cmp.ne p6,p0=r20,r0  // if there are pending events,

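xenivt.S loads the per-vcpu interrupt-mask pointer from XSI_PSR_I_ADDR and then tests the evtchn_upcall_pending byte that sits immediately before it (hence the adds r20=-1,r20). A hedged C equivalent of the check, assuming that layout:

    /* The byte just below *XSI_PSR_I_ADDR is assumed to be
     * vcpu_info->evtchn_upcall_pending, as the asm comment says. */
    static int xen_event_pending(unsigned char *psr_i_addr)
    {
        unsigned char pending = *(psr_i_addr - 1);  /* ld1 r20=[r20] */
        return pending != 0;  /* cmp.ne p6,p0=r20,r0 */
    }
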
/arch/sh/lib64/

udivsi3.S:
    15  movi 0xffffffffffffbb0c,r20  /* shift count equiv 76 */
    16  sub r20,r25,r21
    21  sub r20,r0,r0
    35  mulu.l r18,r22,r20
    38  sub r4,r20,r25
    44  mulu.l r19,r22,r20
    47  sub.l r25,r20,r25
    53  mulu.l r19,r22,r20
    57  cmpgt r25,r20,r25

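The repeated mulu.l / sub / cmpgt pattern suggests division by a scaled reciprocal: multiply by a fixed-point approximation of 1/d, then check the trial remainder and nudge the quotient. A generic C sketch of that technique (not the exact sh64 algorithm, which refines a table-driven estimate):

    #include <stdint.h>

    /* Divide n by d (d != 0) via multiply-by-reciprocal plus one
     * correction step; floor(2^32/d) makes the estimate at most
     * one below the true quotient. */
    static uint32_t udiv_recip(uint32_t n, uint32_t d)
    {
        uint64_t recip = ((uint64_t)1 << 32) / d;
        uint32_t q = (uint32_t)(((uint64_t)n * recip) >> 32);
        uint32_t r = n - q * d;  /* trial remainder, cf. sub r4,r20,r25 */

        if (r >= d)  /* cf. cmpgt r25,r20,r25 */
            q++;
        return q;
    }
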
sdivsi3.S:
    13  gettr tr0,r20
    20  ldx.ub r20, r21, r19  /* u0.8 */
    24  ldx.w r20, r21, r21  /* s2.14 */

udivdi3.S:
    13  sub r63,r22,r20  // r63 == 64 % 64
    16  addi r20,32,r9
    45  addi r20,30-22,r0
    50  addi r20,30,r0

/arch/parisc/kernel/

syscall.S:
    150  STREG %r20, TASK_PT_GR20(%r1)  /* Syscall number */
    201  comiclr,>> __NR_Linux_syscalls, %r20, %r0
    204  LDREGX %r20(%r19), %r19
    212  comb,= %r2,%r20,.Lrt_sigreturn
    301  copy %ret0,%r20
    317  comiclr,>>= __NR_Linux_syscalls, %r20, %r0
    320  LDREGX %r20(%r19), %r19
    328  comb,= %r2,%r20,.Ltrace_rt_sigreturn
    437  depdi 0, 31, 32, %r20
    441  comiclr,>> __NR_lws_entries, %r20, %r0
    [all …]

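The parisc entry path is the standard table dispatch: the syscall number in %r20 is bounds-checked against __NR_Linux_syscalls (comiclr), used to index the handler table (LDREGX), and compared against the sigreturn number as a special case. The shape in C, with assumed names:

    #include <errno.h>

    typedef long (*syscall_fn)(long, long, long, long, long, long);

    extern syscall_fn sys_call_table[];  /* assumed handler table */
    extern unsigned long nr_syscalls;    /* __NR_Linux_syscalls */

    static long dispatch(unsigned long nr, long a0, long a1, long a2,
                         long a3, long a4, long a5)
    {
        if (nr >= nr_syscalls)  /* comiclr,>> __NR_Linux_syscalls, %r20 */
            return -ENOSYS;
        return sys_call_table[nr](a0, a1, a2, a3, a4, a5);
    }
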
pacache.S:
    80  LDREG ITLB_SID_BASE(%r1), %r20
    93  mtsp %r20, %sr1
    94  add %r21, %r20, %r20  /* increment space */
    108  mtsp %r20, %sr1
    117  add %r21, %r20, %r20  /* increment space */
    123  LDREG DTLB_SID_BASE(%r1), %r20
    136  mtsp %r20, %sr1
    137  add %r21, %r20, %r20  /* increment space */
    151  mtsp %r20, %sr1
    160  add %r21, %r20, %r20  /* increment space */
    [all …]

/arch/tile/kernel/

intvec_64.S:
    431  push_reg r20, r52
    456  mfspr r20, SPR_SYSTEM_SAVE_K_0
    461  bfextu r20, r20, 0, LOG2_THREAD_SIZE-1
    464  shl3add r20, r20, r21
    465  ld tp, r20
    501  shli r20, r1, 5
    503  moveli r20, INT_SWINT_1 << 5
    508  add r20, r20, r21
    509  jalr r20
    524  IRQ_DISABLE_ALL(r20)
    [all …]

intvec_32.S:
    442  push_reg r20, r52
    466  mfspr r20, SPR_SYSTEM_SAVE_K_0
    471  mm r20, r20, zero, 0, LOG2_THREAD_SIZE-1
    473  s2a r20, r20, r21
    474  lw tp, r20
    512  shli r20, r1, 5
    514  moveli r20, INT_SWINT_1 << 5
    516  addli r20, r20, lo16(intvec_feedback)
    517  auli r20, r20, ha16(intvec_feedback)
    518  jalr r20
    [all …]

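Both TILE vectors pull SPR_SYSTEM_SAVE_K_0, which appears to pack a THREAD_SIZE-aligned value with the CPU number in its low bits: bfextu (64-bit) or mm (32-bit) extracts those bits, and shl3add/s2a scales them into a word index used to load tp. A hedged C rendering of that per-CPU lookup (the array name and exact bit width are our assumptions):

    #include <stdint.h>

    #define LOG2_THREAD_SIZE 13          /* illustrative value */

    extern uintptr_t per_cpu_tp[];       /* assumed per-CPU array */

    static uintptr_t load_tp(uintptr_t system_save_k_0)
    {
        /* low bits of the SPR hold the CPU id (bfextu / mm above) */
        uintptr_t cpu = system_save_k_0 & ((1UL << LOG2_THREAD_SIZE) - 1);
        return per_cpu_tp[cpu];          /* shl3add/s2a + load of tp */
    }
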
/arch/ia64/kvm/

optvfault.S:
    32  mov r20 = r31; \
    48  mov r31 = r20
    133  addl r20=@gprel(asm_mov_to_reg),gp
    136  adds r30=kvm_resume_to_guest-asm_mov_to_reg,r20
    137  shladd r17=r17,4,r20
    160  addl r20=@gprel(asm_mov_to_reg),gp
    163  shladd r17=r17,4,r20
    165  adds r30=kvm_resume_to_guest-asm_mov_to_reg,r20
    181  addl r20=@gprel(asm_mov_from_reg),gp
    183  adds r30=kvm_asm_mov_from_rr_back_1-asm_mov_from_reg,r20
    [all …]

vmm_ivt.S:
    158  movl r20=IA64_GRANULE_SHIFT<<2
    160  mov cr.itir=r20
    181  movl r20=IA64_GRANULE_SHIFT<<2
    183  mov cr.itir=r20
    277  mov r20=r1  /* A */
    329  .mem.offset 0,0; st8.spill [r16]=r20,16  /* save original r1 */
    370  .mem.offset 0,0; st8.spill [r2]=r20,16
    968  ld8.fill r20=[r2],16
    1038  ld8 r20=[r16],16  // ar.fpsr
    1052  mov ar.fpsr=r20
    [all …]

/arch/powerpc/kvm/

book3s_rmhandlers.S:
    197  PPC_STL r20, _NIP(r1); \
    198  mfmsr r20; \
    200  andc r3,r20,r3;  /* Disable DR,EE */ \
    205  mtmsr r20;  /* Enable DR,EE */ \
    207  PPC_LL r20, _NIP(r1)

book3s_interrupts.S:
    48  PPC_LL r20, VCPU_GPR(r20)(vcpu); \
    140  PPC_STL r20, VCPU_GPR(r20)(r7)

booke_interrupts.S:
    143  stw r20, VCPU_GPR(r20)(r4)
    227  lwz r20, VCPU_GPR(r20)(r4)
    265  stw r20, VCPU_GPR(r20)(r4)
    285  lwz r20, HOST_NV_GPR(r20)(r1)
    330  stw r20, HOST_NV_GPR(r20)(r1)
    350  lwz r20, VCPU_GPR(r20)(r4)

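All three PowerPC KVM files share one idiom: VCPU_GPR(rN) and HOST_NV_GPR(rN) expand to the byte offset of register N inside a saved-register block, so "stw r20, VCPU_GPR(r20)(r4)" stores guest r20 into the structure r4 points at. This only works because r20 itself is a macro for the number 20 (see the ppc_asm.h hit at the end of this listing). A C analogue of the offset macro, with an assumed layout:

    #include <stddef.h>

    struct vcpu_regs { unsigned long gpr[32]; /* ... */ };

    /* Byte offset of guest GPR n within the register block. */
    #define VCPU_GPR(n) \
        (offsetof(struct vcpu_regs, gpr) + (n) * sizeof(unsigned long))
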
/arch/ia64/kernel/

fsys.S:
    243  movl r20 = fsyscall_gtod_data  // load fsyscall gettimeofday data address
    247  add r22 = IA64_GTOD_WALL_TIME_OFFSET,r20  // wall_time
    248  add r21 = IA64_CLKSRC_MMIO_OFFSET,r20
    253  (p15) add r22 = IA64_GTOD_MONO_TIME_OFFSET,r20  // monotonic_time
    255  add r26 = IA64_CLKSRC_CYCLE_LAST_OFFSET,r20  // clksrc_cycle_last
    261  ld4.acq r28 = [r20]  // gtod_lock.sequence, Must take first
    266  add r24 = IA64_CLKSRC_MULT_OFFSET,r20
    268  add r23 = IA64_CLKSRC_SHIFT_OFFSET,r20
    269  add r14 = IA64_CLKSRC_MASK_OFFSET,r20
    314  ld4 r10 = [r20]  // gtod_lock.sequence
    [all …]

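fsys.S is the fast-path clock read: it snapshots the clocksource fields (cycle_last, mult, shift, mask) under a seqlock, loading gtod_lock.sequence with acquire semantics before the data and re-reading it afterwards to detect a concurrent update. The retry loop in C, roughly, with a sketched struct standing in for fsyscall_gtod_data:

    #include <stdint.h>

    struct gtod_data {
        volatile uint32_t seq;         /* gtod_lock.sequence */
        uint64_t cycle_last, mask;
        uint32_t mult, shift;
    };

    extern uint64_t read_cycles(void); /* ITC or MMIO counter */

    static uint64_t clock_ns(struct gtod_data *g)
    {
        uint32_t seq;
        uint64_t ns;

        do {
            seq = g->seq;              /* ld4.acq: must be read first */
            ns  = ((read_cycles() - g->cycle_last) & g->mask)
                  * g->mult >> g->shift;
        } while ((seq & 1) || g->seq != seq);  /* writer active? retry */

        return ns;
    }
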
mca_asm.S:
    71  ld4 r20=[r2]  // r20=ptce_count[1]
    75  adds r20=-1,r20
    82  mov ar.lc=r20
    200  mov r20=IA64_TR_PALCODE
    202  itr.i itr[r20]=r18
    213  movl r20=PAGE_KERNEL
    215  add r16=r20,r16
    220  mov r20=IA64_TR_CURRENT_STACK
    222  itr.d dtr[r20]=r16
    829  ld8 r20=[temp2],16  // prev_task
    [all …]

efi_stub.S:
    74  mov loc6=r20
    79  mov r20=loc6

esi_stub.S:
    84  mov loc6=r20  // old sp
    89  mov r20=loc6  // save virtual mode sp

ivt.S:
    170  (p7) ld8 r20=[r17]  // get *pmd (may be 0)
    173  (p7) cmp.eq.or.andcm p6,p7=r20,r0  // was pmd_present(*pmd) == NULL?
    174  dep r21=r19,r20,3,(PAGE_SHIFT-3)  // r21=pte_offset(pmd,addr)
    183  dep r23=0,r20,0,PAGE_SHIFT  // clear low bits to get page address
    230  cmp.ne.or.andcm p6,p7=r26,r20  // did *pmd change
    280  mov r20=PAGE_SHIFT<<2  // setup page size for purge
    284  (p7) ptc.l r16,r20
    324  mov r20=PAGE_SHIFT<<2  // setup page size for purge
    328  (p7) ptc.l r16,r20
    379  MOV_FROM_ISR(r20)
    [all …]

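The ivt.S TLB-miss handler walks the page table in software; the dep (deposit) instructions form each next-level address by splicing virtual-address bits into the cleared low bits of a table page, which is exactly what pte_offset() does in C. A rough equivalent, with an illustrative PAGE_SHIFT:

    #include <stdint.h>

    #define PAGE_SHIFT 14  /* illustrative; ia64 supports several sizes */

    /* dep r21=r19,r20,3,(PAGE_SHIFT-3): insert the PTE index into
     * bits [3, PAGE_SHIFT) of the pmd page address. */
    static uint64_t *pte_offset(uint64_t pmd_val, uint64_t vaddr)
    {
        uint64_t page = pmd_val & ~((1UL << PAGE_SHIFT) - 1);
        uint64_t idx  = (vaddr >> PAGE_SHIFT) & ((1UL << (PAGE_SHIFT - 3)) - 1);
        return (uint64_t *)(page + (idx << 3));
    }
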
/arch/microblaze/kernel/

mcount.S:
    36  swi r20, r1, 68; \
    67  lwi r20, r1, 68; \
    128  lwi r20, r0, ftrace_trace_function;
    130  cmpu r5, r20, r6;  /* ftrace_trace_function != ftrace_stub */
    143  brald r15, r20;  /* MS: jump to ftrace handler */

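mcount.S implements the usual ftrace gate: load the current ftrace_trace_function pointer, compare it against ftrace_stub, and take the indirect branch only when a real tracer has been installed. In C terms:

    typedef void (*ftrace_func_t)(unsigned long ip, unsigned long parent_ip);

    extern ftrace_func_t ftrace_trace_function;  /* patched when tracing is on */
    extern void ftrace_stub(unsigned long ip, unsigned long parent_ip);

    static void mcount(unsigned long ip, unsigned long parent_ip)
    {
        if (ftrace_trace_function != ftrace_stub)  /* cmpu r5, r20, r6 */
            ftrace_trace_function(ip, parent_ip);  /* brald r15, r20 */
    }
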
/arch/parisc/hpux/

gate.S:
    31  ldw -56(%r30), %r20  ;! 6th argument
    65  STREG %r20, TASK_PT_GR20(%r1)  /* 6th argument */
    87  stw %r20, -56(%r30)  ;! 6th argument

/arch/microblaze/lib/

uaccess_old.S:
    110  3: lwi r20, r6, 0x0008 + offset; \
    118  11: swi r20, r5, 0x0008 + offset; \
    198  swi r20, r1, 16
    221  lwi r20, r1, 16
    241  lwi r20, r1, 16

/arch/ia64/include/asm/xen/

minstate.h:
    5  MOV_FROM_ITC(pUStk, p6, r20, r2);
    39  mov r20=r1;  /* A */ \
    122  .mem.offset 0,0; st8.spill [r16]=r20,16;  /* save original r1 */ \

/arch/powerpc/boot/

ppc_asm.h:
    49  #define r20 20  (macro definition)

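This last hit is the definition that makes lines like "stw r20, VCPU_GPR(r20)(r4)" assemble at all: the GNU assembler for powerpc wants bare register numbers, so ppc_asm.h maps each symbolic name onto its number (the kernel proper carries an equivalent set in asm/ppc_asm.h):

    /* from arch/powerpc/boot/ppc_asm.h */
    #define r20 20
    /* after preprocessing, "stw r20, 8(r1)" becomes "stw 20, 8(1)" */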