/arch/m32r/lib/ |
D | ashxdi3.S |
      10  ; input r2 shift val
      23  cmpz r2 || ldi r3, #32
      24  jc r14 || cmpu r2, r3
      28  addi r2, #-32
      29  sra r1, r2
      33  mv r3, r0 || srl r1, r2
      34  sra r0, r2 || neg r2, r2
      35  sll r3, r2
      43  cmpz r2 || ldi r3, #32
      44  jc r14 || cmpu r2, r3
      [all …]
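The matches come from m32r's 64-bit shift helper: the `addi r2, #-32 / sra r1, r2` pair handles shifts of 32 or more, while the `srl/sll` pair combines bits across the two 32-bit halves for shorter shifts. A minimal C model of that split, with the hi/lo register mapping being my reading of the excerpt:

    #include <stdint.h>

    /* Sketch of a 64-bit arithmetic shift right built from 32-bit
     * halves (hi:lo), with the >=32 and <32 cases handled separately
     * as in the excerpt above. */
    static int64_t ashrdi3_sketch(int32_t hi, uint32_t lo, unsigned n)
    {
        if (n == 0)
            return ((int64_t)hi << 32) | lo;
        if (n >= 32) {
            /* "addi r2, #-32; sra r1, r2": low half = hi >> (n - 32) */
            lo = (uint32_t)(hi >> (n - 32));
            hi >>= 31;                    /* sign-fill the high word */
        } else {
            /* "srl r1, r2 / sll r3, r2": combine bits across halves */
            lo = (lo >> n) | ((uint32_t)hi << (32 - n));
            hi >>= n;
        }
        return ((int64_t)hi << 32) | lo;
    }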
|
D | checksum.S |
      57  ; r2: unsigned int sum
      59  push r2 || ldi r2, #0
      66  ldi r3, #0 || addx r2, r4
      67  addx r2, r3
      80  addx r2, r4 || addi r0, #2
      81  addx r2, r3
      93  ld r3, @r0+ || addx r2, r3 ; +12
      94  ld r4, @r0+ || addx r2, r4 ; +16
      95  ld r5, @r0+ || addx r2, r5 ; +20
      96  ld r3, @r0+ || addx r2, r3 ; +24
      [all …]
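Each `addx r2, rN` here adds a loaded word plus the previous carry into the running sum, which is the internet-checksum accumulate pattern. A minimal C model of the same accumulation and final 16-bit fold (an illustration, not the kernel's csum_partial()):

    #include <stddef.h>
    #include <stdint.h>

    static uint16_t csum_sketch(const uint32_t *p, size_t nwords)
    {
        uint64_t sum = 0;

        while (nwords--)
            sum += *p++;             /* carries collect in the top bits */

        sum = (sum & 0xffffffff) + (sum >> 32);  /* fold 64 -> 32 */
        sum = (sum & 0xffff) + (sum >> 16);      /* fold 32 -> 16 */
        sum = (sum & 0xffff) + (sum >> 16);      /* absorb final carry */
        return (uint16_t)~sum;
    }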
|
D | memset.S |
      23  mv r4, r0 || cmpz r2
      25  cmpui r2, #16
      27  cmpui r2, #4
      36  addi r2, #-1 || addi r3, #-1
      38  cmpui r2, #4
      45  or r1, r3 || addi r2, #-4
      47  st r1, @+r4 || addi r2, #-4
      48  bgtz r2, word_set_loop
      49  bnez r2, byte_set_wrap
      64  st r1, @+r4 || addi r2, #-16
      [all …]
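The `cmpui #16 / cmpui #4` tests dispatch on length, and `word_set_loop` / `byte_set_wrap` are the word and byte fill loops. A rough C equivalent of that shape (alignment handling in the real routine is simplified away):

    #include <stddef.h>
    #include <stdint.h>

    static void *memset_sketch(void *dst, int c, size_t n)
    {
        unsigned char *d = dst;
        uint32_t word = (uint8_t)c;

        word |= word << 8;            /* replicate fill byte into a word */
        word |= word << 16;

        while (n >= 4 && ((uintptr_t)d & 3) == 0) {
            *(uint32_t *)d = word;    /* word_set_loop: 4 bytes per store */
            d += 4;
            n -= 4;
        }
        while (n--)                   /* byte_set_wrap: remaining bytes */
            *d++ = (unsigned char)c;
        return dst;
    }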
|
D | memcpy.S |
      24  or r7, r1 || cmpz r2
      25  jc r14 || cmpeq r0, r1 ; return if r2=0
      30  srl3 r3, r2, #2
      31  and3 r2, r2, #3
      36  st r7, @+r4 || cmpz r2
      38  addi r4, #4 || jc r14 ; return if r2=0
      42  addi r2, #-1 || stb r7, @r4+
      43  bnez r2, byte_copy
      47  addi r2, #-1 || stb r7, @r4
      49  bnez r2, byte_copy
      [all …]
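`srl3 r3, r2, #2` takes the word count and `and3 r2, r2, #3` the leftover bytes: words are copied first, then the `byte_copy` tail. A C rendering of that split, assuming word-aligned pointers as the fast path does:

    #include <stddef.h>
    #include <stdint.h>

    static void *memcpy_sketch(void *dst, const void *src, size_t n)
    {
        uint32_t *d = dst;
        const uint32_t *s = src;
        size_t words = n >> 2;        /* srl3 r3, r2, #2 */
        size_t tail  = n & 3;         /* and3 r2, r2, #3 */

        while (words--)
            *d++ = *s++;

        unsigned char *db = (unsigned char *)d;
        const unsigned char *sb = (const unsigned char *)s;
        while (tail--)                /* byte_copy loop */
            *db++ = *sb++;
        return dst;
    }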
|
/arch/hexagon/kernel/ |
D | head.S |
      50  r2.h = #0xffc0;
      51  r2.l = #0x0000;
      52  r25 = and(r2,r25); /* R25 holds PHYS_OFFSET now */
      76  r2.l = #LO(stext);
      81  r2.h = #HI(stext);
      85  r1 = sub(r1, r2);
      92  r2.h = #0xffc0;
      93  r2.l = #0x0000; /* round back down to 4MB boundary */
      94  r1 = and(r1,r2);
      95  r2 = lsr(r1, #22) /* 4MB page number */
      [all …]
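The constant built from `r2.h = #0xffc0; r2.l = #0x0000` is the mask 0xffc00000: AND-ing an address with it rounds down to a 4MB boundary, and `lsr #22` turns the address into a 4MB page number (4MB = 2^22). A small model of that arithmetic:

    #include <stdint.h>

    #define FOURMB_MASK 0xffc00000u   /* ~(4MB - 1) */

    static uint32_t round_to_4mb(uint32_t addr)
    {
        return addr & FOURMB_MASK;    /* r1 = and(r1, r2) */
    }

    static uint32_t page_4mb(uint32_t addr)
    {
        return addr >> 22;            /* r2 = lsr(r1, #22) */
    }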
|
/arch/sh/lib/ |
D | __clear_user.S |
      21  mov r4, r2
      26  add #31, r2
      27  and r1, r2
      28  cmp/eq r4, r2
      30  mov r2, r3
      33  mov r4, r2
      36  0: mov.b r0, @r2
      38  add #1, r2
      41  mov r2, r4
      46  cmp/hi r2, r3
      [all …]
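The `add #31 / and` pair reads as the usual align-up idiom: round the pointer up to a 32-byte boundary, clear leading bytes one at a time (`mov.b r0, @r2`), then clear aligned blocks. A sketch under that reading, with user-access fault handling omitted:

    #include <stddef.h>
    #include <stdint.h>

    static void clear_sketch(unsigned char *p, size_t n)
    {
        unsigned char *aligned = (unsigned char *)
            (((uintptr_t)p + 31) & ~(uintptr_t)31);

        while (n && p < aligned) {    /* leading byte loop */
            *p++ = 0;
            n--;
        }
        while (n--)                   /* aligned part, simplified to bytes */
            *p++ = 0;
    }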
|
/arch/ia64/kvm/ |
D | trampoline.S |
      17  add r2 = CTX(B0),r32; \
      22  st8 [r2]=r16,16; \
      28  st8 [r2]=r16,16; \
      34  st8 [r2]=r16; \
      42  add r2 = CTX(B0),r33; \
      45  ld8 r16=[r2],16; \
      51  ld8 r16=[r2],16; \
      57  ld8 r16=[r2]; \
      71  add r2=CTX(R4),r32; \
      75  st8.spill [r2]=r4,16; \
      [all …]
|
/arch/arm/lib/ |
D | findbit.S |
      27  mov r2, #0
      29  ARM( ldrb r3, [r0, r2, lsr #3] )
      30  THUMB( lsr r3, r2, #3 )
      34  add r2, r2, #8 @ next bit pointer
      35  2: cmp r2, r1 @ any more?
      48  ands ip, r2, #7
      50  ARM( ldrb r3, [r0, r2, lsr #3] )
      51  THUMB( lsr r3, r2, #3 )
      56  orr r2, r2, #7 @ if zero, then no bits here
      57  add r2, r2, #1 @ align bit pointer
      [all …]
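Here r2 is the running bit index: `ldrb r3, [r0, r2, lsr #3]` fetches byte bit/8, and the index advances by 8 per byte until it reaches the limit in r1. A byte-at-a-time sketch of find_first_zero_bit along those lines (returning maxbit means "not found"):

    #include <stddef.h>

    static size_t find_first_zero_bit_sketch(const unsigned char *addr,
                                             size_t maxbit)
    {
        size_t bit = 0;                        /* mov r2, #0 */

        while (bit < maxbit) {                 /* cmp r2, r1: any more? */
            unsigned char byte = addr[bit >> 3];
            if (byte != 0xff) {                /* a zero bit in this byte */
                while (byte & 1) {             /* locate it within the byte */
                    byte >>= 1;
                    bit++;
                }
                return bit < maxbit ? bit : maxbit;
            }
            bit += 8;                          /* next bit pointer */
        }
        return maxbit;
    }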
|
D | memzero.S |
      24  strltb r2, [r0], #1 @ 1
      25  strleb r2, [r0], #1 @ 1
      26  strb r2, [r0], #1 @ 1
      34  mov r2, #0 @ 1
      50  mov ip, r2 @ 1
      51  mov lr, r2 @ 1
      54  stmgeia r0!, {r2, r3, ip, lr} @ 4
      55  stmgeia r0!, {r2, r3, ip, lr} @ 4
      56  stmgeia r0!, {r2, r3, ip, lr} @ 4
      57  stmgeia r0!, {r2, r3, ip, lr} @ 4
      [all …]
|
D | bitops.h |
      10  mov r2, #1
      19  mov r3, r2, lsl r3
      20  1: ldrex r2, [r1]
      21  \instr r2, r2, r3
      22  strex r0, r2, [r1]
      35  mov r2, #1
      39  mov r3, r2, lsl r3 @ create mask
      46  1: ldrex r2, [r1]
      47  ands r0, r2, r3 @ save old value of bit
      48  \instr r2, r2, r3 @ toggle bit
      [all …]
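The ldrex/strex pair is a load-linked/store-conditional retry loop: reload the word, apply the operation (`\instr`), attempt the store, and repeat if another CPU intervened. In portable C the same shape falls out of a compare-exchange loop; this sketch does the testop variant, setting a bit and returning its old value as in `ands r0, r2, r3`:

    #include <stdatomic.h>
    #include <stdint.h>

    static int test_and_set_bit_sketch(_Atomic uint32_t *word, unsigned bit)
    {
        uint32_t mask = 1u << (bit & 31);  /* mov r2, #1; lsl: create mask */
        uint32_t old = atomic_load(word);

        do {
            /* retry until the store sticks, like strex + bne back to 1: */
        } while (!atomic_compare_exchange_weak(word, &old, old | mask));

        return (old & mask) != 0;          /* save old value of bit */
    }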
|
D | getuser.S |
      36  check_uaccess r0, 1, r1, r2, __get_user_bad
      37  1: TUSER(ldrb) r2, [r0]
      43  check_uaccess r0, 2, r1, r2, __get_user_bad
      46  2: ldrbt r2, [r0], #1
      50  2: ldrb r2, [r0]
      54  orr r2, r2, rb, lsl #8
      56  orr r2, rb, r2, lsl #8
      63  check_uaccess r0, 4, r1, r2, __get_user_bad
      64  4: TUSER(ldr) r2, [r0]
      70  check_uaccess r0, 8, r1, r2, __get_user_bad
      [all …]
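The two `orr ... lsl #8` forms are the little- and big-endian ways of assembling a halfword from two separate byte loads when the CPU cannot do the unaligned 16-bit user access directly. A sketch of that assembly step, leaving out the fault handling (`__get_user_bad` path):

    #include <stdint.h>

    static uint16_t load_unaligned_u16(const uint8_t *p, int big_endian)
    {
        uint8_t b0 = p[0], b1 = p[1];        /* two ldrb accesses */

        if (big_endian)
            return (uint16_t)((b0 << 8) | b1);  /* orr r2, rb, r2, lsl #8 */
        return (uint16_t)(b0 | (b1 << 8));      /* orr r2, r2, rb, lsl #8 */
    }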
|
D | memset.S |
      28  cmp r2, #16
      40  2: subs r2, r2, #64
      50  tst r2, #32
      53  tst r2, #16
      72  cmp r2, #96
      78  sub r2, r2, r8
      86  3: subs r2, r2, #64
      92  tst r2, #32
      94  tst r2, #16
      100  4: tst r2, #8
      [all …]
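The `subs #64 ... tst #32 / tst #16 / tst #8` sequence decomposes the remaining length into power-of-two chunks: 64-byte iterations first, then one optional block per set bit of the remainder. A C model of that control flow, with memset standing in for the store-multiple bursts:

    #include <stddef.h>
    #include <string.h>

    static void memset_chunks_sketch(unsigned char *d, int c, size_t n)
    {
        while (n >= 64) {             /* 2: subs r2, r2, #64 */
            memset(d, c, 64);
            d += 64; n -= 64;
        }
        if (n & 32) { memset(d, c, 32); d += 32; }   /* tst r2, #32 */
        if (n & 16) { memset(d, c, 16); d += 16; }   /* tst r2, #16 */
        if (n & 8)  { memset(d, c, 8);  d += 8;  }   /* tst r2, #8 */
        memset(d, c, n & 7);          /* trailing 0..7 bytes */
    }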
|
/arch/hexagon/lib/ |
D | memset.S |
      42  p0 = cmp.eq(r2, #0)
      43  p1 = cmp.gtu(r2, #7)
      60  loop0(1f, r2) /* byte loop */
      72  p1 = cmp.eq(r2, #1)
      85  p1 = cmp.eq(r2, #2)
      97  p0 = cmp.gtu(r2, #7)
      98  p1 = cmp.eq(r2, #4)
      104  p0 = cmp.gtu(r2, #11)
      110  r10 = lsr(r2, #3)
      127  p1 = cmp.eq(r2, #8)
      [all …]
|
/arch/arc/lib/ |
D | strchr-700.S |
      19  bmsk r2,r0,1
      22  breq.d r2,r0,.Laligned
      24  sub_s r0,r0,r2
      25  asl r7,r2,3
      26  ld_s r2,[r0]
      34  sub r12,r2,r7
      35  bic_s r12,r12,r2
      38  xor r6,r2,r5
      39  ld.a r2,[r0,4]
      50  bic r2,r7,r6
      [all …]
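The `sub r12, r2, r7 / bic_s r12, r12, r2` pair is the classic word-at-a-time zero-byte test: (x - 0x01010101) & ~x & 0x80808080 is nonzero iff some byte of x is zero. Which register holds which constant isn't visible in the excerpt, so the mapping below is an assumption; the same test also drives the strcmp.S and strcpy-700.S entries that follow.

    #include <stdint.h>

    #define ONES   0x01010101u
    #define HIGHS  0x80808080u

    /* nonzero iff some byte of x is zero */
    static int has_zero_byte(uint32_t x)
    {
        return ((x - ONES) & ~x & HIGHS) != 0;
    }

    /* To search for a character c instead of '\0', XOR first
     * ("xor r6, r2, r5" above) so a matching byte becomes zero: */
    static int has_byte(uint32_t x, uint8_t c)
    {
        return has_zero_byte(x ^ (c * ONES));
    }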
|
D | strcmp.S |
      19  or r2,r0,r1
      20  bmsk_s r2,r2,1
      21  brne r2,0,.Lcharloop
      25  ld.ab r2,[r0,4]
      28  sub r4,r2,r12
      29  bic r4,r4,r2
      32  breq r2,r3,.Lwordloop
      34  xor r0,r2,r3 ; mask for difference
      39  and_s r2,r2,r0
      42  cmp_s r2,r3
      [all …]
|
D | strcpy-700.S |
      22  or r2,r0,r1
      23  bmsk_s r2,r2,1
      24  brne.d r2,0,charloop
      30  sub r2,r3,r8
      31  bic_s r2,r2,r3
      32  tst_s r2,r12
      41  sub r2,r3,r8
      42  bic_s r2,r2,r3
      43  tst_s r2,r12
      46  sub r2,r4,r8
      [all …]
|
/arch/arm/kvm/ |
D | interrupts_head.S |
      20  VFPFMRX r2, FPEXC
      22  orr r6, r2, #FPEXC_EN
      26  tst r2, #FPEXC_EX @ Check for VFP Subarchitecture
      31  tst r2, #FPEXC_FP2V
      33  bic r6, r2, #FPEXC_EX @ FPEXC_EX disable
      37  stm \vfp_base, {r2-r5} @ Save FPEXC, FPSCR, FPINST, FPINST2
      43  ldm \vfp_base, {r2-r5} @ Load FPEXC, FPSCR, FPINST, FPINST2
      46  tst r2, #FPEXC_EX @ Check for VFP Subarchitecture
      49  tst r2, #FPEXC_FP2V
      52  VFPFMXR FPEXC, r2 @ FPEXC (last, in case !EN)
      [all …]
|
/arch/arm/crypto/ |
D | sha256-core.S_shipped |
      102  add r2,r1,r2,lsl#6 @ len to point at the end of inp
      103  stmdb sp!,{r0,r1,r2,r4-r11,lr}
      109  ldr r2,[r1],#4
      111  ldrb r2,[r1,#3]
      116  @ ldr r2,[r1],#4 @ 0
      124  rev r2,r2
      127  @ ldrb r2,[r1,#3] @ 0
      131  orr r2,r2,r12,lsl#8
      133  orr r2,r2,r0,lsl#16
      138  orr r2,r2,r12,lsl#24
      [all …]
|
/arch/sh/lib64/ |
D | copy_page.S |
      49  alloco r2, 0x00
      51  alloco r2, 0x20
      55  add r2, r6, r6
      58  sub r3, r2, r60
      70  bge/u r2, r6, tr2 ! skip prefetch for last 4 lines
      71  ldx.q r2, r22, r63 ! prefetch 4 lines hence
      74  bge/u r2, r7, tr3 ! skip alloco for last 2 lines
      75  alloco r2, 0x40 ! alloc destination line 2 lines ahead
      78  ldx.q r2, r60, r36
      79  ldx.q r2, r61, r37
      [all …]
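This is a software-pipelined page copy: the loop touches the source several cache lines ahead ("prefetch 4 lines hence") before copying the current line, and `alloco` allocates the destination line without reading it. A C shadow of the prefetch side only, since alloco has no portable equivalent; the 32-byte line size matches the 0x20 offsets in the excerpt, and __builtin_prefetch is a GCC/Clang builtin:

    #include <stddef.h>
    #include <string.h>

    #define LINE 32

    static void copy_page_sketch(unsigned char *dst,
                                 const unsigned char *src, size_t page)
    {
        for (size_t off = 0; off < page; off += LINE) {
            if (off + 4 * LINE < page)          /* skip for last 4 lines */
                __builtin_prefetch(src + off + 4 * LINE, 0);
            memcpy(dst + off, src + off, LINE); /* one cache line per pass */
        }
    }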
|
/arch/m32r/mm/ |
D | mmu.S |
      32  st r2, @-sp
      49  ;; r2: &tlb_entry_{i|d}_dat
      53  seth r2, #high(tlb_entry_d_dat)
      54  or3 r2, r2, #low(tlb_entry_d_dat)
      57  seth r2, #high(tlb_entry_d_dat)
      58  or3 r2, r2, #low(tlb_entry_d_dat)
      62  add r2, r1
      78  ;; r2: &tlb_entry_{i|d}_dat
      86  seth r2, #high(tlb_entry_i_dat)
      87  or3 r2, r2, #low(tlb_entry_i_dat)
      [all …]
|
/arch/blackfin/mach-bf561/ |
D | atomic.S |
      46  safe_testset p0, r2;
      48  SSYNC(r2);
      53  CSYNC(r2);
      55  SSYNC(r2);
      70  safe_testset p0, r2;
      72  SSYNC(r2);
      81  CSYNC(r2);
      97  SSYNC(r2);
      132  SSYNC(r2);
      170  SSYNC(r2);
      [all …]
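`safe_testset` with the SSYNC/CSYNC barriers reads as a test-and-set acquire loop guarding the atomic operations. In C11 the same shape is an atomic_flag spin loop, with the explicit barriers folded into the acquire/release orderings; a sketch under that reading:

    #include <stdatomic.h>

    static void spin_lock_sketch(atomic_flag *lock)
    {
        while (atomic_flag_test_and_set_explicit(lock,
                                                 memory_order_acquire))
            ;                      /* spin until the testset wins */
    }

    static void spin_unlock_sketch(atomic_flag *lock)
    {
        atomic_flag_clear_explicit(lock, memory_order_release);
    }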
|
/arch/unicore32/lib/ |
D | findbit.S |
      23  mov r2, #0
      24  1: ldb r3, [r0+], r2 >> #3
      27  add r2, r2, #8 @ next bit pointer
      28  2: csub.a r2, r1 @ any more?
      42  and.a ip, r2, #7
      44  ldb r3, [r0+], r2 >> #3
      48  or r2, r2, #7 @ if zero, then no bits here
      49  add r2, r2, #1 @ align bit pointer
      61  mov r2, #0
      62  1: ldb r3, [r0+], r2 >> #3
      [all …]
|
/arch/s390/net/ |
D | bpf_jit.S |
      46  lgr %r9,%r2 # save %r2
      51  ltgr %r2,%r2 # set cc to (%r2 != 0)
      52  lgr %r2,%r9 # restore %r2
      71  lgr %r9,%r2 # save %r2
      77  ltgr %r2,%r2 # set cc to (%r2 != 0)
      78  lgr %r2,%r9 # restore %r2
      97  lgr %r9,%r2 # save %r2
      103  ltgr %r2,%r2 # set cc to (%r2 != 0)
      104  lgr %r2,%r9 # restore %r2
      120  lgr %r9,%r2 # save %r2
      [all …]
|
/arch/sh/kernel/cpu/sh2/ |
D | entry.S |
      46  mov.l r2,@-sp
      49  mov.l $cpu_mode,r2
      50  mov.l @r2,r0
      58  mov.l r0,@r2 ! enter kernel mode
      59  mov.l $current_thread_info,r2
      60  mov.l @r2,r2
      63  add r2,r0
      64  mov r15,r2 ! r2 = user stack top
      70  mov.l @(5*4,r2),r0
      73  mov.l @(4*4,r2),r0
      [all …]
|
/arch/m32r/boot/compressed/ |
D | head.S |
      49  ld r2, @r3
      50  add r2, r12
      51  st r2, @r3
      63  seth r2, #high(__bss_start)
      64  or3 r2, r2, #low(__bss_start)
      65  add r2, r12
      69  sub r3, r2
      73  srli r4, #4 || addi r2, #-4
      78  ld r0, @(4,r2)
      80  st r1, @+r2 || addi r4, #-1
      [all …]
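Two boot-time idioms are visible here: a relocation fixup (load a word at @r3, add the load offset held in r12, store it back) and a BSS clear starting from __bss_start. A minimal model of both; the symbol names below stand in for the linker-provided ones:

    #include <stdint.h>

    static void relocate_word(uint32_t *slot, uint32_t offset)
    {
        *slot += offset;           /* ld r2,@r3; add r2,r12; st r2,@r3 */
    }

    static void clear_bss(uint32_t *bss_start, uint32_t *bss_end)
    {
        while (bss_start < bss_end)  /* "sub r3, r2" computes the size */
            *bss_start++ = 0;
    }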
|