/arch/m32r/lib/

D | ashxdi3.S
      10  ; input r2 shift val
      23  cmpz r2 || ldi r3, #32
      24  jc r14 || cmpu r2, r3
      28  addi r2, #-32
      29  sra r1, r2
      33  mv r3, r0 || srl r1, r2
      34  sra r0, r2 || neg r2, r2
      35  sll r3, r2
      43  cmpz r2 || ldi r3, #32
      44  jc r14 || cmpu r2, r3
      [all …]
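These are the 64-bit shift helpers: the shift count in r2 is compared against 32 to pick either the single-word path ("addi r2, #-32; sra r1, r2") or the path that stitches bits across the two halves. A minimal C sketch of the arithmetic-right-shift case, with illustrative names not taken from the m32r source:

    #include <stdint.h>

    /* 64-bit arithmetic shift right built from 32-bit halves
     * (a sketch of what the assembly above implements). */
    static int64_t ashrdi3_sketch(int64_t val, unsigned shift)
    {
        int32_t  hi = (int32_t)(val >> 32);
        uint32_t lo = (uint32_t)val;

        if (shift == 0)
            return val;
        if (shift >= 32) {
            lo = (uint32_t)(hi >> (shift - 32)); /* "addi r2, #-32; sra r1, r2" */
            hi >>= 31;                           /* replicate the sign bit */
        } else {
            /* low word picks up the bits shifted out of the high word */
            lo = (lo >> shift) | ((uint32_t)hi << (32 - shift));
            hi >>= shift;
        }
        return ((int64_t)hi << 32) | lo;
    }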
D | checksum.S
      57  ; r2: unsigned int sum
      59  push r2 || ldi r2, #0
      66  ldi r3, #0 || addx r2, r4
      67  addx r2, r3
      80  addx r2, r4 || addi r0, #2
      81  addx r2, r3
      93  ld r3, @r0+ || addx r2, r3  ; +12
      94  ld r4, @r0+ || addx r2, r4  ; +16
      95  ld r5, @r0+ || addx r2, r5  ; +20
      96  ld r3, @r0+ || addx r2, r3  ; +24
      [all …]
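The addx (add-with-carry) chains implement the Internet checksum: 32-bit words are summed with end-around carry, and the result is folded to 16 bits. A sketch of that algorithm, assuming a word-aligned, word-sized buffer; the real csum_partial also handles odd leading and trailing bytes:

    #include <stddef.h>
    #include <stdint.h>

    /* One's-complement sum: each "addx" above adds a word plus the
     * carry from the previous add; fold to 16 bits at the end. */
    static uint32_t csum_sketch(const uint32_t *buf, size_t nwords,
                                uint32_t sum)
    {
        while (nwords--) {
            uint32_t w = *buf++;
            sum += w;
            if (sum < w)            /* carry out: feed it back in */
                sum++;
        }
        sum = (sum & 0xffff) + (sum >> 16);   /* fold 32 -> 16 bits */
        sum = (sum & 0xffff) + (sum >> 16);   /* absorb the last carry */
        return sum;
    }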
D | memset.S
      23  mv r4, r0 || cmpz r2
      25  cmpui r2, #16
      27  cmpui r2, #4
      36  addi r2, #-1 || addi r3, #-1
      38  cmpui r2, #4
      45  or r1, r3 || addi r2, #-4
      47  st r1, @+r4 || addi r2, #-4
      48  bgtz r2, word_set_loop
      49  bnez r2, byte_set_wrap
      64  st r1, @+r4 || addi r2, #-16
      [all …]
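The fill byte is replicated across a register (the "or r1, r3" sequence) so the main loop can store a word per iteration, dropping to byte stores for the tail. A C sketch of the same strategy; head alignment, which the assembly handles before the word loop, is elided:

    #include <stddef.h>
    #include <stdint.h>

    static void *memset_sketch(void *dst, int c, size_t n)
    {
        unsigned char *p = dst;
        uint32_t word = (unsigned char)c;

        word |= word << 8;            /* 0xCC -> 0xCCCC */
        word |= word << 16;           /* 0xCCCC -> 0xCCCCCCCC */

        if (((uintptr_t)p & 3) == 0) {
            while (n >= 4) {          /* "st r1, @+r4 || addi r2, #-4" */
                *(uint32_t *)p = word;
                p += 4;
                n -= 4;
            }
        }
        while (n--)                   /* byte tail */
            *p++ = (unsigned char)c;
        return dst;
    }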
D | memcpy.S
      24  or r7, r1 || cmpz r2
      25  jc r14 || cmpeq r0, r1  ; return if r2=0
      30  srl3 r3, r2, #2
      31  and3 r2, r2, #3
      36  st r7, @+r4 || cmpz r2
      38  addi r4, #4 || jc r14  ; return if r2=0
      42  addi r2, #-1 || stb r7, @r4+
      43  bnez r2, byte_copy
      47  addi r2, #-1 || stb r7, @r4
      49  bnez r2, byte_copy
      [all …]
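"srl3 r3, r2, #2" and "and3 r2, r2, #3" split the byte count into a word count and a remainder. A sketch of that split for the aligned case; the assembly falls back to a pure byte_copy loop when the operands are misaligned:

    #include <stddef.h>
    #include <stdint.h>

    static void *memcpy_sketch(void *dst, const void *src, size_t n)
    {
        uint32_t *d = dst;
        const uint32_t *s = src;
        size_t words = n >> 2;        /* srl3 r3, r2, #2 */
        size_t tail  = n & 3;         /* and3 r2, r2, #3 */

        while (words--)
            *d++ = *s++;

        unsigned char *db = (unsigned char *)d;
        const unsigned char *sb = (const unsigned char *)s;
        while (tail--)
            *db++ = *sb++;
        return dst;
    }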
/arch/hexagon/kernel/

D | head.S
      50  r2.h = #0xffc0;
      51  r2.l = #0x0000;
      52  r25 = and(r2,r25);  /* R25 holds PHYS_OFFSET now */
      76  r2.l = #LO(stext);
      81  r2.h = #HI(stext);
      85  r1 = sub(r1, r2);
      92  r2.h = #0xffc0;
      93  r2.l = #0x0000;  /* round back down to 4MB boundary */
      94  r1 = and(r1,r2);
      95  r2 = lsr(r1, #22)  /* 4MB page number */
      [all …]
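The 0xffc00000 constant masks an address down to a 4MB boundary, and shifting right by 22 converts it to a 4MB page number (2^22 bytes = 4MB). The same arithmetic in C:

    #include <stdint.h>

    static uint32_t round_to_4mb(uint32_t addr)
    {
        return addr & 0xffc00000u;   /* "r2.h = #0xffc0; r2.l = #0x0000" */
    }

    static uint32_t page_4mb(uint32_t addr)
    {
        return addr >> 22;           /* "r2 = lsr(r1, #22)" */
    }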
/arch/arm/mach-socfpga/

D | self-refresh.S
      60  mrc p15, 0, r2, c15, c0, 0
      61  orr r2, r2, #1
      62  mcr p15, 0, r2, c15, c0, 0
      65  ldr r2, [r0, #SDR_CTRLGRP_LOWPWREQ_ADDR]
      66  orr r2, r2, #SELFRSHREQ_MASK
      67  str r2, [r0, #SDR_CTRLGRP_LOWPWREQ_ADDR]
      72  ldr r2, [r0, #SDR_CTRLGRP_LOWPWRACK_ADDR]
      73  and r2, r2, #SELFRFSHACK_MASK
      74  cmp r2, #SELFRFSHACK_MASK
     101  ldr r2, [r0, #SDR_CTRLGRP_LOWPWREQ_ADDR]
      [all …]
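The sequence is a request/acknowledge handshake with the SDRAM controller: set SELFRSHREQ in the LOWPWREQ register, then poll LOWPWRACK until SELFRFSHACK is asserted. A sketch in C; the offset and mask values below are placeholders for illustration, only the symbol names come from the assembly:

    #include <stdint.h>

    /* Placeholder offsets/masks; the assembly only shows the names. */
    #define SDR_CTRLGRP_LOWPWREQ_ADDR   0x54
    #define SDR_CTRLGRP_LOWPWRACK_ADDR  0x58
    #define SELFRSHREQ_MASK             (1u << 3)
    #define SELFRFSHACK_MASK            (1u << 3)

    static void sdram_enter_self_refresh(volatile uint8_t *sdr_base)
    {
        volatile uint32_t *req =
            (volatile uint32_t *)(sdr_base + SDR_CTRLGRP_LOWPWREQ_ADDR);
        volatile uint32_t *ack =
            (volatile uint32_t *)(sdr_base + SDR_CTRLGRP_LOWPWRACK_ADDR);

        *req |= SELFRSHREQ_MASK;          /* request self-refresh */
        while ((*ack & SELFRFSHACK_MASK) != SELFRFSHACK_MASK)
            ;                             /* spin until acknowledged */
    }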
/arch/sh/lib/

D | __clear_user.S
      21  mov r4, r2
      26  add #31, r2
      27  and r1, r2
      28  cmp/eq r4, r2
      30  mov r2, r3
      33  mov r4, r2
      36  0: mov.b r0, @r2
      38  add #1, r2
      41  mov r2, r4
      46  cmp/hi r2, r3
      [all …]
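"add #31, r2" followed by the and rounds the user pointer up to the next 32-byte boundary; the byte loop at label 0 clears up to that boundary before the bulk loop takes over. The arithmetic in C:

    #include <stdint.h>

    /* Round p up to the next 32-byte boundary:
     * "add #31, r2" then mask, i.e. (p + 31) & ~31. */
    static unsigned char *align_up_32(unsigned char *p)
    {
        return (unsigned char *)(((uintptr_t)p + 31) & ~(uintptr_t)31);
    }

    /* Byte loop up to the boundary, as at label 0 in the assembly. */
    static unsigned char *clear_head(unsigned char *p, unsigned char *end)
    {
        while (p < end)
            *p++ = 0;
        return p;
    }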
/arch/arm/lib/

D | findbit.S
      27  mov r2, #0
      29  ARM( ldrb r3, [r0, r2, lsr #3] )
      30  THUMB( lsr r3, r2, #3 )
      34  add r2, r2, #8  @ next bit pointer
      35  2: cmp r2, r1  @ any more?
      48  ands ip, r2, #7
      50  ARM( ldrb r3, [r0, r2, lsr #3] )
      51  THUMB( lsr r3, r2, #3 )
      56  orr r2, r2, #7  @ if zero, then no bits here
      57  add r2, r2, #1  @ align bit pointer
      [all …]
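The loop fetches the byte that contains the current bit index ("ldrb r3, [r0, r2, lsr #3]") and advances eight bits at a time, only examining individual bits once an interesting byte is found. A sketch of the set-bit search; the zero-bit variant simply inverts the loaded byte first:

    #include <stddef.h>

    static size_t find_first_bit_sketch(const unsigned char *addr,
                                        size_t maxbit)
    {
        size_t bit = 0;

        while (bit < maxbit) {
            unsigned char byte = addr[bit >> 3];
            if (byte) {
                while (!(byte & 1)) {   /* locate the bit in the byte */
                    byte >>= 1;
                    bit++;
                }
                return bit < maxbit ? bit : maxbit;
            }
            bit += 8;    /* "add r2, r2, #8 @ next bit pointer" */
        }
        return maxbit;   /* not found */
    }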
D | memzero.S
      26  strltb r2, [r0], #1  @ 1
      27  strleb r2, [r0], #1  @ 1
      28  strb r2, [r0], #1  @ 1
      36  mov r2, #0  @ 1
      55  mov ip, r2  @ 1
      56  mov lr, r2  @ 1
      59  stmgeia r0!, {r2, r3, ip, lr}  @ 4
      60  stmgeia r0!, {r2, r3, ip, lr}  @ 4
      61  stmgeia r0!, {r2, r3, ip, lr}  @ 4
      62  stmgeia r0!, {r2, r3, ip, lr}  @ 4
      [all …]
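Zero is kept in four registers (r2, r3, ip, lr) so each stmia clears 16 bytes, and the unrolled loop issues four of them per iteration, 64 bytes at a time. A rough C equivalent of one stmia step, with the alignment preamble omitted:

    #include <stddef.h>
    #include <stdint.h>

    static void memzero_sketch(uint32_t *p, size_t nbytes)
    {
        /* each iteration mirrors one "stmia r0!, {r2, r3, ip, lr}" */
        while (nbytes >= 16) {
            p[0] = 0;
            p[1] = 0;
            p[2] = 0;
            p[3] = 0;
            p += 4;
            nbytes -= 16;
        }
        for (unsigned char *b = (unsigned char *)p; nbytes--; b++)
            *b = 0;
    }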
D | bitops.h
      10  mov r2, #1
      19  mov r3, r2, lsl r3
      20  1: ldrex r2, [r1]
      21  \instr r2, r2, r3
      22  strex r0, r2, [r1]
      35  mov r2, #1
      39  mov r3, r2, lsl r3  @ create mask
      46  1: ldrex r2, [r1]
      47  ands r0, r2, r3  @ save old value of bit
      48  \instr r2, r2, r3  @ toggle bit
      [all …]
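The pattern between ldrex and strex is the classic load-exclusive/modify/store-exclusive retry loop: if another CPU touched the word in between, strex fails and the sequence is retried. C11 atomics express the same loop portably (a sketch, not the kernel's actual implementation):

    #include <stdatomic.h>

    static void set_bit_sketch(_Atomic unsigned int *word, unsigned int bit)
    {
        unsigned int old = atomic_load(word);
        unsigned int new;

        do {
            new = old | (1u << bit);   /* "\instr r2, r2, r3" with orr */
        } while (!atomic_compare_exchange_weak(word, &old, new));
    }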
D | getuser.S
      36  check_uaccess r0, 1, r1, r2, __get_user_bad
      37  1: TUSER(ldrb) r2, [r0]
      43  check_uaccess r0, 2, r1, r2, __get_user_bad
      46  2: ldrbt r2, [r0], #1
      50  2: ldrb r2, [r0]
      54  orr r2, r2, rb, lsl #8
      56  orr r2, rb, r2, lsl #8
      63  check_uaccess r0, 4, r1, r2, __get_user_bad
      64  4: TUSER(ldr) r2, [r0]
      70  check_uaccess r0, 8, r1, r2, __get_user_bad8
      [all …]
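For the 2-byte case on CPUs that cannot load a halfword in one access, the value is assembled from two byte loads, with the merge order depending on endianness ("orr r2, r2, rb, lsl #8" little-endian vs "orr r2, rb, r2, lsl #8" big-endian). The same step in C:

    #include <stdint.h>

    static uint16_t get_u16_sketch(const uint8_t *p, int big_endian)
    {
        uint8_t b0 = p[0], b1 = p[1];   /* two single-byte loads */

        /* "orr r2, r2, rb, lsl #8" (LE) vs "orr r2, rb, r2, lsl #8" (BE) */
        return big_endian ? (uint16_t)((b0 << 8) | b1)
                          : (uint16_t)((b1 << 8) | b0);
    }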
/arch/hexagon/lib/

D | memset.S
      42  p0 = cmp.eq(r2, #0)
      43  p1 = cmp.gtu(r2, #7)
      60  loop0(1f, r2)  /* byte loop */
      72  p1 = cmp.eq(r2, #1)
      85  p1 = cmp.eq(r2, #2)
      97  p0 = cmp.gtu(r2, #7)
      98  p1 = cmp.eq(r2, #4)
     104  p0 = cmp.gtu(r2, #11)
     110  r10 = lsr(r2, #3)
     127  p1 = cmp.eq(r2, #8)
      [all …]
/arch/arc/lib/

D | strchr-700.S
      19  bmsk r2,r0,1
      22  breq.d r2,r0,.Laligned
      24  sub_s r0,r0,r2
      25  asl r7,r2,3
      26  ld_s r2,[r0]
      34  sub r12,r2,r7
      35  bic_s r12,r12,r2
      38  xor r6,r2,r5
      39  ld.a r2,[r0,4]
      50  bic r2,r7,r6
      [all …]
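The "sub r12,r2,r7" / "bic_s r12,r12,r2" pair is the classic word-at-a-time zero-byte test, and the XOR with the replicated target character ("xor r6,r2,r5") reuses it to spot a match. A sketch; it assumes the string is word-aligned and readable in whole words, as the assembly arranges:

    #include <stddef.h>
    #include <stdint.h>

    #define ONES  0x01010101u
    #define HIGHS 0x80808080u
    /* nonzero iff some byte lane of x is zero */
    #define HAS_ZERO(x) (((x) - ONES) & ~(x) & HIGHS)

    static const char *strchr_sketch(const uint32_t *p, unsigned char c)
    {
        uint32_t pat = c * ONES;          /* replicate c into every lane */

        for (;;) {
            uint32_t w = *p;
            /* stop on a word holding the target or the terminator */
            if (HAS_ZERO(w ^ pat) | HAS_ZERO(w))
                break;
            p++;
        }
        const char *s = (const char *)p;  /* pin down the exact byte */
        while (*s && *s != (char)c)
            s++;
        return *s == (char)c ? s : NULL;
    }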
D | strcmp.S
      19  or r2,r0,r1
      20  bmsk_s r2,r2,1
      21  brne r2,0,.Lcharloop
      25  ld.ab r2,[r0,4]
      28  sub r4,r2,r12
      29  bic r4,r4,r2
      32  breq r2,r3,.Lwordloop
      34  xor r0,r2,r3  ; mask for difference
      39  and_s r2,r2,r0
      42  cmp_s r2,r3
      [all …]
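The word loop runs while both words are equal and free of zero bytes; "xor r0,r2,r3" then marks where they differ. A structural sketch, assuming word-aligned inputs and whole-word reads, as the assembly's alignment check ("bmsk_s r2,r2,1") guarantees:

    #include <stdint.h>

    #define ONES  0x01010101u
    #define HIGHS 0x80808080u

    static int strcmp_sketch(const char *s1, const char *s2)
    {
        const uint32_t *w1 = (const uint32_t *)s1;
        const uint32_t *w2 = (const uint32_t *)s2;

        /* stay in the word loop while equal and NUL-free */
        while (*w1 == *w2 && !((*w1 - ONES) & ~*w1 & HIGHS)) {
            w1++;
            w2++;
        }
        /* byte loop pins down the first difference (or common NUL) */
        const unsigned char *p1 = (const unsigned char *)w1;
        const unsigned char *p2 = (const unsigned char *)w2;
        while (*p1 && *p1 == *p2) {
            p1++;
            p2++;
        }
        return *p1 - *p2;
    }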
D | strcmp-archs.S
      12  or r2, r0, r1
      13  bmsk_s r2, r2, 1
      14  brne r2, 0, @.Lcharloop
      17  ld.ab r2, [r0, 4]
      25  sub r4, r2, r12
      27  bic r4, r4, r2
      31  cmp r2, r3
      33  mov.eq r2, r5
      39  swape r2, r2
      43  cmp_s r2, r3
      [all …]
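"swape r2, r2" is the little-endian refinement: byte-swapping both words puts the first string byte in the most significant lane, so a single unsigned comparison yields the lexicographic result without isolating the differing byte. In C, assuming the caller has already ruled out a NUL before the difference:

    #include <stdint.h>

    /* Compare two NUL-free words lexicographically on a little-endian
     * machine: byte-swap both, then an unsigned compare is enough. */
    static int word_cmp_le(uint32_t w1, uint32_t w2)
    {
        uint32_t a = __builtin_bswap32(w1);   /* "swape r2, r2" */
        uint32_t b = __builtin_bswap32(w2);

        return a < b ? -1 : (a > b);
    }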
D | strcpy-700.S
      22  or r2,r0,r1
      23  bmsk_s r2,r2,1
      24  brne.d r2,0,charloop
      30  sub r2,r3,r8
      31  bic_s r2,r2,r3
      32  tst_s r2,r12
      41  sub r2,r3,r8
      42  bic_s r2,r2,r3
      43  tst_s r2,r12
      46  sub r2,r4,r8
      [all …]
/arch/arm/mach-imx/

D | suspend-imx53.S
      54  add r2, r0, #SUSPEND_INFO_MX53_IO_STATE_OFFSET
      58  ldr r5, [r2], #12  /* IOMUXC register offset */
      60  str r6, [r2], #4  /* save area */
      67  ldr r2, [r1, #M4IF_MCR0_OFFSET]
      68  orr r2, r2, #M4IF_MCR0_FDVFS
      69  str r2, [r1, #M4IF_MCR0_OFFSET]
      73  ldr r2, [r1, #M4IF_MCR0_OFFSET]
      74  ands r2, r2, #M4IF_MCR0_FDVACK
      82  add r2, r0, #SUSPEND_INFO_MX53_IO_STATE_OFFSET
      86  ldr r5, [r2], #4  /* IOMUXC register offset */
      [all …]
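The code walks a table of IOMUX descriptors, saving each pad register before reprogramming it for suspend and restoring it on resume. A sketch of that table walk; the three-field layout below is an assumption for illustration, since only the offset/value/save-area roles are visible in the snippet:

    #include <stdint.h>

    struct io_state {
        uint32_t offset;     /* IOMUXC register offset */
        uint32_t lp_value;   /* value to program for suspend */
        uint32_t saved;      /* save area for the original value */
    };

    static void iomux_suspend(volatile uint8_t *iomuxc,
                              struct io_state *tbl, int n)
    {
        for (int i = 0; i < n; i++) {
            volatile uint32_t *reg =
                (volatile uint32_t *)(iomuxc + tbl[i].offset);
            tbl[i].saved = *reg;        /* save current pad state */
            *reg = tbl[i].lp_value;     /* enter low-power setting */
        }
    }

    static void iomux_resume(volatile uint8_t *iomuxc,
                             struct io_state *tbl, int n)
    {
        for (int i = 0; i < n; i++) {
            volatile uint32_t *reg =
                (volatile uint32_t *)(iomuxc + tbl[i].offset);
            *reg = tbl[i].saved;        /* restore saved pad state */
        }
    }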
/arch/arm/crypto/

D | sha256-core.S_shipped
     102  add r2,r1,r2,lsl#6  @ len to point at the end of inp
     103  stmdb sp!,{r0,r1,r2,r4-r11,lr}
     109  ldr r2,[r1],#4
     111  ldrb r2,[r1,#3]
     116  @ ldr r2,[r1],#4  @ 0
     124  rev r2,r2
     127  @ ldrb r2,[r1,#3]  @ 0
     131  orr r2,r2,r12,lsl#8
     133  orr r2,r2,r0,lsl#16
     138  orr r2,r2,r12,lsl#24
      [all …]
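SHA-256 consumes the message as big-endian 32-bit words, so aligned input is loaded whole and fixed up with "rev", while unaligned input is assembled byte by byte with the ldrb/orr chain. The equivalent load in C:

    #include <stdint.h>

    /* Load a big-endian 32-bit word from an arbitrarily aligned
     * byte pointer (what the ldrb/orr chain above builds up). */
    static uint32_t load_be32(const uint8_t *p)
    {
        return ((uint32_t)p[0] << 24) |
               ((uint32_t)p[1] << 16) |
               ((uint32_t)p[2] << 8)  |
                (uint32_t)p[3];
    }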
/arch/nios2/boot/compressed/

D | head.S
      26  movui r2, NIOS2_ICACHE_LINE_SIZE
      28  sub r1, r1, r2
      32  movui r2, NIOS2_DCACHE_LINE_SIZE
      34  sub r1, r1, r2
      39  movia r2, chkadr
      40  beq r1, r2, finish_move  /* We are running in correct address,
      44  movia r2, _start  /* Destination */
      47  stw r8, 0(r2)  /* store a word to dest [r2] */
      49  addi r2, r2, 4  /* inc the dest addr */
      50  blt r2, r3, 1b
      [all …]
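The code compares its run-time address (via chkadr) with the link address and, if they differ, copies the image word by word to _start before continuing there. A sketch of the move loop; the cache flushes that surround it in the assembly are omitted:

    #include <stdint.h>

    static void relocate_sketch(const uint32_t *src, uint32_t *dst,
                                uint32_t *dst_end)
    {
        if ((const uint32_t *)dst == src)
            return;              /* "beq r1, r2, finish_move" */
        while (dst < dst_end)
            *dst++ = *src++;     /* "stw r8, 0(r2)" / "addi r2, r2, 4" */
    }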
/arch/blackfin/mach-bf561/

D | atomic.S
      46  safe_testset p0, r2;
      48  SSYNC(r2);
      53  CSYNC(r2);
      55  SSYNC(r2);
      70  safe_testset p0, r2;
      72  SSYNC(r2);
      81  CSYNC(r2);
      97  SSYNC(r2);
     132  SSYNC(r2);
     170  SSYNC(r2);
      [all …]
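safe_testset implements an atomic test-and-set, and the SSYNC/CSYNC pairs are the system/core synchronization barriers around it. The same acquire/release discipline in portable C11 (a sketch, not the Blackfin implementation):

    #include <stdatomic.h>

    static void lock_sketch(atomic_flag *lock)
    {
        /* spin until testset succeeds, acquiring on success */
        while (atomic_flag_test_and_set_explicit(lock,
                                                 memory_order_acquire))
            ;
    }

    static void unlock_sketch(atomic_flag *lock)
    {
        atomic_flag_clear_explicit(lock, memory_order_release);
    }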
/arch/m32r/mm/

D | mmu.S
      32  st r2, @-sp
      49  ;; r2: &tlb_entry_{i|d}_dat
      53  seth r2, #high(tlb_entry_d_dat)
      54  or3 r2, r2, #low(tlb_entry_d_dat)
      57  seth r2, #high(tlb_entry_d_dat)
      58  or3 r2, r2, #low(tlb_entry_d_dat)
      62  add r2, r1
      78  ;; r2: &tlb_entry_{i|d}_dat
      86  seth r2, #high(tlb_entry_i_dat)
      87  or3 r2, r2, #low(tlb_entry_i_dat)
      [all …]
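seth/or3 is how m32r materializes a 32-bit address in two 16-bit halves, the same split as MIPS lui/ori. Expressed in C:

    #include <stdint.h>

    #define HIGH16(x) (((uint32_t)(x) >> 16) & 0xffffu)
    #define LOW16(x)  ((uint32_t)(x) & 0xffffu)

    static uint32_t build_address(uint32_t sym)
    {
        uint32_t r2 = HIGH16(sym) << 16;   /* seth r2, #high(sym) */
        r2 |= LOW16(sym);                  /* or3 r2, r2, #low(sym) */
        return r2;
    }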
/arch/sh/lib64/

D | copy_page.S
      49  alloco r2, 0x00
      51  alloco r2, 0x20
      55  add r2, r6, r6
      58  sub r3, r2, r60
      70  bge/u r2, r6, tr2  ! skip prefetch for last 4 lines
      71  ldx.q r2, r22, r63  ! prefetch 4 lines hence
      74  bge/u r2, r7, tr3  ! skip alloco for last 2 lines
      75  alloco r2, 0x40  ! alloc destination line 2 lines ahead
      78  ldx.q r2, r60, r36
      79  ldx.q r2, r61, r37
      [all …]
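The copy is software-pipelined: "ldx.q r2, r22, r63" prefetches the source line four iterations ahead into the discard register r63, and "alloco" pre-allocates the destination cache line two iterations ahead. A portable sketch of the same idea, assuming a 32-byte line and a size that is a multiple of it:

    #include <string.h>

    /* Copy line by line, prefetching a few lines ahead (GCC/Clang's
     * __builtin_prefetch stands in for "ldx.q ... r63"). */
    static void copy_page_sketch(char *dst, const char *src,
                                 unsigned long size)
    {
        for (unsigned long off = 0; off < size; off += 32) {
            __builtin_prefetch(src + off + 4 * 32);  /* 4 lines ahead */
            memcpy(dst + off, src + off, 32);
        }
    }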
/arch/unicore32/lib/

D | findbit.S
      23  mov r2, #0
      24  1: ldb r3, [r0+], r2 >> #3
      27  add r2, r2, #8  @ next bit pointer
      28  2: csub.a r2, r1  @ any more?
      42  and.a ip, r2, #7
      44  ldb r3, [r0+], r2 >> #3
      48  or r2, r2, #7  @ if zero, then no bits here
      49  add r2, r2, #1  @ align bit pointer
      61  mov r2, #0
      62  1: ldb r3, [r0+], r2 >> #3
      [all …]
/arch/m32r/boot/compressed/

D | head.S
      49  ld r2, @r3
      50  add r2, r12
      51  st r2, @r3
      63  seth r2, #high(__bss_start)
      64  or3 r2, r2, #low(__bss_start)
      65  add r2, r12
      69  sub r3, r2
      73  srli r4, #4 || addi r2, #-4
      78  ld r0, @(4,r2)
      80  st r1, @+r2 || addi r4, #-1
      [all …]
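Here r12 carries the difference between the load address and the link address: stored pointers ("ld r2, @r3 / add r2, r12 / st r2, @r3") and symbols such as __bss_start are corrected by that offset before the BSS is cleared. The fixups in C, with illustrative names:

    #include <stdint.h>

    /* "ld r2, @r3; add r2, r12; st r2, @r3": patch a stored pointer. */
    static void patch_pointer(uint32_t *slot, uint32_t load_offset)
    {
        *slot += load_offset;
    }

    /* "seth/or3" + "add r2, r12": fix a link-time symbol address. */
    static uint32_t fixup_symbol(uint32_t link_addr, uint32_t load_offset)
    {
        return link_addr + load_offset;
    }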
/arch/nios2/kernel/

D | head.S
      69  movui r2, NIOS2_ICACHE_LINE_SIZE
      73  sub r1, r1, r2
     119  movui r2, NIOS2_DCACHE_LINE_SIZE
     123  sub r1, r1, r2
     128  movia r2, chkadr
     129  beq r1, r2, finish_move  /* We are running in RAM done */
     131  movia r2, _start  /* Destination */
     136  stw r8, 0(r2)  /* store a word to dest [r2] */
     137  flushd 0(r2)  /* Flush cache for safety */
     139  addi r2, r2, 4  /* inc the dest addr */
      [all …]