/external/libvpx/vp8/encoder/arm/armv5te/ |
D | vp8_packtokens_mbrow_armv5.asm |
  29  push {r4-r11, lr}
  33  ldr r4, _VP8_COMP_common_
  35  add r4, r0, r4
  37  ldr r5, [r4, r6] ; load up mb_rows
  43  ldr r4, _VP8_COMP_tplist_
  44  add r4, r0, r4
  45  ldr r7, [r4, #0] ; dereference cpi->tp_list
  66  ldr r4, [sp, #20] ; vp8_coef_encodings
  68  add r4, r4, r6, lsl #3 ; a = vp8_coef_encodings + t
  73  ldr r6, [r4, #vp8_token_value] ; v
  [all …]
|
D | vp8_packtokens_armv5.asm |
  29  push {r4-r11, lr}
  44  ldr r4, [sp, #8] ; vp8_coef_encodings
  46  add r4, r4, r6, lsl #3 ; a = vp8_coef_encodings + t
  51  ldr r6, [r4, #vp8_token_value] ; v
  52  ldr r8, [r4, #vp8_token_len] ; n
  59  rsb r4, r8, #32 ; 32-n
  63  lsl r12, r6, r4 ; r12 = v << 32 - n
  67  ldrb r4, [r9, lr, asr #1] ; pp [i>>1]
  74  mul r4, r4, r7 ; ((range-1) * pp[i>>1]))
  82  add r4, r7, r4, lsr #8 ; 1 + (((range-1) * pp[i>>1]) >> 8)
  [all …]
|
D | vp8_packtokens_partitions_armv5.asm |
  31  push {r4-r11, lr}
  35  ldr r4, _VP8_COMP_common_
  37  add r4, r0, r4
  39  ldr r5, [r4, r6] ; load up mb_rows
  54  ldr r4, _VP8_COMP_tplist_
  55  add r4, r0, r4
  56  ldr r7, [r4, #0] ; dereference cpi->tp_list
  96  ldr r4, [sp, #80] ; vp8_coef_encodings
  98  add r4, r4, r6, lsl #3 ; a = vp8_coef_encodings + t
  103  ldr r6, [r4, #vp8_token_value] ; v
  [all …]
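
The three pack-tokens variants above share the same inner step: each token t is looked up in vp8_coef_encodings to get a (value, length) pair, the value is left-aligned ("v << 32 - n"), and its bits are fed most-significant-first into the boolean coder with a probability taken from pp[i>>1]. A rough C sketch of that loop follows; the struct layout and the simplified probability selection are assumptions, not the exact libvpx code, and the boolean coder itself is sketched after the boolhuff entry below.

    #include <stdint.h>

    typedef struct BOOL_CODER BOOL_CODER;                    /* opaque in this sketch */
    void vp8_encode_bool(BOOL_CODER *bc, int bit, int prob); /* sketched after the boolhuff entry below */

    typedef struct { int value; int len; } token_encoding;   /* assumed to match #vp8_token_value / #vp8_token_len */

    static void write_token_sketch(BOOL_CODER *bc, const token_encoding *encodings,
                                   int t, const uint8_t *pp) {
        unsigned int v = (unsigned int)encodings[t].value;   /* "a = vp8_coef_encodings + t" */
        int n = encodings[t].len;
        unsigned int bits = v << (32 - n);                   /* "v << 32 - n": left-align so the MSB comes out first */
        for (int i = 0; i < n; i++) {
            int bit = (int)(bits >> 31);
            bits <<= 1;
            /* simplified: the real code walks the coefficient tree and uses pp[i>>1]
               with i being the tree position, not this loop counter */
            vp8_encode_bool(bc, bit, pp[i]);
        }
    }
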
|
D | boolhuff_armv5te.asm |
  45  push {r4-r9, lr}
  47  mov r4, r2
  56  mul r4, r4, r7 ; ((range-1) * probability)
  59  add r4, r7, r4, lsr #8 ; 1 + (((range-1) * probability) >> 8)
  61  addne r2, r2, r4 ; if (bit) lowvalue += split
  62  subne r4, r5, r4 ; if (bit) range = range-split
  65  clz r6, r4
  71  lsl r5, r4, r6 ; range <<= shift
  75  sub r4, r6, #1 ; offset-1
  76  lsls r4, r2, r4 ; if((lowvalue<<(offset-1)) & 0x80000000 )
  [all …]
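
boolhuff_armv5te.asm is the boolean (arithmetic) encoder those token routines call into, and its comments spell out the core arithmetic: split = 1 + (((range - 1) * probability) >> 8), then lowvalue and range are updated depending on the bit, and clz drives renormalisation. A hedged C sketch of that update, with the carry propagation and byte output omitted:

    typedef struct {
        unsigned int lowvalue;
        unsigned int range;
        /* output buffer, bit count and carry handling are omitted in this sketch */
    } BOOL_CODER;

    void vp8_encode_bool(BOOL_CODER *bc, int bit, int probability) {
        unsigned int split = 1 + (((bc->range - 1) * (unsigned int)probability) >> 8);

        if (bit) {
            bc->lowvalue += split;      /* "if (bit) lowvalue += split"      */
            bc->range    -= split;      /* "if (bit) range = range-split"    */
        } else {
            bc->range = split;
        }

        /* renormalise: bring range back into [128, 255]; the assembly derives
           the shift count with clz instead of looping */
        while (bc->range < 128) {
            bc->range    <<= 1;
            bc->lowvalue <<= 1;         /* the real code pushes the bit that falls
                                           out of bit 31 into the output buffer */
        }
    }
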
|
/external/llvm/test/MC/ARM/ |
D | basic-arm-instructions.s |
  48  adc r4, r5, r6
  50  adc r4, r5, r6, lsl #1
  51  adc r4, r5, r6, lsl #31
  52  adc r4, r5, r6, lsr #1
  53  adc r4, r5, r6, lsr #31
  54  adc r4, r5, r6, lsr #32
  55  adc r4, r5, r6, asr #1
  56  adc r4, r5, r6, asr #31
  57  adc r4, r5, r6, asr #32
  58  adc r4, r5, r6, ror #1
  [all …]
|
D | basic-thumb2-instructions.s |
  27  adc r4, r2, #0x7f800000
  28  adc r4, r2, #0x00000680
  37  @ CHECK: adc r4, r2, #2139095040 @ encoding: [0x42,0xf1,0xff,0x44]
  38  @ CHECK: adc r4, r2, #1664 @ encoding: [0x42,0xf5,0xd0,0x64]
  43  adc r4, r5, r6
  44  adcs r4, r5, r6
  52  @ CHECK: adc.w r4, r5, r6 @ encoding: [0x45,0xeb,0x06,0x04]
  53  @ CHECK: adcs.w r4, r5, r6 @ encoding: [0x55,0xeb,0x06,0x04]
  68  addeq r4, r5, #293
  84  @ CHECK: addweq r4, r5, #293 @ encoding: [0x05,0xf2,0x25,0x14]
  [all …]
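
The Thumb-2 test above exercises the "modified immediate" constant form: #0x7f800000 and #0x00000680 are legal adc operands because each is an 8-bit pattern the encoding can splat or rotate into place, which is what the immediate bits in the CHECK lines carry. Below is a small self-contained checker for that rule, my own paraphrase of the ThumbExpandImm cases rather than code from LLVM:

    #include <stdbool.h>
    #include <stdint.h>

    /* Returns true if x can be a Thumb-2 modified immediate: one of the
       byte-splat forms, or an 8-bit value with its top bit set rotated
       right by 8..31. */
    static bool is_thumb2_modified_imm(uint32_t x) {
        uint32_t b = x & 0xff;
        if (x <= 0xff) return true;                        /* 00 00 00 ab */
        if (x == ((b << 16) | b)) return true;             /* 00 ab 00 ab */
        if (x == ((b << 24) | (b << 8))) return true;      /* ab 00 ab 00 */
        if (x == b * 0x01010101u) return true;             /* ab ab ab ab */
        for (int rot = 8; rot <= 31; rot++) {
            uint32_t v = (x << rot) | (x >> (32 - rot));   /* rotate left to undo the ROR */
            if (v >= 0x80 && v <= 0xff) return true;
        }
        return false;
    }

    /* is_thumb2_modified_imm(0x7f800000) and is_thumb2_modified_imm(0x00000680)
       both return true; a value such as 0x00abcdef does not. */
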
|
/external/libvpx/vpx_scale/arm/armv4/ |
D | gen_scalers_armv4.asm |
  58  stmdb sp!, {r4 - r11, lr}
  68  and r4, r3, mask ; a = src[0]
  70  strb r4, [dest], #1
  72  orr r6, r4, r5, lsl #16 ; b | a
  104  and r4, r3, mask
  106  strb r4, [dest], #1
  108  orr r6, r4, r5, lsl #16 ; b | a
  132  ldmia sp!, {r4 - r11, pc}
  161  stmdb sp!, {r4 - r11, lr}
  168  ldrb r4, [r3], r1 ; a = des [0]
  [all …]
|
/external/libvpx/vp8/common/arm/armv6/ |
D | copymem8x8_v6.asm |
  21  ;push {r4-r5}
  22  stmdb sp!, {r4-r5}
  29  ands r4, r0, #7
  32  ands r4, r0, #3
  36  ldrb r4, [r0]
  42  strb r4, [r2]
  45  ldrb r4, [r0, #2]
  50  strb r4, [r2, #2]
  53  ldrb r4, [r0, #4]
  56  strb r4, [r2, #4]
  [all …]
|
D | copymem8x4_v6.asm |
  21  ;push {r4-r5}
  22  stmdb sp!, {r4-r5}
  29  ands r4, r0, #7
  32  ands r4, r0, #3
  36  ldrb r4, [r0]
  42  strb r4, [r2]
  45  ldrb r4, [r0, #2]
  50  strb r4, [r2, #2]
  53  ldrb r4, [r0, #4]
  56  strb r4, [r2, #4]
  [all …]
|
D | copymem16x16_v6.asm |
  21  stmdb sp!, {r4 - r7}
  22  ;push {r4-r7}
  29  ands r4, r0, #15
  32  ands r4, r0, #7
  35  ands r4, r0, #3
  39  ldrb r4, [r0]
  47  strb r4, [r2]
  52  ldrb r4, [r0, #4]
  59  strb r4, [r2, #4]
  64  ldrb r4, [r0, #8]
  [all …]
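
The three copymem variants above (8x8, 8x4, 16x16) all open the same way: "ands r4, r0, #15/#7/#3" tests the source pointer's alignment, and the unaligned fallback copies the block one byte at a time with ldrb/strb. In C the dispatch amounts to something like the sketch below; the names and the per-row memcpy are my own simplification, not the libvpx reference code:

    #include <stdint.h>
    #include <string.h>

    static void copy_mem16x16_sketch(const uint8_t *src, int src_stride,
                                     uint8_t *dst, int dst_stride) {
        int aligned = (((uintptr_t)src & 3) == 0);   /* the "ands r4, r0, #3" test */

        for (int r = 0; r < 16; r++) {
            if (aligned) {
                memcpy(dst, src, 16);                /* word/doubleword moves in the assembly */
            } else {
                for (int c = 0; c < 16; c++)         /* byte-at-a-time ldrb/strb path */
                    dst[c] = src[c];
            }
            src += src_stride;
            dst += dst_stride;
        }
    }
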
|
D | recon_v6.asm |
  41  stmdb sp!, {r4 - r9, lr}
  44  ldr r4, [prd], #16 ; 3 | 2 | 1 | 0
  51  uxtab16 r8, r8, r4 ; 2 | 0 + 3 | 2 | 2 | 0
  52  uxtab16 r9, r9, r4, ror #8 ; 3 | 1 + 0 | 3 | 2 | 1
  62  ldr r4, [prd], #16 ; 3 | 2 | 1 | 0
  71  uxtab16 r8, r8, r4 ; 2 | 0 + 3 | 2 | 2 | 0
  72  uxtab16 r9, r9, r4, ror #8 ; 3 | 1 + 0 | 3 | 2 | 1
  82  ldr r4, [prd], #16 ; 3 | 2 | 1 | 0
  91  uxtab16 r8, r8, r4 ; 2 | 0 + 3 | 2 | 2 | 0
  92  uxtab16 r9, r9, r4, ror #8 ; 3 | 1 + 0 | 3 | 2 | 1
  [all …]
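
recon_v6.asm adds a block of 16-bit residual values to 8-bit prediction bytes: uxtab16 zero-extends two prediction bytes and adds them to two packed differences at once, once as-is and once with "ror #8" to reach the odd bytes. The scalar operation it vectorises is a clamp-and-add per pixel, roughly as follows; the parameter names and the 4x4 block shape are assumptions, not the exact libvpx prototype:

    #include <stdint.h>

    static uint8_t clamp_u8(int v) {
        return (uint8_t)(v < 0 ? 0 : (v > 255 ? 255 : v));
    }

    static void recon_block_sketch(const uint8_t *pred, const int16_t *diff,
                                   uint8_t *dst, int stride) {
        for (int r = 0; r < 4; r++) {
            for (int c = 0; c < 4; c++)
                dst[c] = clamp_u8(pred[c] + diff[c]);
            pred += 16;        /* the "[prd], #16" post-increment above */
            diff += 16;
            dst  += stride;
        }
    }
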
|
D | dc_only_idct_add_v6.asm |
  23  stmdb sp!, {r4 - r7, lr}
  27  ldr r4, [r1], r3
  33  uxtab16 r5, r0, r4 ; a1+2 | a1+0
  34  uxtab16 r4, r0, r4, ror #8 ; a1+3 | a1+1
  38  usat16 r4, #8, r4
  41  orr r5, r5, r4, lsl #8
  43  ldr r4, [r1], r3
  48  uxtab16 r5, r0, r4
  49  uxtab16 r4, r0, r4, ror #8
  53  usat16 r4, #8, r4
  [all …]
|
/external/qemu/distrib/jpeg-6b/ |
D | armv6_idct.S |
  77  stmdb sp!, {r4, r5, r6, r7, r8, r9, r10, r11, r12, r14}
  80  sub r4, sp, #236
  81  bic sp, r4, #31
  84  stm r5, {r2, r3, r4}
  92  ldrsh r4, [r14, #-2] !
  102  mul r4, r8, r4
  117  mla r0, r10, r0, r4
  125  rsb r4, r0, r4, lsl #1
  175  add r4, r4, r2
  177  sub r2, r4, r2, lsl #1
  [all …]
|
/external/jpeg/ |
D | armv6_idct.S |
  77  stmdb sp!, {r4, r5, r6, r7, r8, r9, r10, r11, r12, r14}
  80  sub r4, sp, #236
  81  bic sp, r4, #31
  84  stm r5, {r2, r3, r4}
  92  ldrsh r4, [r14, #-2] !
  102  mul r4, r8, r4
  117  mla r0, r10, r0, r4
  125  rsb r4, r0, r4, lsl #1
  175  add r4, r4, r2
  177  sub r2, r4, r2, lsl #1
  [all …]
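
armv6_idct.S (the same file appears under external/qemu's bundled jpeg-6b and under external/jpeg) aligns a scratch buffer with "bic sp, r4, #31", dequantises coefficients with mul/mla, and then runs the IDCT butterflies. The "add r4, r4, r2" / "sub r2, r4, r2, lsl #1" pair is a two-instruction butterfly: after the add, subtracting 2*b recovers the difference. In C terms:

    /* The two-instruction butterfly: given a and b, produce a+b and a-b
       without needing a scratch register for the original a. */
    static void butterfly_sketch(int *a, int *b) {
        *a = *a + *b;           /* add r4, r4, r2          -> a' = a + b */
        *b = *a - 2 * (*b);     /* sub r2, r4, r2, lsl #1  -> b' = a - b */
    }
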
|
/external/llvm/test/MC/Disassembler/ARM/ |
D | basic-arm-instructions.txt |
  37  # CHECK: adc r4, r5, r6
  39  # CHECK: adc r4, r5, r6, lsl #1
  40  # CHECK: adc r4, r5, r6, lsl #31
  41  # CHECK: adc r4, r5, r6, lsr #1
  42  # CHECK: adc r4, r5, r6, lsr #31
  43  # CHECK: adc r4, r5, r6, lsr #32
  44  # CHECK: adc r4, r5, r6, asr #1
  45  # CHECK: adc r4, r5, r6, asr #31
  46  # CHECK: adc r4, r5, r6, asr #32
  47  # CHECK: adc r4, r5, r6, ror #1
  [all …]
|
/external/openssl/crypto/bn/asm/ |
D | armv4-gf2m.s |
  37  mov r4,#0
  39  str r4,[sp,#0] @ tab[0]=0
  48  eor r4,r6,r8 @ a2^a4
  51  str r4,[sp,#24] @ tab[6]=a2^a4
  62  mov r4,r7,lsr#29
  67  eor r4,r4,r6,lsr#26
  72  eor r4,r4,r7,lsr#23
  77  eor r4,r4,r6,lsr#20
  82  eor r4,r4,r7,lsr#17
  87  eor r4,r4,r6,lsr#14
  [all …]
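
armv4-gf2m.s multiplies polynomials over GF(2): it builds a 16-entry table on the stack (tab[0]=0, tab[6]=a2^a4, and so on) so that four bits of one operand can be consumed per step, and the long run of "eor r4,r4,rX,lsr#29/#26/#23..." lines folds back the high parts that do not fit in 32-bit registers. With 64-bit arithmetic available, the same 4-bit-table idea looks roughly like this sketch, which is not OpenSSL's bn_GF2m code:

    #include <stdint.h>

    /* Carry-less ("GF(2)[x]") multiply of two 32-bit values using a 16-entry
       table of multiples of b, consuming a four bits at a time. */
    static uint64_t clmul32_sketch(uint32_t a, uint32_t b) {
        uint64_t tab[16];
        tab[0] = 0;                                  /* "tab[0]=0" */
        tab[1] = b;
        for (int i = 2; i < 16; i += 2) {
            tab[i]     = tab[i >> 1] << 1;           /* even entries: shift a smaller one */
            tab[i + 1] = tab[i] ^ b;                 /* odd entries: ... then xor in b    */
        }

        uint64_t r = 0;
        for (int i = 28; i >= 0; i -= 4)
            r ^= tab[(a >> i) & 0xf] << i;           /* xor is GF(2) addition, no carries */
        return r;
    }
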
|
D | armv4-mont.s |
  15  stmdb sp!,{r4-r12,lr} @ save 10 registers
  21  add r4,r2,r0 @ &bp[num-1]
  29  str r4,[r0,#15*4] @ save &bp[num]
  36  mov r4,sp
  47  str r12,[r4],#4 @ tp[j-1]=,tp++
  49  cmp r4,r0
  53  ldr r4,[r0,#13*4] @ restore bp
  63  ldr r2,[r4,#4]! @ *(++bp)
  72  str r4,[r0,#13*4] @ save bp
  76  mov r4,sp
  [all …]
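
armv4-mont.s is OpenSSL's word-serial Montgomery multiplication: tp[] on the stack accumulates a*b one word of b at a time ("tp[j-1]=,tp++", "*(++bp)"), and each outer pass also adds a multiple of the modulus chosen so the lowest word cancels, which lets the running value be shifted down by a word. A hedged C sketch of that structure, in CIOS form with 32-bit words and the final conditional subtraction omitted:

    #include <stdint.h>

    /* rp = ap * bp * R^-1 mod np, with R = 2^(32*num).
       n0 is -np[0]^-1 mod 2^32; tp must hold num+1 words of scratch. */
    static void mont_mul_sketch(uint32_t *rp, const uint32_t *ap, const uint32_t *bp,
                                const uint32_t *np, uint32_t n0, int num, uint32_t *tp) {
        for (int i = 0; i <= num; i++) tp[i] = 0;

        for (int i = 0; i < num; i++) {
            /* tp += ap * bp[i] */
            uint64_t carry = 0;
            for (int j = 0; j < num; j++) {
                uint64_t t = (uint64_t)ap[j] * bp[i] + tp[j] + carry;
                tp[j] = (uint32_t)t;
                carry = t >> 32;
            }
            uint64_t top = (uint64_t)tp[num] + carry;

            /* tp = (tp + m * np) >> 32, with m chosen so tp[0] + m*np[0] == 0 mod 2^32 */
            uint32_t m = tp[0] * n0;
            carry = ((uint64_t)m * np[0] + tp[0]) >> 32;
            for (int j = 1; j < num; j++) {
                uint64_t t = (uint64_t)m * np[j] + tp[j] + carry;
                tp[j - 1] = (uint32_t)t;
                carry = t >> 32;
            }
            top += carry;
            tp[num - 1] = (uint32_t)top;
            tp[num]     = (uint32_t)(top >> 32);
        }

        /* the real code finishes with "if (tp >= np) tp -= np" before copying out */
        for (int i = 0; i < num; i++) rp[i] = tp[i];
    }
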
|
/external/libvpx/vp8/encoder/arm/armv6/ |
D | vp8_variance_halfpixvar16x16_hv_armv6.asm |
  27  stmfd sp!, {r4-r12, lr}
  36  ldr r4, [r0, #0] ; load source pixels a, row N
  43  uhsub8 r4, r4, r6
  44  eor r4, r4, r10
  51  uhsub8 r4, r4, r5
  53  eor r4, r4, r10
  55  usub8 r6, r4, r5 ; calculate difference
  57  usub8 r6, r5, r4 ; calculate difference with reversed operands
  61  usad8 r4, r7, lr ; calculate sum of positive differences
  65  adds r8, r8, r4 ; add positive differences to sum
  [all …]
|
D | vp8_variance_halfpixvar16x16_v_armv6.asm |
  27  stmfd sp!, {r4-r12, lr}
  36  ldr r4, [r0, #0] ; load 4 src pixels
  42  uhsub8 r4, r4, r6
  43  eor r4, r4, r10
  45  usub8 r6, r4, r5 ; calculate difference
  47  usub8 r6, r5, r4 ; calculate difference with reversed operands
  51  usad8 r4, r7, lr ; calculate sum of positive differences
  55  adds r8, r8, r4 ; add positive differences to sum
  64  ldr r4, [r0, #4] ; load 4 src pixels
  70  uhsub8 r4, r4, r6
  [all …]
|
D | vp8_variance_halfpixvar16x16_h_armv6.asm |
  27  stmfd sp!, {r4-r12, lr}
  35  ldr r4, [r0, #0] ; load 4 src pixels
  41  uhsub8 r4, r4, r6
  42  eor r4, r4, r10
  44  usub8 r6, r4, r5 ; calculate difference
  46  usub8 r6, r5, r4 ; calculate difference with reversed operands
  50  usad8 r4, r7, lr ; calculate sum of positive differences
  54  adds r8, r8, r4 ; add positive differences to sum
  63  ldr r4, [r0, #4] ; load 4 src pixels
  69  uhsub8 r4, r4, r6
  [all …]
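
The three halfpixvar16x16 files differ only in which neighbour they average with (horizontal, vertical, or both). The hits show the shape of the loop: average four source pixels at a time (the uhsub8/eor pair is a SIMD halving trick), take per-byte differences in both orders with usub8, and accumulate the positive differences with usad8. What the routine computes overall is an ordinary variance of the half-pel-interpolated source against the reference, roughly as below; the rounding and the exact prototype are assumptions taken from the C reference behaviour, not from this assembly:

    #include <stdint.h>

    static unsigned int halfpix_h_var16x16_sketch(const uint8_t *src, int src_stride,
                                                  const uint8_t *ref, int ref_stride,
                                                  unsigned int *sse) {
        int sum = 0;
        unsigned int sq = 0;

        for (int r = 0; r < 16; r++) {
            for (int c = 0; c < 16; c++) {
                int half = (src[c] + src[c + 1] + 1) >> 1;   /* half-pel to the right */
                int d = half - ref[c];
                sum += d;                                    /* signed sum of differences  */
                sq  += (unsigned int)(d * d);                /* sum of squared differences */
            }
            src += src_stride;
            ref += ref_stride;
        }

        *sse = sq;
        return sq - (unsigned int)(((int64_t)sum * sum) >> 8);   /* 16*16 pixels: >> 8 */
    }
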
|
/external/llvm/test/CodeGen/Thumb2/ |
D | aligned-spill.ll |
  10  ; The caller-saved r4 is used as a scratch register for stack realignment.
  11  ; CHECK: push {r4, r7, lr}
  12  ; CHECK: bic r4, r4, #7
  13  ; CHECK: mov sp, r4
  24  ; NEON: push {r4, r7, lr}
  25  ; NEON: sub.w r4, sp, #64
  26  ; NEON: bic r4, r4, #15
  28  ; NEON: mov sp, r4
  29  ; NEON: vst1.64 {d8, d9, d10, d11}, [r4, :128]!
  30  ; NEON: vst1.64 {d12, d13, d14, d15}, [r4, :128]
  [all …]
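
aligned-spill.ll checks the prologue LLVM emits when a function needs more stack alignment than the ABI guarantees: a saved register (r4) gets a copy of sp, the low bits are cleared with bic, and sp is switched to the rounded-down value so that the later vst1.64 stores really are 128-bit aligned. The arithmetic being tested is just:

    #include <stdint.h>

    /* sub.w r4, sp, #64  ;  bic r4, r4, #15  ;  mov sp, r4
       reserves 64 bytes and rounds the result down to a 16-byte boundary. */
    static uintptr_t realign_sketch(uintptr_t sp, uintptr_t frame_size, uintptr_t align) {
        return (sp - frame_size) & ~(align - 1);   /* align must be a power of two */
    }

    /* realign_sketch(sp, 64, 16) mirrors the NEON case above;
       the bic #7 case is the same idea with 8-byte alignment. */
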
|
/external/openssl/crypto/modes/asm/ |
D | ghash-armv4.s |
  31  ldmia r12,{r4-r11} @ copy rem_4bit ...
  32  stmdb sp!,{r4-r11} @ ... to stack
  43  ldmia r7,{r4-r7} @ load Htbl[nlo]
  47  and r14,r4,#0xf @ rem
  50  eor r4,r8,r4,lsr#4
  52  eor r4,r4,r5,lsl#28
  66  and r12,r4,#0xf @ rem
  70  eor r4,r8,r4,lsr#4
  71  eor r4,r4,r5,lsl#28
  81  and r14,r4,#0xf @ rem
  [all …]
|
/external/aac/libSBRdec/src/arm/ |
D | env_calc_arm.cpp |
  116  PUSH {r4-r5} in FDK_get_maxval()
  124  LDR r4, [r1], #4 in FDK_get_maxval()
  126  EOR r4, r4, r4, ASR #31 in FDK_get_maxval()
  128  ORR r0, r0, r4 in FDK_get_maxval()
  132  LDR r4, [r1], #4 in FDK_get_maxval()
  134  EOR r4, r4, r4, ASR #31 in FDK_get_maxval()
  136  ORR r0, r0, r4 in FDK_get_maxval()
  143  POP {r4-r5} in FDK_get_maxval()
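
The inline assembly in env_calc_arm.cpp scans a buffer for its largest magnitude without branching: "EOR r4, r4, r4, ASR #31" turns each sample into a non-negative value (exact for positives, |x|-1 for negatives, which is enough for headroom detection), and "ORR r0, r0, r4" folds everything into one accumulator whose highest set bit tells the caller how much headroom the block has. The same scan in C:

    #include <stdint.h>

    static int32_t get_maxval_sketch(const int32_t *buf, int n) {
        int32_t acc = 0;
        for (int i = 0; i < n; i++) {
            int32_t x = buf[i];
            x ^= x >> 31;      /* branch-free magnitude: x for x >= 0, ~x (= |x| - 1) for x < 0 */
            acc |= x;          /* OR keeps every bit position seen so far */
        }
        return acc;            /* the caller can take CLZ of this to get the common headroom */
    }
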
|
/external/openssl/crypto/aes/asm/ |
D | aes-armv4.s |
  118  stmdb sp!,{r1,r4-r12,lr}
  124  ldrb r4,[r12,#2] @ manner...
  127  orr r0,r0,r4,lsl#8
  130  ldrb r4,[r12,#6]
  134  orr r1,r1,r4,lsl#8
  137  ldrb r4,[r12,#10]
  141  orr r2,r2,r4,lsl#8
  144  ldrb r4,[r12,#14]
  148  orr r3,r3,r4,lsl#8
  178  mov r4,r0,lsr#24 @ write output in endian-neutral
  [all …]
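
The ldrb/orr runs at the top of aes-armv4.s assemble the 16 input bytes into four 32-bit words one byte at a time, and the lsr #24 plus strb sequence at the end writes them back the same way, so the code works regardless of CPU endianness and input alignment. The equivalent C helpers would be something like the following; the big-endian byte order is inferred from the AES convention and the lsr#24-first store, so treat it as an assumption:

    #include <stdint.h>

    static uint32_t load_word_sketch(const uint8_t *p) {      /* ldrb + orr ...,lsl#8/16/24 */
        return ((uint32_t)p[0] << 24) | ((uint32_t)p[1] << 16) |
               ((uint32_t)p[2] << 8)  |  (uint32_t)p[3];
    }

    static void store_word_sketch(uint8_t *p, uint32_t v) {   /* mov rX,rY,lsr#24 + strb ... */
        p[0] = (uint8_t)(v >> 24);
        p[1] = (uint8_t)(v >> 16);
        p[2] = (uint8_t)(v >> 8);
        p[3] = (uint8_t)v;
    }
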
|
/external/valgrind/main/coregrind/m_dispatch/ |
D | dispatch-arm-linux.S |
  57  push {r0, r1, r4, r5, r6, r7, r8, r9, fp, lr}
  60  mov r4, #0
  61  fmxr fpscr, r4
  110  movw r4, #:lower16:VG_(tt_fast)
  113  movt r4, #:upper16:VG_(tt_fast) // r4 = &VG_(tt_fast)
  115  add r1, r4, r2, LSL #3 // r1 = &tt_fast[entry#]
  117  ldrd r4, r5, [r1, #0] // r4 = .guest, r5 = .host
  119  cmp r4, r0
  166  movw r4, #:lower16:VG_(tt_fast)
  169  movt r4, #:upper16:VG_(tt_fast) // r4 = &VG_(tt_fast)
  [all …]
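
dispatch-arm-linux.S is Valgrind's translation-cache fast path: the guest address is hashed into VG_(tt_fast), ldrd pulls out the {guest, host} pair ("r4 = .guest, r5 = .host"), and a matching guest address lets the dispatcher jump straight to the cached host code. In C the lookup is roughly the following; the entry layout matches the comments above, but the table size and the hash are placeholders, not Valgrind's actual definitions:

    #include <stdint.h>

    typedef struct { uint32_t guest; uint32_t host; } FastCacheEntry;   /* 8 bytes: matches the "r2, LSL #3" indexing */

    extern FastCacheEntry tt_fast[];             /* stand-in for VG_(tt_fast) */
    #define TT_FAST_MASK 0x1fffu                 /* placeholder table-size mask */

    static uint32_t lookup_fast_sketch(uint32_t guest_pc) {
        FastCacheEntry *e = &tt_fast[(guest_pc >> 1) & TT_FAST_MASK];   /* placeholder hash */
        if (e->guest == guest_pc)                /* "cmp r4, r0" */
            return e->host;                      /* hit: jump to the translated host code */
        return 0;                                /* miss: fall back to the full lookup */
    }
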
|