/external/llvm/test/MC/ARM/ |
D | basic-arm-instructions.s |
    48  adc r4, r5, r6
    50  adc r4, r5, r6, lsl #1
    51  adc r4, r5, r6, lsl #31
    52  adc r4, r5, r6, lsr #1
    53  adc r4, r5, r6, lsr #31
    54  adc r4, r5, r6, lsr #32
    55  adc r4, r5, r6, asr #1
    56  adc r4, r5, r6, asr #31
    57  adc r4, r5, r6, asr #32
    58  adc r4, r5, r6, ror #1
    [all …]
|
D | basic-thumb-instructions.s |
    25   adcs r4, r6
    27   @ CHECK: adcs r4, r6 @ encoding: [0x74,0x41]
    151  bics r1, r6
    153  @ CHECK: bics r1, r6 @ encoding: [0xb1,0x43]
    206  cmp r6, #32
    210  @ CHECK: cmp r6, #32 @ encoding: [0x20,0x2e]
    225  ldm r3, {r0, r1, r2, r3, r4, r5, r6, r7}
    229  @ CHECK: ldm r3, {r0, r1, r2, r3, r4, r5, r6, r7} @ encoding: [0xff,0xcb]
    238  ldr r2, [r6, #32]
    246  @ CHECK: ldr r2, [r6, #32] @ encoding: [0x32,0x6a]
    [all …]
|
D | basic-thumb2-instructions.s |
    43   adc r4, r5, r6
    44   adcs r4, r5, r6
    53   @ CHECK: adc.w r4, r5, r6 @ encoding: [0x45,0xeb,0x06,0x04]
    54   @ CHECK: adcs.w r4, r5, r6 @ encoding: [0x55,0xeb,0x06,0x04]
    75   add r12, r6, #0x100
    76   addw r12, r6, #0x100
    97   @ CHECK: add.w r12, r6, #256 @ encoding: [0x06,0xf5,0x80,0x7c]
    98   @ CHECK: addw r12, r6, #256 @ encoding: [0x06,0xf2,0x00,0x1c]
    118  adds.w r0, r3, r6, lsr #25
    126  @ CHECK: adds.w r0, r3, r6, lsr #25 @ encoding: [0x13,0xeb,0x56,0x60]
    [all …]
|
/external/libvpx/vp8/encoder/arm/armv6/ |
D | vp8_variance_halfpixvar16x16_v_armv6.asm |
    37  ldr r6, [r9, #0] ; load 4 src pixels from next row
    41  mvn r6, r6
    42  uhsub8 r4, r4, r6
    45  usub8 r6, r4, r5 ; calculate difference
    46  sel r7, r6, lr ; select bytes with positive difference
    47  usub8 r6, r5, r4 ; calculate difference with reversed operands
    48  sel r6, r6, lr ; select bytes with negative difference
    52  usad8 r5, r6, lr ; calculate sum of negative differences
    53  orr r6, r6, r7 ; differences of all 4 pixels
    59  uxtb16 r5, r6 ; byte (two pixels) to halfwords
    [all …]
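The routine above computes a vertical half-pixel variance: each source pixel is averaged with the pixel one row below, and the usub8/sel/usad8 lines accumulate the positive and negative byte differences against the reference. A scalar C sketch of that flow; the rounded average is my reading of the mvn/uhsub8 pair, and the function name and signature are illustrative, not from the source:

    /* Scalar sketch of a vertical half-pel variance pass over a 16x16 block. */
    static void halfpix_var_v(const unsigned char *src, int src_stride,
                              const unsigned char *ref, int ref_stride,
                              int *sum, unsigned int *sse)
    {
        int s = 0;
        unsigned int e = 0;
        for (int y = 0; y < 16; y++) {
            for (int x = 0; x < 16; x++) {
                /* average with the pixel from the next row (assumed rounded) */
                int pred = (src[x] + src[x + src_stride] + 1) >> 1;
                int diff = pred - ref[x];
                s += diff;
                e += (unsigned int)(diff * diff);
            }
            src += src_stride;
            ref += ref_stride;
        }
        *sum = s;
        *sse = e;
    }

The sel-based split lets the assembly sum four absolute differences per usad8; the scalar version simply accumulates each signed difference and its square.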
|
D | vp8_variance_halfpixvar16x16_h_armv6.asm |
    36  ldr r6, [r0, #1] ; load 4 src pixels with 1 byte offset
    40  mvn r6, r6
    41  uhsub8 r4, r4, r6
    44  usub8 r6, r4, r5 ; calculate difference
    45  sel r7, r6, lr ; select bytes with positive difference
    46  usub8 r6, r5, r4 ; calculate difference with reversed operands
    47  sel r6, r6, lr ; select bytes with negative difference
    51  usad8 r5, r6, lr ; calculate sum of negative differences
    52  orr r6, r6, r7 ; differences of all 4 pixels
    58  uxtb16 r5, r6 ; byte (two pixels) to halfwords
    [all …]
|
D | vp8_variance_halfpixvar16x16_hv_armv6.asm |
    37  ldr r6, [r0, #1] ; load source pixels b, row N
    42  mvn r6, r6
    43  uhsub8 r4, r4, r6
    55  usub8 r6, r4, r5 ; calculate difference
    56  sel r7, r6, lr ; select bytes with positive difference
    57  usub8 r6, r5, r4 ; calculate difference with reversed operands
    58  sel r6, r6, lr ; select bytes with negative difference
    62  usad8 r5, r6, lr ; calculate sum of negative differences
    63  orr r6, r6, r7 ; differences of all 4 pixels
    69  uxtb16 r5, r6 ; byte (two pixels) to halfwords
    [all …]
|
D | vp8_variance16x16_armv6.asm |
    39  usub8 r6, r4, r5 ; calculate difference
    40  sel r7, r6, lr ; select bytes with positive difference
    42  sel r6, r9, lr ; select bytes with negative difference
    46  usad8 r5, r6, lr ; calculate sum of negative differences
    47  orr r6, r6, r7 ; differences of all 4 pixels
    53  uxtb16 r5, r6 ; byte (two pixels) to halfwords
    54  uxtb16 r10, r6, ror #8 ; another two pixels to halfwords
    62  usub8 r6, r4, r5 ; calculate difference
    63  sel r7, r6, lr ; select bytes with positive difference
    65  sel r6, r9, lr ; select bytes with negative difference
    [all …]
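The full-pixel variance routine accumulates both the signed sum of differences and the sum of squared differences; for a 16x16 block (256 pixels) the two combine as below. A minimal sketch using the standard variance formula, since the epilogue itself is not visible in the truncated listing:

    /* variance = SSE - sum^2/256 for a 16x16 block; names are illustrative. */
    unsigned int variance_16x16(unsigned int sse, int sum, unsigned int *sse_out)
    {
        *sse_out = sse;
        return sse - (unsigned int)(((long long)sum * sum) >> 8);
    }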
|
D | vp8_mse16x16_armv6.asm |
    37  ldr r6, [r2, #0x0] ; load 4 ref pixels
    41  usub8 r8, r5, r6 ; calculate difference
    43  usub8 r9, r6, r5 ; calculate difference with reversed operands
    48  usad8 r6, r8, lr ; calculate sum of negative differences
    54  uxtb16 r6, r8 ; byte (two pixels) to halfwords
    56  smlad r4, r6, r6, r4 ; dual signed multiply, add and accumulate (1)
    59  ldr r6, [r2, #0x4] ; load 4 ref pixels
    62  usub8 r8, r5, r6 ; calculate difference
    64  usub8 r9, r6, r5 ; calculate difference with reversed operands
    69  usad8 r6, r8, lr ; calculate sum of negative differences
    [all …]
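For the MSE variant only the squared differences are kept (no mean correction): the uxtb16 lines widen the byte differences to halfwords and smlad squares and accumulates them two at a time. A scalar sketch under that reading, with illustrative names:

    /* Accumulate the sum of squared differences between src and ref. */
    static unsigned int accumulate_sse(const unsigned char *src, const unsigned char *ref,
                                       int n, unsigned int sse)
    {
        for (int i = 0; i < n; i++) {
            int d = src[i] - ref[i];       /* the assembly forms |d| per byte with usub8 */
            sse += (unsigned int)(d * d);  /* smlad squares and accumulates pairs */
        }
        return sse;
    }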
|
D | vp8_subtract_armv6.asm |
    33  ldr r6, [r0, #vp8_block_src_diff]
    60  str r0, [r6, #0] ; diff
    61  str r1, [r6, #4] ; diff
    63  add r6, r6, r2, lsl #1 ; update diff pointer
    89  ldr r6, [r1] ; src (A)
    92  uxtb16 r8, r6 ; [s2 | s0] (A)
    94  uxtb16 r10, r6, ror #8 ; [s3 | s1] (A)
    97  usub16 r6, r8, r9 ; [d2 | d0] (A)
    103 pkhbt r8, r6, r7, lsl #16 ; [d1 | d0] (A)
    104 pkhtb r9, r7, r6, asr #16 ; [d3 | d2] (A)
    [all …]
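The uxtb16/usub16/pkhbt sequence widens packed bytes to halfwords, subtracts the prediction from the source, and repacks the 16-bit differences in pixel order. In scalar terms the subtract step is simply the following sketch; parameter names and the pitch handling are illustrative:

    /* diff = src - pred, widened to 16 bits; the assembly handles four pixels at a time. */
    static void subtract_block(short *diff, const unsigned char *src, int src_stride,
                               const unsigned char *pred, int pred_stride,
                               int rows, int cols)
    {
        for (int r = 0; r < rows; r++) {
            for (int c = 0; c < cols; c++)
                diff[c] = (short)(src[c] - pred[c]);
            diff += cols;
            src  += src_stride;
            pred += pred_stride;
        }
    }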
|
D | vp8_fast_fdct4x4_armv6.asm |
    34  qadd16 r6, r4, r5 ; [i1+i2 | i0+i3] = [b1 | a1] without shift
    43  smuad r4, r6, lr ; o0 = (i1+i2)*8 + (i0+i3)*8
    44  smusd r5, r6, lr ; o2 = (i1+i2)*8 - (i0+i3)*8
    46  smlad r6, r7, r12, r11 ; o1 = (c1 * 2217 + d1 * 5352 + 14500)
    51  pkhbt r3, r4, r6, lsl #4 ; [o1 | o0], keep in register for PART 2
    52  pkhbt r6, r5, r7, lsl #4 ; [o3 | o2]
    54  str r6, [r1, #4]
    59  qadd16 r6, r8, r9 ; [i5+i6 | i4+i7] = [b1 | a1] without shift
    68  smuad r9, r6, lr ; o4 = (i5+i6)*8 + (i4+i7)*8
    69  smusd r8, r6, lr ; o6 = (i5+i6)*8 - (i4+i7)*8
    [all …]
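Per the comments, each row pass forms a1 = i0+i3 and b1 = i1+i2 (scaled by 8), takes o0/o2 as their sum and difference, and derives o1/o3 from a rotation with the constants 2217 and 5352. A rough scalar sketch of one row pass; the 7500 rounding term and the >>12 scaling follow the generic VP8 reference transform and are not confirmed by this excerpt:

    static void fdct4_row(const short *ip, short *op)
    {
        int a1 = (ip[0] + ip[3]) * 8;
        int b1 = (ip[1] + ip[2]) * 8;
        int c1 = (ip[1] - ip[2]) * 8;
        int d1 = (ip[0] - ip[3]) * 8;

        op[0] = (short)(a1 + b1);                                /* o0 */
        op[2] = (short)(a1 - b1);                                /* o2 */
        op[1] = (short)((c1 * 2217 + d1 * 5352 + 14500) >> 12);  /* o1, constants per the comments */
        op[3] = (short)((d1 * 2217 - c1 * 5352 + 7500) >> 12);   /* o3, rounding term assumed */
    }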
|
/external/libvpx/vpx_scale/arm/neon/ |
D | vp8_vpxyv12_extendframeborders_neon.asm |
    44  add r6, r1, lr
    45  sub r6, r6, r3, lsl #1 ;destptr2
    46  sub r2, r6, #1 ;srcptr2
    73  vst1.8 {q2, q3}, [r6], lr
    75  vst1.8 {q6, q7}, [r6], lr
    77  vst1.8 {q10, q11}, [r6], lr
    79  vst1.8 {q14, q15}, [r6], lr
    89  sub r6, r1, r3 ;destptr2
    90  sub r2, r6, lr ;srcptr2
    110 vst1.8 {q8, q9}, [r6]!
    [all …]
|
D | vp8_vpxyv12_copyframeyonly_neon.asm |
    32  ldr r6, [r0, #yv12_buffer_config_y_stride]
    43  add r10, r2, r6
    71  add r2, r2, r6, lsl #1
    102 add r6, r1, lr
    103 sub r6, r6, r3, lsl #1 ;destptr2
    104 sub r2, r6, #1 ;srcptr2
    131 vst1.8 {q2, q3}, [r6], lr
    133 vst1.8 {q6, q7}, [r6], lr
    135 vst1.8 {q10, q11}, [r6], lr
    137 vst1.8 {q14, q15}, [r6], lr
    [all …]
|
/external/libvpx/vp8/decoder/arm/armv6/ |
D | dequant_dc_idct_v6.asm |
    29  ldr r6, [sp, #44]
    42  strh r6, [r0], #2
    45  smulbb r6, r4, r5
    51  strh r6, [r0], #2
    57  smulbb r6, r4, r5
    63  strh r6, [r0], #2
    66  smulbb r6, r4, r5
    74  strh r6, [r0], #2
    85  ldr r6, [r0, #8]
    90  smulwt r9, r3, r6
    [all …]
|
D | dequant_idct_v6.asm |
    35  smulbb r6, r4, r5
    41  strh r6, [r0], #2
    44  smulbb r6, r4, r5
    52  strh r6, [r0], #2
    63  ldr r6, [r0, #8]
    68  smulwt r9, r3, r6
    69  smulwb r7, r3, r6
    70  smulwt r10, r4, r6
    71  smulwb r8, r4, r6
    75  uadd16 r6, r6, r7
    [all …]
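The smulbb/strh pairs at the top are the dequantization: each quantized coefficient is multiplied by its dequantization factor and written back before the IDCT proper (the smulwt/smulwb lines) begins. A scalar sketch of that first step, with illustrative names:

    /* Multiply each coefficient by its dequantizer; 16 coefficients for a 4x4 block. */
    static void dequantize_block(short *coeffs, const short *dequant, int n)
    {
        for (int i = 0; i < n; i++)
            coeffs[i] = (short)(coeffs[i] * dequant[i]);
    }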
|
/external/tremolo/Tremolo/ |
D | mdctARM.s |
    61  LDMDB r2!,{r5,r6,r7,r12}
    64  MOV r6, r6, ASR #9 @ r6 = (*--r)>>9
    78  MOV r14,r6, ASR #15
    80  EORNE r6, r4, r14,ASR #31
    81  STRH r6, [r0], r3
    123 LDR r6, [r2],#8
    128 RSB r6, r6, #0
    133 MOV r6, r6, ASR #9 @ r6 = (-*l)>>9
    146 MOV r14,r6, ASR #15
    148 EORNE r6, r4, r14,ASR #31
    [all …]
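The ASR #9 scales the fixed-point MDCT output down, and the ASR #15 / EORNE ... ASR #31 pair is a saturation idiom: if the shifted value no longer fits in 16 bits it is replaced by the maximum positive or negative 16-bit value before the STRH store. A scalar sketch of that clip-and-store; the function name is illustrative:

    /* Shift a 32-bit MDCT output down by 9 and saturate it to a signed 16-bit sample. */
    static short clip_sample(int x)
    {
        int v = x >> 9;
        if (v != (short)v)                  /* value does not fit in 16 bits */
            v = (v < 0) ? -32768 : 32767;   /* saturate, keeping the sign */
        return (short)v;
    }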
|
D | mdctLARM.s |
    61  LDMDB r2!,{r5,r6,r7,r12}
    64  MOV r6, r6, ASR #9 @ r6 = (*--r)>>9
    78  MOV r14,r6, ASR #15
    80  EORNE r6, r4, r14,ASR #31
    81  STRH r6, [r0], r3
    123 LDR r6, [r2],#8
    128 RSB r6, r6, #0
    133 MOV r6, r6, ASR #9 @ r6 = (-*l)>>9
    146 MOV r14,r6, ASR #15
    148 EORNE r6, r4, r14,ASR #31
    [all …]
|
D | bitwiseARM.s |
    67  STMFD r13!,{r5,r6}
    79  LDRLT r6,[r3] @ r6 = ptr[1]
    82  ORRLT r10,r10,r6,LSL r12 @ r10= first bitsLeftInSeg bits+crap
    97  LDMIA r11,{r6,r12,r14} @ r6 = buffer
    100 LDR r6,[r6] @ r6 = buffer->data
    103 ADD r6,r6,r12 @ r6 = buffer->data+begin
    105 LDRB r12,[r6],#1 @ r12= *buffer
    116 LDMFD r13!,{r5,r6,r10,r11,PC}
    125 LDMFD r13!,{r5,r6,r10,r11,PC}
    130 LDMFD r13!,{r5,r6,r10,r11,PC}
    [all …]
|
/external/libvpx/vp8/common/arm/armv6/ |
D | recon_v6.asm |
    45  ldr r6, [dif, #0] ; 1 | 0
    48  pkhbt r8, r6, r7, lsl #16 ; 2 | 0
    49  pkhtb r9, r7, r6, asr #16 ; 3 | 1
    63  ;; ldr r6, [dif, #8] ; 1 | 0
    65  ldr r6, [dif, #0] ; 1 | 0
    68  pkhbt r8, r6, r7, lsl #16 ; 2 | 0
    69  pkhtb r9, r7, r6, asr #16 ; 3 | 1
    83  ;; ldr r6, [dif, #16] ; 1 | 0
    85  ldr r6, [dif, #0] ; 1 | 0
    88  pkhbt r8, r6, r7, lsl #16 ; 2 | 0
    [all …]
|
D | loopfilter_v6.asm |
    71  ldr r6, [sp, #36] ; load thresh address
    80  ldr r3, [r6], #4 ; thresh
    89  uqsub8 r6, r9, r10 ; p3 - p2
    94  orr r6, r6, r7 ; abs (p3-p2)
    96  uqsub8 lr, r6, r2 ; compare to limit. lr: vp8_filter_mask
    98  uqsub8 r6, r11, r12 ; p1 - p0
    103 orr r6, r6, r7 ; abs (p1-p0)
    104 uqsub8 r7, r6, r2 ; compare to limit
    105 uqsub8 r8, r6, r3 ; compare to thresh -- save r8 for later
    108 uqsub8 r6, r11, r10 ; p1 - q1
    [all …]
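Because uqsub8 saturates at zero, the pair uqsub8(a,b) | uqsub8(b,a) yields the per-byte absolute difference, and a further uqsub8 against the limit (or thresh) leaves a non-zero byte exactly where that difference exceeds it. A scalar sketch of those two building blocks of the filter mask; the helper names are illustrative:

    /* |a - b| built from two saturating subtractions, as the uqsub8/orr pairs do. */
    static unsigned char abs_diff_u8(unsigned char a, unsigned char b)
    {
        unsigned char d1 = (a > b) ? (unsigned char)(a - b) : 0; /* uqsub8 a, b */
        unsigned char d2 = (b > a) ? (unsigned char)(b - a) : 0; /* uqsub8 b, a */
        return d1 | d2;
    }

    /* Non-zero when the absolute difference exceeds the limit (uqsub8 against limit). */
    static unsigned char exceeds(unsigned char a, unsigned char b, unsigned char limit)
    {
        unsigned char d = abs_diff_u8(a, b);
        return (d > limit) ? (unsigned char)(d - limit) : 0;
    }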
|
/external/libvpx/vpx_scale/arm/armv4/ |
D | gen_scalers_armv4.asm |
    72  orr r6, r4, r5, lsl #16 ; b | a
    74  mul r6, c51_205, r6 ; a * 51 + 205 * b
    78  add r6, r6, #0x8000
    80  mov r6, r6, lsr #24
    81  strb r6, [dest], #1
    108 orr r6, r4, r5, lsl #16 ; b | a
    109 mul r6, c51_205, r6
    114 add r6, r6, #0x8000
    116 mov r6, r6, lsr #24
    117 strb r6, [dest], #1
    [all …]
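The packed multiply by c51_205 blends two neighbouring pixels with weights 51 and 205 (which sum to 256), and the +0x8000 / lsr #24 is the packed-halfword form of adding 128 and shifting right by 8. A scalar sketch of the blend under that reading; the exact rounding placement is my interpretation of the excerpt:

    /* Two-tap blend used by the scaler: roughly a 1/5 : 4/5 weighting, rounded. */
    static unsigned char blend_51_205(unsigned char a, unsigned char b)
    {
        return (unsigned char)((a * 51 + b * 205 + 128) >> 8);
    }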
|
/external/jpeg/ |
D | armv6_idct.S |
    77  stmdb sp!, {r4, r5, r6, r7, r8, r9, r10, r11, r12, r14}
    98  ldrsh r6, [r14, #96]
    108 orreqs r8, r6, r7
    118 mla r6, r11, r6, r2
    126 rsb r2, r6, r2, lsl #1
    163 add r0, r0, r6
    164 sub r2, r2, r6
    165 sub r6, r0, r6, lsl #1
    197 add r6, r6, r5
    199 sub r5, r6, r5, lsl #1
    [all …]
|
/external/qemu/distrib/jpeg-6b/ |
D | armv6_idct.S |
    77  stmdb sp!, {r4, r5, r6, r7, r8, r9, r10, r11, r12, r14}
    98  ldrsh r6, [r14, #96]
    108 orreqs r8, r6, r7
    118 mla r6, r11, r6, r2
    126 rsb r2, r6, r2, lsl #1
    163 add r0, r0, r6
    164 sub r2, r2, r6
    165 sub r6, r0, r6, lsl #1
    197 add r6, r6, r5
    199 sub r5, r6, r5, lsl #1
    [all …]
|
/external/llvm/test/MC/Disassembler/ARM/ |
D | basic-arm-instructions.txt |
    37  # CHECK: adc r4, r5, r6
    39  # CHECK: adc r4, r5, r6, lsl #1
    40  # CHECK: adc r4, r5, r6, lsl #31
    41  # CHECK: adc r4, r5, r6, lsr #1
    42  # CHECK: adc r4, r5, r6, lsr #31
    43  # CHECK: adc r4, r5, r6, lsr #32
    44  # CHECK: adc r4, r5, r6, asr #1
    45  # CHECK: adc r4, r5, r6, asr #31
    46  # CHECK: adc r4, r5, r6, asr #32
    47  # CHECK: adc r4, r5, r6, ror #1
    [all …]
|
/external/openssl/crypto/bn/asm/ |
D | s390x.S |
    26  stmg %r6,%r10,48(%r15)
    36  mlgr %r6,%r5 // *=w
    38  alcgr %r6,zero
    44  alcgr %r9,%r6
    50  mlgr %r6,%r5
    52  alcgr %r6,zero
    58  alcgr %r9,%r6
    72  lmg %r6,%r10,48(%r15)
    77  mlgr %r6,%r5 // *=w
    79  alcgr %r6,zero
    [all …]
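The mlgr/alcgr sequences are the usual bignum inner loop: each word of the operand is multiplied by w to produce a 128-bit product, and the previous carry (plus, in the multiply-add variant, the existing result word) is folded back in with carry propagation. A C sketch of the plain multiply loop, assuming 64-bit words and a compiler with __int128; whether this excerpt is the mul or the mul-add variant cannot be told from the truncated listing:

    typedef unsigned long long u64;

    /* r[i] = low 64 bits of a[i]*w + carry; the high 64 bits become the next carry. */
    static u64 mul_words(u64 *r, const u64 *a, int num, u64 w)
    {
        u64 carry = 0;
        for (int i = 0; i < num; i++) {
            unsigned __int128 t = (unsigned __int128)a[i] * w + carry;
            r[i]  = (u64)t;
            carry = (u64)(t >> 64);
        }
        return carry;
    }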
|
/external/libvpx/vp8/encoder/arm/armv5te/ |
D | vp8_packtokens_mbrow_armv5.asm |
    34  ldr r6, _VP8_COMMON_MBrows_
    37  ldr r5, [r4, r6] ; load up mb_rows
    65  ldrb r6, [r1, #tokenextra_token] ; t
    68  add r4, r4, r6, lsl #3 ; a = vp8_coef_encodings + t
    73  ldr r6, [r4, #vp8_token_value] ; v
    85  lsl r12, r6, r4 ; r12 = v << 32 - n
    110 clz r6, r4
    111 sub r6, r6, #24 ; shift
    115 adds r3, r3, r6 ; count += shift
    116 lsl r5, r4, r6 ; range <<= shift
    [all …]
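The clz / sub #24 / count += shift / range <<= shift lines are the renormalisation step of the boolean coder: after each coded bit the 8-bit range is scaled back up and the number of shifted-out bits is tracked. A very reduced sketch of just that step; carry handling and the actual bit output are omitted, and the lowvalue shift is assumed rather than visible in the excerpt:

    /* Renormalise: bring range back into [128, 255] and count the bits shifted out. */
    static void renormalize(unsigned int *range, int *count, unsigned int *lowvalue)
    {
        int shift = __builtin_clz(*range) - 24;  /* range is kept as an 8-bit quantity */
        *range    <<= shift;                     /* range <<= shift */
        *count     += shift;                     /* count += shift */
        *lowvalue <<= shift;                     /* assumed; not shown in the listing */
    }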
|