/frameworks/av/media/libstagefright/codecs/amrwbenc/src/asm/ARMV5E/ |
D | syn_filt_opt.s |
    48  LDRH r7, [r4], #2
    57  STRH r7, [r5], #2
    66  LDRH r7, [r4], #2
    75  STRH r7, [r5], #2
    91  LDRSH r7, [r0, #4] @ load a[2]
    96  ORR r10, r6, r7, LSL #16 @ -a[2] -- -a[1]
    102  LDRSH r7, [r0, #12] @ load a[6]
    107  ORR r10, r6, r7, LSL #16 @ -a[6] -- -a[5]
    113  LDRSH r7, [r0, #20] @ load a[10]
    118  ORR r10, r6, r7, LSL #16 @ -a[10] -- -a[9]
    [all …]
|
D | Syn_filt_32_opt.s |
    46  ADD r7, r3, #4 @ 4 + Q_new
    47  MOV r3, r6, ASR r7 @ a0 = Aq[0] >> (4 + Q_new)
    51  LDRSH r7, [r0, #4] @ load Aq[2]
    56  ORR r10, r6, r7, LSL #16 @ Aq[2] -- Aq[1]
    62  LDRSH r7, [r0, #12] @ load Aq[6]
    67  ORR r10, r6, r7, LSL #16 @ Aq[6] -- Aq[5]
    73  LDRSH r7, [r0, #20] @ load Aq[10]
    78  ORR r10, r6, r7, LSL #16 @ Aq[10] -- Aq[9]
    84  LDRSH r7, [r0, #28] @ load Aq[14]
    89  ORR r10, r6, r7, LSL #16 @ Aq[14] -- Aq[13]
    [all …]
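Both synthesis-filter files above stage their 16-bit LPC coefficients the same way: adjacent coefficients are packed in pairs into one 32-bit register (syn_filt_opt.s packs the negated values, Syn_filt_32_opt.s the raw Aq[] values) so the halfword multiply-accumulate instructions can select either coefficient, and Aq[0] is pre-scaled by 4 + Q_new. A minimal C sketch of what the annotated lines compute; the Word16/Word32 typedefs and function names are assumptions, not code from these files:

    typedef short Word16;
    typedef int   Word32;

    /* ORR r10, r6, r7, LSL #16: low halfword = Aq[2k+1], high = Aq[2k+2] */
    static Word32 pack_coef_pair(Word16 lo, Word16 hi)
    {
        return ((Word32)hi << 16) | ((Word32)lo & 0xFFFF);
    }

    /* lines 46-47: a0 = Aq[0] >> (4 + Q_new) */
    static Word16 scale_a0(Word16 Aq0, Word16 Q_new)
    {
        return (Word16)(Aq0 >> (4 + Q_new));
    }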
|
D | Filt_6k_7k_opt.s |
    54  MOV r7, r3 @ get signal[i]
    56  LDRSH r8, [r7], #2
    57  LDRSH r9, [r7], #2
    60  LDRSH r11, [r7], #2
    61  LDRSH r12, [r7], #2
    68  LDRSH r8, [r7], #2
    69  LDRSH r9, [r7], #2
    72  LDRSH r11, [r7], #2
    73  LDRSH r12, [r7], #2
    96  LDRSH r7, [r4, #58] @ load x[i + 29]
    [all …]
|
D | Deemph_32_opt.s |
    39  LDRSH r7, [r1], #2 @load x_lo[0]
    47  ADD r12, r10, r7, LSL #4 @L_tmp += x_lo[0] << 4
    52  LDRSH r7, [r1], #2 @load x_lo[1]
    59  ADD r12, r10, r7, LSL #4
    70  LDRSH r7, [r1], #2
    73  ADD r12, r10, r7, LSL #4
    80  LDRSH r7, [r1], #2
    84  ADD r12, r10, r7, LSL #4
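The lines above rebuild a 32-bit sample from a 16-bit high/low split: only the low-half contribution (x_lo[i] << 4) is visible in this excerpt, so the matching x_hi << 16 term in the sketch below is an assumption taken from the usual hi/lo double-precision convention, not shown code:

    typedef short Word16;
    typedef int   Word32;

    /* L_tmp += x_lo[i] << 4 (line 47); the << 16 high half is assumed */
    static Word32 rebuild_sample(Word16 x_hi, Word16 x_lo)
    {
        Word32 L_tmp = (Word32)x_hi << 16;
        L_tmp += (Word32)x_lo << 4;
        return L_tmp;
    }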
|
D | Dot_p_opt.s |
    40  LDR r7, [r1], #4
    42  SMLABB r4, r6, r7, r4
    44  SMLATT r4, r6, r7, r4
    49  LDR r7, [r1], #4
    53  SMLABB r4, r6, r7, r4
    55  SMLATT r4, r6, r7, r4
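Each LDR above fetches two adjacent 16-bit samples as one 32-bit word, and the SMLABB/SMLATT pair then multiplies bottom×bottom and top×top halfwords into the running sum, i.e. two dot-product terms per load. A sketch of the per-iteration arithmetic; the Word16/Word32 typedefs and the helper name are assumptions:

    typedef short Word16;
    typedef int   Word32;

    static Word32 dot_two_terms(Word32 acc, Word32 x_pair, Word32 y_pair)
    {
        Word16 x0 = (Word16)x_pair, x1 = (Word16)(x_pair >> 16);
        Word16 y0 = (Word16)y_pair, y1 = (Word16)(y_pair >> 16);
        acc += (Word32)x0 * y0;   /* SMLABB: bottom * bottom */
        acc += (Word32)x1 * y1;   /* SMLATT: top * top       */
        return acc;
    }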
|
D | residu_asm_opt.s |
    40  LDRH r7, [r0], #2
    41  ORR r6, r7, r6, LSL #16 @r6 --- a2, a3
    43  LDRH r7, [r0], #2
    45  ORR r7, r8, r7, LSL #16 @r7 --- a4, a5
    98  SMLATT r12, r7, r2, r12 @i4(4) --- r12 += x[-1] * a4
    102  SMLATB r11, r7, r2, r11 @ i3 (4)
    103  SMLABB r12, r7, r2, r12 @ i4 (5)
    107  SMLATT r4, r7, r2, r4 @ i2 (4)
    108  SMLABT r11, r7, r2, r11 @ i3 (5)
    110  SMLATB r3, r7, r2, r3 @ i1 (4)
    [all …]
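The i1…i4 comments above mark four residual accumulators updated in parallel, so each packed coefficient pair (a2,a3 in r6; a4,a5 in r7) is reused across four neighbouring outputs, with the SMLAxy suffix choosing which halfword of the coefficient and sample registers feeds which sum (e.g. the line annotated "r12 += x[-1] * a4"). A rough sketch of one such shared step; the array names, indexing direction and surrounding loop are assumptions:

    typedef short Word16;
    typedef int   Word32;

    /* One coefficient a[j] contributes to four consecutive outputs;
     * x points at the sample for the first output, with older history
     * stored just before it. */
    static void add_coef_term(const Word16 *a, const Word16 *x,
                              Word32 acc[4], int j)
    {
        for (int k = 0; k < 4; k++)
            acc[k] += (Word32)a[j] * x[k - j];
    }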
|
D | Norm_Corr_opt.s |
    74  MOV r7, #1
    92  ADD r9, r7, r6, LSL #1 @L_tmp = (L_tmp << 1) + 1
    93  CLZ r7, r9
    94  SUB r6, r7, #1 @exp = norm_l(L_tmp)
    95  RSB r7, r6, #32 @exp = 32 - exp
    96  MOV r6, r7, ASR #1
    97  RSB r7, r6, #0 @scale = -(exp >> 1)
    101  @r7 --- scale r4 --- t_min r8 --- excf[]
    155  STMFD sp!, {r0 - r4, r7 - r12, r14}
    163  LDMFD sp!, {r0 - r4, r7 - r12, r14}
    [all …]
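Lines 92–97 compute the correlation scaling exactly as the comments spell out: normalise L_tmp, derive an exponent, and negate half of it. A small C sketch following those comments literally; norm_l() is the standard fixed-point count of redundant sign bits (CLZ minus one), written here for positive inputs only, and the function names are assumptions. The NEON variant further down performs the (L_tmp << 1) + 1 step with a saturating QADD:

    typedef int Word32;

    static int norm_l_pos(Word32 L_x)                  /* L_x > 0 assumed */
    {
        return __builtin_clz((unsigned)L_x) - 1;       /* CLZ; SUB #1 */
    }

    static int corr_scale(Word32 L_tmp)
    {
        L_tmp = (Word32)(((unsigned)L_tmp << 1) + 1);  /* L_tmp = (L_tmp << 1) + 1 */
        int exp = norm_l_pos(L_tmp);                   /* exp = norm_l(L_tmp)      */
        exp = 32 - exp;                                /* exp = 32 - exp           */
        return -(exp >> 1);                            /* scale = -(exp >> 1)      */
    }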
|
D | cor_h_vec_opt.s |
    41  ADD r7, r4, r2, LSL #5 @r7 --- p0 = rrixix[track]
    45  @r3 --- sign[], r4 --- i, r7 --- p0
    72  ADD r8, r7, #32
    81  LDRSH r10, [r7], #2 @*p0++
    119  ADD r8, r7, #32
    128  LDRSH r10, [r7], #2 @*p0++
|
D | scale_sig_opt.s |
    35  RSB r7, r2, #0 @exp = -exp
    59  MOV r5, r6, ASR r7 @L_tmp >>= exp
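The two matches above are the down-scaling path: the exponent is negated and the 32-bit value is arithmetically shifted right by that amount. As a one-line sketch (typedef and function name assumed):

    typedef int Word32;

    static Word32 scale_down(Word32 L_tmp, int exp)   /* exp < 0 here */
    {
        return L_tmp >> (-exp);   /* exp = -exp; L_tmp >>= exp */
    }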
|
/frameworks/av/media/libstagefright/codecs/m4v_h263/dec/src/ |
D | idct.cpp |
    131  int32 r0, r1, r2, r3, r4, r5, r6, r7, r8; /* butterfly nodes */  in idct_intra() local
    156  r7 = blk[B_SIZE * 3 + i];  in idct_intra()
    158  if (!(r1 | r2 | r3 | r4 | r5 | r6 | r7))  in idct_intra()
    187  r8 = W3 * (r6 + r7);  in idct_intra()
    189  r7 = (r8 - (W3 + W5) * r7);  in idct_intra()
    201  r6 = r5 + r7;  in idct_intra()
    202  r5 -= r7;  in idct_intra()
    205  r7 = r8 + r3;  in idct_intra()
    218  tmpBLK32[0 + i] = (r7 + r1) >> 8;  in idct_intra()
    225  tmpBLK32[(7<<3) + i] = (r7 - r1) >> 8;  in idct_intra()
    [all …]
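Lines 187 and 189 above are the usual shared-multiply rotation of fast IDCTs: with r8 = W3 * (r6 + r7), the update r7 = r8 - (W3 + W5) * r7 expands to

    W3*r6 + W3*r7 - W3*r7 - W5*r7  =  W3*r6 - W5*r7

so the single product W3 * (r6 + r7) is shared between the rotated odd-stage outputs; the first-pass results are then descaled with >> 8 as they are stored to tmpBLK32 (lines 218 and 225).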
|
/frameworks/av/media/libstagefright/codecs/mp3dec/src/asm/ |
D | pvmp3_dct_9_gcc.s |
    57  ldr r7,[r0, #0xc]
    59  add r6,r5,r7
    60  sub r5,r5,r7
    61  add r7,r1,r12
    63  add r7,r7,r6
    64  add r10,r7,r8
    65  rsb r7,r8,r7,asr #1
    66  str r7,[r0, #0x18]
    70  rsb r7,r2,#0
    74  mov r8,r7
    [all …]
|
D | pvmp3_dct_16_gcc.s |
    82  smull r7,r4,r6,r4
    84  add r7,r1,r12
    132  sub r1,r7,r8
    135  add r1,r7,r8
    137  sub r7,r9,r1
    141  smull r7,r8,r1,r7
    142  sub r7,r3,r10
    144  mov r7,r7,lsl #1
    145  smull r8,r7,r1,r7
    147  add r3,r3,r7
    [all …]
|
D | pvmp3_mdct_18_gcc.s |
    45  mov r7,r2
    146  ldr lr,[r7,r4,lsl #2]
    164  ldr lr,[r7,r4,lsl #2]
    181  ldr r10,[r7,#0x1c]
    191  ldr r3,[r7,#0x20]
    194  ldr lr,[r7,#0x24]
    202  ldr r3,[r7,#0x44]
    210  ldr r3,[r7,#0x40]
    220  ldr r12,[r7,#0x3c]
    228  ldr r12,[r7,#0x38]
    [all …]
|
D | pvmp3_polyphase_filter_window_gcc.s |
    76  ldr r7,[r12,#0x80]
    83  smlal r6,r9,r5,r7
    88  smlal r7,r11,r5,r7
    95  ldr r7,[r12,#0x180]
    101  smlal r5,r9,r6,r7
    108  smlal r7,r11,r6,r7
    113  ldr r7,[r12,#0x280]
    119  smlal r5,r9,r6,r7
    125  smlal r7,r11,r6,r7
    129  smlal r7,r9,r8,r5
    [all …]
|
/frameworks/native/opengl/libagl/ |
D | iterators.S |
    51  stmfd sp!, {r4, r5, r6, r7, r8, lr}
    65  smull r6, r7, r4, r12
    68  smlal r6, r7, r3, r12
    78  mla r7, r3, r7, r5
    81  adc r7, r7, lr
    84  adc r6, r6, r7, lsl #28
    88  ldmfd sp!, {r4, r5, r6, r7, r8, pc}
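The SMULL/SMLAL pair above builds a 64-bit value from two 32×32-bit products, keeping the low word in r6 and the high word in r7; the later MLA/ADC lines fold further partial products and carries into that pair. The core accumulation, sketched with standard integer types (the helper name is an assumption):

    #include <stdint.h>

    static int64_t mac64(int32_t r4, int32_t r3, int32_t r12)
    {
        int64_t acc = (int64_t)r4 * r12;   /* smull r6, r7, r4, r12 */
        acc += (int64_t)r3 * r12;          /* smlal r6, r7, r3, r12 */
        return acc;                        /* low 32 bits in r6, high in r7 */
    }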
|
/frameworks/av/media/libstagefright/codecs/amrwbenc/src/asm/ARMV7/ |
D | Deemph_32_neon.s |
    39  LDRSH r7, [r1], #2 @load x_lo[0]
    47  ADD r12, r10, r7, LSL #4 @L_tmp += x_lo[0] << 4
    52  LDRSH r7, [r1], #2 @load x_lo[1]
    59  ADD r12, r10, r7, LSL #4
    70  LDRSH r7, [r1], #2
    73  ADD r12, r10, r7, LSL #4
    80  LDRSH r7, [r1], #2
    84  ADD r12, r10, r7, LSL #4
|
D | cor_h_vec_neon.s |
    41  ADD r7, r4, r2, LSL #5 @r7 --- p0 = rrixix[track]
    45  @r3 --- sign[], r4 --- i, r7 --- p0
    73  ADD r8, r7, #32
    82  LDRSH r10, [r7], #2 @*p0++
    120  ADD r8, r7, #32
    129  LDRSH r10, [r7], #2 @*p0++
|
D | residu_asm_neon.s |
    34  SUB r7, r3, #4 @i = lg - 4
    41  ADD r9, r1, r7, LSL #1 @copy the address
    42  ADD r10, r2, r7, LSL #1
    111  SUB r7, r7, #4 @i-=4
    116  CMP r7, #0
|
D | Norm_Corr_neon.s |
    72  MOV r7, #1
    100  QADD r9, r6, r7 @L_tmp = (L_tmp << 1) + 1;
    101  CLZ r7, r9
    102  SUB r6, r7, #1 @exp = norm_l(L_tmp)
    103  RSB r7, r6, #32 @exp = 32 - exp
    104  MOV r6, r7, ASR #1
    105  RSB r7, r6, #0 @scale = -(exp >> 1)
    109  @r7 --- scale r4 --- t_min r8 --- excf[]
    195  STMFD sp!, {r0 - r4, r7 - r12, r14}
    203  LDMFD sp!, {r0 - r4, r7 - r12, r14}
    [all …]
|
D | Filt_6k_7k_neon.s |
    58  MOV r7, r3 @ get signal[i]
    63  VLD1.S16 {Q0, Q1}, [r7]! @ signal[0] ~ signal[15]
    64  VLD1.S16 {Q2, Q3}, [r7]! @ signal[16] ~ signal[31]
    65  VLD1.S16 {Q4, Q5}, [r7]! @ signal[32] ~ signal[47]
    66  VLD1.S16 {Q6, Q7}, [r7]! @ signal[48] ~ signal[63]
    67  VLD1.S16 {Q8, Q9}, [r7]! @ signal[64] ~ signal[79]
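The five VLD1.S16 {Qa, Qb}, [r7]! loads above stream 80 consecutive 16-bit samples (signal[0] through signal[79]) into Q0–Q9, post-incrementing the pointer as they go. An equivalent staging step written with NEON intrinsics, as a sketch (the function name is an assumption):

    #include <arm_neon.h>

    static void load_signal_block(const int16_t *signal, int16x8_t q[10])
    {
        for (int i = 0; i < 10; i++)          /* Q0..Q9: 8 samples each   */
            q[i] = vld1q_s16(signal + 8 * i); /* signal[0] .. signal[79]  */
    }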
|
D | Syn_filt_32_neon.s |
    46  ADD r7, r3, #4 @ 4 + Q_new
    47  MOV r3, r6, ASR r7 @ a0 = Aq[0] >> (4 + Q_new)
|
/frameworks/rs/cpu_ref/ |
D | rsCpuIntrinsics_neon_3DLUT.S |
    25  vmov r6, r7, \src
    28  add r7, r7, r3
    31  vld1.u8 d17, [r7], r4
    34  vld1.u8 d19, [r7], r5
    47  vld1.u8 d19, [r7]
    50  sub r7, r7, r4
    53  vld1.u8 d17, [r7]
    109  push {r4,r5,r6,r7}
    113  ldr r7, [sp, #28]
    119  vmov.u16 d8[1], r7
    [all …]
|
D | rsCpuIntrinsics_neon_Convolve.S |
    119  push {r4-r7, lr}
    141  mov r7, #8
    145  vld1.8 {d24, d25, d26}, [r1], r7 @ y0 ( y - 2 )
    146  vld1.8 {d27, d28, d29}, [r2], r7 @ y0 ( y - 1 )
    149  pld [r1, r7]
    150  pld [r2, r7]
    191  vld1.8 {d24, d25, d26}, [r3], r7 @ y0 ( y )
    192  vld1.8 {d27, d28, d29}, [r4], r7 @ y0 ( y + 1 )
    195  pld [r3, r7]
    196  pld [r4, r7]
    [all …]
|
D | rsCpuIntrinsics_neon_Resize.S |
    58  vld1.u8 d22, [r7]!
    86  vld1.u32 d22[0], [r7]!
    178  push {r4,r5,r6,r7,r8,r9,r10,r11,r12,lr}
    201  ldrd r6,r7, [lr,#112] // src1, src2
    277  sub r7, r7, r10, LSL #COMPONENT_SHIFT
    329  sub r7, r7, #8
    333  add r7, r7, lr, LSL #COMPONENT_SHIFT
    380  sub r7, r7, #COMPONENT_COUNT
    797  pop {r4,r5,r6,r7,r8,r9,r10,r11,r12,pc}
|
D | rsCpuIntrinsics_neon_Blur.S |
    102  mla r11, r2, r7, r1
    153  cmp r7, #\i
    1597  cmpeq r5, r7
    1743  push {r4,r5,r6,r7,r8,r9,r10,r11,r12,lr}
    1749  sub r7, r3, r6 // h - y
    1752  sub r7, r7, #1 // h - y - 1
    1760  cmp r7, r5
    1761  movhi r7, r5 // rdn = min(r, h - y - 1)
    1774  pop {r4,r5,r6,r7,r8,r9,r10,r11,r12,pc}
    1790  push {r4,r5,r6,r7,r8,r9,r10,r11,r12,lr}
    [all …]
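Lines 1749–1761 clamp the downward reach of the blur kernel at the bottom image edge: r7 first becomes h - y - 1 (rows remaining below the current one) and is then limited to the radius held in r5, per the rdn = min(r, h - y - 1) comment. A small sketch of that clamp; the parameter names follow the comments and the function name is an assumption:

    static inline int clamp_down_radius(int r, int h, int y)
    {
        int below = h - y - 1;            /* rows available below row y */
        return below > r ? r : below;     /* rdn = min(r, h - y - 1)    */
    }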
|