/frameworks/av/media/codecs/amrwb/enc/src/asm/ARMV5E/

Dot_p_opt.s
    34  STMFD r13!, {r4 - r12, r14}
    35  MOV r4, #0 @ L_sum = 0
    42  SMLABB r4, r6, r7, r4
    44  SMLATT r4, r6, r7, r4
    47  SMLABB r4, r8, r9, r4
    50  SMLATT r4, r8, r9, r4
    53  SMLABB r4, r6, r7, r4
    55  SMLATT r4, r6, r7, r4
    57  SMLABB r4, r8, r9, r4
    59  SMLATT r4, r8, r9, r4
        [all …]

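The SMLABB/SMLATT pairs above are halfword multiply-accumulates: each one adds the product of two 16-bit samples into the running sum in r4. A minimal C sketch of that pattern follows; the function name, the loop bound n, and the 64-bit accumulator are illustrative assumptions (the assembly keeps the sum in the 32-bit register r4).

    #include <stdint.h>

    /* dot_product16 is a hypothetical name; only the MAC pattern is taken
     * from the listing above. */
    static int64_t dot_product16(const int16_t *x, const int16_t *y, int n)
    {
        int64_t L_sum = 0;                  /* r4: "MOV r4, #0  @ L_sum = 0" */
        for (int i = 0; i < n; i++)
            L_sum += (int32_t)x[i] * y[i];  /* one SMLABB or SMLATT per pair */
        return L_sum;
    }
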
residu_asm_opt.s
    33  STMFD r13!, {r4 - r12, r14}
    64  LDRH r4, [r0], #2
    65  ORR r12, r4, r12, LSL #16 @r12 --- a14, a15
    69  LDRH r4, [r0], #2 @load a16
    73  ORR r14, r4, r14, LSL #16 @r14 --- loopnum, a16
    81  SMULTT r4, r5, r2 @i2(0) --- r4 = x[1] * a0
    85  SMLABB r4, r5, r2, r4 @i2(1) --- r4 += x[0] * a1
    96  SMLATT r4, r6, r2, r4 @i2(2) --- r4 += x[-1] * a2
   101  SMLABB r4, r6, r2, r4 @ i2 (3)
   107  SMLATT r4, r7, r2, r4 @ i2 (4)
        [all …]

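The comments at lines 81-96 spell out the recurrence being computed: an LPC analysis (residual) filter, r[i] = sum over j of a[j] * x[i-j]. A minimal sketch under stated assumptions; the filter order m, the Q12 coefficient format, and the final rounding are not taken from the file.

    #include <stdint.h>

    /* residual_filter is a hypothetical name.  x[] is assumed to carry m
     * samples of history before x[0], as the x[-1] access above implies. */
    static void residual_filter(const int16_t *a, const int16_t *x,
                                int16_t *y, int lg, int m)
    {
        for (int i = 0; i < lg; i++) {
            int64_t s = 0;
            for (int j = 0; j <= m; j++)
                s += (int32_t)a[j] * x[i - j];   /* "r4 += x[...] * a..." */
            y[i] = (int16_t)((s + 0x800) >> 12); /* assumed Q12 scaling */
        }
    }
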
pred_lt4_1_opt.s
    39  STMFD r13!, {r4 - r12, r14}
    40  RSB r4, r1, #0 @-T0
    42  ADD r5, r0, r4, LSL #1 @x = exc - T0
    47  RSB r4, r2, #3 @k = 3 - frac
    52  MOV r8, r4, LSL #6
    66  LDRSH r4, [r1], #2 @x[0]
    70  SMULBB r10, r4, r3 @x[0] * h[0]
    74  LDRSH r4, [r1], #2 @x[3]
    77  SMLABT r12, r4, r3, r12 @x[3] * h[1]
    82  SMLABB r11, r4, r3, r11 @x[3] * h[2]
        [all …]

Filt_6k_7k_opt.s
    36  STMFD r13!, {r4 - r12, r14}
    39  MOV r4, r1 @ copy lg address
    87  @ not use registers: r4, r10, r12, r14, r5
    88  MOV r4, r13
    93  LDRSH r1, [r4] @ load x[i]
    94  LDRSH r2, [r4, #60] @ load x[i + 30]
    95  LDRSH r6, [r4, #2] @ load x[i + 1]
    96  LDRSH r7, [r4, #58] @ load x[i + 29]
    99  LDRSH r8, [r4, #4] @ load x[i + 2]
   100  LDRSH r9, [r4, #56] @ load x[i + 28]
        [all …]

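The paired loads (x[i] with x[i + 30], x[i + 1] with x[i + 29], and so on) suggest a 31-tap FIR with a symmetric impulse response, so each coefficient can multiply the sum of a mirrored sample pair. A minimal sketch of that structure; the table h[], its Q format, and the rounding are assumptions.

    #include <stdint.h>

    /* fir31_symmetric is a hypothetical name; h[] holds the 16 distinct
     * coefficients h[0..15] of an assumed symmetric 31-tap filter. */
    static void fir31_symmetric(const int16_t *x, const int16_t *h,
                                int16_t *y, int lg)
    {
        for (int i = 0; i < lg; i++) {
            int64_t s = (int32_t)x[i + 15] * h[15];              /* centre tap */
            for (int j = 0; j < 15; j++)
                s += (int64_t)(x[i + j] + x[i + 30 - j]) * h[j]; /* mirrored pair */
            y[i] = (int16_t)((s + 0x4000) >> 15);                /* assumed Q15 */
        }
    }
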
convolve_opt.s
    34  STMFD r13!, {r4 - r12, r14}
    39  ADD r4, r1, r3, LSL #1 @ tmpH address
    43  LDRSH r10, [r4], #-2 @ *tmpH--
    51  LDRSH r10, [r4], #-2 @ *tmpH--
    53  LDRSH r14, [r4], #-2 @ *tmpH--
    57  LDRSH r10, [r4], #-2 @ *tmpH--
    59  LDRSH r14, [r4], #-2 @ *tmpH--
    74  ADD r4, r1, r3, LSL #1 @tmpH address
    78  LDRSH r10, [r4], #-2
    80  LDRSH r14, [r4], #-2
        [all …]

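The decrementing tmpH pointer (set to r1 + r3*2, then read with post-decrement) is the signature of a convolution in which h is walked backwards while x is walked forwards. A minimal sketch; the output rounding is an assumption.

    #include <stdint.h>

    /* convolve16 is a hypothetical name for the pattern above. */
    static void convolve16(const int16_t *x, const int16_t *h, int16_t *y, int L)
    {
        for (int n = 0; n < L; n++) {
            const int16_t *tmpH = h + n;          /* "ADD r4, r1, r3, LSL #1" */
            int64_t s = 0;
            for (int i = 0; i <= n; i++)
                s += (int32_t)x[i] * *tmpH--;     /* "LDRSH ..., #-2  @ *tmpH--" */
            y[n] = (int16_t)((s + 0x4000) >> 15); /* assumed Q15 rounding */
        }
    }
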
cor_h_vec_opt.s
    29  @r4 ---- rrixix[][NB_POS]
    39  STMFD r13!, {r4 - r12, r14}
    40  LDR r4, [r13, #40] @load rrixix[][NB_POS]
    41  ADD r7, r4, r2, LSL #5 @r7 --- p0 = rrixix[track]
    42  MOV r4, #0 @i=0
    45  @r3 --- sign[], r4 --- i, r7 --- p0
    83  ADD r9, r9, r4, LSL #1
    84  ADD r12, r12, r4, LSL #1
    97  ADD r4, r4, #1 @i++
   130  ADD r9, r9, r4, LSL #1
        [all …]

syn_filt_opt.s
    36  STMFD r13!, {r4 - r12, r14}
    39  MOV r4, r3 @ copy mem[] address
    47  LDRH r6, [r4], #2
    48  LDRH r7, [r4], #2
    49  LDRH r8, [r4], #2
    50  LDRH r9, [r4], #2
    51  LDRH r10, [r4], #2
    52  LDRH r11, [r4], #2
    53  LDRH r12, [r4], #2
    54  LDRH r14, [r4], #2
        [all …]

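The run of LDRH instructions copies the filter memory mem[] into a local history buffer before the recursion starts. For context, a synthesis filter of this shape computes y[i] = x[i] - sum over j of a[j]*y[i-j], scaled by a[0]; the sketch below assumes Q12 coefficients with a[0] == 4096 and small fixed buffer bounds, none of which are taken from the file.

    #include <stdint.h>
    #include <string.h>

    /* syn_filt_sketch is a hypothetical name; m <= 16 and lg <= 256 assumed. */
    static void syn_filt_sketch(const int16_t *a, const int16_t *x, int16_t *y,
                                int lg, int16_t *mem, int m)
    {
        int16_t buf[16 + 256];
        int16_t *yy = buf + m;
        memcpy(buf, mem, m * sizeof(int16_t));         /* the LDRH copy loop above */

        for (int i = 0; i < lg; i++) {
            int64_t s = (int64_t)x[i] << 12;           /* assumed a[0] == 4096 (Q12) */
            for (int j = 1; j <= m; j++)
                s -= (int32_t)a[j] * yy[i - j];
            yy[i] = (int16_t)((s + 0x800) >> 12);
            y[i] = yy[i];
        }
        memcpy(mem, &yy[lg - m], m * sizeof(int16_t)); /* save state for next call */
    }
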
scale_sig_opt.s
    32  STMFD r13!, {r4 - r12, r14}
    37  ADD r4, r0, r3, LSL #1 @x[i] address
    44  LDRSH r5, [r4] @load x[i]
    51  STRH r12, [r4], #-2
    57  LDRSH r5, [r4] @load x[i]
    63  STRH r12, [r4], #-2
    67  LDMFD r13!, {r4 - r12, r15}

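The load/store pair on x[i], with the pointer walking backwards (STRH ..., #-2), is an in-place rescaling of the signal. A minimal sketch, assuming |exp| <= 15, saturation when scaling up, and round-to-nearest when scaling down; those rules are conventions, not read from the file.

    #include <stdint.h>

    static int16_t sat16(int64_t v)
    {
        if (v > 32767) return 32767;
        if (v < -32768) return -32768;
        return (int16_t)v;
    }

    /* scale_sig_sketch is a hypothetical name. */
    static void scale_sig_sketch(int16_t *x, int lg, int exp)
    {
        for (int i = lg - 1; i >= 0; i--) {                 /* walks backwards */
            if (exp > 0)
                x[i] = sat16((int64_t)x[i] << exp);         /* scale up, saturate */
            else if (exp < 0)
                x[i] = (int16_t)((x[i] + (1 << (-exp - 1))) >> -exp); /* round down-shift */
        }
    }
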
Syn_filt_32_opt.s
    32  @ sig_hi[] --- r4
    41  STMFD r13!, {r4 - r12, r14}
    42  LDR r4, [r13, #40] @ get sig_hi[] address
   149  LDRSH r6, [r4, #-2] @ load sig_hi[i-1]
   150  LDRSH r7, [r4, #-4] @ load sig_hi[i-2]
   153  LDRSH r9, [r4, #-6] @ load sig_hi[i-3]
   154  LDRSH r10, [r4, #-8] @ load sig_hi[i-4]
   156  LDRSH r6, [r4, #-10] @ load sig_hi[i-5]
   160  LDRSH r7, [r4, #-12] @ load sig_hi[i-6]
   163  LDRSH r9, [r4, #-14] @ load sig_hi[i-7]
        [all …]

Norm_Corr_opt.s
    30  @ r4 --- t_min
    53  STMFD r13!, {r4 - r12, r14}
    57  LDR r4, [r13, #T_MIN] @get t_min
    58  RSB r11, r4, #0 @k = -t_min
   101  @r7 --- scale r4 --- t_min r8 --- excf[]
   155  STMFD sp!, {r0 - r4, r7 - r12, r14}
   163  LDMFD sp!, {r0 - r4, r7 - r12, r14}
   185  ADD r10, r5, r4, LSL #1 @ get corr_norm[t] address
   188  CMP r4, r6
   191  ADD r4, r4, #1 @ t_min ++
        [all …]

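For context, the corr_norm[t] written at line 185 is, conceptually, the cross-correlation of the target signal with the filtered past excitation at lag t, normalized by its energy. A minimal floating-point sketch follows; the names, the excf_by_lag layout, and the omission of the fixed-point "scale" handling are all simplifying assumptions.

    #include <math.h>

    static void norm_corr_sketch(const float *xn, const float *excf_by_lag[],
                                 int L_subfr, int t_min, int t_max,
                                 float *corr_norm)
    {
        for (int t = t_min; t <= t_max; t++) {      /* "ADD r4, r4, #1  @ t_min ++" */
            const float *excf = excf_by_lag[t - t_min];
            float corr = 0.0f, ener = 0.01f;        /* small bias avoids divide by 0 */
            for (int i = 0; i < L_subfr; i++) {
                corr += xn[i] * excf[i];
                ener += excf[i] * excf[i];
            }
            corr_norm[t] = corr / sqrtf(ener);
        }
    }
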
Deemph_32_opt.s
    36  STMFD r13!, {r4 - r12, r14}
    37  MOV r4, #2 @i=0
    89  ADD r4, r4, #2
    92  CMP r4, #64
    99  LDMFD r13!, {r4 - r12, r15}

/frameworks/av/media/codecs/m4v_h263/dec/src/

idct.cpp
   131  int32 r0, r1, r2, r3, r4, r5, r6, r7, r8; /* butterfly nodes */ in idct_intra() local
   153  r4 = blk[B_SIZE * 1 + i]; in idct_intra()
   158  if (!(r1 | r2 | r3 | r4 | r5 | r6 | r7)) in idct_intra()
   182  r8 = W7 * (r4 + r5); in idct_intra()
   183  r4 = (r8 + (W1 - W7) * r4); in idct_intra()
   199  r1 = r4 + r6; in idct_intra()
   200  r4 -= r6; in idct_intra()
   209  r2 = (181 * (r4 + r5) + 128) >> 8; /* rounding */ in idct_intra()
   210  r4 = (181 * (r4 - r5) + 128) >> 8; in idct_intra()
   220  tmpBLK32[(2<<3) + i] = (r0 + r4) >> 8; in idct_intra()
        [all …]

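In the lines quoted at 209-210, (181 * x + 128) >> 8 is a fixed-point multiply by cos(pi/4) = 1/sqrt(2): 181/256 is approximately 0.7070, and the +128 rounds to the nearest integer instead of truncating (for negative values the shift rounds toward minus infinity, as in the source). A standalone illustration:

    #include <stdint.h>
    #include <stdio.h>

    static int32_t mul_inv_sqrt2(int32_t x)
    {
        return (181 * x + 128) >> 8;   /* ~ x / sqrt(2), rounded, for moderate |x| */
    }

    int main(void)
    {
        printf("%d\n", (int)mul_inv_sqrt2(1000));  /* prints 707 */
        return 0;
    }
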
/frameworks/av/media/codecs/mp3dec/src/asm/

pvmp3_dct_16_gcc.s
    49  stmfd sp!,{r0,r1,r4-r11,lr}
    63  smull r4,r3,lr,r3
    74  sub r4,r1,r12
    77  smull r4,r5,lr,r4
    80  sub r4,r12,lr
    81  mov r4,r4,lsl #1
    82  smull r7,r4,r6,r4
    87  str r4,[sp,#4]
    88  smull r12,r4,r1,r12
   153  sub r12,r4,lr
        [all …]

pvmp3_polyphase_filter_window_gcc.s
    47  stmfd sp!,{r0-r2,r4-r11,lr}
    64  mov r4, #0x10
    66  add r2,r4,r10
    68  sub r2,r4,r10
    82  add r4,r4,#0x200
   142  cmp r4,#0x210
   153  mov r4,r3, asr #15
   154  teq r4,r3, asr #31
   158  ldr r4,[sp,#8]
   160  add r4,r4,r2,lsl #1
        [all …]

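Lines 153-154 are the classic "does this 32-bit value fit in 16 bits?" test: if the value shifted right arithmetically by 15 differs from its sign word (shift by 31), the PCM sample would overflow and must be clamped. A C rendering of that check, assuming arithmetic right shift of negative values as on ARM:

    #include <stdint.h>

    static int16_t clamp_to_pcm16(int32_t x)
    {
        if ((x >> 15) != (x >> 31))                /* "teq r4, r3, asr #31" */
            return (int16_t)((x >> 31) ^ 0x7FFF);  /* 0x7FFF or -0x8000 by sign */
        return (int16_t)x;
    }
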
pvmp3_mdct_18_gcc.s
    44  stmfd sp!,{r4-r11,lr}
    56  mov r4,#9
    72  subs r4,r4,#1
   141  add r1,r5,r4,lsl #2
   143  ldr r3,[r6,r4,lsl #2]
   146  ldr lr,[r7,r4,lsl #2]
   152  str r3,[r5,r4,lsl #2]
   153  str r2,[r6,r4,lsl #2]
   154  add r4,r4,#1
   155  cmp r4,#6
        [all …]

pvmp3_dct_9_gcc.s
    43  stmfd sp!,{r4-r11,lr}
    50  ldr r4,[r0, #0x18]
    54  add r12,r4,r5
    55  sub r4,r4,r5
   124  mov r2,r4,lsl #1
   133  add r4,r5,r4
   135  sub lr,r4,lr
   138  ldr r4,cos_pi_6
   148  smull r5,lr,r4,lr
   159  smlal r5,lr,r4,r3
        [all …]

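ldr r4,cos_pi_6 followed by smull/smlal is a fractional multiply: the cosine is held as a fixed-point constant and only the high 32 bits of the 64-bit product are kept, so the high word equals (x * c) / 2^32, i.e. half of x*cos(pi/6) when c is stored in Q31. Where the missing factor of two is reapplied depends on the surrounding code, and the constant's exact storage format here is an assumption.

    #include <stdint.h>

    #define COS_PI_6_Q31 ((int32_t)(0.8660254037844386 * 2147483648.0))

    /* smull writes the low word to r5 and the high word to lr; this returns
     * the high word only. */
    static int32_t mult_high(int32_t x, int32_t c_q31)
    {
        return (int32_t)(((int64_t)x * c_q31) >> 32);  /* ~ (x * cos(pi/6)) / 2 */
    }
    /* usage: int32_t y = mult_high(sample, COS_PI_6_Q31); */
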
/frameworks/av/media/codecs/amrwb/enc/src/asm/ARMV7/

convolve_neon.s
    34  STMFD r13!, {r4 - r12, r14}
    40  ADD r4, r1, r3, LSL #1 @ tmpH address
    44  LDRSH r10, [r4] @ *tmpH--
    52  SUB r4, r4, #8
    53  MOV r9, r4
    72  ADD r4, r1, r3, LSL #1 @tmpH address
    76  LDRSH r10, [r4], #-2
    78  LDRSH r14, [r4]
    88  SUB r4, r4, #8
    89  MOV r9, r4
        [all …]

cor_h_vec_neon.s
    30  @r4 ---- rrixix[][NB_POS]
    39  STMFD r13!, {r4 - r12, r14}
    40  LDR r4, [r13, #40] @load rrixix[][NB_POS]
    41  ADD r7, r4, r2, LSL #5 @r7 --- p0 = rrixix[track]
    42  MOV r4, #0 @i=0
    45  @r3 --- sign[], r4 --- i, r7 --- p0
    84  ADD r9, r9, r4, LSL #1
    85  ADD r12, r12, r4, LSL #1
    98  ADD r4, r4, #1 @i++
   131  ADD r9, r9, r4, LSL #1
        [all …]

pred_lt4_1_neon.s
    36  STMFD r13!, {r4 - r12, r14}
    37  SUB r4, r0, r1, LSL #1 @ x = exc - T0
    39  SUB r4, r4, #30 @ x -= L_INTERPOL2 - 1
    42  SUBLT r4, r4, #2 @ x--
    56  VLD1.S16 {Q4, Q5}, [r4]! @load 16 x[]
    57  VLD1.S16 {Q6, Q7}, [r4]! @load 16 x[]
    70  LDRSH r12, [r4], #2
    97  LDMFD r13!, {r4 - r12, r15}

scale_sig_neon.s
    33  STMFD r13!, {r4 - r12, r14}
    34  MOV r4, #4
    39  MOVEQ r4, #1
    42  MOVEQ r4, #2
    47  MOVEQ r4, #1
   128  SUBS r4, r4, #1
   134  LDMFD r13!, {r4 - r12, r15}

Deemph_32_neon.s
    36  STMFD r13!, {r4 - r12, r14}
    37  MOV r4, #2 @i=0
    89  ADD r4, r4, #2
    92  CMP r4, #64
    99  LDMFD r13!, {r4 - r12, r15}

Norm_Corr_neon.s
    30  @ r4 --- t_min
    53  STMFD r13!, {r4 - r12, r14}
    57  LDR r4, [r13, #T_MIN] @get t_min
    58  RSB r11, r4, #0 @k = -t_min
   109  @r7 --- scale r4 --- t_min r8 --- excf[]
   195  STMFD sp!, {r0 - r4, r7 - r12, r14}
   203  LDMFD sp!, {r0 - r4, r7 - r12, r14}
   225  ADD r10, r5, r4, LSL #1 @ get corr_norm[t] address
   228  CMP r4, r6
   231  ADD r4, r4, #1 @ t_min ++
        [all …]

syn_filt_neon.s
    35  STMFD r13!, {r4 - r12, r14}
    38  MOV r4, r3 @ copy mem[] address
    45  VLD1.S16 {D0, D1, D2, D3}, [r4]! @load 16 mems
    60  ADD r4, r13, #32 @ yy[i] address
    68  ADD r10, r4, r8, LSL #1 @ y[i], yy[i] address
   102  LDMFD r13!, {r4 - r12, r15}

/frameworks/rs/cpu_ref/

rsCpuIntrinsics_neon_YuvToRGB.S
   113  vld1.u8 d21, [r4]!
   115  pld [r4, #128]
   147  vld1.u32 d21[1], [r4]!
   157  vld1.u16 d21[1], [r4]!
   166  vld1.u8 d21[1], [r4]!
   175  vld1.u8 d21[0], [r4]!
   227  push {r4,r5}
   229  mov r4, r3
   236  add r4, r5, LSR #1
   244  pop {r4,r5}
        [all …]

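The matched lines only show chroma-pointer (r4) loads and bookkeeping; for orientation, here is a minimal per-pixel sketch of the YUV-to-RGB conversion this intrinsic vectorizes. The integer coefficients below are the common full-range BT.601 approximation and are an assumption, not read from the file.

    #include <stdint.h>

    static uint8_t clamp_u8(int v) { return v < 0 ? 0 : (v > 255 ? 255 : (uint8_t)v); }

    static void yuv_to_rgb_pixel(uint8_t y, uint8_t u, uint8_t v,
                                 uint8_t *r, uint8_t *g, uint8_t *b)
    {
        int c = y, d = u - 128, e = v - 128;
        *r = clamp_u8((256 * c + 359 * e + 128) >> 8);
        *g = clamp_u8((256 * c -  88 * d - 183 * e + 128) >> 8);
        *b = clamp_u8((256 * c + 454 * d + 128) >> 8);
    }
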
rsCpuIntrinsics_neon_Convolve.S
    30  push {r4-r8, r10, r11, lr}
    35  ldr r4, [sp, #32+64]
    36  vld1.16 {q0, q1}, [r4]
    39  ldr r4, [sp, #36+64]
    96  subs r4, r4, #1
   101  pop {r4-r8, r10, r11, lr}
   119  push {r4-r7, lr}
   123  ldr r4, [sp, #20 + 64]
   192  vld1.8 {d27, d28, d29}, [r4], r7 @ y0 ( y + 1 )
   196  pld [r4, r7]
        [all …]

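The matched lines show only coefficient loading (vld1.16 {q0, q1}) and row-pointer bookkeeping; for orientation, a scalar sketch of the per-pixel work a 3x3 convolve intrinsic of this kind performs: a coefficient-weighted sum over the 3x3 neighbourhood, clamped back to 0..255. The Q8 coefficient format and the edge clamping are assumptions.

    #include <stdint.h>

    /* rows[0..2] point at the clamped rows y-1, y, y+1 of one image plane. */
    static uint8_t convolve3x3_pixel(const uint8_t *rows[3], int x, int width,
                                     const int16_t coeff[9])   /* assumed Q8 */
    {
        int32_t sum = 0;
        for (int ky = 0; ky < 3; ky++) {
            for (int kx = -1; kx <= 1; kx++) {
                int xx = x + kx;
                if (xx < 0) xx = 0;                 /* clamp at the image edge */
                if (xx >= width) xx = width - 1;
                sum += coeff[ky * 3 + (kx + 1)] * rows[ky][xx];
            }
        }
        sum = (sum + 128) >> 8;                     /* assumed Q8 rounding */
        return sum < 0 ? 0 : (sum > 255 ? 255 : (uint8_t)sum);
    }
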