/frameworks/av/media/libstagefright/codecs/amrwbenc/src/asm/ARMV5E/ |
D | residu_asm_opt.s |
     25  @y[] --- r2
     72  MOV r0, r2
     78  LDR r2, [r1], #-4 @r2 --- x[1], x[0]
     80  SMULTB r3, r5, r2 @i1(0) --- r3 = x[0] * a0
     81  SMULTT r4, r5, r2 @i2(0) --- r4 = x[1] * a0
     85  SMLABB r4, r5, r2, r4 @i2(1) --- r4 += x[0] * a1
     86  SMLABT r11, r5, r2, r11 @i3(1) --- r11 += x[1] * a0
     89  SMLATB r11, r6, r2, r11 @i3(2) --- r11 += x[0] * a2
     90  SMLATT r12, r6, r2, r12 @i4(2) --- r12 += x[1] * a2
     91  SMLABB r12, r6, r2, r12 @i4(3) --- r12 += x[0] * a3
     [all …]
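These hits are from the AMR-WB encoder's LP residual filter: the SMUL/SMLA comments spell out a multiply-accumulate of the Q12 LP coefficients a[] (packed pairwise in r5/r6) against the input x[]. A minimal scalar sketch of that filter follows; saturate16() and the exact rounding are illustrative, not the codec's own fixed-point conventions.

    #include <stdint.h>

    // Reference form of the LP residual filter that residu_asm_opt.s unrolls:
    // y[i] = a[0]*x[i] + a[1]*x[i-1] + ... + a[m]*x[i-m], with a[] in Q12.
    // saturate16() is a hypothetical helper; the codec's scaling differs in detail.
    static int16_t saturate16(int32_t v) {
        return (int16_t)(v > 32767 ? 32767 : (v < -32768 ? -32768 : v));
    }

    void residu_ref(const int16_t *a, const int16_t *x, int16_t *y, int lg, int m) {
        for (int i = 0; i < lg; i++) {
            int32_t s = 0;
            for (int j = 0; j <= m; j++)
                s += (int32_t)a[j] * x[i - j];        // x[] is read into its history when j > i
            y[i] = saturate16((s + 0x800) >> 12);     // drop the Q12 scaling with rounding
        }
    }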
|
D | pred_lt4_1_opt.s |
     29  @ r2 --- frac
     40  RSB r2, r2, #0 @frac = -frac
     42  CMP r2, #0
     43  ADDLT r2, r2, #4 @frac += UP_SAMP
     46  RSB r4, r2, #3 @k = 3 - frac
     60  MOV r2, r8 @ptr = ptr2
     61  LDR r3, [r2], #4 @h[0], h[1]
     75  LDR r3, [r2], #4 @h[2], h[3]
     86  LDR r3, [r2], #4 @h[4], h[5]
     97  LDR r3, [r2], #4 @h[6], h[7]
     [all …]
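pred_lt4_1_opt.s is the adaptive-codebook (long-term) prediction with 1/4-sample resolution: frac is negated, wrapped into [0, UP_SAMP), and selects a phase of the interpolation filter h[] that the LDRs above fetch pairwise. A rough scalar sketch of that idea; interp_filter[][], INTER_LEN and the index arithmetic are illustrative placeholders, not the codec's actual table or offsets.

    #include <stdint.h>

    // Sketch of fractional-delay prediction: interpolate the past excitation with a
    // polyphase FIR whose phase is chosen by 'frac'. Names here are hypothetical;
    // the real routine indexes the inter4_2[][] table with different boundary offsets.
    enum { UP_SAMP = 4, INTER_LEN = 16 };
    extern const int16_t interp_filter[UP_SAMP][INTER_LEN];   // assumed Q15 phases

    void pred_frac_ref(int16_t *exc, int length, int t0, int frac) {
        frac = -frac;
        if (frac < 0) frac += UP_SAMP;                     // wrap the phase, as in the asm
        const int16_t *x = &exc[-t0 - INTER_LEN / 2 + 1];  // illustrative alignment of the FIR
        for (int i = 0; i < length; i++) {
            int32_t s = 0;
            for (int j = 0; j < INTER_LEN; j++)
                s += (int32_t)x[i + j] * interp_filter[frac][j];
            exc[i] = (int16_t)((s + 0x4000) >> 15);        // Q15 rounding, saturation omitted
        }
    }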
|
D | cor_h_vec_opt.s |
     27  @r2 ---- track
     41  ADD r7, r4, r2, LSL #5 @r7 --- p0 = rrixix[track]
     44  @r0 --- h[], r1 --- vec[], r2 --- pos
     49  ADD r9, r1, r2, LSL #1 @p2 = &vec[pos]
     51  RSB r11, r2, #62 @j=62-pos
     71  ADD r9, r3, r2, LSL #1 @address of sign[pos]
     90  ADD r2, r2, #4
     94  ADD r9, r1, r2, LSL #1 @p2 = &vec[pos]
     96  RSB r11, r2, #62 @j=62-pos
    118  ADD r9, r3, r2, LSL #1 @address of sign[pos]
     [all …]
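cor_h_vec_opt.s correlates the impulse response h[] with a filtered target vector for one pulse track of the algebraic-codebook search; p2 = &vec[pos] and the j = 62 - pos counter above belong to that inner product. The underlying operation, in a hedged scalar form (names are illustrative):

    #include <stdint.h>

    // Inner product behind cor_h_vec: for a candidate pulse position 'pos',
    // correlate h[] against vec[] over the remainder of the 64-sample subframe.
    enum { L_SUBFR = 64 };

    int32_t corr_at_pos(const int16_t *h, const int16_t *vec, int pos) {
        int32_t s = 0;
        for (int j = pos; j < L_SUBFR; j++)
            s += (int32_t)h[j - pos] * vec[j];
        return s;
    }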
|
/frameworks/av/media/libstagefright/codecs/mp3dec/src/asm/ |
D | pvmp3_polyphase_filter_window_gcc.s |
     46  stmfd sp!,{r0-r2,r4-r11,lr}
     49  adr r2,PolyPh_filter_coeff
     50  ldr r1,[r2]
     51  add r1,r2
     52  ldr r2,[sp,#0xc]
     54  sub r2,r2,#1
     56  str r2,[sp]
     65  add r2,r4,r10
     66  add r3,r0,r2,lsl #2
     67  sub r2,r4,r10
     [all …]
|
D | pvmp3_polyphase_filter_window_arm.s |
     48  stmfd sp!,{r0-r2,r4-r11,lr}
     51  ldr r2,[sp,#0xc]
     54  sub r2,r2,#1
     56  str r2,[sp]
     65  add r2,r4,r10
     66  add r3,r0,r2,lsl #2
     67  sub r2,r4,r10
     70  add r12,r0,r2,lsl #2
     72  smlal r2,r9,lr,r5
     73  smlal r2,r11,lr,r6
     [all …]
|
D | pvmp3_polyphase_filter_window_wm.asm |
     43  stmfd sp!,{r0-r2,r4-r11,lr}
     46  ldr r2,[sp,#0xc]
     49  sub r2,r2,#1
     51  str r2,[sp]
     60  add r2,r4,r10
     61  add r3,r0,r2,lsl #2
     62  sub r2,r4,r10
     65  add r12,r0,r2,lsl #2
     67  smlal r2,r9,lr,r5
     68  smlal r2,r11,lr,r6
     [all …]
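The three files above are the GCC, ARM-ADS and Windows Mobile assembler builds of the same polyphase synthesis windowing step. In textbook (ISO 11172-3) form, each of the 32 PCM outputs is a 16-tap windowed sum over the 512-entry synthesis buffer; a minimal floating-point sketch of that reference form follows (the pvmp3 code works in fixed point and lays its buffers out differently).

    // Textbook MP3 synthesis windowing: 32 output samples, each summing 16
    // window-weighted values taken at a stride of 32 from the synthesis buffer U.
    // D[] is the standard 512-tap synthesis window.
    void synth_window_ref(const float U[512], const float D[512], float pcm[32]) {
        for (int j = 0; j < 32; j++) {
            float s = 0.0f;
            for (int i = 0; i < 16; i++)
                s += U[j + 32 * i] * D[j + 32 * i];
            pcm[j] = s;
        }
    }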
|
D | pvmp3_dct_16_gcc.s |
     53  sub r2,r1,r3
     54  smull lr,r2,r12,r2
     56  str r2,[sp,#0x14]
     57  ldr r2,[r0,#0x1c]
     60  sub r3,r2,r12
     66  add r3,r2,r12
     67  sub r2,r1,r3
     70  smull lr,r2,r12,r2
    149  sub r3,r2,r12
    151  add r2,r2,r12
     [all …]
|
D | pvmp3_mdct_18_gcc.s |
     45  mov r7,r2
     46  adr r2,constdata$1
     48  add r3,r2,#0x24
     82  ldr lr,[r2],#4
     98  ldr r2,[r5,#0x40]
    132  sub r1,r1,r2
    136  sub r1,r1,r2
    142  ldr r2,[r1,#0x28]
    144  add r0,r0,r2
    149  mov r0,r2
     [all …]
|
D | pvmp3_mdct_18_arm.s |
     47  mov r7,r2
     48  ldr r2,table
     50  add r3,r2,#0x24
     84  ldr lr,[r2],#4
    100  ldr r2,[r5,#0x40]
    134  sub r1,r1,r2
    138  sub r1,r1,r2
    144  ldr r2,[r1,#0x28]
    146  add r0,r0,r2
    151  mov r0,r2
     [all …]
|
D | pvmp3_mdct_18_wm.asm |
     45  mov r7,r2
     46  ldr r2,table
     48  add r3,r2,#0x24
     82  ldr lr,[r2],#4
     98  ldr r2,[r5,#0x40]
    132  sub r1,r1,r2
    136  sub r1,r1,r2
    142  ldr r2,[r1,#0x28]
    144  add r0,r0,r2
    149  mov r0,r2
     [all …]
|
D | pvmp3_dct_9_gcc.s |
     44  ldr r2, [r0, #0x20]
     47  add r1,r2,r3
     48  sub lr,r2,r3
     51  add r2,r3,r12
     62  add r8,r9,r2
     67  rsb r2,r9,r2,asr #1
     70  rsb r7,r2,#0
     91  smlal r9,r2,r11,r9
     95  smlal r12,r2,r9,r1
    115  smlal r1,r2,r12,r1
     [all …]
|
D | pvmp3_dct_9_arm.s |
     55  ldr r2, [r0, #0x20]
     58  add r1,r2,r3
     59  sub lr,r2,r3
     62  add r2,r3,r12
     73  add r8,r9,r2
     78  rsb r2,r9,r2,asr #1
     81  rsb r7,r2,#0
    101  smlal r9,r2,r11,r9
    106  smlal r12,r2,r9,r1
    126  smlal r1,r2,r12,r1
     [all …]
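pvmp3_dct_16 and pvmp3_dct_9 above (and the mdct_18 routines built on top of them) are hand-scheduled fixed-point butterfly factorizations of small cosine transforms. The naive O(N²) DCT they are equivalent to, up to the scaling and output-ordering conventions of the MP3 filterbank, is simply:

    #include <math.h>

    // Naive reference DCT; the assembly computes the same sums via butterflies
    // and 32x32->64 fixed-point multiplies (the smull/smlal hits above).
    void dct_ref(const double *in, double *out, int N) {
        for (int k = 0; k < N; k++) {
            double s = 0.0;
            for (int n = 0; n < N; n++)
                s += in[n] * cos(M_PI * (n + 0.5) * k / N);
            out[k] = s;
        }
    }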
|
/frameworks/av/media/libstagefright/codecs/avc/enc/src/ |
D | block.cpp |
     24  int r0, r1, r2, r3, j;  in trans() local
     35  r2 = cur[2] - predBlock[2];  in trans()
     40  r1 += r2; //ptr[1] + ptr[2];  in trans()
     41  r2 = r1 - (r2 << 1); //ptr[1] - ptr[2];  in trans()
     45  ptr[1] = (r3 << 1) + r2;  in trans()
     46  ptr[3] = r3 - (r2 << 1);  in trans()
     61  r2 = ptr[16] - ptr[32];  in trans()
     65  ptr[16] = (r3 << 1) + r2;  in trans()
     66  ptr[48] = r3 - (r2 << 1);  in trans()
     85  int r0, r1, r2, r3, j, k, idx;  in dct_luma() local
     [all …]
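trans() in block.cpp is the H.264 4x4 forward integer core transform: lines 40-46 are the horizontal butterfly and lines 61-66 repeat it vertically on a stride of 16 (the function subtracts the prediction block first, as line 35 shows). One 1-D pass of that butterfly, written out in plain C++:

    // One 1-D pass of the 4x4 forward core transform, matching the butterfly
    // visible in trans(); s[] holds four residual samples.
    void h264_fwd4(const int s[4], int out[4]) {
        int r0 = s[0] + s[3];
        int r1 = s[1] + s[2];
        int r2 = s[1] - s[2];
        int r3 = s[0] - s[3];
        out[0] = r0 + r1;
        out[2] = r0 - r1;
        out[1] = (r3 << 1) + r2;    // ptr[1] = (r3 << 1) + r2 in the listing
        out[3] = r3 - (r2 << 1);    // ptr[3] = r3 - (r2 << 1)
    }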
|
D | motion_comp.cpp |
    276  int32 r0, r1, r2, r3, r4, r5;  in eHorzInterp1MC() local
    296  r2 = p_ref[3];  in eHorzInterp1MC()
    297  r1 |= (r2 << 16); /* 0,d,0,b */  in eHorzInterp1MC()
    300  r2 = *(p_ref += 4); /* move pointer to e */  in eHorzInterp1MC()
    302  r2 |= (r3 << 16); /* 0,g,0,e */  in eHorzInterp1MC()
    309  r6 = r2 + r3; /* g+h, e+f */  in eHorzInterp1MC()
    314  r5 = r1 + r2; /* d+g, b+e */  in eHorzInterp1MC()
    328  r5 = r6 | (r2 >> 16);/* 0,i,0,g */  in eHorzInterp1MC()
    332  r1 += r2; /* f+g, d+e */  in eHorzInterp1MC()
    335  r0 |= (r2 << 16); /* 0,e,0,c */ /* r0 has changed */  in eHorzInterp1MC()
     [all …]
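eHorzInterp1MC() evaluates the H.264 6-tap half-sample luma filter two pixels at a time by packing two 16-bit values per register (the "/* 0,d,0,b */" style comments above). The scalar filter that packing implements, with an illustrative clip helper:

    #include <stdint.h>

    // Scalar H.264 half-sample filter [1 -5 20 20 -5 1] with rounding and clipping;
    // the packed code in motion_comp.cpp produces the same result two pixels at a time.
    static uint8_t clip255(int v) { return (uint8_t)(v < 0 ? 0 : (v > 255 ? 255 : v)); }

    void horz_halfpel_ref(const uint8_t *ref, int ref_stride,
                          uint8_t *out, int out_stride, int w, int h) {
        for (int y = 0; y < h; y++) {
            for (int x = 0; x < w; x++) {
                int s = ref[x - 2] - 5 * ref[x - 1] + 20 * ref[x]
                      + 20 * ref[x + 1] - 5 * ref[x + 2] + ref[x + 3];
                out[x] = clip255((s + 16) >> 5);
            }
            ref += ref_stride;
            out += out_stride;
        }
    }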
|
/frameworks/av/media/libstagefright/codecs/aacenc/src/asm/ARMV5E/ |
D | R4R8First_v5.s |
     36  ldrd r2, [r11, #8]
     40  add r8, r0, r2
     43  sub r0, r0, r2
     46  add r2, r4, r6
     52  add r6, r8, r2
     55  sub r8, r8, r2
     58  add r2, r0, r5
     65  strd r2, [r11, #8]
     91  ldrd r2, [r14, #8]
     95  add r8, r0, r2 @ r0 = buf[0] + buf[2]@
     [all …]
|
D | AutoCorrelation_v5.s |
     36  mov r2, r2, lsl #16
     38  mov r4, r2, asr #16
     44  mov r2, #0
     49  ldr r6, [r5, r2]
     50  add r2, r2, #4
     52  ldr r1, [r5, r2]
     59  add r2, r2, #4
     63  ldr r1, [r5, r2]
     71  add r2, r2, #4
     78  ldrsh r6, [r5, r2]
     [all …]
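AutoCorrelation_v5.s computes the autocorrelation the AAC encoder needs for its LPC-style analysis; the hits show r2 acting as the byte offset that walks the sample buffer. The operation itself is the usual lag-by-lag sum of products (the fixed-point scaling the assembly applies is omitted here):

    #include <stdint.h>

    // Reference autocorrelation: acf[lag] = sum over i of x[i] * x[i + lag].
    void autocorr_ref(const int16_t *x, int n, int max_lag, int64_t *acf) {
        for (int lag = 0; lag <= max_lag; lag++) {
            int64_t s = 0;
            for (int i = 0; i + lag < n; i++)
                s += (int64_t)x[i] * x[i + lag];
            acf[lag] = s;
        }
    }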
|
D | Radix4FFT_v5.s |
     37  mov r9, r2, lsl #3 @ step = 2*bgn@
     41  str r2, [sp, #8]
     47  mov r11, r2 @ j = bgn
     58  ldrd r10, [r14, #0] @ r2 = xptr[0]@ r3 = xptr[1]@
     64  smlawb r2, r11, r8, r4 @ r2 = L_mpy_wx(cosx, t0) + L_mpy_wx(sinx, t1)@
     73  sub r0, r10, r2 @ r0 = t0 - r2@
     76  add r2, r10, r2 @ r2 = t0 + r2@
     79  str r2, [sp, #24]
     97  smulwt r2, r10, r8 @ L_mpy_wx(cosx, t0)
    100  smlawb r6, r11, r8, r2 @ r4 = L_mpy_wx(cosx, t0) + L_mpy_wx(sinx, t1)@
     [all …]
|
/frameworks/av/media/libstagefright/codecs/on2/h264dec/omxdl/arm_neon/vc/m4p10/src_gcc/ |
D | omxVCM4P10_InterpolateLuma_s.S |
     53  ADD r12,r2,r3,LSL #1
     54  VST1.32 {d9[0]},[r2],r3
     56  VST1.32 {d10[0]},[r2]
     67  ADD r12,r2,r3,LSL #1
     68  VST1.32 {d22[0]},[r2],r3
     70  VST1.32 {d24[0]},[r2]
     77  ADD r12,r2,r3,LSL #1
     78  VST1.32 {d22[0]},[r2],r3
     80  VST1.32 {d24[0]},[r2]
     91  ADD r12,r2,r3,LSL #1
     [all …]
|
D | armVCM4P10_Average_4x_Align_unsafe_s.S |
     18  LDR r12,[r2,#0]
     20  LDR lr,[r2,r3]
     27  STR r5,[r2],r3
     29  STR r4,[r2],r3
     31  LDR r12,[r2,#0]
     33  LDR lr,[r2,r3]
     39  STR r5,[r2],r3
     41  STR r4,[r2],r3
     52  LDR r12,[r2,#0]
     53  LDR lr,[r2,r3]
     [all …]
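armVCM4P10_Average_4x_Align_unsafe loads source rows a word at a time (r12/lr above) and stores back the byte-wise rounded average through r2. Per byte, the OMX averaging used throughout these H.264 prediction paths is just the round-up mean of two predictions; a minimal scalar form:

    #include <stdint.h>

    // Byte-wise equivalent of the word-packed averaging: dst = (a + b + 1) >> 1.
    void average_ref(const uint8_t *src0, const uint8_t *src1, uint8_t *dst, int n) {
        for (int i = 0; i < n; i++)
            dst[i] = (uint8_t)((src0[i] + src1[i] + 1) >> 1);
    }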
|
D | armVCM4P10_InterpolateLuma_Copy_unsafe_s.S |
     28  STR r4,[r2],r3
     30  STR r5,[r2],r3
     32  STR r8,[r2],r3
     33  STR r9,[r2],r3
     42  STR r4,[r2],r3
     47  STR r8,[r2],r3
     52  STR r4,[r2],r3
     55  STR r8,[r2],r3
     64  STR r4,[r2],r3
     67  STR r8,[r2],r3
     [all …]
|
D | armVCM4P10_UnpackBlock4x4_s.S |
     17  LDR r2,[r0,#0]
     21  LDRB r3,[r2],#1
     28  LDRNESB r5,[r2,#1]
     29  LDRNEB r4,[r2],#2
     31  LDREQSB r4,[r2],#1
     34  LDREQB r3,[r2],#1
     37  STR r2,[r0,#0]
|
/frameworks/av/media/libstagefright/codecs/m4v_h263/dec/src/ |
D | idct.cpp |
    131  int32 r0, r1, r2, r3, r4, r5, r6, r7, r8; /* butterfly nodes */  in idct_intra() local
    151  r2 = blk[B_SIZE * 6 + i];  in idct_intra()
    158  if (!(r1 | r2 | r3 | r4 | r5 | r6 | r7))  in idct_intra()
    195  r1 = W6 * (r3 + r2);  in idct_intra()
    196  r2 = (r1 - (W2 + W6) * r2);  in idct_intra()
    207  r3 = r0 + r2;  in idct_intra()
    208  r0 -= r2;  in idct_intra()
    209  r2 = (181 * (r4 + r5) + 128) >> 8; /* rounding */  in idct_intra()
    219  tmpBLK32[(1<<3) + i] = (r3 + r2) >> 8;  in idct_intra()
    224  tmpBLK32[(6<<3) + i] = (r3 - r2) >> 8;  in idct_intra()
     [all …]
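idct_intra() is the classic row-column fast IDCT written with butterfly temporaries r0..r8; W2 and W6 are fixed-point cosine constants, and the (181 * (r4 + r5) + 128) >> 8 hit is the 1/√2 rotation (181/256 ≈ 1/√2). Each 1-D pass is mathematically an 8-point inverse DCT; the naive definition it matches, ignoring the fixed-point pre-scaling, is:

    #include <math.h>

    // Naive 8-point inverse DCT (type III); the butterfly code computes the same
    // values with integer constants and a final >>8 rounding per pass.
    void idct8_ref(const double in[8], double out[8]) {
        for (int n = 0; n < 8; n++) {
            double s = in[0] / sqrt(2.0);
            for (int k = 1; k < 8; k++)
                s += in[k] * cos(M_PI * (2 * n + 1) * k / 16.0);
            out[n] = s / 2.0;
        }
    }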
|
/frameworks/native/opengl/libagl/ |
D | fixed_asm.S |
     37  mov r2, #0x8E /* 127 + 15 */
     38  sub r1, r2, r1, lsr #24 /* compute shift */
     39  mov r2, r0, lsl #8 /* mantissa<<8 */
     40  orr r2, r2, #0x80000000 /* add the missing 1 */
     41  mov r0, r2, lsr r1 /* scale to 16.16 */
     52  mov r2, #0x8E /* 127 + 15 */
     53  subs r1, r2, r1, lsr #24 /* compute shift */
     55  mov r2, r0, lsl #8 /* mantissa<<8 */
     56  orr r2, r2, #0x80000000 /* add the missing 1 */
     58  movs r0, r2, lsr r1 /* scale to 16.16 */
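fixed_asm.S converts an IEEE-754 single to signed 16.16 fixed point by hand: 0x8E is the exponent bias 127 plus 15, the shift comes from the exponent field, and the mantissa (with its implicit leading 1 restored by the ORR) is shifted into place. A bit-level C++ equivalent of that fast path (sign, zero and out-of-range handling, which the full routine deals with, are omitted):

    #include <stdint.h>
    #include <string.h>

    // Equivalent of the conversion core in fixed_asm.S for positive, in-range inputs:
    // result = (mantissa<<8 | 1<<31) >> ((127 + 15) - exponent).
    int32_t float_to_x16(float f) {
        uint32_t bits;
        memcpy(&bits, &f, sizeof(bits));            // reinterpret the IEEE-754 bits
        uint32_t exponent = (bits >> 23) & 0xFF;
        uint32_t shift = (127 + 15) - exponent;     // the 0x8E constant in the asm
        uint32_t mant = (bits << 8) | 0x80000000u;  // mantissa<<8, add the missing 1
        return (int32_t)(mant >> shift);            // scale to 16.16
    }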
|
/frameworks/av/media/libstagefright/codecs/amrwbenc/src/asm/ARMV7/ |
D | cor_h_vec_neon.s |
     28  @r2 ---- track
     41  ADD r7, r4, r2, LSL #5 @r7 --- p0 = rrixix[track]
     44  @r0 --- h[], r1 --- vec[], r2 --- pos
     50  ADD r9, r1, r2, LSL #1 @p2 = &vec[pos]
     52  RSB r11, r2, #62 @j=62-pos
     72  ADD r9, r3, r2, LSL #1 @address of sign[pos]
     91  ADD r2, r2, #4
     95  ADD r9, r1, r2, LSL #1 @p2 = &vec[pos]
     97  RSB r11, r2, #62 @j=62-pos
    119  ADD r9, r3, r2, LSL #1 @address of sign[pos]
     [all …]
|
D | pred_lt4_1_neon.s |
     26  @ r2 --- frac
     37  RSB r2, r2, #0 @ frac = - frac
     39  CMP r2, #0
     40  ADDLT r2, r2, #4 @ frac += UP_SAMP
     46  RSB r2, r2, #3 @ k = UP_SAMP - 1 - frac
     48  ADD r11, r11, r2, LSL #6 @ get inter4_2[k][]
|