
Searched refs:r8 (Results 1 – 25 of 57) sorted by relevance


/frameworks/av/media/libstagefright/codecs/on2/h264dec/omxdl/arm_neon/vc/m4p10/src_gcc/
armVCM4P10_InterpolateLuma_Copy_unsafe_s.S
43 LDR r8,[r0],r1
46 STR r8,[r2],r3
53 LDR r8,[r0],r1
57 LSR r8,r8,#8
58 ORR r8,r8,r9,LSL #24
61 STR r8,[r2],r3
63 LDR r8,[r0],r1
67 LSR r8,r8,#8
68 ORR r8,r8,r9,LSL #24
69 STR r8,[r2],r3
[all …]
armVCM4P10_DecodeCoeffsToPair_s.S
54 LDRB r8,[r10,#1]
58 ORR r9,r9,r8,LSL #8
60 LSLS r8,r11,r12
62 AND r7,r7,r8,LSR #27
66 LDRB r8,[r10],#1
71 ORRCS r11,r8,r11,LSL #8
72 LSRS r8,r7,#1
74 LSLS r8,r11,r12
76 ADD r7,r7,r8,LSR #29
81 BIC r7,r8,#0xf000
[all …]
omxVCM4P10_PredictIntra_16x16_s.S
58 ADD r8,r3,r5
61 VST1.8 {d0,d1},[r8],r10
63 VST1.8 {d0,d1},[r8],r10
65 VST1.8 {d0,d1},[r8],r10
67 VST1.8 {d0,d1},[r8],r10
69 VST1.8 {d0,d1},[r8],r10
71 VST1.8 {d0,d1},[r8],r10
73 VST1.8 {d0,d1},[r8],r10
75 VST1.8 {d0,d1},[r8]
80 ADD r8,r0,r4
[all …]
armVCM4P10_InterpolateLuma_Align_unsafe_s.S
30 MOV r12,r8
43 STM r8!,{r7,r10,r11}
55 STM r8!,{r7,r10,r11}
67 STM r8!,{r7,r10,r11}
79 STM r8!,{r7,r10,r11}
99 STR r7,[r8],#4
108 STR r7,[r8],#4
117 STR r7,[r8],#4
126 STR r7,[r8],#4
129 SUB r0,r8,#0x1c
/frameworks/av/media/libstagefright/codecs/amrwbenc/src/asm/ARMV5E/
convolve_opt.s
45 MUL r8, r9, r10
54 MLA r8, r9, r10, r8
55 MLA r8, r12, r14, r8
60 MLA r8, r9, r10, r8
62 MLA r8, r12, r14, r8
68 ADD r5, r11, r8, LSL #1
82 MUL r8, r9, r10
84 MLA r8, r12, r14, r8
93 MLA r8, r9, r10, r8
94 MLA r8, r12, r14, r8
[all …]
Filt_6k_7k_opt.s
38 MOV r8, r0 @ copy signal[] address
52 …MOV r3, r8 @ change myMemCopy to Copy, due to Copy will cha…
56 LDRSH r8, [r7], #2
58 MOV r8, r8, ASR #2
64 STRH r8, [r6], #2
68 LDRSH r8, [r7], #2
70 MOV r8, r8, ASR #2
76 STRH r8, [r6], #2
99 LDRSH r8, [r4, #4] @ load x[i + 2]
103 ADD r8, r8, r9 @ x[i + 2] + x[i + 28]
[all …]
residu_asm_opt.s
44 LDRH r8, [r0], #2
45 ORR r7, r8, r7, LSL #16 @r7 --- a4, a5
47 LDRH r8, [r0], #2
49 ORR r8, r9, r8, LSL #16 @r8 --- a6, a7
68 STMFD r13!, {r8 - r12} @store r8-r12
109 SMLATT r12,r8, r2, r12 @ i4 (6)
112 SMLATB r11,r8, r2, r11 @ i3 (6)
113 SMLABB r12,r8, r2, r12 @ i4 (7)
117 SMLATT r4, r8, r2, r4 @ i2 (6)
118 SMLABT r11,r8, r2, r11 @ i3 (7)
[all …]
Norm_Corr_opt.s
56 ADD r8, r13, #20 @get the excf[L_SUBFR]
65 MOV r2, r8 @r2 --- excf[]
69 @ r8 --- excf[]
101 @r7 --- scale r4 --- t_min r8 --- excf[]
109 MOV r8, #0x8000
180 ADD r12, r12, r8
196 MOV r8, r0 @ exc[]
200 ADD r8, r8, r5, LSL #1 @ exc[k] address
203 LDRSH r11, [r8] @ tmp = exc[k]
206 LDRSH r8, [r9], #-2 @ load h[i]
[all …]
Syn_filt_32_opt.s
52 LDRSH r8, [r0, #6] @ load Aq[3]
55 AND r8, r8, r14
57 ORR r11, r8, r9, LSL #16 @ Aq[4] -- Aq[3]
63 LDRSH r8, [r0, #14] @ load Aq[7]
66 AND r8, r8, r14
68 ORR r11, r8, r9, LSL #16 @ Aq[8] -- Aq[7]
74 LDRSH r8, [r0, #22] @ load Aq[11]
77 AND r8, r8, r14
79 ORR r11, r8, r9, LSL #16 @ Aq[12] -- Aq[11]
85 LDRSH r8, [r0, #30] @ load Aq[15]
[all …]
Dot_p_opt.s
41 LDR r8, [r0], #4
47 SMLABB r4, r8, r9, r4
50 SMLATT r4, r8, r9, r4
51 LDR r8, [r0], #4
57 SMLABB r4, r8, r9, r4
59 SMLATT r4, r8, r9, r4
/frameworks/av/media/libstagefright/codecs/amrwbenc/src/asm/ARMV7/
residu_asm_neon.s
43 MOV r8, r9
44 VLD1.S16 D5, [r8]! @get x[i], x[i+1], x[i+2], x[i+3]
47 SUB r8, r9, #2 @get the x[i-1] address
48 VLD1.S16 D5, [r8]!
51 SUB r8, r9, #4 @load the x[i-2] address
52 VLD1.S16 D5, [r8]!
55 SUB r8, r9, #6 @load the x[i-3] address
56 VLD1.S16 D5, [r8]!
59 SUB r8, r9, #8 @load the x[i-4] address
60 VLD1.S16 D5, [r8]!
[all …]
convolve_neon.s
39 @MOV r8, #0 @ s = 0
47 MUL r8, r9, r10
64 ADD r5, r5, r8
71 @MOV r8, #0
80 MUL r8, r9, r10
82 MLA r8, r12, r14, r8
100 ADD r8, r8, r5
101 ADD r8, r11, r8, LSL #1
102 MOV r8, r8, LSR #16 @extract_h(s)
104 STRH r8, [r2], #2 @y[n]
[all …]
Norm_Corr_neon.s
56 ADD r8, r13, #20 @get the excf[L_SUBFR]
65 MOV r2, r8 @r2 --- excf[]
69 @ r8 --- excf[]
109 @r7 --- scale r4 --- t_min r8 --- excf[]
114 MOV r8, #0x8000
220 ADD r12, r12, r8
235 MOV r8, r0 @ exc[]
239 ADD r8, r8, r5, LSL #1 @ exc[k] address
242 LDRSH r11, [r8] @ tmp = exc[k]
245 LDRSH r8, [r9], #-2 @ load h[i]
[all …]
/frameworks/av/media/libstagefright/codecs/aacenc/src/asm/ARMV5E/
band_nrg_v5.s
54 smull r6, r8, r6, r6
60 qadd r14, r14, r8
61 smull r6, r8, r6, r6
65 qadd r14, r14, r8
113 ldr r8, [r0, +r10, lsl #2]
115 mov r8, r8, asr #1
120 add r5, r8, r9
122 sub r8, r8, r9
126 smull r8, r4, r8, r8
132 sub r8, r12, r14
[all …]
PrePostMDCT_v5.s
37 ldr r8, [r2], #4
43 smull r14, r11, r4, r8 @ MULHIGH(tr1, cosa)
44 smull r10, r12, r7, r8 @ MULHIGH(ti1, cosa)
46 smull r14, r8, r7, r9 @ MULHIGH(ti1, sina)
49 add r11, r11, r8 @ MULHIGH(cosa, tr1) + MULHIGH(sina, ti1)@
52 ldr r8, [r2], #4
55 smull r14, r4, r6, r8 @ MULHIGH(tr2, cosa)
56 smull r10, r12, r5, r8 @ MULHIGH(ti2, cosa)
58 smull r14, r8, r5, r9 @ MULHIGH(ti2, sina)
61 add r8, r8, r4
[all …]
CalcWindowEnergy_v5.s
42 mov r8, #0 @ w=0
50 str r8, [r13, #4]
61 smull r0, r8, r12, r11 @ accu2 = fixmul( Coeff0, states1 );
64 mov r8, r8, lsl #1
67 sub r8, r0, r8 @ out = accu3 - accu2;
70 mov r11, r8 @ states1 = out;
73 mov r8, r8, asr #16
78 mul r9, r8, r8
87 ldr r8, [r13, #4]
90 add r4, r0, r8, lsl #2
[all …]
AutoCorrelation_v5.s
39 mov r8, #0
72 add r8, r8, #6
75 cmp r8, r12
84 add r8, r8, #1
85 cmp r8, r4
94 mov r8, #1
107 addlt r6, r5, r8, lsl #1
110 add r6, r5, r8, lsl #1
112 str r8, [r13, #8]
118 add r8, r1, r6
[all …]
R4R8First_v5.s
40 add r8, r0, r2
52 add r6, r8, r2
55 sub r8, r8, r2
66 strd r8, [r11, #16]
95 add r8, r0, r2 @ r0 = buf[0] + buf[2]@
107 add r6, r8, r2 @ r4 = (r0 + r2) >> 1@
110 sub r8, r8, r2 @ r5 = (r0 - r2) >> 1@
122 mov r8, r8, asr #1
134 str r8, [sp, #8]
146 ldrd r8, [r14, #56]
[all …]
Radix4FFT_v5.s
59 ldr r8, [r12], #4 @ cosxsinx = csptr[0]@
61 smulwt r4, r10, r8 @ L_mpy_wx(cosx, t0)
62 smulwt r3, r11, r8 @ L_mpy_wx(cosx, t1)
64 smlawb r2, r11, r8, r4 @ r2 = L_mpy_wx(cosx, t0) + L_mpy_wx(sinx, t1)@
65 smulwb r5, r10, r8 @ L_mpy_wx(sinx, t0)
83 ldr r8, [r12], #4 @ cosxsinx = csptr[1]@
85 smulwt r6, r10, r8 @ L_mpy_wx(cosx, t0)
86 smulwt r5, r11, r8 @ L_mpy_wx(cosx, t1)
88 smlawb r4, r11, r8, r6 @ r4 = L_mpy_wx(cosx, t0) + L_mpy_wx(sinx, t1)@
89 smulwb r7, r10, r8 @ L_mpy_wx(sinx, t0)
[all …]
/frameworks/av/media/libstagefright/codecs/m4v_h263/dec/src/
idct.cpp
131 int32 r0, r1, r2, r3, r4, r5, r6, r7, r8; /* butterfly nodes */ in idct_intra() local
182 r8 = W7 * (r4 + r5); in idct_intra()
183 r4 = (r8 + (W1 - W7) * r4); in idct_intra()
186 r5 = (r8 - (W1 + W7) * r5); in idct_intra()
187 r8 = W3 * (r6 + r7); in idct_intra()
188 r6 = (r8 - (W3 - W5) * r6); in idct_intra()
189 r7 = (r8 - (W3 + W5) * r7); in idct_intra()
192 r8 = r0 + r1; in idct_intra()
205 r7 = r8 + r3; in idct_intra()
206 r8 -= r3; in idct_intra()
[all …]
/frameworks/av/media/libstagefright/codecs/aacenc/src/asm/ARMV7/
Radix4FFT_v7.s
40 mov r8, r0
58 VLD2.I32 {D0, D1, D2, D3}, [r8]
61 add r8, r8, r5 @ xptr += step@
62 VLD2.I32 {D4, D5, D6,D7}, [r8] @ r2 = xptr[0]@ r3 = xptr[1]@
72 add r8, r8, r5 @ xptr += step@
81 VLD2.I32 {D8, D9, D10, D11}, [r8]
83 add r8, r8, r5
93 VLD2.I32 {D12, D13, D14, D15}, [r8]
111 VST2.I32 {D16, D17, D18, D19}, [r8]
114 sub r8, r8, r5 @ xptr -= step@
[all …]
/frameworks/av/media/libstagefright/codecs/mp3dec/src/asm/
pvmp3_polyphase_filter_window_gcc.s
81 ldr r8,[r3,#0x700]
84 smull r6,r2,r5,r8
87 smlal r8,r9,r5,r8
99 ldr r8,[r3,#0x600]
102 smull r2,r5,r6,r8
105 smlal r8,r9,r6,r8
117 ldr r8,[r3,#0x500]
120 smull r2,r5,r6,r8
124 smlal r8,r9,r6,r8
127 ldr r8,[r1,#0x30]
[all …]
pvmp3_dct_16_gcc.s
93 smull r8,lr,r6,lr
98 ldr r8,constant8
101 smull r9,r6,r8,r6
104 add r8,r1,r12
132 sub r1,r7,r8
135 add r1,r7,r8
136 add r8,r9,r1
138 mov r8,r8,asr #1
140 str r8,[r0]
141 smull r7,r8,r1,r7
[all …]
/frameworks/native/opengl/libagl/
iterators.S
51 stmfd sp!, {r4, r5, r6, r7, r8, lr}
63 smull r8, lr, r4, r5
67 smlal r8, lr, r3, r5
74 str r8, [r1, #8]
77 umull r8, r0, r4, r8
80 adds r6, r6, r8
88 ldmfd sp!, {r4, r5, r6, r7, r8, pc}
/frameworks/rs/cpu_ref/
rsCpuIntrinsics_neon_Resize.S
178 push {r4,r5,r6,r7,r8,r9,r10,r11,r12,lr}
190 ldr r8, [lr,#136] // yr
192 vld1.s32 {q4}, [r8]
209 sub r8, r2, r3
211 add r8, r8, r9
215 str r8, [sp,#OSC_STORE]
238 sub r8, r12, r10, LSL #COMPONENT_SHIFT + 1
240 add r8, r8, #4 * COMPONENT_COUNT * 2
245 vld1.s16 {d24}, [r8]
251 vld1.s16 {q12}, [r8]
[all …]
