Searched refs:r8 (Results 1 – 25 of 33) sorted by relevance

/frameworks/av/media/codecs/amrwb/enc/src/asm/ARMV5E/
convolve_opt.s
45 MUL r8, r9, r10
54 MLA r8, r9, r10, r8
55 MLA r8, r12, r14, r8
60 MLA r8, r9, r10, r8
62 MLA r8, r12, r14, r8
68 ADD r5, r11, r8, LSL #1
82 MUL r8, r9, r10
84 MLA r8, r12, r14, r8
93 MLA r8, r9, r10, r8
94 MLA r8, r12, r14, r8
[all …]
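The MUL/MLA chain above is the inner loop of a fixed-point convolution: each MLA folds one x[i]*h[n-i] product into the running sum in r8, and the ADD ... LSL #1 at line 68 doubles that sum before the high half is extracted. A minimal C sketch of the pattern (the function name and the 0x8000 rounding term are illustrative assumptions, not taken from the file):

    #include <stdint.h>

    /* s accumulates x[i]*h[n-i], as the MLA chain does in r8 */
    static int16_t convolve_sample(const int16_t *x, const int16_t *h, int n)
    {
        int32_t s = 0;
        for (int i = 0; i <= n; i++)
            s += (int32_t)x[i] * h[n - i];            /* MLA r8, r9, r10, r8 */
        return (int16_t)(((s << 1) + 0x8000) >> 16);  /* ADD + extract high half */
    }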
Filt_6k_7k_opt.s
38 MOV r8, r0 @ copy signal[] address
52 …MOV r3, r8 @ change myMemCopy to Copy, due to Copy will cha…
56 LDRSH r8, [r7], #2
58 MOV r8, r8, ASR #2
64 STRH r8, [r6], #2
68 LDRSH r8, [r7], #2
70 MOV r8, r8, ASR #2
76 STRH r8, [r6], #2
99 LDRSH r8, [r4, #4] @ load x[i + 2]
103 ADD r8, r8, r9 @ x[i + 2] + x[i + 28]
[all …]
residu_asm_opt.s
44 LDRH r8, [r0], #2
45 ORR r7, r8, r7, LSL #16 @r7 --- a4, a5
47 LDRH r8, [r0], #2
49 ORR r8, r9, r8, LSL #16 @r8 --- a6, a7
68 STMFD r13!, {r8 - r12} @store r8-r12
109 SMLATT r12,r8, r2, r12 @ i4 (6)
112 SMLATB r11,r8, r2, r11 @ i3 (6)
113 SMLABB r12,r8, r2, r12 @ i4 (7)
117 SMLATT r4, r8, r2, r4 @ i2 (6)
118 SMLABT r11,r8, r2, r11 @ i3 (7)
[all …]
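The LDRH/ORR pairs above pack two 16-bit coefficients into one 32-bit register (the comments note r8 holding a6 and a7), and the SMLABB/SMLATB/SMLATT instructions then multiply a chosen halfword of each operand and accumulate. Hedged C equivalents of two of them (helper names are mine, not the source's):

    #include <stdint.h>

    /* SMLATT acc, a, b: top halfword of a times top halfword of b, plus acc */
    static int32_t smlatt(int32_t acc, int32_t a, int32_t b)
    {
        return acc + (int32_t)(int16_t)(a >> 16) * (int16_t)(b >> 16);
    }

    /* SMLABB acc, a, b: the same with both bottom halfwords */
    static int32_t smlabb(int32_t acc, int32_t a, int32_t b)
    {
        return acc + (int32_t)(int16_t)(a & 0xFFFF) * (int16_t)(b & 0xFFFF);
    }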
Norm_Corr_opt.s
56 ADD r8, r13, #20 @get the excf[L_SUBFR]
65 MOV r2, r8 @r2 --- excf[]
69 @ r8 --- excf[]
101 @r7 --- scale r4 --- t_min r8 --- excf[]
109 MOV r8, #0x8000
180 ADD r12, r12, r8
196 MOV r8, r0 @ exc[]
200 ADD r8, r8, r5, LSL #1 @ exc[k] address
203 LDRSH r11, [r8] @ tmp = exc[k]
206 LDRSH r8, [r9], #-2 @ load h[i]
[all …]
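The MOV r8, #0x8000 at line 109, later folded in by ADD r12, r12, r8, is the standard fixed-point rounding constant: it is added to a 32-bit accumulator just before the high 16 bits are kept. The idiom in C (a sketch of this one step, not of the whole Norm_Corr routine):

    #include <stdint.h>

    /* round a 32-bit accumulator to 16 bits: add 0x8000, keep the high half */
    static int16_t round_acc(int32_t acc)
    {
        return (int16_t)((acc + 0x8000) >> 16);
    }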
Syn_filt_32_opt.s
52 LDRSH r8, [r0, #6] @ load Aq[3]
55 AND r8, r8, r14
57 ORR r11, r8, r9, LSL #16 @ Aq[4] -- Aq[3]
63 LDRSH r8, [r0, #14] @ load Aq[7]
66 AND r8, r8, r14
68 ORR r11, r8, r9, LSL #16 @ Aq[8] -- Aq[7]
74 LDRSH r8, [r0, #22] @ load Aq[11]
77 AND r8, r8, r14
79 ORR r11, r8, r9, LSL #16 @ Aq[12] -- Aq[11]
85 LDRSH r8, [r0, #30] @ load Aq[15]
[all …]
Dot_p_opt.s
41 LDR r8, [r0], #4
47 SMLABB r4, r8, r9, r4
50 SMLATT r4, r8, r9, r4
51 LDR r8, [r0], #4
57 SMLABB r4, r8, r9, r4
59 SMLATT r4, r8, r9, r4
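Dot_p_opt.s streams its inputs with LDR, pulling two 16-bit samples per 32-bit word, and consumes both halves with an SMLABB/SMLATT pair, halving the number of loads. A hedged C sketch of that two-at-a-time dot product (assumes an even length; names are illustrative):

    #include <stdint.h>

    static int32_t dot16(const int16_t *x, const int16_t *y, int n)
    {
        int32_t acc = 0;
        for (int i = 0; i < n; i += 2) {
            acc += (int32_t)x[i]     * y[i];      /* SMLABB r4, r8, r9, r4 */
            acc += (int32_t)x[i + 1] * y[i + 1];  /* SMLATT r4, r8, r9, r4 */
        }
        return acc;
    }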
cor_h_vec_opt.s
55 LDRSH r8, [r9], #2
58 MLA r5, r12, r8, r5
72 ADD r8, r7, #32
82 LDRSH r11, [r8] @*p3++
101 LDRSH r8, [r9], #2
104 MLA r5, r12, r8, r5
119 ADD r8, r7, #32
129 LDRSH r11, [r8] @*p3++
Deemph_32_opt.s
45 MOV r8, r5, ASR #1 @fac = mu >> 1
49 MUL r9, r5, r8
62 MUL r9, r14, r8
74 MUL r9, r14, r8
86 MUL r9, r14, r8
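Deemph_32_opt.s is a first-order IIR de-emphasis: line 45 sets fac = mu >> 1, and each MUL r9, r14, r8 multiplies the previous output by fac. A heavily hedged sketch of the recursion (the real routine works on split hi/lo 32-bit data, so the scaling and saturation below are assumptions):

    #include <stdint.h>

    static void deemph(int16_t *y, const int16_t *x, int16_t mu, int n, int16_t *mem)
    {
        int32_t fac = mu >> 1;                        /* MOV r8, r5, ASR #1 */
        int32_t prev = *mem;                          /* y[-1] */
        for (int i = 0; i < n; i++) {
            int32_t t = x[i] + ((prev * fac) >> 14);  /* MUL r9, r14, r8 */
            if (t >  32767) t =  32767;               /* saturate to int16 */
            if (t < -32768) t = -32768;
            y[i] = (int16_t)t;
            prev = y[i];
        }
        *mem = (int16_t)prev;
    }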
syn_filt_opt.s
49 LDRH r8, [r4], #2
58 STRH r8, [r5], #2
67 LDRH r8, [r4], #2
76 STRH r8, [r5], #2
84 MOV r8, #0 @ i = 0
137 ADD r10, r4, r8, LSL #1 @ temp_p = yy + i
216 ADD r8, r8, #1
219 CMP r8, #80
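syn_filt_opt.s first copies the filter memory into a scratch buffer (the LDRH/STRH loop at lines 49-76) and then runs the usual LPC synthesis recursion over an 80-sample subframe (CMP r8, #80). A hedged sketch, assuming Q12 coefficients and that y[-m..-1] already holds the copied memory:

    #include <stdint.h>

    /* y[i] = x[i] - (a[1..m] . y[i-1..i-m]) in fixed point; the Q12
       scaling and rounding are assumptions, not read from the file */
    static void syn_filt(const int16_t *a, const int16_t *x, int16_t *y, int m)
    {
        for (int i = 0; i < 80; i++) {              /* CMP r8, #80 */
            int32_t s = (int32_t)x[i] << 12;
            for (int j = 1; j <= m; j++)
                s -= (int32_t)a[j] * y[i - j];
            y[i] = (int16_t)((s + 0x800) >> 12);
        }
    }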
scale_sig_opt.s
38 MOV r8, #0x7fffffff
47 EORNE r12, r8, r5, ASR #31
pred_lt4_1_opt.s
48 ADR r8, Table
50 LDR r6, [r8]
51 ADD r6, r8
52 MOV r8, r4, LSL #6
54 ADD r8, r6, r8 @ptr2 = &(inter4_2[k][0])
60 @r0 --- exc[] r1 --- x r7 --- j r8 --- ptr2 r5 --- 0x8000
64 MOV r2, r8 @ptr = ptr2
271 MOV r2, r8 @ptr = ptr2
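In pred_lt4_1_opt.s, MOV r8, r4, LSL #6 turns the fractional index k into a byte offset, which means each inter4_2 row is 32 int16 coefficients (64 bytes); ADD r8, r6, r8 then forms ptr2 = &(inter4_2[k][0]) as the comment says. The same address arithmetic in C (the row width is inferred from the LSL #6):

    #include <stdint.h>

    /* ptr2 = &(inter4_2[k][0]); k << 6 bytes == k rows of 32 int16 */
    static const int16_t *row_ptr(const int16_t inter4_2[][32], int k)
    {
        return &inter4_2[k][0];
    }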
/frameworks/av/media/codecs/amrwb/enc/src/asm/ARMV7/
residu_asm_neon.s
43 MOV r8, r9
44 VLD1.S16 D5, [r8]! @get x[i], x[i+1], x[i+2], x[i+3]
47 SUB r8, r9, #2 @get the x[i-1] address
48 VLD1.S16 D5, [r8]!
51 SUB r8, r9, #4 @load the x[i-2] address
52 VLD1.S16 D5, [r8]!
55 SUB r8, r9, #6 @load the x[i-3] address
56 VLD1.S16 D5, [r8]!
59 SUB r8, r9, #8 @load the x[i-4] address
60 VLD1.S16 D5, [r8]!
[all …]
convolve_neon.s
39 @MOV r8, #0 @ s = 0
47 MUL r8, r9, r10
64 ADD r5, r5, r8
71 @MOV r8, #0
80 MUL r8, r9, r10
82 MLA r8, r12, r14, r8
100 ADD r8, r8, r5
101 ADD r8, r11, r8, LSL #1
102 MOV r8, r8, LSR #16 @extract_h(s)
104 STRH r8, [r2], #2 @y[n]
[all …]
Norm_Corr_neon.s
56 ADD r8, r13, #20 @get the excf[L_SUBFR]
65 MOV r2, r8 @r2 --- excf[]
69 @ r8 --- excf[]
109 @r7 --- scale r4 --- t_min r8 --- excf[]
114 MOV r8, #0x8000
220 ADD r12, r12, r8
235 MOV r8, r0 @ exc[]
239 ADD r8, r8, r5, LSL #1 @ exc[k] address
242 LDRSH r11, [r8] @ tmp = exc[k]
245 LDRSH r8, [r9], #-2 @ load h[i]
[all …]
pred_lt4_1_neon.s
44 ADR r8, Lable1
45 LDR r11, [r8]
46 ADD r11, r8
48 MOV r8, #0 @ j = 0
83 ADD r8, r8, #1
91 CMP r8, r3
cor_h_vec_neon.s
56 LDRSH r8, [r9], #2
59 MLA r5, r12, r8, r5
73 ADD r8, r7, #32
83 LDRSH r11, [r8] @*p3++
102 LDRSH r8, [r9], #2
105 MLA r5, r12, r8, r5
120 ADD r8, r7, #32
130 LDRSH r11, [r8] @*p3++
syn_filt_neon.s
49 MOV r8, #0 @ i = 0
58 MOV r8, #0 @ loop times
68 ADD r10, r4, r8, LSL #1 @ y[i], yy[i] address
79 ADD r8, r8, #1
88 CMP r8, #80
Deemph_32_neon.s
45 MOV r8, r5, ASR #1 @fac = mu >> 1
49 MUL r9, r5, r8
62 MUL r9, r14, r8
74 MUL r9, r14, r8
86 MUL r9, r14, r8
Syn_filt_32_neon.s
54 MOV r8, #0 @ i = 0
62 VDUP.S32 Q15, r8
117 ADD r8, r8, #1
124 CMP r8, #64
/frameworks/av/media/codecs/m4v_h263/dec/src/
idct.cpp
131 int32 r0, r1, r2, r3, r4, r5, r6, r7, r8; /* butterfly nodes */ in idct_intra() local
182 r8 = W7 * (r4 + r5); in idct_intra()
183 r4 = (r8 + (W1 - W7) * r4); in idct_intra()
186 r5 = (r8 - (W1 + W7) * r5); in idct_intra()
187 r8 = W3 * (r6 + r7); in idct_intra()
188 r6 = (r8 - (W3 - W5) * r6); in idct_intra()
189 r7 = (r8 - (W3 + W5) * r7); in idct_intra()
192 r8 = r0 + r1; in idct_intra()
205 r7 = r8 + r3; in idct_intra()
206 r8 -= r3; in idct_intra()
[all …]
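Lines 182-189 of idct.cpp use the classic three-multiply rotation: the shared product W7 * (r4 + r5) is computed once into r8 and reused for both outputs, saving a multiply per butterfly pair. The trick extracted into a C helper (W1/W7 are the file's scaled-cosine constants; the values below are the common 2048*sqrt(2)*cos(k*pi/16) ones and are an assumption):

    #include <stdint.h>

    #define W1 2841   /* ~2048*sqrt(2)*cos(pi/16), assumed value */
    #define W7  565   /* ~2048*sqrt(2)*cos(7*pi/16), assumed value */

    /* a' = W1*a + W7*b,  b' = W7*a - W1*b, using one shared product */
    static void rotate_w1_w7(int32_t *a, int32_t *b)
    {
        int32_t t = W7 * (*a + *b);   /* r8 = W7 * (r4 + r5)      */
        *a = t + (W1 - W7) * *a;      /* r4 = r8 + (W1 - W7) * r4 */
        *b = t - (W1 + W7) * *b;      /* r5 = r8 - (W1 + W7) * r5 */
    }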
/frameworks/av/media/codecs/mp3dec/src/asm/
pvmp3_polyphase_filter_window_gcc.s
81 ldr r8,[r3,#0x700]
84 smull r6,r2,r5,r8
87 smlal r8,r9,r5,r8
99 ldr r8,[r3,#0x600]
102 smull r2,r5,r6,r8
105 smlal r8,r9,r6,r8
117 ldr r8,[r3,#0x500]
120 smull r2,r5,r6,r8
124 smlal r8,r9,r6,r8
127 ldr r8,[r1,#0x30]
[all …]
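The smull/smlal pairs above keep full 64-bit products of 32-bit samples and window coefficients, descaling only at the end; smlal rdlo, rdhi, a, b accumulates (int64)a*b into the {rdhi:rdlo} register pair. A one-line hedged C equivalent:

    #include <stdint.h>

    /* smlal lo, hi, a, b: {hi:lo} += (int64_t)a * b */
    static int64_t smlal(int64_t acc, int32_t a, int32_t b)
    {
        return acc + (int64_t)a * b;
    }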
pvmp3_dct_16_gcc.s
93 smull r8,lr,r6,lr
98 ldr r8,constant8
101 smull r9,r6,r8,r6
104 add r8,r1,r12
132 sub r1,r7,r8
135 add r1,r7,r8
136 add r8,r9,r1
138 mov r8,r8,asr #1
140 str r8,[r0]
141 smull r7,r8,r1,r7
[all …]
pvmp3_mdct_18_gcc.s
63 ldr r8,[r3],#4 @@ tmp1 == r8
69 smull r10,lr,r8,lr
70 ldr r8,[r12],#-4
73 smull r9,r10,r8,r9
74 mov r8,r9,lsr #27
75 add r8,r8,r10,lsl #5
80 add r9,lr,r8
81 sub r8,lr,r8
84 smull r8,r9,lr,r8
85 mov lr,r8,lsr #28
[all …]
pvmp3_dct_9_gcc.s
62 add r8,r9,r2
64 add r10,r7,r8
65 rsb r7,r8,r7,asr #1
74 mov r8,r7
78 smlal r1,r8,r11,r9
103 smlal r12,r8,r11,r1
112 smlal r12,r8,r6,r1
137 str r8,[r0, #0x10]
144 smlal r8,r6,r7,r12
/frameworks/rs/cpu_ref/
rsCpuIntrinsics_neon_Resize.S
178 push {r4,r5,r6,r7,r8,r9,r10,r11,r12,lr}
190 ldr r8, [lr,#136] // yr
192 vld1.s32 {q4}, [r8]
209 sub r8, r2, r3
211 add r8, r8, r9
215 str r8, [sp,#OSC_STORE]
238 sub r8, r12, r10, LSL #COMPONENT_SHIFT + 1
240 add r8, r8, #4 * COMPONENT_COUNT * 2
245 vld1.s16 {d24}, [r8]
251 vld1.s16 {q12}, [r8]
[all …]
