/frameworks/av/media/codecs/amrwb/enc/src/asm/ARMV7/ |
D | Deemph_32_neon.s |
  44 MOV r10, r6, LSL #16 @L_tmp = x_hi[0]<<16
  47 ADD r12, r10, r7, LSL #4 @L_tmp += x_lo[0] << 4
  48 MOV r10, r12, LSL #3 @L_tmp <<= 3
  53 MOV r12, r10, LSL #1 @L_tmp = L_mac(L_tmp, *mem, fac)
  58 MOV r10, r6, LSL #16
  59 ADD r12, r10, r7, LSL #4
  61 MOV r10, r12, LSL #3
  64 MOV r12, r10, LSL #1
  72 MOV r10, r6, LSL #16
  73 ADD r12, r10, r7, LSL #4
  [all …]
|
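The Deemph_32 matches above are the double-precision deemphasis accumulation: a 32-bit sample is rebuilt from its 16-bit halves, scaled up, and a scaled history term is added. A minimal C sketch of that idiom, assuming the x_hi/x_lo/fac/mem roles from the comments; the saturating L_mac behaviour and the final rounding step are simplified assumptions here:

    #include <stdint.h>

    /* Sketch of the pattern annotated in Deemph_32: x_hi/x_lo are the high
     * and low halves of a 32-bit sample, fac is the Q15 deemphasis factor,
     * *mem the filter memory.  Saturation (L_mac/L_shl) is omitted.        */
    static int16_t deemph32_step(int16_t x_hi, int16_t x_lo,
                                 int16_t fac, int16_t *mem)
    {
        int32_t L_tmp;

        L_tmp  = (int32_t)x_hi << 16;           /* MOV r10, r6, LSL #16     */
        L_tmp += (int32_t)x_lo << 4;            /* ADD r12, r10, r7, LSL #4 */
        L_tmp <<= 3;                            /* MOV r10, r12, LSL #3     */
        L_tmp += ((int32_t)*mem * fac) << 1;    /* L_mac(L_tmp, *mem, fac)  */

        *mem = (int16_t)((L_tmp + 0x8000) >> 16);  /* round back to 16 bits (assumed) */
        return *mem;
    }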
D | cor_h_vec_neon.s |
  41 ADD r7, r4, r2, LSL #5 @r7 --- p0 = rrixix[track]
  50 ADD r9, r1, r2, LSL #1 @p2 = &vec[pos]
  64 MOV r6, r6, LSL #2 @L_sum2 = (L_sum2 << 2)
  67 MOV r5, r5, LSL #2 @L_sum1 = (L_sum1 << 2)
  72 ADD r9, r3, r2, LSL #1 @address of sign[pos]
  84 ADD r9, r9, r4, LSL #1
  85 ADD r12, r12, r4, LSL #1
  95 ADD r9, r1, r2, LSL #1 @p2 = &vec[pos]
  110 MOV r6, r6, LSL #2 @L_sum2 = (L_sum2 << 2)
  113 MOV r5, r5, LSL #2 @L_sum1 = (L_sum1 << 2)
  [all …]
|
D | convolve_neon.s |
  40 ADD r4, r1, r3, LSL #1 @ tmpH address
  65 ADD r5, r11, r5, LSL #1
  72 ADD r4, r1, r3, LSL #1 @tmpH address
  101 ADD r8, r11, r8, LSL #1
  108 ADD r4, r1, r3, LSL #1
  140 ADD r8, r11, r8, LSL #1
  146 ADD r4, r1, r5, LSL #1 @ tmpH address
  164 ADD r5, r11, r5, LSL #1
|
D | Norm_Corr_neon.s |
  59 ADD r5, r0, r11, LSL #1 @get the &exc[k]
  169 ADD r5, r10, r5, LSL #1 @L_tmp = (L_tmp << 1) + 1
  170 ADD r6, r10, r6, LSL #1 @L_tmp1 = (L_tmp1 << 1) + 1
  178 MOV r5, r5, LSL r10 @L_tmp = (L_tmp << exp)
  184 MOV r6, r6, LSL r5 @L_tmp = (L_tmp1 << exp)
  218 … MOVGT r12, r12, LSL r6 @L_tmp = L_shl(L_tmp, exp_corr + exp_norm + scale)
  225 ADD r10, r5, r4, LSL #1 @ get corr_norm[t] address
  239 ADD r8, r8, r5, LSL #1 @ exc[k] address
  240 ADD r9, r9, r6, LSL #1 @ h[i] address
  241 ADD r10, r10, r6, LSL #1 @ excf[i] address
|
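The (L_tmp << 1) + 1 and LSL-by-exponent lines in Norm_Corr are the usual fixed-point pattern: bias the energy so it can never be zero, then left-justify it before the correlation is normalised. A rough C sketch, with a simple loop standing in for the codec's norm_l and saturation ignored:

    #include <stdint.h>

    /* Shifts needed to bring a positive 32-bit value into
     * [0x40000000, 0x7fffffff] (same idea as the codec's norm_l). */
    static int norm_l_sketch(int32_t x)
    {
        int n = 0;
        while (x > 0 && x < 0x40000000) { x <<= 1; n++; }
        return n;
    }

    static int32_t normalise_energy(int32_t L_tmp)
    {
        L_tmp = (L_tmp << 1) + 1;      /* ADD r5, r10, r5, LSL #1 (r10 presumably holds 1) */
        int exp = norm_l_sketch(L_tmp);
        return L_tmp << exp;           /* MOV r5, r5, LSL r10 */
    }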
D | pred_lt4_1_neon.s |
  37 SUB r4, r0, r1, LSL #1 @ x = exc - T0
  49 ADD r11, r11, r2, LSL #6 @ get inter4_2[k][]
|
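In Pred_lt4, ADD r11, r11, r2, LSL #6 steps the interpolation-filter base pointer by k rows of 64 bytes each; assuming the usual 32 int16 coefficients per fractional position, this is plain row indexing in C:

    #include <stdint.h>

    /* ADD r11, r11, r2, LSL #6 : advance by k rows, each row assumed to
     * be 32 halfword coefficients (32 * 2 = 64 bytes, hence LSL #6).    */
    static const int16_t *interp_row(const int16_t *inter4_2, int k)
    {
        return inter4_2 + k * 32;
    }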
D | Syn_filt_32_neon.s |
  97 MOV r14, r12, LSL #1 @exc[i] * a0 << 1
  118 SUB r12, r11, r10, LSL #12
|
/frameworks/av/media/codecs/amrwb/enc/src/asm/ARMV5E/ |
D | Deemph_32_opt.s |
  44 MOV r10, r6, LSL #16 @L_tmp = x_hi[0]<<16
  47 ADD r12, r10, r7, LSL #4 @L_tmp += x_lo[0] << 4
  48 MOV r10, r12, LSL #3 @L_tmp <<= 3
  53 MOV r12, r10, LSL #1 @L_tmp = L_mac(L_tmp, *mem, fac)
  58 MOV r10, r6, LSL #16
  59 ADD r12, r10, r7, LSL #4
  61 MOV r10, r12, LSL #3
  64 MOV r12, r10, LSL #1
  72 MOV r10, r6, LSL #16
  73 ADD r12, r10, r7, LSL #4
  [all …]
|
D | cor_h_vec_opt.s |
  41 ADD r7, r4, r2, LSL #5 @r7 --- p0 = rrixix[track]
  49 ADD r9, r1, r2, LSL #1 @p2 = &vec[pos]
  63 MOV r6, r6, LSL #2 @L_sum2 = (L_sum2 << 2)
  66 MOV r5, r5, LSL #2 @L_sum1 = (L_sum1 << 2)
  71 ADD r9, r3, r2, LSL #1 @address of sign[pos]
  83 ADD r9, r9, r4, LSL #1
  84 ADD r12, r12, r4, LSL #1
  94 ADD r9, r1, r2, LSL #1 @p2 = &vec[pos]
  109 MOV r6, r6, LSL #2 @L_sum2 = (L_sum2 << 2)
  112 MOV r5, r5, LSL #2 @L_sum1 = (L_sum1 << 2)
  [all …]
|
D | Norm_Corr_opt.s |
  59 ADD r5, r0, r11, LSL #1 @get the &exc[k]
  92 ADD r9, r7, r6, LSL #1 @L_tmp = (L_tmp << 1) + 1
  129 ADD r5, r10, r5, LSL #1 @L_tmp = (L_tmp << 1) + 1
  130 ADD r6, r10, r6, LSL #1 @L_tmp1 = (L_tmp1 << 1) + 1
  138 MOV r5, r5, LSL r10 @L_tmp = (L_tmp << exp)
  144 MOV r6, r6, LSL r5 @L_tmp = (L_tmp1 << exp)
  178 MOVGT r12, r12, LSL r6 @L_tmp = L_shl(L_tmp, exp_corr + exp_norm + scale)
  185 ADD r10, r5, r4, LSL #1 @ get corr_norm[t] address
  200 ADD r8, r8, r5, LSL #1 @ exc[k] address
  201 ADD r9, r9, r6, LSL #1 @ h[i] address
  [all …]
|
D | syn_filt_opt.s |
  96 ORR r10, r6, r7, LSL #16 @ -a[2] -- -a[1]
  97 ORR r12, r9, r11, LSL #16 @ -a[4] -- -a[3]
  107 ORR r10, r6, r7, LSL #16 @ -a[6] -- -a[5]
  108 ORR r12, r9, r11, LSL #16 @ -a[8] -- -a[7]
  118 ORR r10, r6, r7, LSL #16 @ -a[10] -- -a[9]
  119 ORR r12, r9, r11, LSL #16 @ -a[12] -- -a[11]
  129 ORR r10, r6, r7, LSL #16 @ -a[14] -- -a[13]
  130 ORR r12, r9, r11, LSL #16 @ -a[16] -- -a[15]
  137 ADD r10, r4, r8, LSL #1 @ temp_p = yy + i
  215 MOV r7, r14, LSL #4 @ L_tmp <<=4
|
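The ORR rX, rY, rZ, LSL #16 lines in syn_filt_opt (and likewise in Syn_filt_32_opt.s and residu_asm_opt.s below) pack two adjacent 16-bit filter coefficients into one 32-bit register so that the ARMv5E halfword multiplies (SMULBB, SMULBT and friends) can pick either coefficient without reloading. The packing itself, sketched in C; which coefficient lands in which half depends on how the registers were loaded:

    #include <stdint.h>

    /* ORR r10, r6, r7, LSL #16 : low halfword from c_lo, high halfword
     * from c_hi.  Casting through uint16_t avoids sign-extension spill. */
    static uint32_t pack_q15_pair(int16_t c_lo, int16_t c_hi)
    {
        return (uint32_t)(uint16_t)c_lo | ((uint32_t)(uint16_t)c_hi << 16);
    }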
D | convolve_opt.s |
  39 ADD r4, r1, r3, LSL #1 @ tmpH address
  68 ADD r5, r11, r8, LSL #1
  74 ADD r4, r1, r3, LSL #1 @tmpH address
  105 ADD r8, r11, r8, LSL #1
  110 ADD r4, r1, r3, LSL #1
  143 ADD r8, r11, r8, LSL #1
  149 ADD r4, r1, r3, LSL #1 @ tmpH address
  171 ADD r5, r11, r8, LSL #1
|
D | Syn_filt_32_opt.s |
  56 ORR r10, r6, r7, LSL #16 @ Aq[2] -- Aq[1]
  57 ORR r11, r8, r9, LSL #16 @ Aq[4] -- Aq[3]
  67 ORR r10, r6, r7, LSL #16 @ Aq[6] -- Aq[5]
  68 ORR r11, r8, r9, LSL #16 @ Aq[8] -- Aq[7]
  78 ORR r10, r6, r7, LSL #16 @ Aq[10] -- Aq[9]
  79 ORR r11, r8, r9, LSL #16 @ Aq[12] -- Aq[11]
  89 ORR r10, r6, r7, LSL #16 @ Aq[14] -- Aq[13]
  90 ORR r11, r8, r9, LSL #16 @ Aq[16] -- Aq[15]
  146 ADD r14, r14, r7, LSL #1 @ L_tmp += (exc[i] * a0) << 1
  207 MOV r14, r14, LSL #3 @ L_tmp <<=3
  [all …]
|
D | scale_sig_opt.s |
  37 ADD r4, r0, r3, LSL #1 @x[i] address
  45 MOV r12, r5, LSL r10
  58 MOV r6, r5, LSL #16 @L_tmp = x[i] << 16
|
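scale_sig rescales a 16-bit signal by a variable exponent: each sample is promoted to 32 bits (x[i] << 16), shifted by the scaling exponent, and rounded back down. A simplified C sketch, without the saturation the real basic ops apply:

    #include <stdint.h>

    static void scale_sig_sketch(int16_t *x, int len, int exp)
    {
        for (int i = 0; i < len; i++) {
            int32_t L_tmp = (int32_t)x[i] << 16;       /* MOV r6, r5, LSL #16 */
            L_tmp = (exp >= 0) ? (L_tmp << exp)        /* variable LSL by the exponent */
                               : (L_tmp >> -exp);
            x[i] = (int16_t)((L_tmp + 0x8000) >> 16);  /* round to Q0, no saturation */
        }
    }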
D | residu_asm_opt.s |
  37 ORR r5, r6, r5, LSL #16 @r5 --- a0, a1
  41 ORR r6, r7, r6, LSL #16 @r6 --- a2, a3
  45 ORR r7, r8, r7, LSL #16 @r7 --- a4, a5
  49 ORR r8, r9, r8, LSL #16 @r8 --- a6, a7
  53 ORR r9, r10, r9, LSL #16 @r9 --- a8, a9
  57 ORR r10, r11, r10, LSL #16 @r10 --- a10, a11
  61 ORR r11, r12, r11, LSL #16 @r11 --- a12, a13
  65 ORR r12, r4, r12, LSL #16 @r12 --- a14, a15
  73 ORR r14, r4, r14, LSL #16 @r14 --- loopnum, a16
|
D | pred_lt4_1_opt.s |
  42 ADD r5, r0, r4, LSL #1 @x = exc - T0
  52 MOV r8, r4, LSL #6
  244 @SSAT r10, #32, r10, LSL #2
  245 @SSAT r11, #32, r11, LSL #2
  246 @SSAT r12, #32, r12, LSL #2
  248 MOV r10, r10, LSL #1
  249 MOV r11, r11, LSL #1
  250 MOV r12, r12, LSL #1
  436 @SSAT r10, #32, r10, LSL #2
  437 @SSAT r11, #32, r11, LSL #2
  [all …]
|
D | Dot_p_opt.s |
  62 MOV r12, r4, LSL #1
  70 MOV r0, r12, LSL r10 @ L_sum = L_sum << sft
|
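Dot_p_opt.s ends with the standard dot-product normalisation: the Q15 x Q15 accumulator is doubled (the +1 bias is applied elsewhere in the file), left-justified, and the shift count is reported as a block exponent. A sketch with a hypothetical norm helper; the *exp = 30 - sft convention follows the usual basic-op reference and is an assumption here:

    #include <stdint.h>

    static int norm32(int32_t x)             /* leading-sign-bit count, cf. norm_l */
    {
        int n = 0;
        while (x > 0 && x < 0x40000000) { x <<= 1; n++; }
        return n;
    }

    static int32_t dot_product_sketch(const int16_t *x, const int16_t *y,
                                      int len, int *exp)
    {
        int32_t L_sum = 0;
        for (int i = 0; i < len; i++)
            L_sum += (int32_t)x[i] * y[i];
        L_sum = (L_sum << 1) + 1;            /* MOV r12, r4, LSL #1 (+1 applied elsewhere) */
        int sft = norm32(L_sum);
        *exp = 30 - sft;                     /* assumed exponent convention */
        return L_sum << sft;                 /* MOV r0, r12, LSL r10 @ L_sum = L_sum << sft */
    }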
/frameworks/rs/cpu_ref/ |
D | rsCpuIntrinsics_neon_Resize.S |
  210 mov r9, r3, LSL #VECSHIFT
  238 sub r8, r12, r10, LSL #COMPONENT_SHIFT + 1
  274 sub r4, r4, r10, LSL #COMPONENT_SHIFT
  275 sub r5, r5, r10, LSL #COMPONENT_SHIFT
  276 sub r6, r6, r10, LSL #COMPONENT_SHIFT
  277 sub r7, r7, r10, LSL #COMPONENT_SHIFT
  287 mov r2, r2, LSL #(15 - CHUNKSHIFT)
  288 mov r3, r3, LSL #(15 - CHUNKSHIFT)
  330 add r4, r4, lr, LSL #COMPONENT_SHIFT
  331 add r5, r5, lr, LSL #COMPONENT_SHIFT
  [all …]
|
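In the Resize kernels, LSL #COMPONENT_SHIFT (and #VECSHIFT) turn a pixel or vector count into a byte offset, so a single instruction both scales and adds when adjusting the source-row pointers. In C this is ordinary scaled pointer arithmetic; COMPONENT_SHIFT = 2 below is only an illustrative value (a 4-byte element), since the real kernels are assembled once per element size:

    #include <stddef.h>
    #include <stdint.h>

    #define COMPONENT_SHIFT 2   /* hypothetical: log2(bytes per element) */

    /* sub r4, r4, r10, LSL #COMPONENT_SHIFT : back a source-row pointer
     * up by 'count' elements, i.e. count << COMPONENT_SHIFT bytes.       */
    static const uint8_t *step_back(const uint8_t *row, size_t count)
    {
        return row - (count << COMPONENT_SHIFT);
    }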
D | rsCpuIntrinsics_advsimd_Resize.S |
  118 add x0, x0, x1, LSL #32
  210 sub x14, x12, x13, LSL #(COMPONENT_SHIFT + 1)
  243 sub x4, x4, x13, LSL #(COMPONENT_SHIFT)
  244 sub x5, x5, x13, LSL #(COMPONENT_SHIFT)
  245 sub x6, x6, x13, LSL #(COMPONENT_SHIFT)
  246 sub x7, x7, x13, LSL #(COMPONENT_SHIFT)
  295 add x4, x4, x11, LSL #(COMPONENT_SHIFT)
  296 add x5, x5, x11, LSL #(COMPONENT_SHIFT)
  297 add x6, x6, x11, LSL #(COMPONENT_SHIFT)
  298 add x7, x7, x11, LSL #(COMPONENT_SHIFT)
  [all …]
|
D | rsCpuIntrinsics_neon_YuvToRGB.S |
  233 add r0, r5, LSL #2
  261 add r0, r4, LSL #2
  287 add r0, r4, LSL #2
|
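The add r0, r5, LSL #2 lines in the YuvToRGB kernels advance the output pointer by a count scaled to bytes, four bytes per RGBA8888 pixel (assuming r5/r4 hold a pixel count here); the same step in C:

    #include <stddef.h>
    #include <stdint.h>

    /* add r0, r5, LSL #2 : step the RGBA output pointer by 'pixels'
     * 4-byte pixels.                                                */
    static uint8_t *advance_rgba(uint8_t *out, size_t pixels)
    {
        return out + (pixels << 2);
    }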
D | rsCpuIntrinsics_advsimd_YuvToRGB.S |
  309 add x0, x0, x4, LSL #2
  313 sub x2, x5, x6, LSL #1
  336 add x0, x0, x5, LSL #2
  362 add x0, x0, x5, LSL #2
|
/frameworks/rs/toolkit/ |
D | Resize_neon.S |
  210 mov r9, r3, LSL #VECSHIFT
  238 sub r8, r12, r10, LSL #COMPONENT_SHIFT + 1
  274 sub r4, r4, r10, LSL #COMPONENT_SHIFT
  275 sub r5, r5, r10, LSL #COMPONENT_SHIFT
  276 sub r6, r6, r10, LSL #COMPONENT_SHIFT
  277 sub r7, r7, r10, LSL #COMPONENT_SHIFT
  287 mov r2, r2, LSL #(15 - CHUNKSHIFT)
  288 mov r3, r3, LSL #(15 - CHUNKSHIFT)
  330 add r4, r4, lr, LSL #COMPONENT_SHIFT
  331 add r5, r5, lr, LSL #COMPONENT_SHIFT
  [all …]
|
D | Resize_advsimd.S |
  118 add x0, x0, x1, LSL #32
  210 sub x14, x12, x13, LSL #(COMPONENT_SHIFT + 1)
  243 sub x4, x4, x13, LSL #(COMPONENT_SHIFT)
  244 sub x5, x5, x13, LSL #(COMPONENT_SHIFT)
  245 sub x6, x6, x13, LSL #(COMPONENT_SHIFT)
  246 sub x7, x7, x13, LSL #(COMPONENT_SHIFT)
  295 add x4, x4, x11, LSL #(COMPONENT_SHIFT)
  296 add x5, x5, x11, LSL #(COMPONENT_SHIFT)
  297 add x6, x6, x11, LSL #(COMPONENT_SHIFT)
  298 add x7, x7, x11, LSL #(COMPONENT_SHIFT)
  [all …]
|
D | YuvToRgb_neon.S |
  233 add r0, r5, LSL #2
  261 add r0, r4, LSL #2
  287 add r0, r4, LSL #2
|
/frameworks/av/media/codecs/amrnb/common/include/ |
D | norm_l.h | 118 EORNE L_var1, L_var1, L_var1, LSL #1
|
D | norm_s.h | 118 EORNE var1, var1, var1, LSL #1
|
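The single match in norm_l.h (and its 16-bit twin in norm_s.h) is the classic normalisation trick: for a non-zero value, x ^ (x << 1) has its highest set bit exactly where the redundant sign bits end, so a count-leading-zeros of that (paired with the EORNE in the same inline-asm block, not shown in the match) gives the shift needed to normalise. A C sketch of the same computation using the GCC/Clang builtin:

    #include <stdint.h>

    /* C equivalent of EORNE var, var, var, LSL #1 followed by a CLZ.
     * norm_l(0) is 0 by convention, hence the conditional early-out. */
    static int norm_l_sketch(int32_t x)
    {
        uint32_t u = (uint32_t)x;
        if (u == 0)
            return 0;
        return __builtin_clz(u ^ (u << 1));
    }

For example, norm_l_sketch(1) is 30, norm_l_sketch(-1) is 31, and norm_l_sketch(0x40000000) is 0, matching the usual basic-op definition.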