/external/swiftshader/third_party/llvm-7.0/llvm/test/MC/ARM/neon-bitwise-encoding.s
    110  veor q4, q7, q3
    111  veor.8 q4, q7, q3
    112  veor.16 q4, q7, q3
    113  veor.32 q4, q7, q3
    114  veor.64 q4, q7, q3
    116  veor.i8 q4, q7, q3
    117  veor.i16 q4, q7, q3
    118  veor.i32 q4, q7, q3
    119  veor.i64 q4, q7, q3
    121  veor.s8 q4, q7, q3
    [all …]
|
/external/llvm/test/MC/ARM/neon-bitwise-encoding.s
    110  veor q4, q7, q3
    111  veor.8 q4, q7, q3
    112  veor.16 q4, q7, q3
    113  veor.32 q4, q7, q3
    114  veor.64 q4, q7, q3
    116  veor.i8 q4, q7, q3
    117  veor.i16 q4, q7, q3
    118  veor.i32 q4, q7, q3
    119  veor.i64 q4, q7, q3
    121  veor.s8 q4, q7, q3
    [all …]
|
/external/capstone/suite/MC/ARM/neon-bitwise-encoding.s.cs
    23   0x56,0x81,0x0e,0xf3 = veor q4, q7, q3
    24   0x56,0x81,0x0e,0xf3 = veor q4, q7, q3
    25   0x56,0x81,0x0e,0xf3 = veor q4, q7, q3
    26   0x56,0x81,0x0e,0xf3 = veor q4, q7, q3
    27   0x56,0x81,0x0e,0xf3 = veor q4, q7, q3
    28   0x56,0x81,0x0e,0xf3 = veor q4, q7, q3
    29   0x56,0x81,0x0e,0xf3 = veor q4, q7, q3
    30   0x56,0x81,0x0e,0xf3 = veor q4, q7, q3
    31   0x56,0x81,0x0e,0xf3 = veor q4, q7, q3
    32   0x56,0x81,0x0e,0xf3 = veor q4, q7, q3
    [all …]
|
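The Capstone rows above pair one fixed 4-byte encoding with every size-suffixed spelling because VEOR, as a pure bitwise operation, has no element-size field: veor.8 through veor.s8 all assemble to the same word. A minimal decode sketch in C, assuming the ARMv7 A1 VEOR (register) field layout (registers encoded as D:Vd, N:Vn, M:Vm, with q<i> formed from the d-register pair 2i, 2i+1):

    #include <stdint.h>
    #include <stdio.h>

    int main(void) {
        /* the test bytes 0x56,0x81,0x0e,0xf3 read as a little-endian word */
        uint32_t insn = 0xf30e8156u;
        unsigned d = ((insn >> 22) & 1) << 4 | ((insn >> 12) & 0xf); /* D:Vd */
        unsigned n = ((insn >> 7)  & 1) << 4 | ((insn >> 16) & 0xf); /* N:Vn */
        unsigned m = ((insn >> 5)  & 1) << 4 | (insn & 0xf);         /* M:Vm */
        unsigned q = (insn >> 6) & 1;  /* Q = 1 selects the quad-register form */
        /* d registers pair up into q registers: q<i> = {d<2i>, d<2i+1>} */
        printf("veor q%u, q%u, q%u (Q=%u)\n", d / 2, n / 2, m / 2, q);
        return 0; /* prints: veor q4, q7, q3 (Q=1) */
    }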
/external/llvm/test/CodeGen/ARM/thumb-big-stack.ll
    145  …tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q…
    147  …tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q…
    149  …tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q…
    151  …tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q…
    153  …tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q…
    155  …tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q…
    157  …tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q…
    159  …tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q…
    161  …tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q…
    163  …tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q…
    [all …]
|
/external/swiftshader/third_party/llvm-7.0/llvm/test/CodeGen/ARM/thumb-big-stack.ll
    145  …tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q…
    147  …tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q…
    149  …tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q…
    151  …tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q…
    153  …tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q…
    155  …tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q…
    157  …tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q…
    159  …tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q…
    161  …tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q…
    163  …tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q…
    [all …]
|
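Both copies of thumb-big-stack.ll use an empty inline-asm statement whose constraint string clobbers the whole q-register file, so the backend must spill every live NEON value and grow the Thumb frame. A hedged C analogue of the same trick, assuming a toolchain and ARM target that accept NEON q-register names in clobber lists:

    /* Empty asm with a full NEON clobber list: nothing executes, but the
       compiler must assume q0-q15 are destroyed, forcing any live vector
       values to be spilled to the stack across this point. */
    static inline void clobber_all_neon(void) {
        __asm__ volatile("" : : :
            "q0", "q1", "q2",  "q3",  "q4",  "q5",  "q6",  "q7",
            "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15");
    }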
/external/boringssl/ios-arm/crypto/fipsmodule/aesv8-armx32.S
    55   vld1.8 {q3},[r0]!
    65   vtbl.8 d20,{q3},d4
    66   vtbl.8 d21,{q3},d5
    67   vext.8 q9,q0,q3,#12
    68   vst1.32 {q3},[r2]!
    72   veor q3,q3,q9
    74   veor q3,q3,q9
    77   veor q3,q3,q9
    79   veor q3,q3,q10
    84   vtbl.8 d20,{q3},d4
    [all …]
|
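In the aesv8-armx32.S hits, the vtbl lookups implement RotWord/SubWord via a table, and the vext/veor cascade xors the previous round key into itself one 32-bit word at a time: the standard AES-128 key-schedule recurrence. A scalar C sketch of that recurrence, with sub_rot_word as a hypothetical stand-in for what the vtbl lookups compute:

    #include <stdint.h>

    /* One AES-128 key-schedule step on a round key held as four 32-bit
       words. The three veor q3,q3,q9 instructions above are the cascaded
       xors; sub_rot_word is a hypothetical placeholder for the vtbl-based
       SubWord(RotWord(w)) of the previous key's last word. */
    static void next_round_key(uint32_t rk[4], uint32_t sub_rot_word,
                               uint32_t rcon) {
        rk[0] ^= sub_rot_word ^ rcon; /* transformed last word folds in  */
        rk[1] ^= rk[0];               /* each word xors the one before   */
        rk[2] ^= rk[1];               /* it, matching the vext-by-12 +   */
        rk[3] ^= rk[2];               /* veor cascade in the assembly    */
    }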
/external/boringssl/ios-arm/crypto/fipsmodule/ghashv8-armx32.S
    30   vext.8 q3,q9,q9,#8
    34   vshr.u64 q10,q3,#63
    37   vshl.i64 q3,q3,#1
    40   vorr q3,q3,q10 @ H<<<=1
    41   veor q12,q3,q8 @ twisted H
    87   vext.8 q3,q9,q9,#8
    89   .byte 0x86,0x0e,0xa8,0xf2 @ pmull q0,q12,q3 @ H.lo·Xi.lo
    90   veor q9,q9,q3 @ Karatsuba pre-processing
    91   .byte 0x87,0x4e,0xa9,0xf2 @ pmull2 q2,q12,q3 @ H.hi·Xi.hi
    152  vext.8 q3,q8,q8,#8 @ rotate I[0]
    [all …]
|
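The .byte-encoded pmull/pmull2 instructions above multiply 64-bit halves of the hash key H and the input Xi carry-lessly in GF(2)[x], and the "Karatsuba pre-processing" veor forms Xi.lo^Xi.hi so the middle term costs one multiply instead of two. A portable, deliberately slow sketch of the operation PMULL performs:

    #include <stdint.h>

    /* Carry-less (polynomial) 64x64 -> 128-bit multiply, the operation a
       single PMULL instruction performs. Not constant-time; illustration
       only - the assembly exists precisely to avoid a loop like this. */
    static void clmul64(uint64_t a, uint64_t b, uint64_t r[2]) {
        uint64_t lo = 0, hi = 0;
        for (int i = 0; i < 64; i++) {
            if ((b >> i) & 1) {
                lo ^= a << i;
                if (i) hi ^= a >> (64 - i);
            }
        }
        r[0] = lo; /* low 64 bits  */
        r[1] = hi; /* high 64 bits */
    }

    /* Karatsuba over GF(2): with H = Hhi:Hlo and Xi = Xhi:Xlo, the middle
       128-bit term equals clmul(Hhi^Hlo, Xhi^Xlo) ^ lo ^ hi, so the full
       256-bit-style product needs three clmuls - which is why the asm
       xors q9 with q3 before the third pmull. */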
/external/freetype/src/base/ftbbox.c
    270  FT_Pos q3, in cubic_peak() argument
    288  FT_ABS( q3 ) | in cubic_peak()
    299  q3 <<= shift; in cubic_peak()
    306  q3 >>= -shift; in cubic_peak()
    312  while ( q2 > 0 || q3 > 0 ) in cubic_peak()
    315  if ( q1 + q2 > q3 + q4 ) /* first half */ in cubic_peak()
    317  q4 = q4 + q3; in cubic_peak()
    318  q3 = q3 + q2; in cubic_peak()
    320  q4 = q4 + q3; in cubic_peak()
    321  q3 = q3 + q2; in cubic_peak()
    [all …]
|
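cubic_peak first shifts q1..q4 into a safe fixed-point range (the q3 <<= shift and q3 >>= -shift hits), then repeatedly bisects the curve toward the half that can contain the maximum (the q1 + q2 > q3 + q4 test); the runs of q4 = q4 + q3; q3 = q3 + q2; are de Casteljau midpoint cascades with the halvings deferred into later shifts. A sketch of one explicit bisection step under those assumptions, with the divisions written out:

    /* One de Casteljau bisection onto the first half [0, 1/2] of a cubic
       with integer control values q1..q4. The FreeType routine fuses the
       /2 steps into the scaling shifts; this version keeps them explicit. */
    static void split_first_half(long *q1, long *q2, long *q3, long *q4) {
        (void)q1;                   /* q1 (the curve start) is unchanged */
        *q4 = (*q4 + *q3) / 2;      /* first midpoint pass               */
        *q3 = (*q3 + *q2) / 2;
        *q2 = (*q2 + *q1) / 2;
        *q4 = (*q4 + *q3) / 2;      /* second pass                       */
        *q3 = (*q3 + *q2) / 2;
        *q4 = (*q4 + *q3) / 2;      /* q4 is now the value at t = 1/2    */
    }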
/external/apache-commons-math/src/main/java/org/apache/commons/math/geometry/Rotation.java
    113  private final double q3; field in Rotation
    133  public Rotation(double q0, double q1, double q2, double q3, in Rotation() argument
    138  double inv = 1.0 / FastMath.sqrt(q0 * q0 + q1 * q1 + q2 * q2 + q3 * q3); in Rotation()
    142  q3 *= inv; in Rotation()
    148  this.q3 = q3; in Rotation()
    186  q3 = coeff * axis.getZ(); in Rotation()
    262  q3 = inv * (ort[0][1] - ort[1][0]); in Rotation()
    271  q3 = inv * (ort[0][2] + ort[2][0]); in Rotation()
    280  q3 = inv * (ort[2][1] + ort[1][2]); in Rotation()
    284  q3 = 0.5 * FastMath.sqrt(s + 1.0); in Rotation()
    [all …]
|
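The Rotation constructor optionally renormalizes (q0, q1, q2, q3) with a single reciprocal square root (line 138, gated by a boolean flag truncated out of the hit above), and the branches at lines 262/271/280 recover q3 from off-diagonal matrix sums or differences depending on which quaternion component is largest, keeping the division well conditioned. The normalization itself, as a small C sketch:

    #include <math.h>

    /* Scale all four quaternion components by 1/||q|| so (q0,q1,q2,q3)
       is a unit quaternion - the same computation as line 138 above. */
    static void normalize_quat(double q[4]) {
        double inv = 1.0 / sqrt(q[0] * q[0] + q[1] * q[1] +
                                q[2] * q[2] + q[3] * q[3]);
        for (int i = 0; i < 4; i++)
            q[i] *= inv;
    }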
/external/libvpx/libvpx/vpx_dsp/loopfilter.c
    36   uint8_t q1, uint8_t q2, uint8_t q3) { in filter_mask() argument
    43   mask |= (abs(q3 - q2) > limit) * -1; in filter_mask()
    50   uint8_t q2, uint8_t q3) { in flat_mask4() argument
    57   mask |= (abs(q3 - q0) > thresh) * -1; in flat_mask4()
    63   uint8_t q1, uint8_t q2, uint8_t q3, in flat_mask5() argument
    65   int8_t mask = ~flat_mask4(thresh, p3, p2, p1, p0, q0, q1, q2, q3); in flat_mask5()
    122  q3 = s[3 * pitch]; in vpx_lpf_horizontal_4_c() local
    124  filter_mask(*limit, *blimit, p3, p2, p1, p0, q0, q1, q2, q3); in vpx_lpf_horizontal_4_c()
    146  const uint8_t q0 = s[0], q1 = s[1], q2 = s[2], q3 = s[3]; in vpx_lpf_vertical_4_c() local
    148  filter_mask(*limit, *blimit, p3, p2, p1, p0, q0, q1, q2, q3); in vpx_lpf_vertical_4_c()
    [all …]
|
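filter_mask builds a branch-free decision value: each (condition) * -1 yields 0x00 or 0xFF (all bits set), and or-ing them accumulates "some neighbor delta exceeded the limit"; the final ~mask is all-ones only where every test passed, so it can gate the filter with plain bitwise operations, mirroring how the SIMD versions work. An abbreviated sketch of the idiom (the real function also checks the p3..p1 deltas symmetrically):

    #include <stdint.h>
    #include <stdlib.h>

    /* Branch-free loop-filter mask: 0xFF where the edge may be filtered,
       0x00 where any pixel step across the edge is too large. */
    static int8_t filter_mask_sketch(uint8_t limit, uint8_t blimit,
                                     uint8_t p1, uint8_t p0,
                                     uint8_t q0, uint8_t q1,
                                     uint8_t q2, uint8_t q3) {
        int8_t mask = 0;
        mask |= (abs(q1 - q0) > limit) * -1;   /* (cond) * -1 -> 0x00/0xFF */
        mask |= (abs(q2 - q1) > limit) * -1;
        mask |= (abs(q3 - q2) > limit) * -1;
        mask |= (abs(p0 - q0) * 2 + abs(p1 - q1) / 2 > blimit) * -1;
        return ~mask;
    }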
/external/boringssl/linux-arm/crypto/fipsmodule/ghashv8-armx32.S
    29   vext.8 q3,q9,q9,#8
    33   vshr.u64 q10,q3,#63
    36   vshl.i64 q3,q3,#1
    39   vorr q3,q3,q10 @ H<<<=1
    40   veor q12,q3,q8 @ twisted H
    84   vext.8 q3,q9,q9,#8
    86   .byte 0x86,0x0e,0xa8,0xf2 @ pmull q0,q12,q3 @ H.lo·Xi.lo
    87   veor q9,q9,q3 @ Karatsuba pre-processing
    88   .byte 0x87,0x4e,0xa9,0xf2 @ pmull2 q2,q12,q3 @ H.hi·Xi.hi
    147  vext.8 q3,q8,q8,#8 @ rotate I[0]
    [all …]
|
/external/libxaac/decoder/armv7/ixheaacd_sbr_imdct_using_fft.s
    186  VADD.I32 q2, q3, q7
    190  VSUB.I32 q6, q3, q7
    193  VADD.S32 q3, q9, q6
    218  VPUSH {q3}
    234  VSUB.I32 q3, q14, q12
    264  VADD.S32 q14, q3, q12
    265  VSUB.S32 q10, q3, q12
    266  VADD.S32 q3, q13, q1
    271  VSUB.S32 q12, q3, q10
    274  VADD.S32 q14, q3, q10
    [all …]
|
/external/libxaac/decoder/armv7/ixheaacd_imdct_using_fft.s
    182  VADD.I32 q2, q3, q7
    186  VSUB.I32 q6, q3, q7
    189  VADD.S32 q3, q9, q6
    214  VPUSH {q3}
    230  VSUB.I32 q3, q14, q12
    260  VADD.S32 q14, q3, q12
    261  VSUB.S32 q10, q3, q12
    262  VADD.S32 q3, q13, q1
    267  VSUB.S32 q12, q3, q10
    270  VADD.S32 q14, q3, q10
    [all …]
|
/external/libavc/common/arm/ih264_padding_neon.s
    185  vdup.u8 q3, r11
    188  vst1.8 {q3}, [r4], r1 @ 16 bytes store
    197  vdup.u8 q3, r11
    200  vst1.8 {q3}, [r4], r1 @ 16 bytes store
    217  vdup.u8 q3, r11
    220  vst1.8 {q3}, [r4]! @ 16 bytes store
    223  vst1.8 {q3}, [r4], r6 @ 16 bytes store
    233  vdup.u8 q3, r11
    236  vst1.8 {q3}, [r4]! @ 16 bytes store
    237  vst1.8 {q3}, [r4], r6 @ 16 bytes store
    [all …]
|
/external/libavc/common/arm/ih264_inter_pred_luma_horz_hpel_vert_qpel_a9q.s
    153  vaddl.u8 q3, d0, d5
    159  vmla.u16 q3, q4, q11
    163  vmls.u16 q3, q4, q12
    170  vst1.32 {q3}, [r9], r6 @ store temp buffer 0
    249  vaddl.s16 q3, d7, d17
    255  vmlal.s16 q3, d31, d22
    256  vmlsl.s16 q3, d29, d24
    262  vqrshrun.s32 d19, q3, #10
    279  vaddl.s16 q3, d9, d21
    291  vmlal.s16 q3, d31, d22
    [all …]
|
/external/libaom/libaom/aom_dsp/loopfilter.c
    45   uint8_t q1, uint8_t q2, uint8_t q3) { in filter_mask() argument
    52   mask |= (abs(q3 - q2) > limit) * -1; in filter_mask()
    82   uint8_t q2, uint8_t q3) { in flat_mask4() argument
    89   mask |= (abs(q3 - q0) > thresh) * -1; in flat_mask4()
    206  const uint8_t q0 = *oq0, q1 = *oq1, q2 = *oq2, q3 = *oq3; in filter8() local
    212  *oq0 = ROUND_POWER_OF_TWO(p2 + p1 + p0 + 2 * q0 + q1 + q2 + q3, 3); in filter8()
    213  *oq1 = ROUND_POWER_OF_TWO(p1 + p0 + q0 + 2 * q1 + q2 + q3 + q3, 3); in filter8()
    214  *oq2 = ROUND_POWER_OF_TWO(p0 + q0 + q1 + 2 * q2 + q3 + q3 + q3, 3); in filter8()
    257  const uint8_t q0 = s[0 * p], q1 = s[1 * p], q2 = s[2 * p], q3 = s[3 * p]; in aom_lpf_horizontal_8_c() local
    260  filter_mask(*limit, *blimit, p3, p2, p1, p0, q0, q1, q2, q3); in aom_lpf_horizontal_8_c()
    [all …]
|
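filter8's outputs (lines 212-214 above) are eight-entry sliding-window sums divided by 8 with rounding; ROUND_POWER_OF_TWO adds half of 2^n before shifting so truncation becomes round-to-nearest. A sketch of the helper and one output tap, matching the q3-weighted sum in the first hit:

    #include <stdint.h>

    /* Round-to-nearest division by 2^n: bias by half the divisor, then
       shift. This is the helper the filter8 hits rely on. */
    #define ROUND_POWER_OF_TWO(value, n) (((value) + (1 << ((n) - 1))) >> (n))

    /* One output of the 8-tap "flat" filter: a weighted window sum over
       three pixels on each side, divided by 8 with rounding. The sum is
       at most 8 * 255, so the biased value still fits in an int. */
    static uint8_t filter8_oq0(uint8_t p2, uint8_t p1, uint8_t p0,
                               uint8_t q0, uint8_t q1, uint8_t q2,
                               uint8_t q3) {
        return (uint8_t)ROUND_POWER_OF_TWO(
            p2 + p1 + p0 + 2 * q0 + q1 + q2 + q3, 3);
    }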
/external/libjpeg-turbo/simd/arm/jsimd_neon.S
    111  JLONG q1, q2, q3, q4, q5, q6, q7; \
    133  q3 = ((JLONG)row0 - (JLONG)row4) << 13; \
    137  q1 = q3 + q2; \
    155  q3 = q3 - q2; \
    171  tmp12 = q3; \
    255  vmul.s16 q11, q11, q3
    263  vmul.s16 q15, q15, q3
    274  vsubl.s16 q3, ROW0L, ROW4L
    283  vshl.s32 q3, q3, #13
    287  vadd.s32 q1, q3, q2
    [all …]
|
/external/libvpx/libvpx/vpx_dsp/mips/loopfilter_4_msa.c
    20   v16u8 p3, p2, p1, p0, q3, q2, q1, q0, p1_out, p0_out, q0_out, q1_out; in vpx_lpf_horizontal_4_msa() local
    23   LD_UB8((src - 4 * pitch), pitch, p3, p2, p1, p0, q0, q1, q2, q3); in vpx_lpf_horizontal_4_msa()
    29   LPF_MASK_HEV(p3, p2, p1, p0, q0, q1, q2, q3, limit, b_limit, thresh, hev, in vpx_lpf_horizontal_4_msa()
    48   v16u8 p3, p2, p1, p0, q3, q2, q1, q0; in vpx_lpf_horizontal_4_dual_msa() local
    51   LD_UB8((src - 4 * pitch), pitch, p3, p2, p1, p0, q0, q1, q2, q3); in vpx_lpf_horizontal_4_dual_msa()
    65   LPF_MASK_HEV(p3, p2, p1, p0, q0, q1, q2, q3, limit0, b_limit0, thresh0, hev, in vpx_lpf_horizontal_4_dual_msa()
    77   v16u8 p3, p2, p1, p0, q3, q2, q1, q0; in vpx_lpf_vertical_4_msa() local
    80   LD_UB8((src - 4), pitch, p3, p2, p1, p0, q0, q1, q2, q3); in vpx_lpf_vertical_4_msa()
    86   TRANSPOSE8x8_UB_UB(p3, p2, p1, p0, q0, q1, q2, q3, p3, p2, p1, p0, q0, q1, q2, in vpx_lpf_vertical_4_msa()
    87   q3); in vpx_lpf_vertical_4_msa()
    [all …]
|
/external/libvpx/libvpx/vpx_dsp/mips/loopfilter_8_msa.c
    20   v16u8 p3, p2, p1, p0, q3, q2, q1, q0; in vpx_lpf_horizontal_8_msa() local
    27   LD_UB8((src - 4 * pitch), pitch, p3, p2, p1, p0, q0, q1, q2, q3); in vpx_lpf_horizontal_8_msa()
    33   LPF_MASK_HEV(p3, p2, p1, p0, q0, q1, q2, q3, limit, b_limit, thresh, hev, in vpx_lpf_horizontal_8_msa()
    35   VP9_FLAT4(p3, p2, p0, q0, q2, q3, flat); in vpx_lpf_horizontal_8_msa()
    48   q2, zero, q3, p3_r, p2_r, p1_r, p0_r, q0_r, q1_r, q2_r, q3_r); in vpx_lpf_horizontal_8_msa()
    86   v16u8 p3, p2, p1, p0, q3, q2, q1, q0; in vpx_lpf_horizontal_8_dual_msa() local
    96   LD_UB8(src - (4 * pitch), pitch, p3, p2, p1, p0, q0, q1, q2, q3); in vpx_lpf_horizontal_8_dual_msa()
    111  LPF_MASK_HEV(p3, p2, p1, p0, q0, q1, q2, q3, limit, b_limit, thresh, hev, in vpx_lpf_horizontal_8_dual_msa()
    113  VP9_FLAT4(p3, p2, p0, q0, q2, q3, flat); in vpx_lpf_horizontal_8_dual_msa()
    120  q2, zero, q3, p3_r, p2_r, p1_r, p0_r, q0_r, q1_r, q2_r, q3_r); in vpx_lpf_horizontal_8_dual_msa()
    [all …]
|
/external/libvpx/libvpx/vpx_dsp/mips/loopfilter_masks_dspr2.h
    30   uint32_t q2, uint32_t q3, in filter_hev_mask_dspr2() argument
    92   [p0] "r"(p0), [q1] "r"(q1), [q0] "r"(q0), [q2] "r"(q2), [q3] "r"(q3), in filter_hev_mask_dspr2()
    131  uint32_t q3, uint32_t *hev, uint32_t *mask, uint32_t *flat) { in filter_hev_mask_flatmask4_dspr2() argument
    232  [p0] "r"(p0), [q1] "r"(q1), [q0] "r"(q0), [q2] "r"(q2), [q3] "r"(q3), in filter_hev_mask_flatmask4_dspr2()
    271  uint32_t q3, uint32_t q4, uint32_t *flat2) { in flatmask5() argument
    345  [q0] "r"(q0), [q1] "r"(q1), [q2] "r"(q2), [q3] "r"(q3), [q4] "r"(q4), in flatmask5()
|
/external/libaom/libaom/aom_dsp/mips/loopfilter_4_msa.c
    20   v16u8 p3, p2, p1, p0, q3, q2, q1, q0, p1_out, p0_out, q0_out, q1_out; in aom_lpf_horizontal_4_msa() local
    23   LD_UB8((src - 4 * pitch), pitch, p3, p2, p1, p0, q0, q1, q2, q3); in aom_lpf_horizontal_4_msa()
    29   LPF_MASK_HEV(p3, p2, p1, p0, q0, q1, q2, q3, limit, b_limit, thresh, hev, in aom_lpf_horizontal_4_msa()
    48   v16u8 p3, p2, p1, p0, q3, q2, q1, q0; in aom_lpf_horizontal_4_dual_msa() local
    51   LD_UB8((src - 4 * pitch), pitch, p3, p2, p1, p0, q0, q1, q2, q3); in aom_lpf_horizontal_4_dual_msa()
    65   LPF_MASK_HEV(p3, p2, p1, p0, q0, q1, q2, q3, limit0, b_limit0, thresh0, hev, in aom_lpf_horizontal_4_dual_msa()
    77   v16u8 p3, p2, p1, p0, q3, q2, q1, q0; in aom_lpf_vertical_4_msa() local
    80   LD_UB8((src - 4), pitch, p3, p2, p1, p0, q0, q1, q2, q3); in aom_lpf_vertical_4_msa()
    86   TRANSPOSE8x8_UB_UB(p3, p2, p1, p0, q0, q1, q2, q3, p3, p2, p1, p0, q0, q1, q2, in aom_lpf_vertical_4_msa()
    87   q3); in aom_lpf_vertical_4_msa()
    [all …]
|
/external/libaom/libaom/aom_dsp/mips/loopfilter_8_msa.c
    20   v16u8 p3, p2, p1, p0, q3, q2, q1, q0; in aom_lpf_horizontal_8_msa() local
    27   LD_UB8((src - 4 * pitch), pitch, p3, p2, p1, p0, q0, q1, q2, q3); in aom_lpf_horizontal_8_msa()
    33   LPF_MASK_HEV(p3, p2, p1, p0, q0, q1, q2, q3, limit, b_limit, thresh, hev, in aom_lpf_horizontal_8_msa()
    35   AOM_FLAT4(p3, p2, p0, q0, q2, q3, flat); in aom_lpf_horizontal_8_msa()
    48   q2, zero, q3, p3_r, p2_r, p1_r, p0_r, q0_r, q1_r, q2_r, q3_r); in aom_lpf_horizontal_8_msa()
    86   v16u8 p3, p2, p1, p0, q3, q2, q1, q0; in aom_lpf_horizontal_8_dual_msa() local
    96   LD_UB8(src - (4 * pitch), pitch, p3, p2, p1, p0, q0, q1, q2, q3); in aom_lpf_horizontal_8_dual_msa()
    111  LPF_MASK_HEV(p3, p2, p1, p0, q0, q1, q2, q3, limit, b_limit, thresh, hev, in aom_lpf_horizontal_8_dual_msa()
    113  AOM_FLAT4(p3, p2, p0, q0, q2, q3, flat); in aom_lpf_horizontal_8_dual_msa()
    120  q2, zero, q3, p3_r, p2_r, p1_r, p0_r, q0_r, q1_r, q2_r, q3_r); in aom_lpf_horizontal_8_dual_msa()
    [all …]
|
/external/libvpx/libvpx/vpx_dsp/arm/highbd_loopfilter_neon.c
    47   const uint16x8_t q3, uint16x8_t *hev, uint16x8_t *mask) { in filter_hev_mask4() argument
    56   *mask = vmaxq_u16(*mask, vabdq_u16(q3, q2)); in filter_hev_mask4()
    73   const uint16x8_t q2, const uint16x8_t q3, uint16x8_t *flat, in filter_flat_hev_mask() argument
    77   q0, q1, q2, q3, hev, &mask); in filter_flat_hev_mask()
    81   *flat = vmaxq_u16(*flat, vabdq_u16(q3, q0)); in filter_flat_hev_mask()
    93   const uint16x8_t q3, const uint16x8_t q4, in flat_mask5() argument
    102  flat2 = vmaxq_u16(flat2, vabdq_u16(q3, q0)); in flat_mask5()
    151  const uint16x8_t q2, const uint16x8_t q3, in calc_7_tap_filter() argument
    166  *oq0 = calc_7_tap_filter_kernel(p3, p0, q0, q3, &sum); in calc_7_tap_filter()
    167  *oq1 = calc_7_tap_filter_kernel(p2, q0, q1, q3, &sum); in calc_7_tap_filter()
    [all …]
|
/external/boringssl/src/crypto/curve25519/asm/x25519-asm-arm.S
    92   vshr.u64 q3,q2,#7
    122  vand q5,q5,q3
    123  vand q7,q7,q3
    124  vand q9,q9,q3
    125  vand q11,q11,q3
    126  vand q3,q13,q3
    157  vadd.i64 q3,q3,q13
    159  vadd.i64 q14,q3,q0
    188  vsub.i64 q3,q3,q10
    199  vsub.i64 q3,q6,q7
    [all …]
|
/external/flac/libFLAC/lpc_intrin_avx2.c
    64   __m256i q0, q1, q2, q3, q4, q5, q6, q7, q8, q9, q10, q11; in FLAC__lpc_compute_residual_from_qlp_coefficients_16_intrin_avx2() local
    68   q3 = _mm256_set1_epi32(0xffff & qlp_coeff[3 ]); in FLAC__lpc_compute_residual_from_qlp_coefficients_16_intrin_avx2()
    88   …mull = _mm256_madd_epi16(q3, _mm256_loadu_si256((const __m256i*)(data+i-4 ))); summ = _mm256_add_… in FLAC__lpc_compute_residual_from_qlp_coefficients_16_intrin_avx2()
    97   __m256i q0, q1, q2, q3, q4, q5, q6, q7, q8, q9, q10; in FLAC__lpc_compute_residual_from_qlp_coefficients_16_intrin_avx2() local
    101  q3 = _mm256_set1_epi32(0xffff & qlp_coeff[3 ]); in FLAC__lpc_compute_residual_from_qlp_coefficients_16_intrin_avx2()
    119  …mull = _mm256_madd_epi16(q3, _mm256_loadu_si256((const __m256i*)(data+i-4 ))); summ = _mm256_add_… in FLAC__lpc_compute_residual_from_qlp_coefficients_16_intrin_avx2()
    130  __m256i q0, q1, q2, q3, q4, q5, q6, q7, q8, q9; in FLAC__lpc_compute_residual_from_qlp_coefficients_16_intrin_avx2() local
    134  q3 = _mm256_set1_epi32(0xffff & qlp_coeff[3 ]); in FLAC__lpc_compute_residual_from_qlp_coefficients_16_intrin_avx2()
    150  …mull = _mm256_madd_epi16(q3, _mm256_loadu_si256((const __m256i*)(data+i-4 ))); summ = _mm256_add_… in FLAC__lpc_compute_residual_from_qlp_coefficients_16_intrin_avx2()
    159  __m256i q0, q1, q2, q3, q4, q5, q6, q7, q8; in FLAC__lpc_compute_residual_from_qlp_coefficients_16_intrin_avx2() local
    [all …]
|
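In the FLAC hits, each quantized LPC coefficient is broadcast with _mm256_set1_epi32(0xffff & c), leaving it in the low 16 bits of every 32-bit lane so that _mm256_madd_epi16 multiplies it against the low half of each loaded sample and accumulates into 32-bit lanes (the zeroed high half contributes nothing). The same madd-then-add pattern computes any 16-bit dot product; a self-contained sketch, assuming AVX2, n a multiple of 16, and inputs small enough that the 32-bit accumulator lanes do not overflow:

    #include <immintrin.h>
    #include <stdint.h>

    /* Dot product of two int16 arrays via _mm256_madd_epi16: each madd
       multiplies sixteen 16-bit pairs and sums adjacent products into
       eight 32-bit lanes; one add per iteration accumulates them. */
    static int32_t dot16_avx2(const int16_t *a, const int16_t *b, int n) {
        __m256i acc = _mm256_setzero_si256();
        for (int i = 0; i < n; i += 16) {
            __m256i va = _mm256_loadu_si256((const __m256i *)(a + i));
            __m256i vb = _mm256_loadu_si256((const __m256i *)(b + i));
            acc = _mm256_add_epi32(acc, _mm256_madd_epi16(va, vb));
        }
        /* horizontal sum of the eight 32-bit lanes */
        __m128i lo = _mm256_castsi256_si128(acc);
        __m128i hi = _mm256_extracti128_si256(acc, 1);
        __m128i s = _mm_add_epi32(lo, hi);
        s = _mm_add_epi32(s, _mm_shuffle_epi32(s, _MM_SHUFFLE(1, 0, 3, 2)));
        s = _mm_add_epi32(s, _mm_shuffle_epi32(s, _MM_SHUFFLE(2, 3, 0, 1)));
        return _mm_cvtsi128_si32(s);
    }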