Home
last modified time | relevance | path

Searched refs: q8s32 (Results 1 – 4 of 4) sorted by relevance

/external/libvpx/libvpx/vp8/encoder/arm/neon/
Dvp8_shortwalsh4x4_neon.c24 int32x4_t qEmptys32, q0s32, q1s32, q2s32, q3s32, q8s32; in vp8_short_walsh4x4_neon() local
77 q8s32 = vaddl_s16(v2tmp1.val[0], v2tmp0.val[0]); in vp8_short_walsh4x4_neon()
82 q0s32 = vaddq_s32(q8s32, q9s32); in vp8_short_walsh4x4_neon()
85 q3s32 = vsubq_s32(q8s32, q9s32); in vp8_short_walsh4x4_neon()
92 q8s32 = vreinterpretq_s32_u32(q8u32); in vp8_short_walsh4x4_neon()
97 q0s32 = vsubq_s32(q0s32, q8s32); in vp8_short_walsh4x4_neon()
102 q8s32 = vaddq_s32(q0s32, q15s32); in vp8_short_walsh4x4_neon()
107 d0s16 = vshrn_n_s32(q8s32, 3); in vp8_short_walsh4x4_neon()
/external/libvpx/libvpx/vp9/common/arm/neon/
Dvp9_iht4x4_add_neon.c27 int32x4_t q8s32, q9s32; in TRANSPOSE4X4() local
34 q8s32 = vreinterpretq_s32_s16(vcombine_s16(d0x2s16.val[0], d0x2s16.val[1])); in TRANSPOSE4X4()
36 q0x2s32 = vtrnq_s32(q8s32, q9s32); in TRANSPOSE4X4()
96 int32x4_t q8s32, q9s32, q10s32, q11s32, q12s32, q13s32, q14s32, q15s32; in IADST4x4_1D() local
112 q8s32 = vmull_s16(*d4s16, d19s16); in IADST4x4_1D()
117 q10s32 = vaddq_s32(q10s32, q8s32); in IADST4x4_1D()
119 q8s32 = vdupq_n_s32(sinpi_3_9); in IADST4x4_1D()
121 q15s32 = vmulq_s32(q15s32, q8s32); in IADST4x4_1D()
Dvp9_iht8x8_add_neon.c44 int32x4_t q2s32, q3s32, q5s32, q6s32, q8s32, q9s32; in IDCT8x8_1D() local
127 q8s32 = vmull_s16(d20s16, d1s16); in IDCT8x8_1D()
132 q8s32 = vmlal_s16(q8s32, d28s16, d0s16); in IDCT8x8_1D()
137 d30s16 = vrshrn_n_s32(q8s32, 14); in IDCT8x8_1D()
194 int32x4_t q0s32, q1s32, q2s32, q3s32, q4s32, q5s32, q6s32, q7s32, q8s32; in IADST8X8_1D() local
233 q8s32 = vmull_s16(d23s16, d31s16); in IADST8X8_1D()
238 q8s32 = vmlsl_s16(q8s32, d25s16, d30s16); in IADST8X8_1D()
250 q15s32 = vaddq_s32(q4s32, q8s32); in IADST8X8_1D()
252 q4s32 = vsubq_s32(q4s32, q8s32); in IADST8X8_1D()
/external/libvpx/libvpx/vpx_dsp/arm/
Dvariance_neon.c267 int32x4_t q7s32, q8s32, q9s32, q10s32; in vpx_mse16x16_neon() local
272 q8s32 = vdupq_n_s32(0); in vpx_mse16x16_neon()
294 q8s32 = vmlal_s16(q8s32, d23s16, d23s16); in vpx_mse16x16_neon()
304 q8s32 = vmlal_s16(q8s32, d27s16, d27s16); in vpx_mse16x16_neon()
312 q7s32 = vaddq_s32(q7s32, q8s32); in vpx_mse16x16_neon()
330 int32x4_t q7s32, q8s32, q9s32, q10s32; in vpx_get4x4sse_cs_neon() local
362 q8s32 = vmull_s16(d24s16, d24s16); in vpx_get4x4sse_cs_neon()
366 q7s32 = vaddq_s32(q7s32, q8s32); in vpx_get4x4sse_cs_neon()