/external/libvpx/libvpx/vp9/common/arm/neon/
D | vp9_iht4x4_add_neon.c |
     42  static INLINE void GENERATE_SINE_CONSTANTS(int16x4_t *d3s16, int16x4_t *d4s16,  in GENERATE_SINE_CONSTANTS() argument
     44  *d3s16 = vdup_n_s16(sinpi_1_9);  in GENERATE_SINE_CONSTANTS()
     85  static INLINE void IADST4x4_1D(int16x4_t *d3s16, int16x4_t *d4s16,  in IADST4x4_1D() argument
     98  q10s32 = vmull_s16(*d3s16, d16s16);  in IADST4x4_1D()
    102  q14s32 = vmull_s16(*d3s16, d18s16);  in IADST4x4_1D()
    133  int16x4_t d0s16, d1s16, d2s16, d3s16, d4s16, d5s16;  in vp9_iht4x4_16_add_neon() local
    152  GENERATE_SINE_CONSTANTS(&d3s16, &d4s16, &d5s16, &q3s16);  in vp9_iht4x4_16_add_neon()
    161  IADST4x4_1D(&d3s16, &d4s16, &d5s16, &q3s16, &q8s16, &q9s16);  in vp9_iht4x4_16_add_neon()
    166  GENERATE_SINE_CONSTANTS(&d3s16, &d4s16, &d5s16, &q3s16);  in vp9_iht4x4_16_add_neon()
    169  IADST4x4_1D(&d3s16, &d4s16, &d5s16, &q3s16, &q8s16, &q9s16);  in vp9_iht4x4_16_add_neon()
    [all …]
D | vp9_iht8x8_add_neon.c |
     39  int16x4_t d0s16, d1s16, d2s16, d3s16;  in IDCT8x8_1D() local
     50  d3s16 = vdup_n_s16(cospi_20_64);  in IDCT8x8_1D()
     76  q5s32 = vmlsl_s16(q5s32, d22s16, d3s16);  in IDCT8x8_1D()
     77  q6s32 = vmlsl_s16(q6s32, d23s16, d3s16);  in IDCT8x8_1D()
     88  q9s32 = vmull_s16(d26s16, d3s16);  in IDCT8x8_1D()
     89  q13s32 = vmull_s16(d27s16, d3s16);  in IDCT8x8_1D()
    189  int16x4_t d0s16, d1s16, d2s16, d3s16, d4s16, d5s16, d6s16, d7s16;  in IADST8X8_1D() local
    255  d3s16 = vrshrn_n_s32(q2s32, 14);  in IADST8X8_1D()
    315  q6s32 = vmull_s16(d3s16, d30s16);  in IADST8X8_1D()
    317  q0s32 = vmull_s16(d3s16, d31s16);  in IADST8X8_1D()
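The hits in these two vp9 inverse-transform files center on one fixed-point butterfly idiom: a constant such as cospi_20_64 or sinpi_1_9 is splatted into a D register with vdup_n_s16, the 16-bit coefficient rows are widened into 32-bit accumulators with vmull_s16/vmlsl_s16/vmlal_s16, and the results are rounded back to 16 bits with vrshrn_n_s32(…, 14). A minimal sketch of that pattern follows; the helper name, the TXFM_SHIFT macro, and the parameter layout are illustrative, not taken from libvpx.

#include <arm_neon.h>

/* Stand-in for the 14-bit rounding shift seen in the
 * vrshrn_n_s32(..., 14) calls above; the name is hypothetical. */
#define TXFM_SHIFT 14

/* Hypothetical helper showing the butterfly idiom:
 *   *out0 = ROUND((a*c0 - b*c1) >> 14)
 *   *out1 = ROUND((a*c1 + b*c0) >> 14)
 * where c0/c1 play the role of constants like cospi_20_64. */
static inline void butterfly_s16(int16x4_t a, int16x4_t b, int16_t c0,
                                 int16_t c1, int16x4_t *out0,
                                 int16x4_t *out1) {
  const int16x4_t d_c0 = vdup_n_s16(c0);  /* cf. d3s16 = vdup_n_s16(cospi_20_64) */
  const int16x4_t d_c1 = vdup_n_s16(c1);

  int32x4_t q0 = vmull_s16(a, d_c0);      /* widen to 32 bits while multiplying */
  int32x4_t q1 = vmull_s16(a, d_c1);
  q0 = vmlsl_s16(q0, b, d_c1);            /* a*c0 - b*c1 */
  q1 = vmlal_s16(q1, b, d_c0);            /* a*c1 + b*c0 */

  *out0 = vrshrn_n_s32(q0, TXFM_SHIFT);   /* round-shift-narrow back to 16 bits */
  *out1 = vrshrn_n_s32(q1, TXFM_SHIFT);
}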
/external/libvpx/libvpx/vp8/encoder/arm/neon/
D | shortfdct_neon.c |
     14  int16x4_t d0s16, d1s16, d2s16, d3s16, d4s16, d5s16, d6s16, d7s16;  in vp8_short_fdct4x4_neon() local
     37  d3s16 = vld1_s16(input);  in vp8_short_fdct4x4_neon()
     40  v2tmp3 = vtrn_s32(vreinterpret_s32_s16(d1s16), vreinterpret_s32_s16(d3s16));  in vp8_short_fdct4x4_neon()
     65  d3s16 = vshrn_n_s32(q10s32, 12);  in vp8_short_fdct4x4_neon()
     69  v2tmp3 = vtrn_s32(vreinterpret_s32_s16(d1s16), vreinterpret_s32_s16(d3s16));  in vp8_short_fdct4x4_neon()
    101  d3s16 = vshrn_n_s32(q12s32, 16);  in vp8_short_fdct4x4_neon()
    104  q1s16 = vcombine_s16(d2s16, d3s16);  in vp8_short_fdct4x4_neon()
    112  int16x4_t d0s16, d1s16, d2s16, d3s16, d4s16, d5s16, d6s16, d7s16;  in vp8_short_fdct8x4_neon() local
    179  d3s16 = vshrn_n_s32(q11s32, 12);  in vp8_short_fdct8x4_neon()
    181  q1s16 = vcombine_s16(d2s16, d3s16);  in vp8_short_fdct8x4_neon()
    [all …]
D | vp8_shortwalsh4x4_neon.c |
     23  int16x4_t dEmptys16, d0s16, d1s16, d2s16, d3s16, d4s16, d5s16, d6s16, d7s16;  in vp8_short_walsh4x4_neon() local
     40  d3s16 = vld1_s16(input);  in vp8_short_walsh4x4_neon()
     43  v2tmp3 = vtrn_s32(vreinterpret_s32_s16(d1s16), vreinterpret_s32_s16(d3s16));  in vp8_short_walsh4x4_neon()
     63  d3s16 = vsub_s16(d4s16, d5s16);  in vp8_short_walsh4x4_neon()
     70  v2tmp2 = vtrn_s32(vreinterpret_s32_s16(d1s16), vreinterpret_s32_s16(d3s16));  in vp8_short_walsh4x4_neon()
    110  d3s16 = vshrn_n_s32(q11s32, 3);  in vp8_short_walsh4x4_neon()
    113  q1s16 = vcombine_s16(d2s16, d3s16);  in vp8_short_walsh4x4_neon()
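In these vp8 encoder kernels d3s16 holds the last of the four int16 rows loaded from input with vld1_s16; the rows are then transposed in registers through the paired vtrn_s32/vtrn_s16 calls and finally packed into Q registers with vcombine_s16. A self-contained sketch of that 4x4 transpose idiom is below; the helper name is hypothetical, since the libvpx functions inline the sequence rather than calling a helper.

#include <arm_neon.h>

/* Transpose a 4x4 block of int16 held in four D registers,
 * using the vtrn_s32/vtrn_s16 pairing seen in the hits above. */
static inline void transpose_4x4_s16(int16x4_t *r0, int16x4_t *r1,
                                     int16x4_t *r2, int16x4_t *r3) {
  /* Swap 32-bit lane pairs between rows 0/2 and rows 1/3. */
  int32x2x2_t t02 = vtrn_s32(vreinterpret_s32_s16(*r0), vreinterpret_s32_s16(*r2));
  int32x2x2_t t13 = vtrn_s32(vreinterpret_s32_s16(*r1), vreinterpret_s32_s16(*r3));

  /* Swap 16-bit lanes to finish the transpose. */
  int16x4x2_t t01 = vtrn_s16(vreinterpret_s16_s32(t02.val[0]),
                             vreinterpret_s16_s32(t13.val[0]));
  int16x4x2_t t23 = vtrn_s16(vreinterpret_s16_s32(t02.val[1]),
                             vreinterpret_s16_s32(t13.val[1]));

  *r0 = t01.val[0];  /* column 0 of the original block */
  *r1 = t01.val[1];  /* column 1 */
  *r2 = t23.val[0];  /* column 2 */
  *r3 = t23.val[1];  /* column 3 */
}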