
Searched refs:vmul_s16 (Results 1 – 10 of 10) sorted by relevance

/external/libjpeg-turbo/simd/arm/
jidctfst-neon.c
128 int16x4_t tmp1 = vmul_s16(vget_high_s16(row2), quant_row2); in jsimd_idct_ifast_neon()
129 int16x4_t tmp2 = vmul_s16(vget_high_s16(row4), quant_row4); in jsimd_idct_ifast_neon()
130 int16x4_t tmp3 = vmul_s16(vget_high_s16(row6), quant_row6); in jsimd_idct_ifast_neon()
147 int16x4_t tmp4 = vmul_s16(vget_high_s16(row1), quant_row1); in jsimd_idct_ifast_neon()
148 int16x4_t tmp5 = vmul_s16(vget_high_s16(row3), quant_row3); in jsimd_idct_ifast_neon()
149 int16x4_t tmp6 = vmul_s16(vget_high_s16(row5), quant_row5); in jsimd_idct_ifast_neon()
150 int16x4_t tmp7 = vmul_s16(vget_high_s16(row7), quant_row7); in jsimd_idct_ifast_neon()
203 int16x4_t tmp1 = vmul_s16(vget_low_s16(row2), quant_row2); in jsimd_idct_ifast_neon()
204 int16x4_t tmp2 = vmul_s16(vget_low_s16(row4), quant_row4); in jsimd_idct_ifast_neon()
205 int16x4_t tmp3 = vmul_s16(vget_low_s16(row6), quant_row6); in jsimd_idct_ifast_neon()
[all …]
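The hits above all follow one dequantization step: each 8-wide coefficient row (int16x8_t) is split into 4-lane halves, and each half is multiplied element-wise against the matching quantization-table lanes. A minimal standalone sketch of that pattern, with placeholder names coef and quant rather than the libjpeg-turbo sources themselves:

#include <arm_neon.h>

/* Dequantize one 8-coefficient row: elementwise 16-bit multiply,
 * done one 4-lane half at a time as in the jsimd_idct_ifast_neon() hits. */
static inline void dequant_row(const int16_t *coef, const int16_t *quant,
                               int16x4_t *out_lo, int16x4_t *out_hi)
{
  int16x8_t row  = vld1q_s16(coef);   /* 8 DCT coefficients           */
  int16x8_t qrow = vld1q_s16(quant);  /* 8 matching quantizer entries */
  *out_lo = vmul_s16(vget_low_s16(row),  vget_low_s16(qrow));
  *out_hi = vmul_s16(vget_high_s16(row), vget_high_s16(qrow));
}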
jidctred-neon.c
264 int16x4_t z2 = vmul_s16(vget_high_s16(row2), quant_row2); in jsimd_idct_4x4_neon()
265 int16x4_t z3 = vmul_s16(vget_high_s16(row6), quant_row6); in jsimd_idct_4x4_neon()
274 int16x4_t z1 = vmul_s16(vget_high_s16(row7), quant_row7); in jsimd_idct_4x4_neon()
275 z2 = vmul_s16(vget_high_s16(row5), quant_row5); in jsimd_idct_4x4_neon()
276 z3 = vmul_s16(vget_high_s16(row3), quant_row3); in jsimd_idct_4x4_neon()
277 int16x4_t z4 = vmul_s16(vget_high_s16(row1), quant_row1); in jsimd_idct_4x4_neon()
317 int16x4_t z2 = vmul_s16(vget_low_s16(row2), quant_row2); in jsimd_idct_4x4_neon()
318 int16x4_t z3 = vmul_s16(vget_low_s16(row6), quant_row6); in jsimd_idct_4x4_neon()
327 int16x4_t z1 = vmul_s16(vget_low_s16(row7), quant_row7); in jsimd_idct_4x4_neon()
328 z2 = vmul_s16(vget_low_s16(row5), quant_row5); in jsimd_idct_4x4_neon()
[all …]
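The reduced 4x4 IDCT uses the same vmul_s16 dequantization, just split across the two column halves of the block: vget_high_s16 in one pass (lines 264 to 277) and vget_low_s16 in the other (lines 317 onward). A hedged sketch of multiplying both halves and stitching the products back into one vector; illustrative only, since the real 4x4 code keeps the halves in separate passes:

#include <arm_neon.h>

/* Multiply both 4-lane halves of a coefficient row by the quantizer row
 * and recombine the products into a single int16x8_t. */
static inline int16x8_t dequant_full_row(int16x8_t row, int16x8_t qrow)
{
  int16x4_t lo = vmul_s16(vget_low_s16(row),  vget_low_s16(qrow));
  int16x4_t hi = vmul_s16(vget_high_s16(row), vget_high_s16(qrow));
  return vcombine_s16(lo, hi);
}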
jidctint-neon.c
233 int16x4_t dcval = vshl_n_s16(vmul_s16(row0, quant_row0), PASS1_BITS); in jsimd_idct_islow_neon()
291 int16x4_t dcval = vshl_n_s16(vmul_s16(row0, quant_row0), PASS1_BITS); in jsimd_idct_islow_neon()
368 int16x4_t z2_s16 = vmul_s16(row2, quant_row2); in jsimd_idct_islow_pass1_regular()
369 int16x4_t z3_s16 = vmul_s16(row6, quant_row6); in jsimd_idct_islow_pass1_regular()
376 z2_s16 = vmul_s16(row0, quant_row0); in jsimd_idct_islow_pass1_regular()
377 z3_s16 = vmul_s16(row4, quant_row4); in jsimd_idct_islow_pass1_regular()
388 int16x4_t tmp0_s16 = vmul_s16(row7, quant_row7); in jsimd_idct_islow_pass1_regular()
389 int16x4_t tmp1_s16 = vmul_s16(row5, quant_row5); in jsimd_idct_islow_pass1_regular()
390 int16x4_t tmp2_s16 = vmul_s16(row3, quant_row3); in jsimd_idct_islow_pass1_regular()
391 int16x4_t tmp3_s16 = vmul_s16(row1, quant_row1); in jsimd_idct_islow_pass1_regular()
[all …]
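Lines 233 and 291 show the usual all-AC-zero shortcut of the slow (accurate) IDCT: when the AC coefficients in a column group are zero, pass 1 reduces to dequantizing the DC term and pre-shifting it by PASS1_BITS. A sketch of that shortcut; the PASS1_BITS value of 2 matches libjpeg's islow scaling but treat the constant here as an assumption:

#include <arm_neon.h>

#define PASS1_BITS 2  /* assumed to match jidctint's fixed-point scaling */

/* All-AC-zero case: the dequantized DC term, scaled up by PASS1_BITS,
 * is the entire pass-1 result for these four columns. */
static inline int16x4_t idct_pass1_dc_only(int16x4_t row0, int16x4_t quant_row0)
{
  return vshl_n_s16(vmul_s16(row0, quant_row0), PASS1_BITS);
}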
/external/libvpx/libvpx/vpx_dsp/arm/
vpx_convolve8_neon.h
92 sum = vqadd_s16(sum, vmul_s16(s3, filter3)); in convolve8_4()
93 sum = vqadd_s16(sum, vmul_s16(s4, filter4)); in convolve8_4()
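In the libvpx 8-tap convolution helper, each filter tap is applied with a plain 16-bit multiply and folded into the running sum with a saturating add, so intermediate overflow clamps instead of wrapping. A minimal sketch of that tap pattern; names are placeholders, not the libvpx kernel:

#include <arm_neon.h>

/* Apply two filter taps and accumulate with saturation,
 * mirroring the convolve8_4() hits above. */
static inline int16x4_t accumulate_taps(int16x4_t sum,
                                        int16x4_t s3, int16x4_t filter3,
                                        int16x4_t s4, int16x4_t filter4)
{
  sum = vqadd_s16(sum, vmul_s16(s3, filter3));
  sum = vqadd_s16(sum, vmul_s16(s4, filter4));
  return sum;
}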
/external/libaom/libaom/aom_dsp/simd/
v64_intrinsics_arm.h
286 vmul_s16(vreinterpret_s16_s64(x), vreinterpret_s16_s64(y))); in v64_mullo_s16()
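libaom's portable SIMD layer represents a 64-bit vector as a single int64x1_t, so its v64_mullo_s16() has to reinterpret both operands as four signed 16-bit lanes before calling vmul_s16, then cast the product back. A sketch of that wrapper shape; the v64 typedef is an assumption taken from the cast shown in the hit:

#include <arm_neon.h>

typedef int64x1_t v64;  /* assumption: matches libaom's ARM v64 type */

/* Lane-wise low 16-bit multiply on a 64-bit vector. */
static inline v64 v64_mullo_s16_sketch(v64 x, v64 y)
{
  return vreinterpret_s64_s16(
      vmul_s16(vreinterpret_s16_s64(x), vreinterpret_s16_s64(y)));
}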
/external/neon_2_sse/
NEON_2_SSE.h
537 _NEON2SSESTORAGE int16x4_t vmul_s16(int16x4_t a, int16x4_t b); // VMUL.I16 d0,d0,d0
3535 _NEON2SSESTORAGE int16x4_t vmul_s16(int16x4_t a, int16x4_t b); // VMUL.I16 d0,d0,d0
3536 #define vmul_s16 vmul_u16 macro
13605 return vmul_s16(a, b16x4); in vmul_n_s16()
13630 return vmul_s16(a, b16x4); in vmul_n_u16()
13690 return vmul_s16(a, b16x4);
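Two things show up in the NEON_2_SSE hits: the header maps vmul_s16 straight onto vmul_u16 (line 3536), which is valid because the low 16 bits of a product are the same for signed and unsigned operands, and it builds vmul_n_s16 out of vmul_s16 by broadcasting the scalar first (lines 13605 onward). A sketch of that second pattern:

#include <arm_neon.h>

/* Multiply every lane by a scalar: broadcast, then lane-wise multiply.
 * Same shape as the vmul_n_s16() fallback in NEON_2_SSE.h. */
static inline int16x4_t mul_by_scalar(int16x4_t a, int16_t b)
{
  int16x4_t b16x4 = vdup_n_s16(b);  /* replicate b into all four lanes */
  return vmul_s16(a, b16x4);
}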
/external/llvm-project/clang/test/CodeGen/
aarch64-neon-intrinsics.c
288 return vmul_s16(v1, v2); in test_vmul_s16()
arm_neon_intrinsics.c
8350 return vmul_s16(a, b); in test_vmul_s16()
/external/clang/test/CodeGen/
arm_neon_intrinsics.c
10232 return vmul_s16(a, b); in test_vmul_s16()
aarch64-neon-intrinsics.c
285 return vmul_s16(v1, v2); in test_vmul_s16()
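The clang CodeGen tests simply wrap the intrinsic in a trivial function and check that it lowers to a lane-wise 16-bit vector multiply (VMUL.I16 on 32-bit ARM, as the NEON_2_SSE prototype comment above also notes). A matching standalone wrapper looks like:

#include <arm_neon.h>

/* Same shape as test_vmul_s16(): lane-wise signed 16-bit multiply. */
int16x4_t my_vmul_s16(int16x4_t a, int16x4_t b)
{
  return vmul_s16(a, b);
}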