/external/libvpx/libvpx/vpx_dsp/arm/ |
D | fdct16x16_neon.c |
      92   b[0] = vshlq_n_s16(vaddq_s16(a[0], a[15]), 2);   in cross_input()
      93   b[1] = vshlq_n_s16(vaddq_s16(a[1], a[14]), 2);   in cross_input()
      94   b[2] = vshlq_n_s16(vaddq_s16(a[2], a[13]), 2);   in cross_input()
      95   b[3] = vshlq_n_s16(vaddq_s16(a[3], a[12]), 2);   in cross_input()
      96   b[4] = vshlq_n_s16(vaddq_s16(a[4], a[11]), 2);   in cross_input()
      97   b[5] = vshlq_n_s16(vaddq_s16(a[5], a[10]), 2);   in cross_input()
      98   b[6] = vshlq_n_s16(vaddq_s16(a[6], a[9]), 2);   in cross_input()
      99   b[7] = vshlq_n_s16(vaddq_s16(a[7], a[8]), 2);   in cross_input()
     101   b[8] = vshlq_n_s16(vsubq_s16(a[7], a[8]), 2);   in cross_input()
     102   b[9] = vshlq_n_s16(vsubq_s16(a[6], a[9]), 2);   in cross_input()
     [all …]
|
D | fwd_txfm_neon.c |
      24   int16x8_t input_0 = vshlq_n_s16(vld1q_s16(&input[0 * stride]), 2);   in vpx_fdct8x8_neon()
      25   int16x8_t input_1 = vshlq_n_s16(vld1q_s16(&input[1 * stride]), 2);   in vpx_fdct8x8_neon()
      26   int16x8_t input_2 = vshlq_n_s16(vld1q_s16(&input[2 * stride]), 2);   in vpx_fdct8x8_neon()
      27   int16x8_t input_3 = vshlq_n_s16(vld1q_s16(&input[3 * stride]), 2);   in vpx_fdct8x8_neon()
      28   int16x8_t input_4 = vshlq_n_s16(vld1q_s16(&input[4 * stride]), 2);   in vpx_fdct8x8_neon()
      29   int16x8_t input_5 = vshlq_n_s16(vld1q_s16(&input[5 * stride]), 2);   in vpx_fdct8x8_neon()
      30   int16x8_t input_6 = vshlq_n_s16(vld1q_s16(&input[6 * stride]), 2);   in vpx_fdct8x8_neon()
      31   int16x8_t input_7 = vshlq_n_s16(vld1q_s16(&input[7 * stride]), 2);   in vpx_fdct8x8_neon()
|
D | fdct32x32_neon.c |
      48   src[index1] = vshlq_n_s16(vsubq_s16(src[index0], src[index1]), 2); \
      94   b[0] = vshlq_n_s16(c[0], 2);   in load()
      95   b[1] = vshlq_n_s16(c[1], 2);   in load()
      96   b[2] = vshlq_n_s16(c[2], 2);   in load()
      97   b[3] = vshlq_n_s16(c[3], 2);   in load()
      98   b[4] = vshlq_n_s16(c[4], 2);   in load()
      99   b[5] = vshlq_n_s16(c[5], 2);   in load()
     100   b[6] = vshlq_n_s16(c[6], 2);   in load()
     101   b[7] = vshlq_n_s16(c[7], 2);   in load()
     138   b[8] = vshlq_n_s16(c[0], 2);   in load()
     [all …]
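All three libvpx forward transforms above pre-multiply the residual by 4 (vshlq_n_s16(x, 2)) so the fixed-point DCT keeps precision: fdct8x8 scales each row as it is loaded, while fdct16x16 and fdct32x32 fold the shift into the first add/subtract stage (cross_input) or the load() helper. A minimal sketch of the load-and-scale step; the helper name load_and_scale_rows is made up, not from libvpx:

#include <arm_neon.h>

/* Hypothetical helper mirroring the vpx_fdct8x8_neon() pattern: load 8 rows
 * of 8 residuals and pre-scale by 4 with vshlq_n_s16(x, 2). The shift wraps
 * rather than saturates, but residuals of 8-bit content fit in 9 bits, so
 * the multiply by 4 cannot overflow int16. */
static void load_and_scale_rows(const int16_t *input, int stride,
                                int16x8_t rows[8]) {
  for (int i = 0; i < 8; ++i) {
    rows[i] = vshlq_n_s16(vld1q_s16(&input[i * stride]), 2);
  }
}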
|
/external/libhevc/common/arm/ |
D | ihevc_inter_pred_luma_copy_w16out.s |
     163   vshl.i16 q0,q8,#6    @vshlq_n_s16(tmp, 6)
     164   vshl.i16 q1,q9,#6    @vshlq_n_s16(tmp, 6)
     165   vshl.i16 q2,q10,#6   @vshlq_n_s16(tmp, 6)
     166   vshl.i16 q3,q11,#6   @vshlq_n_s16(tmp, 6)
     204   vshl.i16 q0,q8,#6    @vshlq_n_s16(tmp, 6)
     207   vshl.i16 q1,q9,#6    @vshlq_n_s16(tmp, 6)
     210   vshl.i16 q2,q10,#6   @vshlq_n_s16(tmp, 6)
     215   vshl.i16 q3,q11,#6   @vshlq_n_s16(tmp, 6)
     238   vshl.i16 q0,q8,#6    @vshlq_n_s16(tmp, 6)
     239   vshl.i16 q1,q9,#6    @vshlq_n_s16(tmp, 6)
     [all …]
|
D | ihevc_inter_pred_chroma_copy_w16out.s |
     225   vshl.i16 q0,q8,#6    @vshlq_n_s16(tmp, 6)
     226   vshl.i16 q1,q9,#6    @vshlq_n_s16(tmp, 6)
     227   vshl.i16 q2,q10,#6   @vshlq_n_s16(tmp, 6)
     228   vshl.i16 q3,q11,#6   @vshlq_n_s16(tmp, 6)
     266   vshl.i16 q0,q8,#6    @vshlq_n_s16(tmp, 6)
     269   vshl.i16 q1,q9,#6    @vshlq_n_s16(tmp, 6)
     272   vshl.i16 q2,q10,#6   @vshlq_n_s16(tmp, 6)
     277   vshl.i16 q3,q11,#6   @vshlq_n_s16(tmp, 6)
     300   vshl.i16 q0,q8,#6    @vshlq_n_s16(tmp, 6)
     301   vshl.i16 q1,q9,#6    @vshlq_n_s16(tmp, 6)
     [all …]
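Both copy_w16out kernels carry the intrinsic only in comments: the assembly widens 8-bit source pixels to 16 bits and multiplies them by 64 (vshl.i16 #6, i.e. vshlq_n_s16(tmp, 6)) to produce HEVC's 16-bit intermediate prediction samples. A rough C-intrinsics equivalent of a single 8-pixel row; copy_row_w16out is an illustrative name, not a function from libhevc:

#include <arm_neon.h>
#include <stdint.h>

/* Widen 8 unsigned 8-bit pixels to signed 16 bits, then scale by 64,
 * matching the vshl.i16 q0,q8,#6 step annotated above. */
static void copy_row_w16out(const uint8_t *src, int16_t *dst) {
  const uint8x8_t pixels = vld1_u8(src);
  const int16x8_t tmp = vreinterpretq_s16_u16(vmovl_u8(pixels));
  vst1q_s16(dst, vshlq_n_s16(tmp, 6));
}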
|
/external/libaom/libaom/aom_dsp/arm/ |
D | fwd_txfm_neon.c |
     115   int16x8_t input_0 = vshlq_n_s16(vld1q_s16(&input[0 * stride]), 2);   in aom_fdct8x8_neon()
     116   int16x8_t input_1 = vshlq_n_s16(vld1q_s16(&input[1 * stride]), 2);   in aom_fdct8x8_neon()
     117   int16x8_t input_2 = vshlq_n_s16(vld1q_s16(&input[2 * stride]), 2);   in aom_fdct8x8_neon()
     118   int16x8_t input_3 = vshlq_n_s16(vld1q_s16(&input[3 * stride]), 2);   in aom_fdct8x8_neon()
     119   int16x8_t input_4 = vshlq_n_s16(vld1q_s16(&input[4 * stride]), 2);   in aom_fdct8x8_neon()
     120   int16x8_t input_5 = vshlq_n_s16(vld1q_s16(&input[5 * stride]), 2);   in aom_fdct8x8_neon()
     121   int16x8_t input_6 = vshlq_n_s16(vld1q_s16(&input[6 * stride]), 2);   in aom_fdct8x8_neon()
     122   int16x8_t input_7 = vshlq_n_s16(vld1q_s16(&input[7 * stride]), 2);   in aom_fdct8x8_neon()
|
/external/libvpx/libvpx/vp8/encoder/arm/neon/ |
D | shortfdct_neon.c |
     153   q11s16 = vshlq_n_s16(q11s16, 3);   in vp8_short_fdct8x4_neon()
     154   q12s16 = vshlq_n_s16(q12s16, 3);   in vp8_short_fdct8x4_neon()
     155   q13s16 = vshlq_n_s16(q13s16, 3);   in vp8_short_fdct8x4_neon()
     156   q14s16 = vshlq_n_s16(q14s16, 3);   in vp8_short_fdct8x4_neon()
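VP8's short FDCT scales its intermediate rows by 8 (vshlq_n_s16(x, 3)). Worth noting: this intrinsic is a plain wrapping shift, not a saturating one, so kernels like these rely on the input range making overflow impossible. A small contrast with the saturating form, vqshlq_n_s16, using an arbitrary example value:

#include <arm_neon.h>

/* Plain shift: each lane is multiplied by 8 modulo 2^16 (wraps on overflow). */
static int16x8_t scale_by_8(int16x8_t v)            { return vshlq_n_s16(v, 3); }
/* Saturating shift: lanes that would overflow clamp to INT16_MAX/INT16_MIN. */
static int16x8_t scale_by_8_saturating(int16x8_t v) { return vqshlq_n_s16(v, 3); }
/* Example: a lane holding 5000 becomes -25536 with the plain shift
 * (40000 wraps mod 65536) but 32767 with the saturating one. */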
|
/external/webp/src/dsp/ |
D | lossless_enc_neon.c | 110 const int16x8_t B = vshlq_n_s16(vreinterpretq_s16_u8(in), 8); in TransformColor_NEON()
|
D | lossless_neon.c | 584 const int16x8_t C = vshlq_n_s16(vreinterpretq_s16_s8(B), 8); in TransformColorInverse_NEON()
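In both libwebp color-transform kernels a vector of packed bytes is reinterpreted as eight 16-bit lanes and shifted left by 8. On a little-endian target this moves the low byte of each lane into its high byte and zeroes the low byte (the original high byte is discarded), which positions a channel for the high-half fixed-point multiplies that follow. A generic sketch of just that repositioning step, not copied from libwebp:

#include <arm_neon.h>

/* Reinterpret 16 packed bytes as eight int16 lanes, then shift each lane
 * left by 8: byte 2*i ends up in the high byte of lane i, the low byte is
 * cleared, and byte 2*i+1 is dropped (little-endian lane layout assumed). */
static int16x8_t low_bytes_to_high(uint8x16_t packed) {
  return vshlq_n_s16(vreinterpretq_s16_u8(packed), 8);
}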
|
D | enc_neon.c | 315 const int16x8_t a0a1_2 = vshlq_n_s16(a0a1, 3); in FTransform_NEON()
|
/external/libgav1/libgav1/src/dsp/arm/ |
D | intrapred_directional_neon.cc | 432 shift_upsampled = vshlq_n_s16(shift_upsampled, 1); in DirectionalZone2FromLeftCol_WxH()
|
D | loop_restoration_neon.cc | 93 sum = vrsraq_n_s16(vshlq_n_s16(s_1, 7 - kInterRoundBitsHorizontal), sum, in WienerHorizontalSum()
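The Wiener horizontal filter composes two intrinsics: vrsraq_n_s16(a, b, n) adds a rounding right shift of b onto a, so the expression above pre-shifts the center sample left and then accumulates the rounded, down-shifted tap sum onto it. A toy version of that composition; the shift amounts here are illustrative, not libgav1's kInterRoundBitsHorizontal:

#include <arm_neon.h>

/* result = (center << 3) + round(sum >> 4) per lane; vrsraq_n_s16 does the
 * rounding shift-right-and-accumulate in a single instruction. */
static int16x8_t center_plus_rounded_sum(int16x8_t center, int16x8_t sum) {
  return vrsraq_n_s16(vshlq_n_s16(center, 3), sum, 4);
}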
|
/external/tensorflow/tensorflow/lite/kernels/internal/optimized/ |
D | optimized_ops.h |
    6342   vshlq_n_s16(input_value_0_1.val[0], 7);   in HardSwish()
    6344   vshlq_n_s16(input_value_0_1.val[1], 7);   in HardSwish()
    6346   vshlq_n_s16(input_value_2_3.val[0], 7);   in HardSwish()
    6348   vshlq_n_s16(input_value_2_3.val[1], 7);   in HardSwish()
    6432   vshlq_n_s16(input_value, 7);   in HardSwish()
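In the HardSwish kernel the shift by 7 moves widened 8-bit activations onto a 128x finer fixed-point scale before the saturating fixed-point arithmetic that follows. A simplified sketch of that rescaling step; the direct int8 load with no zero-point handling is a simplification, not how the TFLite kernel is structured:

#include <arm_neon.h>
#include <stdint.h>

/* Widen 8 int8 activations to int16 and multiply by 128 (vshlq_n_s16(x, 7)).
 * |x| <= 128, so 128 * 128 = 16384 still fits comfortably in int16. */
static int16x8_t widen_and_rescale(const int8_t *src) {
  return vshlq_n_s16(vmovl_s8(vld1_s8(src)), 7);
}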
|
/external/neon_2_sse/ |
D | NEON_2_SSE.h |
    1050   _NEON2SSESTORAGE int16x8_t vshlq_n_s16(int16x8_t a, __constrange(0,15) int b); // VSHL.I16 q0,q0,#0
    7734   _NEON2SSESTORAGE int16x8_t vshlq_n_s16(int16x8_t a, __constrange(0,15) int b); // VSHL.I16 q0,q0,#0
    7735   #define vshlq_n_s16 _mm_slli_epi16   macro
    7755   #define vshlq_n_u16 vshlq_n_s16
    9171   b_shift = vshlq_n_s16( b, c);
    9172   a_c = vshlq_n_s16( a, (16 - c));
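NEON_2_SSE maps the intrinsic directly onto SSE2's 16-bit immediate shift, since a constant left shift of eight 16-bit lanes is the same operation on both ISAs (and line 7755 reuses the mapping for the unsigned variant). A side-by-side sketch of the two spellings; the wrapper name shl3 is illustrative:

#if defined(__ARM_NEON) || defined(__aarch64__)
#include <arm_neon.h>
/* NEON spelling: shift eight int16 lanes left by a constant in [0, 15]. */
static int16x8_t shl3(int16x8_t v) { return vshlq_n_s16(v, 3); }
#else
#include <emmintrin.h>
/* SSE2 spelling of the same operation, per the #define at line 7735. */
static __m128i shl3(__m128i v) { return _mm_slli_epi16(v, 3); }
#endif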
|
/external/clang/test/CodeGen/ |
D | aarch64-neon-intrinsics.c | 5295 return vshlq_n_s16(a, 3); in test_vshlq_n_s16()
|
D | arm_neon_intrinsics.c | 17124 return vshlq_n_s16(a, 1); in test_vshlq_n_s16()
|