/external/clang/test/CodeGen/ |
D | aarch64-neon-tbl.c |
    12   int8x8_t test_vtbl1_s8(int8x8_t a, int8x8_t b) {  in test_vtbl1_s8()
    19   int8x8_t test_vqtbl1_s8(int8x16_t a, int8x8_t b) {  in test_vqtbl1_s8()
    41   int8x8_t test_vtbl2_s8(int8x8x2_t a, int8x8_t b) {  in test_vtbl2_s8()
    62   int8x8_t test_vqtbl2_s8(int8x16x2_t a, int8x8_t b) {  in test_vqtbl2_s8()
    88   int8x8_t test_vtbl3_s8(int8x8x3_t a, int8x8_t b) {  in test_vtbl3_s8()
    112  int8x8_t test_vqtbl3_s8(int8x16x3_t a, int8x8_t b) {  in test_vqtbl3_s8()
    141  int8x8_t test_vtbl4_s8(int8x8x4_t a, int8x8_t b) {  in test_vtbl4_s8()
    168  int8x8_t test_vqtbl4_s8(int8x16x4_t a, int8x8_t b) {  in test_vqtbl4_s8()
    261  int8x8_t test_vtbx1_s8(int8x8_t a, int8x8_t b, int8x8_t c) {  in test_vtbx1_s8()
    283  int8x8_t test_vtbx2_s8(int8x8_t a, int8x8x2_t b, int8x8_t c) {  in test_vtbx2_s8()
    [all …]
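The vtbl/vqtbl/vtbx tests above exercise NEON's byte table-lookup intrinsics. As a hedged, standalone illustration (not the test file's contents; the function name is hypothetical), a single-register lookup looks like this:

    #include <arm_neon.h>

    /* Permute the 8 lanes of `table` according to `idx`; any index >= 8
     * yields 0 in that output lane (vtbx would keep the original lane). */
    int8x8_t remap_bytes(int8x8_t table, int8x8_t idx) {
        return vtbl1_s8(table, idx);  /* VTBL.8 on AArch32, TBL on AArch64 */
    }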
|
D | aarch64-neon-misc.c |
    13    uint8x8_t test_vceqz_s8(int8x8_t a) {  in test_vceqz_s8()
    261   uint8x8_t test_vcgez_s8(int8x8_t a) {  in test_vcgez_s8()
    377   uint8x8_t test_vclez_s8(int8x8_t a) {  in test_vclez_s8()
    493   uint8x8_t test_vcgtz_s8(int8x8_t a) {  in test_vcgtz_s8()
    609   uint8x8_t test_vcltz_s8(int8x8_t a) {  in test_vcltz_s8()
    724   int8x8_t test_vrev16_s8(int8x8_t a) {  in test_vrev16_s8()
    766   int8x8_t test_vrev32_s8(int8x8_t a) {  in test_vrev32_s8()
    850   int8x8_t test_vrev64_s8(int8x8_t a) {  in test_vrev64_s8()
    976   int16x4_t test_vpaddl_s8(int8x8_t a) {  in test_vpaddl_s8()
    1079  int16x4_t test_vpadal_s8(int16x4_t a, int8x8_t b) {  in test_vpadal_s8()
    [all …]
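These hits cover the compare-with-zero, lane-reverse, and pairwise widening-add families. A minimal sketch of two of them, assuming hypothetical wrapper names:

    #include <arm_neon.h>

    /* Lanewise "== 0": each result lane is 0xFF where `a` is zero, else 0x00.
     * vceqz_s8 is an AArch64-only intrinsic. */
    uint8x8_t is_zero_mask(int8x8_t a) { return vceqz_s8(a); }

    /* Pairwise widening add: adjacent int8 lanes are summed into int16 lanes. */
    int16x4_t pairwise_widen_add(int8x8_t a) { return vpaddl_s8(a); }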
|
D | aarch64-neon-shifts.c |
    12  int8x8_t test_shift_vshr_smax(int8x8_t a) {  in test_shift_vshr_smax()
    31  int8x8_t test_shift_vsra_smax(int8x8_t a, int8x8_t b) {  in test_shift_vsra_smax()
|
D | arm-neon-shifts.c |
    15  int8x8_t test_shift_vshr_smax(int8x8_t a) {  in test_shift_vshr_smax()
    34  int8x8_t test_shift_vsra_smax(int8x8_t a, int8x8_t b) {  in test_shift_vsra_smax()
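Both shift test files (AArch64 and AArch32) appear to check the maximum immediate shift for int8 lanes. A standalone sketch of those two operations, with hypothetical function names:

    #include <arm_neon.h>

    /* Arithmetic shift right by 8, the largest immediate for int8 lanes:
     * every lane collapses to 0 or -1 depending on its sign bit. */
    int8x8_t shift_right_smax(int8x8_t a) { return vshr_n_s8(a, 8); }

    /* Shift-right-and-accumulate with the same maximum shift amount. */
    int8x8_t shift_accumulate_smax(int8x8_t a, int8x8_t b) {
        return vsra_n_s8(a, b, 8);
    }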
|
D | aarch64-neon-3v.c |
    10   int8x8_t test_vand_s8(int8x8_t a, int8x8_t b) {  in test_vand_s8()
    122  int8x8_t test_vorr_s8(int8x8_t a, int8x8_t b) {  in test_vorr_s8()
    234  int8x8_t test_veor_s8(int8x8_t a, int8x8_t b) {  in test_veor_s8()
    347  int8x8_t test_vbic_s8(int8x8_t a, int8x8_t b) {  in test_vbic_s8()
    475  int8x8_t test_vorn_s8(int8x8_t a, int8x8_t b) {  in test_vorn_s8()
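Besides plain AND/OR/EOR, this file covers the combined forms BIC and ORN. A short sketch (wrapper names are illustrative):

    #include <arm_neon.h>

    int8x8_t and_not(int8x8_t a, int8x8_t b) { return vbic_s8(a, b); }  /* a & ~b */
    int8x8_t or_not(int8x8_t a, int8x8_t b)  { return vorn_s8(a, b); }  /* a | ~b */
    int8x8_t xor_v(int8x8_t a, int8x8_t b)   { return veor_s8(a, b); }  /* a ^ b  */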
|
D | arm_neon_intrinsics.c |
    13   int8x8_t test_vaba_s8(int8x8_t a, int8x8_t b, int8x8_t c) {  in test_vaba_s8()
    159  int16x8_t test_vabal_s8(int16x8_t a, int8x8_t b, int8x8_t c) {  in test_vabal_s8()
    244  int8x8_t test_vabd_s8(int8x8_t a, int8x8_t b) {  in test_vabd_s8()
    404  int16x8_t test_vabdl_s8(int8x8_t a, int8x8_t b) {  in test_vabdl_s8()
    484  int8x8_t test_vabs_s8(int8x8_t a) {  in test_vabs_s8()
    553  int8x8_t test_vadd_s8(int8x8_t a, int8x8_t b) {  in test_vadd_s8()
    686  int8x8_t test_vaddhn_s16(int16x8_t a, int16x8_t b) {  in test_vaddhn_s16()
    761  int16x8_t test_vaddl_s8(int8x8_t a, int8x8_t b) {  in test_vaddl_s8()
    831  int16x8_t test_vaddw_s8(int16x8_t a, int8x8_t b) {  in test_vaddw_s8()
    887  int8x8_t test_vand_s8(int8x8_t a, int8x8_t b) {  in test_vand_s8()
    [all …]
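The first hits here are the absolute-difference family. A hedged sketch of its two common shapes, with illustrative names:

    #include <arm_neon.h>

    /* vaba: accumulate |b - c| into a, lane by lane, without widening. */
    int8x8_t abs_diff_accumulate(int8x8_t a, int8x8_t b, int8x8_t c) {
        return vaba_s8(a, b, c);
    }

    /* vabdl: widening absolute difference, int8 lanes -> int16 lanes,
     * so the difference cannot overflow. */
    int16x8_t abs_diff_widen(int8x8_t a, int8x8_t b) {
        return vabdl_s8(a, b);
    }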
|
D | aarch64-neon-perm.c |
    10    int8x8_t test_vuzp1_s8(int8x8_t a, int8x8_t b) {  in test_vuzp1_s8()
    157   int8x8_t test_vuzp2_s8(int8x8_t a, int8x8_t b) {  in test_vuzp2_s8()
    304   int8x8_t test_vzip1_s8(int8x8_t a, int8x8_t b) {  in test_vzip1_s8()
    451   int8x8_t test_vzip2_s8(int8x8_t a, int8x8_t b) {  in test_vzip2_s8()
    598   int8x8_t test_vtrn1_s8(int8x8_t a, int8x8_t b) {  in test_vtrn1_s8()
    745   int8x8_t test_vtrn2_s8(int8x8_t a, int8x8_t b) {  in test_vtrn2_s8()
    909   int8x8x2_t test_vuzp_s8(int8x8_t a, int8x8_t b) {  in test_vuzp_s8()
    1373  int8x8x2_t test_vzip_s8(int8x8_t a, int8x8_t b) {  in test_vzip_s8()
    1837  int8x8x2_t test_vtrn_s8(int8x8_t a, int8x8_t b) {  in test_vtrn_s8()
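The perm tests pair the AArch64 two-operand permutes (zip1/zip2, uzp1/uzp2, trn1/trn2) with the classic forms that return both halves at once. A standalone sketch (names are illustrative):

    #include <arm_neon.h>

    /* AArch64-only single-result permutes. */
    int8x8_t zip_low(int8x8_t a, int8x8_t b)        { return vzip1_s8(a, b); }
    int8x8_t unzip_even(int8x8_t a, int8x8_t b)     { return vuzp1_s8(a, b); }
    int8x8_t transpose_even(int8x8_t a, int8x8_t b) { return vtrn1_s8(a, b); }

    /* Classic form: both interleaved halves come back as a pair. */
    int8x8x2_t zip_both(int8x8_t a, int8x8_t b)     { return vzip_s8(a, b); }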
|
D | aarch64-neon-intrinsics.c |
    13    int8x8_t test_vadd_s8(int8x8_t v1, int8x8_t v2) {  in test_vadd_s8()
    146   int8x8_t test_vsub_s8(int8x8_t v1, int8x8_t v2) {  in test_vsub_s8()
    277   int8x8_t test_vmul_s8(int8x8_t v1, int8x8_t v2) {  in test_vmul_s8()
    403   int8x8_t test_vmla_s8(int8x8_t v1, int8x8_t v2, int8x8_t v3) {  in test_vmla_s8()
    412   int8x8_t test_vmla_s16(int16x4_t v1, int16x4_t v2, int16x4_t v3) {  in test_vmla_s16()
    524   int8x8_t test_vmls_s8(int8x8_t v1, int8x8_t v2, int8x8_t v3) {  in test_vmls_s8()
    533   int8x8_t test_vmls_s16(int16x4_t v1, int16x4_t v2, int16x4_t v3) {  in test_vmls_s16()
    743   int8x8_t test_vaba_s8(int8x8_t v1, int8x8_t v2, int8x8_t v3) {  in test_vaba_s8()
    870   int8x8_t test_vabd_s8(int8x8_t v1, int8x8_t v2) {  in test_vabd_s8()
    1023  int8x8_t test_vbsl_s8(uint8x8_t v1, int8x8_t v2, int8x8_t v3) {  in test_vbsl_s8()
    [all …]
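Among the hits are the multiply-accumulate and bitwise-select intrinsics. A minimal sketch with hypothetical names:

    #include <arm_neon.h>

    /* vmla: lanewise v1 + v2 * v3, no widening. */
    int8x8_t mul_accumulate(int8x8_t v1, int8x8_t v2, int8x8_t v3) {
        return vmla_s8(v1, v2, v3);
    }

    /* vbsl: for each bit, pick it from `t` where `mask` is 1, else from `f`. */
    int8x8_t bit_select(uint8x8_t mask, int8x8_t t, int8x8_t f) {
        return vbsl_s8(mask, t, f);
    }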
|
D | aarch64-neon-across.c |
    12   int16_t test_vaddlv_s8(int8x8_t a) {  in test_vaddlv_s8()
    98   int8_t test_vmaxv_s8(int8x8_t a) {  in test_vmaxv_s8()
    188  int8_t test_vminv_s8(int8x8_t a) {  in test_vminv_s8()
    278  int8_t test_vaddv_s8(int8x8_t a) {  in test_vaddv_s8()
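These are the AArch64 across-lane reductions over a single 64-bit vector. A compact sketch (wrapper names are illustrative):

    #include <arm_neon.h>
    #include <stdint.h>

    int16_t sum_all_widened(int8x8_t a) { return vaddlv_s8(a); }  /* SADDLV */
    int8_t  max_all(int8x8_t a)         { return vmaxv_s8(a); }   /* SMAXV  */
    int8_t  min_all(int8x8_t a)         { return vminv_s8(a); }   /* SMINV  */
    int8_t  sum_all(int8x8_t a)         { return vaddv_s8(a); }   /* ADDV   */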
|
D | aarch64-neon-extract.c |
    12  int8x8_t test_vext_s8(int8x8_t a, int8x8_t b) {  in test_vext_s8()
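vext concatenates two vectors and extracts a contiguous byte window. A hedged sketch (the offset 3 is only an example; it must be a compile-time constant):

    #include <arm_neon.h>

    /* Result is { a[3..7], b[0..2] }. */
    int8x8_t extract_window(int8x8_t a, int8x8_t b) {
        return vext_s8(a, b, 3);
    }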
|
D | aarch64-neon-vcombine.c |
    10  int8x16_t test_vcombine_s8(int8x8_t low, int8x8_t high) {  in test_vcombine_s8()
|
D | arm-neon-vget.c |
    13  int8x8_t low_s8(int8x16_t a) {  in low_s8()
    69  int8x8_t high_s8(int8x16_t a) {  in high_s8()
|
D | aarch64-neon-vget.c |
    35   int8_t test_vget_lane_s8(int8x8_t a) {  in test_vget_lane_s8()
    258  int8x8_t test_vset_lane_s8(int8_t a, int8x8_t b) {  in test_vset_lane_s8()
|
D | aarch64-neon-vget-hilo.c |
    11   int8x8_t test_vget_high_s8(int8x16_t a) {  in test_vget_high_s8()
    109  int8x8_t test_vget_low_s8(int8x16_t a) {  in test_vget_low_s8()
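The vcombine/vget/vset entries above all move data between 64-bit halves, 128-bit vectors, and individual lanes. A standalone sketch (names are illustrative; lane indices must be constants):

    #include <arm_neon.h>
    #include <stdint.h>

    int8x16_t join_halves(int8x8_t low, int8x8_t high) { return vcombine_s8(low, high); }
    int8x8_t  low_half(int8x16_t a)                    { return vget_low_s8(a); }
    int8x8_t  high_half(int8x16_t a)                   { return vget_high_s8(a); }

    int8_t    read_lane7(int8x8_t a)                   { return vget_lane_s8(a, 7); }
    int8x8_t  write_lane0(int8_t s, int8x8_t a)        { return vset_lane_s8(s, a, 0); }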
|
/external/libgav1/libgav1/src/dsp/arm/ |
D | motion_field_projection_neon.cc |
    38  const int8x8_t reference_offset) {  in LoadDivision()
    39  const int8x8_t kOne = vcreate_s8(0x0100010001000100);  in LoadDivision()
    41  const int8x8_t t = vadd_s8(reference_offset, reference_offset);  in LoadDivision()
    45  const int8x8_t idx_low = vget_low_s8(idx);  in LoadDivision()
    46  const int8x8_t idx_high = vget_high_s8(idx);  in LoadDivision()
    74  inline int8x8_t Project_NEON(const int16x8_t delta, const int16x8_t dst_sign) {  in Project_NEON()
    90  const int8x8_t r_offsets, const int8x8_t source_reference_type8,  in GetPosition()
    91  const int8x8_t skip_r, const int8x8_t y8_floor8, const int8x8_t y8_ceiling8,  in GetPosition()
    92  const int16x8_t d_sign, const int delta, int8x8_t* const r,  in GetPosition()
    93  int8x8_t* const position_y8, int8x8_t* const position_x8,  in GetPosition()
    [all …]
|
D | common_neon.h |
    163  inline void PrintReg(const int8x8_t val, const char* name) {
    245  inline void StoreLo4(void* const buf, const int8x8_t val) {  in StoreLo4()
    295  inline int8x8_t RightShift(const int8x8_t vector) {  in RightShift()
    310  inline int8x8_t VQTbl1S8(const int8x16_t a, const uint8x8_t index) {  in VQTbl1S8()
    343  inline int8x8_t InterleaveLow32(const int8x8_t a, const int8x8_t b) {  in InterleaveLow32()
    365  inline int8x8_t InterleaveHigh32(const int8x8_t a, const int8x8_t b) {  in InterleaveHigh32()
    513  inline void Transpose8x8(int8x8_t a[8]) {  in Transpose8x8()
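InterleaveLow32/InterleaveHigh32 above interleave two D registers at 32-bit granularity. The sketch below is not libgav1's code, only one common way such a helper is built from vzip_s32 plus reinterprets:

    #include <arm_neon.h>

    /* View each int8x8_t as two 32-bit lanes and interleave the low ones,
     * giving { a[0..3], b[0..3] } as bytes. */
    static inline int8x8_t interleave_low_32(int8x8_t a, int8x8_t b) {
        const int32x2x2_t z =
            vzip_s32(vreinterpret_s32_s8(a), vreinterpret_s32_s8(b));
        return vreinterpret_s8_s32(z.val[0]);
    }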
|
D | intrapred_directional_neon.cc |
    65   inline void LoadStepwise(const uint8_t* const source, const int8x8_t left_step,  in LoadStepwise()
    66   const int8x8_t right_step, uint8x8_t* left,  in LoadStepwise()
    83   const int8x8_t max_base = vdup_n_s8(max_base_x);  in DirectionalZone1_WxH()
    86   const int8x8_t all = vcreate_s8(0x0706050403020100);  in DirectionalZone1_WxH()
    87   const int8x8_t even = vcreate_s8(0x0e0c0a0806040200);  in DirectionalZone1_WxH()
    88   const int8x8_t base_step = upsampled ? even : all;  in DirectionalZone1_WxH()
    89   const int8x8_t right_step = vadd_s8(base_step, vdup_n_s8(1));  in DirectionalZone1_WxH()
    108  const int8x8_t base_v = vadd_s8(vdup_n_s8(top_base_x), base_step);  in DirectionalZone1_WxH()
    154  const int8x8_t max_base = vdup_n_s8(max_base_x);  in DirectionalZone1_WxH()
    157  const int8x8_t all = vcreate_s8(0x0706050403020100);  in DirectionalZone1_WxH()
    [all …]
|
D | film_grain_neon.cc |
    995   const int8x8_t grain_coeff,  in WriteOverlapLine8bpp_NEON()
    996   const int8x8_t old_coeff,  in WriteOverlapLine8bpp_NEON()
    1002  const int8x8_t source_grain = vld1_s8(noise_stripe_row + x);  in WriteOverlapLine8bpp_NEON()
    1003  const int8x8_t source_old = vld1_s8(noise_stripe_row_prev + x);  in WriteOverlapLine8bpp_NEON()
    1026  const int8x8_t first_row_grain_coeff = vdup_n_s8(17);  in ConstructNoiseImageOverlap8bpp_NEON()
    1027  const int8x8_t first_row_old_coeff = vdup_n_s8(27);  in ConstructNoiseImageOverlap8bpp_NEON()
    1028  const int8x8_t second_row_grain_coeff = first_row_old_coeff;  in ConstructNoiseImageOverlap8bpp_NEON()
    1029  const int8x8_t second_row_old_coeff = first_row_grain_coeff;  in ConstructNoiseImageOverlap8bpp_NEON()
    1062  const int8x8_t first_row_grain_coeff = vdup_n_s8(22);  in ConstructNoiseImageOverlap8bpp_NEON()
    1063  const int8x8_t first_row_old_coeff = vdup_n_s8(23);  in ConstructNoiseImageOverlap8bpp_NEON()
|
/external/tensorflow/tensorflow/lite/kernels/internal/optimized/integer_ops/ |
D | depthwise_conv.h |
    61   const int8x8_t input_s8 = vld1_s8(input_ptr);
    91   const int8x8_t filter_s8 = vld1_s8(filter_ptr);
    103  int8x8_t input_s8[2];
    136  const int8x8_t input_s8 = vld1_s8(input_ptr);
    158  const int8x8_t filter_s8 = vld1_s8(filter_ptr);
    170  const int8x8_t input_s8 = vld1_s8(input_ptr);
    197  int8x8_t input_s8 = vdup_n_s8(0);
    228  const int8x8_t filter_s8 = vld1_s8(filter_ptr + 8 * i);
    240  int8x8_t input_s8 = vdup_n_s8(0);
    271  int8x8_t input_s8 = vdup_n_s8(0);
    [all …]
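The hits show int8 activations and filters being loaded with vld1_s8 or splatted with vdup_n_s8. A common follow-up step in quantized kernels (an assumption for illustration, not the TFLite code itself) is to widen to int16 and fold in the zero-point offset before accumulating:

    #include <arm_neon.h>
    #include <stdint.h>

    /* Load 8 quantized int8 values, widen to int16, add the zero-point offset. */
    static inline int16x8_t load_and_offset(const int8_t* ptr, int16_t offset) {
        const int8x8_t raw = vld1_s8(ptr);
        return vaddq_s16(vmovl_s8(raw), vdupq_n_s16(offset));
    }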
|
D | add.h |
    39   const int8x8_t output_activation_min_vector =  in AddElementwise()
    41   const int8x8_t output_activation_max_vector =  in AddElementwise()
    44   const int8x8_t input1_val_original = vld1_s8(input1_data + i);  in AddElementwise()
    45   const int8x8_t input2_val_original = vld1_s8(input2_data + i);  in AddElementwise()
    86   const int8x8_t clamped =  in AddElementwise()
    134  const int8x8_t output_activation_min_vector =  in AddScalarBroadcast()
    136  const int8x8_t output_activation_max_vector =  in AddScalarBroadcast()
    140  const int8x8_t input1_val_original = vdup_n_s8(input1_data);  in AddScalarBroadcast()
    157  const int8x8_t input2_val_original = vld1_s8(input2_data + i);  in AddScalarBroadcast()
    182  const int8x8_t clamped =  in AddScalarBroadcast()
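AddElementwise/AddScalarBroadcast load int8 operands and clamp the result to the fused activation range before storing. A hedged sketch of that final clamp-and-store step (helper name is hypothetical):

    #include <arm_neon.h>
    #include <stdint.h>

    /* Clamp eight int8 results to [act_min, act_max] and write them out. */
    static inline void clamp_and_store(int8_t* out, int8x8_t v,
                                       int8_t act_min, int8_t act_max) {
        const int8x8_t lo = vdup_n_s8(act_min);
        const int8x8_t hi = vdup_n_s8(act_max);
        vst1_s8(out, vmin_s8(vmax_s8(v, lo), hi));
    }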
|
D | softmax.h |
    72   int8x8_t max8 = vmax_s8(vget_low_s8(max16), vget_high_s8(max16));  in Softmax()
    77   int8x8_t max4 = vmax_s8(max8, vext_s8(max8, max8, 4));  in Softmax()
    78   int8x8_t max2 = vmax_s8(max4, vext_s8(max4, max4, 2));  in Softmax()
    79   int8x8_t max1 = vpmax_s8(max2, max2);  in Softmax()
    204  int8x8_t output_s8 = vqmovn_s16(output_s16);  in Softmax()
    205  int8x8_t masked_output = vbsl_s8(mask, output_s8, vdup_n_s8(-128));  in Softmax()
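The first Softmax hits form a horizontal maximum over int8 lanes by repeatedly halving the active width. A self-contained version of that ladder (the helper name is assumed):

    #include <arm_neon.h>
    #include <stdint.h>

    static inline int8_t horizontal_max_s8(int8x16_t v) {
        int8x8_t max8 = vmax_s8(vget_low_s8(v), vget_high_s8(v)); /* 16 -> 8 lanes */
        int8x8_t max4 = vmax_s8(max8, vext_s8(max8, max8, 4));    /* fold 8 -> 4  */
        int8x8_t max2 = vmax_s8(max4, vext_s8(max4, max4, 2));    /* fold 4 -> 2  */
        int8x8_t max1 = vpmax_s8(max2, max2);                     /* fold 2 -> 1  */
        return vget_lane_s8(max1, 0);
    }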
|
D | pooling.h |
    113  int8x8_t acc_reg = vld1_s8(acc + channel);  in MaxPool()
    114  int8x8_t input_reg = vld1_s8(input_channel_ptr);  in MaxPool()
    137  int8x8_t a = vld1_s8(acc + channel);  in MaxPool()
    231  int8x8_t input_reg = vld1_s8(input_channel_ptr);  in AveragePool16()
    256  int8x8_t buf8 = vqmovn_s16(vld1q_s16(buf)); \  in AveragePool16()
    273  int8x8_t buf8 = vqmovn_s16(vld1q_s16(buf));  in AveragePool16()
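The AveragePool16 hits narrow int16 accumulators back to int8 with saturation. A minimal sketch of that step (helper name is illustrative):

    #include <arm_neon.h>
    #include <stdint.h>

    /* Saturating narrow: eight int16 accumulators -> eight int8 outputs. */
    static inline int8x8_t narrow_saturate(const int16_t* acc) {
        return vqmovn_s16(vld1q_s16(acc));
    }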
|
/external/neon_2_sse/ |
D | NEON_2_SSE.h |
    143  typedef __m64_128 int8x8_t;  typedef
    210  int8x8_t val[2];
    278  int8x8_t val[4];
    354  int8x8_t val[3];
    445  _NEON2SSESTORAGE int8x8_t vadd_s8(int8x8_t a, int8x8_t b); // VADD.I8 d0,d0,d0
    464  _NEON2SSESTORAGE int16x8_t vaddl_s8(int8x8_t a, int8x8_t b); // VADDL.S8 q0,d0,d0
    471  _NEON2SSESTORAGE int16x8_t vaddw_s8(int16x8_t a, int8x8_t b); // VADDW.S8 q0,q0,d0
    478  _NEON2SSESTORAGE int8x8_t vhadd_s8(int8x8_t a, int8x8_t b); // VHADD.S8 d0,d0,d0
    491  _NEON2SSESTORAGE int8x8_t vrhadd_s8(int8x8_t a, int8x8_t b); // VRHADD.S8 d0,d0,d0
    504  _NEON2SSESTORAGE int8x8_t vqadd_s8(int8x8_t a, int8x8_t b); // VQADD.S8 d0,d0,d0
    [all …]
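NEON_2_SSE.h re-declares the NEON types and intrinsics and implements them on top of SSE. The sketch below only illustrates the general idea, under the assumption that a 64-bit D register is carried in the low half of an SSE register; the type and function names are stand-ins, not the header's actual definitions:

    #include <emmintrin.h>  /* SSE2 */

    typedef __m128i neon_d_reg;  /* stand-in for int8x8_t; only the low 8 bytes are live */

    /* vadd_s8 maps naturally onto PADDB (lanewise 8-bit add). */
    static inline neon_d_reg emulated_vadd_s8(neon_d_reg a, neon_d_reg b) {
        return _mm_add_epi8(a, b);
    }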
|
/external/libaom/libaom/aom_dsp/arm/ |
D | loopfilter_neon.c |
    171  int8x8_t ps0_s8, ps1_s8, qs0_s8, qs1_s8, temp_s8;  in lpf_14_neon()
    172  int8x8_t op0, oq0, op1, oq1;  in lpf_14_neon()
    173  int8x8_t pq_s0, pq_s1;  in lpf_14_neon()
    174  int8x8_t filter_s8, filter1_s8, filter2_s8;  in lpf_14_neon()
    175  int8x8_t hev_8x8;  in lpf_14_neon()
    176  const int8x8_t sign_mask = vdup_n_s8(0x80);  in lpf_14_neon()
    177  const int8x8_t val_4 = vdup_n_s8(4);  in lpf_14_neon()
    178  const int8x8_t val_3 = vdup_n_s8(3);  in lpf_14_neon()
    352  int8x8_t ps0_s8, ps1_s8, qs0_s8, qs1_s8, temp_s8;  in lpf_8_neon()
    353  int8x8_t op0, oq0, op1, oq1;  in lpf_8_neon()
    [all …]
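The sign_mask = vdup_n_s8(0x80) in the hits above is the usual trick of biasing unsigned pixels into signed range so the filter can use signed saturating arithmetic. A hedged sketch of that bias and its inverse (helper names are hypothetical):

    #include <arm_neon.h>

    static inline int8x8_t pixels_to_signed(uint8x8_t px) {
        return vreinterpret_s8_u8(veor_u8(px, vdup_n_u8(0x80)));  /* x ^ 0x80 */
    }

    static inline uint8x8_t signed_to_pixels(int8x8_t v) {
        return veor_u8(vreinterpret_u8_s8(v), vdup_n_u8(0x80));   /* undo the bias */
    }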
|
/external/clang/test/CodeGenCXX/ |
D | aarch64-mangle-neon-vectors.cpp |
    15  typedef __attribute__((neon_vector_type(8))) int8_t int8x8_t;  typedef
    40  void f1(int8x8_t) {}  in f1() argument
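The mangling test declares int8x8_t through Clang's neon_vector_type attribute rather than by including <arm_neon.h>. A minimal sketch of that pattern (typedef and function names here are stand-ins to avoid clashing with the real ones):

    /* 8 lanes of signed 8-bit, declared directly via the Clang attribute. */
    typedef signed char my_int8_t;
    typedef __attribute__((neon_vector_type(8))) my_int8_t my_int8x8_t;

    /* Under the AArch64 C++ ABI a parameter of this type should mangle with the
     * builtin NEON name (__Int8x8_t) rather than as a generic vector extension. */
    void takes_neon_vector(my_int8x8_t v) { (void)v; }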
|