/external/libvpx/libvpx/vp8/common/arm/neon/
D | loopfiltersimplehorizontaledge_neon.c |
    45  q5u8 = veorq_u8(q5u8, q0u8);                        in vp8_loop_filter_simple_horizontal_edge_neon()
    46  q6u8 = veorq_u8(q6u8, q0u8);                        in vp8_loop_filter_simple_horizontal_edge_neon()
    47  q7u8 = veorq_u8(q7u8, q0u8);                        in vp8_loop_filter_simple_horizontal_edge_neon()
    48  q8u8 = veorq_u8(q8u8, q0u8);                        in vp8_loop_filter_simple_horizontal_edge_neon()
    82  q6u8 = veorq_u8(vreinterpretq_u8_s8(q11s8), q0u8);  in vp8_loop_filter_simple_horizontal_edge_neon()
    83  q7u8 = veorq_u8(vreinterpretq_u8_s8(q10s8), q0u8);  in vp8_loop_filter_simple_horizontal_edge_neon()

D | loopfiltersimpleverticaledge_neon.c |
   210  q3u8 = veorq_u8(q3u8, q0u8);                        in vp8_loop_filter_simple_vertical_edge_neon()
   211  q4u8 = veorq_u8(q4u8, q0u8);                        in vp8_loop_filter_simple_vertical_edge_neon()
   212  q5u8 = veorq_u8(q5u8, q0u8);                        in vp8_loop_filter_simple_vertical_edge_neon()
   213  q6u8 = veorq_u8(q6u8, q0u8);                        in vp8_loop_filter_simple_vertical_edge_neon()
   247  q6u8 = veorq_u8(vreinterpretq_u8_s8(q11s8), q0u8);  in vp8_loop_filter_simple_vertical_edge_neon()
   248  q7u8 = veorq_u8(vreinterpretq_u8_s8(q10s8), q0u8);  in vp8_loop_filter_simple_vertical_edge_neon()

D | mbloopfilter_neon.c |
    65  q9 = veorq_u8(q9, q0u8);                            in vp8_mbloop_filter_neon()
    66  q8 = veorq_u8(q8, q0u8);                            in vp8_mbloop_filter_neon()
    67  q7 = veorq_u8(q7, q0u8);                            in vp8_mbloop_filter_neon()
    68  q6 = veorq_u8(q6, q0u8);                            in vp8_mbloop_filter_neon()
    69  q5 = veorq_u8(q5, q0u8);                            in vp8_mbloop_filter_neon()
    70  q4 = veorq_u8(q4, q0u8);                            in vp8_mbloop_filter_neon()
   144  *q9r = veorq_u8(vreinterpretq_u8_s8(q11s8), q1u8);  in vp8_mbloop_filter_neon()
   145  *q8r = veorq_u8(vreinterpretq_u8_s8(q13s8), q1u8);  in vp8_mbloop_filter_neon()
   146  *q7r = veorq_u8(vreinterpretq_u8_s8(q15s8), q1u8);  in vp8_mbloop_filter_neon()
   147  *q6r = veorq_u8(vreinterpretq_u8_s8(q14s8), q1u8);  in vp8_mbloop_filter_neon()
   [all …]

D | vp8_loopfilter_neon.c |
    63  q8 = veorq_u8(q8, q10);                             in vp8_loop_filter_neon()
    64  q7 = veorq_u8(q7, q10);                             in vp8_loop_filter_neon()
    65  q6 = veorq_u8(q6, q10);                             in vp8_loop_filter_neon()
    66  q5 = veorq_u8(q5, q10);                             in vp8_loop_filter_neon()
   118  *q8r = veorq_u8(vreinterpretq_u8_s8(q12s8), q0u8);  in vp8_loop_filter_neon()
   119  *q7r = veorq_u8(vreinterpretq_u8_s8(q10s8), q0u8);  in vp8_loop_filter_neon()
   120  *q6r = veorq_u8(vreinterpretq_u8_s8(q11s8), q0u8);  in vp8_loop_filter_neon()
   121  *q5r = veorq_u8(vreinterpretq_u8_s8(q13s8), q0u8);  in vp8_loop_filter_neon()

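All four vp8 loop-filter files above use the same idiom: pixels are
unsigned 8-bit, but the filter arithmetic wants values centered at zero,
so each vector is XOR-ed with a register holding 0x80 in every lane
(q0u8, q10, or q1u8 in the hits) on the way into the signed domain and
XOR-ed with it again on the way out. A minimal sketch of the idiom,
assuming vdupq_n_u8(0x80) for the sign bit; the filter step here is a
placeholder, not vp8's actual filter:

    #include <arm_neon.h>

    /* p ^ 0x80 == p - 128 (mod 256): it maps [0,255] onto [-128,127],
     * the filter math runs on int8, and a second XOR with 0x80 maps
     * the result back to unsigned pixels. */
    static uint8x16_t filter_row_sketch(uint8x16_t pixels) {
      const uint8x16_t sign_bit = vdupq_n_u8(0x80);
      int8x16_t p = vreinterpretq_s8_u8(veorq_u8(pixels, sign_bit));
      p = vqaddq_s8(p, vdupq_n_s8(4));  /* placeholder filter step */
      return veorq_u8(vreinterpretq_u8_s8(p), sign_bit);
    }
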
/external/tensorflow/tensorflow/lite/kernels/internal/optimized/
D | optimized_ops.h |
   567  veorq_u8(signbit, vld1q_u8(input_data + in + 16 * 0)));  in GEMVForLstmCellWithSymmetricRange()
   569  veorq_u8(signbit, vld1q_u8(input_data + in + 16 * 1)));  in GEMVForLstmCellWithSymmetricRange()
   571  veorq_u8(signbit, vld1q_u8(input_data + in + 16 * 2)));  in GEMVForLstmCellWithSymmetricRange()
   573  veorq_u8(signbit, vld1q_u8(input_data + in + 16 * 3)));  in GEMVForLstmCellWithSymmetricRange()
   598  int8x16_t weights00 = vreinterpretq_s8_u8(veorq_u8(      in GEMVForLstmCellWithSymmetricRange()
   601  int8x16_t weights01 = vreinterpretq_s8_u8(veorq_u8(      in GEMVForLstmCellWithSymmetricRange()
   604  int8x16_t weights02 = vreinterpretq_s8_u8(veorq_u8(      in GEMVForLstmCellWithSymmetricRange()
   607  int8x16_t weights03 = vreinterpretq_s8_u8(veorq_u8(      in GEMVForLstmCellWithSymmetricRange()
   610  int8x16_t weights10 = vreinterpretq_s8_u8(veorq_u8(      in GEMVForLstmCellWithSymmetricRange()
   613  int8x16_t weights11 = vreinterpretq_s8_u8(veorq_u8(      in GEMVForLstmCellWithSymmetricRange()
   [all …]

/external/tensorflow/tensorflow/contrib/lite/kernels/internal/optimized/
D | optimized_ops.h |
   689  veorq_u8(signbit, vld1q_u8(input_data + in + 16 * 0)));  in GEMVForLstmCellWithSymmetricRange()
   691  veorq_u8(signbit, vld1q_u8(input_data + in + 16 * 1)));  in GEMVForLstmCellWithSymmetricRange()
   693  veorq_u8(signbit, vld1q_u8(input_data + in + 16 * 2)));  in GEMVForLstmCellWithSymmetricRange()
   695  veorq_u8(signbit, vld1q_u8(input_data + in + 16 * 3)));  in GEMVForLstmCellWithSymmetricRange()
   720  int8x16_t weights00 = vreinterpretq_s8_u8(veorq_u8(      in GEMVForLstmCellWithSymmetricRange()
   723  int8x16_t weights01 = vreinterpretq_s8_u8(veorq_u8(      in GEMVForLstmCellWithSymmetricRange()
   726  int8x16_t weights02 = vreinterpretq_s8_u8(veorq_u8(      in GEMVForLstmCellWithSymmetricRange()
   729  int8x16_t weights03 = vreinterpretq_s8_u8(veorq_u8(      in GEMVForLstmCellWithSymmetricRange()
   732  int8x16_t weights10 = vreinterpretq_s8_u8(veorq_u8(      in GEMVForLstmCellWithSymmetricRange()
   735  int8x16_t weights11 = vreinterpretq_s8_u8(veorq_u8(      in GEMVForLstmCellWithSymmetricRange()
   [all …]

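In GEMVForLstmCellWithSymmetricRange (present in both the current
tensorflow/lite tree and the older contrib/lite copy), quantized values
are stored as uint8 but the kernel wants int8 with a range symmetric
around zero. Because the zero point is 128, XOR-ing with a vector of
0x80 (signbit in the hits) converts uint8 to the equivalent int8 in one
instruction, with no widening subtract. A sketch of the load-and-convert
step, with an illustrative helper name:

    #include <arm_neon.h>

    /* Load 16 quantized uint8 values and recenter them at zero:
     * x ^ 0x80 == x - 128 (mod 256), i.e. the int8 value directly. */
    static int8x16_t load_recentered(const uint8_t* data) {
      const uint8x16_t signbit = vdupq_n_u8(0x80);
      return vreinterpretq_s8_u8(veorq_u8(signbit, vld1q_u8(data)));
    }
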
/external/scrypt/lib/crypto/
D | crypto_scrypt-neon.c |
    78  D[i] = veorq_u8(D[i], S[i]);                        in blkxor()

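Here veorq_u8 is a plain bitwise XOR rather than a sign flip: scrypt's
blkxor mixes one block into another 16 bytes per instruction. A sketch
of the loop the hit comes from; the signature and vector count are
assumptions based on the one visible line:

    #include <arm_neon.h>
    #include <stddef.h>

    /* XOR n_vecs 16-byte vectors of S into D in place. */
    static void blkxor_sketch(uint8x16_t* D, const uint8x16_t* S,
                              size_t n_vecs) {
      for (size_t i = 0; i < n_vecs; i++) {
        D[i] = veorq_u8(D[i], S[i]);
      }
    }
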
/external/clang/test/CodeGen/
D | aarch64-neon-3v.c |
   298  return veorq_u8(a, b);                              in test_veorq_u8()

D | arm_neon_intrinsics.c |
  3162  return veorq_u8(a, b);                              in test_veorq_u8()

/external/gemmlowp/internal/
D | pack_neon.h |
   286  src_lines[i] = veorq_u8(src_lines[i], sign_bit_dup);  in Pack()

/external/webp/src/dsp/
D | dec_neon.c |
   442  return vreinterpretq_s8_u8(veorq_u8(v, sign_bit));                  in FlipSign_NEON()
   755  const uint8x16_t complex_lf_mask = veorq_u8(simple_lf_mask, mask);  in DoFilter4_NEON()
   827  const uint8x16_t complex_lf_mask = veorq_u8(simple_lf_mask, mask);  in DoFilter6_NEON()

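dec_neon.c uses the intrinsic two ways. FlipSign_NEON is the familiar
0x80 sign flip. The DoFilter4/DoFilter6 hits use XOR as a set difference
on lane masks: every lane of a NEON comparison mask is either 0x00 or
0xFF, and since simple_lf_mask appears to be derived from mask (making
it a per-lane subset), XOR-ing the two selects exactly the lanes that
still need the complex filter. A sketch of that mask split, names taken
from the hits above:

    #include <arm_neon.h>

    /* For all-0x00/all-0xFF lane masks where simple is a subset of
     * full, simple ^ full == full AND NOT simple: the leftover lanes. */
    static uint8x16_t complex_mask_sketch(uint8x16_t full_mask,
                                          uint8x16_t simple_mask) {
      return veorq_u8(simple_mask, full_mask);
    }
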
/external/scrypt/patches/
D | arm_neon.patch |
   210  + D[i] = veorq_u8(D[i], S[i]);

/external/neon_2_sse/
D | NEON_2_SSE.h |
  2155  _NEON2SSESTORAGE uint8x16_t veorq_u8(uint8x16_t a, uint8x16_t b); // VEOR q0,q0,q0
 15358  _NEON2SSESTORAGE uint8x16_t veorq_u8(uint8x16_t a, uint8x16_t b); // VEOR q0,q0,q0
 15359  #define veorq_u8 _mm_xor_si128                      macro

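The NEON_2_SSE mapping is exact because both instructions are a 128-bit
bitwise XOR: on x86 the header defines veorq_u8 as SSE2's _mm_xor_si128,
so the same call compiles to VEOR/EOR on ARM and PXOR on x86. An
illustrative portability sketch built on that same define:

    #if defined(__ARM_NEON)
    #include <arm_neon.h>
    #else
    #include <emmintrin.h>
    typedef __m128i uint8x16_t;       /* stand-in for the NEON type */
    #define veorq_u8 _mm_xor_si128    /* same mapping NEON_2_SSE.h uses */
    #endif

    /* 128-bit XOR that compiles on either architecture. */
    static uint8x16_t xor128(uint8x16_t a, uint8x16_t b) {
      return veorq_u8(a, b);
    }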