
Searched refs:vmaxq_u8 (Results 1 – 25 of 26) sorted by relevance


/external/XNNPACK/src/u8-maxpool/
9p8x-neon-c16.c:91 const uint8x16_t vmax018 = vmaxq_u8(vmaxq_u8(vi0, vi1), vi8); in xnn_u8_maxpool_ukernel_9p8x__neon_c16()
92 const uint8x16_t vmax23 = vmaxq_u8(vi2, vi3); in xnn_u8_maxpool_ukernel_9p8x__neon_c16()
93 const uint8x16_t vmax45 = vmaxq_u8(vi4, vi5); in xnn_u8_maxpool_ukernel_9p8x__neon_c16()
94 const uint8x16_t vmax67 = vmaxq_u8(vi6, vi7); in xnn_u8_maxpool_ukernel_9p8x__neon_c16()
96 const uint8x16_t vmax2345 = vmaxq_u8(vmax23, vmax45); in xnn_u8_maxpool_ukernel_9p8x__neon_c16()
97 const uint8x16_t vmax01678 = vmaxq_u8(vmax018, vmax67); in xnn_u8_maxpool_ukernel_9p8x__neon_c16()
98 const uint8x16_t vmax = vmaxq_u8(vmax2345, vmax01678); in xnn_u8_maxpool_ukernel_9p8x__neon_c16()
99 const uint8x16_t vout = vmaxq_u8(vminq_u8(vmax, voutput_max), voutput_min); in xnn_u8_maxpool_ukernel_9p8x__neon_c16()
114 const uint8x16_t vmax018 = vmaxq_u8(vmaxq_u8(vi0, vi1), vi8); in xnn_u8_maxpool_ukernel_9p8x__neon_c16()
115 const uint8x16_t vmax23 = vmaxq_u8(vi2, vi3); in xnn_u8_maxpool_ukernel_9p8x__neon_c16()
[all …]
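
The maxpool kernel above reduces nine input rows per output pixel with a balanced tree of vmaxq_u8 calls and then clamps the result between voutput_min and voutput_max. A minimal sketch of that reduction for one 16-byte column follows; the pointer setup, remainder handling, and function name are illustrative, not the XNNPACK ukernel itself.

    #include <arm_neon.h>
    #include <stdint.h>

    /* Max of 9 input rows over one 16-pixel column, clamped to [out_min, out_max].
       Sketch only: the real kernel also handles indirection buffers and tails. */
    static void maxpool9_c16_sketch(const uint8_t* rows[9], uint8_t* out,
                                    uint8x16_t out_min, uint8x16_t out_max) {
      const uint8x16_t vi0 = vld1q_u8(rows[0]);
      const uint8x16_t vi1 = vld1q_u8(rows[1]);
      const uint8x16_t vi2 = vld1q_u8(rows[2]);
      const uint8x16_t vi3 = vld1q_u8(rows[3]);
      const uint8x16_t vi4 = vld1q_u8(rows[4]);
      const uint8x16_t vi5 = vld1q_u8(rows[5]);
      const uint8x16_t vi6 = vld1q_u8(rows[6]);
      const uint8x16_t vi7 = vld1q_u8(rows[7]);
      const uint8x16_t vi8 = vld1q_u8(rows[8]);
      /* Balanced tree of pairwise maxes, mirroring vmax018/vmax23/vmax45/vmax67 above. */
      const uint8x16_t max018 = vmaxq_u8(vmaxq_u8(vi0, vi1), vi8);
      const uint8x16_t max2345 = vmaxq_u8(vmaxq_u8(vi2, vi3), vmaxq_u8(vi4, vi5));
      const uint8x16_t max01678 = vmaxq_u8(max018, vmaxq_u8(vi6, vi7));
      const uint8x16_t vmax = vmaxq_u8(max2345, max01678);
      /* Clamp into the requested output range (the vout line above). */
      vst1q_u8(out, vmaxq_u8(vminq_u8(vmax, out_max), out_min));
    }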
/external/XNNPACK/src/u8-clamp/
neon.c:33 const uint8x16_t vy0 = vminq_u8(vmaxq_u8(vx0, voutput_min), voutput_max); in xnn_u8_clamp_ukernel__neon()
34 const uint8x16_t vy1 = vminq_u8(vmaxq_u8(vx1, voutput_min), voutput_max); in xnn_u8_clamp_ukernel__neon()
35 const uint8x16_t vy2 = vminq_u8(vmaxq_u8(vx2, voutput_min), voutput_max); in xnn_u8_clamp_ukernel__neon()
36 const uint8x16_t vy3 = vminq_u8(vmaxq_u8(vx3, voutput_min), voutput_max); in xnn_u8_clamp_ukernel__neon()
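
The clamp ukernel is the simplest use of the intrinsic: each 16-byte vector is raised to voutput_min with vmaxq_u8 and lowered to voutput_max with vminq_u8. The q8-vadd, q8-gemm/igemm, and requantization hits further down apply the same min/max pair to their outputs. A hedged sketch of one clamp pass; the loop structure, remainder handling, and name are assumptions, not the XNNPACK code.

    #include <arm_neon.h>
    #include <stddef.h>
    #include <stdint.h>

    /* Clamp n bytes to [lo, hi]; n is assumed to be a multiple of 16 in this sketch. */
    static void clamp_u8_sketch(const uint8_t* x, uint8_t* y, size_t n,
                                uint8_t lo, uint8_t hi) {
      const uint8x16_t vlo = vdupq_n_u8(lo);
      const uint8x16_t vhi = vdupq_n_u8(hi);
      for (size_t i = 0; i < n; i += 16) {
        const uint8x16_t vx = vld1q_u8(x + i);
        vst1q_u8(y + i, vminq_u8(vmaxq_u8(vx, vlo), vhi));
      }
    }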
/external/libgav1/libgav1/src/dsp/arm/
cdef_neon.cc:542 vmaxq_u8(vreinterpretq_u8_u16(primary_val[0]), in CdefFilter_NEON()
545 vmaxq_u8(vreinterpretq_u8_u16(primary_val[2]), in CdefFilter_NEON()
548 vreinterpretq_u16_u8(vmaxq_u8(max_p01, max_p23)); in CdefFilter_NEON()
593 vmaxq_u8(vreinterpretq_u8_u16(secondary_val[0]), in CdefFilter_NEON()
596 vmaxq_u8(vreinterpretq_u8_u16(secondary_val[2]), in CdefFilter_NEON()
599 vmaxq_u8(vreinterpretq_u8_u16(secondary_val[4]), in CdefFilter_NEON()
602 vmaxq_u8(vreinterpretq_u8_u16(secondary_val[6]), in CdefFilter_NEON()
605 vmaxq_u8(vmaxq_u8(max_s01, max_s23), vmaxq_u8(max_s45, max_s67))); in CdefFilter_NEON()
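
In the libgav1 CDEF filter the tap values sit in uint16x8_t registers, but the maxima are taken byte-wise after vreinterpretq_u8_u16 and cast back with vreinterpretq_u16_u8. A byte-wise max only equals the element-wise 16-bit max under range assumptions (for example, when every lane fits in its low byte so the high bytes are all zero); the CDEF code relies on properties of its own values that are not reproduced here. A small sketch of the reinterpret-and-max idiom, with an invented helper name:

    #include <arm_neon.h>

    /* Byte-wise max of two vectors holding 16-bit data. Matches the uint16
       element-wise max only when the value range allows it (e.g. all lanes <= 0xFF). */
    static inline uint16x8_t max_u16_via_u8(uint16x8_t a, uint16x8_t b) {
      return vreinterpretq_u16_u8(
          vmaxq_u8(vreinterpretq_u8_u16(a), vreinterpretq_u8_u16(b)));
    }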
/external/XNNPACK/src/u8-rmax/
neon.c:27 vmax = vmaxq_u8(vmax, vx); in xnn_u8_rmax_ukernel__neon()
34 vmax = vmaxq_u8(vmax, vx); in xnn_u8_rmax_ukernel__neon()
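
The rmax ukernel keeps a running 16-lane maximum with vmaxq_u8 and only collapses it to a scalar at the end. A sketch of that pattern, using a vpmax_u8 ladder for the horizontal reduction; the tail handling and the name are invented, and the XNNPACK kernel reduces slightly differently.

    #include <arm_neon.h>
    #include <stddef.h>
    #include <stdint.h>

    /* Maximum of n bytes; this sketch assumes n >= 16 and n % 16 == 0. */
    static uint8_t rmax_u8_sketch(const uint8_t* x, size_t n) {
      uint8x16_t vmax = vdupq_n_u8(0);
      for (size_t i = 0; i < n; i += 16) {
        vmax = vmaxq_u8(vmax, vld1q_u8(x + i));      /* running 16-lane max */
      }
      /* Horizontal reduction: 16 -> 8 -> 4 -> 2 -> 1 lanes. */
      uint8x8_t m = vmax_u8(vget_low_u8(vmax), vget_high_u8(vmax));
      m = vpmax_u8(m, m);
      m = vpmax_u8(m, m);
      m = vpmax_u8(m, m);
      return vget_lane_u8(m, 0);
    }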
/external/libvpx/libvpx/vpx_dsp/arm/
avg_neon.c:214 const uint8x16_t ab0123_max = vmaxq_u8(ab01_diff, ab23_diff); in vpx_minmax_8x8_neon()
215 const uint8x16_t ab4567_max = vmaxq_u8(ab45_diff, ab67_diff); in vpx_minmax_8x8_neon()
219 const uint8x16_t ab07_max = vmaxq_u8(ab0123_max, ab4567_max); in vpx_minmax_8x8_neon()
deblock_neon.c:70 uint8x16_t max = vmaxq_u8(a2_v0, a1_v0); in generate_maskq()
71 max = vmaxq_u8(b1_v0, max); in generate_maskq()
72 max = vmaxq_u8(b2_v0, max); in generate_maskq()
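
In the libvpx hits, vmaxq_u8 folds several absolute-difference vectors into one before the result is compared against a limit to build a per-pixel mask; the vp8 loop filters and WebP's NeedsFilter2_NEON below follow the same fold-then-threshold shape. A hedged sketch of that pattern; the tap choice, names, and the <= comparison are illustrative, not the generate_maskq logic.

    #include <arm_neon.h>

    /* 0xFF in lanes where the largest neighbour difference stays within `limit`,
       0x00 elsewhere. The real filters combine more taps and thresholds. */
    static inline uint8x16_t diff_mask_sketch(uint8x16_t p1, uint8x16_t p0,
                                              uint8x16_t q0, uint8x16_t q1,
                                              uint8x16_t limit) {
      const uint8x16_t d_p1p0 = vabdq_u8(p1, p0);    /* |p1 - p0| */
      const uint8x16_t d_q1q0 = vabdq_u8(q1, q0);    /* |q1 - q0| */
      const uint8x16_t d_p0q0 = vabdq_u8(p0, q0);    /* |p0 - q0| */
      uint8x16_t max = vmaxq_u8(d_p1p0, d_q1q0);
      max = vmaxq_u8(max, d_p0q0);
      return vcleq_u8(max, limit);
    }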
/external/libvpx/libvpx/vp8/common/arm/neon/
vp8_loopfilter_neon.c:45 q11u8 = vmaxq_u8(q11u8, q12u8); in vp8_loop_filter_neon()
46 q12u8 = vmaxq_u8(q13u8, q14u8); in vp8_loop_filter_neon()
47 q3 = vmaxq_u8(q3, q4); in vp8_loop_filter_neon()
48 q15u8 = vmaxq_u8(q11u8, q12u8); in vp8_loop_filter_neon()
55 q15u8 = vmaxq_u8(q15u8, q3); in vp8_loop_filter_neon()
mbloopfilter_neon.c:47 q11u8 = vmaxq_u8(q11u8, q12u8); in vp8_mbloop_filter_neon()
48 q12u8 = vmaxq_u8(q13u8, q14u8); in vp8_mbloop_filter_neon()
49 q1u8 = vmaxq_u8(q1u8, q0u8); in vp8_mbloop_filter_neon()
50 q15u8 = vmaxq_u8(q11u8, q12u8); in vp8_mbloop_filter_neon()
57 q15u8 = vmaxq_u8(q15u8, q1u8); in vp8_mbloop_filter_neon()
/external/XNNPACK/src/q8-vadd/
neon.c:95 vy01 = vmaxq_u8(vy01, vy_min); in xnn_q8_vadd_ukernel__neon()
96 vy23 = vmaxq_u8(vy23, vy_min); in xnn_q8_vadd_ukernel__neon()
144 vy01 = vmaxq_u8(vy01, vy_min); in xnn_q8_vadd_ukernel__neon()
/external/XNNPACK/src/requantization/
q31-neon.c:91 const uint8x16_t xyzw_clamped = vmaxq_u8(vminq_u8(xyzw_packed, vqmax), vqmin); in xnn_requantize_q31__neon()
gemmlowp-neon.c:99 const uint8x16_t xyzw_clamped = vmaxq_u8(vminq_u8(xyzw_packed, vqmax), vqmin); in xnn_requantize_gemmlowp__neon()
fp32-neon.c:76 const uint8x16_t xyzw_clamped = vmaxq_u8(vminq_u8(xyzw_packed, vqmax), vqmin); in xnn_requantize_fp32__neon()
precise-neon.c:128 const uint8x16_t xyzw_clamped = vmaxq_u8(vminq_u8(xyzw_packed, vqmax), vqmin); in xnn_requantize_precise__neon()
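
All four requantization variants end the same way: 16-bit intermediates are saturating-narrowed into a single uint8x16_t and then clamped with the vminq_u8/vmaxq_u8 pair shown above. A sketch of just that tail; the per-variant scaling that produces the int16 halves is omitted, and the name is invented.

    #include <arm_neon.h>

    /* Pack two int16x8_t halves into a uint8x16_t with saturation, then clamp. */
    static inline uint8x16_t requantize_tail_sketch(int16x8_t lo, int16x8_t hi,
                                                    uint8x16_t qmin, uint8x16_t qmax) {
      const uint8x16_t packed = vcombine_u8(vqmovun_s16(lo), vqmovun_s16(hi));
      return vmaxq_u8(vminq_u8(packed, qmax), qmin);
    }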
/external/XNNPACK/src/q8-igemm/
8x8-neon.c:591 vout0x01234567_1x01234567 = vmaxq_u8(vout0x01234567_1x01234567, voutput_min); in xnn_q8_igemm_ukernel_8x8__neon()
592 vout2x01234567_3x01234567 = vmaxq_u8(vout2x01234567_3x01234567, voutput_min); in xnn_q8_igemm_ukernel_8x8__neon()
593 vout4x01234567_5x01234567 = vmaxq_u8(vout4x01234567_5x01234567, voutput_min); in xnn_q8_igemm_ukernel_8x8__neon()
594 vout6x01234567_7x01234567 = vmaxq_u8(vout6x01234567_7x01234567, voutput_min); in xnn_q8_igemm_ukernel_8x8__neon()
4x8-neon.c:371 vout0x01234567_1x01234567 = vmaxq_u8(vout0x01234567_1x01234567, voutput_min); in xnn_q8_igemm_ukernel_4x8__neon()
372 vout2x01234567_3x01234567 = vmaxq_u8(vout2x01234567_3x01234567, voutput_min); in xnn_q8_igemm_ukernel_4x8__neon()
/external/XNNPACK/src/q8-gemm/
8x8-neon.c:546 vout0x01234567_1x01234567 = vmaxq_u8(vout0x01234567_1x01234567, voutput_min); in xnn_q8_gemm_ukernel_8x8__neon()
547 vout2x01234567_3x01234567 = vmaxq_u8(vout2x01234567_3x01234567, voutput_min); in xnn_q8_gemm_ukernel_8x8__neon()
548 vout4x01234567_5x01234567 = vmaxq_u8(vout4x01234567_5x01234567, voutput_min); in xnn_q8_gemm_ukernel_8x8__neon()
549 vout6x01234567_7x01234567 = vmaxq_u8(vout6x01234567_7x01234567, voutput_min); in xnn_q8_gemm_ukernel_8x8__neon()
4x8-neon.c:334 vout0x01234567_1x01234567 = vmaxq_u8(vout0x01234567_1x01234567, voutput_min); in xnn_q8_gemm_ukernel_4x8__neon()
335 vout2x01234567_3x01234567 = vmaxq_u8(vout2x01234567_3x01234567, voutput_min); in xnn_q8_gemm_ukernel_4x8__neon()
/external/webp/src/dsp/
dec_neon.c:681 const uint8x16_t a_max = vmaxq_u8(a_p1_p0, a_q1_q0); in NeedsHev_NEON()
698 const uint8x16_t max1 = vmaxq_u8(a_p3_p2, a_p2_p1); in NeedsFilter2_NEON()
699 const uint8x16_t max2 = vmaxq_u8(a_p1_p0, a_q3_q2); in NeedsFilter2_NEON()
700 const uint8x16_t max3 = vmaxq_u8(a_q2_q1, a_q1_q0); in NeedsFilter2_NEON()
701 const uint8x16_t max12 = vmaxq_u8(max1, max2); in NeedsFilter2_NEON()
702 const uint8x16_t max123 = vmaxq_u8(max12, max3); in NeedsFilter2_NEON()
/external/libaom/libaom/aom_dsp/simd/
v128_intrinsics_arm.h:361 vmaxq_u8(vreinterpretq_u8_s64(x), vreinterpretq_u8_s64(y))); in v128_max_u8()
/external/tensorflow/tensorflow/lite/kernels/internal/optimized/
legacy_optimized_ops.h:4014 max16_0 = vmaxq_u8(max16_0, vld1q_u8(input_data_ptr + c + 0)); in Softmax()
4015 max16_1 = vmaxq_u8(max16_1, vld1q_u8(input_data_ptr + c + 16)); in Softmax()
4017 uint8x16_t max16 = vmaxq_u8(max16_0, max16_1); in Softmax()
4019 max16 = vmaxq_u8(max16, vld1q_u8(input_data_ptr + c)); in Softmax()
depthwiseconv_uint8_transitional.h:3251 return vmaxq_u8(a, b);
4355 return vmaxq_u8(a, b);
optimized_ops.h:3789 acc_reg = vmaxq_u8(acc_reg, input_reg); in MaxPool()
3814 a = vmaxq_u8(a, vdupq_n_u8(params.quantized_activation_min)); in MaxPool()
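
The TensorFlow Lite kernels use the intrinsic in two roles: Softmax keeps a running row maximum before exponentiating, and MaxPool accumulates the per-channel maximum across filter taps and then clamps to the quantized activation range. A sketch of the MaxPool accumulation step for one 16-channel slice; the names and loop bounds are illustrative.

    #include <arm_neon.h>
    #include <stddef.h>
    #include <stdint.h>

    /* Element-wise max of `taps` rows of 16 uint8 channels, clamped to the
       activation range. Sketch only; the real kernel tiles over wider rows. */
    static void maxpool_acc_sketch(const uint8_t* const* rows, size_t taps,
                                   uint8_t act_min, uint8_t act_max, uint8_t* out) {
      uint8x16_t acc = vdupq_n_u8(0);
      for (size_t t = 0; t < taps; ++t) {
        acc = vmaxq_u8(acc, vld1q_u8(rows[t]));
      }
      acc = vmaxq_u8(acc, vdupq_n_u8(act_min));
      acc = vminq_u8(acc, vdupq_n_u8(act_max));
      vst1q_u8(out, acc);
    }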
/external/psimd/include/
psimd.h:815 return (psimd_u8) vmaxq_u8((uint8x16_t) a, (uint8x16_t) b); in psimd_max_u8()
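
psimd and the libaom v128 header above both hide vmaxq_u8 behind a portable vector type, casting in and out of uint8x16_t around the call. A minimal sketch of such a wrapper with a scalar fallback; the type and function names are invented for illustration and do not match either library.

    #include <stdint.h>
    #if defined(__ARM_NEON)
    #include <arm_neon.h>
    #endif

    typedef struct { uint8_t v[16]; } my_u8x16;   /* illustrative portable type */

    /* Lane-wise unsigned byte max: NEON when available, scalar otherwise. */
    static inline my_u8x16 my_max_u8(my_u8x16 a, my_u8x16 b) {
      my_u8x16 r;
    #if defined(__ARM_NEON)
      vst1q_u8(r.v, vmaxq_u8(vld1q_u8(a.v), vld1q_u8(b.v)));
    #else
      for (int i = 0; i < 16; ++i) r.v[i] = a.v[i] > b.v[i] ? a.v[i] : b.v[i];
    #endif
      return r;
    }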
/external/clang/test/CodeGen/
aarch64-neon-intrinsics.c:4322 return vmaxq_u8(a, b); in test_vmaxq_u8()
arm_neon_intrinsics.c:8606 return vmaxq_u8(a, b); in test_vmaxq_u8()
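
The clang tests above only check how the builtin lowers. The intrinsic itself is uint8x16_t vmaxq_u8(uint8x16_t a, uint8x16_t b): a lane-wise unsigned byte maximum, typically emitted as a single UMAX .16B (AArch64) or VMAX.U8 (ARMv7) instruction. A tiny self-contained check of the semantics:

    #include <arm_neon.h>
    #include <stdint.h>
    #include <stdio.h>

    int main(void) {
      const uint8_t a[16] = {0, 10, 200, 255, 7, 7, 128, 127, 1, 2, 3, 4, 5, 6, 7, 8};
      const uint8_t b[16] = {5,  9, 201,   0, 7, 8, 127, 128, 8, 7, 6, 5, 4, 3, 2, 1};
      uint8_t r[16];
      vst1q_u8(r, vmaxq_u8(vld1q_u8(a), vld1q_u8(b)));
      for (int i = 0; i < 16; ++i) {
        printf("%u ", r[i]);   /* each lane is max(a[i], b[i]) */
      }
      printf("\n");
      return 0;
    }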
