
Searched refs: vst1q_lane_u32 (Results 1 – 12 of 12) sorted by relevance

/external/XNNPACK/src/x32-pad/
x2-neon.c:50 vst1q_lane_u32(y0, vc, 0); y0 += 1; in xnn_x32_pad_x2__neon()
51 vst1q_lane_u32(y1, vc, 0); y1 += 1; in xnn_x32_pad_x2__neon()
88 vst1q_lane_u32(y0, vc, 0); in xnn_x32_pad_x2__neon()
89 vst1q_lane_u32(y1, vc, 0); in xnn_x32_pad_x2__neon()
/external/libvpx/libvpx/vpx_dsp/arm/
vpx_convolve8_neon.c:390 vst1q_lane_u32((uint32_t *)(dst + 0 * dst_stride), d0123, 0); in vpx_convolve8_avg_horiz_neon()
391 vst1q_lane_u32((uint32_t *)(dst + 1 * dst_stride), d0123, 2); in vpx_convolve8_avg_horiz_neon()
392 vst1q_lane_u32((uint32_t *)(dst + 2 * dst_stride), d0123, 1); in vpx_convolve8_avg_horiz_neon()
393 vst1q_lane_u32((uint32_t *)(dst + 3 * dst_stride), d0123, 3); in vpx_convolve8_avg_horiz_neon()
477 vst1q_lane_u32((uint32_t *)dst, d0415, 0); in vpx_convolve8_avg_horiz_neon()
479 vst1q_lane_u32((uint32_t *)dst, d0415, 2); in vpx_convolve8_avg_horiz_neon()
481 vst1q_lane_u32((uint32_t *)dst, d2637, 0); in vpx_convolve8_avg_horiz_neon()
483 vst1q_lane_u32((uint32_t *)dst, d2637, 2); in vpx_convolve8_avg_horiz_neon()
485 vst1q_lane_u32((uint32_t *)dst, d0415, 1); in vpx_convolve8_avg_horiz_neon()
487 vst1q_lane_u32((uint32_t *)dst, d0415, 3); in vpx_convolve8_avg_horiz_neon()
[all …]
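
In the convolution rows above, four 4-byte output pixels are packed in one uint8x16_t; the kernel reinterprets it as uint32x4_t and writes one lane per destination row (the non-sequential lane order in the hits comes from how that kernel packs its rows). A hedged sketch of the plain, sequential form of that store, with illustrative names:

    #include <arm_neon.h>
    #include <stdint.h>

    /* Store 4 bytes per row from a packed 16-byte result: reinterpret as
     * four 32-bit lanes and write one lane to each row of dst, mirroring
     * the uint32_t* casts in the listing above. */
    static void store_4_rows(uint8_t *dst, int dst_stride, uint8x16_t pixels) {
      const uint32x4_t p = vreinterpretq_u32_u8(pixels);
      vst1q_lane_u32((uint32_t *)(dst + 0 * dst_stride), p, 0);
      vst1q_lane_u32((uint32_t *)(dst + 1 * dst_stride), p, 1);
      vst1q_lane_u32((uint32_t *)(dst + 2 * dst_stride), p, 2);
      vst1q_lane_u32((uint32_t *)(dst + 3 * dst_stride), p, 3);
    }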
/external/XNNPACK/src/q8-igemm/
8x8-neon.c:615 vst1q_lane_u32(__builtin_assume_aligned(c7, 1), vreinterpretq_u32_u8(vout6x01234567_7x01234567), 2… in xnn_q8_igemm_ukernel_8x8__neon()
616 vst1q_lane_u32(__builtin_assume_aligned(c6, 1), vreinterpretq_u32_u8(vout6x01234567_7x01234567), 0… in xnn_q8_igemm_ukernel_8x8__neon()
617 vst1q_lane_u32(__builtin_assume_aligned(c5, 1), vreinterpretq_u32_u8(vout4x01234567_5x01234567), 2… in xnn_q8_igemm_ukernel_8x8__neon()
618 vst1q_lane_u32(__builtin_assume_aligned(c4, 1), vreinterpretq_u32_u8(vout4x01234567_5x01234567), 0… in xnn_q8_igemm_ukernel_8x8__neon()
619 vst1q_lane_u32(__builtin_assume_aligned(c3, 1), vreinterpretq_u32_u8(vout2x01234567_3x01234567), 2… in xnn_q8_igemm_ukernel_8x8__neon()
620 vst1q_lane_u32(__builtin_assume_aligned(c2, 1), vreinterpretq_u32_u8(vout2x01234567_3x01234567), 0… in xnn_q8_igemm_ukernel_8x8__neon()
621 vst1q_lane_u32(__builtin_assume_aligned(c1, 1), vreinterpretq_u32_u8(vout0x01234567_1x01234567), 2… in xnn_q8_igemm_ukernel_8x8__neon()
622 vst1q_lane_u32(__builtin_assume_aligned(c0, 1), vreinterpretq_u32_u8(vout0x01234567_1x01234567), 0… in xnn_q8_igemm_ukernel_8x8__neon()
4x8-neon.c:386 vst1q_lane_u32(__builtin_assume_aligned(c3, 1), vreinterpretq_u32_u8(vout2x01234567_3x01234567), 2… in xnn_q8_igemm_ukernel_4x8__neon()
387 vst1q_lane_u32(__builtin_assume_aligned(c2, 1), vreinterpretq_u32_u8(vout2x01234567_3x01234567), 0… in xnn_q8_igemm_ukernel_4x8__neon()
388 vst1q_lane_u32(__builtin_assume_aligned(c1, 1), vreinterpretq_u32_u8(vout0x01234567_1x01234567), 2… in xnn_q8_igemm_ukernel_4x8__neon()
389 vst1q_lane_u32(__builtin_assume_aligned(c0, 1), vreinterpretq_u32_u8(vout0x01234567_1x01234567), 0… in xnn_q8_igemm_ukernel_4x8__neon()
/external/XNNPACK/src/q8-gemm/
8x8-neon.c:577 vst1q_lane_u32(__builtin_assume_aligned(c0, 1), vreinterpretq_u32_u8(vout0x01234567_1x01234567), 0… in xnn_q8_gemm_ukernel_8x8__neon()
578 vst1q_lane_u32(__builtin_assume_aligned(c1, 1), vreinterpretq_u32_u8(vout0x01234567_1x01234567), 2… in xnn_q8_gemm_ukernel_8x8__neon()
579 vst1q_lane_u32(__builtin_assume_aligned(c2, 1), vreinterpretq_u32_u8(vout2x01234567_3x01234567), 0… in xnn_q8_gemm_ukernel_8x8__neon()
580 vst1q_lane_u32(__builtin_assume_aligned(c3, 1), vreinterpretq_u32_u8(vout2x01234567_3x01234567), 2… in xnn_q8_gemm_ukernel_8x8__neon()
581 vst1q_lane_u32(__builtin_assume_aligned(c4, 1), vreinterpretq_u32_u8(vout4x01234567_5x01234567), 0… in xnn_q8_gemm_ukernel_8x8__neon()
582 vst1q_lane_u32(__builtin_assume_aligned(c5, 1), vreinterpretq_u32_u8(vout4x01234567_5x01234567), 2… in xnn_q8_gemm_ukernel_8x8__neon()
583 vst1q_lane_u32(__builtin_assume_aligned(c6, 1), vreinterpretq_u32_u8(vout6x01234567_7x01234567), 0… in xnn_q8_gemm_ukernel_8x8__neon()
584 vst1q_lane_u32(__builtin_assume_aligned(c7, 1), vreinterpretq_u32_u8(vout6x01234567_7x01234567), 2… in xnn_q8_gemm_ukernel_8x8__neon()
4x8-neon.c:353 vst1q_lane_u32(__builtin_assume_aligned(c0, 1), vreinterpretq_u32_u8(vout0x01234567_1x01234567), 0… in xnn_q8_gemm_ukernel_4x8__neon()
354 vst1q_lane_u32(__builtin_assume_aligned(c1, 1), vreinterpretq_u32_u8(vout0x01234567_1x01234567), 2… in xnn_q8_gemm_ukernel_4x8__neon()
355 vst1q_lane_u32(__builtin_assume_aligned(c2, 1), vreinterpretq_u32_u8(vout2x01234567_3x01234567), 0… in xnn_q8_gemm_ukernel_4x8__neon()
356 vst1q_lane_u32(__builtin_assume_aligned(c3, 1), vreinterpretq_u32_u8(vout2x01234567_3x01234567), 2… in xnn_q8_gemm_ukernel_4x8__neon()
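
Both the q8-igemm and q8-gemm micro-kernels above use the same remainder path: when only four output columns are left, two rows of quantized bytes sit in one uint8x16_t (one row in bytes 0-7, the next in bytes 8-15), so lane 0 holds the first row's four bytes and lane 2 the next row's, and __builtin_assume_aligned(c, 1) marks the destinations as possibly unaligned. A minimal sketch of that store, with invented row-pointer names:

    #include <arm_neon.h>
    #include <stdint.h>

    /* Write the first 4 quantized bytes of two packed output rows; c0 and
     * c1 may be unaligned, hence the 1-byte alignment assumption. */
    static void store_two_rows_x4(uint8_t *c0, uint8_t *c1, uint8x16_t vout01) {
      vst1q_lane_u32((uint32_t *) __builtin_assume_aligned(c0, 1),
                     vreinterpretq_u32_u8(vout01), 0);
      vst1q_lane_u32((uint32_t *) __builtin_assume_aligned(c1, 1),
                     vreinterpretq_u32_u8(vout01), 2);
    }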
/external/webp/src/dsp/
lossless_neon.c:270 vst1q_lane_u32(&out[i + (LANE)], vreinterpretq_u32_u8(res), (LANE)); \
294 vst1q_lane_u32(&out[i + (LANE)], vreinterpretq_u32_u8(res), (LANE)); \
357 vst1q_lane_u32(&out[i + (LANE)], vreinterpretq_u32_u8(res), (LANE)); \
388 vst1q_lane_u32(&out[i + (LANE)], vreinterpretq_u32_u8(res), (LANE)); \
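
The webp lossless predictors wrap the same idea in a per-lane macro: lane LANE of the reinterpreted result is stored to pixel out[i + LANE], and four instantiations together write the whole vector. The macro below is a reconstruction of that shape from the hits above, not copied from the header; the name is invented and out is assumed to be a uint32_t pixel buffer:

    /* Store one 32-bit pixel: lane LANE of res goes to out[i + LANE].
     * LANE must be a literal constant in 0..3. */
    #define STORE_LANE_32(out, i, res, LANE) \
      vst1q_lane_u32(&(out)[(i) + (LANE)], vreinterpretq_u32_u8(res), (LANE))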
/external/clang/test/CodeGen/
aarch64-neon-ldst-one.c:5302 vst1q_lane_u32(a, b, 3); in test_vst1q_lane_u32()
arm_neon_intrinsics.c:18201 vst1q_lane_u32(a, b, 3); in test_vst1q_lane_u32()
/external/tensorflow/tensorflow/lite/kernels/internal/optimized/
depthwiseconv_uint8_3x3_filter.h:48 vst1q_lane_u32(reinterpret_cast<uint32_t*>(dst), reg, lane_num)
depthwiseconv_uint8_transitional.h:68 vst1q_lane_u32(reinterpret_cast<uint32_t*>(dst), reg, lane_num)
/external/neon_2_sse/
NEON_2_SSE.h:9691 _NEON2SSESTORAGE void vst1q_lane_u32(__transfersize(1) uint32_t * ptr, uint32x4_t val, __constrange…
9692 #define vst1q_lane_u32(ptr, val, lane) *(ptr) = (uint32_t) _MM_EXTRACT_EPI32 (val, lane) macro
11627 vst1q_lane_u32(ptr, val->val[0], lane);
11628 vst1q_lane_u32((ptr + 1), val->val[1], lane);
11716 vst1q_lane_u32((ptr + 2), val->val[2], lane);
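
On x86, NEON_2_SSE.h maps the intrinsic to a scalar store of one extracted 32-bit lane (the #define at line 9692 above), which is what the SSE4.1 extract intrinsic provides. A hedged equivalent of that expansion, written against the standard _mm_extract_epi32 and assuming uint32x4_t is an alias for __m128i as in that header:

    #include <smmintrin.h>  /* SSE4.1: _mm_extract_epi32 */
    #include <stdint.h>

    /* Pull lane `lane` (a compile-time constant, 0..3) out of the 128-bit
     * register and write it through ptr, matching the NEON semantics of
     * storing a single 32-bit element. The macro name is illustrative. */
    #define emulated_vst1q_lane_u32(ptr, val, lane) \
      (*(ptr) = (uint32_t) _mm_extract_epi32((val), (lane)))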