
Searched defs:v2_0 (Results 1 – 25 of 32) sorted by relevance


/external/XNNPACK/src/x32-transpose/gen/
4x4-reuse-multi-zip-neon.c
    54: const uint32x4_t v2_0 = vld1q_u32(i0); i0 = (uint32_t*) ((uintptr_t) i0 + input_stride);  [in xnn_x32_transpose_ukernel__4x4_reuse_multi_zip_neon(), local]
    72: const uint32x4_t v2_0 = vld1q_u32(i0);  [in xnn_x32_transpose_ukernel__4x4_reuse_multi_zip_neon(), local]

4x4-multi-multi-zip-neon.c
    58: const uint32x4_t v2_0 = vld1q_u32(i0); i0 = (uint32_t*) ((uintptr_t) i0 + input_offset);  [in xnn_x32_transpose_ukernel__4x4_multi_multi_zip_neon(), local]
    76: const uint32x4_t v2_0 = vld1q_u32(i0);  [in xnn_x32_transpose_ukernel__4x4_multi_multi_zip_neon(), local]

4x4-reuse-dec-zip-neon.c
    46: const uint32x4_t v2_0 = vld1q_u32(i0); i0 = (uint32_t*) ((uintptr_t) i0 + input_stride);  [in xnn_x32_transpose_ukernel__4x4_reuse_dec_zip_neon(), local]
    75: const uint32x4_t v2_0 = vld1q_u32(i0);  [in xnn_x32_transpose_ukernel__4x4_reuse_dec_zip_neon(), local]

4x4-reuse-multi-sse2.c
    54: const __m128i v2_0 = _mm_loadu_si128((const __m128i*) i0);  [in xnn_x32_transpose_ukernel__4x4_reuse_multi_sse2(), local]
    85: const __m128i v2_0 = _mm_loadu_si128((const __m128i*) i0);  [in xnn_x32_transpose_ukernel__4x4_reuse_multi_sse2(), local]

4x4-multi-dec-zip-neon.c
    50: const uint32x4_t v2_0 = vld1q_u32(i0); i0 = (uint32_t*) ((uintptr_t) i0 + input_offset);  [in xnn_x32_transpose_ukernel__4x4_multi_dec_zip_neon(), local]
    79: const uint32x4_t v2_0 = vld1q_u32(i0);  [in xnn_x32_transpose_ukernel__4x4_multi_dec_zip_neon(), local]

4x4-reuse-switch-zip-neon.c
    45: const uint32x4_t v2_0 = vld1q_u32(i0); i0 = (uint32_t*) ((uintptr_t) i0 + input_stride);  [in xnn_x32_transpose_ukernel__4x4_reuse_switch_zip_neon(), local]
    73: const uint32x4_t v2_0 = vld1q_u32(i0);  [in xnn_x32_transpose_ukernel__4x4_reuse_switch_zip_neon(), local]

4x4-reuse-mov-zip-neon.c
    46: const uint32x4_t v2_0 = vld1q_u32(i0); i0 = (uint32_t*) ((uintptr_t) i0 + input_stride);  [in xnn_x32_transpose_ukernel__4x4_reuse_mov_zip_neon(), local]
    78: const uint32x4_t v2_0 = vld1q_u32(i0);  [in xnn_x32_transpose_ukernel__4x4_reuse_mov_zip_neon(), local]

4x4-multi-switch-zip-neon.c
    49: const uint32x4_t v2_0 = vld1q_u32(i0); i0 = (uint32_t*) ((uintptr_t) i0 + input_offset);  [in xnn_x32_transpose_ukernel__4x4_multi_switch_zip_neon(), local]
    77: const uint32x4_t v2_0 = vld1q_u32(i0);  [in xnn_x32_transpose_ukernel__4x4_multi_switch_zip_neon(), local]

4x4-multi-mov-zip-neon.c
    50: const uint32x4_t v2_0 = vld1q_u32(i0); i0 = (uint32_t*) ((uintptr_t) i0 + input_offset);  [in xnn_x32_transpose_ukernel__4x4_multi_mov_zip_neon(), local]
    82: const uint32x4_t v2_0 = vld1q_u32(i0);  [in xnn_x32_transpose_ukernel__4x4_multi_mov_zip_neon(), local]

4x4-multi-multi-sse2.c
    58: const __m128i v2_0 = _mm_loadu_si128((const __m128i*) i0);  [in xnn_x32_transpose_ukernel__4x4_multi_multi_sse2(), local]
    89: const __m128i v2_0 = _mm_loadu_si128((const __m128i*) i0);  [in xnn_x32_transpose_ukernel__4x4_multi_multi_sse2(), local]

4x4-reuse-mov-sse2.c
    46: const __m128i v2_0 = _mm_loadu_si128((const __m128i*) i0);  [in xnn_x32_transpose_ukernel__4x4_reuse_mov_sse2(), local]
    87: const __m128i v2_0 = _mm_loadu_si128((const __m128i*) i0);  [in xnn_x32_transpose_ukernel__4x4_reuse_mov_sse2(), local]

4x4-reuse-switch-sse2.c
    45: const __m128i v2_0 = _mm_loadu_si128((const __m128i*) i0);  [in xnn_x32_transpose_ukernel__4x4_reuse_switch_sse2(), local]
    85: const __m128i v2_0 = _mm_loadu_si128((const __m128i*) i0);  [in xnn_x32_transpose_ukernel__4x4_reuse_switch_sse2(), local]

4x4-multi-switch-sse2.c
    49: const __m128i v2_0 = _mm_loadu_si128((const __m128i*) i0);  [in xnn_x32_transpose_ukernel__4x4_multi_switch_sse2(), local]
    89: const __m128i v2_0 = _mm_loadu_si128((const __m128i*) i0);  [in xnn_x32_transpose_ukernel__4x4_multi_switch_sse2(), local]

4x4-multi-mov-sse2.c
    50: const __m128i v2_0 = _mm_loadu_si128((const __m128i*) i0);  [in xnn_x32_transpose_ukernel__4x4_multi_mov_sse2(), local]
    91: const __m128i v2_0 = _mm_loadu_si128((const __m128i*) i0);  [in xnn_x32_transpose_ukernel__4x4_multi_mov_sse2(), local]
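Every x32 hit above is the row load feeding a zip/unpack-based 4x4 transpose: judging by the file names and the snippets, each kernel reads a 32-bit row (vld1q_u32 on NEON, _mm_loadu_si128 on SSE2), advances the pointer by a byte stride or offset, and interleaves the rows into columns. Below is a minimal, self-contained sketch of that idea for the NEON case; the function and variable names (transpose_4x4_u32_neon, row0, col0, ...) are placeholders of mine, not XNNPACK's, and the exact staging inside the listed kernels may differ.

#include <arm_neon.h>
#include <stddef.h>
#include <stdint.h>

/* Sketch of a zip-based 4x4 transpose of 32-bit elements, in the spirit of
 * the x32-transpose NEON kernels listed above. Names are placeholders. */
static void transpose_4x4_u32_neon(const uint32_t* input, size_t input_stride,
                                   uint32_t* output, size_t output_stride) {
  /* Load four rows; strides are in bytes, as in the hits above. */
  const uint32x4_t row0 = vld1q_u32(input);
  input = (const uint32_t*) ((uintptr_t) input + input_stride);
  const uint32x4_t row1 = vld1q_u32(input);
  input = (const uint32_t*) ((uintptr_t) input + input_stride);
  const uint32x4_t row2 = vld1q_u32(input);
  input = (const uint32_t*) ((uintptr_t) input + input_stride);
  const uint32x4_t row3 = vld1q_u32(input);

  /* Interleave 32-bit lanes of row pairs (0,1) and (2,3). */
  const uint32x4x2_t zip01 = vzipq_u32(row0, row1);
  const uint32x4x2_t zip23 = vzipq_u32(row2, row3);

  /* Combine 64-bit halves to form the transposed columns. */
  const uint32x4_t col0 = vcombine_u32(vget_low_u32(zip01.val[0]), vget_low_u32(zip23.val[0]));
  const uint32x4_t col1 = vcombine_u32(vget_high_u32(zip01.val[0]), vget_high_u32(zip23.val[0]));
  const uint32x4_t col2 = vcombine_u32(vget_low_u32(zip01.val[1]), vget_low_u32(zip23.val[1]));
  const uint32x4_t col3 = vcombine_u32(vget_high_u32(zip01.val[1]), vget_high_u32(zip23.val[1]));

  /* Store each column as an output row. */
  vst1q_u32(output, col0);
  output = (uint32_t*) ((uintptr_t) output + output_stride);
  vst1q_u32(output, col1);
  output = (uint32_t*) ((uintptr_t) output + output_stride);
  vst1q_u32(output, col2);
  output = (uint32_t*) ((uintptr_t) output + output_stride);
  vst1q_u32(output, col3);
}

The SSE2 variants listed above load rows with _mm_loadu_si128 and interleave with the corresponding unpack intrinsics; an unpack-based sketch follows the x16 results below.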
/external/XNNPACK/src/x16-transpose/gen/
8x8-reuse-dec-zip-neon.c
    55: const uint16x8x2_t v2_0 = vzipq_u16(v3_0, v3_4);  [in xnn_x16_transpose_ukernel__8x8_reuse_dec_zip_neon(), local]
    136: const uint16x8x2_t v2_0 = vzipq_u16(v3_0, v3_4);  [in xnn_x16_transpose_ukernel__8x8_reuse_dec_zip_neon(), local]

8x8-reuse-switch-zip-neon.c
    54: const uint16x8x2_t v2_0 = vzipq_u16(v3_0, v3_4);  [in xnn_x16_transpose_ukernel__8x8_reuse_switch_zip_neon(), local]
    126: const uint16x8x2_t v2_0 = vzipq_u16(v3_0, v3_4);  [in xnn_x16_transpose_ukernel__8x8_reuse_switch_zip_neon(), local]

8x8-multi-dec-zip-neon.c
    63: const uint16x8x2_t v2_0 = vzipq_u16(v3_0, v3_4);  [in xnn_x16_transpose_ukernel__8x8_multi_dec_zip_neon(), local]
    138: const uint16x8x2_t v2_0 = vzipq_u16(v3_0, v3_4);  [in xnn_x16_transpose_ukernel__8x8_multi_dec_zip_neon(), local]

8x8-reuse-multi-zip-neon.c
    79: const uint16x8x2_t v2_0 = vzipq_u16(v3_0, v3_4);  [in xnn_x16_transpose_ukernel__8x8_reuse_multi_zip_neon(), local]
    137: const uint16x8x2_t v2_0 = vzipq_u16(v3_0, v3_4);  [in xnn_x16_transpose_ukernel__8x8_reuse_multi_zip_neon(), local]

8x8-reuse-mov-zip-neon.c
    55: const uint16x8x2_t v2_0 = vzipq_u16(v3_0, v3_4);  [in xnn_x16_transpose_ukernel__8x8_reuse_mov_zip_neon(), local]
    143: const uint16x8x2_t v2_0 = vzipq_u16(v3_0, v3_4);  [in xnn_x16_transpose_ukernel__8x8_reuse_mov_zip_neon(), local]

8x8-multi-switch-zip-neon.c
    62: const uint16x8x2_t v2_0 = vzipq_u16(v3_0, v3_4);  [in xnn_x16_transpose_ukernel__8x8_multi_switch_zip_neon(), local]
    128: const uint16x8x2_t v2_0 = vzipq_u16(v3_0, v3_4);  [in xnn_x16_transpose_ukernel__8x8_multi_switch_zip_neon(), local]

8x8-multi-mov-zip-neon.c
    63: const uint16x8x2_t v2_0 = vzipq_u16(v3_0, v3_4);  [in xnn_x16_transpose_ukernel__8x8_multi_mov_zip_neon(), local]
    145: const uint16x8x2_t v2_0 = vzipq_u16(v3_0, v3_4);  [in xnn_x16_transpose_ukernel__8x8_multi_mov_zip_neon(), local]

8x8-multi-switch-sse2.c
    70: const __m128i v2_0 = _mm_unpacklo_epi16(v3_0, v3_1);  [in xnn_x16_transpose_ukernel__8x8_multi_switch_sse2(), local]
    156: const __m128i v2_0 = _mm_unpacklo_epi16(v3_0, v3_1);  [in xnn_x16_transpose_ukernel__8x8_multi_switch_sse2(), local]

8x8-reuse-switch-sse2.c
    62: const __m128i v2_0 = _mm_unpacklo_epi16(v3_0, v3_1);  [in xnn_x16_transpose_ukernel__8x8_reuse_switch_sse2(), local]
    154: const __m128i v2_0 = _mm_unpacklo_epi16(v3_0, v3_1);  [in xnn_x16_transpose_ukernel__8x8_reuse_switch_sse2(), local]

8x8-reuse-multi-sse2.c
    87: const __m128i v2_0 = _mm_unpacklo_epi16(v3_0, v3_1);  [in xnn_x16_transpose_ukernel__8x8_reuse_multi_sse2(), local]
    166: const __m128i v2_0 = _mm_unpacklo_epi16(v3_0, v3_1);  [in xnn_x16_transpose_ukernel__8x8_reuse_multi_sse2(), local]

8x8-reuse-mov-sse2.c
    63: const __m128i v2_0 = _mm_unpacklo_epi16(v3_0, v3_1);  [in xnn_x16_transpose_ukernel__8x8_reuse_mov_sse2(), local]
    164: const __m128i v2_0 = _mm_unpacklo_epi16(v3_0, v3_1);  [in xnn_x16_transpose_ukernel__8x8_reuse_mov_sse2(), local]
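The x16 hits are one step further along: here v2_0 is already the result of interleaving two 16-bit rows (vzipq_u16 on NEON, _mm_unpacklo_epi16 on SSE2), i.e. the first of the three interleave stages an 8x8 transpose of 16-bit elements needs. Below is a sketch of the full unpack-based 8x8 transpose for the SSE2 case; the names are placeholders of mine, and the pairing order inside the listed kernels may differ (the NEON files, for instance, pair row r with row r+4, as in vzipq_u16(v3_0, v3_4)).

#include <emmintrin.h>
#include <stddef.h>
#include <stdint.h>

/* Sketch of an unpack-based 8x8 transpose of 16-bit elements, in the spirit
 * of the x16-transpose SSE2 kernels listed above. Names are placeholders. */
static void transpose_8x8_u16_sse2(const uint16_t* input, size_t input_stride,
                                   uint16_t* output, size_t output_stride) {
  /* Load eight rows; strides are in bytes. */
  __m128i row[8];
  for (size_t i = 0; i < 8; i++) {
    row[i] = _mm_loadu_si128((const __m128i*) ((uintptr_t) input + i * input_stride));
  }

  /* Stage 1: interleave 16-bit lanes of adjacent row pairs. */
  const __m128i a0 = _mm_unpacklo_epi16(row[0], row[1]);
  const __m128i a1 = _mm_unpackhi_epi16(row[0], row[1]);
  const __m128i a2 = _mm_unpacklo_epi16(row[2], row[3]);
  const __m128i a3 = _mm_unpackhi_epi16(row[2], row[3]);
  const __m128i a4 = _mm_unpacklo_epi16(row[4], row[5]);
  const __m128i a5 = _mm_unpackhi_epi16(row[4], row[5]);
  const __m128i a6 = _mm_unpacklo_epi16(row[6], row[7]);
  const __m128i a7 = _mm_unpackhi_epi16(row[6], row[7]);

  /* Stage 2: interleave 32-bit lanes. */
  const __m128i b0 = _mm_unpacklo_epi32(a0, a2);  /* columns 0,1 of rows 0-3 */
  const __m128i b1 = _mm_unpackhi_epi32(a0, a2);  /* columns 2,3 of rows 0-3 */
  const __m128i b2 = _mm_unpacklo_epi32(a1, a3);  /* columns 4,5 of rows 0-3 */
  const __m128i b3 = _mm_unpackhi_epi32(a1, a3);  /* columns 6,7 of rows 0-3 */
  const __m128i b4 = _mm_unpacklo_epi32(a4, a6);  /* columns 0,1 of rows 4-7 */
  const __m128i b5 = _mm_unpackhi_epi32(a4, a6);  /* columns 2,3 of rows 4-7 */
  const __m128i b6 = _mm_unpacklo_epi32(a5, a7);  /* columns 4,5 of rows 4-7 */
  const __m128i b7 = _mm_unpackhi_epi32(a5, a7);  /* columns 6,7 of rows 4-7 */

  /* Stage 3: interleave 64-bit lanes; each result is one transposed column. */
  __m128i col[8];
  col[0] = _mm_unpacklo_epi64(b0, b4);
  col[1] = _mm_unpackhi_epi64(b0, b4);
  col[2] = _mm_unpacklo_epi64(b1, b5);
  col[3] = _mm_unpackhi_epi64(b1, b5);
  col[4] = _mm_unpacklo_epi64(b2, b6);
  col[5] = _mm_unpackhi_epi64(b2, b6);
  col[6] = _mm_unpacklo_epi64(b3, b7);
  col[7] = _mm_unpackhi_epi64(b3, b7);

  /* Store each column as an output row. */
  for (size_t i = 0; i < 8; i++) {
    _mm_storeu_si128((__m128i*) ((uintptr_t) output + i * output_stride), col[i]);
  }
}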
