Definition sites of the local variable v2_0 in XNNPACK's generated transpose microkernels (D marks a definition; each match gives the source line number, the defining statement, and the enclosing function):

| /external/XNNPACK/src/x32-transpose/gen/ |
| D | 4x4-reuse-multi-zip-neon.c |
|   54: const uint32x4_t v2_0 = vld1q_u32(i0); i0 = (uint32_t*) ((uintptr_t) i0 + input_stride);  (local in xnn_x32_transpose_ukernel__4x4_reuse_multi_zip_neon())
|   72: const uint32x4_t v2_0 = vld1q_u32(i0);  (local in xnn_x32_transpose_ukernel__4x4_reuse_multi_zip_neon())
|
| D | 4x4-multi-multi-zip-neon.c |
|   58: const uint32x4_t v2_0 = vld1q_u32(i0); i0 = (uint32_t*) ((uintptr_t) i0 + input_offset);  (local in xnn_x32_transpose_ukernel__4x4_multi_multi_zip_neon())
|   76: const uint32x4_t v2_0 = vld1q_u32(i0);  (local in xnn_x32_transpose_ukernel__4x4_multi_multi_zip_neon())
|
| D | 4x4-reuse-dec-zip-neon.c |
|   46: const uint32x4_t v2_0 = vld1q_u32(i0); i0 = (uint32_t*) ((uintptr_t) i0 + input_stride);  (local in xnn_x32_transpose_ukernel__4x4_reuse_dec_zip_neon())
|   75: const uint32x4_t v2_0 = vld1q_u32(i0);  (local in xnn_x32_transpose_ukernel__4x4_reuse_dec_zip_neon())
|
| D | 4x4-reuse-multi-sse2.c |
|   54: const __m128i v2_0 = _mm_loadu_si128((const __m128i*) i0);  (local in xnn_x32_transpose_ukernel__4x4_reuse_multi_sse2())
|   85: const __m128i v2_0 = _mm_loadu_si128((const __m128i*) i0);  (local in xnn_x32_transpose_ukernel__4x4_reuse_multi_sse2())
|
| D | 4x4-multi-dec-zip-neon.c |
|   50: const uint32x4_t v2_0 = vld1q_u32(i0); i0 = (uint32_t*) ((uintptr_t) i0 + input_offset);  (local in xnn_x32_transpose_ukernel__4x4_multi_dec_zip_neon())
|   79: const uint32x4_t v2_0 = vld1q_u32(i0);  (local in xnn_x32_transpose_ukernel__4x4_multi_dec_zip_neon())
|
| D | 4x4-reuse-switch-zip-neon.c |
|   45: const uint32x4_t v2_0 = vld1q_u32(i0); i0 = (uint32_t*) ((uintptr_t) i0 + input_stride);  (local in xnn_x32_transpose_ukernel__4x4_reuse_switch_zip_neon())
|   73: const uint32x4_t v2_0 = vld1q_u32(i0);  (local in xnn_x32_transpose_ukernel__4x4_reuse_switch_zip_neon())
|
| D | 4x4-reuse-mov-zip-neon.c |
|   46: const uint32x4_t v2_0 = vld1q_u32(i0); i0 = (uint32_t*) ((uintptr_t) i0 + input_stride);  (local in xnn_x32_transpose_ukernel__4x4_reuse_mov_zip_neon())
|   78: const uint32x4_t v2_0 = vld1q_u32(i0);  (local in xnn_x32_transpose_ukernel__4x4_reuse_mov_zip_neon())
|
| D | 4x4-multi-switch-zip-neon.c |
|   49: const uint32x4_t v2_0 = vld1q_u32(i0); i0 = (uint32_t*) ((uintptr_t) i0 + input_offset);  (local in xnn_x32_transpose_ukernel__4x4_multi_switch_zip_neon())
|   77: const uint32x4_t v2_0 = vld1q_u32(i0);  (local in xnn_x32_transpose_ukernel__4x4_multi_switch_zip_neon())
|
| D | 4x4-multi-mov-zip-neon.c |
|   50: const uint32x4_t v2_0 = vld1q_u32(i0); i0 = (uint32_t*) ((uintptr_t) i0 + input_offset);  (local in xnn_x32_transpose_ukernel__4x4_multi_mov_zip_neon())
|   82: const uint32x4_t v2_0 = vld1q_u32(i0);  (local in xnn_x32_transpose_ukernel__4x4_multi_mov_zip_neon())
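In the x32 NEON "zip" kernels above, v2_0 is the first of four rows loaded with vld1q_u32 (the first definition also advances i0; judging by the listings, the reuse variants step one pointer by input_stride while the multi variants use per-row pointers advanced by input_offset, and the second definition is the tail path). The 4x4 tile is then transposed with two rounds of vzipq_u32. A minimal standalone sketch of that pattern, with an illustrative pointer-per-row interface rather than XNNPACK's actual one:

    #include <arm_neon.h>
    #include <stdint.h>

    // Sketch: transpose a 4x4 block of uint32_t with two rounds of vzipq_u32.
    // Interface and names are illustrative, not XNNPACK's.
    static void transpose_4x4_u32(const uint32_t* i0, const uint32_t* i1,
                                  const uint32_t* i2, const uint32_t* i3,
                                  uint32_t* o0, uint32_t* o1,
                                  uint32_t* o2, uint32_t* o3)
    {
      const uint32x4_t v2_0 = vld1q_u32(i0);  // row 0, as in the listings above
      const uint32x4_t v2_1 = vld1q_u32(i1);
      const uint32x4_t v2_2 = vld1q_u32(i2);
      const uint32x4_t v2_3 = vld1q_u32(i3);

      // Round 1: interleave rows 0/2 and 1/3.
      const uint32x4x2_t v1_0 = vzipq_u32(v2_0, v2_2);
      const uint32x4x2_t v1_1 = vzipq_u32(v2_1, v2_3);

      // Round 2: interleave the intermediates; the results are the columns.
      const uint32x4x2_t v0_0 = vzipq_u32(v1_0.val[0], v1_1.val[0]);
      const uint32x4x2_t v0_1 = vzipq_u32(v1_0.val[1], v1_1.val[1]);

      vst1q_u32(o0, v0_0.val[0]);  // column 0 of the input block
      vst1q_u32(o1, v0_0.val[1]);  // column 1
      vst1q_u32(o2, v0_1.val[0]);  // column 2
      vst1q_u32(o3, v0_1.val[1]);  // column 3
    }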
|
| D | 4x4-multi-multi-sse2.c |
|   58: const __m128i v2_0 = _mm_loadu_si128((const __m128i*) i0);  (local in xnn_x32_transpose_ukernel__4x4_multi_multi_sse2())
|   89: const __m128i v2_0 = _mm_loadu_si128((const __m128i*) i0);  (local in xnn_x32_transpose_ukernel__4x4_multi_multi_sse2())
|
| D | 4x4-reuse-mov-sse2.c |
|   46: const __m128i v2_0 = _mm_loadu_si128((const __m128i*) i0);  (local in xnn_x32_transpose_ukernel__4x4_reuse_mov_sse2())
|   87: const __m128i v2_0 = _mm_loadu_si128((const __m128i*) i0);  (local in xnn_x32_transpose_ukernel__4x4_reuse_mov_sse2())
|
| D | 4x4-reuse-switch-sse2.c |
|   45: const __m128i v2_0 = _mm_loadu_si128((const __m128i*) i0);  (local in xnn_x32_transpose_ukernel__4x4_reuse_switch_sse2())
|   85: const __m128i v2_0 = _mm_loadu_si128((const __m128i*) i0);  (local in xnn_x32_transpose_ukernel__4x4_reuse_switch_sse2())
|
| D | 4x4-multi-switch-sse2.c |
|   49: const __m128i v2_0 = _mm_loadu_si128((const __m128i*) i0);  (local in xnn_x32_transpose_ukernel__4x4_multi_switch_sse2())
|   89: const __m128i v2_0 = _mm_loadu_si128((const __m128i*) i0);  (local in xnn_x32_transpose_ukernel__4x4_multi_switch_sse2())
|
| D | 4x4-multi-mov-sse2.c |
|   50: const __m128i v2_0 = _mm_loadu_si128((const __m128i*) i0);  (local in xnn_x32_transpose_ukernel__4x4_multi_mov_sse2())
|   91: const __m128i v2_0 = _mm_loadu_si128((const __m128i*) i0);  (local in xnn_x32_transpose_ukernel__4x4_multi_mov_sse2())
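The x32 SSE2 kernels load v2_0 the same way with _mm_loadu_si128; a 4x4 32-bit transpose on SSE2 is conventionally done with 32-bit and then 64-bit unpacks. A sketch of that technique (the unpack wiring is the standard approach, not copied from the generated files, which only show the loads here):

    #include <emmintrin.h>
    #include <stdint.h>

    // Sketch: 4x4 uint32_t transpose with SSE2 unpack instructions.
    // Interface and names are illustrative, not XNNPACK's.
    static void transpose_4x4_u32_sse2(const uint32_t* i0, const uint32_t* i1,
                                       const uint32_t* i2, const uint32_t* i3,
                                       uint32_t* o0, uint32_t* o1,
                                       uint32_t* o2, uint32_t* o3)
    {
      const __m128i v2_0 = _mm_loadu_si128((const __m128i*) i0);  // as listed above
      const __m128i v2_1 = _mm_loadu_si128((const __m128i*) i1);
      const __m128i v2_2 = _mm_loadu_si128((const __m128i*) i2);
      const __m128i v2_3 = _mm_loadu_si128((const __m128i*) i3);

      // 32-bit interleave of row pairs 0/1 and 2/3.
      const __m128i t0 = _mm_unpacklo_epi32(v2_0, v2_1);  // a0 b0 a1 b1
      const __m128i t1 = _mm_unpackhi_epi32(v2_0, v2_1);  // a2 b2 a3 b3
      const __m128i t2 = _mm_unpacklo_epi32(v2_2, v2_3);  // c0 d0 c1 d1
      const __m128i t3 = _mm_unpackhi_epi32(v2_2, v2_3);  // c2 d2 c3 d3

      // 64-bit interleave finishes the transpose.
      _mm_storeu_si128((__m128i*) o0, _mm_unpacklo_epi64(t0, t2));  // a0 b0 c0 d0
      _mm_storeu_si128((__m128i*) o1, _mm_unpackhi_epi64(t0, t2));  // a1 b1 c1 d1
      _mm_storeu_si128((__m128i*) o2, _mm_unpacklo_epi64(t1, t3));  // a2 b2 c2 d2
      _mm_storeu_si128((__m128i*) o3, _mm_unpackhi_epi64(t1, t3));  // a3 b3 c3 d3
    }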
|
| /external/XNNPACK/src/x16-transpose/gen/ |
| D | 8x8-reuse-dec-zip-neon.c |
|   55: const uint16x8x2_t v2_0 = vzipq_u16(v3_0, v3_4);  (local in xnn_x16_transpose_ukernel__8x8_reuse_dec_zip_neon())
|   136: const uint16x8x2_t v2_0 = vzipq_u16(v3_0, v3_4);  (local in xnn_x16_transpose_ukernel__8x8_reuse_dec_zip_neon())
|
| D | 8x8-reuse-switch-zip-neon.c |
|   54: const uint16x8x2_t v2_0 = vzipq_u16(v3_0, v3_4);  (local in xnn_x16_transpose_ukernel__8x8_reuse_switch_zip_neon())
|   126: const uint16x8x2_t v2_0 = vzipq_u16(v3_0, v3_4);  (local in xnn_x16_transpose_ukernel__8x8_reuse_switch_zip_neon())
|
| D | 8x8-multi-dec-zip-neon.c |
|   63: const uint16x8x2_t v2_0 = vzipq_u16(v3_0, v3_4);  (local in xnn_x16_transpose_ukernel__8x8_multi_dec_zip_neon())
|   138: const uint16x8x2_t v2_0 = vzipq_u16(v3_0, v3_4);  (local in xnn_x16_transpose_ukernel__8x8_multi_dec_zip_neon())
|
| D | 8x8-reuse-multi-zip-neon.c |
|   79: const uint16x8x2_t v2_0 = vzipq_u16(v3_0, v3_4);  (local in xnn_x16_transpose_ukernel__8x8_reuse_multi_zip_neon())
|   137: const uint16x8x2_t v2_0 = vzipq_u16(v3_0, v3_4);  (local in xnn_x16_transpose_ukernel__8x8_reuse_multi_zip_neon())
|
| D | 8x8-reuse-mov-zip-neon.c |
|   55: const uint16x8x2_t v2_0 = vzipq_u16(v3_0, v3_4);  (local in xnn_x16_transpose_ukernel__8x8_reuse_mov_zip_neon())
|   143: const uint16x8x2_t v2_0 = vzipq_u16(v3_0, v3_4);  (local in xnn_x16_transpose_ukernel__8x8_reuse_mov_zip_neon())
|
| D | 8x8-multi-switch-zip-neon.c |
|   62: const uint16x8x2_t v2_0 = vzipq_u16(v3_0, v3_4);  (local in xnn_x16_transpose_ukernel__8x8_multi_switch_zip_neon())
|   128: const uint16x8x2_t v2_0 = vzipq_u16(v3_0, v3_4);  (local in xnn_x16_transpose_ukernel__8x8_multi_switch_zip_neon())
|
| D | 8x8-multi-mov-zip-neon.c |
|   63: const uint16x8x2_t v2_0 = vzipq_u16(v3_0, v3_4);  (local in xnn_x16_transpose_ukernel__8x8_multi_mov_zip_neon())
|   145: const uint16x8x2_t v2_0 = vzipq_u16(v3_0, v3_4);  (local in xnn_x16_transpose_ukernel__8x8_multi_mov_zip_neon())
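In the x16 kernels v2_0 is not a load but the first zip result: eight uint16x8_t rows v3_0..v3_7 are transposed with three rounds of vzipq_u16, and the listed definition vzipq_u16(v3_0, v3_4) is round one pairing rows i and i+4. A standalone sketch of the full three-round shuffle (the array interface and intermediate names are illustrative):

    #include <arm_neon.h>

    // Sketch: transpose an 8x8 block of uint16_t with three rounds of
    // vzipq_u16. "rows" holds the eight loaded source rows; "cols"
    // receives the eight transposed rows.
    static void transpose_8x8_u16(const uint16x8_t rows[8], uint16x8_t cols[8])
    {
      // Round 1: zip rows i and i+4 (this is the
      // v2_0 = vzipq_u16(v3_0, v3_4) stage visible in the listings above).
      const uint16x8x2_t z0 = vzipq_u16(rows[0], rows[4]);
      const uint16x8x2_t z1 = vzipq_u16(rows[1], rows[5]);
      const uint16x8x2_t z2 = vzipq_u16(rows[2], rows[6]);
      const uint16x8x2_t z3 = vzipq_u16(rows[3], rows[7]);

      // Round 2: zip intermediates that are again four positions apart.
      const uint16x8x2_t y0 = vzipq_u16(z0.val[0], z2.val[0]);
      const uint16x8x2_t y1 = vzipq_u16(z0.val[1], z2.val[1]);
      const uint16x8x2_t y2 = vzipq_u16(z1.val[0], z3.val[0]);
      const uint16x8x2_t y3 = vzipq_u16(z1.val[1], z3.val[1]);

      // Round 3: a final zip yields the columns of the original block.
      const uint16x8x2_t x0 = vzipq_u16(y0.val[0], y2.val[0]);
      const uint16x8x2_t x1 = vzipq_u16(y0.val[1], y2.val[1]);
      const uint16x8x2_t x2 = vzipq_u16(y1.val[0], y3.val[0]);
      const uint16x8x2_t x3 = vzipq_u16(y1.val[1], y3.val[1]);

      cols[0] = x0.val[0];  cols[1] = x0.val[1];
      cols[2] = x1.val[0];  cols[3] = x1.val[1];
      cols[4] = x2.val[0];  cols[5] = x2.val[1];
      cols[6] = x3.val[0];  cols[7] = x3.val[1];
    }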
|
| D | 8x8-multi-switch-sse2.c |
|   70: const __m128i v2_0 = _mm_unpacklo_epi16(v3_0, v3_1);  (local in xnn_x16_transpose_ukernel__8x8_multi_switch_sse2())
|   156: const __m128i v2_0 = _mm_unpacklo_epi16(v3_0, v3_1);  (local in xnn_x16_transpose_ukernel__8x8_multi_switch_sse2())
|
| D | 8x8-reuse-switch-sse2.c |
|   62: const __m128i v2_0 = _mm_unpacklo_epi16(v3_0, v3_1);  (local in xnn_x16_transpose_ukernel__8x8_reuse_switch_sse2())
|   154: const __m128i v2_0 = _mm_unpacklo_epi16(v3_0, v3_1);  (local in xnn_x16_transpose_ukernel__8x8_reuse_switch_sse2())
|
| D | 8x8-reuse-multi-sse2.c |
|   87: const __m128i v2_0 = _mm_unpacklo_epi16(v3_0, v3_1);  (local in xnn_x16_transpose_ukernel__8x8_reuse_multi_sse2())
|   166: const __m128i v2_0 = _mm_unpacklo_epi16(v3_0, v3_1);  (local in xnn_x16_transpose_ukernel__8x8_reuse_multi_sse2())
|
| D | 8x8-reuse-mov-sse2.c |
|   63: const __m128i v2_0 = _mm_unpacklo_epi16(v3_0, v3_1);  (local in xnn_x16_transpose_ukernel__8x8_reuse_mov_sse2())
|   164: const __m128i v2_0 = _mm_unpacklo_epi16(v3_0, v3_1);  (local in xnn_x16_transpose_ukernel__8x8_reuse_mov_sse2())
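The x16 SSE2 kernels start from _mm_unpacklo_epi16(v3_0, v3_1), i.e. a 16-bit interleave of adjacent rows; finishing the 8x8 transpose takes two more rounds at 32-bit and 64-bit granularity. A sketch of the whole sequence (only the first-round pairing is confirmed by the listings; loads, stores, and stride handling in the real kernels are omitted):

    #include <emmintrin.h>

    // Sketch: 8x8 uint16_t transpose via three rounds of SSE2 unpacks,
    // widening the interleave each round (16-bit, 32-bit, 64-bit).
    // Array interface is illustrative; r[] holds the eight loaded rows,
    // c[] receives the eight columns.
    static void transpose_8x8_u16_sse2(const __m128i r[8], __m128i c[8])
    {
      // Round 1: 16-bit interleave of adjacent rows (the
      // v2_0 = _mm_unpacklo_epi16(v3_0, v3_1) stage listed above).
      const __m128i t0 = _mm_unpacklo_epi16(r[0], r[1]);
      const __m128i t1 = _mm_unpackhi_epi16(r[0], r[1]);
      const __m128i t2 = _mm_unpacklo_epi16(r[2], r[3]);
      const __m128i t3 = _mm_unpackhi_epi16(r[2], r[3]);
      const __m128i t4 = _mm_unpacklo_epi16(r[4], r[5]);
      const __m128i t5 = _mm_unpackhi_epi16(r[4], r[5]);
      const __m128i t6 = _mm_unpacklo_epi16(r[6], r[7]);
      const __m128i t7 = _mm_unpackhi_epi16(r[6], r[7]);

      // Round 2: 32-bit interleave.
      const __m128i u0 = _mm_unpacklo_epi32(t0, t2);
      const __m128i u1 = _mm_unpackhi_epi32(t0, t2);
      const __m128i u2 = _mm_unpacklo_epi32(t1, t3);
      const __m128i u3 = _mm_unpackhi_epi32(t1, t3);
      const __m128i u4 = _mm_unpacklo_epi32(t4, t6);
      const __m128i u5 = _mm_unpackhi_epi32(t4, t6);
      const __m128i u6 = _mm_unpacklo_epi32(t5, t7);
      const __m128i u7 = _mm_unpackhi_epi32(t5, t7);

      // Round 3: 64-bit interleave; each result is one column of the block.
      c[0] = _mm_unpacklo_epi64(u0, u4);
      c[1] = _mm_unpackhi_epi64(u0, u4);
      c[2] = _mm_unpacklo_epi64(u1, u5);
      c[3] = _mm_unpackhi_epi64(u1, u5);
      c[4] = _mm_unpacklo_epi64(u2, u6);
      c[5] = _mm_unpackhi_epi64(u2, u6);
      c[6] = _mm_unpacklo_epi64(u3, u7);
      c[7] = _mm_unpackhi_epi64(u3, u7);
    }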
|