/external/XNNPACK/src/x32-transposec/gen/

| File | Line | Declaration of local v0_3 | Function |
| --- | --- | --- | --- |
| 4x4-reuse-multi-sse2.c | 73 | const __m128i v0_3 = _mm_unpackhi_epi64(v1_1, v1_3); | xnn_x32_transposec_ukernel__4x4_reuse_multi_sse2() |
| 4x4-reuse-multi-sse2.c | 108 | __m128i v0_3 = _mm_unpackhi_epi64(v1_1, v1_3); | xnn_x32_transposec_ukernel__4x4_reuse_multi_sse2() |
| 4x4-reuse-multi-wasmsimd.c | 70 | const v128_t v0_3 = wasm_v32x4_shuffle(v1_1, v1_3, 2, 6, 3, 7); | xnn_x32_transposec_ukernel__4x4_reuse_multi_wasmsimd() |
| 4x4-reuse-multi-wasmsimd.c | 104 | v128_t v0_3 = wasm_v32x4_shuffle(v1_1, v1_3, 2, 6, 3, 7); | xnn_x32_transposec_ukernel__4x4_reuse_multi_wasmsimd() |
| 4x4-multi-multi-sse2.c | 77 | const __m128i v0_3 = _mm_unpackhi_epi64(v1_1, v1_3); | xnn_x32_transposec_ukernel__4x4_multi_multi_sse2() |
| 4x4-multi-multi-sse2.c | 110 | __m128i v0_3 = _mm_unpackhi_epi64(v1_1, v1_3); | xnn_x32_transposec_ukernel__4x4_multi_multi_sse2() |
| 4x4-multi-multi-wasmsimd.c | 74 | const v128_t v0_3 = wasm_v32x4_shuffle(v1_1, v1_3, 2, 6, 3, 7); | xnn_x32_transposec_ukernel__4x4_multi_multi_wasmsimd() |
| 4x4-multi-multi-wasmsimd.c | 106 | v128_t v0_3 = wasm_v32x4_shuffle(v1_1, v1_3, 2, 6, 3, 7); | xnn_x32_transposec_ukernel__4x4_multi_multi_wasmsimd() |
| 4x4-reuse-mov-wasmsimd.c | 62 | const v128_t v0_3 = wasm_v32x4_shuffle(v1_1, v1_3, 2, 6, 3, 7); | xnn_x32_transposec_ukernel__4x4_reuse_mov_wasmsimd() |
| 4x4-reuse-mov-wasmsimd.c | 106 | v128_t v0_3 = wasm_v32x4_shuffle(v1_1, v1_3, 2, 6, 3, 7); | xnn_x32_transposec_ukernel__4x4_reuse_mov_wasmsimd() |
| 4x4-reuse-switch-wasmsimd.c | 61 | const v128_t v0_3 = wasm_v32x4_shuffle(v1_1, v1_3, 2, 6, 3, 7); | xnn_x32_transposec_ukernel__4x4_reuse_switch_wasmsimd() |
| 4x4-reuse-switch-wasmsimd.c | 104 | v128_t v0_3 = wasm_v32x4_shuffle(v1_1, v1_3, 2, 6, 3, 7); | xnn_x32_transposec_ukernel__4x4_reuse_switch_wasmsimd() |
| 4x4-reuse-mov-sse2.c | 65 | const __m128i v0_3 = _mm_unpackhi_epi64(v1_1, v1_3); | xnn_x32_transposec_ukernel__4x4_reuse_mov_sse2() |
| 4x4-reuse-mov-sse2.c | 110 | __m128i v0_3 = _mm_unpackhi_epi64(v1_1, v1_3); | xnn_x32_transposec_ukernel__4x4_reuse_mov_sse2() |
| 4x4-reuse-switch-sse2.c | 64 | const __m128i v0_3 = _mm_unpackhi_epi64(v1_1, v1_3); | xnn_x32_transposec_ukernel__4x4_reuse_switch_sse2() |
| 4x4-reuse-switch-sse2.c | 108 | __m128i v0_3 = _mm_unpackhi_epi64(v1_1, v1_3); | xnn_x32_transposec_ukernel__4x4_reuse_switch_sse2() |
| 4x4-multi-switch-wasmsimd.c | 65 | const v128_t v0_3 = wasm_v32x4_shuffle(v1_1, v1_3, 2, 6, 3, 7); | xnn_x32_transposec_ukernel__4x4_multi_switch_wasmsimd() |
| 4x4-multi-switch-wasmsimd.c | 106 | v128_t v0_3 = wasm_v32x4_shuffle(v1_1, v1_3, 2, 6, 3, 7); | xnn_x32_transposec_ukernel__4x4_multi_switch_wasmsimd() |
| 4x4-multi-mov-sse2.c | 69 | const __m128i v0_3 = _mm_unpackhi_epi64(v1_1, v1_3); | xnn_x32_transposec_ukernel__4x4_multi_mov_sse2() |
| 4x4-multi-mov-sse2.c | 112 | __m128i v0_3 = _mm_unpackhi_epi64(v1_1, v1_3); | xnn_x32_transposec_ukernel__4x4_multi_mov_sse2() |
| 4x4-multi-mov-wasmsimd.c | 66 | const v128_t v0_3 = wasm_v32x4_shuffle(v1_1, v1_3, 2, 6, 3, 7); | xnn_x32_transposec_ukernel__4x4_multi_mov_wasmsimd() |
| 4x4-multi-mov-wasmsimd.c | 108 | v128_t v0_3 = wasm_v32x4_shuffle(v1_1, v1_3, 2, 6, 3, 7); | xnn_x32_transposec_ukernel__4x4_multi_mov_wasmsimd() |
| 4x4-multi-switch-sse2.c | 68 | const __m128i v0_3 = _mm_unpackhi_epi64(v1_1, v1_3); | xnn_x32_transposec_ukernel__4x4_multi_switch_sse2() |
| 4x4-multi-switch-sse2.c | 110 | __m128i v0_3 = _mm_unpackhi_epi64(v1_1, v1_3); | xnn_x32_transposec_ukernel__4x4_multi_switch_sse2() |
|
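The x32 hits above all appear to be the final interleave stage of a 4x4 transpose of 32-bit elements: the SSE2 kernels build it from unpack intrinsics, while the WASM SIMD kernels express their interleaves with wasm_v32x4_shuffle in a slightly different decomposition. The following is a minimal standalone sketch of the SSE2 pattern; only the v0_3 line is quoted from the index, and the function name transpose_4x4_u32, the load/store framing, and the stage-1 variable names are illustrative assumptions rather than XNNPACK code.

```c
#include <stdint.h>
#include <emmintrin.h>  /* SSE2 intrinsics */

/* Standalone 4x4 transpose of 32-bit elements using the two-stage unpack
 * pattern seen in the SSE2 kernels above.  Only the v0_3 line is quoted
 * from the index; everything else here is an illustrative reconstruction. */
static void transpose_4x4_u32(const uint32_t in[16], uint32_t out[16]) {
  const __m128i r0 = _mm_loadu_si128((const __m128i*) (in + 0));   /* a0 a1 a2 a3 */
  const __m128i r1 = _mm_loadu_si128((const __m128i*) (in + 4));   /* b0 b1 b2 b3 */
  const __m128i r2 = _mm_loadu_si128((const __m128i*) (in + 8));   /* c0 c1 c2 c3 */
  const __m128i r3 = _mm_loadu_si128((const __m128i*) (in + 12));  /* d0 d1 d2 d3 */

  /* Stage 1: interleave 32-bit lanes of adjacent rows. */
  const __m128i v1_0 = _mm_unpacklo_epi32(r0, r1);  /* a0 b0 a1 b1 */
  const __m128i v1_1 = _mm_unpackhi_epi32(r0, r1);  /* a2 b2 a3 b3 */
  const __m128i v1_2 = _mm_unpacklo_epi32(r2, r3);  /* c0 d0 c1 d1 */
  const __m128i v1_3 = _mm_unpackhi_epi32(r2, r3);  /* c2 d2 c3 d3 */

  /* Stage 2: interleave 64-bit halves; each result is one transposed row. */
  const __m128i v0_0 = _mm_unpacklo_epi64(v1_0, v1_2);  /* a0 b0 c0 d0 */
  const __m128i v0_1 = _mm_unpackhi_epi64(v1_0, v1_2);  /* a1 b1 c1 d1 */
  const __m128i v0_2 = _mm_unpacklo_epi64(v1_1, v1_3);  /* a2 b2 c2 d2 */
  const __m128i v0_3 = _mm_unpackhi_epi64(v1_1, v1_3);  /* a3 b3 c3 d3: the indexed line */

  _mm_storeu_si128((__m128i*) (out + 0),  v0_0);
  _mm_storeu_si128((__m128i*) (out + 4),  v0_1);
  _mm_storeu_si128((__m128i*) (out + 8),  v0_2);
  _mm_storeu_si128((__m128i*) (out + 12), v0_3);
}
```

A quick correctness check for the sketch: after the call, out[j*4 + i] should equal in[i*4 + j] for every i and j.
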
/external/XNNPACK/src/x8-transposec/gen/

| File | Line | Declaration of local v0_3 | Function |
| --- | --- | --- | --- |
| 8x8-reuse-dec-zip-neon.c | 67 | const uint8x8x2_t v0_3 = vzip_u8(v1_1.val[1], v1_3.val[1]); | xnn_x8_transposec_ukernel__8x8_reuse_dec_zip_neon() |
| 8x8-reuse-dec-zip-neon.c | 148 | const uint8x8x2_t v0_3 = vzip_u8(v1_1.val[1], v1_3.val[1]); | xnn_x8_transposec_ukernel__8x8_reuse_dec_zip_neon() |
| 8x8-multi-switch-zip-neon.c | 74 | const uint8x8x2_t v0_3 = vzip_u8(v1_1.val[1], v1_3.val[1]); | xnn_x8_transposec_ukernel__8x8_multi_switch_zip_neon() |
| 8x8-multi-switch-zip-neon.c | 140 | const uint8x8x2_t v0_3 = vzip_u8(v1_1.val[1], v1_3.val[1]); | xnn_x8_transposec_ukernel__8x8_multi_switch_zip_neon() |
| 8x8-reuse-multi-zip-neon.c | 91 | const uint8x8x2_t v0_3 = vzip_u8(v1_1.val[1], v1_3.val[1]); | xnn_x8_transposec_ukernel__8x8_reuse_multi_zip_neon() |
| 8x8-reuse-multi-zip-neon.c | 149 | const uint8x8x2_t v0_3 = vzip_u8(v1_1.val[1], v1_3.val[1]); | xnn_x8_transposec_ukernel__8x8_reuse_multi_zip_neon() |
| 8x8-multi-dec-zip-neon.c | 75 | const uint8x8x2_t v0_3 = vzip_u8(v1_1.val[1], v1_3.val[1]); | xnn_x8_transposec_ukernel__8x8_multi_dec_zip_neon() |
| 8x8-multi-dec-zip-neon.c | 150 | const uint8x8x2_t v0_3 = vzip_u8(v1_1.val[1], v1_3.val[1]); | xnn_x8_transposec_ukernel__8x8_multi_dec_zip_neon() |
| 8x8-reuse-mov-zip-neon.c | 67 | const uint8x8x2_t v0_3 = vzip_u8(v1_1.val[1], v1_3.val[1]); | xnn_x8_transposec_ukernel__8x8_reuse_mov_zip_neon() |
| 8x8-reuse-mov-zip-neon.c | 155 | const uint8x8x2_t v0_3 = vzip_u8(v1_1.val[1], v1_3.val[1]); | xnn_x8_transposec_ukernel__8x8_reuse_mov_zip_neon() |
| 8x8-reuse-switch-zip-neon.c | 66 | const uint8x8x2_t v0_3 = vzip_u8(v1_1.val[1], v1_3.val[1]); | xnn_x8_transposec_ukernel__8x8_reuse_switch_zip_neon() |
| 8x8-reuse-switch-zip-neon.c | 138 | const uint8x8x2_t v0_3 = vzip_u8(v1_1.val[1], v1_3.val[1]); | xnn_x8_transposec_ukernel__8x8_reuse_switch_zip_neon() |
|
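The x8 hits are the last round of a byte-granularity zip network for an 8x8 transpose. Below is a minimal standalone sketch of that network, assuming the usual pairing of vectors four positions apart in each round; only the v0_3 = vzip_u8(v1_1.val[1], v1_3.val[1]) step is taken from the index, and the names r[], v2_*, and transpose_8x8_u8 are illustrative.

```c
#include <stdint.h>
#include <arm_neon.h>

/* Standalone 8x8 byte transpose via three rounds of vzip_u8.  Only the
 * v0_3 step is quoted from the index; the earlier rounds and the
 * load/store framing are an illustrative reconstruction. */
static void transpose_8x8_u8(const uint8_t in[64], uint8_t out[64]) {
  uint8x8_t r[8];
  for (int i = 0; i < 8; i++) r[i] = vld1_u8(in + 8 * i);

  /* Round 1: interleave rows four apart at byte granularity. */
  const uint8x8x2_t v2_0 = vzip_u8(r[0], r[4]);
  const uint8x8x2_t v2_1 = vzip_u8(r[1], r[5]);
  const uint8x8x2_t v2_2 = vzip_u8(r[2], r[6]);
  const uint8x8x2_t v2_3 = vzip_u8(r[3], r[7]);

  /* Round 2: interleave the round-1 halves that are again four vectors apart. */
  const uint8x8x2_t v1_0 = vzip_u8(v2_0.val[0], v2_2.val[0]);
  const uint8x8x2_t v1_1 = vzip_u8(v2_0.val[1], v2_2.val[1]);
  const uint8x8x2_t v1_2 = vzip_u8(v2_1.val[0], v2_3.val[0]);
  const uint8x8x2_t v1_3 = vzip_u8(v2_1.val[1], v2_3.val[1]);

  /* Round 3: the step the index points at; each pair is two transposed rows. */
  const uint8x8x2_t v0_0 = vzip_u8(v1_0.val[0], v1_2.val[0]);
  const uint8x8x2_t v0_1 = vzip_u8(v1_0.val[1], v1_2.val[1]);
  const uint8x8x2_t v0_2 = vzip_u8(v1_1.val[0], v1_3.val[0]);
  const uint8x8x2_t v0_3 = vzip_u8(v1_1.val[1], v1_3.val[1]);

  vst1_u8(out + 0,  v0_0.val[0]);
  vst1_u8(out + 8,  v0_0.val[1]);
  vst1_u8(out + 16, v0_1.val[0]);
  vst1_u8(out + 24, v0_1.val[1]);
  vst1_u8(out + 32, v0_2.val[0]);
  vst1_u8(out + 40, v0_2.val[1]);
  vst1_u8(out + 48, v0_3.val[0]);
  vst1_u8(out + 56, v0_3.val[1]);
}
```

Each vzip_u8 returns both interleave halves as a uint8x8x2_t, so three rounds of four zips turn the eight input rows into the eight transposed rows; in the sketch, v0_3 carries the last two output rows.
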
/external/XNNPACK/src/x16-transposec/gen/

| File | Line | Declaration of local v0_3 | Function |
| --- | --- | --- | --- |
| 8x8-reuse-dec-zip-neon.c | 67 | const uint16x8x2_t v0_3 = vzipq_u16(v1_1.val[1], v1_3.val[1]); | xnn_x16_transposec_ukernel__8x8_reuse_dec_zip_neon() |
| 8x8-reuse-dec-zip-neon.c | 148 | const uint16x8x2_t v0_3 = vzipq_u16(v1_1.val[1], v1_3.val[1]); | xnn_x16_transposec_ukernel__8x8_reuse_dec_zip_neon() |
| 8x8-multi-switch-zip-neon.c | 74 | const uint16x8x2_t v0_3 = vzipq_u16(v1_1.val[1], v1_3.val[1]); | xnn_x16_transposec_ukernel__8x8_multi_switch_zip_neon() |
| 8x8-multi-switch-zip-neon.c | 140 | const uint16x8x2_t v0_3 = vzipq_u16(v1_1.val[1], v1_3.val[1]); | xnn_x16_transposec_ukernel__8x8_multi_switch_zip_neon() |
| 8x8-reuse-multi-zip-neon.c | 91 | const uint16x8x2_t v0_3 = vzipq_u16(v1_1.val[1], v1_3.val[1]); | xnn_x16_transposec_ukernel__8x8_reuse_multi_zip_neon() |
| 8x8-reuse-multi-zip-neon.c | 149 | const uint16x8x2_t v0_3 = vzipq_u16(v1_1.val[1], v1_3.val[1]); | xnn_x16_transposec_ukernel__8x8_reuse_multi_zip_neon() |
| 8x8-multi-dec-zip-neon.c | 75 | const uint16x8x2_t v0_3 = vzipq_u16(v1_1.val[1], v1_3.val[1]); | xnn_x16_transposec_ukernel__8x8_multi_dec_zip_neon() |
| 8x8-multi-dec-zip-neon.c | 150 | const uint16x8x2_t v0_3 = vzipq_u16(v1_1.val[1], v1_3.val[1]); | xnn_x16_transposec_ukernel__8x8_multi_dec_zip_neon() |
| 8x8-reuse-switch-zip-neon.c | 66 | const uint16x8x2_t v0_3 = vzipq_u16(v1_1.val[1], v1_3.val[1]); | xnn_x16_transposec_ukernel__8x8_reuse_switch_zip_neon() |
| 8x8-reuse-switch-zip-neon.c | 138 | const uint16x8x2_t v0_3 = vzipq_u16(v1_1.val[1], v1_3.val[1]); | xnn_x16_transposec_ukernel__8x8_reuse_switch_zip_neon() |
| 8x8-reuse-mov-zip-neon.c | 67 | const uint16x8x2_t v0_3 = vzipq_u16(v1_1.val[1], v1_3.val[1]); | xnn_x16_transposec_ukernel__8x8_reuse_mov_zip_neon() |
| 8x8-reuse-mov-zip-neon.c | 155 | const uint16x8x2_t v0_3 = vzipq_u16(v1_1.val[1], v1_3.val[1]); | xnn_x16_transposec_ukernel__8x8_reuse_mov_zip_neon() |
| 8x8-multi-mov-zip-neon.c | 75 | const uint16x8x2_t v0_3 = vzipq_u16(v1_1.val[1], v1_3.val[1]); | xnn_x16_transposec_ukernel__8x8_multi_mov_zip_neon() |
| 8x8-multi-mov-zip-neon.c | 157 | const uint16x8x2_t v0_3 = vzipq_u16(v1_1.val[1], v1_3.val[1]); | xnn_x16_transposec_ukernel__8x8_multi_mov_zip_neon() |
|
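The x16 hits mirror the x8 ones: the same three-round zip network, on uint16x8_t with vzipq_u16 in place of vzip_u8. A compact loop form of that network is sketched below as an assumption-laden illustration; the generated kernels unroll it with explicitly named stages (the v1_* and v0_* locals in the hits above), and transpose_8x8_u16 and the array framing are my own names.

```c
#include <stdint.h>
#include <arm_neon.h>

/* Compact 8x8 transpose of 16-bit elements: three rounds, each zipping
 * vectors four positions apart.  Illustrative loop form of the zip
 * network behind the listed x16 kernels, not XNNPACK code. */
static void transpose_8x8_u16(const uint16_t in[64], uint16_t out[64]) {
  uint16x8_t v[8], t[8];
  for (int i = 0; i < 8; i++) v[i] = vld1q_u16(in + 8 * i);

  for (int round = 0; round < 3; round++) {
    for (int i = 0; i < 4; i++) {
      const uint16x8x2_t z = vzipq_u16(v[i], v[i + 4]);
      t[2 * i + 0] = z.val[0];
      t[2 * i + 1] = z.val[1];
    }
    for (int i = 0; i < 8; i++) v[i] = t[i];
  }

  for (int i = 0; i < 8; i++) vst1q_u16(out + 8 * i, v[i]);
}
```

As with the byte version, out[j*8 + i] equals in[i*8 + j] afterwards; the loop form trades the explicit per-stage names for brevity.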