| /external/XNNPACK/src/f32-gemm/gen/ | 
| 1x8-minmax-neonfma-lane-ld64.c
|   48: const float32x2_t va0 = vld1_f32(a0); a0 += 2;  in xnn_f32_gemm_minmax_ukernel_1x8__neonfma_lane_ld64()  (local)
|   62: const float32x4_t va0 = vld1q_dup_f32(a0); a0 += 1;  in xnn_f32_gemm_minmax_ukernel_1x8__neonfma_lane_ld64()  (local)
  | 
| 1x8-minmax-neon-lane-ld64.c
|   48: const float32x2_t va0 = vld1_f32(a0); a0 += 2;  in xnn_f32_gemm_minmax_ukernel_1x8__neon_lane_ld64()  (local)
|   62: const float32x4_t va0 = vld1q_dup_f32(a0); a0 += 1;  in xnn_f32_gemm_minmax_ukernel_1x8__neon_lane_ld64()  (local)
  | 
| 1x8-minmax-neon-dup-ld64.c
|   48: const float32x2_t va0 = vld1_f32(a0); a0 += 2;  in xnn_f32_gemm_minmax_ukernel_1x8__neon_dup_ld64()  (local)
|   64: const float32x4_t va0 = vld1q_dup_f32(a0); a0 += 1;  in xnn_f32_gemm_minmax_ukernel_1x8__neon_dup_ld64()  (local)
  | 
| 1x8-minmax-neonfma-dup-ld64.c
|   48: const float32x2_t va0 = vld1_f32(a0); a0 += 2;  in xnn_f32_gemm_minmax_ukernel_1x8__neonfma_dup_ld64()  (local)
|   64: const float32x4_t va0 = vld1q_dup_f32(a0); a0 += 1;  in xnn_f32_gemm_minmax_ukernel_1x8__neonfma_dup_ld64()  (local)
  | 
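The four NEON files above are one kernel in four spellings, and the paired hits are the two halves of a single load idiom: the main loop pulls two A elements at a time into a float32x2_t (the "ld64" in the name: one 64-bit load), while the remainder path broadcasts the final odd element with vld1q_dup_f32. A minimal sketch of the lane flavor follows, narrowed to four output columns and using AArch64 intrinsics; everything except the two va0 statements (accumulator and weight names, the kc-in-bytes convention) is illustrative rather than copied from the kernels:

    #include <arm_neon.h>
    #include <stddef.h>

    // Sketch of the ld64 idiom. kc counts bytes of K, as in XNNPACK.
    static inline float32x4_t f32_gemm_ld64_sketch(
        size_t kc, const float* a0, const float* w, float32x4_t vacc0x0123)
    {
      size_t k = kc;
      for (; k >= 2 * sizeof(float); k -= 2 * sizeof(float)) {
        // Main loop: one 64-bit load covers two A elements...
        const float32x2_t va0 = vld1_f32(a0); a0 += 2;
        // ...consumed one lane at a time against two blocks of packed weights.
        vacc0x0123 = vfmaq_lane_f32(vacc0x0123, vld1q_f32(w + 0), va0, 0);
        vacc0x0123 = vfmaq_lane_f32(vacc0x0123, vld1q_f32(w + 4), va0, 1);
        w += 8;
      }
      if (k != 0) {
        // Remainder: K was odd, so broadcast the last A element to all lanes.
        const float32x4_t va0 = vld1q_dup_f32(a0); a0 += 1;
        vacc0x0123 = vfmaq_f32(vacc0x0123, va0, vld1q_f32(w));
      }
      return vacc0x0123;
    }

The neon (non-fma) files use vmlaq_lane_f32 instead, and the dup files splat each lane with vdupq_lane_f32 before a plain vfmaq_f32/vmlaq_f32; those extra splat statements are why the dup files' second hit sits at line 64 rather than 62. The loads themselves are identical across all four files.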
| 1x8-wasmsimd-splat.c
|   48: const v128_t va0 = wasm_v128_load(a0);  in xnn_f32_gemm_ukernel_1x8__wasmsimd_splat()  (local)
|   85: const v128_t va0 = wasm_v128_load32_splat(a0);  in xnn_f32_gemm_ukernel_1x8__wasmsimd_splat()  (local)
  | 
| 1x8-wasmrelaxedsimd-fma-splat.c
|   48: const v128_t va0 = wasm_v128_load(a0);  in xnn_f32_gemm_ukernel_1x8__wasmrelaxedsimd_fma_splat()  (local)
|   85: const v128_t va0 = wasm_v128_load32_splat(a0);  in xnn_f32_gemm_ukernel_1x8__wasmrelaxedsimd_fma_splat()  (local)
  | 
| 1x8-relu-wasmsimd-splat.c
|   48: const v128_t va0 = wasm_v128_load(a0);  in xnn_f32_gemm_relu_ukernel_1x8__wasmsimd_splat()  (local)
|   85: const v128_t va0 = wasm_v128_load32_splat(a0);  in xnn_f32_gemm_relu_ukernel_1x8__wasmsimd_splat()  (local)
  | 
| 1x8-relu-wasmrelaxedsimd-fma-splat.c
|   48: const v128_t va0 = wasm_v128_load(a0);  in xnn_f32_gemm_relu_ukernel_1x8__wasmrelaxedsimd_fma_splat()  (local)
|   85: const v128_t va0 = wasm_v128_load32_splat(a0);  in xnn_f32_gemm_relu_ukernel_1x8__wasmrelaxedsimd_fma_splat()  (local)
  | 
| 1x8-minmax-wasmrelaxedsimd-fma-splat.c
|   50: const v128_t va0 = wasm_v128_load(a0);  in xnn_f32_gemm_minmax_ukernel_1x8__wasmrelaxedsimd_fma_splat()  (local)
|   87: const v128_t va0 = wasm_v128_load32_splat(a0);  in xnn_f32_gemm_minmax_ukernel_1x8__wasmrelaxedsimd_fma_splat()  (local)
  | 
| 1x8-minmax-wasmsimd-arm-splat.c
|   50: const v128_t va0 = wasm_v128_load(a0);  in xnn_f32_gemm_minmax_ukernel_1x8__wasmsimd_arm_splat()  (local)
|   87: const v128_t va0 = wasm_v128_load32_splat(a0);  in xnn_f32_gemm_minmax_ukernel_1x8__wasmsimd_arm_splat()  (local)
  | 
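The six wasmsimd/wasmrelaxedsimd "splat" files above express the same two-phase idiom with 128-bit WAsm SIMD: wasm_v128_load grabs four A elements per main-loop trip and each lane is splatted before the multiply, while the remainder uses the single-instruction load-and-splat wasm_v128_load32_splat. A sketch under the same assumptions as the NEON one (four columns, kc in bytes, illustrative names):

    #include <wasm_simd128.h>
    #include <stddef.h>

    static inline v128_t f32_gemm_splat_sketch(
        size_t kc, const float* a0, const float* w, v128_t vacc0x0123)
    {
      size_t k = kc;
      for (; k >= 4 * sizeof(float); k -= 4 * sizeof(float)) {
        const v128_t va0 = wasm_v128_load(a0);  // four A elements per trip
        a0 += 4;
        // Splat each lane in turn and accumulate against its weight block.
        const v128_t va0c0 = wasm_v32x4_shuffle(va0, va0, 0, 0, 0, 0);
        vacc0x0123 = wasm_f32x4_add(vacc0x0123,
            wasm_f32x4_mul(va0c0, wasm_v128_load(w + 0)));
        const v128_t va0c1 = wasm_v32x4_shuffle(va0, va0, 1, 1, 1, 1);
        vacc0x0123 = wasm_f32x4_add(vacc0x0123,
            wasm_f32x4_mul(va0c1, wasm_v128_load(w + 4)));
        const v128_t va0c2 = wasm_v32x4_shuffle(va0, va0, 2, 2, 2, 2);
        vacc0x0123 = wasm_f32x4_add(vacc0x0123,
            wasm_f32x4_mul(va0c2, wasm_v128_load(w + 8)));
        const v128_t va0c3 = wasm_v32x4_shuffle(va0, va0, 3, 3, 3, 3);
        vacc0x0123 = wasm_f32x4_add(vacc0x0123,
            wasm_f32x4_mul(va0c3, wasm_v128_load(w + 12)));
        w += 16;
      }
      for (; k != 0; k -= sizeof(float)) {
        // Remainder: load one element and splat it in a single instruction.
        const v128_t va0 = wasm_v128_load32_splat(a0);  a0 += 1;
        vacc0x0123 = wasm_f32x4_add(vacc0x0123,
            wasm_f32x4_mul(va0, wasm_v128_load(w)));
        w += 4;
      }
      return vacc0x0123;
    }

The -fma- files fuse the mul/add pair with a relaxed-SIMD multiply-add, and the relu/minmax variants differ only in how the finished accumulator is clamped afterwards, which is why the same two hits recur at 48/85 (plain, relu) and 50/87 (minmax) across all six files.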
| /external/XNNPACK/src/f32-gemm/gen-inc/ | 
| 1x8inc-minmax-neon-lane-ld64.c
|   50: const float32x2_t va0 = vld1_f32(a0); a0 += 2;  in xnn_f32_gemminc_minmax_ukernel_1x8__neon_lane_ld64()  (local)
|   64: const float32x4_t va0 = vld1q_dup_f32(a0); a0 += 1;  in xnn_f32_gemminc_minmax_ukernel_1x8__neon_lane_ld64()  (local)
  | 
| 1x8inc-minmax-neonfma-lane-ld64.c
|   50: const float32x2_t va0 = vld1_f32(a0); a0 += 2;  in xnn_f32_gemminc_minmax_ukernel_1x8__neonfma_lane_ld64()  (local)
|   64: const float32x4_t va0 = vld1q_dup_f32(a0); a0 += 1;  in xnn_f32_gemminc_minmax_ukernel_1x8__neonfma_lane_ld64()  (local)
  | 
| 1x8inc-minmax-neon-dup-ld64.c
|   50: const float32x2_t va0 = vld1_f32(a0); a0 += 2;  in xnn_f32_gemminc_minmax_ukernel_1x8__neon_dup_ld64()  (local)
|   66: const float32x4_t va0 = vld1q_dup_f32(a0); a0 += 1;  in xnn_f32_gemminc_minmax_ukernel_1x8__neon_dup_ld64()  (local)
  | 
| 1x8inc-minmax-neonfma-dup-ld64.c
|   50: const float32x2_t va0 = vld1_f32(a0); a0 += 2;  in xnn_f32_gemminc_minmax_ukernel_1x8__neonfma_dup_ld64()  (local)
|   66: const float32x4_t va0 = vld1q_dup_f32(a0); a0 += 1;  in xnn_f32_gemminc_minmax_ukernel_1x8__neonfma_dup_ld64()  (local)
  | 
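The gen-inc/ twins (xnn_f32_gemminc_*) contain byte-for-byte the same va0 loads as the gen/ files above; every hit simply sits two lines lower (48 becomes 50, 62 becomes 64, 64 becomes 66, and likewise 49/92 become 51/94 for the f16 pair further down) because only the accumulator seeding at the top of the kernel differs. A hypothetical side-by-side of that one difference:

    #include <arm_neon.h>

    // gen/ kernels seed the accumulator from the bias embedded in the
    // packed weights; gen-inc/ kernels seed it from a caller-provided
    // buffer of partial results, so a large K can be split across calls.
    static float32x4_t seed_from_weights(const float** w) {
      const float32x4_t vacc0x0123 = vld1q_f32(*w); *w += 4;
      return vacc0x0123;
    }
    static float32x4_t seed_from_acc(const float* acc) {
      return vld1q_f32(acc);  // partial results of a previous K slice
    }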
| /external/XNNPACK/src/f16-gemm/gen/ | 
| 1x8-minmax-neonfp16arith-ld64.c
|   49: const float16x4_t va0 = vld1_f16(a0); a0 += 4;  in xnn_f16_gemm_minmax_ukernel_1x8__neonfp16arith_ld64()  (local)
|   92: const float16x8_t va0 = vld1q_dup_f16(a0); a0 += 1;  in xnn_f16_gemm_minmax_ukernel_1x8__neonfp16arith_ld64()  (local)
  | 
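The f16 kernel repeats the idiom at half precision: vld1_f16 pulls four halves per main-loop trip, and the remainder is a loop (up to three leftover elements) that broadcasts one half at a time with vld1q_dup_f16. A sketch, requiring the ARMv8.2-A neonfp16arith extension; names other than the va0 statements are illustrative:

    #include <arm_neon.h>
    #include <stddef.h>

    static inline float16x8_t f16_gemm_ld64_sketch(
        size_t kc, const float16_t* a0, const float16_t* w,
        float16x8_t vacc0x01234567)
    {
      size_t k = kc;
      for (; k >= 4 * sizeof(float16_t); k -= 4 * sizeof(float16_t)) {
        const float16x4_t va0 = vld1_f16(a0); a0 += 4;  // four halves at once
        vacc0x01234567 = vfmaq_lane_f16(vacc0x01234567, vld1q_f16(w +  0), va0, 0);
        vacc0x01234567 = vfmaq_lane_f16(vacc0x01234567, vld1q_f16(w +  8), va0, 1);
        vacc0x01234567 = vfmaq_lane_f16(vacc0x01234567, vld1q_f16(w + 16), va0, 2);
        vacc0x01234567 = vfmaq_lane_f16(vacc0x01234567, vld1q_f16(w + 24), va0, 3);
        w += 32;
      }
      for (; k != 0; k -= sizeof(float16_t)) {
        // Remainder loop: broadcast one leftover half per iteration.
        const float16x8_t va0 = vld1q_dup_f16(a0); a0 += 1;
        vacc0x01234567 = vfmaq_f16(vacc0x01234567, va0, vld1q_f16(w)); w += 8;
      }
      return vacc0x01234567;
    }

Note that a single float16x8_t holds all eight output columns, so the 1x8 f16 kernel gets by with one accumulator where the f32 kernels need two.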
| /external/XNNPACK/src/f32-igemm/gen/ | 
| 1x8-minmax-neon-lane-ld64.c
|   61: const float32x2_t va0 = vld1_f32(a0); a0 += 2;  in xnn_f32_igemm_minmax_ukernel_1x8__neon_lane_ld64()  (local)
|   76: const float32x4_t va0 = vld1q_dup_f32(a0);  in xnn_f32_igemm_minmax_ukernel_1x8__neon_lane_ld64()  (local)
  | 
| 1x8-minmax-neonfma-lane-ld64.c
|   61: const float32x2_t va0 = vld1_f32(a0); a0 += 2;  in xnn_f32_igemm_minmax_ukernel_1x8__neonfma_lane_ld64()  (local)
|   76: const float32x4_t va0 = vld1q_dup_f32(a0);  in xnn_f32_igemm_minmax_ukernel_1x8__neonfma_lane_ld64()  (local)
  | 
| 1x8-minmax-neonfma-dup-ld64.c
|   61: const float32x2_t va0 = vld1_f32(a0); a0 += 2;  in xnn_f32_igemm_minmax_ukernel_1x8__neonfma_dup_ld64()  (local)
|   78: const float32x4_t va0 = vld1q_dup_f32(a0);  in xnn_f32_igemm_minmax_ukernel_1x8__neonfma_dup_ld64()  (local)
  | 
| 1x8-minmax-neon-dup-ld64.c
|   61: const float32x2_t va0 = vld1_f32(a0); a0 += 2;  in xnn_f32_igemm_minmax_ukernel_1x8__neon_dup_ld64()  (local)
|   78: const float32x4_t va0 = vld1q_dup_f32(a0);  in xnn_f32_igemm_minmax_ukernel_1x8__neon_dup_ld64()  (local)
  | 
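The IGEMM files wrap the identical inner loop in a pointer loop: a0 is fetched from an indirection buffer for every contributing input tap instead of marching through a dense matrix. That structure also explains the one textual difference in the hits above: with a step of two, the f32 remainder handles at most one element, and since the tap's pointer is dead afterwards the generated code drops the a0 += 1. A sketch of the outer loop, treating the a/zero/ks conventions as assumptions about the calling contract:

    #include <arm_neon.h>
    #include <stddef.h>

    static float32x4_t f32_igemm_sketch(
        size_t ks, size_t kc, const float** a, const float* zero, const float* w)
    {
      float32x4_t vacc0x0123 = vld1q_f32(w); w += 4;
      do {
        const float* a0 = *a++;     // next tap pointer from the indirection buffer
        if (a0 == NULL) a0 = zero;  // padded taps read a shared zero buffer
        size_t k = kc;
        for (; k >= 2 * sizeof(float); k -= 2 * sizeof(float)) {
          const float32x2_t va0 = vld1_f32(a0); a0 += 2;
          vacc0x0123 = vfmaq_lane_f32(vacc0x0123, vld1q_f32(w + 0), va0, 0);
          vacc0x0123 = vfmaq_lane_f32(vacc0x0123, vld1q_f32(w + 4), va0, 1);
          w += 8;
        }
        if (k != 0) {
          // No a0 += 1 here: this tap's pointer is not used again.
          const float32x4_t va0 = vld1q_dup_f32(a0);
          vacc0x0123 = vfmaq_f32(vacc0x0123, va0, vld1q_f32(w)); w += 4;
        }
        ks -= sizeof(void*);        // one indirection pointer consumed
      } while (ks != 0);
      return vacc0x0123;
    }

The f16 igemm entry further down keeps its a0 += 1 (line 101) because its remainder loops over up to three halves, so the pointer must still advance between iterations.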
| /external/XNNPACK/src/f16-gemm/gen-inc/ | 
| 1x8inc-minmax-neonfp16arith-ld64.c
|   51: const float16x4_t va0 = vld1_f16(a0); a0 += 4;  in xnn_f16_gemminc_minmax_ukernel_1x8__neonfp16arith_ld64()  (local)
|   94: const float16x8_t va0 = vld1q_dup_f16(a0); a0 += 1;  in xnn_f16_gemminc_minmax_ukernel_1x8__neonfp16arith_ld64()  (local)
  | 
| /external/XNNPACK/src/f16-igemm/gen/ | 
| 1x8-minmax-neonfp16arith-ld64.c
|   60: const float16x4_t va0 = vld1_f16(a0); a0 += 4;  in xnn_f16_igemm_minmax_ukernel_1x8__neonfp16arith_ld64()  (local)
|  101: const float16x8_t va0 = vld1q_dup_f16(a0); a0 += 1;  in xnn_f16_igemm_minmax_ukernel_1x8__neonfp16arith_ld64()  (local)
  | 
| /external/XNNPACK/src/qs8-gemm/gen/ | 
| 1x4c2-xw-minmax-fp32-xop.c
|   56: const __m128i va0 = _mm_loadl_epi64((const __m128i*) a0);  in xnn_qs8_gemm_xw_minmax_fp32_ukernel_1x4c2__xop()  (local)
|   81: const __m128i va0 = _mm_loadl_epi64((const __m128i*) a0);  in xnn_qs8_gemm_xw_minmax_fp32_ukernel_1x4c2__xop()  (local)
  | 
| 1x4c2-xw-minmax-fp32-avx.c
|   51: const __m128i va0 = _mm_loadl_epi64((const __m128i*) a0);  in xnn_qs8_gemm_xw_minmax_fp32_ukernel_1x4c2__avx()  (local)
|   76: const __m128i va0 = _mm_loadl_epi64((const __m128i*) a0);  in xnn_qs8_gemm_xw_minmax_fp32_ukernel_1x4c2__avx()  (local)
  | 
| 1x4c2-xw-minmax-fp32-sse41.c
|   51: const __m128i va0 = _mm_loadl_epi64((const __m128i*) a0);  in xnn_qs8_gemm_xw_minmax_fp32_ukernel_1x4c2__sse41()  (local)
|   76: const __m128i va0 = _mm_loadl_epi64((const __m128i*) a0);  in xnn_qs8_gemm_xw_minmax_fp32_ukernel_1x4c2__sse41()  (local)
  | 
| 1x4c2-xw-minmax-fp32-sse2.c
|   51: const __m128i va0 = _mm_loadl_epi64((const __m128i*) a0);  in xnn_qs8_gemm_xw_minmax_fp32_ukernel_1x4c2__sse2()  (local)
|   76: const __m128i va0 = _mm_loadl_epi64((const __m128i*) a0);  in xnn_qs8_gemm_xw_minmax_fp32_ukernel_1x4c2__sse2()  (local)
  |
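The qs8 1x4c2 kernels load eight signed-byte A elements at a time with _mm_loadl_epi64, widen them to 16 bits, and broadcast them two at a time (the "c2" in the name) so that _mm_madd_epi16 can pair each int16 duo with the pre-widened ("xw") int16 weights. The two hits per file are the same statement because the k-remainder path begins with the same 8-byte load and then guards each pair on the remaining k. A sketch of the main loop using the SSE4.1 sign extension (the sse2 file widens with an unpack/compare sequence instead); the weight layout and all names besides va0 are assumptions:

    #include <immintrin.h>
    #include <stddef.h>
    #include <stdint.h>

    static inline __m128i qs8_gemm_c2_sketch(
        size_t kc, const int8_t* a0, const int16_t* w, __m128i vacc0x0123)
    {
      size_t k = kc;
      for (; k >= 8 * sizeof(int8_t); k -= 8 * sizeof(int8_t)) {
        const __m128i va0 = _mm_loadl_epi64((const __m128i*) a0);
        a0 += 8;
        const __m128i vxa0 = _mm_cvtepi8_epi16(va0);  // 8 x int8 -> 8 x int16
        // Pair {a[0],a[1]} with weight block 0, {a[2],a[3]} with block 1, ...
        vacc0x0123 = _mm_add_epi32(vacc0x0123, _mm_madd_epi16(
            _mm_shuffle_epi32(vxa0, _MM_SHUFFLE(0, 0, 0, 0)),
            _mm_loadu_si128((const __m128i*) (w + 0))));
        vacc0x0123 = _mm_add_epi32(vacc0x0123, _mm_madd_epi16(
            _mm_shuffle_epi32(vxa0, _MM_SHUFFLE(1, 1, 1, 1)),
            _mm_loadu_si128((const __m128i*) (w + 8))));
        vacc0x0123 = _mm_add_epi32(vacc0x0123, _mm_madd_epi16(
            _mm_shuffle_epi32(vxa0, _MM_SHUFFLE(2, 2, 2, 2)),
            _mm_loadu_si128((const __m128i*) (w + 16))));
        vacc0x0123 = _mm_add_epi32(vacc0x0123, _mm_madd_epi16(
            _mm_shuffle_epi32(vxa0, _MM_SHUFFLE(3, 3, 3, 3)),
            _mm_loadu_si128((const __m128i*) (w + 24))));
        w += 32;
      }
      return vacc0x0123;
    }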