/external/XNNPACK/src/f32-gemm/gen/

| File | Line | Local definition of `va3` | Function |
| --- | --- | --- | --- |
| 4x2-minmax-neon-lane-ld64.c | 71 | `const float32x2_t va3 = vld1_f32(a3); a3 += 2;` | `xnn_f32_gemm_minmax_ukernel_4x2__neon_lane_ld64()` |
| 4x2-minmax-neon-lane-ld64.c | 90 | `const float32x2_t va3 = vld1_dup_f32(a3); a3 += 1;` | `xnn_f32_gemm_minmax_ukernel_4x2__neon_lane_ld64()` |
| 4x2-minmax-neonfma-lane-ld64.c | 71 | `const float32x2_t va3 = vld1_f32(a3); a3 += 2;` | `xnn_f32_gemm_minmax_ukernel_4x2__neonfma_lane_ld64()` |
| 4x2-minmax-neonfma-lane-ld64.c | 112 | `const float32x2_t va3 = vld1_dup_f32(a3); a3 += 1;` | `xnn_f32_gemm_minmax_ukernel_4x2__neonfma_lane_ld64()` |
| 6x2-minmax-neon-lane-ld64.c | 85 | `const float32x2_t va3 = vld1_f32(a3); a3 += 2;` | `xnn_f32_gemm_minmax_ukernel_6x2__neon_lane_ld64()` |
| 6x2-minmax-neon-lane-ld64.c | 110 | `const float32x2_t va3 = vld1_dup_f32(a3); a3 += 1;` | `xnn_f32_gemm_minmax_ukernel_6x2__neon_lane_ld64()` |
| 4x8-minmax-neonfma-lane-ld64.c | 75 | `const float32x2_t va3 = vld1_f32(a3); a3 += 2;` | `xnn_f32_gemm_minmax_ukernel_4x8__neonfma_lane_ld64()` |
| 4x8-minmax-neonfma-lane-ld64.c | 104 | `const float32x4_t va3 = vld1q_dup_f32(a3); a3 += 1;` | `xnn_f32_gemm_minmax_ukernel_4x8__neonfma_lane_ld64()` |
| 4x8-minmax-neon-lane-ld64.c | 75 | `const float32x2_t va3 = vld1_f32(a3); a3 += 2;` | `xnn_f32_gemm_minmax_ukernel_4x8__neon_lane_ld64()` |
| 4x8-minmax-neon-lane-ld64.c | 104 | `const float32x4_t va3 = vld1q_dup_f32(a3); a3 += 1;` | `xnn_f32_gemm_minmax_ukernel_4x8__neon_lane_ld64()` |
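The paired matches in each of these files track the two load paths of the lane-`ld64` microkernels: the main k-loop pulls two A values per row with `vld1_f32` and consumes them by lane, while the odd-`k` remainder reloads a single value broadcast across lanes with `vld1_dup_f32` (or `vld1q_dup_f32` for the 8-wide tiles). Below is a minimal single-row sketch of that pattern, assuming a simplified packing layout and bias handling; the function name is hypothetical and this is not the XNNPACK microkernel itself.

```c
#include <arm_neon.h>
#include <stddef.h>

// Minimal 1x2 sketch of the lane-ld64 load pattern (illustrative only; the
// name, packing layout, and bias handling are assumptions, not XNNPACK's).
static void gemm_1x2_lane_ld64_sketch(
    size_t kc,            // k extent in elements
    const float* a,       // 1 x kc row of A
    const float* w,       // packed B: one row of 2 floats per k step
    float* c)             // 1 x 2 output tile, assumed pre-seeded with bias
{
  float32x2_t vacc = vld1_f32(c);
  size_t k = kc;
  for (; k >= 2; k -= 2) {
    const float32x2_t va = vld1_f32(a); a += 2;   // "vld1_f32(a3); a3 += 2"
    const float32x2_t vb0 = vld1_f32(w); w += 2;  // B row for k+0
    const float32x2_t vb1 = vld1_f32(w); w += 2;  // B row for k+1
    vacc = vmla_lane_f32(vacc, vb0, va, 0);       // acc += a[k+0] * B[k+0][:]
    vacc = vmla_lane_f32(vacc, vb1, va, 1);       // acc += a[k+1] * B[k+1][:]
  }
  if (k != 0) {
    const float32x2_t va = vld1_dup_f32(a);       // "vld1_dup_f32(a3)": broadcast the last value
    const float32x2_t vb = vld1_f32(w);
    vacc = vmla_f32(vacc, va, vb);                // acc += a[k] * B[k][:]
  }
  vst1_f32(c, vacc);
}
```

The real 4x2/4x8/6x2 kernels repeat this per row of the output tile (row pointers `a0`…`a3`, hence the `va3` locals) and, in the minmax variants, clamp the accumulators against the min/max params before storing.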
/external/XNNPACK/src/f32-gemm/gen/ (continued)

| File | Line | Local definition of `va3` | Function |
| --- | --- | --- | --- |
| 4x2c4-wasmsimd.c | 78 | `const v128_t va3 = wasm_v128_load(a3);` | `xnn_f32_gemm_ukernel_4x2c4__wasmsimd()` |
| 4x2c4-wasmsimd.c | 101 | `const v128_t va3 = wasm_v128_load(a3);` | `xnn_f32_gemm_ukernel_4x2c4__wasmsimd()` |
| 4x2c4-wasmrelaxedsimd-fma.c | 78 | `const v128_t va3 = wasm_v128_load(a3);` | `xnn_f32_gemm_ukernel_4x2c4__wasmrelaxedsimd_fma()` |
| 4x2c4-wasmrelaxedsimd-fma.c | 101 | `const v128_t va3 = wasm_v128_load(a3);` | `xnn_f32_gemm_ukernel_4x2c4__wasmrelaxedsimd_fma()` |
| 4x2c4-minmax-sse.c | 78 | `const __m128 va3 = _mm_loadu_ps(a3);` | `xnn_f32_gemm_minmax_ukernel_4x2c4__sse()` |
| 4x2c4-minmax-sse.c | 101 | `const __m128 va3 = _mm_loadu_ps(a3);` | `xnn_f32_gemm_minmax_ukernel_4x2c4__sse()` |
| 4x2c4-relu-wasmsimd.c | 78 | `const v128_t va3 = wasm_v128_load(a3);` | `xnn_f32_gemm_relu_ukernel_4x2c4__wasmsimd()` |
| 4x2c4-relu-wasmsimd.c | 101 | `const v128_t va3 = wasm_v128_load(a3);` | `xnn_f32_gemm_relu_ukernel_4x2c4__wasmsimd()` |
| 4x2c4-minmax-wasmsimd-x86.c | 80 | `const v128_t va3 = wasm_v128_load(a3);` | `xnn_f32_gemm_minmax_ukernel_4x2c4__wasmsimd_x86()` |
| 4x2c4-minmax-wasmsimd-x86.c | 103 | `const v128_t va3 = wasm_v128_load(a3);` | `xnn_f32_gemm_minmax_ukernel_4x2c4__wasmsimd_x86()` |
| 4x2c4-relu-wasmrelaxedsimd-fma.c | 78 | `const v128_t va3 = wasm_v128_load(a3);` | `xnn_f32_gemm_relu_ukernel_4x2c4__wasmrelaxedsimd_fma()` |
| 4x2c4-relu-wasmrelaxedsimd-fma.c | 101 | `const v128_t va3 = wasm_v128_load(a3);` | `xnn_f32_gemm_relu_ukernel_4x2c4__wasmrelaxedsimd_fma()` |
| 4x2c4-minmax-wasmrelaxedsimd.c | 80 | `const v128_t va3 = wasm_v128_load(a3);` | `xnn_f32_gemm_minmax_ukernel_4x2c4__wasmrelaxedsimd()` |
| 4x2c4-minmax-wasmrelaxedsimd.c | 103 | `const v128_t va3 = wasm_v128_load(a3);` | `xnn_f32_gemm_minmax_ukernel_4x2c4__wasmrelaxedsimd()` |
| 4x2c4-minmax-wasmrelaxedsimd-fma.c | 80 | `const v128_t va3 = wasm_v128_load(a3);` | `xnn_f32_gemm_minmax_ukernel_4x2c4__wasmrelaxedsimd_fma()` |
| 4x2c4-minmax-wasmrelaxedsimd-fma.c | 103 | `const v128_t va3 = wasm_v128_load(a3);` | `xnn_f32_gemm_minmax_ukernel_4x2c4__wasmrelaxedsimd_fma()` |
| 4x2c4-minmax-wasmsimd-arm.c | 80 | `const v128_t va3 = wasm_v128_load(a3);` | `xnn_f32_gemm_minmax_ukernel_4x2c4__wasmsimd_arm()` |
| 4x2c4-minmax-wasmsimd-arm.c | 103 | `const v128_t va3 = wasm_v128_load(a3);` | `xnn_f32_gemm_minmax_ukernel_4x2c4__wasmsimd_arm()` |
| 4x8s4-wasmrelaxedsimd-fma.c | 78 | `v128_t va3 = wasm_v128_load(a3);` | `xnn_f32_gemm_ukernel_4x8s4__wasmrelaxedsimd_fma()` |
| 4x8s4-wasmrelaxedsimd-fma.c | 156 | `v128_t va3 = wasm_v128_load(a3);` | `xnn_f32_gemm_ukernel_4x8s4__wasmrelaxedsimd_fma()` |
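In the `c4` kernels above, each k step consumes four consecutive A values per row, so both match sites in every file are plain unaligned 128-bit loads: `wasm_v128_load` in the WAsm SIMD/relaxed-SIMD variants and `_mm_loadu_ps` in the SSE one. (The single `4x8s4` entry declares `va3` without `const`, consistent with the s4 scheme reusing and rotating the loaded A vector between sub-steps.) The sketch below shows the c4 accumulation idea for one 1x2 row using SSE intrinsics, assuming `kc` is a multiple of four and a simplified packing layout; the name and reduction code are illustrative, not XNNPACK's.

```c
#include <stddef.h>
#include <xmmintrin.h>

// Minimal 1x2 "c4" sketch (illustrative; assumes kc % 4 == 0 and a simplified
// packing layout, unlike the real microkernels).
static void gemm_1x2c4_sse_sketch(
    size_t kc,            // k extent in elements, multiple of 4 here
    const float* a,       // 1 x kc row of A
    const float* w,       // packed B: per k block of 4, 4 floats for col 0 then 4 for col 1
    float* c)             // 1 x 2 output tile
{
  __m128 vacc0 = _mm_setzero_ps();   // 4 partial sums for output column 0
  __m128 vacc1 = _mm_setzero_ps();   // 4 partial sums for output column 1
  for (size_t k = 0; k < kc; k += 4) {
    const __m128 va = _mm_loadu_ps(a); a += 4;     // "_mm_loadu_ps(a3)" / "wasm_v128_load(a3)"
    const __m128 vb0 = _mm_loadu_ps(w); w += 4;
    const __m128 vb1 = _mm_loadu_ps(w); w += 4;
    vacc0 = _mm_add_ps(vacc0, _mm_mul_ps(va, vb0));
    vacc1 = _mm_add_ps(vacc1, _mm_mul_ps(va, vb1));
  }
  // Reduce the two 4-lane accumulators to the two outputs.
  __m128 vsum = _mm_add_ps(_mm_unpacklo_ps(vacc0, vacc1),   // {a0, b0, a1, b1}
                           _mm_unpackhi_ps(vacc0, vacc1));  // {a2, b2, a3, b3}
  vsum = _mm_add_ps(vsum, _mm_movehl_ps(vsum, vsum));       // low lanes: {sum_a, sum_b}
  c[0] = _mm_cvtss_f32(vsum);
  c[1] = _mm_cvtss_f32(_mm_shuffle_ps(vsum, vsum, _MM_SHUFFLE(1, 1, 1, 1)));
}
```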
/external/XNNPACK/src/f32-igemm/gen/

| File | Line | Local definition of `va3` | Function |
| --- | --- | --- | --- |
| 4x2-minmax-neon-lane-ld64.c | 93 | `const float32x2_t va3 = vld1_f32(a3); a3 += 2;` | `xnn_f32_igemm_minmax_ukernel_4x2__neon_lane_ld64()` |
| 4x2-minmax-neon-lane-ld64.c | 112 | `const float32x2_t va3 = vld1_dup_f32(a3);` | `xnn_f32_igemm_minmax_ukernel_4x2__neon_lane_ld64()` |
| 4x4-minmax-neon-lane-ld64.c | 93 | `const float32x2_t va3 = vld1_f32(a3); a3 += 2;` | `xnn_f32_igemm_minmax_ukernel_4x4__neon_lane_ld64()` |
| 4x4-minmax-neon-lane-ld64.c | 113 | `const float32x4_t va3 = vld1q_dup_f32(a3);` | `xnn_f32_igemm_minmax_ukernel_4x4__neon_lane_ld64()` |
| 4x4-minmax-neonfma-lane-ld64.c | 93 | `const float32x2_t va3 = vld1_f32(a3); a3 += 2;` | `xnn_f32_igemm_minmax_ukernel_4x4__neonfma_lane_ld64()` |
| 4x4-minmax-neonfma-lane-ld64.c | 113 | `const float32x4_t va3 = vld1q_dup_f32(a3);` | `xnn_f32_igemm_minmax_ukernel_4x4__neonfma_lane_ld64()` |
| 4x2-minmax-neonfma-lane-ld64.c | 93 | `const float32x2_t va3 = vld1_f32(a3); a3 += 2;` | `xnn_f32_igemm_minmax_ukernel_4x2__neonfma_lane_ld64()` |
| 4x2-minmax-neonfma-lane-ld64.c | 134 | `const float32x2_t va3 = vld1_dup_f32(a3);` | `xnn_f32_igemm_minmax_ukernel_4x2__neonfma_lane_ld64()` |
| 4x8-minmax-neon-lane-ld64.c | 97 | `const float32x2_t va3 = vld1_f32(a3); a3 += 2;` | `xnn_f32_igemm_minmax_ukernel_4x8__neon_lane_ld64()` |
| 4x8-minmax-neon-lane-ld64.c | 127 | `const float32x4_t va3 = vld1q_dup_f32(a3);` | `xnn_f32_igemm_minmax_ukernel_4x8__neon_lane_ld64()` |
| 6x2-minmax-neon-lane-ld64.c | 113 | `const float32x2_t va3 = vld1_f32(a3); a3 += 2;` | `xnn_f32_igemm_minmax_ukernel_6x2__neon_lane_ld64()` |
| 6x2-minmax-neon-lane-ld64.c | 138 | `const float32x2_t va3 = vld1_dup_f32(a3);` | `xnn_f32_igemm_minmax_ukernel_6x2__neon_lane_ld64()` |
| 4x8-minmax-neonfma-lane-ld64.c | 97 | `const float32x2_t va3 = vld1_f32(a3); a3 += 2;` | `xnn_f32_igemm_minmax_ukernel_4x8__neonfma_lane_ld64()` |
| 4x8-minmax-neonfma-lane-ld64.c | 127 | `const float32x4_t va3 = vld1q_dup_f32(a3);` | `xnn_f32_igemm_minmax_ukernel_4x8__neonfma_lane_ld64()` |
| 4x2c4-relu-wasmsimd.c | 100 | `const v128_t va3 = wasm_v128_load(a3);` | `xnn_f32_igemm_relu_ukernel_4x2c4__wasmsimd()` |
| 4x2c4-relu-wasmsimd.c | 120 | `const v128_t va3 = wasm_v128_load(a3);` | `xnn_f32_igemm_relu_ukernel_4x2c4__wasmsimd()` |
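The IGEMM variants share these inner loops, but row pointers such as `a3` come from an indirection buffer rather than a fixed stride over A, and, as the snippets show, their odd-`k` remainder no longer advances `a3` (presumably because the pointer is refreshed from the buffer on the next pass). The sketch below illustrates only the indirection idea for one row; the buffer layout, names, and signature are simplified assumptions, not XNNPACK's IGEMM interface.

```c
#include <arm_neon.h>
#include <stddef.h>

// Hypothetical indirection-driven variant of the 1x2 sketch above: instead of
// one contiguous A row, the caller supplies an array of pointers, one per k
// block.  Layout and names are simplified assumptions for illustration only.
static void igemm_1x2_indirect_sketch(
    size_t ks,                  // number of indirection entries
    size_t kc,                  // elements contributed per entry (multiple of 2 here)
    const float* const* a_ptrs, // ks pointers, each to kc floats of A
    const float* w,             // packed B: ks * kc rows of 2 floats
    float* c)                   // 1 x 2 output tile, assumed pre-seeded with bias
{
  float32x2_t vacc = vld1_f32(c);
  for (size_t s = 0; s < ks; s++) {
    const float* a = a_ptrs[s];      // row pointer comes from the indirection buffer
    for (size_t k = 0; k < kc; k += 2) {
      const float32x2_t va = vld1_f32(a); a += 2;
      const float32x2_t vb0 = vld1_f32(w); w += 2;
      const float32x2_t vb1 = vld1_f32(w); w += 2;
      vacc = vmla_lane_f32(vacc, vb0, va, 0);
      vacc = vmla_lane_f32(vacc, vb1, va, 1);
    }
  }
  vst1_f32(c, vacc);
}
```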
/external/XNNPACK/src/f32-gemm/gen-inc/

| File | Line | Local definition of `va3` | Function |
| --- | --- | --- | --- |
| 4x8inc-minmax-neonfma-lane-ld64.c | 77 | `const float32x2_t va3 = vld1_f32(a3); a3 += 2;` | `xnn_f32_gemminc_minmax_ukernel_4x8__neonfma_lane_ld64()` |
| 4x8inc-minmax-neonfma-lane-ld64.c | 106 | `const float32x4_t va3 = vld1q_dup_f32(a3); a3 += 1;` | `xnn_f32_gemminc_minmax_ukernel_4x8__neonfma_lane_ld64()` |
| 4x8inc-minmax-neon-lane-ld64.c | 77 | `const float32x2_t va3 = vld1_f32(a3); a3 += 2;` | `xnn_f32_gemminc_minmax_ukernel_4x8__neon_lane_ld64()` |
| 4x8inc-minmax-neon-lane-ld64.c | 106 | `const float32x4_t va3 = vld1q_dup_f32(a3); a3 += 1;` | `xnn_f32_gemminc_minmax_ukernel_4x8__neon_lane_ld64()` |
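The two `gen-inc` entries are the `gemminc` counterparts of the 4x8 kernels above: the load pattern is identical (two A values per main-loop step via `vld1_f32`, a q-register broadcast via `vld1q_dup_f32` for the remainder), with the accumulators seeded from a caller-supplied partial-sum buffer rather than from packed bias. The 1x8 single-row sketch below mirrors that seeding; the name, signature, and packing layout are illustrative assumptions, not the XNNPACK kernel.

```c
#include <arm_neon.h>
#include <stddef.h>

// Hypothetical 1x8 sketch of the wider (NR = 8) lane-ld64 pattern, seeded from
// a caller-supplied accumulator buffer in the spirit of the "inc" kernels.
static void gemminc_1x8_lane_ld64_sketch(
    size_t kc,
    const float* a,        // 1 x kc row of A
    const float* w,        // packed B: one row of 8 floats per k step
    const float* acc,      // 8 partial sums to continue from
    float* c)              // 1 x 8 output tile
{
  float32x4_t vacc0123 = vld1q_f32(acc);       // continue from prior partial sums
  float32x4_t vacc4567 = vld1q_f32(acc + 4);
  size_t k = kc;
  for (; k >= 2; k -= 2) {
    const float32x2_t va = vld1_f32(a); a += 2;           // "vld1_f32(a3); a3 += 2"
    const float32x4_t vb0123k0 = vld1q_f32(w); w += 4;
    const float32x4_t vb4567k0 = vld1q_f32(w); w += 4;
    const float32x4_t vb0123k1 = vld1q_f32(w); w += 4;
    const float32x4_t vb4567k1 = vld1q_f32(w); w += 4;
    vacc0123 = vmlaq_lane_f32(vacc0123, vb0123k0, va, 0); // acc += a[k+0] * B[k+0][0:4]
    vacc4567 = vmlaq_lane_f32(vacc4567, vb4567k0, va, 0); // acc += a[k+0] * B[k+0][4:8]
    vacc0123 = vmlaq_lane_f32(vacc0123, vb0123k1, va, 1); // acc += a[k+1] * B[k+1][0:4]
    vacc4567 = vmlaq_lane_f32(vacc4567, vb4567k1, va, 1); // acc += a[k+1] * B[k+1][4:8]
  }
  if (k != 0) {
    const float32x4_t va = vld1q_dup_f32(a);              // "vld1q_dup_f32(a3)": broadcast
    const float32x4_t vb0123 = vld1q_f32(w); w += 4;
    const float32x4_t vb4567 = vld1q_f32(w); w += 4;
    vacc0123 = vmlaq_f32(vacc0123, va, vb0123);
    vacc4567 = vmlaq_f32(vacc4567, va, vb4567);
  }
  vst1q_f32(c, vacc0123);
  vst1q_f32(c + 4, vacc4567);
}
```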