Cross-reference hits for the local variable va4 in XNNPACK's generated f32 GEMM and IGEMM microkernels, one table per directory.

/external/XNNPACK/src/f32-gemm/gen/

| File | Line | Definition of va4 (local) | Function |
| --- | --- | --- | --- |
| 6x2-minmax-neon-lane-ld64.c | 86 | const float32x2_t va4 = vld1_f32(a4); a4 += 2; | xnn_f32_gemm_minmax_ukernel_6x2__neon_lane_ld64() |
| 6x2-minmax-neon-lane-ld64.c | 111 | const float32x2_t va4 = vld1_dup_f32(a4); a4 += 1; | xnn_f32_gemm_minmax_ukernel_6x2__neon_lane_ld64() |
| 5x8-minmax-neon-lane-ld64.c | 84 | const float32x2_t va4 = vld1_f32(a4); a4 += 2; | xnn_f32_gemm_minmax_ukernel_5x8__neon_lane_ld64() |
| 5x8-minmax-neon-lane-ld64.c | 118 | const float32x4_t va4 = vld1q_dup_f32(a4); a4 += 1; | xnn_f32_gemm_minmax_ukernel_5x8__neon_lane_ld64() |
| 5x8-minmax-neonfma-lane-ld64.c | 84 | const float32x2_t va4 = vld1_f32(a4); a4 += 2; | xnn_f32_gemm_minmax_ukernel_5x8__neonfma_lane_ld64() |
| 5x8-minmax-neonfma-lane-ld64.c | 118 | const float32x4_t va4 = vld1q_dup_f32(a4); a4 += 1; | xnn_f32_gemm_minmax_ukernel_5x8__neonfma_lane_ld64() |
| 5x8s4-wasmrelaxedsimd-fma.c | 88 | v128_t va4 = wasm_v128_load(a4); | xnn_f32_gemm_ukernel_5x8s4__wasmrelaxedsimd_fma() |
| 5x8s4-wasmrelaxedsimd-fma.c | 179 | v128_t va4 = wasm_v128_load(a4); | xnn_f32_gemm_ukernel_5x8s4__wasmrelaxedsimd_fma() |
| 6x2-minmax-neonfma-lane-ld64.c | 86 | const float32x2_t va4 = vld1_f32(a4); a4 += 2; | xnn_f32_gemm_minmax_ukernel_6x2__neonfma_lane_ld64() |
| 6x2-minmax-neonfma-lane-ld64.c | 141 | const float32x2_t va4 = vld1_dup_f32(a4); a4 += 1; | xnn_f32_gemm_minmax_ukernel_6x2__neonfma_lane_ld64() |
| 5x8s4-wasmsimd.c | 88 | v128_t va4 = wasm_v128_load(a4); | xnn_f32_gemm_ukernel_5x8s4__wasmsimd() |
| 5x8s4-wasmsimd.c | 179 | v128_t va4 = wasm_v128_load(a4); | xnn_f32_gemm_ukernel_5x8s4__wasmsimd() |
| 5x8s4-minmax-sse.c | 88 | __m128 va4 = _mm_loadu_ps(a4); | xnn_f32_gemm_minmax_ukernel_5x8s4__sse() |
| 5x8s4-minmax-sse.c | 179 | __m128 va4 = _mm_loadu_ps(a4); | xnn_f32_gemm_minmax_ukernel_5x8s4__sse() |
| 6x8-minmax-neonfma-lane-ld64.c | 92 | const float32x2_t va4 = vld1_f32(a4); a4 += 2; | xnn_f32_gemm_minmax_ukernel_6x8__neonfma_lane_ld64() |
| 6x8-minmax-neonfma-lane-ld64.c | 131 | const float32x4_t va4 = vld1q_dup_f32(a4); a4 += 1; | xnn_f32_gemm_minmax_ukernel_6x8__neonfma_lane_ld64() |
| 5x8s4-minmax-wasmrelaxedsimd.c | 90 | v128_t va4 = wasm_v128_load(a4); | xnn_f32_gemm_minmax_ukernel_5x8s4__wasmrelaxedsimd() |
| 5x8s4-minmax-wasmrelaxedsimd.c | 181 | v128_t va4 = wasm_v128_load(a4); | xnn_f32_gemm_minmax_ukernel_5x8s4__wasmrelaxedsimd() |
| 5x8s4-minmax-wasmsimd-arm.c | 90 | v128_t va4 = wasm_v128_load(a4); | xnn_f32_gemm_minmax_ukernel_5x8s4__wasmsimd_arm() |
| 5x8s4-minmax-wasmsimd-arm.c | 181 | v128_t va4 = wasm_v128_load(a4); | xnn_f32_gemm_minmax_ukernel_5x8s4__wasmsimd_arm() |
| 5x8s4-relu-wasmsimd.c | 88 | v128_t va4 = wasm_v128_load(a4); | xnn_f32_gemm_relu_ukernel_5x8s4__wasmsimd() |
| 5x8s4-relu-wasmsimd.c | 179 | v128_t va4 = wasm_v128_load(a4); | xnn_f32_gemm_relu_ukernel_5x8s4__wasmsimd() |
| 5x8s4-minmax-wasmsimd-x86.c | 90 | v128_t va4 = wasm_v128_load(a4); | xnn_f32_gemm_minmax_ukernel_5x8s4__wasmsimd_x86() |
| 5x8s4-minmax-wasmsimd-x86.c | 181 | v128_t va4 = wasm_v128_load(a4); | xnn_f32_gemm_minmax_ukernel_5x8s4__wasmsimd_x86() |
| 5x8s4-minmax-wasmrelaxedsimd-fma.c | 90 | v128_t va4 = wasm_v128_load(a4); | xnn_f32_gemm_minmax_ukernel_5x8s4__wasmrelaxedsimd_fma() |
| 5x8s4-minmax-wasmrelaxedsimd-fma.c | 181 | v128_t va4 = wasm_v128_load(a4); | xnn_f32_gemm_minmax_ukernel_5x8s4__wasmrelaxedsimd_fma() |
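
The NEON lane-ld64 hits above pair a main-loop load with a remainder load: the main loop pulls two A elements per step with vld1_f32, while the leftover element (odd kc) is broadcast. Below is a minimal illustrative sketch of that pattern, not the generated kernel; the loop bounds and the discarded results are placeholders for the multiply-accumulate work.

```c
#include <arm_neon.h>
#include <stddef.h>

// Illustrative sketch of the ld64 load pattern seen in the hits above.
// The 8-column kernels broadcast the remainder element into a 128-bit
// register (vld1q_dup_f32); the narrow 6x2 kernels only need a 64-bit
// broadcast (vld1_dup_f32).
static void load_a4_ld64_sketch(const float* a4, size_t kc) {
  size_t k = kc * sizeof(float);
  for (; k >= 2 * sizeof(float); k -= 2 * sizeof(float)) {
    const float32x2_t va4 = vld1_f32(a4); a4 += 2;       // two lanes of A row 4
    (void) va4;  // multiply-accumulate against B omitted in this sketch
  }
  if (k != 0) {
    const float32x4_t va4 = vld1q_dup_f32(a4); a4 += 1;  // broadcast the last element
    (void) va4;
  }
}
```
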
/external/XNNPACK/src/f32-igemm/gen/

| File | Line | Definition of va4 (local) | Function |
| --- | --- | --- | --- |
| 6x2-minmax-neon-lane-ld64.c | 114 | const float32x2_t va4 = vld1_f32(a4); a4 += 2; | xnn_f32_igemm_minmax_ukernel_6x2__neon_lane_ld64() |
| 6x2-minmax-neon-lane-ld64.c | 139 | const float32x2_t va4 = vld1_dup_f32(a4); | xnn_f32_igemm_minmax_ukernel_6x2__neon_lane_ld64() |
| 5x8s4-wasmrelaxedsimd-fma.c | 113 | v128_t va4 = wasm_v128_load(a4); | xnn_f32_igemm_ukernel_5x8s4__wasmrelaxedsimd_fma() |
| 5x8s4-wasmrelaxedsimd-fma.c | 204 | v128_t va4 = wasm_v128_load(a4); | xnn_f32_igemm_ukernel_5x8s4__wasmrelaxedsimd_fma() |
| 5x8s4-relu-wasmsimd.c | 113 | v128_t va4 = wasm_v128_load(a4); | xnn_f32_igemm_relu_ukernel_5x8s4__wasmsimd() |
| 5x8s4-relu-wasmsimd.c | 204 | v128_t va4 = wasm_v128_load(a4); | xnn_f32_igemm_relu_ukernel_5x8s4__wasmsimd() |
| 6x2-minmax-neonfma-lane-ld64.c | 114 | const float32x2_t va4 = vld1_f32(a4); a4 += 2; | xnn_f32_igemm_minmax_ukernel_6x2__neonfma_lane_ld64() |
| 6x2-minmax-neonfma-lane-ld64.c | 169 | const float32x2_t va4 = vld1_dup_f32(a4); | xnn_f32_igemm_minmax_ukernel_6x2__neonfma_lane_ld64() |
| 5x8s4-wasmsimd.c | 113 | v128_t va4 = wasm_v128_load(a4); | xnn_f32_igemm_ukernel_5x8s4__wasmsimd() |
| 5x8s4-wasmsimd.c | 204 | v128_t va4 = wasm_v128_load(a4); | xnn_f32_igemm_ukernel_5x8s4__wasmsimd() |
| 5x8s4-relu-wasmrelaxedsimd-fma.c | 113 | v128_t va4 = wasm_v128_load(a4); | xnn_f32_igemm_relu_ukernel_5x8s4__wasmrelaxedsimd_fma() |
| 5x8s4-relu-wasmrelaxedsimd-fma.c | 204 | v128_t va4 = wasm_v128_load(a4); | xnn_f32_igemm_relu_ukernel_5x8s4__wasmrelaxedsimd_fma() |
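
The s4 kernels listed above load a full 128-bit vector of A with wasm_v128_load at both hit sites, which are most likely the main k loop and its remainder. A minimal sketch of the load-and-rotate idea follows, assuming the usual s4 scheme of rotating the A vector one lane per multiply-add; the helper name and the omitted accumulator handling are illustrative, not the generated code.

```c
#include <wasm_simd128.h>

// Sketch of an s4-style load: one 128-bit load of four A elements, then a
// lane rotation so the next element lines up with the next block of B.
static v128_t load_and_rotate_a4_sketch(const float* a4) {
  v128_t va4 = wasm_v128_load(a4);                  // a4[0..3] in a single load
  va4 = wasm_i32x4_shuffle(va4, va4, 1, 2, 3, 0);   // rotate lanes for the next step
  return va4;
}
```
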
/external/XNNPACK/src/f32-gemm/gen-inc/

| File | Line | Definition of va4 (local) | Function |
| --- | --- | --- | --- |
| 5x8inc-minmax-neonfma-lane-ld64.c | 86 | const float32x2_t va4 = vld1_f32(a4); a4 += 2; | xnn_f32_gemminc_minmax_ukernel_5x8__neonfma_lane_ld64() |
| 5x8inc-minmax-neonfma-lane-ld64.c | 120 | const float32x4_t va4 = vld1q_dup_f32(a4); a4 += 1; | xnn_f32_gemminc_minmax_ukernel_5x8__neonfma_lane_ld64() |
| 5x8inc-minmax-neon-lane-ld64.c | 86 | const float32x2_t va4 = vld1_f32(a4); a4 += 2; | xnn_f32_gemminc_minmax_ukernel_5x8__neon_lane_ld64() |
| 5x8inc-minmax-neon-lane-ld64.c | 120 | const float32x4_t va4 = vld1q_dup_f32(a4); a4 += 1; | xnn_f32_gemminc_minmax_ukernel_5x8__neon_lane_ld64() |
| 5x8s4inc-minmax-wasmsimd-x86.c | 92 | v128_t va4 = wasm_v128_load(a4); | xnn_f32_gemminc_minmax_ukernel_5x8s4__wasmsimd_x86() |
| 5x8s4inc-minmax-wasmsimd-x86.c | 183 | v128_t va4 = wasm_v128_load(a4); | xnn_f32_gemminc_minmax_ukernel_5x8s4__wasmsimd_x86() |
| 5x8s4inc-minmax-wasmrelaxedsimd-fma.c | 92 | v128_t va4 = wasm_v128_load(a4); | xnn_f32_gemminc_minmax_ukernel_5x8s4__wasmrelaxedsimd_fma() |
| 5x8s4inc-minmax-wasmrelaxedsimd-fma.c | 183 | v128_t va4 = wasm_v128_load(a4); | xnn_f32_gemminc_minmax_ukernel_5x8s4__wasmrelaxedsimd_fma() |
| 6x8inc-minmax-neonfma-lane-ld64.c | 94 | const float32x2_t va4 = vld1_f32(a4); a4 += 2; | xnn_f32_gemminc_minmax_ukernel_6x8__neonfma_lane_ld64() |
| 6x8inc-minmax-neonfma-lane-ld64.c | 133 | const float32x4_t va4 = vld1q_dup_f32(a4); a4 += 1; | xnn_f32_gemminc_minmax_ukernel_6x8__neonfma_lane_ld64() |
| 5x8s4inc-minmax-wasmsimd-arm.c | 92 | v128_t va4 = wasm_v128_load(a4); | xnn_f32_gemminc_minmax_ukernel_5x8s4__wasmsimd_arm() |
| 5x8s4inc-minmax-wasmsimd-arm.c | 183 | v128_t va4 = wasm_v128_load(a4); | xnn_f32_gemminc_minmax_ukernel_5x8s4__wasmsimd_arm() |
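
The gen-inc (gemminc) variants show the same va4 loads as their gen counterparts; the difference between the two families appears to lie elsewhere, in how the output accumulators are seeded: a gemminc kernel continues from a caller-supplied partial-accumulator buffer rather than starting from the packed bias. The sketch below illustrates only that distinction; the parameter names are assumptions for illustration, not the XNNPACK signatures.

```c
#include <arm_neon.h>
#include <stddef.h>

// Hedged sketch of the gemm vs. gemminc accumulator seed. The va4 loads in
// the inner loop are unchanged; only the starting value of each accumulator
// register differs.
static float32x4_t seed_accumulator_sketch(const float* packed_w, const float* acc) {
  if (acc != NULL) {
    return vld1q_f32(acc);      // gemminc: resume a previously computed partial sum
  }
  return vld1q_f32(packed_w);   // gemm: start from the packed bias in w
}
```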