/external/XNNPACK/src/f32-gemm/gen/

4x2-minmax-neon-lane-ld64.c  (in xnn_f32_gemm_minmax_ukernel_4x2__neon_lane_ld64, local)
    70:  const float32x2_t va2 = vld1_f32(a2); a2 += 2;
    89:  const float32x2_t va2 = vld1_dup_f32(a2); a2 += 1;

4x2-minmax-neonfma-lane-ld64.c  (in xnn_f32_gemm_minmax_ukernel_4x2__neonfma_lane_ld64, local)
    70:  const float32x2_t va2 = vld1_f32(a2); a2 += 2;
    111: const float32x2_t va2 = vld1_dup_f32(a2); a2 += 1;

6x2-minmax-neon-lane-ld64.c  (in xnn_f32_gemm_minmax_ukernel_6x2__neon_lane_ld64, local)
    84:  const float32x2_t va2 = vld1_f32(a2); a2 += 2;
    109: const float32x2_t va2 = vld1_dup_f32(a2); a2 += 1;

3x8s4-wasmrelaxedsimd-fma.c  (in xnn_f32_gemm_ukernel_3x8s4__wasmrelaxedsimd_fma, local)
    68:  v128_t va2 = wasm_v128_load(a2);
    133: v128_t va2 = wasm_v128_load(a2);

3x8s4-wasmsimd.c  (in xnn_f32_gemm_ukernel_3x8s4__wasmsimd, local)
    68:  v128_t va2 = wasm_v128_load(a2);
    133: v128_t va2 = wasm_v128_load(a2);

3x8s4-minmax-sse.c  (in xnn_f32_gemm_minmax_ukernel_3x8s4__sse, local)
    68:  __m128 va2 = _mm_loadu_ps(a2);
    133: __m128 va2 = _mm_loadu_ps(a2);

3x8s4-relu-wasmsimd.c  (in xnn_f32_gemm_relu_ukernel_3x8s4__wasmsimd, local)
    68:  v128_t va2 = wasm_v128_load(a2);
    133: v128_t va2 = wasm_v128_load(a2);

4x8-minmax-neonfma-lane-ld64.c  (in xnn_f32_gemm_minmax_ukernel_4x8__neonfma_lane_ld64, local)
    74:  const float32x2_t va2 = vld1_f32(a2); a2 += 2;
    103: const float32x4_t va2 = vld1q_dup_f32(a2); a2 += 1;

3x8s4-minmax-wasmsimd-arm.c  (in xnn_f32_gemm_minmax_ukernel_3x8s4__wasmsimd_arm, local)
    70:  v128_t va2 = wasm_v128_load(a2);
    135: v128_t va2 = wasm_v128_load(a2);

4x8-minmax-neon-lane-ld64.c  (in xnn_f32_gemm_minmax_ukernel_4x8__neon_lane_ld64, local)
    74:  const float32x2_t va2 = vld1_f32(a2); a2 += 2;
    103: const float32x4_t va2 = vld1q_dup_f32(a2); a2 += 1;

3x8s4-minmax-wasmrelaxedsimd.c  (in xnn_f32_gemm_minmax_ukernel_3x8s4__wasmrelaxedsimd, local)
    70:  v128_t va2 = wasm_v128_load(a2);
    135: v128_t va2 = wasm_v128_load(a2);

3x8s4-relu-wasmrelaxedsimd-fma.c  (in xnn_f32_gemm_relu_ukernel_3x8s4__wasmrelaxedsimd_fma, local)
    68:  v128_t va2 = wasm_v128_load(a2);
    133: v128_t va2 = wasm_v128_load(a2);

4x2c4-wasmsimd.c  (in xnn_f32_gemm_ukernel_4x2c4__wasmsimd, local)
    76:  const v128_t va2 = wasm_v128_load(a2);
    99:  const v128_t va2 = wasm_v128_load(a2);

3x8s4-minmax-wasmsimd-x86.c  (in xnn_f32_gemm_minmax_ukernel_3x8s4__wasmsimd_x86, local)
    70:  v128_t va2 = wasm_v128_load(a2);
    135: v128_t va2 = wasm_v128_load(a2);

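In the lane-ld64 kernels above, va2 holds row 2's slice of the A matrix, and each file defines it twice because the K loop is split: the main loop pulls two floats and multiplies by lane, while an odd-K remainder broadcasts a single float. A minimal sketch of that shape for one row and one 4-wide accumulator (the function name, the kc-in-bytes convention, and the simplified B layout are assumptions for illustration, not the generated kernel):

    #include <arm_neon.h>
    #include <stddef.h>

    // Sketch of the split K loop behind the lane-ld64 entries (one row, one
    // 4-wide accumulator; the real kernels track several rows and columns).
    static float32x4_t row_update(size_t kc, const float* a2, const float* w,
                                  float32x4_t vacc2) {
      size_t k = kc;  // kc is in bytes, matching these kernels' convention
      for (; k >= 2 * sizeof(float); k -= 2 * sizeof(float)) {
        const float32x2_t va2 = vld1_f32(a2); a2 += 2;  // two A elements
        const float32x4_t vb0 = vld1q_f32(w); w += 4;   // B panel for k+0
        const float32x4_t vb1 = vld1q_f32(w); w += 4;   // B panel for k+1
        vacc2 = vmlaq_lane_f32(vacc2, vb0, va2, 0);
        vacc2 = vmlaq_lane_f32(vacc2, vb1, va2, 1);
      }
      if (k != 0) {  // kc was odd: one float of A remains
        const float32x4_t va2 = vld1q_dup_f32(a2);      // broadcast load
        const float32x4_t vb = vld1q_f32(w);
        vacc2 = vmlaq_f32(vacc2, va2, vb);
      }
      return vacc2;
    }
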
/external/XNNPACK/src/f32-igemm/gen/

4x2-minmax-neon-lane-ld64.c  (in xnn_f32_igemm_minmax_ukernel_4x2__neon_lane_ld64, local)
    92:  const float32x2_t va2 = vld1_f32(a2); a2 += 2;
    111: const float32x2_t va2 = vld1_dup_f32(a2);

4x4-minmax-neon-lane-ld64.c  (in xnn_f32_igemm_minmax_ukernel_4x4__neon_lane_ld64, local)
    92:  const float32x2_t va2 = vld1_f32(a2); a2 += 2;
    112: const float32x4_t va2 = vld1q_dup_f32(a2);

4x4-minmax-neonfma-lane-ld64.c  (in xnn_f32_igemm_minmax_ukernel_4x4__neonfma_lane_ld64, local)
    92:  const float32x2_t va2 = vld1_f32(a2); a2 += 2;
    112: const float32x4_t va2 = vld1q_dup_f32(a2);

4x2-minmax-neonfma-lane-ld64.c  (in xnn_f32_igemm_minmax_ukernel_4x2__neonfma_lane_ld64, local)
    92:  const float32x2_t va2 = vld1_f32(a2); a2 += 2;
    133: const float32x2_t va2 = vld1_dup_f32(a2);

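The IGEMM remainder loads above differ from their GEMM counterparts only by the missing pointer bump. This is consistent with the IGEMM structure, where the A row pointers are refetched from an indirection buffer on each outer iteration, so advancing a2 past its last element would be dead code (a reading of the generated sources, not a statement from them):

    // f32-gemm remainder: the pointer advances within the row.
    //     const float32x2_t va2 = vld1_dup_f32(a2); a2 += 1;
    // f32-igemm remainder: a2 is about to be reloaded from the
    // indirection buffer, so the increment is omitted.
    //     const float32x2_t va2 = vld1_dup_f32(a2);
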
/external/clang/test/Preprocessor/

macro_fn_varargs_iso.c  (macro)
    8:   #define va2(a, b, ...) foo2{__VA_ARGS__, b, a}

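This test macro splices the variadic arguments in before the named ones, so the argument order is reversed in the expansion. For example:

    // With the definition above:
    va2(1, 2, 3, 4)   // expands to: foo2{3, 4, 2, 1}
                      // (__VA_ARGS__ = 3, 4; then b = 2; then a = 1)
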
/external/XNNPACK/src/qs8-gemm/gen/

3x4c2-xw-minmax-fp32-avx.c  (in xnn_qs8_gemm_xw_minmax_fp32_ukernel_3x4c2__avx, local)
    71:  const __m128i va2 = _mm_loadl_epi64((const __m128i*) a2);
    118: const __m128i va2 = _mm_loadl_epi64((const __m128i*) a2);

3x4c2-xw-minmax-fp32-xop.c  (in xnn_qs8_gemm_xw_minmax_fp32_ukernel_3x4c2__xop, local)
    76:  const __m128i va2 = _mm_loadl_epi64((const __m128i*) a2);
    123: const __m128i va2 = _mm_loadl_epi64((const __m128i*) a2);

3x4c2-xw-minmax-fp32-sse41.c  (in xnn_qs8_gemm_xw_minmax_fp32_ukernel_3x4c2__sse41, local)
    71:  const __m128i va2 = _mm_loadl_epi64((const __m128i*) a2);
    118: const __m128i va2 = _mm_loadl_epi64((const __m128i*) a2);

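In these qs8 c2 kernels, va2 is a 64-bit load of eight signed int8 A values, covering four 2-element K groups. A plausible sketch of the step that follows, based on the usual SSE4.1 int8 GEMM pattern (widen to int16, broadcast one pair per step, multiply-accumulate); the helper name and single-step scope are illustrative assumptions:

    #include <immintrin.h>
    #include <stdint.h>

    // One multiply-accumulate step of a c2 (2-element K group) kernel:
    // load 8 x int8, widen to 8 x int16, broadcast K group 0 to every
    // 32-bit lane, then _mm_madd_epi16 against one block of B.
    static __m128i qs8_c2_step(__m128i vacc, const int8_t* a2, __m128i vxb0) {
      const __m128i va2  = _mm_loadl_epi64((const __m128i*) a2);  // 8 x int8
      const __m128i vxa2 = _mm_cvtepi8_epi16(va2);                // SSE4.1 widen
      return _mm_add_epi32(vacc,
          _mm_madd_epi16(_mm_shuffle_epi32(vxa2, _MM_SHUFFLE(0, 0, 0, 0)), vxb0));
    }
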
/external/XNNPACK/src/f32-gemm/gen-inc/

4x8inc-minmax-neonfma-lane-ld64.c  (in xnn_f32_gemminc_minmax_ukernel_4x8__neonfma_lane_ld64, local)
    76:  const float32x2_t va2 = vld1_f32(a2); a2 += 2;
    105: const float32x4_t va2 = vld1q_dup_f32(a2); a2 += 1;

3x8s4inc-minmax-sse.c  (in xnn_f32_gemminc_minmax_ukernel_3x8s4__sse, local)
    70:  __m128 va2 = _mm_loadu_ps(a2);
    135: __m128 va2 = _mm_loadu_ps(a2);

4x8inc-minmax-neon-lane-ld64.c  (in xnn_f32_gemminc_minmax_ukernel_4x8__neon_lane_ld64, local)
    76:  const float32x2_t va2 = vld1_f32(a2); a2 += 2;
    105: const float32x4_t va2 = vld1q_dup_f32(a2); a2 += 1;
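
The gen-inc (gemminc) variants initialize their accumulators from a caller-supplied buffer rather than the bias; the va2 loads themselves match the gen/ versions. The s4 suffix seen here and above means the A vector is loaded once per 4-element K block and rotated between multiply steps. A minimal sketch of that rotation, assuming a simplified 4-wide B panel (the actual 3x8 kernels use 8-wide panels per row):

    #include <xmmintrin.h>

    // One 4-element K block of an s4 kernel: va2 is loaded once, then
    // rotated one lane after each multiply so every A element meets its
    // matching B panel.
    static __m128 s4_block(__m128 va2, const float* w, __m128 vacc) {
      for (int s = 0; s < 4; s++) {
        const __m128 vb = _mm_loadu_ps(w); w += 4;
        vacc = _mm_add_ps(vacc, _mm_mul_ps(va2, vb));
        // (a0, a1, a2, a3) -> (a1, a2, a3, a0)
        va2 = _mm_shuffle_ps(va2, va2, _MM_SHUFFLE(0, 3, 2, 1));
      }
      return vacc;
    }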