| /external/XNNPACK/src/f32-vrelu/gen/ | 
| D | vrelu-wasm-x8.c | 35     float vacc4 = x[4];  in xnn_f32_vrelu_ukernel__wasm_x8()  local
  | 
| D | vrelu-scalar-x8.c | 35     uint32_t vacc4 = x[4];  in xnn_f32_vrelu_ukernel__scalar_x8()  local
  | 
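The f32-vrelu entries above come from kernels unrolled by eight elements, so vacc4 is simply the fifth of eight per-iteration locals; the scalar variant loads the floats as uint32_t and clamps with a sign-mask rather than a floating-point compare, while the wasm variant keeps them as float. A minimal sketch of that unrolled pattern, assuming the sign-mask formulation suggested by the uint32_t loads (the function name, remainder handling, and exact masking are illustrative, not the generated kernel's verbatim body):

#include <stddef.h>
#include <stdint.h>

/* Hypothetical sketch of an unrolled-by-8 scalar ReLU: floats are reinterpreted as
 * uint32_t and negatives are zeroed with a sign-derived mask. */
static void relu_scalar_x8_sketch(size_t n, const float* x_ptr, float* y_ptr) {
  const uint32_t* x = (const uint32_t*) x_ptr;
  uint32_t* y = (uint32_t*) y_ptr;
  for (; n >= 8; n -= 8) {
    uint32_t vacc0 = x[0];
    uint32_t vacc1 = x[1];
    uint32_t vacc2 = x[2];
    uint32_t vacc3 = x[3];
    uint32_t vacc4 = x[4];  /* the local reported in the listings above */
    uint32_t vacc5 = x[5];
    uint32_t vacc6 = x[6];
    uint32_t vacc7 = x[7];
    x += 8;

    /* If the sign bit is set, (v >> 31) - 1 == 0 and the value is zeroed;
     * otherwise the mask is all-ones and the value passes through unchanged. */
    vacc0 = ((vacc0 >> 31) - 1) & vacc0;
    vacc1 = ((vacc1 >> 31) - 1) & vacc1;
    vacc2 = ((vacc2 >> 31) - 1) & vacc2;
    vacc3 = ((vacc3 >> 31) - 1) & vacc3;
    vacc4 = ((vacc4 >> 31) - 1) & vacc4;
    vacc5 = ((vacc5 >> 31) - 1) & vacc5;
    vacc6 = ((vacc6 >> 31) - 1) & vacc6;
    vacc7 = ((vacc7 >> 31) - 1) & vacc7;

    y[0] = vacc0; y[1] = vacc1; y[2] = vacc2; y[3] = vacc3;
    y[4] = vacc4; y[5] = vacc5; y[6] = vacc6; y[7] = vacc7;
    y += 8;
  }
  for (; n != 0; n -= 1) {
    uint32_t vacc = *x++;
    *y++ = ((vacc >> 31) - 1) & vacc;
  }
}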
| /external/XNNPACK/src/qs8-vcvt/gen/ | 
| D | vcvt-armsimd32-x8.c | 43     int32_t vacc4 = __smlawb(vmultiplier, vx46, vbias);  in xnn_qs8_vcvt_ukernel__armsimd32_x8()  local
  | 
| D | vcvt-sse2-x32.c | 57     __m128i vacc4 = _mm_unpacklo_epi16(vprodlo2, vprodhi2);  in xnn_qs8_vcvt_ukernel__sse2_x32()  local
  | 
| /external/XNNPACK/src/qu8-vcvt/gen/ | 
| D | vcvt-armsimd32-x8.c | 43     int32_t vacc4 = __smlawb(vmultiplier, vx46, vbias);  in xnn_qu8_vcvt_ukernel__armsimd32_x8()  local
  | 
| D | vcvt-sse2-x32.c | 56     __m128i vacc4 = _mm_unpacklo_epi16(vprodlo2, vprodhi2);  in xnn_qu8_vcvt_ukernel__sse2_x32()  local
  | 
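The qs8 and qu8 vcvt kernels requantize a tensor from one scale/zero-point pair to another; the armsimd32 variants fuse the scale multiply and the zero-point bias into one __smlawb() per pair of elements (a 32x16-bit signed multiply on the bottom halfword that keeps the top 32 bits of the 48-bit product and adds the accumulator operand). A portable sketch of that per-element step, with the parameter packing simplified and the names below hypothetical:

#include <stdint.h>

/* Portable equivalent of the __smlawb() call in the listings: multiply the 32-bit
 * multiplier by the sign-extended bottom halfword of x, keep the top 32 bits of the
 * 48-bit product, and add the bias. */
static inline int32_t smlawb_equivalent(int32_t multiplier, int32_t x, int32_t bias) {
  const int16_t x_bottom = (int16_t) (uint16_t) (x & 0xFFFF);
  return bias + (int32_t) (((int64_t) multiplier * (int64_t) x_bottom) >> 16);
}

/* Hypothetical per-element requantization for qs8: the scale ratio is folded into
 * `multiplier` and both zero points (plus rounding) into `bias`; the generated kernels
 * pack these differently and drive two elements through each __smlawb(). */
static inline int8_t qs8_cvt_element_sketch(int8_t x, int32_t multiplier, int32_t bias) {
  int32_t vacc = smlawb_equivalent(multiplier, (int32_t) x, bias);
  if (vacc < INT8_MIN) vacc = INT8_MIN;  /* saturate to the qs8 range */
  if (vacc > INT8_MAX) vacc = INT8_MAX;
  return (int8_t) vacc;
}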
| /external/XNNPACK/src/qs8-vlrelu/gen/ | 
| D | vlrelu-armsimd32-x8.c | 53     int32_t vacc4 = __smlabb(vmultiplier46, vx46, vbias);  in xnn_qs8_vlrelu_ukernel__armsimd32_x8()  local
  | 
| /external/XNNPACK/src/qu8-vlrelu/gen/ | 
| D | vlrelu-armsimd32-x8.c | 53     int32_t vacc4 = __smlabb(vmultiplier46, vx46, vbias);  in xnn_qu8_vlrelu_ukernel__armsimd32_x8()  local
  | 
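The vlrelu kernels apply a quantized leaky ReLU: the multiplier fed to the __smlabb() multiply-accumulate (a 16x16-bit multiply of the bottom halfwords plus an accumulator) is chosen per element depending on whether the input sits above or below its zero point. A scalar sketch of that selection, with the fixed-point format simplified and all names hypothetical:

#include <stdint.h>

/* Hypothetical scalar form of the quantized leaky ReLU behind the vlrelu kernels:
 * pick a multiplier from the sign of (x - input_zero_point), apply it in fixed point,
 * rebias to the output zero point, and saturate.  The generated kernels fold rounding
 * and the output bias into `vbias` and do the multiply with __smlabb(). */
static inline int8_t qs8_lrelu_element_sketch(
    int8_t x,
    int32_t input_zero_point,
    int32_t positive_multiplier,  /* Q15 scale used when x is at or above the zero point */
    int32_t negative_multiplier,  /* Q15 scale times the negative slope, used below it */
    int32_t output_zero_point)
{
  const int32_t vx = (int32_t) x - input_zero_point;
  const int32_t vmultiplier = (vx >= 0) ? positive_multiplier : negative_multiplier;
  /* Round-to-nearest Q15 multiply; the real kernels use a different packing/shift. */
  int32_t vacc = output_zero_point
      + (int32_t) (((int64_t) vx * (int64_t) vmultiplier + (1 << 14)) >> 15);
  if (vacc < INT8_MIN) vacc = INT8_MIN;
  if (vacc > INT8_MAX) vacc = INT8_MAX;
  return (int8_t) vacc;
}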
| /external/XNNPACK/src/f32-raddstoreexpminusmax/gen/ | 
| D | neonfma-rr1-p5-x20-acc5.c | 43   float32x4_t vacc4 = vmovq_n_f32(0.0f);  in xnn_f32_raddstoreexpminusmax_ukernel__neonfma_rr1_p5_x20_acc5()  local
  | 
| D | sse2-rr2-p5-x20-acc5.c | 44   __m128 vacc4 = _mm_setzero_ps();  in xnn_f32_raddstoreexpminusmax_ukernel__sse2_rr2_p5_x20_acc5()  local
  | 
| D | wasmsimd-rr2-p5-x20-acc5.c | 44   v128_t vacc4 = vacc0;  in xnn_f32_raddstoreexpminusmax_ukernel__wasmsimd_rr2_p5_x20_acc5()  local
  | 
| D | neon-rr2-p5-x20-acc5.c | 44   float32x4_t vacc4 = vmovq_n_f32(0.0f);  in xnn_f32_raddstoreexpminusmax_ukernel__neon_rr2_p5_x20_acc5()  local
  | 
| D | avx512f-rr1-p5-scalef-x160-acc5.c | 42   __m512 vacc4 = _mm512_setzero_ps();  in xnn_f32_raddstoreexpminusmax_ukernel__avx512f_rr1_p5_scalef_x160_acc5()  local
  | 
| D | avx2-rr1-p5-x80-acc5.c | 42   __m256 vacc4 = _mm256_setzero_ps();  in xnn_f32_raddstoreexpminusmax_ukernel__avx2_rr1_p5_x80_acc5()  local
  | 
| D | avx512f-rr1-p5-scalef-x192-acc6.c | 42   __m512 vacc4 = _mm512_setzero_ps();  in xnn_f32_raddstoreexpminusmax_ukernel__avx512f_rr1_p5_scalef_x192_acc6()  local
  | 
| D | neon-rr2-lut64-p2-x20-acc5.c | 43   float32x4_t vacc4 = vmovq_n_f32(0.0f);  in xnn_f32_raddstoreexpminusmax_ukernel__neon_rr2_lut64_p2_x20_acc5()  local
  | 
| /external/XNNPACK/src/f16-raddstoreexpminusmax/gen/ | 
| D | avx2-rr1-p2-x40-acc5.c | 42   __m256 vacc4 = _mm256_setzero_ps();  in xnn_f16_raddstoreexpminusmax_ukernel__avx2_rr1_p2_x40_acc5()  local
  | 
| D | neonfp16arith-rr2-p2-x40-acc5.c | 43   float16x8_t vacc4 = vmovq_n_f16(0.0f);  in xnn_f16_raddstoreexpminusmax_ukernel__neonfp16arith_rr2_p2_x40_acc5()  local
  | 
| D | avx2-rr1-p2-x80-acc5.c | 42   __m256 vacc4 = _mm256_setzero_ps();  in xnn_f16_raddstoreexpminusmax_ukernel__avx2_rr1_p2_x80_acc5()  local
  | 
| D | avx2-rr1-p2-x96-acc6.c | 42   __m256 vacc4 = _mm256_setzero_ps();  in xnn_f16_raddstoreexpminusmax_ukernel__avx2_rr1_p2_x96_acc6()  local
  | 
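All of the f32 and f16 raddstoreexpminusmax entries come from the *-acc5 / *-acc6 variants, which split the running sum of exp(x - max) across five or six independent accumulators so the additions can overlap in the pipeline; vacc4 is the fifth of those, initialized to zero (or copied from vacc0 in the wasmsimd variant). A plain scalar sketch of the five-accumulator shape, using expf() in place of the kernels' SIMD polynomial approximations:

#include <math.h>
#include <stddef.h>

/* Hypothetical scalar reference for the *-acc5 pattern: write y[i] = exp(x[i] - max)
 * and return the running sum, split across five accumulators (vacc0..vacc4). */
static float raddstoreexpminusmax_acc5_sketch(size_t n, const float* x, float* y, float max) {
  float vacc0 = 0.0f, vacc1 = 0.0f, vacc2 = 0.0f, vacc3 = 0.0f;
  float vacc4 = 0.0f;  /* the fifth accumulator named in the listings */
  size_t i = 0;
  for (; i + 5 <= n; i += 5) {
    const float f0 = expf(x[i + 0] - max);
    const float f1 = expf(x[i + 1] - max);
    const float f2 = expf(x[i + 2] - max);
    const float f3 = expf(x[i + 3] - max);
    const float f4 = expf(x[i + 4] - max);
    y[i + 0] = f0; y[i + 1] = f1; y[i + 2] = f2; y[i + 3] = f3; y[i + 4] = f4;
    vacc0 += f0; vacc1 += f1; vacc2 += f2; vacc3 += f3; vacc4 += f4;
  }
  for (; i < n; i++) {
    const float f = expf(x[i] - max);
    y[i] = f;
    vacc0 += f;
  }
  /* Reduce the partial sums at the end, as the kernels do after their main loop. */
  return ((vacc0 + vacc1) + (vacc2 + vacc3)) + vacc4;
}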
| /external/XNNPACK/src/f32-spmm/gen/ | 
| D | 8x1-minmax-scalar-pipelined.c | 55       float vacc4 = vw;  in xnn_f32_spmm_minmax_ukernel_8x1__scalar_pipelined()  local
  | 
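In the 8x1 sparse GEMM kernel, vacc4 is one of eight per-output-position accumulators, each seeded from the weight/bias value held in vw at that point (the "pipelined" variant preloads the next weight and input while the current ones are in use). A rough scalar sketch of that accumulator pattern, with the argument layout hypothetical rather than the kernel's actual signature, offsets expressed in elements instead of bytes, and the min/max output clamping omitted:

#include <stddef.h>
#include <stdint.h>

/* Hypothetical 8x1 sparse accumulation: eight consecutive output positions of one
 * channel are computed together, every accumulator starting from the value in vw. */
static void spmm_8x1_sketch(
    size_t nonzeros,               /* nonzero weights for this output channel */
    const float* weights,          /* bias followed by the nonzero weight values */
    const int32_t* input_offsets,  /* per-nonzero hop (in elements) to the next input row */
    const float* input,            /* dense input; i[0..7] are the eight positions */
    float* output)                 /* eight outputs for this channel */
{
  const float vw = *weights++;
  float vacc0 = vw, vacc1 = vw, vacc2 = vw, vacc3 = vw;
  float vacc4 = vw;                /* the local reported in the listing */
  float vacc5 = vw, vacc6 = vw, vacc7 = vw;
  const float* i = input;
  for (size_t k = 0; k < nonzeros; k++) {
    const float w = *weights++;
    vacc0 += i[0] * w;
    vacc1 += i[1] * w;
    vacc2 += i[2] * w;
    vacc3 += i[3] * w;
    vacc4 += i[4] * w;
    vacc5 += i[5] * w;
    vacc6 += i[6] * w;
    vacc7 += i[7] * w;
    i += input_offsets[k];         /* hop to the input row of the next nonzero weight */
  }
  output[0] = vacc0; output[1] = vacc1; output[2] = vacc2; output[3] = vacc3;
  output[4] = vacc4; output[5] = vacc5; output[6] = vacc6; output[7] = vacc7;
}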
| /external/XNNPACK/src/f32-raddexpminusmax/gen/ | 
| D | avx512f-p5-scalef-x160-acc5.c | 43   __m512 vacc4 = _mm512_setzero_ps();  in xnn_f32_raddexpminusmax_ukernel__avx512f_p5_scalef_x160_acc5()  local
  | 
| D | avx2-p5-x80-acc5.c | 46   __m256 vacc4 = _mm256_setzero_ps();  in xnn_f32_raddexpminusmax_ukernel__avx2_p5_x80_acc5()  local
  | 
| D | avx512f-p5-scalef-x192-acc6.c | 43   __m512 vacc4 = _mm512_setzero_ps();  in xnn_f32_raddexpminusmax_ukernel__avx512f_p5_scalef_x192_acc6()  local
  | 
| D | avx2-p5-x96-acc6.c | 46   __m256 vacc4 = _mm256_setzero_ps();  in xnn_f32_raddexpminusmax_ukernel__avx2_p5_x96_acc6()  local
  |
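The f32-raddexpminusmax entries follow the same five/six-accumulator reduction as the raddstoreexpminusmax sketch above; the only difference implied by the names is that the exponentials are summed without being stored back out.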