/external/XNNPACK/src/f32-relu/gen/
D | wasm-x4.c   | 33 | float vacc2 = x[2];    | in xnn_f32_relu_ukernel__wasm_x4() local
D | scalar-x4.c | 33 | uint32_t vacc2 = x[2]; | in xnn_f32_relu_ukernel__scalar_x4() local
D | wasm-x8.c   | 33 | float vacc2 = x[2];    | in xnn_f32_relu_ukernel__wasm_x8() local
D | scalar-x8.c | 33 | uint32_t vacc2 = x[2]; | in xnn_f32_relu_ukernel__scalar_x8() local
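The x4/x8 suffix on these generated kernels is the unroll factor: each iteration loads that many elements into separate locals (vacc0, vacc1, vacc2, ...), applies the ReLU, and stores them back. The scalar variants declare the locals as uint32_t because they work on the raw IEEE-754 bit pattern. A minimal sketch of that pattern, assuming a hypothetical function name and sign-bit test rather than XNNPACK's actual generated code:

    #include <stddef.h>
    #include <stdint.h>

    /* Illustrative sketch of an unrolled-by-4 scalar ReLU in the style of the
     * listing above; relu_x4_sketch is a placeholder, not the real
     * xnn_f32_relu_ukernel__scalar_x4 implementation. */
    static void relu_x4_sketch(size_t n, const uint32_t* x, uint32_t* y) {
      for (; n >= 4; n -= 4) {
        /* One local per unrolled element, holding the float's bit pattern. */
        uint32_t vacc0 = x[0];
        uint32_t vacc1 = x[1];
        uint32_t vacc2 = x[2];
        uint32_t vacc3 = x[3];
        x += 4;

        /* If the IEEE-754 sign bit is set, force the value to +0.0f
         * (the all-zero bit pattern). */
        vacc0 = (vacc0 >> 31) ? 0 : vacc0;
        vacc1 = (vacc1 >> 31) ? 0 : vacc1;
        vacc2 = (vacc2 >> 31) ? 0 : vacc2;
        vacc3 = (vacc3 >> 31) ? 0 : vacc3;

        y[0] = vacc0;
        y[1] = vacc1;
        y[2] = vacc2;
        y[3] = vacc3;
        y += 4;
      }
      /* Tail of fewer than 4 elements omitted for brevity. */
    }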
/external/XNNPACK/src/f32-clamp/gen/
D | scalar-x4.c | 34 | float vacc2 = x[2]; | in xnn_f32_clamp_ukernel__scalar_x4() local
D | wasm-x4.c   | 34 | float vacc2 = x[2]; | in xnn_f32_clamp_ukernel__wasm_x4() local
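The clamp kernels follow the same load-into-locals pattern, but the epilogue limits each value to a [min, max] range taken from the kernel's parameters. A hedged sketch, with hypothetical names (clamp_x4_sketch, vmin, vmax) and fminf/fmaxf standing in for the generated min/max code:

    #include <stddef.h>
    #include <math.h>

    /* Illustrative sketch only; not the real xnn_f32_clamp_ukernel__scalar_x4. */
    static void clamp_x4_sketch(size_t n, const float* x, float* y,
                                float vmin, float vmax) {
      for (; n >= 4; n -= 4) {
        float vacc0 = x[0];
        float vacc1 = x[1];
        float vacc2 = x[2];
        float vacc3 = x[3];
        x += 4;

        /* Clamp each unrolled element into [vmin, vmax]. */
        vacc0 = fminf(fmaxf(vacc0, vmin), vmax);
        vacc1 = fminf(fmaxf(vacc1, vmin), vmax);
        vacc2 = fminf(fmaxf(vacc2, vmin), vmax);
        vacc3 = fminf(fmaxf(vacc3, vmin), vmax);

        y[0] = vacc0;
        y[1] = vacc1;
        y[2] = vacc2;
        y[3] = vacc3;
        y += 4;
      }
      /* Tail handling omitted. */
    }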
/external/XNNPACK/src/f32-vlrelu/gen/
D | vlrelu-scalar-x4.c | 36 | float vacc2 = vx2 * vslope;    | in xnn_f32_vlrelu_ukernel__scalar_x4() local
D | vlrelu-wasm-x4.c   | 44 | float vacc2 = vnegx2 * vslope; | in xnn_f32_vlrelu_ukernel__wasm_x4() local
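In the leaky-ReLU (vlrelu) kernels, vacc2 starts as the slope-scaled input (vx2 * vslope in the scalar variant; vnegx2 * vslope, i.e. only the negative part, in the wasm variant) and is then fixed up so that non-negative inputs pass through unchanged. A sketch of one unrolled lane in the scalar flavor; the name vlrelu_x1_sketch and the ternary select are assumptions, not the generated kernel:

    /* Illustrative leaky-ReLU step for a single lane. */
    static float vlrelu_x1_sketch(float vx2, float vslope) {
      float vacc2 = vx2 * vslope;        /* scaled value, wanted when vx2 < 0 */
      vacc2 = vx2 < 0.0f ? vacc2 : vx2;  /* pass non-negative inputs through  */
      return vacc2;
    }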
/external/XNNPACK/src/f32-hswish/gen/
D | hswish-wasm-x4.c   | 44 | float vacc2 = vx2 + vthree; | in xnn_f32_hswish_ukernel__wasm_x4() local
D | hswish-scalar-x4.c | 44 | float vacc2 = vx2 + vthree; | in xnn_f32_hswish_ukernel__scalar_x4() local
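For hard-swish, vacc2 = vx2 + vthree is the first step of y = x * min(max(x + 3, 0), 6) / 6, and the generated kernels carry the rest of that expression through the same local. A hedged sketch for one lane; the function name, the use of fminf/fmaxf, and the explicit constants are assumptions:

    #include <math.h>

    /* Illustrative hard-swish for one lane: y = x * min(max(x + 3, 0), 6) / 6. */
    static float hswish_x1_sketch(float vx2) {
      const float vthree = 3.0f;
      const float vsix   = 6.0f;
      const float vsixth = 0x1.555556p-3f;  /* approximately 1/6 */
      float vacc2 = vx2 + vthree;
      vacc2 = fmaxf(vacc2, 0.0f);
      vacc2 = fminf(vacc2, vsix);
      vacc2 *= vsixth;
      vacc2 *= vx2;
      return vacc2;
    }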
/external/XNNPACK/src/f32-spmm/gen/
D | 8x1-minmax-scalar-pipelined.c | 53  | float vacc2 = vw;    | in xnn_f32_spmm_minmax_ukernel_8x1__scalar_pipelined() local
D | 8x1-minmax-scalar-pipelined.c | 131 | float vacc2 = vw;    | in xnn_f32_spmm_minmax_ukernel_8x1__scalar_pipelined() local
D | 8x1-minmax-scalar.c           | 112 | float vacc2 = vacc0; | in xnn_f32_spmm_minmax_ukernel_8x1__scalar() local
D | 8x1-minmax-scalar.c           | 221 | float vacc2 = vacc0; | in xnn_f32_spmm_minmax_ukernel_8x1__scalar() local
D | 4x1-minmax-scalar-pipelined.c | 49  | float vacc2 = vw;    | in xnn_f32_spmm_minmax_ukernel_4x1__scalar_pipelined() local
D | 8x2-minmax-scalar.c           | 154 | float vacc2 = vacc0; | in xnn_f32_spmm_minmax_ukernel_8x2__scalar() local
D | 8x2-minmax-scalar.c           | 285 | float vacc2 = vacc0; | in xnn_f32_spmm_minmax_ukernel_8x2__scalar() local
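The spmm (sparse matrix times dense matrix) kernels initialize one accumulator per output row of the tile to the channel's bias, either directly from the preloaded weight (vacc2 = vw in the pipelined variants) or by copying vacc0 after the bias was loaded into it (vacc2 = vacc0), then accumulate products over the nonzero weights and clamp at the end. A simplified sketch of a 4-row tile; the nnz/dmap encoding and every name here are assumptions, not XNNPACK's actual sparse data layout:

    #include <stddef.h>
    #include <stdint.h>
    #include <math.h>

    /* Illustrative 4-row sparse matmul tile with a min/max epilogue.
     * Simplified: dmap holds element indices into the input, whereas the
     * real kernels use a different encoding and pointer arithmetic. */
    static void spmm_4x1_sketch(size_t nnz, const float* w, const int32_t* dmap,
                                const float* input, size_t input_stride,
                                float* out, float vmin, float vmax) {
      float vw = *w++;      /* bias for this output channel   */
      float vacc0 = vw;     /* one accumulator per output row */
      float vacc1 = vw;
      float vacc2 = vw;
      float vacc3 = vw;
      for (size_t k = 0; k < nnz; k++) {
        const float* i = input + (size_t) dmap[k] * input_stride;
        vw = *w++;          /* next nonzero weight */
        vacc0 += i[0] * vw;
        vacc1 += i[1] * vw;
        vacc2 += i[2] * vw;
        vacc3 += i[3] * vw;
      }
      /* minmax epilogue */
      out[0] = fminf(fmaxf(vacc0, vmin), vmax);
      out[1] = fminf(fmaxf(vacc1, vmin), vmax);
      out[2] = fminf(fmaxf(vacc2, vmin), vmax);
      out[3] = fminf(fmaxf(vacc3, vmin), vmax);
    }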
/external/XNNPACK/src/f32-raddstoreexpminusmax/gen/
D | neon-p5-x12-acc3.c        | 45 | float32x4_t vacc2 = vmovq_n_f32(0.0f); | in xnn_f32_raddstoreexpminusmax_ukernel__neon_p5_x12_acc3() local
D | scalar-p5-x4-acc4.c       | 43 | float vacc2 = 0.0f;                    | in xnn_f32_raddstoreexpminusmax_ukernel__scalar_p5_x4_acc4() local
D | neonfma-p5-x12-acc3.c     | 44 | float32x4_t vacc2 = vmovq_n_f32(0.0f); | in xnn_f32_raddstoreexpminusmax_ukernel__neonfma_p5_x12_acc3() local
D | sse2-p5-x12-acc3.c        | 45 | __m128 vacc2 = _mm_setzero_ps();       | in xnn_f32_raddstoreexpminusmax_ukernel__sse2_p5_x12_acc3() local
D | wasmsimd-p5-x12-acc3.c    | 45 | v128_t vacc2 = vacc0;                  | in xnn_f32_raddstoreexpminusmax_ukernel__wasmsimd_p5_x12_acc3() local
D | scalar-lut64-p2-x4-acc4.c | 44 | float vacc2 = 0.0f;                    | in xnn_f32_raddstoreexpminusmax_ukernel__scalar_lut64_p2_x4_acc4() local
D | sse2-p5-x16-acc4.c        | 45 | __m128 vacc2 = _mm_setzero_ps();       | in xnn_f32_raddstoreexpminusmax_ukernel__sse2_p5_x16_acc4() local
D | neonfma-p5-x16-acc4.c     | 44 | float32x4_t vacc2 = vmovq_n_f32(0.0f); | in xnn_f32_raddstoreexpminusmax_ukernel__neonfma_p5_x16_acc4() local
D | neon-p5-x16-acc4.c        | 45 | float32x4_t vacc2 = vmovq_n_f32(0.0f); | in xnn_f32_raddstoreexpminusmax_ukernel__neon_p5_x16_acc4() local
D | wasmsimd-p5-x16-acc4.c    | 45 | v128_t vacc2 = vacc0;                  | in xnn_f32_raddstoreexpminusmax_ukernel__wasmsimd_p5_x16_acc4() local
D | neon-p5-x20-acc5.c        | 45 | float32x4_t vacc2 = vmovq_n_f32(0.0f); | in xnn_f32_raddstoreexpminusmax_ukernel__neon_p5_x20_acc5() local
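The raddstoreexpminusmax kernels implement softmax's exp-and-reduce step: for each element they store exp(x - max) and add it into one of several independent accumulators (the acc3/acc4/acc5 suffix is the accumulator count), which are summed at the end; splitting the reduction this way shortens the floating-point dependency chain. The accumulators start at zero (vmovq_n_f32(0.0f), _mm_setzero_ps(), 0.0f) or are copied from vacc0 before anything has been added to it. A scalar sketch with four accumulators; expf() stands in for the kernels' p5 polynomial or lut64-p2 table approximations, and all names are assumptions:

    #include <stddef.h>
    #include <math.h>

    /* Illustrative scalar exp-and-reduce with four independent accumulators;
     * returns the sum of exp(x[i] - vmax) while storing each term into y. */
    static float raddstoreexp_sketch(size_t n, const float* x, float* y, float vmax) {
      float vacc0 = 0.0f;   /* independent partial sums break the serial add chain */
      float vacc1 = 0.0f;
      float vacc2 = 0.0f;
      float vacc3 = 0.0f;
      size_t i = 0;
      for (; i + 4 <= n; i += 4) {
        const float vf0 = expf(x[i + 0] - vmax);
        const float vf1 = expf(x[i + 1] - vmax);
        const float vf2 = expf(x[i + 2] - vmax);
        const float vf3 = expf(x[i + 3] - vmax);
        y[i + 0] = vf0;
        y[i + 1] = vf1;
        y[i + 2] = vf2;
        y[i + 3] = vf3;
        vacc0 += vf0;
        vacc1 += vf1;
        vacc2 += vf2;
        vacc3 += vf3;
      }
      for (; i < n; i++) {
        const float vf = expf(x[i] - vmax);
        y[i] = vf;
        vacc0 += vf;
      }
      return (vacc0 + vacc1) + (vacc2 + vacc3);
    }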