
Searched refs:vacc0x2 (Results 1 – 8 of 8) sorted by relevance

/external/XNNPACK/src/f32-ppmm/gen/
3x3-scalar.c
46 float vacc0x2 = w[2]; in xnn_f32_ppmm_ukernel_3x3__scalar() local
49 float vacc1x2 = vacc0x2; in xnn_f32_ppmm_ukernel_3x3__scalar()
52 float vacc2x2 = vacc0x2; in xnn_f32_ppmm_ukernel_3x3__scalar()
73 vacc0x2 += va0 * vb2; in xnn_f32_ppmm_ukernel_3x3__scalar()
87 vacc0x2 = math_min_f32(vacc0x2, vmax); in xnn_f32_ppmm_ukernel_3x3__scalar()
98 vacc0x2 = math_max_f32(vacc0x2, vmin); in xnn_f32_ppmm_ukernel_3x3__scalar()
111 c0[2] = vacc0x2; in xnn_f32_ppmm_ukernel_3x3__scalar()
131 vacc0x0 = vacc0x2; in xnn_f32_ppmm_ukernel_3x3__scalar()
2x4-scalar.c
42 float vacc0x2 = w[2]; in xnn_f32_ppmm_ukernel_2x4__scalar() local
46 float vacc1x2 = vacc0x2; in xnn_f32_ppmm_ukernel_2x4__scalar()
66 vacc0x2 += va0 * vb2; in xnn_f32_ppmm_ukernel_2x4__scalar()
79 vacc0x2 = math_min_f32(vacc0x2, vmax); in xnn_f32_ppmm_ukernel_2x4__scalar()
89 vacc0x2 = math_max_f32(vacc0x2, vmin); in xnn_f32_ppmm_ukernel_2x4__scalar()
101 c0[2] = vacc0x2; in xnn_f32_ppmm_ukernel_2x4__scalar()
118 vacc0x0 = vacc0x2; in xnn_f32_ppmm_ukernel_2x4__scalar()
4x4-scalar.c
50 float vacc0x2 = w[2]; in xnn_f32_ppmm_ukernel_4x4__scalar() local
54 float vacc1x2 = vacc0x2; in xnn_f32_ppmm_ukernel_4x4__scalar()
58 float vacc2x2 = vacc0x2; in xnn_f32_ppmm_ukernel_4x4__scalar()
62 float vacc3x2 = vacc0x2; in xnn_f32_ppmm_ukernel_4x4__scalar()
88 vacc0x2 += va0 * vb2; in xnn_f32_ppmm_ukernel_4x4__scalar()
109 vacc0x2 = math_min_f32(vacc0x2, vmax); in xnn_f32_ppmm_ukernel_4x4__scalar()
127 vacc0x2 = math_max_f32(vacc0x2, vmin); in xnn_f32_ppmm_ukernel_4x4__scalar()
151 c0[2] = vacc0x2; in xnn_f32_ppmm_ukernel_4x4__scalar()
176 vacc0x0 = vacc0x2; in xnn_f32_ppmm_ukernel_4x4__scalar()
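The ppmm matches above all follow the same accumulator convention: vacc{M}x{N} is the running dot product for output row M and column N, seeded from the packed bias in w[], updated as va{M} * vb{N} in the inner loop, and clamped before the store. The sketch below reproduces that pattern for a 2x2 tile; it is an illustration only, not the generated kernel, and the remainder handling, pointer striding, and math_min_f32/math_max_f32 helpers of the real src/f32-ppmm/gen/ files are simplified away.

#include <stddef.h>

/* Sketch of the vacc{M}x{N} accumulator pattern for a 2x2 output tile.
 * Assumed packed layouts: w = 2 biases followed by k*2 weights,
 * a = k*2 interleaved activations (row 0, row 1 per reduction step). */
static void ppmm_2x2_sketch(
    size_t k,                 /* reduction length */
    const float* w,           /* packed weights */
    const float* a,           /* packed activations */
    float* c0, float* c1,     /* output rows */
    float vmin, float vmax)   /* clamping bounds */
{
  /* Column accumulators start from the packed bias, then are copied per row,
   * as in "float vacc0x2 = w[2]" / "float vacc1x2 = vacc0x2". */
  float vacc0x0 = w[0];
  float vacc0x1 = w[1];
  float vacc1x0 = vacc0x0;
  float vacc1x1 = vacc0x1;
  w += 2;

  for (size_t i = 0; i < k; i++) {
    const float va0 = a[0];
    const float va1 = a[1];
    a += 2;
    const float vb0 = w[0];
    const float vb1 = w[1];
    w += 2;
    vacc0x0 += va0 * vb0;   /* "vacc0x2 += va0 * vb2" for each (row, column) */
    vacc0x1 += va0 * vb1;
    vacc1x0 += va1 * vb0;
    vacc1x1 += va1 * vb1;
  }

  /* Clamp to [vmin, vmax], standing in for math_min_f32 / math_max_f32. */
  vacc0x0 = vacc0x0 > vmax ? vmax : vacc0x0;
  vacc0x1 = vacc0x1 > vmax ? vmax : vacc0x1;
  vacc1x0 = vacc1x0 > vmax ? vmax : vacc1x0;
  vacc1x1 = vacc1x1 > vmax ? vmax : vacc1x1;
  vacc0x0 = vacc0x0 < vmin ? vmin : vacc0x0;
  vacc0x1 = vacc0x1 < vmin ? vmin : vacc0x1;
  vacc1x0 = vacc1x0 < vmin ? vmin : vacc1x0;
  vacc1x1 = vacc1x1 < vmin ? vmin : vacc1x1;

  c0[0] = vacc0x0; c0[1] = vacc0x1;
  c1[0] = vacc1x0; c1[1] = vacc1x1;
}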
/external/XNNPACK/src/f32-vmulcaddc/gen/
c4-scalar-2x.c
55 float vacc0x2 = i0[2]; in xnn_f32_vmulcaddc_ukernel_c4__scalar_2x() local
71 vacc0x2 = vacc0x2 * vscale2 + vbias2; in xnn_f32_vmulcaddc_ukernel_c4__scalar_2x()
80 vacc0x2 = math_max_f32(vacc0x2, vmin); in xnn_f32_vmulcaddc_ukernel_c4__scalar_2x()
89 vacc0x2 = math_min_f32(vacc0x2, vmax); in xnn_f32_vmulcaddc_ukernel_c4__scalar_2x()
98 o0[2] = vacc0x2; in xnn_f32_vmulcaddc_ukernel_c4__scalar_2x()
c4-wasm-2x.c
55 float vacc0x2 = i0[2]; in xnn_f32_vmulcaddc_ukernel_c4__wasm_2x() local
71 vacc0x2 = vacc0x2 * vscale2 + vbias2; in xnn_f32_vmulcaddc_ukernel_c4__wasm_2x()
80 vacc0x2 = __builtin_wasm_max_f32(vacc0x2, vmin); in xnn_f32_vmulcaddc_ukernel_c4__wasm_2x()
89 vacc0x2 = __builtin_wasm_min_f32(vacc0x2, vmax); in xnn_f32_vmulcaddc_ukernel_c4__wasm_2x()
98 o0[2] = vacc0x2; in xnn_f32_vmulcaddc_ukernel_c4__wasm_2x()
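The two vmulcaddc matches differ only in the clamp intrinsic (math_*_f32 vs. __builtin_wasm_*_f32); the data flow is the same: load, multiply by a per-channel scale, add a per-channel bias, clamp, store. A minimal sketch of that flow, assuming a flat scales-then-biases weight layout (the real kernels use a packed, tile-interleaved layout and 2-row unrolling):

#include <stddef.h>

/* One row of the vmulcaddc pattern. The weights layout here is an
 * assumption for illustration: "channels" scales followed by "channels" biases. */
static void vmulcaddc_sketch(
    size_t channels,
    const float* i0,          /* input row */
    const float* weights,     /* assumed: scales, then biases */
    float* o0,                /* output row */
    float vmin, float vmax)
{
  const float* scale = weights;
  const float* bias = weights + channels;
  for (size_t c = 0; c < channels; c++) {
    float vacc = i0[c];
    vacc = vacc * scale[c] + bias[c];   /* as in "vacc0x2 * vscale2 + vbias2" */
    vacc = vacc < vmin ? vmin : vacc;   /* math_max_f32(vacc, vmin) */
    vacc = vacc > vmax ? vmax : vacc;   /* math_min_f32(vacc, vmax) */
    o0[c] = vacc;
  }
}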
/external/XNNPACK/src/f32-spmm/gen/
8x4-scalar.c
54 float vacc0x2 = *w++; in xnn_f32_spmm_ukernel_8x4__scalar() local
55 float vacc1x2 = vacc0x2; in xnn_f32_spmm_ukernel_8x4__scalar()
56 float vacc2x2 = vacc0x2; in xnn_f32_spmm_ukernel_8x4__scalar()
57 float vacc3x2 = vacc0x2; in xnn_f32_spmm_ukernel_8x4__scalar()
58 float vacc4x2 = vacc0x2; in xnn_f32_spmm_ukernel_8x4__scalar()
59 float vacc5x2 = vacc0x2; in xnn_f32_spmm_ukernel_8x4__scalar()
60 float vacc6x2 = vacc0x2; in xnn_f32_spmm_ukernel_8x4__scalar()
61 float vacc7x2 = vacc0x2; in xnn_f32_spmm_ukernel_8x4__scalar()
102 vacc0x2 += va0 * vb2; in xnn_f32_spmm_ukernel_8x4__scalar()
136 float vout0x2 = math_min_f32(vacc0x2, vmax); in xnn_f32_spmm_ukernel_8x4__scalar()
[all …]
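In the spmm matches, a single bias read (*w++) seeds one accumulator per output row, and each nonzero weight then updates all eight row accumulators from the same input offset before the result is clamped into vout. A reduced 2-row, 1-column sketch of that pattern follows; the plain idx[] offset array is an assumed encoding standing in for the kernel's actual index stream, and the interleaved input layout is likewise an assumption.

#include <stddef.h>
#include <stdint.h>

/* Sketch of the sparse-matmul accumulator pattern for one output channel
 * and two output rows. */
static void spmm_2x1_sketch(
    size_t nnz,              /* nonzero weights for this output channel */
    const float* w,          /* bias followed by nnz nonzero weights */
    const int32_t* idx,      /* input offset per nonzero (assumed encoding) */
    const float* input,      /* dense input, 2 rows interleaved per element (assumed) */
    float* out,              /* 2 outputs for this channel */
    float vmin, float vmax)
{
  float vacc0 = *w++;        /* bias, like "float vacc0x2 = *w++" */
  float vacc1 = vacc0;       /* broadcast to the other row accumulator */
  for (size_t i = 0; i < nnz; i++) {
    const float vb = *w++;
    const float va0 = input[idx[i] + 0];
    const float va1 = input[idx[i] + 1];
    vacc0 += va0 * vb;       /* "vacc0x2 += va0 * vb2" */
    vacc1 += va1 * vb;
  }
  float vout0 = vacc0 > vmax ? vmax : vacc0;   /* "vout0x2 = math_min_f32(vacc0x2, vmax)" */
  float vout1 = vacc1 > vmax ? vmax : vacc1;
  vout0 = vout0 < vmin ? vmin : vout0;
  vout1 = vout1 < vmin ? vmin : vout1;
  out[0] = vout0;
  out[1] = vout1;
}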
/external/XNNPACK/src/f32-prelu/gen/
scalar-2x4.c
68 float vacc0x2 = signbit(vi0x2) ? vi0x2 * vw2 : vi0x2; in xnn_f32_prelu_ukernel__scalar_2x4() local
77 vacc0x2 = math_max_f32(vacc0x2, vmin); in xnn_f32_prelu_ukernel__scalar_2x4()
86 vacc0x2 = math_min_f32(vacc0x2, vmax); in xnn_f32_prelu_ukernel__scalar_2x4()
95 o0[2] = vacc0x2; in xnn_f32_prelu_ukernel__scalar_2x4()
wasm-2x4.c
68 float vacc0x2 = signbit(vi0x2) ? vi0x2 * vw2 : vi0x2; in xnn_f32_prelu_ukernel__wasm_2x4() local
77 vacc0x2 = __builtin_wasm_max_f32(vacc0x2, vmin); in xnn_f32_prelu_ukernel__wasm_2x4()
86 vacc0x2 = __builtin_wasm_min_f32(vacc0x2, vmax); in xnn_f32_prelu_ukernel__wasm_2x4()
95 o0[2] = vacc0x2; in xnn_f32_prelu_ukernel__wasm_2x4()
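Both prelu matches compute the same thing: negative inputs are scaled by the channel's slope (selected with signbit), non-negative inputs pass through, and the result is clamped before the store. A single-row sketch of the pattern, without the 2x4 unrolling of the generated kernels:

#include <math.h>
#include <stddef.h>

/* One row of the PReLU pattern from the scalar-2x4 / wasm-2x4 matches. */
static void prelu_sketch(
    size_t channels,
    const float* i0,     /* input row */
    const float* w,      /* per-channel negative slopes */
    float* o0,           /* output row */
    float vmin, float vmax)
{
  for (size_t c = 0; c < channels; c++) {
    const float vi = i0[c];
    float vacc = signbit(vi) ? vi * w[c] : vi;   /* as in the matched lines */
    vacc = vacc < vmin ? vmin : vacc;            /* math_max_f32(vacc, vmin) */
    vacc = vacc > vmax ? vmax : vacc;            /* math_min_f32(vacc, vmax) */
    o0[c] = vacc;
  }
}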