
Searched refs:vacc1x2 (Results 1 – 8 of 8) sorted by relevance

/external/XNNPACK/src/f32-ppmm/gen/
2x4-scalar.c
46 float vacc1x2 = vacc0x2; in xnn_f32_ppmm_ukernel_2x4__scalar() local
67 vacc1x2 += va1 * vb2; in xnn_f32_ppmm_ukernel_2x4__scalar()
80 vacc1x2 = math_min_f32(vacc1x2, vmax); in xnn_f32_ppmm_ukernel_2x4__scalar()
90 vacc1x2 = math_max_f32(vacc1x2, vmin); in xnn_f32_ppmm_ukernel_2x4__scalar()
97 c1[2] = vacc1x2; in xnn_f32_ppmm_ukernel_2x4__scalar()
117 vacc1x0 = vacc1x2; in xnn_f32_ppmm_ukernel_2x4__scalar()
3x3-scalar.c
49 float vacc1x2 = vacc0x2; in xnn_f32_ppmm_ukernel_3x3__scalar() local
74 vacc1x2 += va1 * vb2; in xnn_f32_ppmm_ukernel_3x3__scalar()
88 vacc1x2 = math_min_f32(vacc1x2, vmax); in xnn_f32_ppmm_ukernel_3x3__scalar()
99 vacc1x2 = math_max_f32(vacc1x2, vmin); in xnn_f32_ppmm_ukernel_3x3__scalar()
108 c1[2] = vacc1x2; in xnn_f32_ppmm_ukernel_3x3__scalar()
130 vacc1x0 = vacc1x2; in xnn_f32_ppmm_ukernel_3x3__scalar()
4x4-scalar.c
54 float vacc1x2 = vacc0x2; in xnn_f32_ppmm_ukernel_4x4__scalar() local
89 vacc1x2 += va1 * vb2; in xnn_f32_ppmm_ukernel_4x4__scalar()
110 vacc1x2 = math_min_f32(vacc1x2, vmax); in xnn_f32_ppmm_ukernel_4x4__scalar()
128 vacc1x2 = math_max_f32(vacc1x2, vmin); in xnn_f32_ppmm_ukernel_4x4__scalar()
147 c1[2] = vacc1x2; in xnn_f32_ppmm_ukernel_4x4__scalar()
175 vacc1x0 = vacc1x2; in xnn_f32_ppmm_ukernel_4x4__scalar()
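The ppmm matches above all follow one pattern: vacc1x2 is the scalar accumulator for output row 1, column 2, initialized by copying the row-0 accumulator (which holds the packed bias), updated with one multiply-add per k step, clamped to [vmin, vmax], and stored. The sketch below is illustrative only; the function name, argument list, and packed layout are assumptions for clarity, not the real XNNPACK micro-kernel interface.

#include <stddef.h>

/* Illustrative only: vacc[m][n] plays the role of vacc{m}x{n}, mirroring the
 * init / multiply-accumulate / clamp / store steps seen in the matches above. */
static void ppmm_2x4_sketch(size_t kc, const float* a, const float* b,
                            float* c, size_t c_stride,
                            float vmin, float vmax) {
  float vacc[2][4];
  for (size_t n = 0; n < 4; n++) {
    vacc[0][n] = 0.0f;          /* in XNNPACK this comes from the packed bias */
    vacc[1][n] = vacc[0][n];    /* e.g. "float vacc1x2 = vacc0x2;" */
  }
  for (size_t k = 0; k < kc; k++) {
    const float va0 = a[k * 2 + 0];  /* A is assumed panel-packed, 2 rows interleaved */
    const float va1 = a[k * 2 + 1];
    for (size_t n = 0; n < 4; n++) {
      const float vb = b[k * 4 + n];
      vacc[0][n] += va0 * vb;
      vacc[1][n] += va1 * vb;   /* e.g. "vacc1x2 += va1 * vb2;" */
    }
  }
  for (size_t m = 0; m < 2; m++) {
    for (size_t n = 0; n < 4; n++) {
      float v = vacc[m][n];
      v = v < vmax ? v : vmax;  /* math_min_f32(v, vmax) */
      v = v > vmin ? v : vmin;  /* math_max_f32(v, vmin) */
      c[m * c_stride + n] = v;  /* e.g. "c1[2] = vacc1x2;" */
    }
  }
}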
/external/XNNPACK/src/f32-vmulcaddc/gen/
c4-scalar-2x.c
60 float vacc1x2 = i1[2]; in xnn_f32_vmulcaddc_ukernel_c4__scalar_2x() local
75 vacc1x2 = vacc1x2 * vscale2 + vbias2; in xnn_f32_vmulcaddc_ukernel_c4__scalar_2x()
84 vacc1x2 = math_max_f32(vacc1x2, vmin); in xnn_f32_vmulcaddc_ukernel_c4__scalar_2x()
93 vacc1x2 = math_min_f32(vacc1x2, vmax); in xnn_f32_vmulcaddc_ukernel_c4__scalar_2x()
103 o1[2] = vacc1x2; in xnn_f32_vmulcaddc_ukernel_c4__scalar_2x()
c4-wasm-2x.c
60 float vacc1x2 = i1[2]; in xnn_f32_vmulcaddc_ukernel_c4__wasm_2x() local
75 vacc1x2 = vacc1x2 * vscale2 + vbias2; in xnn_f32_vmulcaddc_ukernel_c4__wasm_2x()
84 vacc1x2 = __builtin_wasm_max_f32(vacc1x2, vmin); in xnn_f32_vmulcaddc_ukernel_c4__wasm_2x()
93 vacc1x2 = __builtin_wasm_min_f32(vacc1x2, vmax); in xnn_f32_vmulcaddc_ukernel_c4__wasm_2x()
103 o1[2] = vacc1x2; in xnn_f32_vmulcaddc_ukernel_c4__wasm_2x()
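In the vmulcaddc matches, vacc1x2 is loaded directly from input row 1, channel 2, fused-multiply-added with a per-channel scale and bias, clamped, and written to the output row. The scalar and wasm variants differ only in which min/max primitives they call. A minimal sketch of that per-channel pattern, with argument names that are assumptions rather than the real kernel signature:

#include <stddef.h>

static void vmulcaddc_c4_2x_sketch(const float* i0, const float* i1,
                                   const float* scale, const float* bias,
                                   float* o0, float* o1,
                                   float vmin, float vmax) {
  for (size_t c = 0; c < 4; c++) {
    float vacc0 = i0[c];                  /* e.g. "float vacc1x2 = i1[2];" */
    float vacc1 = i1[c];
    vacc0 = vacc0 * scale[c] + bias[c];   /* "vacc1x2 = vacc1x2 * vscale2 + vbias2;" */
    vacc1 = vacc1 * scale[c] + bias[c];
    vacc0 = vacc0 > vmin ? vacc0 : vmin;  /* math_max_f32 / __builtin_wasm_max_f32 */
    vacc1 = vacc1 > vmin ? vacc1 : vmin;
    vacc0 = vacc0 < vmax ? vacc0 : vmax;  /* math_min_f32 / __builtin_wasm_min_f32 */
    vacc1 = vacc1 < vmax ? vacc1 : vmax;
    o0[c] = vacc0;
    o1[c] = vacc1;                        /* "o1[2] = vacc1x2;" */
  }
}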
/external/XNNPACK/src/f32-prelu/gen/
scalar-2x4.c
72 float vacc1x2 = signbit(vi1x2) ? vi1x2 * vw2 : vi1x2; in xnn_f32_prelu_ukernel__scalar_2x4() local
81 vacc1x2 = math_max_f32(vacc1x2, vmin); in xnn_f32_prelu_ukernel__scalar_2x4()
90 vacc1x2 = math_min_f32(vacc1x2, vmax); in xnn_f32_prelu_ukernel__scalar_2x4()
100 o1[2] = vacc1x2; in xnn_f32_prelu_ukernel__scalar_2x4()
wasm-2x4.c
72 float vacc1x2 = signbit(vi1x2) ? vi1x2 * vw2 : vi1x2; in xnn_f32_prelu_ukernel__wasm_2x4() local
81 vacc1x2 = __builtin_wasm_max_f32(vacc1x2, vmin); in xnn_f32_prelu_ukernel__wasm_2x4()
90 vacc1x2 = __builtin_wasm_min_f32(vacc1x2, vmax); in xnn_f32_prelu_ukernel__wasm_2x4()
100 o1[2] = vacc1x2; in xnn_f32_prelu_ukernel__wasm_2x4()
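In the prelu matches, vacc1x2 holds input row 1, channel 2 after PReLU: the value is multiplied by the channel weight only when its sign bit is set, then clamped to [vmin, vmax]. A short illustrative sketch under those assumptions (names are not the real kernel signature):

#include <math.h>
#include <stddef.h>

static void prelu_2x4_sketch(const float* i0, const float* i1,
                             const float* w, float* o0, float* o1,
                             float vmin, float vmax) {
  for (size_t c = 0; c < 4; c++) {
    /* e.g. "float vacc1x2 = signbit(vi1x2) ? vi1x2 * vw2 : vi1x2;" */
    float vacc0 = signbit(i0[c]) ? i0[c] * w[c] : i0[c];
    float vacc1 = signbit(i1[c]) ? i1[c] * w[c] : i1[c];
    vacc0 = vacc0 > vmin ? vacc0 : vmin;  /* clamp below */
    vacc1 = vacc1 > vmin ? vacc1 : vmin;
    vacc0 = vacc0 < vmax ? vacc0 : vmax;  /* clamp above */
    vacc1 = vacc1 < vmax ? vacc1 : vmax;
    o0[c] = vacc0;
    o1[c] = vacc1;                        /* "o1[2] = vacc1x2;" */
  }
}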
/external/XNNPACK/src/f32-spmm/gen/
8x4-scalar.c
55 float vacc1x2 = vacc0x2; in xnn_f32_spmm_ukernel_8x4__scalar() local
103 vacc1x2 += va1 * vb2; in xnn_f32_spmm_ukernel_8x4__scalar()
137 float vout1x2 = math_min_f32(vacc1x2, vmax); in xnn_f32_spmm_ukernel_8x4__scalar()
303 float vacc1x2 = vacc0x2; in xnn_f32_spmm_ukernel_8x4__scalar() local
331 vacc1x2 += va1 * vb2; in xnn_f32_spmm_ukernel_8x4__scalar()
349 float vout1x2 = math_min_f32(vacc1x2, vmax); in xnn_f32_spmm_ukernel_8x4__scalar()
445 float vacc1x2 = vacc0x2; in xnn_f32_spmm_ukernel_8x4__scalar() local
463 vacc1x2 += va1 * vb2; in xnn_f32_spmm_ukernel_8x4__scalar()
473 float vout1x2 = math_min_f32(vacc1x2, vmax); in xnn_f32_spmm_ukernel_8x4__scalar()
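The spmm matches repeat the same accumulator life cycle three times because the 8x4 kernel has separate remainder paths: vacc1x2 starts from the shared bias accumulator, gathers input values through the sparse nonzero list, and is clamped into a vout1x2 before the store. A rough sketch of that idea; the compressed-sparse layout and argument names here are assumptions, not the real XNNPACK packed format:

#include <stddef.h>
#include <stdint.h>

static void spmm_2x_sketch(size_t nnz, const float* weights,
                           const int32_t* input_offsets, float bias,
                           const float* input, float* out,
                           float vmin, float vmax) {
  float vacc0 = bias;                  /* e.g. "float vacc1x2 = vacc0x2;" */
  float vacc1 = vacc0;
  for (size_t k = 0; k < nnz; k++) {
    const float* i = input + input_offsets[k];  /* gather through nonzero offsets */
    const float vw = weights[k];
    vacc0 += i[0] * vw;
    vacc1 += i[1] * vw;                /* e.g. "vacc1x2 += va1 * vb2;" */
  }
  /* "float vout1x2 = math_min_f32(vacc1x2, vmax);" then clamp below and store */
  float vout0 = vacc0 < vmax ? vacc0 : vmax;
  float vout1 = vacc1 < vmax ? vacc1 : vmax;
  vout0 = vout0 > vmin ? vout0 : vmin;
  vout1 = vout1 > vmin ? vout1 : vmin;
  out[0] = vout0;
  out[1] = vout1;
}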