Searched refs:vacc3x0 (Results 1 – 6 of 6) sorted by relevance
/external/XNNPACK/src/f32-ppmm/gen/

4x2-scalar.c  (all hits in xnn_f32_ppmm_ukernel_4x2__scalar)
     54  float vacc3x0 = vacc0x0;                 (local declaration)
     73  vacc3x0 += va3 * vb0;
     86  vacc3x0 = math_min_f32(vacc3x0, vmax);
     96  vacc3x0 = math_max_f32(vacc3x0, vmin);
    103  c3[0] = vacc3x0;
    122  *c3 = vacc3x0;

4x4-scalar.c  (all hits in xnn_f32_ppmm_ukernel_4x4__scalar)
     60  float vacc3x0 = vacc0x0;                 (local declaration)
     83  vacc3x0 += va3 * vb0;
    104  vacc3x0 = math_min_f32(vacc3x0, vmax);
    122  vacc3x0 = math_max_f32(vacc3x0, vmin);
    137  c3[0] = vacc3x0;
    164  c3[0] = vacc3x0;
    173  vacc3x0 = vacc3x2;
    184  *c3 = vacc3x0;
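
Read in sequence, the PPMM hits trace the whole lifecycle of this accumulator: every row's accumulator starts as a copy of vacc0x0, is updated once per reduction step with a scalar multiply-accumulate, is clamped to [vmin, vmax], and is finally stored through the row pointer c3. (The 4x4 kernel additionally shifts vacc3x2 into vacc3x0 at line 173, presumably so the narrow remainder-column store path can be reused.) Below is a minimal standalone sketch of that pattern; the function name, the init/vmin/vmax parameters, and the flat a3/b0 addressing are illustrative simplifications rather than XNNPACK's packed layout, and fminf/fmaxf stand in for math_min_f32/math_max_f32:

    #include <stddef.h>
    #include <math.h>

    /* Sketch of the vacc3x0 lifecycle seen in 4x2-scalar.c; the generated
       kernel keeps one such scalar accumulator per element of the 4x2 tile. */
    float ppmm_row3_col0(const float* a3, const float* b0, size_t kc,
                         float init, float vmin, float vmax) {
      float vacc3x0 = init;                /* line 54: copied from vacc0x0 */
      for (size_t k = 0; k < kc; k++) {
        vacc3x0 += a3[k] * b0[k];          /* line 73: multiply-accumulate */
      }
      vacc3x0 = fminf(vacc3x0, vmax);      /* line 86: math_min_f32 */
      vacc3x0 = fmaxf(vacc3x0, vmin);      /* line 96: math_max_f32 */
      return vacc3x0;                      /* lines 103/122: stored via c3 */
    }

In the generated kernels this body is fully unrolled across the output tile, which is why each (row, column) pair gets its own named accumulator such as vacc3x0.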
/external/XNNPACK/src/f32-spmm/gen/

8x1-scalar.c  (all hits in xnn_f32_spmm_ukernel_8x1__scalar)
     41  float vacc3x0 = vacc0x0;                 (local declaration)
     62  vacc3x0 += va3 * vb0;
     72  float vout3x0 = math_min_f32(vacc3x0, vmax);
    174  float vacc3x0 = vacc0x0;                 (local declaration)
    187  vacc3x0 += va3 * vb0;
    193  float vout3x0 = math_min_f32(vacc3x0, vmax);

8x2-scalar.c  (all hits in xnn_f32_spmm_ukernel_8x2__scalar)
     41  float vacc3x0 = vacc0x0;                 (local declaration)
     71  vacc3x0 += va3 * vb0;
     89  float vout3x0 = math_min_f32(vacc3x0, vmax);
    215  float vacc3x0 = vacc0x0;                 (local declaration)
    233  vacc3x0 += va3 * vb0;
    243  float vout3x0 = math_min_f32(vacc3x0, vmax);

4x1-scalar.c  (all hits in xnn_f32_spmm_ukernel_4x1__scalar)
     41  float vacc3x0 = vacc0x0;                 (local declaration)
     54  vacc3x0 += va3 * vb0;
     60  float vout3x0 = math_min_f32(vacc3x0, vmax);

8x4-scalar.c  (all hits in xnn_f32_spmm_ukernel_8x4__scalar)
     41  float vacc3x0 = vacc0x0;                 (local declaration)
     89  vacc3x0 += va3 * vb0;
    123  float vout3x0 = math_min_f32(vacc3x0, vmax);
    297  float vacc3x0 = vacc0x0;                 (local declaration)
    325  vacc3x0 += va3 * vb0;
    343  float vout3x0 = math_min_f32(vacc3x0, vmax);
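
The SPMM kernels follow the same accumulate-then-clamp shape, with two differences visible in the hits: the clamped result goes into a fresh vout3x0 instead of back into the accumulator, and the 8-row files contain a second copy of the pattern later in the file (e.g. lines 174 to 193 in 8x1-scalar.c), presumably the row-remainder path. A rough sketch follows, assuming a simple weight-plus-input-index encoding of the nonzeros; XNNPACK's actual sparse format differs, the spmm_row3_col0/w/idx/nnz names are hypothetical, and fminf/fmaxf again stand in for math_min_f32/math_max_f32:

    #include <stddef.h>
    #include <math.h>

    /* Sketch of the vacc3x0/vout3x0 pattern from the spmm scalar kernels,
       using a hypothetical (weight, input-index) nonzero encoding. */
    float spmm_row3_col0(const float* a3, const float* w, const size_t* idx,
                         size_t nnz, float bias, float vmin, float vmax) {
      float vacc3x0 = bias;                   /* initialized like vacc0x0 */
      for (size_t n = 0; n < nnz; n++) {
        vacc3x0 += a3[idx[n]] * w[n];         /* vacc3x0 += va3 * vb0 */
      }
      float vout3x0 = fminf(vacc3x0, vmax);   /* clamp into a separate vout */
      vout3x0 = fmaxf(vout3x0, vmin);
      return vout3x0;
    }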