Searched refs:vacc1x2 (Results 1 – 8 of 8) sorted by relevance
/external/XNNPACK/src/f32-ppmm/gen/

D | 2x4-scalar.c (all hits in xnn_f32_ppmm_ukernel_2x4__scalar()):
     46  float vacc1x2 = vacc0x2;                 (local declaration)
     67  vacc1x2 += va1 * vb2;
     80  vacc1x2 = math_min_f32(vacc1x2, vmax);
     90  vacc1x2 = math_max_f32(vacc1x2, vmin);
     97  c1[2] = vacc1x2;
    117  vacc1x0 = vacc1x2;

D | 3x3-scalar.c (all hits in xnn_f32_ppmm_ukernel_3x3__scalar()):
     49  float vacc1x2 = vacc0x2;                 (local declaration)
     74  vacc1x2 += va1 * vb2;
     88  vacc1x2 = math_min_f32(vacc1x2, vmax);
     99  vacc1x2 = math_max_f32(vacc1x2, vmin);
    108  c1[2] = vacc1x2;
    130  vacc1x0 = vacc1x2;

D | 4x4-scalar.c (all hits in xnn_f32_ppmm_ukernel_4x4__scalar()):
     54  float vacc1x2 = vacc0x2;                 (local declaration)
     89  vacc1x2 += va1 * vb2;
    110  vacc1x2 = math_min_f32(vacc1x2, vmax);
    128  vacc1x2 = math_max_f32(vacc1x2, vmin);
    147  c1[2] = vacc1x2;
    175  vacc1x0 = vacc1x2;
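Read together, the ppmm hits trace one accumulator through its full lifecycle: vacc1x2 is seeded from its row-0 counterpart (every row of a tile starts from the same per-column bias), receives one multiply-add per reduction step, is clamped to [vmin, vmax], is stored to c1[2], and is shifted down (vacc1x0 = vacc1x2) on the narrow-output remainder path. The C below is a minimal sketch of that flow for a 2x4 tile, not the actual generated kernel; the function name ppmm_2x4_tile_sketch, the min_f32/max_f32 helpers, and the assumed weight layout (4 biases followed by 4 packed B values per reduction step) are illustrative assumptions.

#include <stddef.h>

static inline float min_f32(float a, float b) { return a < b ? a : b; }
static inline float max_f32(float a, float b) { return a > b ? a : b; }

void ppmm_2x4_tile_sketch(
    size_t kc,             /* number of reduction steps */
    const float* a,        /* packed A: 2 values (one per row) per step */
    const float* w,        /* assumed: 4 biases, then 4 packed B values per step */
    float* c0, float* c1,  /* two output rows, 4 columns each */
    float vmin, float vmax)
{
  /* Row-0 accumulators start at the per-column bias... */
  float vacc0x0 = w[0], vacc0x1 = w[1], vacc0x2 = w[2], vacc0x3 = w[3];
  /* ...and row 1 is seeded from row 0: the "vacc1x2 = vacc0x2" hits. */
  float vacc1x0 = vacc0x0, vacc1x1 = vacc0x1, vacc1x2 = vacc0x2, vacc1x3 = vacc0x3;
  w += 4;

  for (size_t k = 0; k < kc; k++) {
    const float va0 = a[0], va1 = a[1];
    const float vb0 = w[0], vb1 = w[1], vb2 = w[2], vb3 = w[3];
    a += 2;
    w += 4;
    vacc0x0 += va0 * vb0;  vacc0x1 += va0 * vb1;
    vacc0x2 += va0 * vb2;  vacc0x3 += va0 * vb3;
    vacc1x0 += va1 * vb0;  vacc1x1 += va1 * vb1;
    vacc1x2 += va1 * vb2;  /* the "vacc1x2 += va1 * vb2" hits */
    vacc1x3 += va1 * vb3;
  }

  /* Clamp in the order the hits show: min against vmax, then max against vmin. */
  vacc0x0 = min_f32(vacc0x0, vmax);  vacc1x0 = min_f32(vacc1x0, vmax);
  vacc0x1 = min_f32(vacc0x1, vmax);  vacc1x1 = min_f32(vacc1x1, vmax);
  vacc0x2 = min_f32(vacc0x2, vmax);  vacc1x2 = min_f32(vacc1x2, vmax);
  vacc0x3 = min_f32(vacc0x3, vmax);  vacc1x3 = min_f32(vacc1x3, vmax);
  vacc0x0 = max_f32(vacc0x0, vmin);  vacc1x0 = max_f32(vacc1x0, vmin);
  vacc0x1 = max_f32(vacc0x1, vmin);  vacc1x1 = max_f32(vacc1x1, vmin);
  vacc0x2 = max_f32(vacc0x2, vmin);  vacc1x2 = max_f32(vacc1x2, vmin);
  vacc0x3 = max_f32(vacc0x3, vmin);  vacc1x3 = max_f32(vacc1x3, vmin);

  /* Store: the "c1[2] = vacc1x2" hits. The generated kernels also carry a
     remainder path that shifts accumulators down ("vacc1x0 = vacc1x2")
     when fewer than 4 output columns remain. */
  c0[0] = vacc0x0;  c0[1] = vacc0x1;  c0[2] = vacc0x2;  c0[3] = vacc0x3;
  c1[0] = vacc1x0;  c1[1] = vacc1x1;  c1[2] = vacc1x2;  c1[3] = vacc1x3;
}

The 3x3 and 4x4 variants differ only in tile shape, which is why the same hit pattern repeats at shifted line numbers.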
/external/XNNPACK/src/f32-vmulcaddc/gen/

D | c4-scalar-2x.c (all hits in xnn_f32_vmulcaddc_ukernel_c4__scalar_2x()):
     60  float vacc1x2 = i1[2];                   (local declaration)
     75  vacc1x2 = vacc1x2 * vscale2 + vbias2;
     84  vacc1x2 = math_max_f32(vacc1x2, vmin);
     93  vacc1x2 = math_min_f32(vacc1x2, vmax);
    103  o1[2] = vacc1x2;

D | c4-wasm-2x.c (all hits in xnn_f32_vmulcaddc_ukernel_c4__wasm_2x()):
     60  float vacc1x2 = i1[2];                   (local declaration)
     75  vacc1x2 = vacc1x2 * vscale2 + vbias2;
     84  vacc1x2 = __builtin_wasm_max_f32(vacc1x2, vmin);
     93  vacc1x2 = __builtin_wasm_min_f32(vacc1x2, vmax);
    103  o1[2] = vacc1x2;
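Here vacc1x2 is row 1, channel 2 of a two-row pass: load the input element, apply vacc = vacc * scale + bias, clamp, store. A minimal looped sketch of that per-channel flow follows; it condenses the fully-unrolled generated code, and the function name vmulcaddc_c4_2x_sketch, the min_f32/max_f32 helpers, and the assumed weight layout (4 scales followed by 4 biases) are illustrative, not XNNPACK's actual packing.

static inline float min_f32(float a, float b) { return a < b ? a : b; }
static inline float max_f32(float a, float b) { return a > b ? a : b; }

void vmulcaddc_c4_2x_sketch(
    const float* i0, const float* i1,  /* two input rows, 4 channels each */
    const float* w,                    /* assumed: scale[0..3] then bias[0..3] */
    float* o0, float* o1,
    float vmin, float vmax)
{
  for (int c = 0; c < 4; c++) {
    /* For c == 2, row 1 is exactly the vacc1x2 flow in the hits: load
       i1[2], multiply-add with vscale2/vbias2, clamp (max against vmin
       first, then min against vmax), store to o1[2]. */
    float vacc0 = i0[c] * w[c] + w[c + 4];
    float vacc1 = i1[c] * w[c] + w[c + 4];
    vacc0 = max_f32(vacc0, vmin);
    vacc1 = max_f32(vacc1, vmin);
    vacc0 = min_f32(vacc0, vmax);
    vacc1 = min_f32(vacc1, vmax);
    o0[c] = vacc0;
    o1[c] = vacc1;
  }
}

The two files are otherwise identical: the wasm variant only swaps the clamp helpers for __builtin_wasm_max_f32/__builtin_wasm_min_f32 so the clamp compiles down to wasm's f32.max/f32.min instructions.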
/external/XNNPACK/src/f32-prelu/gen/

D | scalar-2x4.c (all hits in xnn_f32_prelu_ukernel__scalar_2x4()):
     72  float vacc1x2 = signbit(vi1x2) ? vi1x2 * vw2 : vi1x2;   (local declaration)
     81  vacc1x2 = math_max_f32(vacc1x2, vmin);
     90  vacc1x2 = math_min_f32(vacc1x2, vmax);
    100  o1[2] = vacc1x2;

D | wasm-2x4.c (all hits in xnn_f32_prelu_ukernel__wasm_2x4()):
     72  float vacc1x2 = signbit(vi1x2) ? vi1x2 * vw2 : vi1x2;   (local declaration)
     81  vacc1x2 = __builtin_wasm_max_f32(vacc1x2, vmin);
     90  vacc1x2 = __builtin_wasm_min_f32(vacc1x2, vmax);
    100  o1[2] = vacc1x2;
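The PReLU hits show a branchless-looking select plus clamp per element. A minimal single-element sketch, with the hypothetical helper names prelu_element_sketch and min_f32/max_f32 standing in for the generated code's unrolled form:

#include <math.h>

static inline float min_f32(float a, float b) { return a < b ? a : b; }
static inline float max_f32(float a, float b) { return a > b ? a : b; }

float prelu_element_sketch(float vi, float vw, float vmin, float vmax)
{
  /* The hit at line 72 uses signbit() rather than (vi < 0.0f), so that
     negative zero also takes the weighted path. */
  float vacc = signbit(vi) ? vi * vw : vi;
  vacc = max_f32(vacc, vmin);  /* clamp below (hit at line 81)... */
  vacc = min_f32(vacc, vmax);  /* ...then above (hit at line 90) */
  return vacc;
}

As with vmulcaddc, the wasm file differs from the scalar one only in the clamp intrinsics.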
/external/XNNPACK/src/f32-spmm/gen/

D | 8x4-scalar.c (all hits in xnn_f32_spmm_ukernel_8x4__scalar()):
     55  float vacc1x2 = vacc0x2;                 (local declaration)
    103  vacc1x2 += va1 * vb2;
    137  float vout1x2 = math_min_f32(vacc1x2, vmax);
    303  float vacc1x2 = vacc0x2;                 (local declaration)
    331  vacc1x2 += va1 * vb2;
    349  float vout1x2 = math_min_f32(vacc1x2, vmax);
    445  float vacc1x2 = vacc0x2;                 (local declaration)
    463  vacc1x2 += va1 * vb2;
    473  float vout1x2 = math_min_f32(vacc1x2, vmax);
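The same three hits recur at lines 303+ and 445+, presumably because the generated file carries narrower remainder blocks with the same structure as the main 8-row block. Note also why only the min clamp appears: the subsequent max against vmin operates on the separate vout1x2 variable and never references vacc1x2 again, so it falls outside a refs search. The sketch below illustrates the accumulate-then-clamp-into-vout pattern for 2 rows and 4 columns; it is not the actual 8x4 kernel, and the assumed packed sparse format (4 per-column biases, then 4 weights plus one input byte offset per nonzero) and the helper names are mine, not XNNPACK's.

#include <stddef.h>
#include <stdint.h>

static inline float min_f32(float a, float b) { return a < b ? a : b; }
static inline float max_f32(float a, float b) { return a > b ? a : b; }

void spmm_2x4_sketch(
    size_t nnz,           /* nonzeros shared by this 4-column group */
    const float* w,       /* assumed: 4 biases, then 4 weights per nonzero */
    const int32_t* dmap,  /* byte offset to advance the input per nonzero */
    const float* a,       /* input; a[0] and a[1] are the two rows */
    float* out0, float* out1,
    float vmin, float vmax)
{
  float vacc0[4], vacc1[4];
  for (int c = 0; c < 4; c++) {
    vacc0[c] = w[c];
    vacc1[c] = vacc0[c];  /* the "vacc1x2 = vacc0x2" seed */
  }
  w += 4;

  for (size_t i = 0; i < nnz; i++) {
    const float va0 = a[0];
    const float va1 = a[1];
    for (int c = 0; c < 4; c++) {
      vacc0[c] += va0 * w[c];
      vacc1[c] += va1 * w[c];  /* "vacc1x2 += va1 * vb2" for c == 2 */
    }
    w += 4;
    a = (const float*) ((uintptr_t) a + (uintptr_t) dmap[i]);
  }

  for (int c = 0; c < 4; c++) {
    /* "float vout1x2 = math_min_f32(vacc1x2, vmax)": the clamped value
       lives in a separate vout before the store, so the max clamp below
       no longer mentions the accumulator. */
    float vout0 = min_f32(vacc0[c], vmax);
    float vout1 = min_f32(vacc1[c], vmax);
    out0[c] = max_f32(vout0, vmin);
    out1[c] = max_f32(vout1, vmin);
  }
}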