/external/XNNPACK/src/f32-spmm/gen/ |
D | 8x4-scalar.c | matches in xnn_f32_spmm_ukernel_8x4__scalar():
     85  const float vb3 = *w++;    (local)
    110  vacc0x3 += va0 * vb3;
    111  vacc1x3 += va1 * vb3;
    112  vacc2x3 += va2 * vb3;
    113  vacc3x3 += va3 * vb3;
    114  vacc4x3 += va4 * vb3;
    115  vacc5x3 += va5 * vb3;
    116  vacc6x3 += va6 * vb3;
    117  vacc7x3 += va7 * vb3;
    321  const float vb3 = *w++;    (local)
    [all …]
|
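The matches above come from the sparse 8x4 micro-kernel, where each nonzero input element contributes four weights that are popped off the packed weight stream with *w++ and accumulated against eight activation values. The following is a minimal sketch of that inner step, with simplified names and loop structure assumed rather than copied from the generated file.

#include <stddef.h>

static void spmm_8x4_step(const float** w_ptr, const float* a,
                          float vacc[8][4])
{
  const float* w = *w_ptr;
  /* Pop the four output-channel weights for this nonzero input element. */
  const float vb0 = *w++;
  const float vb1 = *w++;
  const float vb2 = *w++;
  const float vb3 = *w++;
  /* Multiply-accumulate against eight spatial positions (MR = 8). */
  for (size_t m = 0; m < 8; m++) {
    const float va = a[m];
    vacc[m][0] += va * vb0;
    vacc[m][1] += va * vb1;
    vacc[m][2] += va * vb2;
    vacc[m][3] += va * vb3;
  }
  *w_ptr = w;
}
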
/external/XNNPACK/src/f32-ppmm/gen/ |
D | 4x4-scalar.c | matches in xnn_f32_ppmm_ukernel_4x4__scalar():
     77  const float vb3 = w[3];    (local)
     92  vacc0x3 += va0 * vb3;
     93  vacc1x3 += va1 * vb3;
     94  vacc2x3 += va2 * vb3;
     95  vacc3x3 += va3 * vb3;
|
D | 2x4-scalar.c | matches in xnn_f32_ppmm_ukernel_2x4__scalar():
     59  const float vb3 = w[3];    (local)
     68  vacc0x3 += va0 * vb3;
     69  vacc1x3 += va1 * vb3;
|
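In the PPMM entries above, both operands are pre-packed, so each k-step reads four weights with indexed loads (vb3 = w[3]) and combines them with the packed activations as an outer product. A simplified sketch of one such step follows; the names and the surrounding loop are assumptions, not the verbatim kernel.

static void ppmm_4x4_step(const float a[4], const float w[4],
                          float vacc[4][4])
{
  const float vb0 = w[0];
  const float vb1 = w[1];
  const float vb2 = w[2];
  const float vb3 = w[3];
  for (int m = 0; m < 4; m++) {
    const float va = a[m];        /* packed activation for output row m */
    vacc[m][0] += va * vb0;
    vacc[m][1] += va * vb1;
    vacc[m][2] += va * vb2;
    vacc[m][3] += va * vb3;
  }
}
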
/external/XNNPACK/src/f32-gemm/gen/ |
D | 4x4-scalar.c | matches in xnn_f32_gemm_ukernel_4x4__scalar():
     87  const float vb3 = w[3];    (local)
     93  vacc03 += va0 * vb3;
     97  vacc13 += va1 * vb3;
    101  vacc23 += va2 * vb3;
    105  vacc33 += va3 * vb3;
|
D | 4x4-wasm.c | matches in xnn_f32_gemm_ukernel_4x4__wasm():
     87  const float vb3 = w[3];    (local)
     93  vacc03 += va0 * vb3;
     97  vacc13 += va1 * vb3;
    101  vacc23 += va2 * vb3;
    105  vacc33 += va3 * vb3;
|
D | 2x4-wasm.c | matches in xnn_f32_gemm_ukernel_2x4__wasm():
     65  const float vb3 = w[3];    (local)
     71  vacc03 += va0 * vb3;
     75  vacc13 += va1 * vb3;
|
D | 2x4-scalar.c | matches in xnn_f32_gemm_ukernel_2x4__scalar():
     65  const float vb3 = w[3];    (local)
     71  vacc03 += va0 * vb3;
     75  vacc13 += va1 * vb3;
|
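The GEMM matches above (and the gen-inc and igemm variants further down) share the same inner k-loop: one activation per output row and four packed weights per iteration feed a small accumulator tile. A minimal 2x4 sketch of that loop follows, with kc counted in elements for simplicity; it is an assumed reconstruction, not the shipped generated source.

#include <stddef.h>

static void gemm_2x4_kloop(size_t kc, const float* a0, const float* a1,
                           const float* w, float vacc[2][4])
{
  for (size_t k = 0; k < kc; k++) {
    const float va0 = *a0++;
    const float va1 = *a1++;
    const float vb0 = w[0];
    const float vb1 = w[1];
    const float vb2 = w[2];
    const float vb3 = w[3];
    w += 4;                       /* advance to the next packed weight group */
    vacc[0][0] += va0 * vb0;  vacc[0][1] += va0 * vb1;
    vacc[0][2] += va0 * vb2;  vacc[0][3] += va0 * vb3;
    vacc[1][0] += va1 * vb0;  vacc[1][1] += va1 * vb1;
    vacc[1][2] += va1 * vb2;  vacc[1][3] += va1 * vb3;
  }
}
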
/external/XNNPACK/src/f32-gemm/gen-inc/ |
D | 4x4-wasm.c | matches in xnn_f32_gemminc_ukernel_4x4__wasm():
     89  const float vb3 = w[3];    (local)
     95  vacc03 += va0 * vb3;
     99  vacc13 += va1 * vb3;
    103  vacc23 += va2 * vb3;
    107  vacc33 += va3 * vb3;
|
D | 4x4-scalar.c | matches in xnn_f32_gemminc_ukernel_4x4__scalar():
     89  const float vb3 = w[3];    (local)
     95  vacc03 += va0 * vb3;
     99  vacc13 += va1 * vb3;
    103  vacc23 += va2 * vb3;
    107  vacc33 += va3 * vb3;
|
D | 2x4-scalar.c | matches in xnn_f32_gemminc_ukernel_2x4__scalar():
     67  const float vb3 = w[3];    (local)
     73  vacc03 += va0 * vb3;
     77  vacc13 += va1 * vb3;
|
D | 2x4-wasm.c | matches in xnn_f32_gemminc_ukernel_2x4__wasm():
     67  const float vb3 = w[3];    (local)
     73  vacc03 += va0 * vb3;
     77  vacc13 += va1 * vb3;
|
/external/XNNPACK/src/f32-igemm/gen/ |
D | 4x4-scalar.c | matches in xnn_f32_igemm_ukernel_4x4__scalar():
    109  const float vb3 = w[3];    (local)
    115  vacc03 += va0 * vb3;
    119  vacc13 += va1 * vb3;
    123  vacc23 += va2 * vb3;
    127  vacc33 += va3 * vb3;
|
D | 4x4-wasm.c | matches in xnn_f32_igemm_ukernel_4x4__wasm():
    109  const float vb3 = w[3];    (local)
    115  vacc03 += va0 * vb3;
    119  vacc13 += va1 * vb3;
    123  vacc23 += va2 * vb3;
    127  vacc33 += va3 * vb3;
|
D | 2x4-wasm.c | matches in xnn_f32_igemm_ukernel_2x4__wasm():
     81  const float vb3 = w[3];    (local)
     87  vacc03 += va0 * vb3;
     91  vacc13 += va1 * vb3;
|
D | 2x4-scalar.c | matches in xnn_f32_igemm_ukernel_2x4__scalar():
     81  const float vb3 = w[3];    (local)
     87  vacc03 += va0 * vb3;
     91  vacc13 += va1 * vb3;
|
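The igemm matches above use the same vb3 multiply-accumulate body as the direct GEMM kernels; the difference is that activation rows are fetched through an indirection buffer of pointers rather than from a contiguous A matrix. The sketch below illustrates that structure under assumed, simplified conventions (ks as a tap count, kc in elements) and is not the generated file itself.

#include <stddef.h>

static void igemm_2x4(size_t ks, size_t kc, const float** indirect_a,
                      const float* w, float vacc[2][4])
{
  do {
    const float* a0 = *indirect_a++;   /* row pointer for output row 0 */
    const float* a1 = *indirect_a++;   /* row pointer for output row 1 */
    for (size_t k = 0; k < kc; k++) {
      const float va0 = *a0++;
      const float va1 = *a1++;
      const float vb0 = w[0];
      const float vb1 = w[1];
      const float vb2 = w[2];
      const float vb3 = w[3];
      w += 4;
      vacc[0][0] += va0 * vb0;  vacc[0][1] += va0 * vb1;
      vacc[0][2] += va0 * vb2;  vacc[0][3] += va0 * vb3;
      vacc[1][0] += va1 * vb0;  vacc[1][1] += va1 * vb1;
      vacc[1][2] += va1 * vb2;  vacc[1][3] += va1 * vb3;
    }
  } while (--ks != 0);                 /* one iteration per indirection tap */
}
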
/external/XNNPACK/src/f32-vbinary/gen/ |
D | vdiv-scalar-x4.c | matches in xnn_f32_vdiv_ukernel__scalar_x4():
     40  const float vb3 = b[3];    (local)
     46  float vy3 = va3 / vb3;
|
D | vadd-wasm-x4.c | matches in xnn_f32_vadd_ukernel__wasm_x4():
     40  const float vb3 = b[3];    (local)
     46  float vy3 = va3 + vb3;
|
D | vmin-scalar-x4.c | matches in xnn_f32_vmin_ukernel__scalar_x4():
     40  const float vb3 = b[3];    (local)
     46  float vy3 = math_min_f32(va3, vb3);
|
D | vsub-wasm-x4.c | matches in xnn_f32_vsub_ukernel__wasm_x4():
     40  const float vb3 = b[3];    (local)
     46  float vy3 = va3 - vb3;
|
D | vmax-scalar-x4.c | matches in xnn_f32_vmax_ukernel__scalar_x4():
     40  const float vb3 = b[3];    (local)
     46  float vy3 = math_max_f32(va3, vb3);
|
D | vmul-wasm-x4.c | matches in xnn_f32_vmul_ukernel__wasm_x4():
     40  const float vb3 = b[3];    (local)
     46  float vy3 = va3 * vb3;
|
D | vadd-scalar-x4.c | matches in xnn_f32_vadd_ukernel__scalar_x4():
     40  const float vb3 = b[3];    (local)
     46  float vy3 = va3 + vb3;
|
D | vmul-scalar-x4.c | matches in xnn_f32_vmul_ukernel__scalar_x4():
     40  const float vb3 = b[3];    (local)
     46  float vy3 = va3 * vb3;
|
D | vmax-wasm-x4.c | matches in xnn_f32_vmax_ukernel__wasm_x4():
     40  const float vb3 = b[3];    (local)
     46  float vy3 = __builtin_wasm_max_f32(va3, vb3);
|
D | vsub-scalar-x4.c | matches in xnn_f32_vsub_ukernel__scalar_x4():
     40  const float vb3 = b[3];    (local)
     46  float vy3 = va3 - vb3;
|
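All of the f32-vbinary matches above follow the same x4-unrolled element-wise pattern: four elements of a and b are loaded per iteration (vb3 = b[3] is the fourth), combined with the operation that varies per file, and stored. A minimal sketch of the addition variant follows; remainder handling is omitted and n is assumed to be a multiple of 4, unlike the real kernels.

#include <stddef.h>

static void vadd_scalar_x4(size_t n, const float* a, const float* b, float* y)
{
  /* n is the element count, assumed here to be a multiple of 4;
     the generated kernels also handle a scalar remainder loop. */
  for (; n >= 4; n -= 4) {
    const float va0 = a[0]; const float va1 = a[1];
    const float va2 = a[2]; const float va3 = a[3];
    a += 4;
    const float vb0 = b[0]; const float vb1 = b[1];
    const float vb2 = b[2]; const float vb3 = b[3];
    b += 4;
    float vy0 = va0 + vb0;
    float vy1 = va1 + vb1;
    float vy2 = va2 + vb2;
    float vy3 = va3 + vb3;
    y[0] = vy0; y[1] = vy1; y[2] = vy2; y[3] = vy3;
    y += 4;
  }
}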