Occurrences of the accumulator vacc02 in XNNPACK's generated f32 GEMM micro-kernels, grouped as file | function, then line: code.

/external/XNNPACK/src/f32-gemm/gen/
1x4-minmax-scalar.c | xnn_f32_gemm_minmax_ukernel_1x4__scalar()
    45: float vacc02 = w[2];  (local)
    61: vacc02 = math_muladd_f32(va0, vb2, vacc02);
    69: vacc02 = math_max_f32(vacc02, vmin);
    74: vacc02 = math_min_f32(vacc02, vmax);
    80: c0[2] = vacc02;
    91: vacc00 = vacc02;
1x4-minmax-wasm.c | xnn_f32_gemm_minmax_ukernel_1x4__wasm()
    45: float vacc02 = w[2];  (local)
    61: vacc02 = math_muladd_f32(va0, vb2, vacc02);
    69: vacc02 = __builtin_wasm_max_f32(vacc02, vmin);
    74: vacc02 = __builtin_wasm_min_f32(vacc02, vmax);
    80: c0[2] = vacc02;
    91: vacc00 = vacc02;
2x4-minmax-scalar.c | xnn_f32_gemm_minmax_ukernel_2x4__scalar()
    51: float vacc02 = w[2];  (local)
    56: float vacc12 = vacc02;
    72: vacc02 = math_muladd_f32(va0, vb2, vacc02);
    84: vacc02 = math_max_f32(vacc02, vmin);
    93: vacc02 = math_min_f32(vacc02, vmax);
    108: c0[2] = vacc02;
    124: vacc00 = vacc02;
2x4-minmax-wasm.c | xnn_f32_gemm_minmax_ukernel_2x4__wasm()
    51: float vacc02 = w[2];  (local)
    56: float vacc12 = vacc02;
    72: vacc02 = math_muladd_f32(va0, vb2, vacc02);
    84: vacc02 = __builtin_wasm_max_f32(vacc02, vmin);
    93: vacc02 = __builtin_wasm_min_f32(vacc02, vmax);
    108: c0[2] = vacc02;
    124: vacc00 = vacc02;
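For orientation, the occurrences above trace one accumulator's full life cycle through a generated kernel: bias load, multiply-accumulate, clamp, store. Below is a minimal condensed sketch of the 1x4 min/max scalar shape, not the generated file itself; the function name and signature are hypothetical, kc counts elements here (the generated kernels count bytes), and the partial-nc tail where `vacc00 = vacc02;` shifts accumulators down for a narrower store is omitted. The math_* helpers are real XNNPACK names from the listing, redefined locally so the sketch is self-contained.

  #include <stddef.h>

  /* Local stand-ins for XNNPACK's math helpers. */
  static inline float math_muladd_f32(float a, float b, float c) { return a * b + c; }
  static inline float math_max_f32(float a, float b) { return a > b ? a : b; }
  static inline float math_min_f32(float a, float b) { return a < b ? a : b; }

  /* One row of A times four packed columns of B. */
  static void gemm_1x4_minmax_sketch(size_t kc, const float* a0, const float* w,
                                     float* c0, float vmin, float vmax) {
    /* Accumulators start from the packed bias row: `float vacc02 = w[2];`. */
    float vacc00 = w[0];
    float vacc01 = w[1];
    float vacc02 = w[2];
    float vacc03 = w[3];
    w += 4;

    for (size_t k = 0; k < kc; k++) {
      const float va0 = *a0++;
      const float vb0 = w[0], vb1 = w[1], vb2 = w[2], vb3 = w[3];
      w += 4;
      vacc00 = math_muladd_f32(va0, vb0, vacc00);
      vacc01 = math_muladd_f32(va0, vb1, vacc01);
      vacc02 = math_muladd_f32(va0, vb2, vacc02);  /* the muladd lines above */
      vacc03 = math_muladd_f32(va0, vb3, vacc03);
    }

    /* Clamp to [vmin, vmax]: the math_max_f32/math_min_f32 lines above. */
    vacc00 = math_min_f32(math_max_f32(vacc00, vmin), vmax);
    vacc01 = math_min_f32(math_max_f32(vacc01, vmin), vmax);
    vacc02 = math_min_f32(math_max_f32(vacc02, vmin), vmax);
    vacc03 = math_min_f32(math_max_f32(vacc03, vmin), vmax);

    /* Full-width store: `c0[2] = vacc02;` above. */
    c0[0] = vacc00;
    c0[1] = vacc01;
    c0[2] = vacc02;
    c0[3] = vacc03;
  }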
1x4-relu-wasm.c | xnn_f32_gemm_relu_ukernel_1x4__wasm()
    43: float vacc02 = w[2];  (local)
    59: vacc02 = math_muladd_f32(va0, vb2, vacc02);
    67: vacc02 = __builtin_wasm_max_f32(vacc02, 0.0f);
    73: c0[2] = vacc02;
    84: vacc00 = vacc02;
1x4-relu-scalar.c | xnn_f32_gemm_relu_ukernel_1x4__scalar()
    43: float vacc02 = w[2];  (local)
    59: vacc02 = math_muladd_f32(va0, vb2, vacc02);
    67: vacc02 = math_max_f32(vacc02, 0.0f);
    73: c0[2] = vacc02;
    84: vacc00 = vacc02;
2x4-relu-scalar.c | xnn_f32_gemm_relu_ukernel_2x4__scalar()
    49: float vacc02 = w[2];  (local)
    54: float vacc12 = vacc02;
    70: vacc02 = math_muladd_f32(va0, vb2, vacc02);
    82: vacc02 = math_max_f32(vacc02, 0.0f);
    97: c0[2] = vacc02;
    113: vacc00 = vacc02;
2x4-relu-wasm.c | xnn_f32_gemm_relu_ukernel_2x4__wasm()
    49: float vacc02 = w[2];  (local)
    54: float vacc12 = vacc02;
    70: vacc02 = math_muladd_f32(va0, vb2, vacc02);
    82: vacc02 = __builtin_wasm_max_f32(vacc02, 0.0f);
    97: c0[2] = vacc02;
    113: vacc00 = vacc02;
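The relu entries differ from the minmax ones only in the clamp: a single max against 0.0f, with the -wasm files using clang's __builtin_wasm_max_f32 (a single wasm f32.max instruction) where the -scalar files use math_max_f32. A hedged stand-alone equivalent; the helper name is hypothetical, and the plain ternary simplifies NaN propagation relative to f32.max:

  #if defined(__wasm__)
  static inline float relu_f32(float x) { return __builtin_wasm_max_f32(x, 0.0f); }
  #else
  static inline float relu_f32(float x) { return x > 0.0f ? x : 0.0f; }
  #endif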
4x4-minmax-wasm.c | xnn_f32_gemm_minmax_ukernel_4x4__wasm()
    63: float vacc02 = w[2];  (local)
    68: float vacc12 = vacc02;
    72: float vacc22 = vacc02;
    76: float vacc32 = vacc02;
    94: vacc02 = math_muladd_f32(va0, vb2, vacc02);
    114: vacc02 = __builtin_wasm_max_f32(vacc02, vmin);
    131: vacc02 = __builtin_wasm_min_f32(vacc02, vmax);
    164: c0[2] = vacc02;
    190: vacc00 = vacc02;
4x4-minmax-scalar.c | xnn_f32_gemm_minmax_ukernel_4x4__scalar()
    63: float vacc02 = w[2];  (local)
    68: float vacc12 = vacc02;
    72: float vacc22 = vacc02;
    76: float vacc32 = vacc02;
    94: vacc02 = math_muladd_f32(va0, vb2, vacc02);
    114: vacc02 = math_max_f32(vacc02, vmin);
    131: vacc02 = math_min_f32(vacc02, vmax);
    164: c0[2] = vacc02;
    190: vacc00 = vacc02;
4x4-relu-wasm.c | xnn_f32_gemm_relu_ukernel_4x4__wasm()
    61: float vacc02 = w[2];  (local)
    66: float vacc12 = vacc02;
    70: float vacc22 = vacc02;
    74: float vacc32 = vacc02;
    92: vacc02 = math_muladd_f32(va0, vb2, vacc02);
    112: vacc02 = __builtin_wasm_max_f32(vacc02, 0.0f);
    145: c0[2] = vacc02;
    171: vacc00 = vacc02;
4x4-relu-scalar.c | xnn_f32_gemm_relu_ukernel_4x4__scalar()
    61: float vacc02 = w[2];  (local)
    66: float vacc12 = vacc02;
    70: float vacc22 = vacc02;
    74: float vacc32 = vacc02;
    92: vacc02 = math_muladd_f32(va0, vb2, vacc02);
    112: vacc02 = math_max_f32(vacc02, 0.0f);
    145: c0[2] = vacc02;
    171: vacc00 = vacc02;
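In the 4x4 kernels every output row starts from the same packed bias, which is why rows 1..3 copy row 0 (`vacc12 = vacc02;`, `vacc22 = vacc02;`, `vacc32 = vacc02;`) rather than re-reading w. A sketch of that init, with the scalar names of the listing folded into an array; the function name is hypothetical:

  #include <stddef.h>

  static void init_acc_4x4(const float* w, float vacc[4][4]) {
    vacc[0][0] = w[0];
    vacc[0][1] = w[1];
    vacc[0][2] = w[2];  /* `float vacc02 = w[2];` in the listing */
    vacc[0][3] = w[3];
    for (size_t m = 1; m < 4; m++) {   /* vacc1n/vacc2n/vacc3n = vacc0n */
      for (size_t n = 0; n < 4; n++) {
        vacc[m][n] = vacc[0][n];
      }
    }
  }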
/external/XNNPACK/src/f32-gemm/gen-inc/
1x4inc-minmax-wasm.c | xnn_f32_gemminc_minmax_ukernel_1x4__wasm()
    47: float vacc02 = acc[2];  (local)
    63: vacc02 = math_muladd_f32(va0, vb2, vacc02);
    71: vacc02 = __builtin_wasm_max_f32(vacc02, vmin);
    76: vacc02 = __builtin_wasm_min_f32(vacc02, vmax);
    82: c0[2] = vacc02;
    93: vacc00 = vacc02;
1x4inc-minmax-scalar.c | xnn_f32_gemminc_minmax_ukernel_1x4__scalar()
    47: float vacc02 = acc[2];  (local)
    63: vacc02 = math_muladd_f32(va0, vb2, vacc02);
    71: vacc02 = math_max_f32(vacc02, vmin);
    76: vacc02 = math_min_f32(vacc02, vmax);
    82: c0[2] = vacc02;
    93: vacc00 = vacc02;
2x4inc-minmax-scalar.c | xnn_f32_gemminc_minmax_ukernel_2x4__scalar()
    53: float vacc02 = acc[2];  (local)
    74: vacc02 = math_muladd_f32(va0, vb2, vacc02);
    86: vacc02 = math_max_f32(vacc02, vmin);
    95: vacc02 = math_min_f32(vacc02, vmax);
    110: c0[2] = vacc02;
    126: vacc00 = vacc02;
2x4inc-minmax-wasm.c | xnn_f32_gemminc_minmax_ukernel_2x4__wasm()
    53: float vacc02 = acc[2];  (local)
    74: vacc02 = math_muladd_f32(va0, vb2, vacc02);
    86: vacc02 = __builtin_wasm_max_f32(vacc02, vmin);
    95: vacc02 = __builtin_wasm_min_f32(vacc02, vmax);
    110: c0[2] = vacc02;
    126: vacc00 = vacc02;
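The gen-inc listings match their gen/ counterparts except for the seed: vacc02 starts from a caller-provided partial-sum buffer (`acc[2]`) instead of the packed bias (`w[2]`), which is what lets the GEMMINC kernels continue an accumulation that was split along K. A minimal sketch, assuming `acc` holds the previous pass's unclamped accumulators for this tile; the function name is hypothetical:

  static void init_acc_1x4_inc(const float* acc, float vacc[4]) {
    vacc[0] = acc[0];
    vacc[1] = acc[1];
    vacc[2] = acc[2];  /* `float vacc02 = acc[2];` in the listing */
    vacc[3] = acc[3];
  }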
/external/XNNPACK/src/f32-igemm/gen/
1x4-minmax-scalar.c | xnn_f32_igemm_minmax_ukernel_1x4__scalar()
    49: float vacc02 = w[2];  (local)
    74: vacc02 = math_muladd_f32(va0, vb2, vacc02);
    84: vacc02 = math_max_f32(vacc02, vmin);
    89: vacc02 = math_min_f32(vacc02, vmax);
    95: c0[2] = vacc02;
    105: vacc00 = vacc02;
1x4-minmax-wasm.c | xnn_f32_igemm_minmax_ukernel_1x4__wasm()
    49: float vacc02 = w[2];  (local)
    74: vacc02 = math_muladd_f32(va0, vb2, vacc02);
    84: vacc02 = __builtin_wasm_max_f32(vacc02, vmin);
    89: vacc02 = __builtin_wasm_min_f32(vacc02, vmax);
    95: c0[2] = vacc02;
    105: vacc00 = vacc02;
2x4-minmax-scalar.c | xnn_f32_igemm_minmax_ukernel_2x4__scalar()
    53: float vacc02 = w[2];  (local)
    57: float vacc12 = vacc02;
    88: vacc02 = math_muladd_f32(va0, vb2, vacc02);
    102: vacc02 = math_max_f32(vacc02, vmin);
    111: vacc02 = math_min_f32(vacc02, vmax);
    126: c0[2] = vacc02;
    140: vacc00 = vacc02;
2x4-minmax-wasm.c | xnn_f32_igemm_minmax_ukernel_2x4__wasm()
    53: float vacc02 = w[2];  (local)
    57: float vacc12 = vacc02;
    88: vacc02 = math_muladd_f32(va0, vb2, vacc02);
    102: vacc02 = __builtin_wasm_max_f32(vacc02, vmin);
    111: vacc02 = __builtin_wasm_min_f32(vacc02, vmax);
    126: c0[2] = vacc02;
    140: vacc00 = vacc02;
1x4-relu-scalar.c | xnn_f32_igemm_relu_ukernel_1x4__scalar()
    47: float vacc02 = w[2];  (local)
    72: vacc02 = math_muladd_f32(va0, vb2, vacc02);
    82: vacc02 = math_max_f32(vacc02, 0.0f);
    88: c0[2] = vacc02;
    98: vacc00 = vacc02;
1x4-relu-wasm.c | xnn_f32_igemm_relu_ukernel_1x4__wasm()
    47: float vacc02 = w[2];  (local)
    72: vacc02 = math_muladd_f32(va0, vb2, vacc02);
    82: vacc02 = __builtin_wasm_max_f32(vacc02, 0.0f);
    88: c0[2] = vacc02;
    98: vacc00 = vacc02;
4x4-minmax-wasm.c | xnn_f32_igemm_minmax_ukernel_4x4__wasm()
    61: float vacc02 = w[2];  (local)
    65: float vacc12 = vacc02;
    69: float vacc22 = vacc02;
    73: float vacc32 = vacc02;
    116: vacc02 = math_muladd_f32(va0, vb2, vacc02);
    138: vacc02 = __builtin_wasm_max_f32(vacc02, vmin);
    155: vacc02 = __builtin_wasm_min_f32(vacc02, vmax);
    188: c0[2] = vacc02;
    210: vacc00 = vacc02;
4x4-minmax-scalar.c | xnn_f32_igemm_minmax_ukernel_4x4__scalar()
    61: float vacc02 = w[2];  (local)
    65: float vacc12 = vacc02;
    69: float vacc22 = vacc02;
    73: float vacc32 = vacc02;
    116: vacc02 = math_muladd_f32(va0, vb2, vacc02);
    138: vacc02 = math_max_f32(vacc02, vmin);
    155: vacc02 = math_min_f32(vacc02, vmax);
    188: c0[2] = vacc02;
    210: vacc00 = vacc02;
2x4-relu-wasm.c | xnn_f32_igemm_relu_ukernel_2x4__wasm()
    51: float vacc02 = w[2];  (local)
    55: float vacc12 = vacc02;
    86: vacc02 = math_muladd_f32(va0, vb2, vacc02);
    100: vacc02 = __builtin_wasm_max_f32(vacc02, 0.0f);
    115: c0[2] = vacc02;
    129: vacc00 = vacc02;
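The igemm occurrences follow the same init/muladd/clamp/store sequence as the gemm ones, but A arrives through an indirection buffer: an array of row pointers consumed ks at a time, each contributing kc elements to the same accumulators. A condensed sketch of that inner loop structure; the function name is hypothetical, kc counts elements, and the zero-pointer substitution and a_offset adjustment of the real kernels are omitted:

  #include <stddef.h>

  static inline float math_muladd_f32(float a, float b, float c) { return a * b + c; }

  /* One 1x4 tile; `a` is the indirection buffer of ks row pointers (ks >= 1). */
  static void igemm_1x4_accumulate(size_t ks, size_t kc, const float* const* a,
                                   const float* w, float vacc[4]) {
    do {
      const float* a0 = *a++;        /* next A row from the indirection buffer */
      for (size_t k = 0; k < kc; k++) {
        const float va0 = *a0++;
        for (size_t n = 0; n < 4; n++) {
          vacc[n] = math_muladd_f32(va0, w[n], vacc[n]);  /* the muladd lines */
        }
        w += 4;
      }
    } while (--ks != 0);
  }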