/external/XNNPACK/src/f32-vbinary/gen/
D | vmulc-minmax-wasmsimd-x86-x16.c | in xnn_f32_vmulc_minmax_ukernel__wasmsimd_x86_x16():
    31  const v128_t vy_min = wasm_v32x4_load_splat(&params->scalar.min);  local
    47  const v128_t vltmask0123 = wasm_f32x4_lt(vy0123, vy_min);
    48  const v128_t vltmask4567 = wasm_f32x4_lt(vy4567, vy_min);
    49  const v128_t vltmask89AB = wasm_f32x4_lt(vy89AB, vy_min);
    50  const v128_t vltmaskCDEF = wasm_f32x4_lt(vyCDEF, vy_min);
    53  vy0123 = wasm_v128_bitselect(vy_min, vy0123, vltmask0123);
    55  vy4567 = wasm_v128_bitselect(vy_min, vy4567, vltmask4567);
    57  vy89AB = wasm_v128_bitselect(vy_min, vy89AB, vltmask89AB);
    59  vyCDEF = wasm_v128_bitselect(vy_min, vyCDEF, vltmaskCDEF);
    78  const v128_t vltmask = wasm_f32x4_lt(vy, vy_min);
    [all …]

D | vrsubc-minmax-wasmsimd-x86-x16.c | in xnn_f32_vrsubc_minmax_ukernel__wasmsimd_x86_x16():
    31  const v128_t vy_min = wasm_v32x4_load_splat(&params->scalar.min);  local
    47  const v128_t vltmask0123 = wasm_f32x4_lt(vy0123, vy_min);
    48  const v128_t vltmask4567 = wasm_f32x4_lt(vy4567, vy_min);
    49  const v128_t vltmask89AB = wasm_f32x4_lt(vy89AB, vy_min);
    50  const v128_t vltmaskCDEF = wasm_f32x4_lt(vyCDEF, vy_min);
    53  vy0123 = wasm_v128_bitselect(vy_min, vy0123, vltmask0123);
    55  vy4567 = wasm_v128_bitselect(vy_min, vy4567, vltmask4567);
    57  vy89AB = wasm_v128_bitselect(vy_min, vy89AB, vltmask89AB);
    59  vyCDEF = wasm_v128_bitselect(vy_min, vyCDEF, vltmaskCDEF);
    78  const v128_t vltmask = wasm_f32x4_lt(vy, vy_min);
    [all …]

D | vsubc-minmax-wasmsimd-x86-x16.c | in xnn_f32_vsubc_minmax_ukernel__wasmsimd_x86_x16():
    31  const v128_t vy_min = wasm_v32x4_load_splat(&params->scalar.min);  local
    47  const v128_t vltmask0123 = wasm_f32x4_lt(vy0123, vy_min);
    48  const v128_t vltmask4567 = wasm_f32x4_lt(vy4567, vy_min);
    49  const v128_t vltmask89AB = wasm_f32x4_lt(vy89AB, vy_min);
    50  const v128_t vltmaskCDEF = wasm_f32x4_lt(vyCDEF, vy_min);
    53  vy0123 = wasm_v128_bitselect(vy_min, vy0123, vltmask0123);
    55  vy4567 = wasm_v128_bitselect(vy_min, vy4567, vltmask4567);
    57  vy89AB = wasm_v128_bitselect(vy_min, vy89AB, vltmask89AB);
    59  vyCDEF = wasm_v128_bitselect(vy_min, vyCDEF, vltmaskCDEF);
    78  const v128_t vltmask = wasm_f32x4_lt(vy, vy_min);
    [all …]

D | vrdivc-minmax-wasmsimd-x86-x16.c | in xnn_f32_vrdivc_minmax_ukernel__wasmsimd_x86_x16():
    31  const v128_t vy_min = wasm_v32x4_load_splat(&params->scalar.min);  local
    47  const v128_t vltmask0123 = wasm_f32x4_lt(vy0123, vy_min);
    48  const v128_t vltmask4567 = wasm_f32x4_lt(vy4567, vy_min);
    49  const v128_t vltmask89AB = wasm_f32x4_lt(vy89AB, vy_min);
    50  const v128_t vltmaskCDEF = wasm_f32x4_lt(vyCDEF, vy_min);
    53  vy0123 = wasm_v128_bitselect(vy_min, vy0123, vltmask0123);
    55  vy4567 = wasm_v128_bitselect(vy_min, vy4567, vltmask4567);
    57  vy89AB = wasm_v128_bitselect(vy_min, vy89AB, vltmask89AB);
    59  vyCDEF = wasm_v128_bitselect(vy_min, vyCDEF, vltmaskCDEF);
    78  const v128_t vltmask = wasm_f32x4_lt(vy, vy_min);
    [all …]

D | vdivc-minmax-wasmsimd-x86-x16.c | in xnn_f32_vdivc_minmax_ukernel__wasmsimd_x86_x16():
    31  const v128_t vy_min = wasm_v32x4_load_splat(&params->scalar.min);  local
    47  const v128_t vltmask0123 = wasm_f32x4_lt(vy0123, vy_min);
    48  const v128_t vltmask4567 = wasm_f32x4_lt(vy4567, vy_min);
    49  const v128_t vltmask89AB = wasm_f32x4_lt(vy89AB, vy_min);
    50  const v128_t vltmaskCDEF = wasm_f32x4_lt(vyCDEF, vy_min);
    53  vy0123 = wasm_v128_bitselect(vy_min, vy0123, vltmask0123);
    55  vy4567 = wasm_v128_bitselect(vy_min, vy4567, vltmask4567);
    57  vy89AB = wasm_v128_bitselect(vy_min, vy89AB, vltmask89AB);
    59  vyCDEF = wasm_v128_bitselect(vy_min, vyCDEF, vltmaskCDEF);
    78  const v128_t vltmask = wasm_f32x4_lt(vy, vy_min);
    [all …]

D | vaddc-minmax-wasmsimd-x86-x16.c | in xnn_f32_vaddc_minmax_ukernel__wasmsimd_x86_x16():
    31  const v128_t vy_min = wasm_v32x4_load_splat(&params->scalar.min);  local
    47  const v128_t vltmask0123 = wasm_f32x4_lt(vy0123, vy_min);
    48  const v128_t vltmask4567 = wasm_f32x4_lt(vy4567, vy_min);
    49  const v128_t vltmask89AB = wasm_f32x4_lt(vy89AB, vy_min);
    50  const v128_t vltmaskCDEF = wasm_f32x4_lt(vyCDEF, vy_min);
    53  vy0123 = wasm_v128_bitselect(vy_min, vy0123, vltmask0123);
    55  vy4567 = wasm_v128_bitselect(vy_min, vy4567, vltmask4567);
    57  vy89AB = wasm_v128_bitselect(vy_min, vy89AB, vltmask89AB);
    59  vyCDEF = wasm_v128_bitselect(vy_min, vyCDEF, vltmaskCDEF);
    78  const v128_t vltmask = wasm_f32x4_lt(vy, vy_min);
    [all …]

D | vsub-minmax-wasmsimd-x86-x16.c | in xnn_f32_vsub_minmax_ukernel__wasmsimd_x86_x16():
    31  const v128_t vy_min = wasm_v32x4_load_splat(&params->scalar.min);  local
    53  const v128_t vltmask0123 = wasm_f32x4_lt(vy0123, vy_min);
    54  const v128_t vltmask4567 = wasm_f32x4_lt(vy4567, vy_min);
    55  const v128_t vltmask89AB = wasm_f32x4_lt(vy89AB, vy_min);
    56  const v128_t vltmaskCDEF = wasm_f32x4_lt(vyCDEF, vy_min);
    59  vy0123 = wasm_v128_bitselect(vy_min, vy0123, vltmask0123);
    61  vy4567 = wasm_v128_bitselect(vy_min, vy4567, vltmask4567);
    63  vy89AB = wasm_v128_bitselect(vy_min, vy89AB, vltmask89AB);
    65  vyCDEF = wasm_v128_bitselect(vy_min, vyCDEF, vltmaskCDEF);
    87  const v128_t vltmask = wasm_f32x4_lt(vy, vy_min);
    [all …]

D | vmul-minmax-wasmsimd-x86-x16.c | in xnn_f32_vmul_minmax_ukernel__wasmsimd_x86_x16():
    31  const v128_t vy_min = wasm_v32x4_load_splat(&params->scalar.min);  local
    53  const v128_t vltmask0123 = wasm_f32x4_lt(vy0123, vy_min);
    54  const v128_t vltmask4567 = wasm_f32x4_lt(vy4567, vy_min);
    55  const v128_t vltmask89AB = wasm_f32x4_lt(vy89AB, vy_min);
    56  const v128_t vltmaskCDEF = wasm_f32x4_lt(vyCDEF, vy_min);
    59  vy0123 = wasm_v128_bitselect(vy_min, vy0123, vltmask0123);
    61  vy4567 = wasm_v128_bitselect(vy_min, vy4567, vltmask4567);
    63  vy89AB = wasm_v128_bitselect(vy_min, vy89AB, vltmask89AB);
    65  vyCDEF = wasm_v128_bitselect(vy_min, vyCDEF, vltmaskCDEF);
    87  const v128_t vltmask = wasm_f32x4_lt(vy, vy_min);
    [all …]

D | vadd-minmax-wasmsimd-x86-x16.c | in xnn_f32_vadd_minmax_ukernel__wasmsimd_x86_x16():
    31  const v128_t vy_min = wasm_v32x4_load_splat(&params->scalar.min);  local
    53  const v128_t vltmask0123 = wasm_f32x4_lt(vy0123, vy_min);
    54  const v128_t vltmask4567 = wasm_f32x4_lt(vy4567, vy_min);
    55  const v128_t vltmask89AB = wasm_f32x4_lt(vy89AB, vy_min);
    56  const v128_t vltmaskCDEF = wasm_f32x4_lt(vyCDEF, vy_min);
    59  vy0123 = wasm_v128_bitselect(vy_min, vy0123, vltmask0123);
    61  vy4567 = wasm_v128_bitselect(vy_min, vy4567, vltmask4567);
    63  vy89AB = wasm_v128_bitselect(vy_min, vy89AB, vltmask89AB);
    65  vyCDEF = wasm_v128_bitselect(vy_min, vyCDEF, vltmaskCDEF);
    87  const v128_t vltmask = wasm_f32x4_lt(vy, vy_min);
    [all …]

D | vdiv-minmax-wasmsimd-x86-x16.c | in xnn_f32_vdiv_minmax_ukernel__wasmsimd_x86_x16():
    31  const v128_t vy_min = wasm_v32x4_load_splat(&params->scalar.min);  local
    53  const v128_t vltmask0123 = wasm_f32x4_lt(vy0123, vy_min);
    54  const v128_t vltmask4567 = wasm_f32x4_lt(vy4567, vy_min);
    55  const v128_t vltmask89AB = wasm_f32x4_lt(vy89AB, vy_min);
    56  const v128_t vltmaskCDEF = wasm_f32x4_lt(vyCDEF, vy_min);
    59  vy0123 = wasm_v128_bitselect(vy_min, vy0123, vltmask0123);
    61  vy4567 = wasm_v128_bitselect(vy_min, vy4567, vltmask4567);
    63  vy89AB = wasm_v128_bitselect(vy_min, vy89AB, vltmask89AB);
    65  vyCDEF = wasm_v128_bitselect(vy_min, vyCDEF, vltmaskCDEF);
    87  const v128_t vltmask = wasm_f32x4_lt(vy, vy_min);
    [all …]
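
Every wasmsimd-x86 x16 kernel listed above enforces the lower bound the same way: it compares each result vector against the splatted vy_min with wasm_f32x4_lt, then uses wasm_v128_bitselect to substitute vy_min into the lanes that compared below it. A minimal sketch of that idiom follows, assuming clang with -msimd128; mulc_clamp_min_x4 is a hypothetical stand-alone helper, not the XNNPACK source, multiply-by-constant is just an example op, and the sketch splats vy_min from a plain value instead of load-splatting it from &params->scalar.min as the listings do. The compare + bitselect form is presumably preferred over wasm's f32x4.min because the latter's NaN/-0.0 semantics tend to lower to longer instruction sequences on x86.

    #include <wasm_simd128.h>

    // Hypothetical helper (not XNNPACK code): multiply four floats by a constant,
    // then force every lane to be at least `min`, using the compare + bitselect
    // idiom shown in the listings above.
    static void mulc_clamp_min_x4(const float* x, float c, float min, float* y) {
      const v128_t vc = wasm_f32x4_splat(c);
      const v128_t vy_min = wasm_f32x4_splat(min);

      v128_t vy = wasm_f32x4_mul(wasm_v128_load(x), vc);

      // Lanes where vy < vy_min become all-ones in the mask...
      const v128_t vltmask = wasm_f32x4_lt(vy, vy_min);
      // ...and bitselect takes bits from vy_min for those lanes, from vy otherwise.
      vy = wasm_v128_bitselect(vy_min, vy, vltmask);

      wasm_v128_store(y, vy);
    }

Because the substitution is driven purely by the comparison, a NaN lane compares false against vy_min and passes through unclamped.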
D | vdivc-minmax-wasm-x8.c | in xnn_f32_vdivc_minmax_ukernel__wasm_x8():
    30  const float vy_min = params->scalar.min;  local
    55  vy0 = __builtin_wasm_max_f32(vy0, vy_min);
    56  vy1 = __builtin_wasm_max_f32(vy1, vy_min);
    57  vy2 = __builtin_wasm_max_f32(vy2, vy_min);
    58  vy3 = __builtin_wasm_max_f32(vy3, vy_min);
    59  vy4 = __builtin_wasm_max_f32(vy4, vy_min);
    60  vy5 = __builtin_wasm_max_f32(vy5, vy_min);
    61  vy6 = __builtin_wasm_max_f32(vy6, vy_min);
    62  vy7 = __builtin_wasm_max_f32(vy7, vy_min);
    87  vy = __builtin_wasm_max_f32(vy, vy_min);

D | vsubc-minmax-scalar-x8.c | in xnn_f32_vsubc_minmax_ukernel__scalar_x8():
    30  const float vy_min = params->scalar.min;  local
    55  vy0 = math_max_f32(vy0, vy_min);
    56  vy1 = math_max_f32(vy1, vy_min);
    57  vy2 = math_max_f32(vy2, vy_min);
    58  vy3 = math_max_f32(vy3, vy_min);
    59  vy4 = math_max_f32(vy4, vy_min);
    60  vy5 = math_max_f32(vy5, vy_min);
    61  vy6 = math_max_f32(vy6, vy_min);
    62  vy7 = math_max_f32(vy7, vy_min);
    87  vy = math_max_f32(vy, vy_min);

D | vmulc-minmax-scalar-x8.c | in xnn_f32_vmulc_minmax_ukernel__scalar_x8():
    30  const float vy_min = params->scalar.min;  local
    55  vy0 = math_max_f32(vy0, vy_min);
    56  vy1 = math_max_f32(vy1, vy_min);
    57  vy2 = math_max_f32(vy2, vy_min);
    58  vy3 = math_max_f32(vy3, vy_min);
    59  vy4 = math_max_f32(vy4, vy_min);
    60  vy5 = math_max_f32(vy5, vy_min);
    61  vy6 = math_max_f32(vy6, vy_min);
    62  vy7 = math_max_f32(vy7, vy_min);
    87  vy = math_max_f32(vy, vy_min);

D | vdivc-minmax-scalar-x8.c | in xnn_f32_vdivc_minmax_ukernel__scalar_x8():
    30  const float vy_min = params->scalar.min;  local
    55  vy0 = math_max_f32(vy0, vy_min);
    56  vy1 = math_max_f32(vy1, vy_min);
    57  vy2 = math_max_f32(vy2, vy_min);
    58  vy3 = math_max_f32(vy3, vy_min);
    59  vy4 = math_max_f32(vy4, vy_min);
    60  vy5 = math_max_f32(vy5, vy_min);
    61  vy6 = math_max_f32(vy6, vy_min);
    62  vy7 = math_max_f32(vy7, vy_min);
    87  vy = math_max_f32(vy, vy_min);

D | vrdivc-minmax-wasm-x8.c | in xnn_f32_vrdivc_minmax_ukernel__wasm_x8():
    30  const float vy_min = params->scalar.min;  local
    55  vy0 = __builtin_wasm_max_f32(vy0, vy_min);
    56  vy1 = __builtin_wasm_max_f32(vy1, vy_min);
    57  vy2 = __builtin_wasm_max_f32(vy2, vy_min);
    58  vy3 = __builtin_wasm_max_f32(vy3, vy_min);
    59  vy4 = __builtin_wasm_max_f32(vy4, vy_min);
    60  vy5 = __builtin_wasm_max_f32(vy5, vy_min);
    61  vy6 = __builtin_wasm_max_f32(vy6, vy_min);
    62  vy7 = __builtin_wasm_max_f32(vy7, vy_min);
    87  vy = __builtin_wasm_max_f32(vy, vy_min);

D | vaddc-minmax-scalar-x8.c | in xnn_f32_vaddc_minmax_ukernel__scalar_x8():
    30  const float vy_min = params->scalar.min;  local
    55  vy0 = math_max_f32(vy0, vy_min);
    56  vy1 = math_max_f32(vy1, vy_min);
    57  vy2 = math_max_f32(vy2, vy_min);
    58  vy3 = math_max_f32(vy3, vy_min);
    59  vy4 = math_max_f32(vy4, vy_min);
    60  vy5 = math_max_f32(vy5, vy_min);
    61  vy6 = math_max_f32(vy6, vy_min);
    62  vy7 = math_max_f32(vy7, vy_min);
    87  vy = math_max_f32(vy, vy_min);

D | vrsubc-minmax-wasm-x8.c | in xnn_f32_vrsubc_minmax_ukernel__wasm_x8():
    30  const float vy_min = params->scalar.min;  local
    55  vy0 = __builtin_wasm_max_f32(vy0, vy_min);
    56  vy1 = __builtin_wasm_max_f32(vy1, vy_min);
    57  vy2 = __builtin_wasm_max_f32(vy2, vy_min);
    58  vy3 = __builtin_wasm_max_f32(vy3, vy_min);
    59  vy4 = __builtin_wasm_max_f32(vy4, vy_min);
    60  vy5 = __builtin_wasm_max_f32(vy5, vy_min);
    61  vy6 = __builtin_wasm_max_f32(vy6, vy_min);
    62  vy7 = __builtin_wasm_max_f32(vy7, vy_min);
    87  vy = __builtin_wasm_max_f32(vy, vy_min);

D | vmulc-minmax-wasm-x8.c | in xnn_f32_vmulc_minmax_ukernel__wasm_x8():
    30  const float vy_min = params->scalar.min;  local
    55  vy0 = __builtin_wasm_max_f32(vy0, vy_min);
    56  vy1 = __builtin_wasm_max_f32(vy1, vy_min);
    57  vy2 = __builtin_wasm_max_f32(vy2, vy_min);
    58  vy3 = __builtin_wasm_max_f32(vy3, vy_min);
    59  vy4 = __builtin_wasm_max_f32(vy4, vy_min);
    60  vy5 = __builtin_wasm_max_f32(vy5, vy_min);
    61  vy6 = __builtin_wasm_max_f32(vy6, vy_min);
    62  vy7 = __builtin_wasm_max_f32(vy7, vy_min);
    87  vy = __builtin_wasm_max_f32(vy, vy_min);

D | vrsubc-minmax-scalar-x8.c | in xnn_f32_vrsubc_minmax_ukernel__scalar_x8():
    30  const float vy_min = params->scalar.min;  local
    55  vy0 = math_max_f32(vy0, vy_min);
    56  vy1 = math_max_f32(vy1, vy_min);
    57  vy2 = math_max_f32(vy2, vy_min);
    58  vy3 = math_max_f32(vy3, vy_min);
    59  vy4 = math_max_f32(vy4, vy_min);
    60  vy5 = math_max_f32(vy5, vy_min);
    61  vy6 = math_max_f32(vy6, vy_min);
    62  vy7 = math_max_f32(vy7, vy_min);
    87  vy = math_max_f32(vy, vy_min);

D | vrdivc-minmax-scalar-x8.c | in xnn_f32_vrdivc_minmax_ukernel__scalar_x8():
    30  const float vy_min = params->scalar.min;  local
    55  vy0 = math_max_f32(vy0, vy_min);
    56  vy1 = math_max_f32(vy1, vy_min);
    57  vy2 = math_max_f32(vy2, vy_min);
    58  vy3 = math_max_f32(vy3, vy_min);
    59  vy4 = math_max_f32(vy4, vy_min);
    60  vy5 = math_max_f32(vy5, vy_min);
    61  vy6 = math_max_f32(vy6, vy_min);
    62  vy7 = math_max_f32(vy7, vy_min);
    87  vy = math_max_f32(vy, vy_min);

D | vsubc-minmax-wasm-x8.c | in xnn_f32_vsubc_minmax_ukernel__wasm_x8():
    30  const float vy_min = params->scalar.min;  local
    55  vy0 = __builtin_wasm_max_f32(vy0, vy_min);
    56  vy1 = __builtin_wasm_max_f32(vy1, vy_min);
    57  vy2 = __builtin_wasm_max_f32(vy2, vy_min);
    58  vy3 = __builtin_wasm_max_f32(vy3, vy_min);
    59  vy4 = __builtin_wasm_max_f32(vy4, vy_min);
    60  vy5 = __builtin_wasm_max_f32(vy5, vy_min);
    61  vy6 = __builtin_wasm_max_f32(vy6, vy_min);
    62  vy7 = __builtin_wasm_max_f32(vy7, vy_min);
    87  vy = __builtin_wasm_max_f32(vy, vy_min);

D | vaddc-minmax-wasm-x8.c | in xnn_f32_vaddc_minmax_ukernel__wasm_x8():
    30  const float vy_min = params->scalar.min;  local
    55  vy0 = __builtin_wasm_max_f32(vy0, vy_min);
    56  vy1 = __builtin_wasm_max_f32(vy1, vy_min);
    57  vy2 = __builtin_wasm_max_f32(vy2, vy_min);
    58  vy3 = __builtin_wasm_max_f32(vy3, vy_min);
    59  vy4 = __builtin_wasm_max_f32(vy4, vy_min);
    60  vy5 = __builtin_wasm_max_f32(vy5, vy_min);
    61  vy6 = __builtin_wasm_max_f32(vy6, vy_min);
    62  vy7 = __builtin_wasm_max_f32(vy7, vy_min);
    87  vy = __builtin_wasm_max_f32(vy, vy_min);

D | vadd-minmax-wasm-x8.c | in xnn_f32_vadd_minmax_ukernel__wasm_x8():
    30  const float vy_min = params->scalar.min;  local
    64  vy0 = __builtin_wasm_max_f32(vy0, vy_min);
    65  vy1 = __builtin_wasm_max_f32(vy1, vy_min);
    66  vy2 = __builtin_wasm_max_f32(vy2, vy_min);
    67  vy3 = __builtin_wasm_max_f32(vy3, vy_min);
    68  vy4 = __builtin_wasm_max_f32(vy4, vy_min);
    69  vy5 = __builtin_wasm_max_f32(vy5, vy_min);
    70  vy6 = __builtin_wasm_max_f32(vy6, vy_min);
    71  vy7 = __builtin_wasm_max_f32(vy7, vy_min);
    97  vy = __builtin_wasm_max_f32(vy, vy_min);

D | vadd-minmax-scalar-x8.c | in xnn_f32_vadd_minmax_ukernel__scalar_x8():
    30  const float vy_min = params->scalar.min;  local
    64  vy0 = math_max_f32(vy0, vy_min);
    65  vy1 = math_max_f32(vy1, vy_min);
    66  vy2 = math_max_f32(vy2, vy_min);
    67  vy3 = math_max_f32(vy3, vy_min);
    68  vy4 = math_max_f32(vy4, vy_min);
    69  vy5 = math_max_f32(vy5, vy_min);
    70  vy6 = math_max_f32(vy6, vy_min);
    71  vy7 = math_max_f32(vy7, vy_min);
    97  vy = math_max_f32(vy, vy_min);

D | vdiv-minmax-scalar-x8.c | in xnn_f32_vdiv_minmax_ukernel__scalar_x8():
    30  const float vy_min = params->scalar.min;  local
    64  vy0 = math_max_f32(vy0, vy_min);
    65  vy1 = math_max_f32(vy1, vy_min);
    66  vy2 = math_max_f32(vy2, vy_min);
    67  vy3 = math_max_f32(vy3, vy_min);
    68  vy4 = math_max_f32(vy4, vy_min);
    69  vy5 = math_max_f32(vy5, vy_min);
    70  vy6 = math_max_f32(vy6, vy_min);
    71  vy7 = math_max_f32(vy7, vy_min);
    97  vy = math_max_f32(vy, vy_min);
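
The x8 wasm and scalar kernels above apply the same lower bound elementwise: each of the eight unrolled results goes through math_max_f32 (XNNPACK's plain-C helper) or __builtin_wasm_max_f32 (which maps to wasm's f32.max), and a remainder loop clamps one element at a time. Below is a minimal sketch of that structure; max_f32 and addc_clamp_min_x8 are hypothetical stand-ins, not XNNPACK code, with add-plus-constant as the example op.

    #include <stddef.h>

    // Stand-in for XNNPACK's math_max_f32; the *-wasm-x8 kernels call
    // __builtin_wasm_max_f32 here instead, but the clamp itself is the same.
    static inline float max_f32(float a, float b) {
      return a > b ? a : b;
    }

    // Hypothetical example: add a constant to n floats and clamp each result
    // to be no smaller than vy_min, mirroring the vaddc-minmax x8 kernels above.
    static void addc_clamp_min_x8(const float* x, float c, float vy_min,
                                  float* y, size_t n) {
      size_t i = 0;
      // Main loop: eight results per iteration. The real kernels unroll the
      // inner loop by hand into vy0 ... vy7 (lines 55-62 in the *c listings).
      for (; i + 8 <= n; i += 8) {
        for (size_t j = 0; j < 8; j++) {
          float vy = x[i + j] + c;
          vy = max_f32(vy, vy_min);  // lower-bound clamp
          y[i + j] = vy;
        }
      }
      // Remainder: clamp one element at a time (line 87 above; 97 in the
      // two-input variants).
      for (; i < n; i++) {
        y[i] = max_f32(x[i] + c, vy_min);
      }
    }

Writing the eight clamps out by hand, as the generated kernels do, keeps each one an independent statement rather than a loop-carried iteration.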