
Searched refs:vs4 (Results 1 – 25 of 135) sorted by relevance


/external/clang/test/Sema/
ext_vector_conversions.c (in test()):
    14: short4 vs4;   [local]
    18: vs4 += ll;  // expected-warning {{implicit conversion loses integer precision}}
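
The warning above fires because compound assignment converts the 64-bit scalar operand down to the vector's 16-bit element type before the lanewise add. A minimal standalone sketch of the same diagnostic (not the original test file; compile with clang -Wconversion):

    /* Clang splats the scalar operand of += across the vector, truncating
       long long to short in every lane, hence the precision warning. */
    typedef __attribute__((ext_vector_type(4))) short short4;

    void test(long long ll) {
      short4 vs4 = {0, 0, 0, 0};
      vs4 += ll;  /* warning: implicit conversion loses integer precision */
    }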
/external/XNNPACK/src/f32-velu/gen/
velu-scalar-rr2-lut16-p3-x5.c (in xnn_f32_velu_ukernel__scalar_rr2_lut16_p3_x5()):
    87: float vs4 = fp32_from_bits(xnn_table_exp2minus_k_over_16[vidx4] + ven4);   [local]
   111: vs4 = 0.0f;
   135: vt4 *= vs4;
   136: vs4 -= vone;
   152: const float ve4 = (vp4 + vs4) * valpha;
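
These lut16-p3 kernels rebuild 2**t from a 16-entry table: t is split into an integer part n and a fractional sixteenth, the table supplies the bit pattern of the fractional power, and n is added straight into the IEEE-754 exponent field; that integer add of table entry plus exponent bits is the `xnn_table_exp2minus_k_over_16[vidx4] + ven4` above. The sketch below is an illustrative analogue under assumed conventions, building its own table of 2**(k/16) rather than copying XNNPACK's exact magic-bias arithmetic and its table of 2**(-k/16) bit patterns:

    #include <math.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    static uint32_t fp32_to_bits(float f) { uint32_t u; memcpy(&u, &f, sizeof u); return u; }
    static float fp32_from_bits(uint32_t u) { float f; memcpy(&f, &u, sizeof f); return f; }

    int main(void) {
      uint32_t table[16];  /* bit patterns of 2**(k/16), k = 0..15 */
      for (int k = 0; k < 16; k++) table[k] = fp32_to_bits(exp2f((float) k / 16.0f));

      const float t = -3.7f;                  /* evaluate 2**t at 1/16 resolution */
      const int q = (int) lrintf(t * 16.0f);  /* t ~= q/16 */
      const int k = q & 15;                   /* fractional index into the table */
      const int n = (q - k) / 16;             /* integer exponent, floor(q/16) */
      /* adding n << 23 bumps the exponent field: an exact scale by 2**n */
      const float vs = fp32_from_bits(table[k] + ((uint32_t) n << 23));
      printf("%f ~ %f\n", vs, exp2f(t));      /* ~0.0776 vs ~0.0769 */
      return 0;
    }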
velu-scalar-rr2-p6-x5.c (in xnn_f32_velu_ukernel__scalar_rr2_p6_x5()):
    70: float vs4 = fp32_from_bits(fp32_to_bits(vn4) << 23);   [local]
   102: vs4 = 0.0f;
   144: vt4 *= vs4;
   145: vs4 -= vone;
   161: const float ve4 = (vp4 + vs4) * valpha;
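
The p6 variants skip the table entirely: a magic-bias addition leaves round(x*log2e), pre-offset by the +127 exponent bias, in the low mantissa bits of vn4, so shifting the whole bit pattern left by 23 drops it into the exponent field and the result reads back as the float 2**n. (The `vs4 = 0.0f` lines in these scalar kernels are the saturation path: for very negative inputs s and t are zeroed so the ELU output settles at -alpha.) A self-contained sketch, assuming the 0x1.8000FEp23f magic bias these generated kernels use:

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    static uint32_t fp32_to_bits(float f) { uint32_t u; memcpy(&u, &f, sizeof u); return u; }
    static float fp32_from_bits(uint32_t u) { float f; memcpy(&f, &u, sizeof f); return f; }

    /* 2**round(t) for moderate |t|: after adding the magic bias, the low
       mantissa bits of vn hold round(t) + 127 (the 0xFE tail of the constant
       encodes the exponent bias), and << 23 moves them into the exponent. */
    static float exp2_round(float t) {
      const float vmagic_bias = 0x1.8000FEp23f;
      const float vn = t + vmagic_bias;
      return fp32_from_bits(fp32_to_bits(vn) << 23);
    }

    int main(void) {
      printf("%g %g %g\n", exp2_round(0.0f), exp2_round(3.0f), exp2_round(-2.0f));
      return 0;  /* prints: 1 8 0.25 */
    }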
velu-scalar-rr2-lut16-p3-x6.c (in xnn_f32_velu_ukernel__scalar_rr2_lut16_p3_x6()):
    93: float vs4 = fp32_from_bits(xnn_table_exp2minus_k_over_16[vidx4] + ven4);   [local]
   119: vs4 = 0.0f;
   150: vt4 *= vs4;
   151: vs4 -= vone;
   170: const float ve4 = (vp4 + vs4) * valpha;
velu-scalar-rr2-p6-x6.c (in xnn_f32_velu_ukernel__scalar_rr2_p6_x6()):
    73: float vs4 = fp32_from_bits(fp32_to_bits(vn4) << 23);   [local]
   109: vs4 = 0.0f;
   160: vt4 *= vs4;
   161: vs4 -= vone;
   180: const float ve4 = (vp4 + vs4) * valpha;
velu-wasm-rr2-lut16-p3-x5.c (in xnn_f32_velu_ukernel__wasm_rr2_lut16_p3_x5()):
    87: float vs4 = fp32_from_bits(xnn_table_exp2minus_k_over_16[vidx4] + ven4);   [local]
   115: vt4 *= vs4;
   116: vs4 -= vone;
   132: const float ve4 = (vp4 + vs4) * valpha;
velu-wasm-rr2-p6-x5.c (in xnn_f32_velu_ukernel__wasm_rr2_p6_x5()):
    70: float vs4 = fp32_from_bits(fp32_to_bits(vn4) << 23);   [local]
   124: vt4 *= vs4;
   125: vs4 -= vone;
   141: const float ve4 = (vp4 + vs4) * valpha;
velu-avx512f-rr1-lut16-p3-perm-x80.c (in xnn_f32_velu_ukernel__avx512f_rr1_lut16_p3_perm_x80()):
    78: __m512 vs4 = _mm512_castsi512_ps(_mm512_add_epi32(vl4, ven4));   [local]
   102: vt4 = _mm512_mul_ps(vt4, vs4);
   108: vs4 = _mm512_fmsub_ps(vs4, valpha, valpha);
   125: __m512 vy4 = _mm512_fmadd_ps(vp4, valpha, vs4);
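
In the AVX-512 and AVX2 variants the ELU tail alpha*(e^x - 1) is fused into the two FMA lines above rather than a subtract followed by a multiply: with e^x ~= s + p (s = 2**n in vs4, p the already-s-scaled polynomial in vp4), _mm512_fmsub_ps(vs4, valpha, valpha) yields alpha*(s - 1) and _mm512_fmadd_ps(vp4, valpha, vs4) completes alpha*(s + p - 1). The scalar kernels earlier spell the same identity as (vp4 + (vs4 - vone)) * valpha. A scalar check of the algebra with fmaf, using made-up values:

    #include <math.h>
    #include <stdio.h>

    int main(void) {
      const float valpha = 1.0f;             /* ELU alpha parameter */
      const float vs = 0.125f, vp = -0.03f;  /* stand-ins for 2**n and the poly */
      const float vs2 = fmaf(vs, valpha, -valpha);  /* alpha*s - alpha   */
      const float vy  = fmaf(vp, valpha, vs2);      /* alpha*(s + p - 1) */
      printf("%f == %f\n", vy, (vp + (vs - 1.0f)) * valpha);
      return 0;
    }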
velu-wasm-rr2-p6-x6.c (in xnn_f32_velu_ukernel__wasm_rr2_p6_x6()):
    73: float vs4 = fp32_from_bits(fp32_to_bits(vn4) << 23);   [local]
   136: vt4 *= vs4;
   137: vs4 -= vone;
   156: const float ve4 = (vp4 + vs4) * valpha;
velu-wasm-rr2-lut16-p3-x6.c (in xnn_f32_velu_ukernel__wasm_rr2_lut16_p3_x6()):
    93: float vs4 = fp32_from_bits(xnn_table_exp2minus_k_over_16[vidx4] + ven4);   [local]
   126: vt4 *= vs4;
   127: vs4 -= vone;
   146: const float ve4 = (vp4 + vs4) * valpha;
velu-avx2-rr1-lut16-p3-gather-x40.c (in xnn_f32_velu_ukernel__avx2_rr1_lut16_p3_gather_x40()):
    89: __m256 vs4 = _mm256_castsi256_ps(_mm256_add_epi32(vl4, ven4));   [local]
   107: vt4 = _mm256_mul_ps(vt4, vs4);
   117: vs4 = _mm256_fmsub_ps(vs4, valpha, valpha);
   128: const __m256 ve4 = _mm256_fmadd_ps(vp4, valpha, vs4);
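
The "gather" spelling of lut16 differs from the "perm" variants only in how vl4 is fetched: here the eight table entries come from an AVX2 gather over xnn_table_exp2minus_k_over_16, whereas the lut8/lut4 perm variants keep their shorter tables in a register and select lanes with a permute. A sketch of that lookup step (surrounding values assumed, not copied from the kernel):

    #include <immintrin.h>
    #include <stdint.h>

    /* vidx holds each lane's low 4 bits of n; one gather pulls the eight
       2**(-k/16) bit patterns, which are then added to the exponent bits
       (ven4) and bit-cast to float, as in the line above. */
    static __m256i gather_lut16(const uint32_t table[16], __m256i vidx) {
      return _mm256_i32gather_epi32((const int*) table, vidx, sizeof(uint32_t));
    }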
velu-avx2-rr1-lut8-p4-perm-x40.c (in xnn_f32_velu_ukernel__avx2_rr1_lut8_p4_perm_x40()):
    82: __m256 vs4 = _mm256_castsi256_ps(_mm256_add_epi32(vl4, ven4));   [local]
   106: vt4 = _mm256_mul_ps(vt4, vs4);
   116: vs4 = _mm256_fmsub_ps(vs4, valpha, valpha);
   127: const __m256 ve4 = _mm256_fmadd_ps(vp4, valpha, vs4);
velu-avx2-rr1-p6-x40.c (in xnn_f32_velu_ukernel__avx2_rr1_p6_x40()):
    67: __m256 vs4 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn4), 23));   [local]
   109: vt4 = _mm256_mul_ps(vt4, vs4);
   119: vs4 = _mm256_fmsub_ps(vs4, valpha, valpha);
   130: const __m256 ve4 = _mm256_fmadd_ps(vp4, valpha, vs4);
velu-avx512f-rr1-p6-x80.c (in xnn_f32_velu_ukernel__avx512f_rr1_p6_x80()):
    69: __m512 vs4 = _mm512_castsi512_ps(_mm512_slli_epi32(_mm512_castps_si512(vn4), 23));   [local]
   111: vt4 = _mm512_mul_ps(vt4, vs4);
   117: vs4 = _mm512_fmsub_ps(vs4, valpha, valpha);
   134: __m512 vy4 = _mm512_fmadd_ps(vp4, valpha, vs4);
velu-avx2-rr1-lut4-p4-perm-x40.c (in xnn_f32_velu_ukernel__avx2_rr1_lut4_p4_perm_x40()):
    82: __m256 vs4 = _mm256_castsi256_ps(_mm256_add_epi32(vl4, ven4));   [local]
   106: vt4 = _mm256_mul_ps(vt4, vs4);
   116: vs4 = _mm256_fmsub_ps(vs4, valpha, valpha);
   127: const __m256 ve4 = _mm256_fmadd_ps(vp4, valpha, vs4);
velu-avx512f-rr1-lut16-p3-perm-x96.c (in xnn_f32_velu_ukernel__avx512f_rr1_lut16_p3_perm_x96()):
    83: __m512 vs4 = _mm512_castsi512_ps(_mm512_add_epi32(vl4, ven4));   [local]
   111: vt4 = _mm512_mul_ps(vt4, vs4);
   119: vs4 = _mm512_fmsub_ps(vs4, valpha, valpha);
   138: __m512 vy4 = _mm512_fmadd_ps(vp4, valpha, vs4);
velu-avx512f-rr1-lut16-p3-perm-x112.c (in xnn_f32_velu_ukernel__avx512f_rr1_lut16_p3_perm_x112()):
    88: __m512 vs4 = _mm512_castsi512_ps(_mm512_add_epi32(vl4, ven4));   [local]
   120: vt4 = _mm512_mul_ps(vt4, vs4);
   130: vs4 = _mm512_fmsub_ps(vs4, valpha, valpha);
   151: __m512 vy4 = _mm512_fmadd_ps(vp4, valpha, vs4);
velu-avx2-rr1-p6-x48.c (in xnn_f32_velu_ukernel__avx2_rr1_p6_x48()):
    70: __m256 vs4 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn4), 23));   [local]
   119: vt4 = _mm256_mul_ps(vt4, vs4);
   131: vs4 = _mm256_fmsub_ps(vs4, valpha, valpha);
   144: const __m256 ve4 = _mm256_fmadd_ps(vp4, valpha, vs4);
velu-avx2-rr1-lut16-p3-gather-x48.c (in xnn_f32_velu_ukernel__avx2_rr1_lut16_p3_gather_x48()):
    96: __m256 vs4 = _mm256_castsi256_ps(_mm256_add_epi32(vl4, ven4));   [local]
   117: vt4 = _mm256_mul_ps(vt4, vs4);
   129: vs4 = _mm256_fmsub_ps(vs4, valpha, valpha);
   142: const __m256 ve4 = _mm256_fmadd_ps(vp4, valpha, vs4);
velu-avx2-rr1-lut8-p4-perm-x48.c (in xnn_f32_velu_ukernel__avx2_rr1_lut8_p4_perm_x48()):
    88: __m256 vs4 = _mm256_castsi256_ps(_mm256_add_epi32(vl4, ven4));   [local]
   116: vt4 = _mm256_mul_ps(vt4, vs4);
   128: vs4 = _mm256_fmsub_ps(vs4, valpha, valpha);
   141: const __m256 ve4 = _mm256_fmadd_ps(vp4, valpha, vs4);
velu-avx2-rr1-lut4-p4-perm-x48.c (in xnn_f32_velu_ukernel__avx2_rr1_lut4_p4_perm_x48()):
    88: __m256 vs4 = _mm256_castsi256_ps(_mm256_add_epi32(vl4, ven4));   [local]
   116: vt4 = _mm256_mul_ps(vt4, vs4);
   128: vs4 = _mm256_fmsub_ps(vs4, valpha, valpha);
   141: const __m256 ve4 = _mm256_fmadd_ps(vp4, valpha, vs4);
velu-avx512f-rr1-p6-x96.c (in xnn_f32_velu_ukernel__avx512f_rr1_p6_x96()):
    72: __m512 vs4 = _mm512_castsi512_ps(_mm512_slli_epi32(_mm512_castps_si512(vn4), 23));   [local]
   121: vt4 = _mm512_mul_ps(vt4, vs4);
   129: vs4 = _mm512_fmsub_ps(vs4, valpha, valpha);
   148: __m512 vy4 = _mm512_fmadd_ps(vp4, valpha, vs4);
velu-avx2-rr1-lut8-p4-perm-x56.c (in xnn_f32_velu_ukernel__avx2_rr1_lut8_p4_perm_x56()):
    94: __m256 vs4 = _mm256_castsi256_ps(_mm256_add_epi32(vl4, ven4));   [local]
   126: vt4 = _mm256_mul_ps(vt4, vs4);
   140: vs4 = _mm256_fmsub_ps(vs4, valpha, valpha);
   155: const __m256 ve4 = _mm256_fmadd_ps(vp4, valpha, vs4);
velu-avx2-rr1-lut4-p4-perm-x56.c (in xnn_f32_velu_ukernel__avx2_rr1_lut4_p4_perm_x56()):
    94: __m256 vs4 = _mm256_castsi256_ps(_mm256_add_epi32(vl4, ven4));   [local]
   126: vt4 = _mm256_mul_ps(vt4, vs4);
   140: vs4 = _mm256_fmsub_ps(vs4, valpha, valpha);
   155: const __m256 ve4 = _mm256_fmadd_ps(vp4, valpha, vs4);
velu-avx512f-rr1-p6-x112.c (in xnn_f32_velu_ukernel__avx512f_rr1_p6_x112()):
    75: __m512 vs4 = _mm512_castsi512_ps(_mm512_slli_epi32(_mm512_castps_si512(vn4), 23));   [local]
   131: vt4 = _mm512_mul_ps(vt4, vs4);
   141: vs4 = _mm512_fmsub_ps(vs4, valpha, valpha);
   162: __m512 vy4 = _mm512_fmadd_ps(vp4, valpha, vs4);
