/external/XNNPACK/src/f32-vbinary/gen/

  vrdivc-avx-x16.c  (xnn_f32_vrdivc_ukernel__avx_x16)
    39: __m256 vy01234567 = _mm256_div_ps(vb, va01234567);
    40: __m256 vy89ABCDEF = _mm256_div_ps(vb, va89ABCDEF);
    56: __m256 vy = _mm256_div_ps(vb, va);
    69: __m256 vy = _mm256_div_ps(vb, va);

  vdivc-avx-x16.c  (xnn_f32_vdivc_ukernel__avx_x16)
    39: __m256 vy01234567 = _mm256_div_ps(va01234567, vb);
    40: __m256 vy89ABCDEF = _mm256_div_ps(va89ABCDEF, vb);
    56: __m256 vy = _mm256_div_ps(va, vb);
    69: __m256 vy = _mm256_div_ps(va, vb);

  vdiv-avx-x16.c  (xnn_f32_vdiv_ukernel__avx_x16)
    42: __m256 vy01234567 = _mm256_div_ps(va01234567, vb01234567);
    43: __m256 vy89ABCDEF = _mm256_div_ps(va89ABCDEF, vb89ABCDEF);
    62: __m256 vy = _mm256_div_ps(va, vb);
    76: __m256 vy = _mm256_div_ps(va, vb);

  vdivc-avx-x8.c  (xnn_f32_vdivc_ukernel__avx_x8)
    38: __m256 vy01234567 = _mm256_div_ps(va01234567, vb);
    51: __m256 vy = _mm256_div_ps(va, vb);
    64: __m256 vy = _mm256_div_ps(va, vb);

  vrdivc-avx-x8.c  (xnn_f32_vrdivc_ukernel__avx_x8)
    38: __m256 vy01234567 = _mm256_div_ps(vb, va01234567);
    51: __m256 vy = _mm256_div_ps(vb, va);
    64: __m256 vy = _mm256_div_ps(vb, va);

  vdiv-avx-x8.c  (xnn_f32_vdiv_ukernel__avx_x8)
    40: __m256 vy01234567 = _mm256_div_ps(va01234567, vb01234567);
    56: __m256 vy = _mm256_div_ps(va, vb);
    70: __m256 vy = _mm256_div_ps(va, vb);
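The vdiv/vdivc hits above compute a / b elementwise, while the vrdivc variants only swap the operands to compute b / a against a broadcast constant. A minimal sketch of a divide-by-constant loop in that spirit (the function name, signature, and remainder handling are illustrative, not the actual XNNPACK microkernel):

#include <immintrin.h>
#include <stddef.h>

/* Illustrative AVX divide-by-scalar loop; `reverse` selects the RDIV form. */
static void f32_vdivc_sketch(size_t n, const float* a, float b, float* y, int reverse) {
  const __m256 vb = _mm256_set1_ps(b);           /* broadcast the constant operand */
  for (; n >= 8; n -= 8) {
    const __m256 va = _mm256_loadu_ps(a);
    a += 8;
    /* DIV: a / b.  RDIV differs only in operand order: b / a. */
    const __m256 vy = reverse ? _mm256_div_ps(vb, va) : _mm256_div_ps(va, vb);
    _mm256_storeu_ps(y, vy);
    y += 8;
  }
  for (; n != 0; --n) {                          /* scalar tail for the remainder */
    *y++ = reverse ? b / *a : *a / b;
    ++a;
  }
}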
/external/XNNPACK/src/f32-sigmoid/gen/

  avx2-rr1-p5-div-x80.c  (xnn_f32_sigmoid_ukernel__avx2_rr1_p5_div_x80)
    212: __m256 vf0 = _mm256_div_ps(ve0, vd0);
    213: __m256 vf1 = _mm256_div_ps(ve1, vd1);
    214: __m256 vf2 = _mm256_div_ps(ve2, vd2);
    215: __m256 vf3 = _mm256_div_ps(ve3, vd3);
    216: __m256 vf4 = _mm256_div_ps(ve4, vd4);
    217: __m256 vf5 = _mm256_div_ps(ve5, vd5);
    218: __m256 vf6 = _mm256_div_ps(ve6, vd6);
    219: __m256 vf7 = _mm256_div_ps(ve7, vd7);
    220: __m256 vf8 = _mm256_div_ps(ve8, vd8);
    221: __m256 vf9 = _mm256_div_ps(ve9, vd9);
    [all …]

  avx2-rr1-p5-div-x64.c  (xnn_f32_sigmoid_ukernel__avx2_rr1_p5_div_x64)
    186: __m256 vf0 = _mm256_div_ps(ve0, vd0);
    187: __m256 vf1 = _mm256_div_ps(ve1, vd1);
    188: __m256 vf2 = _mm256_div_ps(ve2, vd2);
    189: __m256 vf3 = _mm256_div_ps(ve3, vd3);
    190: __m256 vf4 = _mm256_div_ps(ve4, vd4);
    191: __m256 vf5 = _mm256_div_ps(ve5, vd5);
    192: __m256 vf6 = _mm256_div_ps(ve6, vd6);
    193: __m256 vf7 = _mm256_div_ps(ve7, vd7);
    274: __m256 vf = _mm256_div_ps(ve, vd);
    337: __m256 vf = _mm256_div_ps(ve, vd);

  avx2-rr1-p5-div-x72.c  (xnn_f32_sigmoid_ukernel__avx2_rr1_p5_div_x72)
    199: __m256 vf0 = _mm256_div_ps(ve0, vd0);
    200: __m256 vf1 = _mm256_div_ps(ve1, vd1);
    201: __m256 vf2 = _mm256_div_ps(ve2, vd2);
    202: __m256 vf3 = _mm256_div_ps(ve3, vd3);
    203: __m256 vf4 = _mm256_div_ps(ve4, vd4);
    204: __m256 vf5 = _mm256_div_ps(ve5, vd5);
    205: __m256 vf6 = _mm256_div_ps(ve6, vd6);
    206: __m256 vf7 = _mm256_div_ps(ve7, vd7);
    207: __m256 vf8 = _mm256_div_ps(ve8, vd8);
    291: __m256 vf = _mm256_div_ps(ve, vd);
    [all …]

  avx2-rr1-p5-div-x56.c  (xnn_f32_sigmoid_ukernel__avx2_rr1_p5_div_x56)
    173: __m256 vf0 = _mm256_div_ps(ve0, vd0);
    174: __m256 vf1 = _mm256_div_ps(ve1, vd1);
    175: __m256 vf2 = _mm256_div_ps(ve2, vd2);
    176: __m256 vf3 = _mm256_div_ps(ve3, vd3);
    177: __m256 vf4 = _mm256_div_ps(ve4, vd4);
    178: __m256 vf5 = _mm256_div_ps(ve5, vd5);
    179: __m256 vf6 = _mm256_div_ps(ve6, vd6);
    257: __m256 vf = _mm256_div_ps(ve, vd);
    320: __m256 vf = _mm256_div_ps(ve, vd);

  avx2-rr1-p5-div-x48.c  (xnn_f32_sigmoid_ukernel__avx2_rr1_p5_div_x48)
    160: __m256 vf0 = _mm256_div_ps(ve0, vd0);
    161: __m256 vf1 = _mm256_div_ps(ve1, vd1);
    162: __m256 vf2 = _mm256_div_ps(ve2, vd2);
    163: __m256 vf3 = _mm256_div_ps(ve3, vd3);
    164: __m256 vf4 = _mm256_div_ps(ve4, vd4);
    165: __m256 vf5 = _mm256_div_ps(ve5, vd5);
    240: __m256 vf = _mm256_div_ps(ve, vd);
    303: __m256 vf = _mm256_div_ps(ve, vd);

  avx2-rr1-p5-div-x40.c  (xnn_f32_sigmoid_ukernel__avx2_rr1_p5_div_x40)
    147: __m256 vf0 = _mm256_div_ps(ve0, vd0);
    148: __m256 vf1 = _mm256_div_ps(ve1, vd1);
    149: __m256 vf2 = _mm256_div_ps(ve2, vd2);
    150: __m256 vf3 = _mm256_div_ps(ve3, vd3);
    151: __m256 vf4 = _mm256_div_ps(ve4, vd4);
    223: __m256 vf = _mm256_div_ps(ve, vd);
    286: __m256 vf = _mm256_div_ps(ve, vd);

  avx2-rr1-p5-div-x32.c  (xnn_f32_sigmoid_ukernel__avx2_rr1_p5_div_x32)
    134: __m256 vf0 = _mm256_div_ps(ve0, vd0);
    135: __m256 vf1 = _mm256_div_ps(ve1, vd1);
    136: __m256 vf2 = _mm256_div_ps(ve2, vd2);
    137: __m256 vf3 = _mm256_div_ps(ve3, vd3);
    206: __m256 vf = _mm256_div_ps(ve, vd);
    269: __m256 vf = _mm256_div_ps(ve, vd);

  avx2-rr1-p5-div-x24.c  (xnn_f32_sigmoid_ukernel__avx2_rr1_p5_div_x24)
    121: __m256 vf0 = _mm256_div_ps(ve0, vd0);
    122: __m256 vf1 = _mm256_div_ps(ve1, vd1);
    123: __m256 vf2 = _mm256_div_ps(ve2, vd2);
    189: __m256 vf = _mm256_div_ps(ve, vd);
    252: __m256 vf = _mm256_div_ps(ve, vd);

  avx2-rr1-p5-div-x16.c  (xnn_f32_sigmoid_ukernel__avx2_rr1_p5_div_x16)
    108: __m256 vf0 = _mm256_div_ps(ve0, vd0);
    109: __m256 vf1 = _mm256_div_ps(ve1, vd1);
    172: __m256 vf = _mm256_div_ps(ve, vd);
    235: __m256 vf = _mm256_div_ps(ve, vd);

  avx2-rr1-p5-div-x8.c  (xnn_f32_sigmoid_ukernel__avx2_rr1_p5_div_x8)
    91: __m256 vf = _mm256_div_ps(ve, vd);
    154: __m256 vf = _mm256_div_ps(ve, vd);
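In these sigmoid kernels the division is the final reconstruction step: with ve approximating exp(z) and vd its corresponding denominator, ve / vd yields sigmoid(z) = exp(z) / (exp(z) + 1). A minimal sketch of that last step only (the vectorized polynomial exp approximation and sign handling in the real kernels are omitted; the helper name and the assumption vd = ve + 1 are illustrative):

#include <immintrin.h>

/* Illustrative final step matching the hits above, assuming vd = ve + 1:
   with ve ~ exp(z), ve / (ve + 1) is sigmoid(z). */
static inline __m256 sigmoid_reconstruct_sketch(__m256 ve) {
  const __m256 vone = _mm256_set1_ps(1.0f);
  const __m256 vd = _mm256_add_ps(ve, vone);  /* denominator: exp(z) + 1 */
  return _mm256_div_ps(ve, vd);               /* vf = ve / vd */
}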
/external/XNNPACK/src/f32-vbinary/

  vopc-avx.c.in
    22: $ "DIV": lambda x: "_mm256_div_ps(%s, vb)" % x,
    23: $ "RDIV": lambda x: "_mm256_div_ps(vb, %s)" % x,

  vop-avx.c.in
    22: $ "DIV": lambda x, y: "_mm256_div_ps(%s, %s)" % (x, y),
/external/XNNPACK/src/math/

  sigmoid-avx2-rr1-p5-div.c  (xnn_math_f32_sigmoid__avx2_rr1_p5_div)
    83: __m256 vf = _mm256_div_ps(ve, vd);

  sigmoid-avx2-rr2-p5-div.c  (xnn_math_f32_sigmoid__avx2_rr2_p5_div)
    86: __m256 vf = _mm256_div_ps(ve, vd);
/external/XNNPACK/src/f32-sigmoid/

  avx2-p5.c.in
    126: __m256 vf${ABC[N]} = _mm256_div_ps(ve${ABC[N]}, vd${ABC[N]});
    213: __m256 vf = _mm256_div_ps(ve, vd);
    293: __m256 vf = _mm256_div_ps(ve, vd);
/external/eigen/Eigen/src/Core/arch/AVX/

  MathFunctions.h
    424: return _mm256_div_ps(p8f_one, _mm256_sqrt_ps(x));

  Complex.h
    231: return Packet4cf(_mm256_div_ps(num.v, denom));

  PacketMath.h
    153: …INLINE Packet8f pdiv<Packet8f>(const Packet8f& a, const Packet8f& b) { return _mm256_div_ps(a,b); }
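The MathFunctions.h hit computes a full-precision reciprocal square root as 1/sqrt(x), spending a divide to get better accuracy than the hardware estimate. A minimal sketch contrasting the two forms (the helper names are illustrative, not Eigen's actual specializations):

#include <immintrin.h>

/* Full-precision (to rounding) reciprocal square root: one sqrt plus one divide. */
static inline __m256 rsqrt_exact_sketch(__m256 x) {
  return _mm256_div_ps(_mm256_set1_ps(1.0f), _mm256_sqrt_ps(x));
}

/* Hardware estimate (relative error on the order of 2^-12): no divide, much faster. */
static inline __m256 rsqrt_approx_sketch(__m256 x) {
  return _mm256_rsqrt_ps(x);
}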
/external/clang/test/CodeGen/

  avx-builtins.c  (test_mm256_div_ps)
    308: return _mm256_div_ps(A, B);