/external/swiftshader/third_party/LLVM/test/CodeGen/X86/ |
D | mmx-bitcast-to-i64.ll
     5  %u = tail call x86_mmx @llvm.x86.mmx.padd.q(x86_mmx %t, x86_mmx %t)
    11  %u = tail call x86_mmx @llvm.x86.mmx.padd.d(x86_mmx %t, x86_mmx %t)
    17  %u = tail call x86_mmx @llvm.x86.mmx.padd.w(x86_mmx %t, x86_mmx %t)
    23  %u = tail call x86_mmx @llvm.x86.mmx.padd.b(x86_mmx %t, x86_mmx %t)
    28  declare x86_mmx @llvm.x86.mmx.padd.b(x86_mmx, x86_mmx)
    29  declare x86_mmx @llvm.x86.mmx.padd.w(x86_mmx, x86_mmx)
    30  declare x86_mmx @llvm.x86.mmx.padd.d(x86_mmx, x86_mmx)
    31  declare x86_mmx @llvm.x86.mmx.padd.q(x86_mmx, x86_mmx)
|
D | 2010-04-23-mmx-movdq2q.ll
    53  %tmp3 = tail call x86_mmx @llvm.x86.mmx.padd.b(x86_mmx %tmp1, x86_mmx %tmp2)
    64  %tmp3 = tail call x86_mmx @llvm.x86.mmx.padd.w(x86_mmx %tmp1, x86_mmx %tmp2)
    75  %tmp3 = tail call x86_mmx @llvm.x86.mmx.padd.d(x86_mmx %tmp1, x86_mmx %tmp2)
    86  %tmp3 = tail call x86_mmx @llvm.x86.mmx.padd.q(x86_mmx %tmp1, x86_mmx %tmp2)
    91  declare x86_mmx @llvm.x86.mmx.padd.b(x86_mmx, x86_mmx)
    92  declare x86_mmx @llvm.x86.mmx.padd.w(x86_mmx, x86_mmx)
    93  declare x86_mmx @llvm.x86.mmx.padd.d(x86_mmx, x86_mmx)
    94  declare x86_mmx @llvm.x86.mmx.padd.q(x86_mmx, x86_mmx)
|
D | 2007-04-25-MMX-PADDQ.ll
    52  %tmp21 = call x86_mmx @llvm.x86.mmx.padd.q (x86_mmx %tmp19, x86_mmx %tmp14) ; <x86_mmx> [#uses=1]
    53  …%tmp22 = call x86_mmx @llvm.x86.mmx.padd.q (x86_mmx %tmp21, x86_mmx %sum.035.0) ; <x86_mmx> [#use…
    64  declare x86_mmx @llvm.x86.mmx.padd.q(x86_mmx, x86_mmx)
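The swiftshader LLVM tests above exercise the four x86 MMX packed-add intrinsics (padd.b/.w/.d/.q) together with i64 <-> x86_mmx bitcasts. As a rough orientation only, not taken from these test files: the C-level intrinsics below lower to the same operations, assuming an x86-64 compiler with the MMX and SSE2 headers available.

    // A minimal sketch, not from the test files themselves: C-level MMX
    // intrinsics that lower to the @llvm.x86.mmx.padd.* calls above.
    #include <mmintrin.h>   // __m64, _mm_add_pi8/pi16/pi32, _mm_empty
    #include <emmintrin.h>  // _mm_add_si64 (PADDQ requires SSE2)

    long long add_all_widths(long long x) {
      __m64 t = _mm_cvtsi64_m64(x);      // bitcast i64 -> x86_mmx
      t = _mm_add_pi8(t, t);             // @llvm.x86.mmx.padd.b
      t = _mm_add_pi16(t, t);            // @llvm.x86.mmx.padd.w
      t = _mm_add_pi32(t, t);            // @llvm.x86.mmx.padd.d
      t = _mm_add_si64(t, t);            // @llvm.x86.mmx.padd.q
      long long r = _mm_cvtm64_si64(t);  // bitcast x86_mmx -> i64
      _mm_empty();                       // EMMS: release MMX state before FP code
      return r;
    }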
|
/external/swiftshader/third_party/llvm-7.0/llvm/test/CodeGen/X86/ |
D | mmx-fold-zero.ll
    83  %7 = tail call x86_mmx @llvm.x86.mmx.padd.d(x86_mmx %5, x86_mmx %6)
    86  %10 = tail call x86_mmx @llvm.x86.mmx.padd.d(x86_mmx %8, x86_mmx %9)
    87  %11 = tail call x86_mmx @llvm.x86.mmx.padd.w(x86_mmx %5, x86_mmx %10)
    88  %12 = tail call x86_mmx @llvm.x86.mmx.padd.w(x86_mmx %6, x86_mmx %11)
    91  %15 = tail call x86_mmx @llvm.x86.mmx.padd.w(x86_mmx %14, x86_mmx %9)
    92  %16 = tail call x86_mmx @llvm.x86.mmx.padd.w(x86_mmx %15, x86_mmx %13)
    93  %17 = tail call x86_mmx @llvm.x86.mmx.padd.w(x86_mmx %16, x86_mmx %10)
    94  %18 = tail call x86_mmx @llvm.x86.mmx.padd.w(x86_mmx %17, x86_mmx %11)
    95  %19 = tail call x86_mmx @llvm.x86.mmx.padd.w(x86_mmx %18, x86_mmx %8)
    97  …%21 = tail call x86_mmx @llvm.x86.mmx.padd.w(x86_mmx %20, x86_mmx bitcast (double 0.000000e+00 to …
   [all …]
|
D | mmx-bitcast.ll
    12  %u = tail call x86_mmx @llvm.x86.mmx.padd.q(x86_mmx %t, x86_mmx %t)
    25  %u = tail call x86_mmx @llvm.x86.mmx.padd.d(x86_mmx %t, x86_mmx %t)
    38  %u = tail call x86_mmx @llvm.x86.mmx.padd.w(x86_mmx %t, x86_mmx %t)
    51  %u = tail call x86_mmx @llvm.x86.mmx.padd.b(x86_mmx %t, x86_mmx %t)
   108  declare x86_mmx @llvm.x86.mmx.padd.b(x86_mmx, x86_mmx)
   109  declare x86_mmx @llvm.x86.mmx.padd.w(x86_mmx, x86_mmx)
   110  declare x86_mmx @llvm.x86.mmx.padd.d(x86_mmx, x86_mmx)
   111  declare x86_mmx @llvm.x86.mmx.padd.q(x86_mmx, x86_mmx)
|
D | mmx-cvt.ll
    37  %7 = tail call x86_mmx @llvm.x86.mmx.padd.d(x86_mmx %6, x86_mmx %6)
    73  %7 = tail call x86_mmx @llvm.x86.mmx.padd.d(x86_mmx %6, x86_mmx %6)
   107  %5 = tail call x86_mmx @llvm.x86.mmx.padd.d(x86_mmx %4, x86_mmx %4)
   143  %7 = tail call x86_mmx @llvm.x86.mmx.padd.d(x86_mmx %6, x86_mmx %6)
   179  %7 = tail call x86_mmx @llvm.x86.mmx.padd.d(x86_mmx %6, x86_mmx %6)
   214  %6 = tail call x86_mmx @llvm.x86.mmx.padd.d(x86_mmx %5, x86_mmx %5)
   250  %7 = tail call x86_mmx @llvm.x86.mmx.padd.d(x86_mmx %6, x86_mmx %6)
   285  %4 = tail call x86_mmx @llvm.x86.mmx.padd.d(x86_mmx %3, x86_mmx %3)
   321  %4 = tail call x86_mmx @llvm.x86.mmx.padd.d(x86_mmx %3, x86_mmx %3)
   355  %4 = tail call x86_mmx @llvm.x86.mmx.padd.d(x86_mmx %3, x86_mmx %3)
   [all …]
|
D | 2011-10-30-padd.ll
     4  ;CHECK: padd
    14  ;CHECK: padd
|
D | mmx-build-vector.ll
    11  declare x86_mmx @llvm.x86.mmx.padd.d(x86_mmx, x86_mmx)
    39  %4 = tail call x86_mmx @llvm.x86.mmx.padd.d(x86_mmx %3, x86_mmx %3)
    62  %4 = tail call x86_mmx @llvm.x86.mmx.padd.d(x86_mmx %3, x86_mmx %3)
    96  %4 = tail call x86_mmx @llvm.x86.mmx.padd.d(x86_mmx %3, x86_mmx %3)
   123  %4 = tail call x86_mmx @llvm.x86.mmx.padd.d(x86_mmx %3, x86_mmx %3)
   157  %4 = tail call x86_mmx @llvm.x86.mmx.padd.d(x86_mmx %3, x86_mmx %3)
   198  %6 = tail call x86_mmx @llvm.x86.mmx.padd.d(x86_mmx %5, x86_mmx %5)
   233  %6 = tail call x86_mmx @llvm.x86.mmx.padd.d(x86_mmx %5, x86_mmx %5)
   258  %6 = tail call x86_mmx @llvm.x86.mmx.padd.d(x86_mmx %5, x86_mmx %5)
   285  %6 = tail call x86_mmx @llvm.x86.mmx.padd.d(x86_mmx %5, x86_mmx %5)
   [all …]
|
/external/eigen/Eigen/src/Core/arch/SSE/ |
D | MathFunctions.h
    66  Packet4f e = padd(Packet4f(_mm_cvtepi32_ps(emm0)), p4f_1);
    78  x = padd(x, tmp);
    96  y = padd(y, y1);
    99  x = padd(x, y);
   100  x = padd(x, y2);
   163  y = padd(y, p4f_1);
   324  x = padd(x, xmm1);
   325  x = padd(x, xmm2);
   326  x = padd(x, xmm3);
   338  y = padd(y, p4f_1);
   [all …]
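The MathFunctions.h hits above come from Eigen's vectorized log/exp/sin/cos routines, where padd combines polynomial pieces and adds constants such as p4f_1. A minimal illustrative sketch of the underlying shape, a Horner-style polynomial step built from packet multiply and add; the function name and coefficients are placeholders, not Eigen's cephes-derived constants.

    #include <xmmintrin.h>

    // Degree-3 Horner evaluation: each step is pmadd-shaped, padd(pmul(y, x), c).
    static __m128 poly3(__m128 x, float c0, float c1, float c2, float c3) {
      __m128 y = _mm_set1_ps(c3);
      y = _mm_add_ps(_mm_mul_ps(y, x), _mm_set1_ps(c2));
      y = _mm_add_ps(_mm_mul_ps(y, x), _mm_set1_ps(c1));
      y = _mm_add_ps(_mm_mul_ps(y, x), _mm_set1_ps(c0));
      return y;  // c3*x^3 + c2*x^2 + c1*x + c0, four lanes at once
    }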
|
D | Complex.h
    55  template<> EIGEN_STRONG_INLINE Packet2cf padd<Packet2cf>(const Packet2cf& a, const Packet2cf& b) { …
   181  { return padd(pmul(x,y),c); }
   199  { return padd(pmul(x,y),c); }
   217  { return padd(pmul(x,y),c); }
   235  { return padd(c, pmul(x,y)); }
   244  { return padd(c, pmul(x,y)); }
   301  template<> EIGEN_STRONG_INLINE Packet1cd padd<Packet1cd>(const Packet1cd& a, const Packet1cd& b) { …
   382  { return padd(pmul(x,y),c); }
   400  { return padd(pmul(x,y),c); }
   418  { return padd(pmul(x,y),c); }
   [all …]
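Line 55's truncated hit defines padd for Packet2cf, a packet of two complex<float> values; since complex addition is component-wise, it can reduce to a single vector float add over the interleaved (re, im, re, im) layout. A standalone analogue under that assumption, with a hypothetical Cplx2 type standing in for Eigen's Packet2cf:

    #include <xmmintrin.h>

    // Hypothetical stand-in for Eigen's Packet2cf: two complex<float> stored
    // interleaved as (re0, im0, re1, im1) in one __m128.
    struct Cplx2 { __m128 v; };

    // One vector add covers the real and imaginary lanes of both values.
    static Cplx2 cplx2_add(const Cplx2& a, const Cplx2& b) {
      return Cplx2{_mm_add_ps(a.v, b.v)};
    }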
|
/external/llvm/test/CodeGen/X86/ |
D | mmx-bitcast.ll
    11  %u = tail call x86_mmx @llvm.x86.mmx.padd.q(x86_mmx %t, x86_mmx %t)
    24  %u = tail call x86_mmx @llvm.x86.mmx.padd.d(x86_mmx %t, x86_mmx %t)
    37  %u = tail call x86_mmx @llvm.x86.mmx.padd.w(x86_mmx %t, x86_mmx %t)
    50  %u = tail call x86_mmx @llvm.x86.mmx.padd.b(x86_mmx %t, x86_mmx %t)
   103  declare x86_mmx @llvm.x86.mmx.padd.b(x86_mmx, x86_mmx)
   104  declare x86_mmx @llvm.x86.mmx.padd.w(x86_mmx, x86_mmx)
   105  declare x86_mmx @llvm.x86.mmx.padd.d(x86_mmx, x86_mmx)
   106  declare x86_mmx @llvm.x86.mmx.padd.q(x86_mmx, x86_mmx)
|
D | 2011-10-30-padd.ll
     4  ;CHECK: padd
    14  ;CHECK: padd
|
D | mmx-fold-load.ll
   148  %u = tail call x86_mmx @llvm.x86.mmx.padd.b(x86_mmx %t, x86_mmx %v)
   153  declare x86_mmx @llvm.x86.mmx.padd.b(x86_mmx, x86_mmx)
   165  %u = tail call x86_mmx @llvm.x86.mmx.padd.w(x86_mmx %t, x86_mmx %v)
   170  declare x86_mmx @llvm.x86.mmx.padd.w(x86_mmx, x86_mmx)
   181  %u = tail call x86_mmx @llvm.x86.mmx.padd.d(x86_mmx %t, x86_mmx %v)
   186  declare x86_mmx @llvm.x86.mmx.padd.d(x86_mmx, x86_mmx)
   197  %u = tail call x86_mmx @llvm.x86.mmx.padd.q(x86_mmx %t, x86_mmx %v)
   202  declare x86_mmx @llvm.x86.mmx.padd.q(x86_mmx, x86_mmx)
|
D | mmx-arith.ll
   249  %tmp3 = tail call x86_mmx @llvm.x86.mmx.padd.b(x86_mmx %tmp1, x86_mmx %tmp2)
   261  %tmp3 = tail call x86_mmx @llvm.x86.mmx.padd.w(x86_mmx %tmp1, x86_mmx %tmp2)
   273  %tmp3 = tail call x86_mmx @llvm.x86.mmx.padd.d(x86_mmx %tmp1, x86_mmx %tmp2)
   285  %tmp3 = tail call x86_mmx @llvm.x86.mmx.padd.q(x86_mmx %tmp1, x86_mmx %tmp2)
   290  declare x86_mmx @llvm.x86.mmx.padd.b(x86_mmx, x86_mmx)
   291  declare x86_mmx @llvm.x86.mmx.padd.w(x86_mmx, x86_mmx)
   292  declare x86_mmx @llvm.x86.mmx.padd.d(x86_mmx, x86_mmx)
   293  declare x86_mmx @llvm.x86.mmx.padd.q(x86_mmx, x86_mmx)
|
/external/eigen/Eigen/src/Core/arch/AltiVec/ |
D | Complex.h
    98  template<> EIGEN_STRONG_INLINE Packet2cf padd<Packet2cf>(const Packet2cf& a, const Packet2cf& b) { …
   119  return Packet2cf(padd<Packet4f>(v1, v2));
   148  b = padd<Packet4f>(a.v, b);
   163  b2 = padd<Packet4f>(b1, b2);
   197  { return padd(pmul(x,y),c); }
   208  { return padd(pmul(x,y),c); }
   219  { return padd(pmul(x,y),c); }
   230  { return padd(c, pmul(x,y)); }
   239  { return padd(c, pmul(x,y)); }
   250  return Packet2cf(pdiv(res.v, padd<Packet4f>(s, vec_perm(s, s, p16uc_COMPLEX32_REV))));
   [all …]
|
D | MathFunctions.h
   114  Packet4f e = padd(vec_ctf(emm0, 0), p4f_1);
   126  x = padd(x, tmp);
   144  y = padd(y, y1);
   147  x = padd(x, y);
   148  x = padd(x, y2);
   185  y = padd(y, p4f_1);
|
/external/eigen/bench/ |
D | benchVecAdd.cpp
   128  …internal::pstore(&a[i+2*PacketSize], internal::padd(internal::ploadu(&a[i+2*PacketSize]), internal… in benchVec()
   129  …internal::pstore(&a[i+3*PacketSize], internal::padd(internal::ploadu(&a[i+3*PacketSize]), internal… in benchVec()
   130  …internal::pstore(&a[i+4*PacketSize], internal::padd(internal::ploadu(&a[i+4*PacketSize]), internal… in benchVec()
   131  …internal::pstore(&a[i+5*PacketSize], internal::padd(internal::ploadu(&a[i+5*PacketSize]), internal… in benchVec()
   132  …internal::pstore(&a[i+6*PacketSize], internal::padd(internal::ploadu(&a[i+6*PacketSize]), internal… in benchVec()
   133  …internal::pstore(&a[i+7*PacketSize], internal::padd(internal::ploadu(&a[i+7*PacketSize]), internal… in benchVec()
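The truncated benchVec lines all repeat one pattern: load two unaligned packets, add them with internal::padd, store the result back. A condensed, hypothetical rewrite of that loop against Eigen 3's public headers (vec_add, its signature, and its alignment assumptions are mine, not the benchmark's):

    #include <Eigen/Core>
    using namespace Eigen;

    // a[i] += b[i] for n floats; assumes n is a multiple of PacketSize and
    // a is packet-aligned (pstore requires an aligned destination).
    void vec_add(float* a, const float* b, int n) {
      typedef internal::packet_traits<float>::type Packet;
      const int PacketSize = internal::packet_traits<float>::size;
      for (int i = 0; i < n; i += PacketSize)
        internal::pstore(&a[i],
                         internal::padd(internal::ploadu<Packet>(&a[i]),
                                        internal::ploadu<Packet>(&b[i])));
    }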
|
/external/eigen/Eigen/src/Core/arch/AVX/ |
D | Complex.h
    50  template<> EIGEN_STRONG_INLINE Packet4cf padd<Packet4cf>(const Packet4cf& a, const Packet4cf& b) { …
   139  return predux(padd(Packet2cf(_mm256_extractf128_ps(a.v,0)),
   177  { return padd(pmul(x,y),c); }
   188  { return padd(pmul(x,y),c); }
   199  { return padd(pmul(x,y),c); }
   210  { return padd(c, pmul(x,y)); }
   219  { return padd(c, pmul(x,y)); }
   272  template<> EIGEN_STRONG_INLINE Packet2cd padd<Packet2cd>(const Packet2cd& a, const Packet2cd& b) { …
   342  return predux(padd(Packet1cd(_mm256_extractf128_pd(a.v,0)),
   373  { return padd(pmul(x,y),c); }
   [all …]
|
D | MathFunctions.h
    65  Packet8f shift = _mm256_floor_ps(padd(z, p8f_one_over_four));
   171  x = padd(x, tmp);
   192  y = padd(y, y1);
   195  x = padd(x, y);
   196  x = padd(x, y2);
   258  y = padd(y, p8f_1);
   261  Packet8i emm0 = _mm256_cvttps_epi32(padd(m, p8f_127));
|
/external/eigen/Eigen/src/Core/arch/ZVector/ |
D | Complex.h
   134  …late<> EIGEN_STRONG_INLINE Packet2cf padd<Packet2cf>(const Packet2cf& a, const Packet2cf& b) { ret…
   135  template<> EIGEN_STRONG_INLINE Packet1cd padd<Packet1cd>(const Packet1cd& a, const Packet1cd& b) { …
   220  Packet1cd b = padd<Packet1cd>(a.cd[0], a.cd[1]);
   236  return padd<Packet2cf>(transpose.packet[0], transpose.packet[1]);
   276  { return padd(pmul(x,y),c); }
   287  { return padd(pmul(x,y),c); }
   298  { return padd(pmul(x,y),c); }
   309  { return padd(pmul(x,y),c); }
   320  { return padd(pmul(x,y),c); }
   331  { return padd(pmul(x,y),c); }
|
D | PacketMath.h
   498  template<> EIGEN_STRONG_INLINE Packet4i padd<Packet4i>(const Packet4i& a, const Packet4i& b) { retu…
   499  template<> EIGEN_STRONG_INLINE Packet4f padd<Packet4f>(const Packet4f& a, const Packet4f& b)
   506  template<> EIGEN_STRONG_INLINE Packet2d padd<Packet2d>(const Packet2d& a, const Packet2d& b) { retu…
   552  …i pmadd(const Packet4i& a, const Packet4i& b, const Packet4i& c) { return padd<Packet4i>(pmul<Pack…
   562  template<> EIGEN_STRONG_INLINE Packet4i plset<Packet4i>(const int& a) { return padd<Packet4i>(ps…
   563  template<> EIGEN_STRONG_INLINE Packet4f plset<Packet4f>(const float& a) { return padd<Packet4f>(ps…
   564  template<> EIGEN_STRONG_INLINE Packet2d plset<Packet2d>(const double& a) { return padd<Packet2d>(ps…
   720  sum = padd<Packet4i>(a, b);
   722  sum = padd<Packet4i>(sum, b);
   730  sum = padd<Packet2d>(a, b);
   [all …]
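Two patterns are visible in these PacketMath.h hits: pmadd falling back to padd(pmul(a, b), c), and plset(a) building {a, a+1, ...} by adding a constant ramp to a broadcast of a. A minimal sketch of the plset shape, written with SSE intrinsics for concreteness rather than the ZVector (s390x) types the file actually uses:

    #include <xmmintrin.h>

    // plset-style ramp: broadcast a, then one packet add with {0,1,2,3}.
    // my_plset(a) == {a, a+1, a+2, a+3} across the four float lanes.
    static __m128 my_plset(float a) {
      const __m128 ramp = _mm_set_ps(3.f, 2.f, 1.f, 0.f);  // lane order: 0,1,2,3
      return _mm_add_ps(_mm_set1_ps(a), ramp);
    }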
|
/external/eigen/Eigen/src/Core/arch/NEON/ |
D | Complex.h
    75  …late<> EIGEN_STRONG_INLINE Packet2cf padd<Packet2cf>(const Packet2cf& a, const Packet2cf& b) { ret…
   238  { return padd(pmul(x,y),c); }
   249  { return padd(pmul(x,y),c); }
   260  { return padd(pmul(x,y),c); }
   337  …late<> EIGEN_STRONG_INLINE Packet1cd padd<Packet1cd>(const Packet1cd& a, const Packet1cd& b) { ret…
   429  { return padd(pmul(x,y),c); }
   440  { return padd(pmul(x,y),c); }
   451  { return padd(pmul(x,y),c); }
   466  return Packet1cd(pdiv(res.v, padd<Packet2d>(s,rev_s)));
|
/external/eigen/Eigen/src/Core/arch/AVX512/ |
D | MathFunctions.h
    94  x = padd(x, tmp);
   115  y = padd(y, y1);
   118  x = padd(x, y);
   119  x = padd(x, y2);
   171  y = padd(y, p16f_1);
   174  Packet16i emm0 = _mm512_cvttps_epi32(padd(m, p16f_127));
|
/external/syzkaller/vendor/golang.org/x/text/unicode/norm/ |
D | normalize.go
   578  padd := 0
   582  add[padd] = info
   589  padd++
   604  for padd--; padd >= 0; padd-- {
   605  info = add[padd]
|
/external/eigen/Eigen/src/Geometry/arch/ |
D | Geometry_SSE.h
   113  t1 = padd(pmul(a_ww, b_xy), pmul(a_yy, b_zw));
   119  pstoret<double,Packet2d,ResAlignment>(&res.x(), padd(t1, pxor(mask,preverse(t2))));
   128  t2 = padd(pmul(a_zz, b_zw), pmul(a_xx, b_xy));
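These Geometry_SSE.h hits vectorize the quaternion product: each padd(pmul(...), pmul(...)) pairs up product terms of the Hamilton formula two lanes at a time. For reference, a scalar version of the product the SIMD code computes, assuming Eigen's (x, y, z, w) coefficient storage order:

    // Scalar reference for the Hamilton quaternion product r = a * b.
    struct Quat { double x, y, z, w; };

    Quat qmul(const Quat& a, const Quat& b) {
      Quat r;
      r.w = a.w * b.w - a.x * b.x - a.y * b.y - a.z * b.z;
      r.x = a.w * b.x + a.x * b.w + a.y * b.z - a.z * b.y;
      r.y = a.w * b.y + a.y * b.w + a.z * b.x - a.x * b.z;
      r.z = a.w * b.z + a.z * b.w + a.x * b.y - a.y * b.x;
      return r;
    }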
|