/external/eigen/test/ |
D | vectorization_logic.cpp |
      102   PacketSize = internal::unpacket_traits<PacketType>::size,  enumerator
      108   typedef Matrix<Scalar,PacketSize,1> Vector1;  in run()
      111   typedef Matrix<Scalar,PacketSize,PacketSize> Matrix11;  in run()
      112   typedef Matrix<Scalar,2*PacketSize,2*PacketSize> Matrix22;  in run()
      113   … Matrix<Scalar,(Matrix11::Flags&RowMajorBit)?16:4*PacketSize,(Matrix11::Flags&RowMajorBit)?4*Packe…  in run()
      114   … Matrix<Scalar,(Matrix11::Flags&RowMajorBit)?16:4*PacketSize,(Matrix11::Flags&RowMajorBit)?4*Packe…  in run()
      115   typedef Matrix<Scalar,4*PacketSize,4*PacketSize,ColMajor> Matrix44c;  in run()
      116   typedef Matrix<Scalar,4*PacketSize,4*PacketSize,RowMajor> Matrix44r;  in run()
      119   (PacketSize==8 ? 4 : PacketSize==4 ? 2 : PacketSize==2 ? 1 : /*PacketSize==1 ?*/ 1),  in run()
      120   (PacketSize==8 ? 2 : PacketSize==4 ? 2 : PacketSize==2 ? 2 : /*PacketSize==1 ?*/ 1)  in run()
      [all …]
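The test sizes all of its fixture matrices in whole SIMD packets. A minimal sketch of the same query, assuming only <Eigen/Core>; note that packet_traits/unpacket_traits live in Eigen's internal namespace and are not a stable public API:

    #include <Eigen/Core>
    #include <iostream>

    int main() {
      // Compile-time SIMD width Eigen selects for float on this target
      // (e.g. 4 with SSE, 8 with AVX, 1 if vectorization is disabled).
      enum { PacketSize = Eigen::internal::packet_traits<float>::size };

      // Fixed-size types sized in whole packets, mirroring the typedefs above.
      typedef Eigen::Matrix<float, PacketSize, 1>          Vector1;
      typedef Eigen::Matrix<float, PacketSize, PacketSize> Matrix11;

      std::cout << "PacketSize = " << int(PacketSize)
                << ", Matrix11 is " << int(Matrix11::RowsAtCompileTime)
                << "x" << int(Matrix11::ColsAtCompileTime) << "\n";
      return 0;
    }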
|
D | packetmath.cpp |
      65    for (int i=0; i<PacketSize; ++i) \
      68    VERIFY(areApprox(ref, data2, PacketSize) && #POP); \
      93    for (int i=0; i<PacketSize; ++i) \
      96    VERIFY(areApprox(ref, data2, PacketSize) && #POP); \
      101   for (int i=0; i<PacketSize; ++i) \
      102   ref[i] = REFOP(data1[i], data1[i+PacketSize]); \
      103   h.store(data2, POP(h.load(data1),h.load(data1+PacketSize))); \
      104   VERIFY(areApprox(ref, data2, PacketSize) && #POP); \
      117   const int PacketSize = PacketTraits::size;  in packetmath()  local
      120   const int max_size = PacketSize > 4 ? PacketSize : 4;  in packetmath()
      [all …]
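Each packetmath check computes a scalar reference, runs the packet op, and compares PacketSize lanes; sparse_matmul_op_test.cc below reuses the same harness. A self-contained sketch of that pattern for padd, using unaligned loads/stores so the buffers need no special alignment (internal API, subject to change):

    #include <Eigen/Core>
    #include <cassert>
    #include <cmath>

    int main() {
      using namespace Eigen::internal;
      typedef packet_traits<float>::type Packet;   // e.g. Packet4f / Packet8f
      const int PacketSize = packet_traits<float>::size;

      float data1[32], data2[16], ref[16];         // 16 >= any current PacketSize
      for (int i = 0; i < 2 * PacketSize; ++i) data1[i] = 0.5f * float(i);

      for (int i = 0; i < PacketSize; ++i)         // scalar reference
        ref[i] = data1[i] + data1[i + PacketSize];

      pstoreu(data2, padd(ploadu<Packet>(data1),   // packet result
                          ploadu<Packet>(data1 + PacketSize)));

      for (int i = 0; i < PacketSize; ++i)
        assert(std::abs(ref[i] - data2[i]) < 1e-6f);
      return 0;
    }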
|
D | adjoint.cpp |
      78    const Index PacketSize = internal::packet_traits<Scalar>::size;  in adjoint()  local
      124   if(PacketSize<m3.rows() && PacketSize<m3.cols())  in adjoint()
      127   Index i = internal::random<Index>(0,m3.rows()-PacketSize);  in adjoint()
      128   Index j = internal::random<Index>(0,m3.cols()-PacketSize);  in adjoint()
      129   m3.template block<PacketSize,PacketSize>(i,j).transposeInPlace();  in adjoint()
      130   …VERIFY_IS_APPROX( (m3.template block<PacketSize,PacketSize>(i,j)), (m1.template block<PacketSize,P…  in adjoint()
      131   m3.template block<PacketSize,PacketSize>(i,j).transposeInPlace();  in adjoint()
|
D | product_extra.cpp |
      127   const int PacketSize = internal::packet_traits<Scalar>::size;  in zero_sized_objects()  local
      128   const int PacketSize1 = PacketSize>1 ? PacketSize-1 : 1;  in zero_sized_objects()
      150   Matrix<Scalar,PacketSize,0> a;  in zero_sized_objects()
      152   Matrix<Scalar,PacketSize,1> res;  in zero_sized_objects()
      153   VERIFY_IS_APPROX( (res=a*b), MatrixType::Zero(PacketSize,1) );  in zero_sized_objects()
      154   VERIFY_IS_APPROX( (res=a.lazyProduct(b)), MatrixType::Zero(PacketSize,1) );  in zero_sized_objects()
      166   Matrix<Scalar,PacketSize,Dynamic> a(PacketSize,0);  in zero_sized_objects()
      168   Matrix<Scalar,PacketSize,1> res;  in zero_sized_objects()
      169   VERIFY_IS_APPROX( (res=a*b), MatrixType::Zero(PacketSize,1) );  in zero_sized_objects()
      170   VERIFY_IS_APPROX( (res=a.lazyProduct(b)), MatrixType::Zero(PacketSize,1) );  in zero_sized_objects()
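The zero_sized_objects() checks rely on Eigen defining a product with inner dimension 0 as the zero matrix. A tiny standalone version of that check, public API only:

    #include <Eigen/Dense>
    #include <iostream>

    int main() {
      Eigen::Matrix<float, 4, 0> a;            // 4 x 0
      Eigen::Matrix<float, 0, 1> b;            // 0 x 1
      Eigen::Matrix<float, 4, 1> res = a * b;  // inner dimension 0 -> all zeros
      std::cout << res.isZero() << "\n";       // prints 1
      return 0;
    }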
|
/external/tensorflow/tensorflow/core/kernels/ |
D | sparse_matmul_op_test.cc |
      261   : PacketSize(Eigen::internal::packet_traits<float>::size) {  in SparseMatmulOpTest()
      265   data1[i] = internal::random<float>() / RealFloat(PacketSize);  in SparseMatmulOpTest()
      266   data2[i] = internal::random<float>() / RealFloat(PacketSize);  in SparseMatmulOpTest()
      267   data3[i] = internal::random<float>() / RealFloat(PacketSize);  in SparseMatmulOpTest()
      270   data3[i] = internal::random<float>() / RealFloat(PacketSize);  in SparseMatmulOpTest()
      310   const int PacketSize;  member in Eigen::internal::SparseMatmulOpTest
      323   for (int i = 0; i < PacketSize; ++i) ref[i] = data1[0];  in TEST_F()
      326   ASSERT_TRUE(areApprox(ref, data2, PacketSize));  in TEST_F()
      327   if (PacketSize > 1) {  in TEST_F()
      328   for (int i = 0; i < PacketSize; ++i) ref[i] = data1[1];  in TEST_F()
      [all …]
|
/external/eigen/unsupported/Eigen/CXX11/src/Tensor/ |
D | TensorExecutor.h |
      61    …const int PacketSize = unpacket_traits<typename TensorEvaluator<Expression, DefaultDevice>::Packet…  variable
      65    const Index UnrolledSize = (size / (4 * PacketSize)) * 4 * PacketSize;
      66    for (Index i = 0; i < UnrolledSize; i += 4*PacketSize) {
      68    evaluator.evalPacket(i + j * PacketSize);
      71    const Index VectorizedSize = (size / PacketSize) * PacketSize;
      72    for (Index i = UnrolledSize; i < VectorizedSize; i += PacketSize) {
      104   static const int PacketSize = unpacket_traits<typename Evaluator::PacketReturnType>::size;
      110   if (last - first >= PacketSize) {
      111   eigen_assert(first % PacketSize == 0);
      112   Index last_chunk_offset = last - 4 * PacketSize;
      [all …]
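The executor's inner loop has three phases: a 4x-unrolled packet loop, a plain packet loop for what remains, and a scalar tail. A sketch of the same structure on a raw array; the helper name scale_in_place is ours, and it uses unaligned accesses where the real executor evaluates packets through the evaluator. benchVecAdd.cpp further down is a hand-written instance of the same unrolling:

    #include <Eigen/Core>

    void scale_in_place(float* data, Eigen::Index size, float factor) {
      using namespace Eigen::internal;
      typedef packet_traits<float>::type Packet;
      const Eigen::Index PacketSize = packet_traits<float>::size;
      const Packet pfactor = pset1<Packet>(factor);

      // Phase 1: 4x-unrolled packet loop.
      const Eigen::Index UnrolledSize = (size / (4 * PacketSize)) * 4 * PacketSize;
      for (Eigen::Index i = 0; i < UnrolledSize; i += 4 * PacketSize)
        for (Eigen::Index j = 0; j < 4; ++j)
          pstoreu(data + i + j * PacketSize,
                  pmul(ploadu<Packet>(data + i + j * PacketSize), pfactor));

      // Phase 2: one packet at a time.
      const Eigen::Index VectorizedSize = (size / PacketSize) * PacketSize;
      for (Eigen::Index i = UnrolledSize; i < VectorizedSize; i += PacketSize)
        pstoreu(data + i, pmul(ploadu<Packet>(data + i), pfactor));

      // Phase 3: scalar tail.
      for (Eigen::Index i = VectorizedSize; i < size; ++i)
        data[i] *= factor;
    }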
|
D | TensorStriding.h |
      109   static const int PacketSize = internal::unpacket_traits<PacketReturnType>::size;
      167   EIGEN_STATIC_ASSERT((PacketSize > 1), YOU_MADE_A_PROGRAMMING_MISTAKE)
      168   eigen_assert(index+PacketSize-1 < dimensions().TotalSize());
      171   Index indices[] = {index, index + PacketSize - 1};
      195   if (inputIndices[1] - inputIndices[0] == PacketSize - 1) {
      200   EIGEN_ALIGN_MAX typename internal::remove_const<CoeffReturnType>::type values[PacketSize];
      202   values[PacketSize-1] = m_impl.coeff(inputIndices[1]);
      203   for (int i = 1; i < PacketSize-1; ++i) {
      222   TensorOpCost(0, 0, compute_cost, vectorized, PacketSize);
      282   static const int PacketSize = internal::unpacket_traits<PacketReturnType>::size;
      [all …]
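TensorStriding and the evaluators listed after it (reverse, chipping, shuffling, broadcasting, patch, padding, inflation) share one fallback: when the PacketSize coefficients are not contiguous in the input, gather them one scalar at a time into a small aligned buffer and issue a single packet load from it. A sketch of just that fallback, where coeff is a stand-in for the evaluator's scalar accessor:

    #include <Eigen/Core>

    template <typename Scalar, typename CoeffFn>
    typename Eigen::internal::packet_traits<Scalar>::type
    gather_packet(CoeffFn coeff, Eigen::Index index) {
      using namespace Eigen::internal;
      typedef typename packet_traits<Scalar>::type Packet;
      const int PacketSize = packet_traits<Scalar>::size;

      EIGEN_ALIGN_MAX Scalar values[PacketSize];   // max-aligned stack buffer
      for (int i = 0; i < PacketSize; ++i)
        values[i] = coeff(index + i);              // non-contiguous scalar reads
      return pload<Packet>(values);                // one aligned packet load
    }

The contiguous fast path (inputIndices[1] - inputIndices[0] == PacketSize - 1 above) skips the buffer entirely and loads the packet straight from the input.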
|
D | TensorReverse.h |
      110   static const int PacketSize = internal::unpacket_traits<PacketReturnType>::size;
      198   EIGEN_STATIC_ASSERT((PacketSize > 1), YOU_MADE_A_PROGRAMMING_MISTAKE)
      199   eigen_assert(index+PacketSize-1 < dimensions().TotalSize());
      204   values[PacketSize];
      205   for (int i = 0; i < PacketSize; ++i) {
      222   TensorOpCost(0, 0, compute_cost, false /* vectorized */, PacketSize);
      261   static const int PacketSize = internal::unpacket_traits<PacketReturnType>::size;
      272   EIGEN_STATIC_ASSERT((PacketSize > 1), YOU_MADE_A_PROGRAMMING_MISTAKE)
      273   eigen_assert(index+PacketSize-1 < dimensions().TotalSize());
      276   EIGEN_ALIGN_MAX CoeffReturnType values[PacketSize];
      [all …]
|
D | TensorChipping.h |
      139   static const int PacketSize = internal::unpacket_traits<PacketReturnType>::size;
      205   EIGEN_STATIC_ASSERT((PacketSize > 1), YOU_MADE_A_PROGRAMMING_MISTAKE)
      206   eigen_assert(index+PacketSize-1 < dimensions().TotalSize());
      213   EIGEN_ALIGN_MAX typename internal::remove_const<CoeffReturnType>::type values[PacketSize];
      214   for (int i = 0; i < PacketSize; ++i) {
      228   if (rem + PacketSize <= m_stride) {
      233   EIGEN_ALIGN_MAX typename internal::remove_const<CoeffReturnType>::type values[PacketSize];
      234   for (int i = 0; i < PacketSize; ++i) {
      263   TensorOpCost(0, 0, cost, vectorized, PacketSize);
      324   static const int PacketSize = internal::unpacket_traits<PacketReturnType>::size;
      [all …]
|
D | TensorShuffling.h |
      109   static const int PacketSize = internal::unpacket_traits<PacketReturnType>::size;
      169   EIGEN_STATIC_ASSERT((PacketSize > 1), YOU_MADE_A_PROGRAMMING_MISTAKE)
      170   eigen_assert(index+PacketSize-1 < dimensions().TotalSize());
      172   EIGEN_ALIGN_MAX typename internal::remove_const<CoeffReturnType>::type values[PacketSize];
      173   for (int i = 0; i < PacketSize; ++i) {
      185   TensorOpCost(0, 0, compute_cost, false /* vectorized */, PacketSize);
      231   static const int PacketSize = internal::unpacket_traits<PacketReturnType>::size;
      251   EIGEN_STATIC_ASSERT((PacketSize > 1), YOU_MADE_A_PROGRAMMING_MISTAKE)
      253   EIGEN_ALIGN_MAX typename internal::remove_const<CoeffReturnType>::type values[PacketSize];
      255   for (int i = 0; i < PacketSize; ++i) {
|
D | TensorBroadcasting.h |
      106   static const int PacketSize = internal::unpacket_traits<PacketReturnType>::size;
      250   EIGEN_STATIC_ASSERT((PacketSize > 1), YOU_MADE_A_PROGRAMMING_MISTAKE)
      251   eigen_assert(index+PacketSize-1 < dimensions().TotalSize());
      286   if (innermostLoc + PacketSize <= m_impl.dimensions()[0]) {
      289   EIGEN_ALIGN_MAX typename internal::remove_const<CoeffReturnType>::type values[PacketSize];
      291   for (int i = 1; i < PacketSize; ++i) {
      302   EIGEN_STATIC_ASSERT((PacketSize > 1), YOU_MADE_A_PROGRAMMING_MISTAKE)
      303   eigen_assert(index+PacketSize-1 < dimensions().TotalSize());
      338   if (innermostLoc + PacketSize <= m_impl.dimensions()[NumDims-1]) {
      341   EIGEN_ALIGN_MAX typename internal::remove_const<CoeffReturnType>::type values[PacketSize];
      [all …]
|
D | TensorPatch.h |
      90    static const int PacketSize = internal::unpacket_traits<PacketReturnType>::size;
      187   EIGEN_STATIC_ASSERT((PacketSize > 1), YOU_MADE_A_PROGRAMMING_MISTAKE)
      188   eigen_assert(index+PacketSize-1 < dimensions().TotalSize());
      191   Index indices[2] = {index, index + PacketSize - 1};
      232   if (inputIndices[1] - inputIndices[0] == PacketSize - 1) {
      237   EIGEN_ALIGN_MAX CoeffReturnType values[PacketSize];
      239   values[PacketSize-1] = m_impl.coeff(inputIndices[1]);
      240   for (int i = 1; i < PacketSize-1; ++i) {
      253   TensorOpCost(0, 0, compute_cost, vectorized, PacketSize);
|
D | TensorPadding.h |
      93    static const int PacketSize = internal::unpacket_traits<PacketReturnType>::size;
      260   EIGEN_STATIC_ASSERT((PacketSize > 1), YOU_MADE_A_PROGRAMMING_MISTAKE)
      261   eigen_assert(index+PacketSize-1 < dimensions().TotalSize());
      267   const Index last = index + PacketSize - 1;
      292   const Index last = index + PacketSize - 1;
      317   EIGEN_STATIC_ASSERT((PacketSize > 1), YOU_MADE_A_PROGRAMMING_MISTAKE)
      318   eigen_assert(index+PacketSize-1 < dimensions().TotalSize());
      325   const Index last = index + PacketSize - 1;
      350   const Index last = index + PacketSize - 1;
      375   EIGEN_ALIGN_MAX typename internal::remove_const<CoeffReturnType>::type values[PacketSize];
      [all …]
|
D | TensorInflation.h |
      87    static const int PacketSize = internal::unpacket_traits<PacketReturnType>::size;
      192   EIGEN_STATIC_ASSERT((PacketSize > 1), YOU_MADE_A_PROGRAMMING_MISTAKE)
      193   eigen_assert(index+PacketSize-1 < dimensions().TotalSize());
      195   EIGEN_ALIGN_MAX typename internal::remove_const<CoeffReturnType>::type values[PacketSize];
      196   for (int i = 0; i < PacketSize; ++i) {
      213   compute_cost, vectorized, PacketSize);
|
D | TensorEvaluator.h |
      249   static const int PacketSize = internal::unpacket_traits<PacketReturnType>::size;
      314   static const int PacketSize = internal::unpacket_traits<PacketReturnType>::size;
      341   TensorOpCost(0, 0, functor_cost, vectorized, PacketSize);
      387   static const int PacketSize = internal::unpacket_traits<PacketReturnType>::size;
      421   TensorOpCost(0, 0, functor_cost, vectorized, PacketSize);
      482   static const int PacketSize = internal::unpacket_traits<PacketReturnType>::size;
      521   TensorOpCost(0, 0, functor_cost, vectorized, PacketSize);
      572   static const int PacketSize = internal::unpacket_traits<PacketReturnType>::size;
      600   internal::Selector<PacketSize> select;
      601   for (Index i = 0; i < PacketSize; ++i) {
|
/external/eigen/bench/ |
D | benchVecAdd.cpp |
      94    const int PacketSize = internal::packet_traits<Scalar>::size;  in benchVec()  local
      97    for (int i=0; i<size; i+=PacketSize*8)  in benchVec()
      128   …internal::pstore(&a[i+2*PacketSize], internal::padd(internal::ploadu(&a[i+2*PacketSize]), internal…  in benchVec()
      129   …internal::pstore(&a[i+3*PacketSize], internal::padd(internal::ploadu(&a[i+3*PacketSize]), internal…  in benchVec()
      130   …internal::pstore(&a[i+4*PacketSize], internal::padd(internal::ploadu(&a[i+4*PacketSize]), internal…  in benchVec()
      131   …internal::pstore(&a[i+5*PacketSize], internal::padd(internal::ploadu(&a[i+5*PacketSize]), internal…  in benchVec()
      132   …internal::pstore(&a[i+6*PacketSize], internal::padd(internal::ploadu(&a[i+6*PacketSize]), internal…  in benchVec()
      133   …internal::pstore(&a[i+7*PacketSize], internal::padd(internal::ploadu(&a[i+7*PacketSize]), internal…  in benchVec()
|
/external/eigen/Eigen/src/SparseLU/ |
D | SparseLU_gemm_kernel.h |
      33    PacketSize = packet_traits<Scalar>::size,  in sparselu_gemm()  enumerator
      38    SM = PM*PacketSize // step along M  in sparselu_gemm()
      44    …eigen_internal_assert(((lda%PacketSize)==0) && ((ldc%PacketSize)==0) && (i0==internal::first_defau…  in sparselu_gemm()
      62    …Index actual_b_end2 = (actual_b/PacketSize)*PacketSize; // actual number of rows suitable for ve…  in sparselu_gemm()
      109   c0 = pload<Packet>(C0+i+(I)*PacketSize); \  in sparselu_gemm()
      110   c1 = pload<Packet>(C1+i+(I)*PacketSize); \  in sparselu_gemm()
      113   a0 = pload<Packet>(A0+i+(I+1)*PacketSize); \  in sparselu_gemm()
      116   a1 = pload<Packet>(A1+i+(I+1)*PacketSize); \  in sparselu_gemm()
      119   if(RK==4){ a2 = pload<Packet>(A2+i+(I+1)*PacketSize); }\  in sparselu_gemm()
      122   if(RK==4){ a3 = pload<Packet>(A3+i+(I+1)*PacketSize); }\  in sparselu_gemm()
      [all …]
|
D | SparseLU_kernel_bmod.h |
      65    const Index PacketSize = internal::packet_traits<Scalar>::size;  in run()  local
      66    Index ldl = internal::first_multiple(nrow, PacketSize);  in run()
      68    Index aligned_offset = internal::first_default_aligned(tempv.data()+segsize, PacketSize);  in run()
      69    …Index aligned_with_B_offset = (PacketSize-internal::first_default_aligned(B.data(), PacketSize))%P…  in run()
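Both bmod kernels here (and panel_bmod below) pad leading dimensions up to a packet multiple and measure how far a pointer sits from the next packet boundary. A small demo of the two helpers; both are internal, and their exact signatures may differ between Eigen versions:

    #include <Eigen/Core>
    #include <iostream>

    int main() {
      using namespace Eigen::internal;
      const Eigen::Index PacketSize = packet_traits<double>::size;

      // Smallest multiple of PacketSize >= 13: used as a leading dimension
      // so every column of a work buffer starts on a packet boundary.
      const Eigen::Index ldl = first_multiple<Eigen::Index>(13, PacketSize);

      double buffer[32];
      // Index of the first element of buffer usable for aligned packet loads
      // (0 when the array itself is already sufficiently aligned).
      const Eigen::Index off = first_default_aligned(buffer, 32);

      std::cout << "ldl = " << ldl << ", offset = " << off << "\n";
      return 0;
    }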
|
D | SparseLU_panel_bmod.h |
      69    const Index PacketSize = internal::packet_traits<Scalar>::size;  in panel_bmod()  local
      104   Index ldu = internal::first_multiple<Index>(u_rows, PacketSize);  in panel_bmod()
      147   Index ldl = internal::first_multiple<Index>(nrow, PacketSize);  in panel_bmod()
      148   … Index offset = (PacketSize-internal::first_default_aligned(B.data(), PacketSize)) % PacketSize;  in panel_bmod()
|
/external/eigen/Eigen/src/Jacobi/ |
D | Jacobi.h |
      306   PacketSize = packet_traits<Scalar>::size,  in apply_rotation_in_the_plane()  enumerator
      328   (PacketSize == OtherPacketSize) &&  in apply_rotation_in_the_plane()
      329   ((incrx==1 && incry==1) || PacketSize == 1))  in apply_rotation_in_the_plane()
      335   Index alignedEnd = alignedStart + ((size-alignedStart)/PacketSize)*PacketSize;  in apply_rotation_in_the_plane()
      355   for(Index i=alignedStart; i<alignedEnd; i+=PacketSize)  in apply_rotation_in_the_plane()
      361   px += PacketSize;  in apply_rotation_in_the_plane()
      362   py += PacketSize;  in apply_rotation_in_the_plane()
      367   … Index peelingEnd = alignedStart + ((size-alignedStart)/(Peeling*PacketSize))*(Peeling*PacketSize);  in apply_rotation_in_the_plane()
      368   for(Index i=alignedStart; i<peelingEnd; i+=Peeling*PacketSize)  in apply_rotation_in_the_plane()
      371   Packet xi1 = ploadu<Packet>(px+PacketSize);  in apply_rotation_in_the_plane()
      [all …]
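The scalar form of the rotation is x <- c*x + s*y, y <- c*y - s*x; the code above applies it one packet (and, in the peeled loop, two packets) at a time. A simplified sketch for real scalars with unit strides, using unaligned accesses where Eigen first advances to an aligned start; the function name apply_rotation is ours:

    #include <Eigen/Core>

    void apply_rotation(float* x, float* y, Eigen::Index size, float c, float s) {
      using namespace Eigen::internal;
      typedef packet_traits<float>::type Packet;
      const Eigen::Index PacketSize = packet_traits<float>::size;
      const Packet pc = pset1<Packet>(c), ps = pset1<Packet>(s);

      const Eigen::Index vecEnd = (size / PacketSize) * PacketSize;
      for (Eigen::Index i = 0; i < vecEnd; i += PacketSize) {
        Packet xi = ploadu<Packet>(x + i);
        Packet yi = ploadu<Packet>(y + i);
        pstoreu(x + i, padd(pmul(pc, xi), pmul(ps, yi)));
        pstoreu(y + i, psub(pmul(pc, yi), pmul(ps, xi)));
      }
      for (Eigen::Index i = vecEnd; i < size; ++i) {  // scalar tail
        const float xi = x[i], yi = y[i];
        x[i] = c * xi + s * yi;
        y[i] = c * yi - s * xi;
      }
    }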
|
/external/eigen/Eigen/src/Core/products/ |
D | GeneralBlockPanelKernel.h |
      1700  enum { PacketSize = packet_traits<Scalar>::size };
      1706  eigen_assert( ((Pack1%PacketSize)==0 && Pack1<=4*PacketSize) || (Pack1<=4) );
      1710  const Index peeled_mc3 = Pack1>=3*PacketSize ? (rows/(3*PacketSize))*(3*PacketSize) : 0;
      1711  …const Index peeled_mc2 = Pack1>=2*PacketSize ? peeled_mc3+((rows-peeled_mc3)/(2*PacketSize))*(2*Pa…
      1712  const Index peeled_mc1 = Pack1>=1*PacketSize ? (rows/(1*PacketSize))*(1*PacketSize) : 0;
      1713  const Index peeled_mc0 = Pack2>=1*PacketSize ? peeled_mc1
      1719  if(Pack1>=3*PacketSize)
      1721  for(; i<peeled_mc3; i+=3*PacketSize)
      1723  if(PanelMode) count += (3*PacketSize) * offset;
      1728  A = lhs.loadPacket(i+0*PacketSize, k);
      [all …]
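gemm_pack_lhs copies strips of 3, 2, or 1 packet-widths of rows into contiguous storage so the GEBP kernel can stream them linearly; SelfadjointMatrixMatrix.h below drives the same pack<N*PacketSize> helpers. A sketch of the one-packet-wide case for a column-major LHS; in Eigen the destination buffer is guaranteed aligned (pstore), here pstoreu keeps the sketch self-contained, and pack_strip is our name:

    #include <Eigen/Core>

    template <typename Scalar>
    void pack_strip(Scalar* blockA, const Scalar* lhs, Eigen::Index lhsStride,
                    Eigen::Index depth, Eigen::Index i0) {
      using namespace Eigen::internal;
      typedef typename packet_traits<Scalar>::type Packet;
      const Eigen::Index PacketSize = packet_traits<Scalar>::size;

      Eigen::Index count = 0;
      for (Eigen::Index k = 0; k < depth; ++k) {
        // One packet of rows [i0, i0+PacketSize) taken from column k.
        Packet A = ploadu<Packet>(lhs + i0 + k * lhsStride);
        pstoreu(blockA + count, A);
        count += PacketSize;
      }
    }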
|
D | SelfadjointMatrixMatrix.h |
      48    enum { PacketSize = packet_traits<Scalar>::size };  in operator()  enumerator
      53    const Index peeled_mc3 = Pack1>=3*PacketSize ? (rows/(3*PacketSize))*(3*PacketSize) : 0;  in operator()
      54    …const Index peeled_mc2 = Pack1>=2*PacketSize ? peeled_mc3+((rows-peeled_mc3)/(2*PacketSize))*(2*Pa…  in operator()
      55    const Index peeled_mc1 = Pack1>=1*PacketSize ? (rows/(1*PacketSize))*(1*PacketSize) : 0;  in operator()
      57    if(Pack1>=3*PacketSize)  in operator()
      58    for(Index i=0; i<peeled_mc3; i+=3*PacketSize)  in operator()
      59    pack<3*PacketSize>(blockA, lhs, cols, i, count);  in operator()
      61    if(Pack1>=2*PacketSize)  in operator()
      62    for(Index i=peeled_mc3; i<peeled_mc2; i+=2*PacketSize)  in operator()
      63    pack<2*PacketSize>(blockA, lhs, cols, i, count);  in operator()
      [all …]
|
D | SelfadjointMatrixVector.h |
      48    const Index PacketSize = sizeof(Packet)/sizeof(Scalar);  in run()  local
      89    Index alignedEnd = alignedStart + ((endi-alignedStart)/(PacketSize))*(PacketSize);  in run()
      116   for (Index i=alignedStart; i<alignedEnd; i+=PacketSize)  in run()
      118   Packet A0i = ploadu<Packet>(a0It); a0It += PacketSize;  in run()
      119   Packet A1i = ploadu<Packet>(a1It); a1It += PacketSize;  in run()
      120   … Packet Bi = ploadu<Packet>(rhsIt); rhsIt += PacketSize; // FIXME should be aligned in most cases  in run()
      126   pstore(resIt,Xi); resIt += PacketSize;  in run()
|
/external/eigen/Eigen/src/Core/ |
D | Redux.h |
      32    PacketSize = unpacket_traits<PacketType>::size,  enumerator
      42    MaySliceVectorize = bool(MightVectorize) && int(InnerMaxSize)>=3*PacketSize
      56    …lingLimit = EIGEN_UNROLLING_LIMIT * (int(Traversal) == int(DefaultTraversal) ? 1 : int(PacketSize))
      72    EIGEN_DEBUG_VAR(PacketSize)  in debug()
      141   PacketSize = redux_traits<Func, Derived>::PacketSize,
      160   index = Start * redux_traits<Func, Derived>::PacketSize,
      219   const Index packetSize = redux_traits<Func, Derived>::PacketSize;
      280   packetSize = redux_traits<Func, Derived>::PacketSize
      313   PacketSize = redux_traits<Func, Derived>::PacketSize,
      315   VectorizedSize = (Size / PacketSize) * PacketSize
      [all …]
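The vectorized reduction accumulates whole packets and only at the end collapses the lanes with predux, then folds in the scalar remainder; that split is exactly what VectorizedSize = (Size / PacketSize) * PacketSize above delineates. A sketch for a plain float sum (internal API, subject to change):

    #include <Eigen/Core>

    float vectorized_sum(const float* data, Eigen::Index size) {
      using namespace Eigen::internal;
      typedef packet_traits<float>::type Packet;
      const Eigen::Index PacketSize = packet_traits<float>::size;
      const Eigen::Index VectorizedSize = (size / PacketSize) * PacketSize;

      float sum = 0.f;
      if (VectorizedSize > 0) {
        Packet acc = ploadu<Packet>(data);
        for (Eigen::Index i = PacketSize; i < VectorizedSize; i += PacketSize)
          acc = padd(acc, ploadu<Packet>(data + i));
        sum = predux(acc);                 // horizontal add of the lanes
      }
      for (Eigen::Index i = VectorizedSize; i < size; ++i)
        sum += data[i];                    // scalar remainder
      return sum;
    }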
|
D | Reverse.h |
      75    PacketSize = internal::packet_traits<Scalar>::size,
      79    OffsetRow = ReverseRow && IsColMajor ? PacketSize : 1,
      80    OffsetCol = ReverseCol && IsRowMajor ? PacketSize : 1,
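The OffsetRow/OffsetCol constants shift packet loads so that reversal can proceed one whole packet at a time; the lane flip itself is done with preverse (our reading; only the offsets appear in the snippet). A minimal lane-flip demo:

    #include <Eigen/Core>

    int main() {
      using namespace Eigen::internal;
      typedef packet_traits<float>::type Packet;
      const int PacketSize = packet_traits<float>::size;

      float in[16], out[16];
      for (int i = 0; i < PacketSize; ++i) in[i] = float(i);

      // preverse flips the lane order of a single packet; reversing a whole
      // array additionally steps the load address backwards by PacketSize.
      pstoreu(out, preverse(ploadu<Packet>(in)));
      return 0;
    }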
|