/external/eigen/unsupported/Eigen/CXX11/src/Tensor/ |
D | TensorCostModel.h |
     25  class TensorOpCost {
     55  TensorOpCost() : bytes_loaded_(0), bytes_stored_(0), compute_cycles_(0) {}
     57  TensorOpCost(double bytes_loaded, double bytes_stored, double compute_cycles)
     63  TensorOpCost(double bytes_loaded, double bytes_stored, double compute_cycles,
     97  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorOpCost cwiseMin(
     98  const TensorOpCost& rhs) const {
    102  return TensorOpCost(bytes_loaded, bytes_stored, compute_cycles);
    106  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorOpCost cwiseMax(
    107  const TensorOpCost& rhs) const {
    111  return TensorOpCost(bytes_loaded, bytes_stored, compute_cycles);
    [all …]
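The class above is the building block for every other hit in this listing: a TensorOpCost records per-coefficient bytes loaded, bytes stored, and compute cycles, exposes per-scalar cost constants (AddCost, MulCost, DivCost, ModCost, CastCost), and can be accumulated with operator+ or bounded with cwiseMin/cwiseMax. A minimal sketch of that usage, assuming only the unsupported Eigen Tensor module on the include path; the axpy-style operation being costed is illustrative, not taken from any of the files above:

    #include <unsupported/Eigen/CXX11/Tensor>

    // Per-coefficient cost of y[i] = a * x[i] + b over float data:
    // one coefficient loaded, one stored, one multiply plus one add.
    inline Eigen::TensorOpCost AxpbCostPerCoeff() {
      const double cycles = Eigen::TensorOpCost::MulCost<float>() +
                            Eigen::TensorOpCost::AddCost<float>();
      return Eigen::TensorOpCost(/*bytes_loaded=*/sizeof(float),
                                 /*bytes_stored=*/sizeof(float),
                                 /*compute_cycles=*/cycles);
    }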
|
D | TensorBroadcasting.h |
    351  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorOpCost
    353  double compute_cost = TensorOpCost::AddCost<Index>();
    356  compute_cost += TensorOpCost::DivCost<Index>();
    359  TensorOpCost::MulCost<Index>() + TensorOpCost::AddCost<Index>();
    362  compute_cost += TensorOpCost::MulCost<Index>() +
    363  TensorOpCost::ModCost<Index>() +
    364  TensorOpCost::AddCost<Index>();
    368  TensorOpCost::MulCost<Index>() + TensorOpCost::AddCost<Index>();
    372  TensorOpCost(0, 0, compute_cost, vectorized, PacketSize);
|
D | TensorPadding.h |
    189  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorOpCost costPerCoeff(bool vectorized) const {
    190  TensorOpCost cost = m_impl.costPerCoeff(vectorized);
    238  void updateCostPerDimension(TensorOpCost& cost, int i, bool first) const {
    246  cost += TensorOpCost(0, 0, 2 * TensorOpCost::AddCost<Index>() +
    247  reduction * (1 * TensorOpCost::AddCost<Index>()));
    249  cost += TensorOpCost(0, 0, 2 * TensorOpCost::AddCost<Index>() +
    250  2 * TensorOpCost::MulCost<Index>() +
    251  reduction * (2 * TensorOpCost::MulCost<Index>() +
    252  1 * TensorOpCost::DivCost<Index>()));
|
D | TensorInflation.h |
    203  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorOpCost costPerCoeff(bool vectorized) const {
    204  const double compute_cost = NumDims * (3 * TensorOpCost::DivCost<Index>() +
    205  3 * TensorOpCost::MulCost<Index>() +
    206  2 * TensorOpCost::AddCost<Index>());
    210  return TensorOpCost();
    212  TensorOpCost(sizeof(CoeffReturnType) * input_size / output_size, 0,
|
D | TensorEvaluator.h |
    106  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorOpCost costPerCoeff(bool vectorized) const {
    107  return TensorOpCost(sizeof(CoeffReturnType), 0, 0, vectorized,
    205  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorOpCost costPerCoeff(bool vectorized) const {
    206  return TensorOpCost(sizeof(CoeffReturnType), 0, 0, vectorized,
    268  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorOpCost
    270  return TensorOpCost(sizeof(CoeffReturnType), 0, 0, vectorized,
    338  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorOpCost costPerCoeff(bool vectorized) const {
    341  TensorOpCost(0, 0, functor_cost, vectorized, PacketSize);
    416  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorOpCost
    421  TensorOpCost(0, 0, functor_cost, vectorized, PacketSize);
    [all …]
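The default evaluators above charge sizeof(CoeffReturnType) bytes loaded and no compute cycles, while expression evaluators add a functor cost on top of the cost reported by their child evaluator. A hedged sketch of that composition pattern; UnaryCostPerCoeff, child_cost, and kFunctorCycles are illustrative names, not Eigen API:

    #include <unsupported/Eigen/CXX11/Tensor>

    // Illustrative only: a unary expression evaluator typically reports its
    // child's per-coefficient cost plus its own functor cycles, forwarding
    // the vectorization flag and packet size.
    inline Eigen::TensorOpCost UnaryCostPerCoeff(
        const Eigen::TensorOpCost& child_cost, bool vectorized,
        int packet_size) {
      const double kFunctorCycles = Eigen::TensorOpCost::MulCost<float>();
      return child_cost +
             Eigen::TensorOpCost(0, 0, kFunctorCycles, vectorized, packet_size);
    }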
|
D | TensorReverse.h |
    212  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorOpCost costPerCoeff(bool vectorized) const {
    213  double compute_cost = NumDims * (2 * TensorOpCost::AddCost<Index>() +
    214  2 * TensorOpCost::MulCost<Index>() +
    215  TensorOpCost::DivCost<Index>());
    218  compute_cost += 2 * TensorOpCost::AddCost<Index>();
    222  TensorOpCost(0, 0, compute_cost, false /* vectorized */, PacketSize);
|
D | TensorArgMax.h |
    115  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorOpCost
    117  return m_impl.costPerCoeff(vectorized) + TensorOpCost(0, 0, 1);
    257  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorOpCost
    260  … (m_return_dim < 0 ? 0.0 : (TensorOpCost::ModCost<Index>() + TensorOpCost::DivCost<Index>()));
    262  m_impl.costPerCoeff(vectorized) + TensorOpCost(0, 0, compute_cost);
|
D | TensorChipping.h |
    244  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorOpCost
    251  cost += TensorOpCost::MulCost<Index>() + TensorOpCost::AddCost<Index>();
    256  cost += TensorOpCost::AddCost<Index>();
    258  cost += 3 * TensorOpCost::MulCost<Index>() + TensorOpCost::DivCost<Index>() +
    259  3 * TensorOpCost::AddCost<Index>();
    263  TensorOpCost(0, 0, cost, vectorized, PacketSize);
|
D | TensorGenerator.h |
    148  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorOpCost
    152  return TensorOpCost(0, 0, TensorOpCost::AddCost<Scalar>() +
    153  TensorOpCost::MulCost<Scalar>());
|
D | TensorShuffling.h |
    180  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorOpCost costPerCoeff(bool vectorized) const {
    181  const double compute_cost = NumDims * (2 * TensorOpCost::AddCost<Index>() +
    182  2 * TensorOpCost::MulCost<Index>() +
    183  TensorOpCost::DivCost<Index>());
    185  TensorOpCost(0, 0, compute_cost, false /* vectorized */, PacketSize);
|
D | TensorAssign.h |
    153  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorOpCost
    158  TensorOpCost left = m_leftImpl.costPerCoeff(vectorized);
    160  TensorOpCost(
    163  TensorOpCost(0, sizeof(CoeffReturnType), 0, vectorized, PacketSize);
|
D | TensorPatch.h |
    248  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorOpCost costPerCoeff(bool vectorized) const {
    249  const double compute_cost = NumDims * (TensorOpCost::DivCost<Index>() +
    250  TensorOpCost::MulCost<Index>() +
    251  2 * TensorOpCost::AddCost<Index>());
    253  TensorOpCost(0, 0, compute_cost, vectorized, PacketSize);
|
D | TensorStriding.h |
    211  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorOpCost costPerCoeff(bool vectorized) const {
    212  double compute_cost = (NumDims - 1) * (TensorOpCost::AddCost<Index>() +
    213  TensorOpCost::MulCost<Index>() +
    214  TensorOpCost::DivCost<Index>()) +
    215  TensorOpCost::MulCost<Index>();
    222  TensorOpCost(0, 0, compute_cost, vectorized, PacketSize);
|
D | TensorConvolution.h |
    451  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorOpCost
    456  TensorOpCost::AddCost<Scalar>() + TensorOpCost::MulCost<Scalar>();
    459  (2 * TensorOpCost::AddCost<Index>() + 2 * TensorOpCost::MulCost<Index>() +
    460  TensorOpCost::DivCost<Index>());
    461  return TensorOpCost(0, 0, firstIndex_compute_cost, vectorized, PacketSize) +
    464  TensorOpCost(0, 0, convolve_compute_cost, vectorized,
   1065  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorOpCost
   1072  TensorOpCost::AddCost<Scalar>() + TensorOpCost::MulCost<Scalar>();
   1075  (2 * TensorOpCost::AddCost<Index>() + 2 * TensorOpCost::MulCost<Index>() +
   1076  TensorOpCost::DivCost<Index>());
    [all …]
|
D | TensorConcatenation.h |
    263  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorOpCost
    265  const double compute_cost = NumDims * (2 * TensorOpCost::AddCost<Index>() +
    266  2 * TensorOpCost::MulCost<Index>() +
    267  TensorOpCost::DivCost<Index>() +
    268  TensorOpCost::ModCost<Index>());
    275  TensorOpCost(0, 0, compute_cost);
|
D | TensorCustomOp.h |
    136  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorOpCost costPerCoeff(bool vectorized) const {
    138  return TensorOpCost(sizeof(CoeffReturnType), 0, 0, vectorized, PacketSize);
    291  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorOpCost costPerCoeff(bool vectorized) const {
    293  return TensorOpCost(sizeof(CoeffReturnType), 0, 0, vectorized, PacketSize);
|
D | TensorConversion.h |
    232  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorOpCost
    234  const double cast_cost = TensorOpCost::CastCost<SrcType, TargetType>();
    241  TensorOpCost(0, 0, TgtCoeffRatio * (cast_cost / PacketSize));
    243  return m_impl.costPerCoeff(vectorized) + TensorOpCost(0, 0, cast_cost);
|
/external/tensorflow/tensorflow/core/kernels/ |
D | parameterized_truncated_normal_op.cc |
    203  (Eigen::TensorOpCost::AddCost<T>() +
    204  Eigen::TensorOpCost::MulCost<T>()) *
    207  + Eigen::TensorOpCost::AddCost<T>() +
    208  Eigen::TensorOpCost::MulCost<T>() +
    212  + Eigen::TensorOpCost::MulCost<T>() * 4 +
    215  + Eigen::TensorOpCost::AddCost<T>();
    221  uniformSampleCost + Eigen::TensorOpCost::MulCost<T>() +
    222  Eigen::TensorOpCost::AddCost<T>() +
    223  Eigen::TensorOpCost::MulCost<T>() * 2 +
    224  Eigen::TensorOpCost::AddCost<T>() + uniformSampleCost +
    [all …]
|
D | matrix_triangular_solve_op.cc |
     74  (Eigen::TensorOpCost::AddCost<Scalar>() +
     75  Eigen::TensorOpCost::MulCost<Scalar>());
    166  (Eigen::TensorOpCost::AddCost<Scalar>() +
    167  Eigen::TensorOpCost::MulCost<Scalar>());
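The GetCostPerUnit() hits above derive a cycles-per-work-item estimate from Eigen's add/multiply cost constants; TensorFlow CPU kernels typically hand such an estimate to the work sharder in tensorflow/core/util/work_sharder.h. A hedged sketch under those assumptions; the n^2/2 operation count and the TriangularSolveCostPerUnit name are illustrative, not the kernel's own code, and the Shard call is shown only as a commented usage pattern:

    #include "tensorflow/core/platform/types.h"
    #include <unsupported/Eigen/CXX11/Tensor>

    namespace tensorflow {

    // Rough cycles per right-hand-side column of an n x n triangular solve:
    // about n^2 / 2 multiply-adds.
    inline int64 TriangularSolveCostPerUnit(int64 n) {
      const double add_mul = Eigen::TensorOpCost::AddCost<float>() +
                             Eigen::TensorOpCost::MulCost<float>();
      return static_cast<int64>(0.5 * n * n * add_mul);
    }

    // Typical use inside a CPU kernel, with worker_threads obtained from
    // ctx->device()->tensorflow_cpu_worker_threads():
    //   Shard(worker_threads->num_threads, worker_threads->workers,
    //         /*total=*/num_rhs, TriangularSolveCostPerUnit(n),
    //         [&](int64 start, int64 end) { /* solve columns [start, end) */ });

    }  // namespace tensorflow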
|
D | transpose_functor_cpu.cc |
     60  (conjugate ? 1 : 0) + ndims * (Eigen::TensorOpCost::DivCost<int64>() +
     61  2 * Eigen::TensorOpCost::MulCost<int64>() +
     62  2 * Eigen::TensorOpCost::AddCost<int64>());
     63  Eigen::TensorOpCost cost(/*bytes_loaded=*/sizeof(T),
|
D | dense_update_functor.cc |
     43  Eigen::TensorOpCost(.1, // chosen to force large chunks
     65  Eigen::TensorOpCost(estimated_string_size, estimated_string_size, 0),
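The two hits above construct costs purely to steer work splitting: a deliberately tiny cost makes the device's parallelFor choose large chunks, while estimated_string_size makes chunk size track the real per-element work. A standalone sketch of that parallelFor/TensorOpCost interaction, assuming Eigen's threaded Tensor support (EIGEN_USE_THREADS); the pool size, data, and doubling loop are illustrative, not the functor's own code:

    #define EIGEN_USE_THREADS
    #include <unsupported/Eigen/CXX11/Tensor>

    #include <vector>

    int main() {
      Eigen::ThreadPool pool(/*num_threads=*/4);
      Eigen::ThreadPoolDevice device(&pool, /*num_cores=*/4);

      std::vector<float> data(1 << 20, 1.0f);
      // Per-coefficient estimate: one load, one store, one multiply.
      // Shrinking the cost (as dense_update_functor.cc does with 0.1)
      // pushes parallelFor toward fewer, larger chunks.
      const Eigen::TensorOpCost cost(sizeof(float), sizeof(float),
                                     Eigen::TensorOpCost::MulCost<float>());
      device.parallelFor(static_cast<Eigen::Index>(data.size()), cost,
                         [&](Eigen::Index first, Eigen::Index last) {
                           for (Eigen::Index i = first; i < last; ++i) {
                             data[i] *= 2.0f;
                           }
                         });
      return 0;
    }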
|
D | mirror_pad_op.h |
    235  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorOpCost
    240  const double compute_cost = Dims * (7 * TensorOpCost::AddCost<Index>() +
    241  2 * TensorOpCost::MulCost<Index>() +
    242  TensorOpCost::DivCost<Index>());
    244  TensorOpCost(1, 0, compute_cost, vectorized, kPacketSize);
|
D | population_count_op.cc |
    111  const double total_cost = (Eigen::TensorOpCost::CastCost<T, uint8>() +
    112  Eigen::TensorOpCost::CastCost<int64, uint8>());
|
D | topk_op.cc |
    210  const double cmp_cost = 3 * Eigen::TensorOpCost::AddCost<int32>() +
    211  Eigen::TensorOpCost::AddCost<T>();
    217  const double copy_cost = 2 * k * Eigen::TensorOpCost::AddCost<T>();
|
/external/tensorflow/tensorflow/contrib/seq2seq/kernels/ |
D | beam_search_ops.cc |
    157  Eigen::TensorOpCost::DivCost<int32>() +
    158  6 * Eigen::TensorOpCost::AddCost<int32>() +
    159  2 * max_time * (5 * Eigen::TensorOpCost::AddCost<int32>());
|