// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2014 Benoit Steiner <benoit.steiner.goog@gmail.com>
// Copyright (C) 2013 Christian Seiler <christian@iwakd.de>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.

#ifndef EIGEN_CXX11_TENSOR_TENSOR_H
#define EIGEN_CXX11_TENSOR_TENSOR_H

namespace Eigen {

/** \class Tensor
  * \ingroup CXX11_Tensor_Module
  *
  * \brief The tensor class.
  *
  * The %Tensor class is the work-horse for all \em dense tensors within Eigen.
  *
  * The %Tensor class encompasses only dynamic-size objects so far.
  *
  * The first two template parameters are required:
  * \tparam Scalar_ \anchor tensor_tparam_scalar Numeric type, e.g. float, double, int or std::complex<float>.
  *                 User defined scalar types are supported as well (see \ref user_defined_scalars "here").
  * \tparam NumIndices_ Number of indices (i.e. rank of the tensor)
  *
  * The remaining template parameters are optional -- in most cases you don't have to worry about them.
  * \tparam Options_ \anchor tensor_tparam_options A combination of either \b #RowMajor or \b #ColMajor, and of either
  *                  \b #AutoAlign or \b #DontAlign.
  *                  The former controls \ref TopicStorageOrders "storage order", and defaults to column-major. The latter controls alignment, which is required
  *                  for vectorization. It defaults to aligning tensors. Note that tensors currently do not support any operations that profit from vectorization.
  *                  Support for such operations (i.e. adding two tensors etc.) is planned.
  *
  * You can access elements of tensors using normal subscripting:
  *
  * \code
  * Eigen::Tensor<double, 4> t(10, 10, 10, 10);
  * t(0, 1, 2, 3) = 42.0;
  * \endcode
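  *
  * As an illustrative sketch, the storage order is selected via the third template
  * parameter, using the options documented above:
  *
  * \code
  * Eigen::Tensor<float, 3, Eigen::RowMajor> t_rm(2, 3, 4);
  * t_rm(1, 2, 3) = 1.0f; // same subscripting; only the in-memory layout differs
  * \endcode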
  *
  * This class can be extended with the help of the plugin mechanism described on the page
  * \ref TopicCustomizingEigen by defining the preprocessor symbol \c EIGEN_TENSOR_PLUGIN.
  *
  * <i><b>Some notes:</b></i>
  *
  * <dl>
  * <dt><b>Relation to other parts of Eigen:</b></dt>
  * <dd>The midterm development goal for this class is to have a similar hierarchy as Eigen uses for matrices, so that
  * taking blocks or using tensors in expressions is easily possible, including an interface with the vector/matrix code
  * by providing .asMatrix() and .asVector() (or similar) methods for rank 2 and 1 tensors. However, currently, the %Tensor
  * class does not provide any of these features and is only available as a stand-alone class that just allows for
  * coefficient access. Also, when fixed-size tensors are implemented, the number of template arguments is likely to
  * change dramatically.</dd>
  * </dl>
  *
  * \ref TopicStorageOrders
  */

template<typename Scalar_, int NumIndices_, int Options_, typename IndexType_>
class Tensor : public TensorBase<Tensor<Scalar_, NumIndices_, Options_, IndexType_> >
{
  public:
    typedef Tensor<Scalar_, NumIndices_, Options_, IndexType_> Self;
    typedef TensorBase<Tensor<Scalar_, NumIndices_, Options_, IndexType_> > Base;
    typedef typename Eigen::internal::nested<Self>::type Nested;
    typedef typename internal::traits<Self>::StorageKind StorageKind;
    typedef typename internal::traits<Self>::Index Index;
    typedef Scalar_ Scalar;
    typedef typename NumTraits<Scalar>::Real RealScalar;
    typedef typename Base::CoeffReturnType CoeffReturnType;

    enum {
      IsAligned = bool(EIGEN_MAX_ALIGN_BYTES>0) & !(Options_&DontAlign),
      Layout = Options_ & RowMajor ? RowMajor : ColMajor,
      CoordAccess = true,
      RawAccess = true
    };

    static const int Options = Options_;
    static const int NumIndices = NumIndices_;
    typedef DSizes<Index, NumIndices_> Dimensions;

  protected:
    TensorStorage<Scalar, Dimensions, Options> m_storage;

#ifdef EIGEN_HAS_SFINAE
    template<typename CustomIndices>
    struct isOfNormalIndex{
      static const bool is_array = internal::is_base_of<array<Index, NumIndices>, CustomIndices>::value;
      static const bool is_int = NumTraits<CustomIndices>::IsInteger;
      static const bool value = is_array | is_int;
    };
#endif

  public:
    // Metadata
    EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Index             rank()                   const { return NumIndices; }
    EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Index             dimension(std::size_t n) const { return m_storage.dimensions()[n]; }
    EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Dimensions& dimensions()             const { return m_storage.dimensions(); }
    EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Index             size()                   const { return m_storage.size(); }
    EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Scalar            *data()                        { return m_storage.data(); }
    EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Scalar      *data()                  const { return m_storage.data(); }

    // This makes EIGEN_INITIALIZE_COEFFS_IF_THAT_OPTION_IS_ENABLED
    // work, because that uses base().coeffRef() - and we don't yet
    // implement a similar class hierarchy
    inline Self& base()             { return *this; }
    inline const Self& base() const { return *this; }
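
    // Illustrative example: for Eigen::Tensor<double, 3> t(2, 3, 4),
    // t.rank() == 3, t.dimension(1) == 3, t.size() == 24, and t.data()
    // points at the first of the 24 stored coefficients.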

#if EIGEN_HAS_VARIADIC_TEMPLATES
    template<typename... IndexTypes>
    EIGEN_DEVICE_FUNC inline const Scalar& coeff(Index firstIndex, Index secondIndex, IndexTypes... otherIndices) const
    {
      // The number of indices used to access a tensor coefficient must be equal to the rank of the tensor.
      EIGEN_STATIC_ASSERT(sizeof...(otherIndices) + 2 == NumIndices, YOU_MADE_A_PROGRAMMING_MISTAKE)
      return coeff(array<Index, NumIndices>{{firstIndex, secondIndex, otherIndices...}});
    }
#endif

    // normal indices
    EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Scalar& coeff(const array<Index, NumIndices>& indices) const
    {
      eigen_internal_assert(checkIndexRange(indices));
      return m_storage.data()[linearizedIndex(indices)];
    }

    // custom indices
#ifdef EIGEN_HAS_SFINAE
    template<typename CustomIndices,
             EIGEN_SFINAE_ENABLE_IF( !(isOfNormalIndex<CustomIndices>::value) )
    >
    EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Scalar& coeff(CustomIndices& indices) const
    {
      return coeff(internal::customIndices2Array<Index,NumIndices>(indices));
    }
#endif

    EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Scalar& coeff() const
    {
      EIGEN_STATIC_ASSERT(NumIndices == 0, YOU_MADE_A_PROGRAMMING_MISTAKE);
      return m_storage.data()[0];
    }

    EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Scalar& coeff(Index index) const
    {
      eigen_internal_assert(index >= 0 && index < size());
      return m_storage.data()[index];
    }

#if EIGEN_HAS_VARIADIC_TEMPLATES
    template<typename... IndexTypes>
    inline Scalar& coeffRef(Index firstIndex, Index secondIndex, IndexTypes... otherIndices)
    {
      // The number of indices used to access a tensor coefficient must be equal to the rank of the tensor.
      EIGEN_STATIC_ASSERT(sizeof...(otherIndices) + 2 == NumIndices, YOU_MADE_A_PROGRAMMING_MISTAKE)
      return coeffRef(array<Index, NumIndices>{{firstIndex, secondIndex, otherIndices...}});
    }
#endif

    // normal indices
    EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Scalar& coeffRef(const array<Index, NumIndices>& indices)
    {
      eigen_internal_assert(checkIndexRange(indices));
      return m_storage.data()[linearizedIndex(indices)];
    }

    // custom indices
#ifdef EIGEN_HAS_SFINAE
    template<typename CustomIndices,
             EIGEN_SFINAE_ENABLE_IF( !(isOfNormalIndex<CustomIndices>::value) )
    >
    EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Scalar& coeffRef(CustomIndices& indices)
    {
      return coeffRef(internal::customIndices2Array<Index,NumIndices>(indices));
    }
#endif

    EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Scalar& coeffRef()
    {
      EIGEN_STATIC_ASSERT(NumIndices == 0, YOU_MADE_A_PROGRAMMING_MISTAKE);
      return m_storage.data()[0];
    }

    EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Scalar& coeffRef(Index index)
    {
      eigen_internal_assert(index >= 0 && index < size());
      return m_storage.data()[index];
    }
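
    // Usage sketch: coeff()/coeffRef() are the low-level accessors; index
    // ranges are only verified via eigen_internal_assert. With variadic
    // templates enabled one can write, e.g.:
    //   Eigen::Tensor<float, 2> m(3, 3);
    //   m.coeffRef(0, 1) = 1.0f;  // rank checked at compile time
    //   float x = m.coeff(0, 1);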

#if EIGEN_HAS_VARIADIC_TEMPLATES
    template<typename... IndexTypes>
    inline const Scalar& operator()(Index firstIndex, Index secondIndex, IndexTypes... otherIndices) const
    {
      // The number of indices used to access a tensor coefficient must be equal to the rank of the tensor.
      EIGEN_STATIC_ASSERT(sizeof...(otherIndices) + 2 == NumIndices, YOU_MADE_A_PROGRAMMING_MISTAKE)
      return this->operator()(array<Index, NumIndices>{{firstIndex, secondIndex, otherIndices...}});
    }
#else
    EIGEN_DEVICE_FUNC
    EIGEN_STRONG_INLINE const Scalar& operator()(Index i0, Index i1) const
    {
      return coeff(array<Index, 2>(i0, i1));
    }
    EIGEN_DEVICE_FUNC
    EIGEN_STRONG_INLINE const Scalar& operator()(Index i0, Index i1, Index i2) const
    {
      return coeff(array<Index, 3>(i0, i1, i2));
    }
    EIGEN_DEVICE_FUNC
    EIGEN_STRONG_INLINE const Scalar& operator()(Index i0, Index i1, Index i2, Index i3) const
    {
      return coeff(array<Index, 4>(i0, i1, i2, i3));
    }
    EIGEN_DEVICE_FUNC
    EIGEN_STRONG_INLINE const Scalar& operator()(Index i0, Index i1, Index i2, Index i3, Index i4) const
    {
      return coeff(array<Index, 5>(i0, i1, i2, i3, i4));
    }
#endif

    // custom indices
#ifdef EIGEN_HAS_SFINAE
    template<typename CustomIndices,
             EIGEN_SFINAE_ENABLE_IF( !(isOfNormalIndex<CustomIndices>::value) )
    >
    EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Scalar& operator()(CustomIndices& indices) const
    {
      return coeff(internal::customIndices2Array<Index,NumIndices>(indices));
    }
#endif

    // normal indices
    EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Scalar& operator()(const array<Index, NumIndices>& indices) const
    {
      return coeff(indices);
    }

    EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Scalar& operator()(Index index) const
    {
      eigen_internal_assert(index >= 0 && index < size());
      return coeff(index);
    }

    EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Scalar& operator()() const
    {
      EIGEN_STATIC_ASSERT(NumIndices == 0, YOU_MADE_A_PROGRAMMING_MISTAKE);
      return coeff();
    }

    EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Scalar& operator[](Index index) const
    {
      // The bracket operator is only for vectors, use the parenthesis operator instead.
      EIGEN_STATIC_ASSERT(NumIndices == 1, YOU_MADE_A_PROGRAMMING_MISTAKE);
      return coeff(index);
    }
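
    // Illustrative example: operator() forwards to coeff()/coeffRef(), while
    // operator[] is restricted to rank-1 tensors at compile time:
    //   Eigen::Tensor<int, 1> v(5);
    //   v[3] = 7;        // fine: rank 1
    //   int b = v(3);    // equivalent access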

#if EIGEN_HAS_VARIADIC_TEMPLATES
    template<typename... IndexTypes>
    inline Scalar& operator()(Index firstIndex, Index secondIndex, IndexTypes... otherIndices)
    {
      // The number of indices used to access a tensor coefficient must be equal to the rank of the tensor.
      EIGEN_STATIC_ASSERT(sizeof...(otherIndices) + 2 == NumIndices, YOU_MADE_A_PROGRAMMING_MISTAKE)
      return operator()(array<Index, NumIndices>{{firstIndex, secondIndex, otherIndices...}});
    }
#else
    EIGEN_DEVICE_FUNC
    EIGEN_STRONG_INLINE Scalar& operator()(Index i0, Index i1)
    {
      return coeffRef(array<Index, 2>(i0, i1));
    }
    EIGEN_DEVICE_FUNC
    EIGEN_STRONG_INLINE Scalar& operator()(Index i0, Index i1, Index i2)
    {
      return coeffRef(array<Index, 3>(i0, i1, i2));
    }
    EIGEN_DEVICE_FUNC
    EIGEN_STRONG_INLINE Scalar& operator()(Index i0, Index i1, Index i2, Index i3)
    {
      return coeffRef(array<Index, 4>(i0, i1, i2, i3));
    }
    EIGEN_DEVICE_FUNC
    EIGEN_STRONG_INLINE Scalar& operator()(Index i0, Index i1, Index i2, Index i3, Index i4)
    {
      return coeffRef(array<Index, 5>(i0, i1, i2, i3, i4));
    }
#endif

    // normal indices
    EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Scalar& operator()(const array<Index, NumIndices>& indices)
    {
      return coeffRef(indices);
    }

    // custom indices
#ifdef EIGEN_HAS_SFINAE
    template<typename CustomIndices,
             EIGEN_SFINAE_ENABLE_IF( !(isOfNormalIndex<CustomIndices>::value) )
    >
    EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Scalar& operator()(CustomIndices& indices)
    {
      return coeffRef(internal::customIndices2Array<Index,NumIndices>(indices));
    }
#endif

    EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Scalar& operator()(Index index)
    {
      eigen_assert(index >= 0 && index < size());
      return coeffRef(index);
    }

    EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Scalar& operator()()
    {
      EIGEN_STATIC_ASSERT(NumIndices == 0, YOU_MADE_A_PROGRAMMING_MISTAKE);
      return coeffRef();
    }

    EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Scalar& operator[](Index index)
    {
      // The bracket operator is only for vectors, use the parenthesis operator instead.
      EIGEN_STATIC_ASSERT(NumIndices == 1, YOU_MADE_A_PROGRAMMING_MISTAKE)
      return coeffRef(index);
    }

    EIGEN_DEVICE_FUNC
    EIGEN_STRONG_INLINE Tensor()
      : m_storage()
    {
    }

    EIGEN_DEVICE_FUNC
    EIGEN_STRONG_INLINE Tensor(const Self& other)
      : m_storage(other.m_storage)
    {
    }
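
    // Construction sketch: coefficients are left uninitialized unless an
    // initialization option such as EIGEN_INITIALIZE_MATRICES_BY_ZERO is
    // defined (see EIGEN_INITIALIZE_COEFFS_IF_THAT_OPTION_IS_ENABLED below):
    //   Eigen::Tensor<double, 3> t(4, 4, 4);  // 64 uninitialized coefficients
    //   t.setZero();                          // TensorBase provides setZero() etc.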

#if EIGEN_HAS_VARIADIC_TEMPLATES
    template<typename... IndexTypes>
    EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Tensor(Index firstDimension, IndexTypes... otherDimensions)
      : m_storage(firstDimension, otherDimensions...)
    {
      // The number of dimensions used to construct a tensor must be equal to the rank of the tensor.
      EIGEN_STATIC_ASSERT(sizeof...(otherDimensions) + 1 == NumIndices, YOU_MADE_A_PROGRAMMING_MISTAKE)
    }
#else
    EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE explicit Tensor(Index dim1)
      : m_storage(dim1, array<Index, 1>(dim1))
    {
      EIGEN_STATIC_ASSERT(1 == NumIndices, YOU_MADE_A_PROGRAMMING_MISTAKE)
    }
    EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Tensor(Index dim1, Index dim2)
      : m_storage(dim1*dim2, array<Index, 2>(dim1, dim2))
    {
      EIGEN_STATIC_ASSERT(2 == NumIndices, YOU_MADE_A_PROGRAMMING_MISTAKE)
    }
    EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Tensor(Index dim1, Index dim2, Index dim3)
      : m_storage(dim1*dim2*dim3, array<Index, 3>(dim1, dim2, dim3))
    {
      EIGEN_STATIC_ASSERT(3 == NumIndices, YOU_MADE_A_PROGRAMMING_MISTAKE)
    }
    EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Tensor(Index dim1, Index dim2, Index dim3, Index dim4)
      : m_storage(dim1*dim2*dim3*dim4, array<Index, 4>(dim1, dim2, dim3, dim4))
    {
      EIGEN_STATIC_ASSERT(4 == NumIndices, YOU_MADE_A_PROGRAMMING_MISTAKE)
    }
    EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Tensor(Index dim1, Index dim2, Index dim3, Index dim4, Index dim5)
      : m_storage(dim1*dim2*dim3*dim4*dim5, array<Index, 5>(dim1, dim2, dim3, dim4, dim5))
    {
      EIGEN_STATIC_ASSERT(5 == NumIndices, YOU_MADE_A_PROGRAMMING_MISTAKE)
    }
#endif

    /** Construct from an array of dimension sizes. */
    EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE explicit Tensor(const array<Index, NumIndices>& dimensions)
      : m_storage(internal::array_prod(dimensions), dimensions)
    {
      EIGEN_INITIALIZE_COEFFS_IF_THAT_OPTION_IS_ENABLED
    }

    template<typename OtherDerived>
    EIGEN_DEVICE_FUNC
    EIGEN_STRONG_INLINE Tensor(const TensorBase<OtherDerived, ReadOnlyAccessors>& other)
    {
      typedef TensorAssignOp<Tensor, const OtherDerived> Assign;
      Assign assign(*this, other.derived());
      resize(TensorEvaluator<const Assign, DefaultDevice>(assign, DefaultDevice()).dimensions());
      internal::TensorExecutor<const Assign, DefaultDevice>::run(assign, DefaultDevice());
    }

    template<typename OtherDerived>
    EIGEN_DEVICE_FUNC
    EIGEN_STRONG_INLINE Tensor(const TensorBase<OtherDerived, WriteAccessors>& other)
    {
      typedef TensorAssignOp<Tensor, const OtherDerived> Assign;
      Assign assign(*this, other.derived());
      resize(TensorEvaluator<const Assign, DefaultDevice>(assign, DefaultDevice()).dimensions());
      internal::TensorExecutor<const Assign, DefaultDevice>::run(assign, DefaultDevice());
    }

    EIGEN_DEVICE_FUNC
    EIGEN_STRONG_INLINE Tensor& operator=(const Tensor& other)
    {
      typedef TensorAssignOp<Tensor, const Tensor> Assign;
      Assign assign(*this, other);
      resize(TensorEvaluator<const Assign, DefaultDevice>(assign, DefaultDevice()).dimensions());
      internal::TensorExecutor<const Assign, DefaultDevice>::run(assign, DefaultDevice());
      return *this;
    }
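
    // Assignment sketch: assigning an expression first resizes *this to the
    // expression's dimensions, then evaluates it via TensorExecutor, e.g.:
    //   Eigen::Tensor<float, 2> a(2, 2), b(2, 2), c;
    //   // ... fill a and b ...
    //   c = a + b;  // c is resized to 2x2 and filled with the sums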

    template<typename OtherDerived>
    EIGEN_DEVICE_FUNC
    EIGEN_STRONG_INLINE Tensor& operator=(const OtherDerived& other)
    {
      typedef TensorAssignOp<Tensor, const OtherDerived> Assign;
      Assign assign(*this, other);
      resize(TensorEvaluator<const Assign, DefaultDevice>(assign, DefaultDevice()).dimensions());
      internal::TensorExecutor<const Assign, DefaultDevice>::run(assign, DefaultDevice());
      return *this;
    }

#if EIGEN_HAS_VARIADIC_TEMPLATES
    template<typename... IndexTypes> EIGEN_DEVICE_FUNC
    void resize(Index firstDimension, IndexTypes... otherDimensions)
    {
      // The number of dimensions used to resize a tensor must be equal to the rank of the tensor.
      EIGEN_STATIC_ASSERT(sizeof...(otherDimensions) + 1 == NumIndices, YOU_MADE_A_PROGRAMMING_MISTAKE)
      resize(array<Index, NumIndices>{{firstDimension, otherDimensions...}});
    }
#endif

    /** Resize to an array of dimension sizes. */
    EIGEN_DEVICE_FUNC void resize(const array<Index, NumIndices>& dimensions)
    {
      int i;
      Index size = Index(1);
      for (i = 0; i < NumIndices; i++) {
        internal::check_rows_cols_for_overflow<Dynamic>::run(size, dimensions[i]);
        size *= dimensions[i];
      }
      #ifdef EIGEN_INITIALIZE_COEFFS
        bool size_changed = size != this->size();
        m_storage.resize(size, dimensions);
        if(size_changed) EIGEN_INITIALIZE_COEFFS_IF_THAT_OPTION_IS_ENABLED
      #else
        m_storage.resize(size, dimensions);
      #endif
    }

    // Why is this overload needed? DSizes is derived from array.
    EIGEN_DEVICE_FUNC void resize(const DSizes<Index, NumIndices>& dimensions) {
      array<Index, NumIndices> dims;
      for (int i = 0; i < NumIndices; ++i) {
        dims[i] = dimensions[i];
      }
      resize(dims);
    }

    EIGEN_DEVICE_FUNC
    void resize()
    {
      EIGEN_STATIC_ASSERT(NumIndices == 0, YOU_MADE_A_PROGRAMMING_MISTAKE);
      // Nothing to do: rank 0 tensors have fixed size
    }

    /** Resize using a custom index type, converted to an index array. */
#ifdef EIGEN_HAS_SFINAE
    template<typename CustomDimension,
             EIGEN_SFINAE_ENABLE_IF( !(isOfNormalIndex<CustomDimension>::value) )
    >
    EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void resize(CustomDimension& dimensions)
    {
      resize(internal::customIndices2Array<Index,NumIndices>(dimensions));
    }
#endif
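
    // Resize sketch: as with Matrix::resize(), resizing does not preserve
    // existing coefficient values:
    //   Eigen::Tensor<double, 2> t(2, 3);
    //   t.resize(4, 4);  // storage reallocated; contents now unspecified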

#ifndef EIGEN_EMULATE_CXX11_META_H
    template <typename std::ptrdiff_t... Indices>
    EIGEN_DEVICE_FUNC
    void resize(const Sizes<Indices...>& dimensions) {
      array<Index, NumIndices> dims;
      for (int i = 0; i < NumIndices; ++i) {
        dims[i] = static_cast<Index>(dimensions[i]);
      }
      resize(dims);
    }
#else
    template <std::size_t V1, std::size_t V2, std::size_t V3, std::size_t V4, std::size_t V5>
    EIGEN_DEVICE_FUNC
    void resize(const Sizes<V1, V2, V3, V4, V5>& dimensions) {
      array<Index, NumIndices> dims;
      for (int i = 0; i < NumIndices; ++i) {
        dims[i] = static_cast<Index>(dimensions[i]);
      }
      resize(dims);
    }
#endif

  protected:

    bool checkIndexRange(const array<Index, NumIndices>& indices) const
    {
      using internal::array_apply_and_reduce;
      using internal::array_zip_and_reduce;
      using internal::greater_equal_zero_op;
      using internal::logical_and_op;
      using internal::lesser_op;

      return
        // check whether the indices are all >= 0
        array_apply_and_reduce<logical_and_op, greater_equal_zero_op>(indices) &&
        // check whether the indices fit in the dimensions
        array_zip_and_reduce<logical_and_op, lesser_op>(indices, m_storage.dimensions());
    }

    EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Index linearizedIndex(const array<Index, NumIndices>& indices) const
    {
      if (Options&RowMajor) {
        return m_storage.dimensions().IndexOfRowMajor(indices);
      } else {
        return m_storage.dimensions().IndexOfColMajor(indices);
      }
    }
};

} // end namespace Eigen

#endif // EIGEN_CXX11_TENSOR_TENSOR_H