// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2008-2015 Gael Guennebaud <gael.guennebaud@inria.fr>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.

#ifndef EIGEN_SPARSEVECTOR_H
#define EIGEN_SPARSEVECTOR_H

namespace Eigen {

/** \ingroup SparseCore_Module
  * \class SparseVector
  *
  * \brief a sparse vector class
  *
  * \tparam _Scalar the scalar type, i.e. the type of the coefficients
  *
  * See http://www.netlib.org/linalg/html_templates/node91.html for details on the storage scheme.
  *
  * This class can be extended with the help of the plugin mechanism described on the page
  * \ref TopicCustomizing_Plugins by defining the preprocessor symbol \c EIGEN_SPARSEVECTOR_PLUGIN.
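  *
  * A minimal usage sketch (illustrative only):
  * \code
  * SparseVector<double> v(1000);   // sparse column vector of logical size 1000
  * v.insert(42) = 3.14;            // sorted insertion of a new nonzero
  * v.coeffRef(7) += 1.0;           // binary search, inserting the coefficient if absent
  * double x = v.coeff(42);         // read access; returns 0 for absent coefficients
  * \endcode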
  */

namespace internal {
template<typename _Scalar, int _Options, typename _StorageIndex>
struct traits<SparseVector<_Scalar, _Options, _StorageIndex> >
{
  typedef _Scalar Scalar;
  typedef _StorageIndex StorageIndex;
  typedef Sparse StorageKind;
  typedef MatrixXpr XprKind;
  enum {
    IsColVector = (_Options & RowMajorBit) ? 0 : 1,

    RowsAtCompileTime = IsColVector ? Dynamic : 1,
    ColsAtCompileTime = IsColVector ? 1 : Dynamic,
    MaxRowsAtCompileTime = RowsAtCompileTime,
    MaxColsAtCompileTime = ColsAtCompileTime,
    Flags = _Options | NestByRefBit | LvalueBit | (IsColVector ? 0 : RowMajorBit) | CompressedAccessBit,
    SupportedAccessPatterns = InnerRandomAccessPattern
  };
};

// Sparse-Vector-Assignment kinds:
enum {
  SVA_RuntimeSwitch,
  SVA_Inner,
  SVA_Outer
};
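// Descriptive note (inferred from the selector below):
// - SVA_Inner:         the source is a compile-time vector stored along its inner
//                      dimension; copy its nonzeros with a single inner iterator.
// - SVA_Outer:         the source has inner size 1; visit each outer slice and pick
//                      its (at most one) entry.
// - SVA_RuntimeSwitch: the source's vector-ness is unknown at compile time, so the
//                      strategy is chosen at runtime from outerSize().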

template< typename Dest, typename Src,
          int AssignmentKind = !bool(Src::IsVectorAtCompileTime) ? SVA_RuntimeSwitch
                             : Src::InnerSizeAtCompileTime==1 ? SVA_Outer
                             : SVA_Inner>
struct sparse_vector_assign_selector;

}

template<typename _Scalar, int _Options, typename _StorageIndex>
class SparseVector
  : public SparseCompressedBase<SparseVector<_Scalar, _Options, _StorageIndex> >
{
    typedef SparseCompressedBase<SparseVector> Base;
    using Base::convert_index;
  public:
    EIGEN_SPARSE_PUBLIC_INTERFACE(SparseVector)
    EIGEN_SPARSE_INHERIT_ASSIGNMENT_OPERATOR(SparseVector, +=)
    EIGEN_SPARSE_INHERIT_ASSIGNMENT_OPERATOR(SparseVector, -=)

    typedef internal::CompressedStorage<Scalar,StorageIndex> Storage;
    enum { IsColVector = internal::traits<SparseVector>::IsColVector };

    enum {
      Options = _Options
    };

    EIGEN_STRONG_INLINE Index rows() const { return IsColVector ? m_size : 1; }
    EIGEN_STRONG_INLINE Index cols() const { return IsColVector ? 1 : m_size; }
    EIGEN_STRONG_INLINE Index innerSize() const { return m_size; }
    EIGEN_STRONG_INLINE Index outerSize() const { return 1; }

    EIGEN_STRONG_INLINE const Scalar* valuePtr() const { return m_data.valuePtr(); }
    EIGEN_STRONG_INLINE Scalar* valuePtr() { return m_data.valuePtr(); }

    EIGEN_STRONG_INLINE const StorageIndex* innerIndexPtr() const { return m_data.indexPtr(); }
    EIGEN_STRONG_INLINE StorageIndex* innerIndexPtr() { return m_data.indexPtr(); }

    inline const StorageIndex* outerIndexPtr() const { return 0; }
    inline StorageIndex* outerIndexPtr() { return 0; }
    inline const StorageIndex* innerNonZeroPtr() const { return 0; }
    inline StorageIndex* innerNonZeroPtr() { return 0; }

    /** \internal */
    inline Storage& data() { return m_data; }
    /** \internal */
    inline const Storage& data() const { return m_data; }

    inline Scalar coeff(Index row, Index col) const
    {
      eigen_assert(IsColVector ? (col==0 && row>=0 && row<m_size) : (row==0 && col>=0 && col<m_size));
      return coeff(IsColVector ? row : col);
    }
    inline Scalar coeff(Index i) const
    {
      eigen_assert(i>=0 && i<m_size);
      return m_data.at(StorageIndex(i));
    }

    inline Scalar& coeffRef(Index row, Index col)
    {
      eigen_assert(IsColVector ? (col==0 && row>=0 && row<m_size) : (row==0 && col>=0 && col<m_size));
      return coeffRef(IsColVector ? row : col);
    }
    /** \returns a reference to the coefficient value at given index \a i.
      * This operation involves a log(rho*size) binary search. If the coefficient does not
      * exist yet, then a sorted insertion into a sequential buffer is performed.
      *
      * This insertion might be very costly if the number of nonzeros above \a i is large.
      */
    inline Scalar& coeffRef(Index i)
    {
      eigen_assert(i>=0 && i<m_size);

      return m_data.atWithInsertion(StorageIndex(i));
    }
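
    // Illustrative sketch (an assumption about typical usage, not part of the
    // original documentation): when many coefficients are set in strictly
    // increasing index order, reserve() followed by insertBack() avoids the
    // per-coefficient search-and-shift cost of coeffRef():
    //
    //   SparseVector<double> v(size);
    //   v.reserve(nnz);                    // nnz: expected number of nonzeros
    //   for(Index k=0; k<nnz; ++k)
    //     v.insertBack(idx[k]) = val[k];   // idx[]/val[] are hypothetical arrays,
    //                                      // idx[] strictly increasing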

  public:

    typedef typename Base::InnerIterator InnerIterator;
    typedef typename Base::ReverseInnerIterator ReverseInnerIterator;

    inline void setZero() { m_data.clear(); }

    /** \returns the number of non zero coefficients */
    inline Index nonZeros() const  { return m_data.size(); }

    inline void startVec(Index outer)
    {
      EIGEN_UNUSED_VARIABLE(outer);
      eigen_assert(outer==0);
    }

    inline Scalar& insertBackByOuterInner(Index outer, Index inner)
    {
      EIGEN_UNUSED_VARIABLE(outer);
      eigen_assert(outer==0);
      return insertBack(inner);
    }
    inline Scalar& insertBack(Index i)
    {
      m_data.append(0, i);
      return m_data.value(m_data.size()-1);
    }

    Scalar& insertBackByOuterInnerUnordered(Index outer, Index inner)
    {
      EIGEN_UNUSED_VARIABLE(outer);
      eigen_assert(outer==0);
      return insertBackUnordered(inner);
    }
    inline Scalar& insertBackUnordered(Index i)
    {
      m_data.append(0, i);
      return m_data.value(m_data.size()-1);
    }

    inline Scalar& insert(Index row, Index col)
    {
      eigen_assert(IsColVector ? (col==0 && row>=0 && row<m_size) : (row==0 && col>=0 && col<m_size));

      Index inner = IsColVector ? row : col;
      Index outer = IsColVector ? col : row;
      EIGEN_ONLY_USED_FOR_DEBUG(outer);
      eigen_assert(outer==0);
      return insert(inner);
    }
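    /** Inserts a new coefficient at index \a i, assuming it is not already stored;
      * all entries stored after \a i are shifted by one position. */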
    Scalar& insert(Index i)
    {
      eigen_assert(i>=0 && i<m_size);

      Index startId = 0;
      Index p = Index(m_data.size()) - 1;
      // TODO smart realloc
      m_data.resize(p+2,1);

      while ( (p >= startId) && (m_data.index(p) > i) )
      {
        m_data.index(p+1) = m_data.index(p);
        m_data.value(p+1) = m_data.value(p);
        --p;
      }
      m_data.index(p+1) = convert_index(i);
      m_data.value(p+1) = 0;
      return m_data.value(p+1);
    }

    /** Reserves room for \a reserveSize non zeros. */
    inline void reserve(Index reserveSize) { m_data.reserve(reserveSize); }


    inline void finalize() {}

    /** \copydoc SparseMatrix::prune(const Scalar&,const RealScalar&) */
    void prune(const Scalar& reference, const RealScalar& epsilon = NumTraits<RealScalar>::dummy_precision())
    {
      m_data.prune(reference,epsilon);
    }
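
    // A minimal usage sketch: drop explicitly stored zeros from the vector,
    //   v.prune(Scalar(0));   // entries much smaller than the reference are removed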

    /** Resizes the sparse vector to \a rows x \a cols
      *
      * This method is provided for compatibility with matrices.
      * For a column vector, \a cols must be equal to 1.
      * For a row vector, \a rows must be equal to 1.
      *
      * \sa resize(Index)
      */
    void resize(Index rows, Index cols)
    {
      eigen_assert((IsColVector ? cols : rows)==1 && "Outer dimension must equal 1");
      resize(IsColVector ? rows : cols);
    }

    /** Resizes the sparse vector to \a newSize.
      * This method deletes all entries, thus leaving an empty sparse vector.
      *
      * \sa conservativeResize(), setZero() */
    void resize(Index newSize)
    {
      m_size = newSize;
      m_data.clear();
    }

    /** Resizes the sparse vector to \a newSize, while leaving old values untouched.
      *
      * If the size of the vector is decreased, then the storage of the out-of-bounds coefficients is kept and reserved.
      * Call .data().squeeze() to free extra memory.
      *
      * \sa reserve(), setZero()
      */
    void conservativeResize(Index newSize)
    {
      if (newSize < m_size)
      {
        Index i = 0;
        while (i<m_data.size() && m_data.index(i)<newSize) ++i;
        m_data.resize(i);
      }
      m_size = newSize;
    }
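
    // Illustrative sketch: shrinking keeps in-range entries and drops the rest,
    //   SparseVector<double> v(10);
    //   v.insert(2) = 1.0;  v.insert(8) = 2.0;
    //   v.conservativeResize(5);   // the entry at index 8 is dropped, index 2 survives
    //   v.data().squeeze();        // optionally release the now-unused storage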

    void resizeNonZeros(Index size) { m_data.resize(size); }

    inline SparseVector() : m_size(0) { check_template_parameters(); resize(0); }

    explicit inline SparseVector(Index size) : m_size(0) { check_template_parameters(); resize(size); }

    inline SparseVector(Index rows, Index cols) : m_size(0) { check_template_parameters(); resize(rows,cols); }

    template<typename OtherDerived>
    inline SparseVector(const SparseMatrixBase<OtherDerived>& other)
      : m_size(0)
    {
      #ifdef EIGEN_SPARSE_CREATE_TEMPORARY_PLUGIN
        EIGEN_SPARSE_CREATE_TEMPORARY_PLUGIN
      #endif
      check_template_parameters();
      *this = other.derived();
    }

    inline SparseVector(const SparseVector& other)
      : Base(other), m_size(0)
    {
      check_template_parameters();
      *this = other.derived();
    }

    /** Swaps the values of \c *this and \a other.
      * Overloaded for performance: this version performs a \em shallow swap by swapping pointers and attributes only.
      * \sa SparseMatrixBase::swap()
      */
    inline void swap(SparseVector& other)
    {
      std::swap(m_size, other.m_size);
      m_data.swap(other.m_data);
    }

    template<int OtherOptions>
    inline void swap(SparseMatrix<Scalar,OtherOptions,StorageIndex>& other)
    {
      eigen_assert(other.outerSize()==1);
      std::swap(m_size, other.m_innerSize);
      m_data.swap(other.m_data);
    }

    inline SparseVector& operator=(const SparseVector& other)
    {
      if (other.isRValue())
      {
        swap(other.const_cast_derived());
      }
      else
      {
        resize(other.size());
        m_data = other.m_data;
      }
      return *this;
    }

    template<typename OtherDerived>
    inline SparseVector& operator=(const SparseMatrixBase<OtherDerived>& other)
    {
      SparseVector tmp(other.size());
      internal::sparse_vector_assign_selector<SparseVector,OtherDerived>::run(tmp,other.derived());
      this->swap(tmp);
      return *this;
    }

    #ifndef EIGEN_PARSED_BY_DOXYGEN
    template<typename Lhs, typename Rhs>
    inline SparseVector& operator=(const SparseSparseProduct<Lhs,Rhs>& product)
    {
      return Base::operator=(product);
    }
    #endif

    friend std::ostream & operator << (std::ostream & s, const SparseVector& m)
    {
      for (Index i=0; i<m.nonZeros(); ++i)
        s << "(" << m.m_data.value(i) << "," << m.m_data.index(i) << ") ";
      s << std::endl;
      return s;
    }

    /** Destructor */
    inline ~SparseVector() {}

    /** Overloaded for performance */
    Scalar sum() const;

  public:

    /** \internal \deprecated use setZero() and reserve() */
    EIGEN_DEPRECATED void startFill(Index reserve)
    {
      setZero();
      m_data.reserve(reserve);
    }

    /** \internal \deprecated use insertBack(Index,Index) */
    EIGEN_DEPRECATED Scalar& fill(Index r, Index c)
    {
      eigen_assert(r==0 || c==0);
      return fill(IsColVector ? r : c);
    }

    /** \internal \deprecated use insertBack(Index) */
    EIGEN_DEPRECATED Scalar& fill(Index i)
    {
      m_data.append(0, i);
      return m_data.value(m_data.size()-1);
    }

    /** \internal \deprecated use insert(Index,Index) */
    EIGEN_DEPRECATED Scalar& fillrand(Index r, Index c)
    {
      eigen_assert(r==0 || c==0);
      return fillrand(IsColVector ? r : c);
    }

    /** \internal \deprecated use insert(Index) */
    EIGEN_DEPRECATED Scalar& fillrand(Index i)
    {
      return insert(i);
    }

    /** \internal \deprecated use finalize() */
    EIGEN_DEPRECATED void endFill() {}

    // These two functions were here in the 3.1 release, so let's keep them in case some code relies on them.
    /** \internal \deprecated use data() */
    EIGEN_DEPRECATED Storage& _data() { return m_data; }
    /** \internal \deprecated use data() */
    EIGEN_DEPRECATED const Storage& _data() const { return m_data; }

#   ifdef EIGEN_SPARSEVECTOR_PLUGIN
#     include EIGEN_SPARSEVECTOR_PLUGIN
#   endif

protected:

    static void check_template_parameters()
    {
      EIGEN_STATIC_ASSERT(NumTraits<StorageIndex>::IsSigned,THE_INDEX_TYPE_MUST_BE_A_SIGNED_TYPE);
      EIGEN_STATIC_ASSERT((_Options&(ColMajor|RowMajor))==Options,INVALID_MATRIX_TEMPLATE_PARAMETERS);
    }

    Storage m_data;
    Index m_size;
};

namespace internal {

template<typename _Scalar, int _Options, typename _Index>
struct evaluator<SparseVector<_Scalar,_Options,_Index> >
  : evaluator_base<SparseVector<_Scalar,_Options,_Index> >
{
  typedef SparseVector<_Scalar,_Options,_Index> SparseVectorType;
  typedef evaluator_base<SparseVectorType> Base;
  typedef typename SparseVectorType::InnerIterator InnerIterator;
  typedef typename SparseVectorType::ReverseInnerIterator ReverseInnerIterator;

  enum {
    CoeffReadCost = NumTraits<_Scalar>::ReadCost,
    Flags = SparseVectorType::Flags
  };

  evaluator() : Base() {}

  explicit evaluator(const SparseVectorType &mat) : m_matrix(&mat)
  {
    EIGEN_INTERNAL_CHECK_COST_VALUE(CoeffReadCost);
  }

  inline Index nonZerosEstimate() const {
    return m_matrix->nonZeros();
  }

  operator SparseVectorType&() { return m_matrix->const_cast_derived(); }
  operator const SparseVectorType&() const { return *m_matrix; }

  const SparseVectorType *m_matrix;
};

template< typename Dest, typename Src>
struct sparse_vector_assign_selector<Dest,Src,SVA_Inner> {
  static void run(Dest& dst, const Src& src) {
    eigen_internal_assert(src.innerSize()==src.size());
    typedef internal::evaluator<Src> SrcEvaluatorType;
    SrcEvaluatorType srcEval(src);
    for(typename SrcEvaluatorType::InnerIterator it(srcEval, 0); it; ++it)
      dst.insert(it.index()) = it.value();
  }
};

template< typename Dest, typename Src>
struct sparse_vector_assign_selector<Dest,Src,SVA_Outer> {
  static void run(Dest& dst, const Src& src) {
    eigen_internal_assert(src.outerSize()==src.size());
    typedef internal::evaluator<Src> SrcEvaluatorType;
    SrcEvaluatorType srcEval(src);
    for(Index i=0; i<src.size(); ++i)
    {
      typename SrcEvaluatorType::InnerIterator it(srcEval, i);
      if(it)
        dst.insert(i) = it.value();
    }
  }
};

template< typename Dest, typename Src>
struct sparse_vector_assign_selector<Dest,Src,SVA_RuntimeSwitch> {
  static void run(Dest& dst, const Src& src) {
    if(src.outerSize()==1)  sparse_vector_assign_selector<Dest,Src,SVA_Inner>::run(dst, src);
    else                    sparse_vector_assign_selector<Dest,Src,SVA_Outer>::run(dst, src);
  }
};

}

} // end namespace Eigen

#endif // EIGEN_SPARSEVECTOR_H