// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2008 Gael Guennebaud <gael.guennebaud@inria.fr>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.

#ifndef EIGEN_AMBIVECTOR_H
#define EIGEN_AMBIVECTOR_H

namespace Eigen {

namespace internal {

/** \internal
  * Hybrid sparse/dense vector class designed for intensive read-write operations.
  *
  * See BasicSparseLLT and SparseProduct for usage examples,
  * and the usage sketch right after the class definition below.
  */
template<typename _Scalar, typename _StorageIndex>
class AmbiVector
{
  public:
    typedef _Scalar Scalar;
    typedef _StorageIndex StorageIndex;
    typedef typename NumTraits<Scalar>::Real RealScalar;

    explicit AmbiVector(Index size)
      : m_buffer(0), m_zero(0), m_size(0), m_allocatedSize(0), m_allocatedElements(0), m_mode(-1)
    {
      resize(size);
    }

    void init(double estimatedDensity);
    void init(int mode);

    Index nonZeros() const;

    /** Specifies a sub-vector to work on */
    void setBounds(Index start, Index end) { m_start = convert_index(start); m_end = convert_index(end); }

    void setZero();

    void restart();
    Scalar& coeffRef(Index i);
    Scalar& coeff(Index i);

    class Iterator;

    ~AmbiVector() { delete[] m_buffer; }

    void resize(Index size)
    {
      if (m_allocatedSize < size)
        reallocate(size);
      m_size = convert_index(size);
    }

    StorageIndex size() const { return m_size; }

  protected:
    StorageIndex convert_index(Index idx)
    {
      return internal::convert_index<StorageIndex>(idx);
    }

    void reallocate(Index size)
    {
      // if the size of the matrix is not too large, let's allocate a bit more than needed such
      // that we can handle a dense vector even in sparse mode.
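      // (e.g. with Scalar=double and StorageIndex=int, sizeof(ListEl)==16, so the
      //  small-size branch below allocates 2*size doubles: room for `size` list
      //  elements in sparse mode as well as `size` plain scalars in dense mode)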
      delete[] m_buffer;
      if (size<1000)
      {
        Index allocSize = (size * sizeof(ListEl) + sizeof(Scalar) - 1)/sizeof(Scalar);
        m_allocatedElements = convert_index((allocSize*sizeof(Scalar))/sizeof(ListEl));
        m_buffer = new Scalar[allocSize];
      }
      else
      {
        m_allocatedElements = convert_index((size*sizeof(Scalar))/sizeof(ListEl));
        m_buffer = new Scalar[size];
      }
      m_size = convert_index(size);
      m_allocatedSize = m_size; // record the capacity so that resize() can skip needless reallocations
      m_start = 0;
      m_end = m_size;
    }

    void reallocateSparse()
    {
      Index copyElements = m_allocatedElements;
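      // grow the element pool geometrically (factor 1.5), but never beyond one
      // list element per coordinate of the vector (m_size)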
      m_allocatedElements = (std::min)(StorageIndex(m_allocatedElements*1.5),m_size);
      Index allocSize = m_allocatedElements * sizeof(ListEl);
      allocSize = (allocSize + sizeof(Scalar) - 1)/sizeof(Scalar);
      Scalar* newBuffer = new Scalar[allocSize];
      memcpy(newBuffer, m_buffer, copyElements * sizeof(ListEl));
      delete[] m_buffer;
      m_buffer = newBuffer;
    }

  protected:
    // element type of the linked list
    struct ListEl
    {
      StorageIndex next;
      StorageIndex index;
      Scalar value;
    };

    // used to store data in both modes
    Scalar* m_buffer;
    Scalar m_zero;
    StorageIndex m_size;
    StorageIndex m_start;
    StorageIndex m_end;
    StorageIndex m_allocatedSize;
    StorageIndex m_allocatedElements;
    StorageIndex m_mode;

    // linked list mode
    StorageIndex m_llStart;
    StorageIndex m_llCurrent;
    StorageIndex m_llSize;
};
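
// A minimal usage sketch, assuming sparse mode; `rows`, `eps`, and `use` are
// made-up names for illustration, not part of this file:
//
//   AmbiVector<double,int> v(rows);
//   v.init(0.01);                  // estimated density <= 10% -> sparse mode
//   v.setZero();
//   v.restart();                   // reset the write cursor
//   v.coeffRef(2) += 0.5;          // accumulate coefficients with
//   v.coeffRef(7) += 1.0;          // non-decreasing indices
//   for (AmbiVector<double,int>::Iterator it(v, eps); it; ++it)
//     use(it.index(), it.value()); // consume the nonzeros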

/** \returns the number of non zeros in the current sub-vector */
template<typename _Scalar,typename _StorageIndex>
Index AmbiVector<_Scalar,_StorageIndex>::nonZeros() const
{
  if (m_mode==IsSparse)
    return m_llSize;
  else
    return m_end - m_start;
}

template<typename _Scalar,typename _StorageIndex>
void AmbiVector<_Scalar,_StorageIndex>::init(double estimatedDensity)
{
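  // heuristic threshold: a vector denser than ~10% is assumed to be cheaper
  // to process as a plain dense buffer than as a linked list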
  if (estimatedDensity>0.1)
    init(IsDense);
  else
    init(IsSparse);
}

template<typename _Scalar,typename _StorageIndex>
void AmbiVector<_Scalar,_StorageIndex>::init(int mode)
{
  m_mode = mode;
  if (m_mode==IsSparse)
  {
    m_llSize = 0;
    m_llStart = -1;
  }
}

/** Must be called whenever we might perform a write access
  * with an index smaller than the previous one.
  *
  * Don't worry, this function is extremely cheap.
  */
template<typename _Scalar,typename _StorageIndex>
void AmbiVector<_Scalar,_StorageIndex>::restart()
{
  m_llCurrent = m_llStart;
}

/** Sets all coefficients of the current subvector to zero */
template<typename _Scalar,typename _StorageIndex>
void AmbiVector<_Scalar,_StorageIndex>::setZero()
{
  if (m_mode==IsDense)
  {
    for (Index i=m_start; i<m_end; ++i)
      m_buffer[i] = Scalar(0);
  }
  else
  {
    eigen_assert(m_mode==IsSparse);
    m_llSize = 0;
    m_llStart = -1;
  }
}

template<typename _Scalar,typename _StorageIndex>
_Scalar& AmbiVector<_Scalar,_StorageIndex>::coeffRef(Index i)
{
  if (m_mode==IsDense)
    return m_buffer[i];
  else
  {
    ListEl* EIGEN_RESTRICT llElements = reinterpret_cast<ListEl*>(m_buffer);
    // TODO factorize the following code to reduce code generation
    eigen_assert(m_mode==IsSparse);
    if (m_llSize==0)
    {
      // this is the first element
      m_llStart = 0;
      m_llCurrent = 0;
      ++m_llSize;
      llElements[0].value = Scalar(0);
      llElements[0].index = convert_index(i);
      llElements[0].next = -1;
      return llElements[0].value;
    }
    else if (i<llElements[m_llStart].index)
    {
      if (m_llSize>=m_allocatedElements)
      {
        // mirror the overflow check of the generic insertion path below
        reallocateSparse();
        llElements = reinterpret_cast<ListEl*>(m_buffer);
      }
      // this is going to be the new first element of the list
      ListEl& el = llElements[m_llSize];
      el.value = Scalar(0);
      el.index = convert_index(i);
      el.next = m_llStart;
      m_llStart = m_llSize;
      ++m_llSize;
      m_llCurrent = m_llStart;
      return el.value;
    }
    else
    {
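      // search forward from the cached position m_llCurrent; restart() must have
      // been called first if i could be smaller than the last written index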
      StorageIndex nextel = llElements[m_llCurrent].next;
      eigen_assert(i>=llElements[m_llCurrent].index && "you must call restart() before inserting an element with lower or equal index");
      while (nextel >= 0 && llElements[nextel].index<=i)
      {
        m_llCurrent = nextel;
        nextel = llElements[nextel].next;
      }

      if (llElements[m_llCurrent].index==i)
      {
        // the coefficient already exists and we found it!
        return llElements[m_llCurrent].value;
      }
      else
      {
        if (m_llSize>=m_allocatedElements)
        {
          reallocateSparse();
          llElements = reinterpret_cast<ListEl*>(m_buffer);
        }
        eigen_internal_assert(m_llSize<m_allocatedElements && "internal error: overflow in sparse mode");
        // let's insert a new coefficient
        ListEl& el = llElements[m_llSize];
        el.value = Scalar(0);
        el.index = convert_index(i);
        el.next = llElements[m_llCurrent].next;
        llElements[m_llCurrent].next = m_llSize;
        ++m_llSize;
        return el.value;
      }
    }
  }
}
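
// Illustrative write pattern (`v`, `a`, `b`, `c` are made up): between two
// calls to restart(), coeffRef() must be fed non-decreasing indices:
//
//   v.restart();
//   v.coeffRef(2) += a;   // ok
//   v.coeffRef(7) += b;   // ok, 7 >= 2
//   v.restart();          // required before going back to a smaller index
//   v.coeffRef(3) += c;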

template<typename _Scalar,typename _StorageIndex>
_Scalar& AmbiVector<_Scalar,_StorageIndex>::coeff(Index i)
{
  if (m_mode==IsDense)
    return m_buffer[i];
  else
  {
    ListEl* EIGEN_RESTRICT llElements = reinterpret_cast<ListEl*>(m_buffer);
    eigen_assert(m_mode==IsSparse);
    if ((m_llSize==0) || (i<llElements[m_llStart].index))
    {
      return m_zero;
    }
    else
    {
      Index elid = m_llStart;
      while (elid >= 0 && llElements[elid].index<i)
        elid = llElements[elid].next;

      if (elid >= 0 && llElements[elid].index==i)
        return llElements[elid].value;
      else
        return m_zero;
    }
  }
}

/** Iterator over the nonzero coefficients */
template<typename _Scalar,typename _StorageIndex>
class AmbiVector<_Scalar,_StorageIndex>::Iterator
{
  public:
    typedef _Scalar Scalar;
    typedef typename NumTraits<Scalar>::Real RealScalar;

    /** Constructor
      * \param vec the vector on which we iterate
      * \param epsilon the minimal value used to prune zero coefficients.
      * In practice, all coefficients having a magnitude smaller than \a epsilon
      * are skipped.
      */
    explicit Iterator(const AmbiVector& vec, const RealScalar& epsilon = 0)
      : m_vector(vec)
    {
      using std::abs;
      m_epsilon = epsilon;
      m_isDense = m_vector.m_mode==IsDense;
      if (m_isDense)
      {
        m_currentEl = 0;   // this is to avoid a compilation warning
        m_cachedValue = 0; // this is to avoid a compilation warning
        m_cachedIndex = m_vector.m_start-1;
        ++(*this);
      }
      else
      {
        ListEl* EIGEN_RESTRICT llElements = reinterpret_cast<ListEl*>(m_vector.m_buffer);
        m_currentEl = m_vector.m_llStart;
        while (m_currentEl>=0 && abs(llElements[m_currentEl].value)<=m_epsilon)
          m_currentEl = llElements[m_currentEl].next;
        if (m_currentEl<0)
        {
          m_cachedValue = 0; // this is to avoid a compilation warning
          m_cachedIndex = -1;
        }
        else
        {
          m_cachedIndex = llElements[m_currentEl].index;
          m_cachedValue = llElements[m_currentEl].value;
        }
      }
    }

    StorageIndex index() const { return m_cachedIndex; }
    Scalar value() const { return m_cachedValue; }

    operator bool() const { return m_cachedIndex>=0; }

    Iterator& operator++()
    {
      using std::abs;
      if (m_isDense)
      {
        do {
          ++m_cachedIndex;
        } while (m_cachedIndex<m_vector.m_end && abs(m_vector.m_buffer[m_cachedIndex])<=m_epsilon);
        if (m_cachedIndex<m_vector.m_end)
          m_cachedValue = m_vector.m_buffer[m_cachedIndex];
        else
          m_cachedIndex = -1;
      }
      else
      {
        ListEl* EIGEN_RESTRICT llElements = reinterpret_cast<ListEl*>(m_vector.m_buffer);
        do {
          m_currentEl = llElements[m_currentEl].next;
        } while (m_currentEl>=0 && abs(llElements[m_currentEl].value)<=m_epsilon);
        if (m_currentEl<0)
        {
          m_cachedIndex = -1;
        }
        else
        {
          m_cachedIndex = llElements[m_currentEl].index;
          m_cachedValue = llElements[m_currentEl].value;
        }
      }
      return *this;
    }

  protected:
    const AmbiVector& m_vector; // the target vector
    StorageIndex m_currentEl;   // the current element in sparse/linked-list mode
    RealScalar m_epsilon;       // epsilon used to prune zero coefficients
    StorageIndex m_cachedIndex; // current coordinate
    Scalar m_cachedValue;       // current value
    bool m_isDense;             // mode of the vector
};
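
// Typical traversal sketch: copy the accumulated vector into column `col` of a
// sparse matrix `res`, pruning entries with magnitude below `epsilon` (all
// three names are illustrative):
//
//   for (typename AmbiVector<Scalar,StorageIndex>::Iterator it(vec, epsilon); it; ++it)
//     res.insertBackByOuterInner(col, it.index()) = it.value();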

} // end namespace internal

} // end namespace Eigen

#endif // EIGEN_AMBIVECTOR_H