// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2008-2009 Gael Guennebaud <gael.guennebaud@inria.fr>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.

#ifndef EIGEN_SELFADJOINT_MATRIX_VECTOR_H
#define EIGEN_SELFADJOINT_MATRIX_VECTOR_H

namespace Eigen {

namespace internal {

/* Optimized selfadjoint matrix * vector product:
 * This algorithm processes 2 columns at once, which both halves
 * the number of loads/stores of the result and shortens the
 * instruction dependency chain.
 */
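
// Illustrative sketch only (the real kernel below also handles conjugation
// flags, packet vectorization, and alignment): for each column pair (j, j+1)
// a single pass over the active triangle performs both an axpy-like update
// and a dot-product accumulation,
//   res[i] += alpha * (A(i,j) * rhs[j] + A(i,j+1) * rhs[j+1]);
//   t2     += conj(A(i,j))   * rhs[i];
//   t3     += conj(A(i,j+1)) * rhs[i];
// so that, thanks to selfadjointness, each triangular coefficient of A is
// loaded only once per pass.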

template<typename Scalar, typename Index, int StorageOrder, int UpLo, bool ConjugateLhs, bool ConjugateRhs, int Version=Specialized>
struct selfadjoint_matrix_vector_product;

template<typename Scalar, typename Index, int StorageOrder, int UpLo, bool ConjugateLhs, bool ConjugateRhs, int Version>
struct selfadjoint_matrix_vector_product
{
  static EIGEN_DONT_INLINE void run(
    Index size,
    const Scalar*  lhs, Index lhsStride,
    const Scalar* _rhs, Index rhsIncr,
    Scalar* res,
    Scalar alpha);
};

template<typename Scalar, typename Index, int StorageOrder, int UpLo, bool ConjugateLhs, bool ConjugateRhs, int Version>
EIGEN_DONT_INLINE void selfadjoint_matrix_vector_product<Scalar,Index,StorageOrder,UpLo,ConjugateLhs,ConjugateRhs,Version>::run(
  Index size,
  const Scalar*  lhs, Index lhsStride,
  const Scalar* _rhs, Index rhsIncr,
  Scalar* res,
  Scalar alpha)
{
  typedef typename packet_traits<Scalar>::type Packet;
  const Index PacketSize = sizeof(Packet)/sizeof(Scalar);

  enum {
    IsRowMajor = StorageOrder==RowMajor ? 1 : 0,
    IsLower = UpLo == Lower ? 1 : 0,
    FirstTriangular = IsRowMajor == IsLower
  };
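  // Note: FirstTriangular is set when the storage order and the stored
  // triangle combine such that, seen as column-major data, the upper
  // triangle is stored (col-major/Upper or row-major/Lower); it selects
  // which end of the matrix the two-column loop below starts from.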

  conj_helper<Scalar,Scalar,NumTraits<Scalar>::IsComplex && EIGEN_LOGICAL_XOR(ConjugateLhs,  IsRowMajor), ConjugateRhs> cj0;
  conj_helper<Scalar,Scalar,NumTraits<Scalar>::IsComplex && EIGEN_LOGICAL_XOR(ConjugateLhs, !IsRowMajor), ConjugateRhs> cj1;
  conj_helper<Scalar,Scalar,NumTraits<Scalar>::IsComplex, ConjugateRhs> cjd;

  conj_helper<Packet,Packet,NumTraits<Scalar>::IsComplex && EIGEN_LOGICAL_XOR(ConjugateLhs,  IsRowMajor), ConjugateRhs> pcj0;
  conj_helper<Packet,Packet,NumTraits<Scalar>::IsComplex && EIGEN_LOGICAL_XOR(ConjugateLhs, !IsRowMajor), ConjugateRhs> pcj1;

  Scalar cjAlpha = ConjugateRhs ? numext::conj(alpha) : alpha;

  // FIXME this copy is now handled outside product_selfadjoint_vector, so it could probably be removed.
  // If the rhs is not sequentially stored in memory, we copy it to a temporary buffer,
  // because we need to extract packets from it.
  ei_declare_aligned_stack_constructed_variable(Scalar,rhs,size,rhsIncr==1 ? const_cast<Scalar*>(_rhs) : 0);
  if (rhsIncr!=1)
  {
    const Scalar* it = _rhs;
    for (Index i=0; i<size; ++i, it+=rhsIncr)
      rhs[i] = *it;
  }

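  // Process the bulk of the triangle two columns at a time: 'bound' is the
  // number of paired columns, rounded down to an even count (the
  // '& 0xfffffffe' clears the last bit); the remaining columns (at least 8,
  // or all of them for small sizes) are handled by the scalar loop at the
  // end of this function.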
  Index bound = (std::max)(Index(0),size-8) & 0xfffffffe;
  if (FirstTriangular)
    bound = size - bound;

  for (Index j=FirstTriangular ? bound : 0;
       j<(FirstTriangular ? size : bound); j+=2)
  {
    const Scalar* EIGEN_RESTRICT A0 = lhs + j*lhsStride;
    const Scalar* EIGEN_RESTRICT A1 = lhs + (j+1)*lhsStride;

    Scalar t0 = cjAlpha * rhs[j];
    Packet ptmp0 = pset1<Packet>(t0);
    Scalar t1 = cjAlpha * rhs[j+1];
    Packet ptmp1 = pset1<Packet>(t1);

    Scalar t2(0);
    Packet ptmp2 = pset1<Packet>(t2);
    Scalar t3(0);
    Packet ptmp3 = pset1<Packet>(t3);

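    // Compute the row range of the current column segment and trim it to
    // its packet-aligned middle part; the unaligned head [starti,alignedStart)
    // and tail [alignedEnd,endi) are handled by scalar loops below.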
    size_t starti = FirstTriangular ? 0 : j+2;
    size_t endi   = FirstTriangular ? j : size;
    size_t alignedStart = (starti) + internal::first_aligned(&res[starti], endi-starti);
    size_t alignedEnd = alignedStart + ((endi-alignedStart)/(PacketSize))*(PacketSize);

    // TODO make sure this product is a real * complex and that the rhs is properly conjugated if needed
    res[j]   += cjd.pmul(numext::real(A0[j]), t0);
    res[j+1] += cjd.pmul(numext::real(A1[j+1]), t1);
    if(FirstTriangular)
    {
      res[j]   += cj0.pmul(A1[j],   t1);
      t3       += cj1.pmul(A1[j],   rhs[j]);
    }
    else
    {
      res[j+1] += cj0.pmul(A0[j+1], t0);
      t2 += cj1.pmul(A0[j+1], rhs[j+1]);
    }

    for (size_t i=starti; i<alignedStart; ++i)
    {
      res[i] += t0 * A0[i] + t1 * A1[i];
      t2 += numext::conj(A0[i]) * rhs[i];
      t3 += numext::conj(A1[i]) * rhs[i];
    }
    // Yes, this is an optimization for gcc 4.3 and 4.4 (=> huge speed up);
    // gcc 4.2 performs this optimization automatically.
    const Scalar* EIGEN_RESTRICT a0It  = A0  + alignedStart;
    const Scalar* EIGEN_RESTRICT a1It  = A1  + alignedStart;
    const Scalar* EIGEN_RESTRICT rhsIt = rhs + alignedStart;
          Scalar* EIGEN_RESTRICT resIt = res + alignedStart;
    for (size_t i=alignedStart; i<alignedEnd; i+=PacketSize)
    {
      Packet A0i = ploadu<Packet>(a0It);  a0It  += PacketSize;
      Packet A1i = ploadu<Packet>(a1It);  a1It  += PacketSize;
      Packet Bi  = ploadu<Packet>(rhsIt); rhsIt += PacketSize; // FIXME should be aligned in most cases
      Packet Xi  = pload <Packet>(resIt);

      Xi    = pcj0.pmadd(A0i,ptmp0, pcj0.pmadd(A1i,ptmp1,Xi));
      ptmp2 = pcj1.pmadd(A0i,  Bi, ptmp2);
      ptmp3 = pcj1.pmadd(A1i,  Bi, ptmp3);
      pstore(resIt,Xi); resIt += PacketSize;
    }
    for (size_t i=alignedEnd; i<endi; i++)
    {
      res[i] += cj0.pmul(A0[i], t0) + cj0.pmul(A1[i], t1);
      t2 += cj1.pmul(A0[i], rhs[i]);
      t3 += cj1.pmul(A1[i], rhs[i]);
    }

    res[j]   += alpha * (t2 + predux(ptmp2));
    res[j+1] += alpha * (t3 + predux(ptmp3));
  }
  for (Index j=FirstTriangular ? 0 : bound; j<(FirstTriangular ? bound : size); j++)
  {
    const Scalar* EIGEN_RESTRICT A0 = lhs + j*lhsStride;

    Scalar t1 = cjAlpha * rhs[j];
    Scalar t2(0);
    // TODO make sure this product is a real * complex and that the rhs is properly conjugated if needed
    res[j] += cjd.pmul(numext::real(A0[j]), t1);
    for (Index i=FirstTriangular ? 0 : j+1; i<(FirstTriangular ? j : size); i++)
    {
      res[i] += cj0.pmul(A0[i], t1);
      t2 += cj1.pmul(A0[i], rhs[i]);
    }
    res[j] += alpha * t2;
  }
}

} // end namespace internal

/***************************************************************************
* Wrapper to product_selfadjoint_vector
***************************************************************************/
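
// Usage sketch (an assumption based on the public SelfAdjointView API of
// this Eigen generation, not something defined in this file): the wrapper
// below is what an expression such as
//
//   MatrixXf A(n,n);
//   VectorXf x(n), y(n);
//   y.noalias() += alpha * A.selfadjointView<Lower>() * x;
//
// eventually dispatches to, with the kernel above doing the actual work.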

namespace internal {
template<typename Lhs, int LhsMode, typename Rhs>
struct traits<SelfadjointProductMatrix<Lhs,LhsMode,false,Rhs,0,true> >
  : traits<ProductBase<SelfadjointProductMatrix<Lhs,LhsMode,false,Rhs,0,true>, Lhs, Rhs> >
{};
}

template<typename Lhs, int LhsMode, typename Rhs>
struct SelfadjointProductMatrix<Lhs,LhsMode,false,Rhs,0,true>
  : public ProductBase<SelfadjointProductMatrix<Lhs,LhsMode,false,Rhs,0,true>, Lhs, Rhs >
{
  EIGEN_PRODUCT_PUBLIC_INTERFACE(SelfadjointProductMatrix)

  enum {
    LhsUpLo = LhsMode&(Upper|Lower)
  };

  SelfadjointProductMatrix(const Lhs& lhs, const Rhs& rhs) : Base(lhs,rhs) {}

  template<typename Dest> void scaleAndAddTo(Dest& dest, const Scalar& alpha) const
  {
    typedef typename Dest::Scalar ResScalar;
    typedef typename Base::RhsScalar RhsScalar;
    typedef Map<Matrix<ResScalar,Dynamic,1>, Aligned> MappedDest;

    eigen_assert(dest.rows()==m_lhs.rows() && dest.cols()==m_rhs.cols());

    typename internal::add_const_on_value_type<ActualLhsType>::type lhs = LhsBlasTraits::extract(m_lhs);
    typename internal::add_const_on_value_type<ActualRhsType>::type rhs = RhsBlasTraits::extract(m_rhs);

    Scalar actualAlpha = alpha * LhsBlasTraits::extractScalarFactor(m_lhs)
                               * RhsBlasTraits::extractScalarFactor(m_rhs);

    enum {
      EvalToDest = (Dest::InnerStrideAtCompileTime==1),
      UseRhs = (_ActualRhsType::InnerStrideAtCompileTime==1)
    };
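
    // If the destination (resp. the rhs) already has unit inner stride, its
    // memory is used directly; otherwise it is evaluated into an aligned
    // temporary here, and the result is copied back at the end.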
    internal::gemv_static_vector_if<ResScalar,Dest::SizeAtCompileTime,Dest::MaxSizeAtCompileTime,!EvalToDest> static_dest;
    internal::gemv_static_vector_if<RhsScalar,_ActualRhsType::SizeAtCompileTime,_ActualRhsType::MaxSizeAtCompileTime,!UseRhs> static_rhs;

    ei_declare_aligned_stack_constructed_variable(ResScalar,actualDestPtr,dest.size(),
                                                  EvalToDest ? dest.data() : static_dest.data());

    ei_declare_aligned_stack_constructed_variable(RhsScalar,actualRhsPtr,rhs.size(),
        UseRhs ? const_cast<RhsScalar*>(rhs.data()) : static_rhs.data());

    if(!EvalToDest)
    {
      #ifdef EIGEN_DENSE_STORAGE_CTOR_PLUGIN
      int size = dest.size();
      EIGEN_DENSE_STORAGE_CTOR_PLUGIN
      #endif
      MappedDest(actualDestPtr, dest.size()) = dest;
    }

    if(!UseRhs)
    {
      #ifdef EIGEN_DENSE_STORAGE_CTOR_PLUGIN
      int size = rhs.size();
      EIGEN_DENSE_STORAGE_CTOR_PLUGIN
      #endif
      Map<typename _ActualRhsType::PlainObject>(actualRhsPtr, rhs.size()) = rhs;
    }

    internal::selfadjoint_matrix_vector_product<Scalar, Index, (internal::traits<_ActualLhsType>::Flags&RowMajorBit) ? RowMajor : ColMajor, int(LhsUpLo), bool(LhsBlasTraits::NeedToConjugate), bool(RhsBlasTraits::NeedToConjugate)>::run
      (
        lhs.rows(),                             // size
        &lhs.coeffRef(0,0),  lhs.outerStride(), // lhs info
        actualRhsPtr, 1,                        // rhs info
        actualDestPtr,                          // result info
        actualAlpha                             // scale factor
      );

    if(!EvalToDest)
      dest = MappedDest(actualDestPtr, dest.size());
  }
};

namespace internal {
template<typename Lhs, typename Rhs, int RhsMode>
struct traits<SelfadjointProductMatrix<Lhs,0,true,Rhs,RhsMode,false> >
  : traits<ProductBase<SelfadjointProductMatrix<Lhs,0,true,Rhs,RhsMode,false>, Lhs, Rhs> >
{};
}

template<typename Lhs, typename Rhs, int RhsMode>
struct SelfadjointProductMatrix<Lhs,0,true,Rhs,RhsMode,false>
  : public ProductBase<SelfadjointProductMatrix<Lhs,0,true,Rhs,RhsMode,false>, Lhs, Rhs >
{
  EIGEN_PRODUCT_PUBLIC_INTERFACE(SelfadjointProductMatrix)

  enum {
    RhsUpLo = RhsMode&(Upper|Lower)
  };

  SelfadjointProductMatrix(const Lhs& lhs, const Rhs& rhs) : Base(lhs,rhs) {}

  template<typename Dest> void scaleAndAddTo(Dest& dest, const Scalar& alpha) const
  {
    // Let's simply transpose the product:
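    // dest = lhs * S becomes dest^T = S^T * lhs^T, and transposing a
    // selfadjoint expression swaps the stored triangle (Upper <-> Lower),
    // so the selfadjoint-matrix * vector specialization above can be reused.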
    Transpose<Dest> destT(dest);
    SelfadjointProductMatrix<Transpose<const Rhs>, int(RhsUpLo)==Upper ? Lower : Upper, false,
                             Transpose<const Lhs>, 0, true>(m_rhs.transpose(), m_lhs.transpose()).scaleAndAddTo(destT, alpha);
  }
};

} // end namespace Eigen

#endif // EIGEN_SELFADJOINT_MATRIX_VECTOR_H