// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2008-2009 Gael Guennebaud <gael.guennebaud@inria.fr>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.

#ifndef EIGEN_SELFADJOINT_MATRIX_VECTOR_H
#define EIGEN_SELFADJOINT_MATRIX_VECTOR_H

namespace Eigen {

namespace internal {

/* Optimized selfadjoint matrix * vector product:
 * This algorithm processes 2 columns at once, which both halves the number
 * of loads/stores of the result and reduces instruction dependencies.
 */
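
/* For reference, the operation computed here is res += alpha * A * x, with A
 * selfadjoint and only one triangle of A stored. A hypothetical, unoptimized
 * scalar equivalent (assuming column-major storage with the Lower triangle
 * stored, and no conjugation flags) could look like:
 *
 *   for (Index j=0; j<size; ++j)
 *   {
 *     res[j] += alpha * numext::real(lhs[j*lhsStride+j]) * rhs[j];    // real diagonal
 *     for (Index i=j+1; i<size; ++i)
 *     {
 *       res[i] += alpha * lhs[j*lhsStride+i] * rhs[j];                // stored coeff A(i,j)
 *       res[j] += alpha * numext::conj(lhs[j*lhsStride+i]) * rhs[i];  // mirrored coeff A(j,i)
 *     }
 *   }
 *
 * The kernel below fuses two such columns per iteration and vectorizes the
 * inner loop with packets.
 */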

template<typename Scalar, typename Index, int StorageOrder, int UpLo, bool ConjugateLhs, bool ConjugateRhs, int Version=Specialized>
struct selfadjoint_matrix_vector_product;

template<typename Scalar, typename Index, int StorageOrder, int UpLo, bool ConjugateLhs, bool ConjugateRhs, int Version>
struct selfadjoint_matrix_vector_product
{
  static EIGEN_DONT_INLINE EIGEN_DEVICE_FUNC
  void run(
    Index size,
    const Scalar* lhs, Index lhsStride,
    const Scalar* rhs,
    Scalar* res,
    Scalar alpha);
};

template<typename Scalar, typename Index, int StorageOrder, int UpLo, bool ConjugateLhs, bool ConjugateRhs, int Version>
EIGEN_DONT_INLINE EIGEN_DEVICE_FUNC
void selfadjoint_matrix_vector_product<Scalar,Index,StorageOrder,UpLo,ConjugateLhs,ConjugateRhs,Version>::run(
  Index size,
  const Scalar* lhs, Index lhsStride,
  const Scalar* rhs,
  Scalar* res,
  Scalar alpha)
{
  typedef typename packet_traits<Scalar>::type Packet;
  typedef typename NumTraits<Scalar>::Real RealScalar;
  const Index PacketSize = sizeof(Packet)/sizeof(Scalar);

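  // FirstTriangular is set when the stored part of a column (resp. row for
  // row-major) precedes the diagonal, i.e. for column-major Upper or
  // row-major Lower storage; the traversal bounds below depend on it.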
  enum {
    IsRowMajor = StorageOrder==RowMajor ? 1 : 0,
    IsLower = UpLo == Lower ? 1 : 0,
    FirstTriangular = IsRowMajor == IsLower
  };

  conj_helper<Scalar,Scalar,NumTraits<Scalar>::IsComplex && EIGEN_LOGICAL_XOR(ConjugateLhs, IsRowMajor), ConjugateRhs> cj0;
  conj_helper<Scalar,Scalar,NumTraits<Scalar>::IsComplex && EIGEN_LOGICAL_XOR(ConjugateLhs, !IsRowMajor), ConjugateRhs> cj1;
  conj_helper<RealScalar,Scalar,false, ConjugateRhs> cjd;

  conj_helper<Packet,Packet,NumTraits<Scalar>::IsComplex && EIGEN_LOGICAL_XOR(ConjugateLhs, IsRowMajor), ConjugateRhs> pcj0;
  conj_helper<Packet,Packet,NumTraits<Scalar>::IsComplex && EIGEN_LOGICAL_XOR(ConjugateLhs, !IsRowMajor), ConjugateRhs> pcj1;

  Scalar cjAlpha = ConjugateRhs ? numext::conj(alpha) : alpha;

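  // 'bound' delimits the part processed two columns at a time: max(0, size-8)
  // rounded down to an even number, leaving a small tail for the scalar loop.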
  Index bound = numext::maxi(Index(0), size-8) & 0xfffffffe;
  if (FirstTriangular)
    bound = size - bound;

  for (Index j=FirstTriangular ? bound : 0;
       j<(FirstTriangular ? size : bound);j+=2)
  {
    const Scalar* EIGEN_RESTRICT A0 = lhs + j*lhsStride;
    const Scalar* EIGEN_RESTRICT A1 = lhs + (j+1)*lhsStride;

    Scalar t0 = cjAlpha * rhs[j];
    Packet ptmp0 = pset1<Packet>(t0);
    Scalar t1 = cjAlpha * rhs[j+1];
    Packet ptmp1 = pset1<Packet>(t1);

    Scalar t2(0);
    Packet ptmp2 = pset1<Packet>(t2);
    Scalar t3(0);
    Packet ptmp3 = pset1<Packet>(t3);

    Index starti = FirstTriangular ? 0 : j+2;
    Index endi   = FirstTriangular ? j : size;
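    // split [starti,endi) into a scalar prologue, an aligned vectorized body,
    // and a scalar epilogue, based on the alignment of res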
    Index alignedStart = (starti) + internal::first_default_aligned(&res[starti], endi-starti);
    Index alignedEnd = alignedStart + ((endi-alignedStart)/(PacketSize))*(PacketSize);

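    // handle the 2x2 diagonal block: the diagonal of a selfadjoint matrix is
    // real, and only one of the block's two off-diagonal coefficients is
    // stored; the other one is its conjugate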
    res[j]   += cjd.pmul(numext::real(A0[j]), t0);
    res[j+1] += cjd.pmul(numext::real(A1[j+1]), t1);
    if(FirstTriangular)
    {
      res[j] += cj0.pmul(A1[j], t1);
      t3 += cj1.pmul(A1[j], rhs[j]);
    }
    else
    {
      res[j+1] += cj0.pmul(A0[j+1],t0);
      t2 += cj1.pmul(A0[j+1], rhs[j+1]);
    }

    for (Index i=starti; i<alignedStart; ++i)
    {
      res[i] += cj0.pmul(A0[i], t0) + cj0.pmul(A1[i],t1);
      t2 += cj1.pmul(A0[i], rhs[i]);
      t3 += cj1.pmul(A1[i], rhs[i]);
    }
    // Yes, this is an optimization for gcc 4.3 and 4.4 (=> huge speedup);
    // gcc 4.2 performs this optimization automatically.
    const Scalar* EIGEN_RESTRICT a0It  = A0  + alignedStart;
    const Scalar* EIGEN_RESTRICT a1It  = A1  + alignedStart;
    const Scalar* EIGEN_RESTRICT rhsIt = rhs + alignedStart;
          Scalar* EIGEN_RESTRICT resIt = res + alignedStart;
    for (Index i=alignedStart; i<alignedEnd; i+=PacketSize)
    {
      Packet A0i = ploadu<Packet>(a0It);  a0It  += PacketSize;
      Packet A1i = ploadu<Packet>(a1It);  a1It  += PacketSize;
      Packet Bi  = ploadu<Packet>(rhsIt); rhsIt += PacketSize; // FIXME should be aligned in most cases
      Packet Xi  = pload <Packet>(resIt);

      Xi    = pcj0.pmadd(A0i,ptmp0, pcj0.pmadd(A1i,ptmp1,Xi));
      ptmp2 = pcj1.pmadd(A0i, Bi, ptmp2);
      ptmp3 = pcj1.pmadd(A1i, Bi, ptmp3);
      pstore(resIt,Xi); resIt += PacketSize;
    }
    for (Index i=alignedEnd; i<endi; i++)
    {
      res[i] += cj0.pmul(A0[i], t0) + cj0.pmul(A1[i],t1);
      t2 += cj1.pmul(A0[i], rhs[i]);
      t3 += cj1.pmul(A1[i], rhs[i]);
    }

    res[j]   += alpha * (t2 + predux(ptmp2));
    res[j+1] += alpha * (t3 + predux(ptmp3));
  }
  for (Index j=FirstTriangular ? 0 : bound;j<(FirstTriangular ? bound : size);j++)
  {
    const Scalar* EIGEN_RESTRICT A0 = lhs + j*lhsStride;

    Scalar t1 = cjAlpha * rhs[j];
    Scalar t2(0);
    res[j] += cjd.pmul(numext::real(A0[j]), t1);
    for (Index i=FirstTriangular ? 0 : j+1; i<(FirstTriangular ? j : size); i++)
    {
      res[i] += cj0.pmul(A0[i], t1);
      t2 += cj1.pmul(A0[i], rhs[i]);
    }
    res[j] += alpha * t2;
  }
}
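
// Illustrative only: a hypothetical direct call of the kernel above for a
// column-major float matrix with its Lower triangle stored, computing
// res += alpha * A * x (none of these variable names exist in Eigen):
//
//   selfadjoint_matrix_vector_product<float, Eigen::Index, ColMajor, Lower, false, false>
//     ::run(n, A_data, lda, x_data, res_data, alpha);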

} // end namespace internal

/***************************************************************************
* Wrapper to selfadjoint_matrix_vector_product
***************************************************************************/

namespace internal {

template<typename Lhs, int LhsMode, typename Rhs>
struct selfadjoint_product_impl<Lhs,LhsMode,false,Rhs,0,true>
{
  typedef typename Product<Lhs,Rhs>::Scalar Scalar;

  typedef internal::blas_traits<Lhs> LhsBlasTraits;
  typedef typename LhsBlasTraits::DirectLinearAccessType ActualLhsType;
  typedef typename internal::remove_all<ActualLhsType>::type ActualLhsTypeCleaned;

  typedef internal::blas_traits<Rhs> RhsBlasTraits;
  typedef typename RhsBlasTraits::DirectLinearAccessType ActualRhsType;
  typedef typename internal::remove_all<ActualRhsType>::type ActualRhsTypeCleaned;

  enum { LhsUpLo = LhsMode&(Upper|Lower) };

  template<typename Dest>
  static EIGEN_DEVICE_FUNC
  void run(Dest& dest, const Lhs &a_lhs, const Rhs &a_rhs, const Scalar& alpha)
  {
    typedef typename Dest::Scalar ResScalar;
    typedef typename Rhs::Scalar RhsScalar;
    typedef Map<Matrix<ResScalar,Dynamic,1>, EIGEN_PLAIN_ENUM_MIN(AlignedMax,internal::packet_traits<ResScalar>::size)> MappedDest;

    eigen_assert(dest.rows()==a_lhs.rows() && dest.cols()==a_rhs.cols());

    typename internal::add_const_on_value_type<ActualLhsType>::type lhs = LhsBlasTraits::extract(a_lhs);
    typename internal::add_const_on_value_type<ActualRhsType>::type rhs = RhsBlasTraits::extract(a_rhs);

    Scalar actualAlpha = alpha * LhsBlasTraits::extractScalarFactor(a_lhs)
                               * RhsBlasTraits::extractScalarFactor(a_rhs);

    enum {
      EvalToDest = (Dest::InnerStrideAtCompileTime==1),
      UseRhs = (ActualRhsTypeCleaned::InnerStrideAtCompileTime==1)
    };
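
    // the kernel expects contiguous arrays: if dest or rhs has a non-unit
    // inner stride, it is evaluated into an aligned temporary buffer first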
    internal::gemv_static_vector_if<ResScalar,Dest::SizeAtCompileTime,Dest::MaxSizeAtCompileTime,!EvalToDest> static_dest;
    internal::gemv_static_vector_if<RhsScalar,ActualRhsTypeCleaned::SizeAtCompileTime,ActualRhsTypeCleaned::MaxSizeAtCompileTime,!UseRhs> static_rhs;

    ei_declare_aligned_stack_constructed_variable(ResScalar,actualDestPtr,dest.size(),
                                                  EvalToDest ? dest.data() : static_dest.data());

    ei_declare_aligned_stack_constructed_variable(RhsScalar,actualRhsPtr,rhs.size(),
                                                  UseRhs ? const_cast<RhsScalar*>(rhs.data()) : static_rhs.data());

    if(!EvalToDest)
    {
      #ifdef EIGEN_DENSE_STORAGE_CTOR_PLUGIN
      Index size = dest.size();
      EIGEN_DENSE_STORAGE_CTOR_PLUGIN
      #endif
      MappedDest(actualDestPtr, dest.size()) = dest;
    }

    if(!UseRhs)
    {
      #ifdef EIGEN_DENSE_STORAGE_CTOR_PLUGIN
      Index size = rhs.size();
      EIGEN_DENSE_STORAGE_CTOR_PLUGIN
      #endif
      Map<typename ActualRhsTypeCleaned::PlainObject>(actualRhsPtr, rhs.size()) = rhs;
    }

    internal::selfadjoint_matrix_vector_product<Scalar, Index, (internal::traits<ActualLhsTypeCleaned>::Flags&RowMajorBit) ? RowMajor : ColMajor,
                                                int(LhsUpLo), bool(LhsBlasTraits::NeedToConjugate), bool(RhsBlasTraits::NeedToConjugate)>::run
      (
        lhs.rows(),                             // size
        &lhs.coeffRef(0,0), lhs.outerStride(),  // lhs info
        actualRhsPtr,                           // rhs info
        actualDestPtr,                          // result info
        actualAlpha                             // scale factor
      );

    if(!EvalToDest)
      dest = MappedDest(actualDestPtr, dest.size());
  }
};

template<typename Lhs, typename Rhs, int RhsMode>
struct selfadjoint_product_impl<Lhs,0,true,Rhs,RhsMode,false>
{
  typedef typename Product<Lhs,Rhs>::Scalar Scalar;
  enum { RhsUpLo = RhsMode&(Upper|Lower) };

  template<typename Dest>
  static void run(Dest& dest, const Lhs &a_lhs, const Rhs &a_rhs, const Scalar& alpha)
  {
    // let's simply transpose the product
    Transpose<Dest> destT(dest);
    selfadjoint_product_impl<Transpose<const Rhs>, int(RhsUpLo)==Upper ? Lower : Upper, false,
                             Transpose<const Lhs>, 0, true>::run(destT, a_rhs.transpose(), a_lhs.transpose(), alpha);
  }
};
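
// Illustrative only: these wrappers are what a user-level expression such as
//
//   y.noalias() += alpha * A.selfadjointView<Eigen::Lower>() * x;
//
// (A, x, y and alpha being hypothetical names for a square matrix, two
// vectors and a scalar) eventually dispatches to.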

} // end namespace internal

} // end namespace Eigen

#endif // EIGEN_SELFADJOINT_MATRIX_VECTOR_H