// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2001 Intel Corporation
// Copyright (C) 2010 Gael Guennebaud <gael.guennebaud@inria.fr>
// Copyright (C) 2009 Benoit Jacob <jacob.benoit.1@gmail.com>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.

// The SSE code for the 4x4 float and double matrix inverse in this file
// comes from the following Intel library:
// http://software.intel.com/en-us/articles/optimized-matrix-library-for-use-with-the-intel-pentiumr-4-processors-sse2-instructions/
//
// Here is the respective copyright and license statement:
//
//   Copyright (c) 2001 Intel Corporation.
//
// Permission is granted to use, copy, distribute and prepare derivative works
// of this library for any purpose and without fee, provided, that the above
// copyright notice and this statement appear in all copies.
// Intel makes no representations about the suitability of this software for
// any purpose, and specifically disclaims all warranties.
// See LEGAL.TXT for all the legal information.

#ifndef EIGEN_INVERSE_SSE_H
#define EIGEN_INVERSE_SSE_H

namespace Eigen {

namespace internal {

template<typename MatrixType, typename ResultType>
struct compute_inverse_size4<Architecture::SSE, float, MatrixType, ResultType>
{
  enum {
    MatrixAlignment     = bool(MatrixType::Flags&AlignedBit),
    ResultAlignment     = bool(ResultType::Flags&AlignedBit),
    StorageOrdersMatch  = (MatrixType::Flags&RowMajorBit) == (ResultType::Flags&RowMajorBit)
  };

  static void run(const MatrixType& matrix, ResultType& result)
  {
    EIGEN_ALIGN16 const unsigned int _Sign_PNNP[4] = { 0x00000000, 0x80000000, 0x80000000, 0x00000000 };

    // Load the full matrix into registers
    __m128 _L1 = matrix.template packet<MatrixAlignment>( 0);
    __m128 _L2 = matrix.template packet<MatrixAlignment>( 4);
    __m128 _L3 = matrix.template packet<MatrixAlignment>( 8);
    __m128 _L4 = matrix.template packet<MatrixAlignment>(12);

    // The inverse is calculated using the "Divide and Conquer" technique. The
    // original matrix is divided into four 2x2 sub-matrices. Since each
    // register holds four matrix elements, each sub-matrix is represented by
    // a single register. Hence we get better locality in the calculations.

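    // In outline (summarizing the step comments below): partition the matrix as
    //
    //     M = [ A  B ]
    //         [ C  D ]
    //
    // and write X# for the adjugate of a 2x2 block X (so that X*X# = |X|*I).
    // The code then forms
    //
    //     iA = A*|D| - B*D#*C,    iB = C*|B| - D*B#*A,
    //     iC = B*|C| - A*C#*D,    iD = D*|A| - C*A#*B,
    //     det = |A|*|D| + |B|*|C| - trace(A#*B*D#*C),
    //
    // and assembles the inverse from the adjugates iA#..iD# divided by det.
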
    __m128 A, B, C, D; // the four sub-matrices
    if(!StorageOrdersMatch)
    {
      A = _mm_unpacklo_ps(_L1, _L2);
      B = _mm_unpacklo_ps(_L3, _L4);
      C = _mm_unpackhi_ps(_L1, _L2);
      D = _mm_unpackhi_ps(_L3, _L4);
    }
    else
    {
      A = _mm_movelh_ps(_L1, _L2);
      B = _mm_movehl_ps(_L2, _L1);
      C = _mm_movelh_ps(_L3, _L4);
      D = _mm_movehl_ps(_L4, _L3);
    }

    __m128 iA, iB, iC, iD,                 // partial inverses of the sub-matrices
            DC, AB;
    __m128 dA, dB, dC, dD;                 // determinants of the sub-matrices
    __m128 det, d, d1, d2;
    __m128 rd;                             // reciprocal of the determinant

    //  AB = A# * B
    AB = _mm_mul_ps(_mm_shuffle_ps(A,A,0x0F), B);
    AB = _mm_sub_ps(AB,_mm_mul_ps(_mm_shuffle_ps(A,A,0xA5), _mm_shuffle_ps(B,B,0x4E)));
    //  DC = D# * C
    DC = _mm_mul_ps(_mm_shuffle_ps(D,D,0x0F), C);
    DC = _mm_sub_ps(DC,_mm_mul_ps(_mm_shuffle_ps(D,D,0xA5), _mm_shuffle_ps(C,C,0x4E)));

    //  dA = |A|
    dA = _mm_mul_ps(_mm_shuffle_ps(A, A, 0x5F),A);
    dA = _mm_sub_ss(dA, _mm_movehl_ps(dA,dA));
    //  dB = |B|
    dB = _mm_mul_ps(_mm_shuffle_ps(B, B, 0x5F),B);
    dB = _mm_sub_ss(dB, _mm_movehl_ps(dB,dB));

    //  dC = |C|
    dC = _mm_mul_ps(_mm_shuffle_ps(C, C, 0x5F),C);
    dC = _mm_sub_ss(dC, _mm_movehl_ps(dC,dC));
    //  dD = |D|
    dD = _mm_mul_ps(_mm_shuffle_ps(D, D, 0x5F),D);
    dD = _mm_sub_ss(dD, _mm_movehl_ps(dD,dD));

    //  d = trace(AB*DC) = trace(A#*B*D#*C)
    d = _mm_mul_ps(_mm_shuffle_ps(DC,DC,0xD8),AB);

    //  iD = C*A#*B
    iD = _mm_mul_ps(_mm_shuffle_ps(C,C,0xA0), _mm_movelh_ps(AB,AB));
    iD = _mm_add_ps(iD,_mm_mul_ps(_mm_shuffle_ps(C,C,0xF5), _mm_movehl_ps(AB,AB)));
    //  iA = B*D#*C
    iA = _mm_mul_ps(_mm_shuffle_ps(B,B,0xA0), _mm_movelh_ps(DC,DC));
    iA = _mm_add_ps(iA,_mm_mul_ps(_mm_shuffle_ps(B,B,0xF5), _mm_movehl_ps(DC,DC)));

    //  d = trace(AB*DC) = trace(A#*B*D#*C) [continued]
    d  = _mm_add_ps(d, _mm_movehl_ps(d, d));
    d  = _mm_add_ss(d, _mm_shuffle_ps(d, d, 1));
    d1 = _mm_mul_ss(dA,dD);
    d2 = _mm_mul_ss(dB,dC);

    //  iD = D*|A| - C*A#*B
    iD = _mm_sub_ps(_mm_mul_ps(D,_mm_shuffle_ps(dA,dA,0)), iD);

    //  iA = A*|D| - B*D#*C;
    iA = _mm_sub_ps(_mm_mul_ps(A,_mm_shuffle_ps(dD,dD,0)), iA);

    //  det = |A|*|D| + |B|*|C| - trace(A#*B*D#*C)
    det = _mm_sub_ss(_mm_add_ss(d1,d2),d);
    rd  = _mm_div_ss(_mm_set_ss(1.0f), det);

//     #ifdef ZERO_SINGULAR
//         rd = _mm_and_ps(_mm_cmpneq_ss(det,_mm_setzero_ps()), rd);
//     #endif

    //  iB = D * (A#B)# = D*B#*A
    iB = _mm_mul_ps(D, _mm_shuffle_ps(AB,AB,0x33));
    iB = _mm_sub_ps(iB, _mm_mul_ps(_mm_shuffle_ps(D,D,0xB1), _mm_shuffle_ps(AB,AB,0x66)));
    //  iC = A * (D#C)# = A*C#*D
    iC = _mm_mul_ps(A, _mm_shuffle_ps(DC,DC,0x33));
    iC = _mm_sub_ps(iC, _mm_mul_ps(_mm_shuffle_ps(A,A,0xB1), _mm_shuffle_ps(DC,DC,0x66)));

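    // Broadcast 1/det to all four lanes and flip the sign of the two middle
    // lanes using _Sign_PNNP, so the multiplications below divide by the
    // determinant and apply a (+,-,-,+) per-lane sign pattern in one step.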
    rd = _mm_shuffle_ps(rd,rd,0);
    rd = _mm_xor_ps(rd, _mm_load_ps((float*)_Sign_PNNP));

    //  iB = C*|B| - D*B#*A
    iB = _mm_sub_ps(_mm_mul_ps(C,_mm_shuffle_ps(dB,dB,0)), iB);

    //  iC = B*|C| - A*C#*D;
    iC = _mm_sub_ps(_mm_mul_ps(B,_mm_shuffle_ps(dC,dC,0)), iC);

    //  iX = iX / det
    iA = _mm_mul_ps(rd,iA);
    iB = _mm_mul_ps(rd,iB);
    iC = _mm_mul_ps(rd,iC);
    iD = _mm_mul_ps(rd,iD);

    result.template writePacket<ResultAlignment>( 0, _mm_shuffle_ps(iA,iB,0x77));
    result.template writePacket<ResultAlignment>( 4, _mm_shuffle_ps(iA,iB,0x22));
    result.template writePacket<ResultAlignment>( 8, _mm_shuffle_ps(iC,iD,0x77));
    result.template writePacket<ResultAlignment>(12, _mm_shuffle_ps(iC,iD,0x22));
  }

};

template<typename MatrixType, typename ResultType>
struct compute_inverse_size4<Architecture::SSE, double, MatrixType, ResultType>
{
  enum {
    MatrixAlignment = bool(MatrixType::Flags&AlignedBit),
    ResultAlignment = bool(ResultType::Flags&AlignedBit),
    StorageOrdersMatch  = (MatrixType::Flags&RowMajorBit) == (ResultType::Flags&RowMajorBit)
  };
  static void run(const MatrixType& matrix, ResultType& result)
  {
    const __m128d _Sign_NP = _mm_castsi128_pd(_mm_set_epi32(0x0,0x0,0x80000000,0x0));
    const __m128d _Sign_PN = _mm_castsi128_pd(_mm_set_epi32(0x80000000,0x0,0x0,0x0));

    // The inverse is calculated using the "Divide and Conquer" technique. The
    // original matrix is divided into four 2x2 sub-matrices. Since each
    // register holds only two matrix elements, each sub-matrix consists of
    // two registers. Hence we get better locality in the calculations.

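    // Each 2x2 sub-matrix is therefore split across a pair of __m128d
    // registers (X1 and X2 each holding two of its coefficients), and the
    // same blockwise formulas as in the float specialization above are
    // applied half a block at a time.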
    // the four sub-matrices
    __m128d A1, A2, B1, B2, C1, C2, D1, D2;

    if(StorageOrdersMatch)
    {
      A1 = matrix.template packet<MatrixAlignment>( 0); B1 = matrix.template packet<MatrixAlignment>( 2);
      A2 = matrix.template packet<MatrixAlignment>( 4); B2 = matrix.template packet<MatrixAlignment>( 6);
      C1 = matrix.template packet<MatrixAlignment>( 8); D1 = matrix.template packet<MatrixAlignment>(10);
      C2 = matrix.template packet<MatrixAlignment>(12); D2 = matrix.template packet<MatrixAlignment>(14);
    }
    else
    {
      __m128d tmp;
      A1 = matrix.template packet<MatrixAlignment>( 0); C1 = matrix.template packet<MatrixAlignment>( 2);
      A2 = matrix.template packet<MatrixAlignment>( 4); C2 = matrix.template packet<MatrixAlignment>( 6);
      tmp = A1;
      A1 = _mm_unpacklo_pd(A1,A2);
      A2 = _mm_unpackhi_pd(tmp,A2);
      tmp = C1;
      C1 = _mm_unpacklo_pd(C1,C2);
      C2 = _mm_unpackhi_pd(tmp,C2);

      B1 = matrix.template packet<MatrixAlignment>( 8); D1 = matrix.template packet<MatrixAlignment>(10);
      B2 = matrix.template packet<MatrixAlignment>(12); D2 = matrix.template packet<MatrixAlignment>(14);
      tmp = B1;
      B1 = _mm_unpacklo_pd(B1,B2);
      B2 = _mm_unpackhi_pd(tmp,B2);
      tmp = D1;
      D1 = _mm_unpacklo_pd(D1,D2);
      D2 = _mm_unpackhi_pd(tmp,D2);
    }

    __m128d iA1, iA2, iB1, iB2, iC1, iC2, iD1, iD2,     // partial inverses of the sub-matrices
            DC1, DC2, AB1, AB2;
    __m128d dA, dB, dC, dD;     // determinants of the sub-matrices
    __m128d det, d1, d2, rd;

    //  dA = |A|
    dA = _mm_shuffle_pd(A2, A2, 1);
    dA = _mm_mul_pd(A1, dA);
    dA = _mm_sub_sd(dA, _mm_shuffle_pd(dA,dA,3));
    //  dB = |B|
    dB = _mm_shuffle_pd(B2, B2, 1);
    dB = _mm_mul_pd(B1, dB);
    dB = _mm_sub_sd(dB, _mm_shuffle_pd(dB,dB,3));

    //  AB = A# * B
    AB1 = _mm_mul_pd(B1, _mm_shuffle_pd(A2,A2,3));
    AB2 = _mm_mul_pd(B2, _mm_shuffle_pd(A1,A1,0));
    AB1 = _mm_sub_pd(AB1, _mm_mul_pd(B2, _mm_shuffle_pd(A1,A1,3)));
    AB2 = _mm_sub_pd(AB2, _mm_mul_pd(B1, _mm_shuffle_pd(A2,A2,0)));

    //  dC = |C|
    dC = _mm_shuffle_pd(C2, C2, 1);
    dC = _mm_mul_pd(C1, dC);
    dC = _mm_sub_sd(dC, _mm_shuffle_pd(dC,dC,3));
    //  dD = |D|
    dD = _mm_shuffle_pd(D2, D2, 1);
    dD = _mm_mul_pd(D1, dD);
    dD = _mm_sub_sd(dD, _mm_shuffle_pd(dD,dD,3));

    //  DC = D# * C
    DC1 = _mm_mul_pd(C1, _mm_shuffle_pd(D2,D2,3));
    DC2 = _mm_mul_pd(C2, _mm_shuffle_pd(D1,D1,0));
    DC1 = _mm_sub_pd(DC1, _mm_mul_pd(C2, _mm_shuffle_pd(D1,D1,3)));
    DC2 = _mm_sub_pd(DC2, _mm_mul_pd(C1, _mm_shuffle_pd(D2,D2,0)));

    //  rd = trace(AB*DC) = trace(A#*B*D#*C)
    d1 = _mm_mul_pd(AB1, _mm_shuffle_pd(DC1, DC2, 0));
    d2 = _mm_mul_pd(AB2, _mm_shuffle_pd(DC1, DC2, 3));
    rd = _mm_add_pd(d1, d2);
    rd = _mm_add_sd(rd, _mm_shuffle_pd(rd, rd,3));

    //  iD = C*A#*B
    iD1 = _mm_mul_pd(AB1, _mm_shuffle_pd(C1,C1,0));
    iD2 = _mm_mul_pd(AB1, _mm_shuffle_pd(C2,C2,0));
    iD1 = _mm_add_pd(iD1, _mm_mul_pd(AB2, _mm_shuffle_pd(C1,C1,3)));
    iD2 = _mm_add_pd(iD2, _mm_mul_pd(AB2, _mm_shuffle_pd(C2,C2,3)));

    //  iA = B*D#*C
    iA1 = _mm_mul_pd(DC1, _mm_shuffle_pd(B1,B1,0));
    iA2 = _mm_mul_pd(DC1, _mm_shuffle_pd(B2,B2,0));
    iA1 = _mm_add_pd(iA1, _mm_mul_pd(DC2, _mm_shuffle_pd(B1,B1,3)));
    iA2 = _mm_add_pd(iA2, _mm_mul_pd(DC2, _mm_shuffle_pd(B2,B2,3)));

    //  iD = D*|A| - C*A#*B
    dA = _mm_shuffle_pd(dA,dA,0);
    iD1 = _mm_sub_pd(_mm_mul_pd(D1, dA), iD1);
    iD2 = _mm_sub_pd(_mm_mul_pd(D2, dA), iD2);

    //  iA = A*|D| - B*D#*C;
    dD = _mm_shuffle_pd(dD,dD,0);
    iA1 = _mm_sub_pd(_mm_mul_pd(A1, dD), iA1);
    iA2 = _mm_sub_pd(_mm_mul_pd(A2, dD), iA2);

    d1 = _mm_mul_sd(dA, dD);
    d2 = _mm_mul_sd(dB, dC);

    //  iB = D * (A#B)# = D*B#*A
    iB1 = _mm_mul_pd(D1, _mm_shuffle_pd(AB2,AB1,1));
    iB2 = _mm_mul_pd(D2, _mm_shuffle_pd(AB2,AB1,1));
    iB1 = _mm_sub_pd(iB1, _mm_mul_pd(_mm_shuffle_pd(D1,D1,1), _mm_shuffle_pd(AB2,AB1,2)));
    iB2 = _mm_sub_pd(iB2, _mm_mul_pd(_mm_shuffle_pd(D2,D2,1), _mm_shuffle_pd(AB2,AB1,2)));

    //  det = |A|*|D| + |B|*|C| - trace(A#*B*D#*C)
    det = _mm_add_sd(d1, d2);
    det = _mm_sub_sd(det, rd);

    //  iC = A * (D#C)# = A*C#*D
    iC1 = _mm_mul_pd(A1, _mm_shuffle_pd(DC2,DC1,1));
    iC2 = _mm_mul_pd(A2, _mm_shuffle_pd(DC2,DC1,1));
    iC1 = _mm_sub_pd(iC1, _mm_mul_pd(_mm_shuffle_pd(A1,A1,1), _mm_shuffle_pd(DC2,DC1,2)));
    iC2 = _mm_sub_pd(iC2, _mm_mul_pd(_mm_shuffle_pd(A2,A2,1), _mm_shuffle_pd(DC2,DC1,2)));

    rd = _mm_div_sd(_mm_set_sd(1.0), det);
//     #ifdef ZERO_SINGULAR
//         rd = _mm_and_pd(_mm_cmpneq_sd(det,_mm_setzero_pd()), rd);
//     #endif
    rd = _mm_shuffle_pd(rd,rd,0);

    //  iB = C*|B| - D*B#*A
    dB = _mm_shuffle_pd(dB,dB,0);
    iB1 = _mm_sub_pd(_mm_mul_pd(C1, dB), iB1);
    iB2 = _mm_sub_pd(_mm_mul_pd(C2, dB), iB2);

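    // d1 and d2 hold +/-1/det with opposite per-lane sign patterns (via
    // _Sign_PN and _Sign_NP); together with the shuffles in the stores below
    // they produce the adjugates iX# divided by det.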
    d1 = _mm_xor_pd(rd, _Sign_PN);
    d2 = _mm_xor_pd(rd, _Sign_NP);

    //  iC = B*|C| - A*C#*D;
    dC = _mm_shuffle_pd(dC,dC,0);
    iC1 = _mm_sub_pd(_mm_mul_pd(B1, dC), iC1);
    iC2 = _mm_sub_pd(_mm_mul_pd(B2, dC), iC2);

    result.template writePacket<ResultAlignment>( 0, _mm_mul_pd(_mm_shuffle_pd(iA2, iA1, 3), d1));     // iA# / det
    result.template writePacket<ResultAlignment>( 4, _mm_mul_pd(_mm_shuffle_pd(iA2, iA1, 0), d2));
    result.template writePacket<ResultAlignment>( 2, _mm_mul_pd(_mm_shuffle_pd(iB2, iB1, 3), d1));     // iB# / det
    result.template writePacket<ResultAlignment>( 6, _mm_mul_pd(_mm_shuffle_pd(iB2, iB1, 0), d2));
    result.template writePacket<ResultAlignment>( 8, _mm_mul_pd(_mm_shuffle_pd(iC2, iC1, 3), d1));     // iC# / det
    result.template writePacket<ResultAlignment>(12, _mm_mul_pd(_mm_shuffle_pd(iC2, iC1, 0), d2));
    result.template writePacket<ResultAlignment>(10, _mm_mul_pd(_mm_shuffle_pd(iD2, iD1, 3), d1));     // iD# / det
    result.template writePacket<ResultAlignment>(14, _mm_mul_pd(_mm_shuffle_pd(iD2, iD1, 0), d2));
  }
};

} // end namespace internal

} // end namespace Eigen

#endif // EIGEN_INVERSE_SSE_H