// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2008-2009 Gael Guennebaud <gael.guennebaud@inria.fr>
// Copyright (C) 2010 Konstantinos Margaritis <markos@codex.gr>
// Heavily based on Gael's SSE version.
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.

#ifndef EIGEN_PACKET_MATH_NEON_H
#define EIGEN_PACKET_MATH_NEON_H

namespace Eigen {

namespace internal {

#ifndef EIGEN_CACHEFRIENDLY_PRODUCT_THRESHOLD
#define EIGEN_CACHEFRIENDLY_PRODUCT_THRESHOLD 8
#endif

// FIXME NEON has 16 quad registers, but since the current register allocator
// is so bad, it is much better to reduce it to 8
#ifndef EIGEN_ARCH_DEFAULT_NUMBER_OF_REGISTERS
#define EIGEN_ARCH_DEFAULT_NUMBER_OF_REGISTERS 8
#endif

typedef float32x4_t Packet4f;
typedef int32x4_t   Packet4i;
typedef uint32x4_t  Packet4ui;

#define _EIGEN_DECLARE_CONST_Packet4f(NAME,X) \
  const Packet4f p4f_##NAME = pset1<Packet4f>(X)

#define _EIGEN_DECLARE_CONST_Packet4f_FROM_INT(NAME,X) \
  const Packet4f p4f_##NAME = vreinterpretq_f32_s32(pset1<Packet4i>(X))

#define _EIGEN_DECLARE_CONST_Packet4i(NAME,X) \
  const Packet4i p4i_##NAME = pset1<Packet4i>(X)

#if defined(__llvm__) && !defined(__clang__)
// Special treatment for Apple's llvm-gcc: its NEON packet types are unions
#define EIGEN_INIT_NEON_PACKET2(X, Y)       {{X, Y}}
#define EIGEN_INIT_NEON_PACKET4(X, Y, Z, W) {{X, Y, Z, W}}
#else
// Default initializer for packets
#define EIGEN_INIT_NEON_PACKET2(X, Y)       {X, Y}
#define EIGEN_INIT_NEON_PACKET4(X, Y, Z, W) {X, Y, Z, W}
#endif

#ifndef __pld
#define __pld(x) asm volatile ( " pld [%[addr]]\n" :: [addr] "r" (x) : "cc" );
#endif

template<> struct packet_traits<float> : default_packet_traits
{
  typedef Packet4f type;
  enum {
    Vectorizable = 1,
    AlignedOnScalar = 1,
    size = 4,

    HasDiv = 1,
    // FIXME check the Has*
    HasSin  = 0,
    HasCos  = 0,
    HasLog  = 0,
    HasExp  = 0,
    HasSqrt = 0
  };
};
template<> struct packet_traits<int> : default_packet_traits
{
  typedef Packet4i type;
  enum {
    Vectorizable = 1,
    AlignedOnScalar = 1,
    size = 4
    // FIXME check the Has*
  };
};

#if EIGEN_GNUC_AT_MOST(4,4) && !defined(__llvm__)
// workaround for a gcc 4.2, 4.3 and 4.4 compilation issue
EIGEN_STRONG_INLINE float32x4_t vld1q_f32(const float* x) { return ::vld1q_f32((const float32_t*)x); }
EIGEN_STRONG_INLINE float32x2_t vld1_f32 (const float* x) { return ::vld1_f32 ((const float32_t*)x); }
EIGEN_STRONG_INLINE void        vst1q_f32(float* to, float32x4_t from) { ::vst1q_f32((float32_t*)to, from); }
EIGEN_STRONG_INLINE void        vst1_f32 (float* to, float32x2_t from) { ::vst1_f32 ((float32_t*)to, from); }
#endif

template<> struct unpacket_traits<Packet4f> { typedef float type; enum {size=4}; };
template<> struct unpacket_traits<Packet4i> { typedef int   type; enum {size=4}; };

template<> EIGEN_STRONG_INLINE Packet4f pset1<Packet4f>(const float& from) { return vdupq_n_f32(from); }
template<> EIGEN_STRONG_INLINE Packet4i pset1<Packet4i>(const int&   from) { return vdupq_n_s32(from); }

template<> EIGEN_STRONG_INLINE Packet4f plset<float>(const float& a)
{
  Packet4f countdown = EIGEN_INIT_NEON_PACKET4(0, 1, 2, 3);
  return vaddq_f32(pset1<Packet4f>(a), countdown);
}
template<> EIGEN_STRONG_INLINE Packet4i plset<int>(const int& a)
{
  Packet4i countdown = EIGEN_INIT_NEON_PACKET4(0, 1, 2, 3);
  return vaddq_s32(pset1<Packet4i>(a), countdown);
}
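// A minimal usage sketch of the two primitives above (values shown are lane
// contents, lowest lane first):
//   Packet4f b = pset1<Packet4f>(2.f); // b = {2, 2, 2, 2}
//   Packet4f s = plset<float>(10.f);   // s = {10, 11, 12, 13}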
template<> EIGEN_STRONG_INLINE Packet4f padd<Packet4f>(const Packet4f& a, const Packet4f& b) { return vaddq_f32(a,b); }
template<> EIGEN_STRONG_INLINE Packet4i padd<Packet4i>(const Packet4i& a, const Packet4i& b) { return vaddq_s32(a,b); }

template<> EIGEN_STRONG_INLINE Packet4f psub<Packet4f>(const Packet4f& a, const Packet4f& b) { return vsubq_f32(a,b); }
template<> EIGEN_STRONG_INLINE Packet4i psub<Packet4i>(const Packet4i& a, const Packet4i& b) { return vsubq_s32(a,b); }

template<> EIGEN_STRONG_INLINE Packet4f pnegate(const Packet4f& a) { return vnegq_f32(a); }
template<> EIGEN_STRONG_INLINE Packet4i pnegate(const Packet4i& a) { return vnegq_s32(a); }

template<> EIGEN_STRONG_INLINE Packet4f pconj(const Packet4f& a) { return a; }
template<> EIGEN_STRONG_INLINE Packet4i pconj(const Packet4i& a) { return a; }

template<> EIGEN_STRONG_INLINE Packet4f pmul<Packet4f>(const Packet4f& a, const Packet4f& b) { return vmulq_f32(a,b); }
template<> EIGEN_STRONG_INLINE Packet4i pmul<Packet4i>(const Packet4i& a, const Packet4i& b) { return vmulq_s32(a,b); }

template<> EIGEN_STRONG_INLINE Packet4f pdiv<Packet4f>(const Packet4f& a, const Packet4f& b)
{
  Packet4f inv, restep, div;

  // NEON does not offer a divide instruction, so we have to use a reciprocal
  // approximation. However, in contrast to other SIMD engines (AltiVec/SSE),
  // NEON offers a reciprocal estimate AND a reciprocal step, which saves a
  // few instructions. vrecpeq_f32() returns an estimate of 1/b, which we
  // then fine-tune with Newton-Raphson and vrecpsq_f32().
  inv = vrecpeq_f32(b);

  // This returns a correction factor by which inv has to be multiplied to
  // get a better approximation of 1/b.
  restep = vrecpsq_f32(b, inv);
  inv = vmulq_f32(restep, inv);

  // Finally, multiply a by 1/b to get the wanted result of the division.
  div = vmulq_f32(a, inv);

  return div;
}
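// The refinement above is one Newton-Raphson step: vrecpsq_f32(b, x)
// computes (2 - b*x), so x' = x * (2 - b*x) roughly doubles the number of
// correct bits of x as an approximation of 1/b. If more accuracy were
// needed, a second step could be chained the same way (a sketch, not used
// by this file):
//   inv = vmulq_f32(vrecpsq_f32(b, inv), inv);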
template<> EIGEN_STRONG_INLINE Packet4i pdiv<Packet4i>(const Packet4i& /*a*/, const Packet4i& /*b*/)
{ eigen_assert(false && "packet integer division is not supported by NEON");
  return pset1<Packet4i>(0);
}

// For some weird reason, pmadd has to be overloaded for packets of integers.
template<> EIGEN_STRONG_INLINE Packet4f pmadd(const Packet4f& a, const Packet4f& b, const Packet4f& c) { return vmlaq_f32(c,a,b); }
template<> EIGEN_STRONG_INLINE Packet4i pmadd(const Packet4i& a, const Packet4i& b, const Packet4i& c) { return vmlaq_s32(c,a,b); }

template<> EIGEN_STRONG_INLINE Packet4f pmin<Packet4f>(const Packet4f& a, const Packet4f& b) { return vminq_f32(a,b); }
template<> EIGEN_STRONG_INLINE Packet4i pmin<Packet4i>(const Packet4i& a, const Packet4i& b) { return vminq_s32(a,b); }

template<> EIGEN_STRONG_INLINE Packet4f pmax<Packet4f>(const Packet4f& a, const Packet4f& b) { return vmaxq_f32(a,b); }
template<> EIGEN_STRONG_INLINE Packet4i pmax<Packet4i>(const Packet4i& a, const Packet4i& b) { return vmaxq_s32(a,b); }

// Logical operations are not supported for float, so we have to go through
// reinterpret casts using NEON intrinsics.
template<> EIGEN_STRONG_INLINE Packet4f pand<Packet4f>(const Packet4f& a, const Packet4f& b)
{
  return vreinterpretq_f32_u32(vandq_u32(vreinterpretq_u32_f32(a),vreinterpretq_u32_f32(b)));
}
template<> EIGEN_STRONG_INLINE Packet4i pand<Packet4i>(const Packet4i& a, const Packet4i& b) { return vandq_s32(a,b); }

template<> EIGEN_STRONG_INLINE Packet4f por<Packet4f>(const Packet4f& a, const Packet4f& b)
{
  return vreinterpretq_f32_u32(vorrq_u32(vreinterpretq_u32_f32(a),vreinterpretq_u32_f32(b)));
}
template<> EIGEN_STRONG_INLINE Packet4i por<Packet4i>(const Packet4i& a, const Packet4i& b) { return vorrq_s32(a,b); }

template<> EIGEN_STRONG_INLINE Packet4f pxor<Packet4f>(const Packet4f& a, const Packet4f& b)
{
  return vreinterpretq_f32_u32(veorq_u32(vreinterpretq_u32_f32(a),vreinterpretq_u32_f32(b)));
}
template<> EIGEN_STRONG_INLINE Packet4i pxor<Packet4i>(const Packet4i& a, const Packet4i& b) { return veorq_s32(a,b); }

template<> EIGEN_STRONG_INLINE Packet4f pandnot<Packet4f>(const Packet4f& a, const Packet4f& b)
{
  return vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(a),vreinterpretq_u32_f32(b)));
}
template<> EIGEN_STRONG_INLINE Packet4i pandnot<Packet4i>(const Packet4i& a, const Packet4i& b) { return vbicq_s32(a,b); }

template<> EIGEN_STRONG_INLINE Packet4f pload<Packet4f>(const float* from) { EIGEN_DEBUG_ALIGNED_LOAD return vld1q_f32(from); }
template<> EIGEN_STRONG_INLINE Packet4i pload<Packet4i>(const int*   from) { EIGEN_DEBUG_ALIGNED_LOAD return vld1q_s32(from); }

template<> EIGEN_STRONG_INLINE Packet4f ploadu<Packet4f>(const float* from) { EIGEN_DEBUG_UNALIGNED_LOAD return vld1q_f32(from); }
template<> EIGEN_STRONG_INLINE Packet4i ploadu<Packet4i>(const int*   from) { EIGEN_DEBUG_UNALIGNED_LOAD return vld1q_s32(from); }

template<> EIGEN_STRONG_INLINE Packet4f ploaddup<Packet4f>(const float* from)
{
  float32x2_t lo, hi;
  lo = vld1_dup_f32(from);
  hi = vld1_dup_f32(from+1);
  return vcombine_f32(lo, hi);
}
template<> EIGEN_STRONG_INLINE Packet4i ploaddup<Packet4i>(const int* from)
{
  int32x2_t lo, hi;
  lo = vld1_dup_s32(from);
  hi = vld1_dup_s32(from+1);
  return vcombine_s32(lo, hi);
}
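// A sketch of the duplicating loads above: each of the two scalars is
// broadcast into one half of the result, so for from = {x, y, ...},
// ploaddup returns {x, x, y, y}.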
template<> EIGEN_STRONG_INLINE void pstore<float>(float* to, const Packet4f& from) { EIGEN_DEBUG_ALIGNED_STORE vst1q_f32(to, from); }
template<> EIGEN_STRONG_INLINE void pstore<int>  (int*   to, const Packet4i& from) { EIGEN_DEBUG_ALIGNED_STORE vst1q_s32(to, from); }

template<> EIGEN_STRONG_INLINE void pstoreu<float>(float* to, const Packet4f& from) { EIGEN_DEBUG_UNALIGNED_STORE vst1q_f32(to, from); }
template<> EIGEN_STRONG_INLINE void pstoreu<int>  (int*   to, const Packet4i& from) { EIGEN_DEBUG_UNALIGNED_STORE vst1q_s32(to, from); }

template<> EIGEN_STRONG_INLINE void prefetch<float>(const float* addr) { __pld(addr); }
template<> EIGEN_STRONG_INLINE void prefetch<int>  (const int*   addr) { __pld(addr); }

// FIXME only store the first two elements?
template<> EIGEN_STRONG_INLINE float pfirst<Packet4f>(const Packet4f& a) { float EIGEN_ALIGN16 x[4]; vst1q_f32(x, a); return x[0]; }
template<> EIGEN_STRONG_INLINE int   pfirst<Packet4i>(const Packet4i& a) { int   EIGEN_ALIGN16 x[4]; vst1q_s32(x, a); return x[0]; }

template<> EIGEN_STRONG_INLINE Packet4f preverse(const Packet4f& a) {
  float32x2_t a_lo, a_hi;
  Packet4f a_r64;

  a_r64 = vrev64q_f32(a);
  a_lo = vget_low_f32(a_r64);
  a_hi = vget_high_f32(a_r64);
  return vcombine_f32(a_hi, a_lo);
}
template<> EIGEN_STRONG_INLINE Packet4i preverse(const Packet4i& a) {
  int32x2_t a_lo, a_hi;
  Packet4i a_r64;

  a_r64 = vrev64q_s32(a);
  a_lo = vget_low_s32(a_r64);
  a_hi = vget_high_s32(a_r64);
  return vcombine_s32(a_hi, a_lo);
}
template<> EIGEN_STRONG_INLINE Packet4f pabs(const Packet4f& a) { return vabsq_f32(a); }
template<> EIGEN_STRONG_INLINE Packet4i pabs(const Packet4i& a) { return vabsq_s32(a); }

template<> EIGEN_STRONG_INLINE float predux<Packet4f>(const Packet4f& a)
{
  float32x2_t a_lo, a_hi, sum;

  a_lo = vget_low_f32(a);
  a_hi = vget_high_f32(a);
  sum = vpadd_f32(a_lo, a_hi);
  sum = vpadd_f32(sum, sum);
  return vget_lane_f32(sum, 0);
}

template<> EIGEN_STRONG_INLINE Packet4f preduxp<Packet4f>(const Packet4f* vecs)
{
  float32x4x2_t vtrn1, vtrn2, res1, res2;
  Packet4f sum1, sum2, sum;

  // NEON zip performs interleaving of the supplied vectors.
  // We perform two interleaves in a row to acquire the transposed vectors.
  vtrn1 = vzipq_f32(vecs[0], vecs[2]);
  vtrn2 = vzipq_f32(vecs[1], vecs[3]);
  res1 = vzipq_f32(vtrn1.val[0], vtrn2.val[0]);
  res2 = vzipq_f32(vtrn1.val[1], vtrn2.val[1]);

  // Do the addition of the resulting vectors
  sum1 = vaddq_f32(res1.val[0], res1.val[1]);
  sum2 = vaddq_f32(res2.val[0], res2.val[1]);
  sum = vaddq_f32(sum1, sum2);

  return sum;
}
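// Lane sketch of the zip-based transpose above (and of its integer twin
// below), writing vecs[k] = {k0, k1, k2, k3}:
//   vzipq(vecs[0], vecs[2]) -> {00,20,01,21} and {02,22,03,23}
//   vzipq(vecs[1], vecs[3]) -> {10,30,11,31} and {12,32,13,33}
//   zipping the two low results  -> columns {00,10,20,30} and {01,11,21,31}
//   zipping the two high results -> columns {02,12,22,32} and {03,13,23,33}
// Summing the four column vectors leaves the total of vecs[j] in lane j.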
template<> EIGEN_STRONG_INLINE int predux<Packet4i>(const Packet4i& a)
{
  int32x2_t a_lo, a_hi, sum;

  a_lo = vget_low_s32(a);
  a_hi = vget_high_s32(a);
  sum = vpadd_s32(a_lo, a_hi);
  sum = vpadd_s32(sum, sum);
  return vget_lane_s32(sum, 0);
}

template<> EIGEN_STRONG_INLINE Packet4i preduxp<Packet4i>(const Packet4i* vecs)
{
  int32x4x2_t vtrn1, vtrn2, res1, res2;
  Packet4i sum1, sum2, sum;

  // NEON zip performs interleaving of the supplied vectors.
  // We perform two interleaves in a row to acquire the transposed vectors.
  vtrn1 = vzipq_s32(vecs[0], vecs[2]);
  vtrn2 = vzipq_s32(vecs[1], vecs[3]);
  res1 = vzipq_s32(vtrn1.val[0], vtrn2.val[0]);
  res2 = vzipq_s32(vtrn1.val[1], vtrn2.val[1]);

  // Do the addition of the resulting vectors
  sum1 = vaddq_s32(res1.val[0], res1.val[1]);
  sum2 = vaddq_s32(res2.val[0], res2.val[1]);
  sum = vaddq_s32(sum1, sum2);

  return sum;
}

// Other reduction functions:
// mul
template<> EIGEN_STRONG_INLINE float predux_mul<Packet4f>(const Packet4f& a)
{
  float32x2_t a_lo, a_hi, prod;

  // Get a_lo = |a1|a2| and a_hi = |a3|a4|
  a_lo = vget_low_f32(a);
  a_hi = vget_high_f32(a);
  // Get the product of a_lo * a_hi -> |a1*a3|a2*a4|
  prod = vmul_f32(a_lo, a_hi);
  // Multiply prod with its swapped value |a2*a4|a1*a3|
  prod = vmul_f32(prod, vrev64_f32(prod));

  return vget_lane_f32(prod, 0);
}
template<> EIGEN_STRONG_INLINE int predux_mul<Packet4i>(const Packet4i& a)
{
  int32x2_t a_lo, a_hi, prod;

  // Get a_lo = |a1|a2| and a_hi = |a3|a4|
  a_lo = vget_low_s32(a);
  a_hi = vget_high_s32(a);
  // Get the product of a_lo * a_hi -> |a1*a3|a2*a4|
  prod = vmul_s32(a_lo, a_hi);
  // Multiply prod with its swapped value |a2*a4|a1*a3|
  prod = vmul_s32(prod, vrev64_s32(prod));

  return vget_lane_s32(prod, 0);
}

// min
template<> EIGEN_STRONG_INLINE float predux_min<Packet4f>(const Packet4f& a)
{
  float32x2_t a_lo, a_hi, min;

  a_lo = vget_low_f32(a);
  a_hi = vget_high_f32(a);
  min = vpmin_f32(a_lo, a_hi);
  min = vpmin_f32(min, min);

  return vget_lane_f32(min, 0);
}

template<> EIGEN_STRONG_INLINE int predux_min<Packet4i>(const Packet4i& a)
{
  int32x2_t a_lo, a_hi, min;

  a_lo = vget_low_s32(a);
  a_hi = vget_high_s32(a);
  min = vpmin_s32(a_lo, a_hi);
  min = vpmin_s32(min, min);

  return vget_lane_s32(min, 0);
}

// max
template<> EIGEN_STRONG_INLINE float predux_max<Packet4f>(const Packet4f& a)
{
  float32x2_t a_lo, a_hi, max;

  a_lo = vget_low_f32(a);
  a_hi = vget_high_f32(a);
  max = vpmax_f32(a_lo, a_hi);
  max = vpmax_f32(max, max);

  return vget_lane_f32(max, 0);
}

template<> EIGEN_STRONG_INLINE int predux_max<Packet4i>(const Packet4i& a)
{
  int32x2_t a_lo, a_hi, max;

  a_lo = vget_low_s32(a);
  a_hi = vget_high_s32(a);
  max = vpmax_s32(a_lo, a_hi);
  max = vpmax_s32(max, max);

  return vget_lane_s32(max, 0);
}

// This PALIGN_NEON business is to work around a bug in LLVM Clang 3.0 causing
// incorrect compilation errors, see bug 347 and this LLVM bug:
// http://llvm.org/bugs/show_bug.cgi?id=11074
#define PALIGN_NEON(Offset,Type,Command) \
template<>\
struct palign_impl<Offset,Type>\
{\
    EIGEN_STRONG_INLINE static void run(Type& first, const Type& second)\
    {\
        if (Offset!=0)\
            first = Command(first, second, Offset);\
    }\
};\

PALIGN_NEON(0,Packet4f,vextq_f32)
PALIGN_NEON(1,Packet4f,vextq_f32)
PALIGN_NEON(2,Packet4f,vextq_f32)
PALIGN_NEON(3,Packet4f,vextq_f32)
PALIGN_NEON(0,Packet4i,vextq_s32)
PALIGN_NEON(1,Packet4i,vextq_s32)
PALIGN_NEON(2,Packet4i,vextq_s32)
PALIGN_NEON(3,Packet4i,vextq_s32)

#undef PALIGN_NEON

} // end namespace internal

} // end namespace Eigen

#endif // EIGEN_PACKET_MATH_NEON_H
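// Usage sketch for the palign_impl specializations above: with
// first = {0,1,2,3} and second = {4,5,6,7}, Offset 1 turns first into
// {1,2,3,4}, i.e. a packet-sized window sliding across the two inputs.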