// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2014 Benoit Steiner <benoit.steiner.goog@gmail.com>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.

#ifndef EIGEN_CXX11_TENSOR_TENSOR_INTDIV_H
#define EIGEN_CXX11_TENSOR_TENSOR_INTDIV_H


namespace Eigen {

/** \internal
  *
  * \class TensorIntDiv
  * \ingroup CXX11_Tensor_Module
  *
  * \brief Fast integer division by a constant.
  *
  * See the paper by Granlund and Montgomery for an explanation.
  *   (at http://dx.doi.org/10.1145/773473.178249)
  *
  * \sa Tensor
  */
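//
// In brief: for a fixed divisor d, the division n / d (slow on most
// hardware) is replaced by a multiplication with a precomputed constant
// followed by shifts, i.e. constants m and s are chosen so that
// n / d == (n * m) >> s for every valid n. As a rough sketch of the idea
// (not the exact constants computed below): for d == 10 one may take
// m == ceil(2^35 / 10) == 3435973837 and s == 35, and
// (n * 3435973837) >> 35 == n / 10 holds for all 32-bit unsigned n.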

namespace internal {

namespace {

  // Note: result is undefined if val == 0
  template <typename T>
  EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE
  typename internal::enable_if<sizeof(T)==4,int>::type count_leading_zeros(const T val)
  {
#ifdef __CUDA_ARCH__
    return __clz(val);
#elif EIGEN_COMP_MSVC
    unsigned long index;
    _BitScanReverse(&index, val);
    return 31 - index;
#else
    EIGEN_STATIC_ASSERT(sizeof(unsigned long long) == 8, YOU_MADE_A_PROGRAMMING_MISTAKE);
    return __builtin_clz(static_cast<uint32_t>(val));
#endif
  }
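
  // For reference: count_leading_zeros(uint32_t(1)) == 31 and
  // count_leading_zeros(uint32_t(0x80000000)) == 0. The result feeds the
  // log2 computation in the TensorIntDivisor constructor below.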

  template <typename T>
  EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE
  typename internal::enable_if<sizeof(T)==8,int>::type count_leading_zeros(const T val)
  {
#ifdef __CUDA_ARCH__
    return __clzll(val);
#elif EIGEN_COMP_MSVC && EIGEN_ARCH_x86_64
    unsigned long index;
    _BitScanReverse64(&index, val);
    return 63 - index;
#elif EIGEN_COMP_MSVC
    // MSVC's _BitScanReverse64 is not available for 32-bit builds.
    unsigned int lo = (unsigned int)(val&0xffffffff);
    unsigned int hi = (unsigned int)((val>>32)&0xffffffff);
    int n;
    if(hi==0)
      n = 32 + count_leading_zeros<unsigned int>(lo);
    else
      n = count_leading_zeros<unsigned int>(hi);
    return n;
#else
    EIGEN_STATIC_ASSERT(sizeof(unsigned long long) == 8, YOU_MADE_A_PROGRAMMING_MISTAKE);
    return __builtin_clzll(static_cast<uint64_t>(val));
#endif
  }

  template <typename T>
  struct UnsignedTraits {
    typedef typename conditional<sizeof(T) == 8, uint64_t, uint32_t>::type type;
  };

  template <typename T>
  struct DividerTraits {
    typedef typename UnsignedTraits<T>::type type;
    static const int N = sizeof(T) * 8;
  };

  template <typename T>
  EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE uint32_t muluh(const uint32_t a, const T b) {
#if defined(__CUDA_ARCH__)
    return __umulhi(a, b);
#else
    return (static_cast<uint64_t>(a) * b) >> 32;
#endif
  }
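
  // muluh returns the upper half of the widened product of its operands,
  // e.g. muluh(0x80000000u, uint32_t(4)) == 2 because the full 64-bit
  // product is 0x200000000. It is the core primitive of the
  // multiply-and-shift division implemented below.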

  template <typename T>
  EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE uint64_t muluh(const uint64_t a, const T b) {
#if defined(__CUDA_ARCH__)
    return __umul64hi(a, b);
#elif defined(__SIZEOF_INT128__)
    __uint128_t v = static_cast<__uint128_t>(a) * static_cast<__uint128_t>(b);
    return static_cast<uint64_t>(v >> 64);
#else
    return (TensorUInt128<static_val<0>, uint64_t>(a) * TensorUInt128<static_val<0>, uint64_t>(b)).upper();
#endif
  }

  template <int N, typename T>
  struct DividerHelper {
    static EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE uint32_t computeMultiplier(const int log_div, const T divider) {
      EIGEN_STATIC_ASSERT(N == 32, YOU_MADE_A_PROGRAMMING_MISTAKE);
      return static_cast<uint32_t>((static_cast<uint64_t>(1) << (N+log_div)) / divider - (static_cast<uint64_t>(1) << N) + 1);
    }
  };

  template <typename T>
  struct DividerHelper<64, T> {
    static EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE uint64_t computeMultiplier(const int log_div, const T divider) {
#if defined(__SIZEOF_INT128__) && !defined(__CUDA_ARCH__)
      return static_cast<uint64_t>((static_cast<__uint128_t>(1) << (64+log_div)) / static_cast<__uint128_t>(divider) - (static_cast<__uint128_t>(1) << 64) + 1);
#else
      const uint64_t shift = 1ULL << log_div;
      TensorUInt128<uint64_t, uint64_t> result = TensorUInt128<uint64_t, static_val<0> >(shift, 0) / TensorUInt128<static_val<0>, uint64_t>(divider)
                                               - TensorUInt128<static_val<1>, static_val<0> >(1, 0)
                                               + TensorUInt128<static_val<0>, static_val<1> >(1);
      return static_cast<uint64_t>(result);
#endif
    }
  };
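
  // Worked example for N == 32: with divider == 3 we have log_div == 2, so
  // computeMultiplier returns 2^34 / 3 - 2^32 + 1 == 5726623061 - 4294967296
  // + 1 == 1431655766 (0x55555556), the familiar divide-by-three constant.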
}


template <typename T, bool div_gt_one = false>
struct TensorIntDivisor {
 public:
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorIntDivisor() {
    multiplier = 0;
    shift1 = 0;
    shift2 = 0;
  }

  // Must have 0 < divider < 2^31. This is relaxed to
  // 0 < divider < 2^63 when using 64-bit indices on platforms that support
  // the __uint128_t type.
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorIntDivisor(const T divider) {
    const int N = DividerTraits<T>::N;
    eigen_assert(static_cast<typename UnsignedTraits<T>::type>(divider) < NumTraits<UnsignedType>::highest()/2);
    eigen_assert(divider > 0);

    // fast log2: N - count_leading_zeros is 1 + floor(log2(divider))
    const int leading_zeros = count_leading_zeros(static_cast<UnsignedType>(divider));
    int log_div = N - leading_zeros;
    // if divider is a power of two then log_div is 1 more than it should be.
    if ((static_cast<typename UnsignedTraits<T>::type>(1) << (log_div-1)) == static_cast<typename UnsignedTraits<T>::type>(divider))
      log_div--;

    multiplier = DividerHelper<N, T>::computeMultiplier(log_div, divider);
    shift1 = log_div > 1 ? 1 : log_div;
    shift2 = log_div > 1 ? log_div-1 : 0;
  }
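
  // For example, with T == int32_t and divider == 10: leading_zeros == 28,
  // so log_div == 4 (10 is not a power of two), giving shift1 == 1 and
  // shift2 == 3, with multiplier == 2^36 / 10 - 2^32 + 1 == 2576980378.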

  // Must have 0 <= numerator. On platforms that don't support the
  // __uint128_t type, the numerator should also be less than 2^32-1.
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE T divide(const T numerator) const {
    eigen_assert(static_cast<typename UnsignedTraits<T>::type>(numerator) < NumTraits<UnsignedType>::highest()/2);
    //eigen_assert(numerator >= 0); // this is implicitly asserted by the line above

    UnsignedType t1 = muluh(multiplier, numerator);
    UnsignedType t = (static_cast<UnsignedType>(numerator) - t1) >> shift1;
    return (t1 + t) >> shift2;
  }
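
  // The two steps above recover the quotient as described by Granlund and
  // Montgomery: t1 is the high half of multiplier * numerator, and for
  // log_div > 1 the result is (t1 + ((numerator - t1) >> 1)) >> (log_div - 1).
  // Spot check with divider == 10 (multiplier == 2576980378) and
  // numerator == 99: t1 == 59, t == (99 - 59) >> 1 == 20, and
  // (59 + 20) >> 3 == 9 == 99 / 10.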

 private:
  typedef typename DividerTraits<T>::type UnsignedType;
  UnsignedType multiplier;
  int32_t shift1;
  int32_t shift2;
};


// Optimized version for signed 32-bit integers.
// Derived from Hacker's Delight.
// Only works for divisors strictly greater than one.
template <>
class TensorIntDivisor<int32_t, true> {
 public:
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorIntDivisor() {
    magic = 0;
    shift = 0;
  }
  // Must have 2 <= divider
  EIGEN_DEVICE_FUNC TensorIntDivisor(int32_t divider) {
    eigen_assert(divider >= 2);
    calcMagic(divider);
  }

  EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE int divide(const int32_t n) const {
#ifdef __CUDA_ARCH__
    return (__umulhi(magic, n) >> shift);
#else
    uint64_t v = static_cast<uint64_t>(magic) * static_cast<uint64_t>(n);
    return (static_cast<uint32_t>(v >> 32) >> shift);
#endif
  }

private:
  // Compute the magic numbers. See Hacker's Delight section 10 for an
  // in-depth explanation.
  EIGEN_DEVICE_FUNC void calcMagic(int32_t d) {
   const unsigned two31 = 0x80000000;     // 2**31.
   unsigned ad = d;
   unsigned t = two31 + (ad >> 31);
   unsigned anc = t - 1 - t%ad;     // Absolute value of nc.
   int p = 31;                      // Init. p.
   unsigned q1 = two31/anc;         // Init. q1 = 2**p/|nc|.
   unsigned r1 = two31 - q1*anc;    // Init. r1 = rem(2**p, |nc|).
   unsigned q2 = two31/ad;          // Init. q2 = 2**p/|d|.
   unsigned r2 = two31 - q2*ad;     // Init. r2 = rem(2**p, |d|).
   unsigned delta = 0;
   do {
      p = p + 1;
      q1 = 2*q1;           // Update q1 = 2**p/|nc|.
      r1 = 2*r1;           // Update r1 = rem(2**p, |nc|).
      if (r1 >= anc) {     // (Must be an unsigned
         q1 = q1 + 1;      // comparison here).
         r1 = r1 - anc;}
      q2 = 2*q2;           // Update q2 = 2**p/|d|.
      r2 = 2*r2;           // Update r2 = rem(2**p, |d|).
      if (r2 >= ad) {      // (Must be an unsigned
         q2 = q2 + 1;      // comparison here).
         r2 = r2 - ad;}
      delta = ad - r2;
   } while (q1 < delta || (q1 == delta && r1 == 0));

   magic = (unsigned)(q2 + 1);
   shift = p - 32;
  }

  uint32_t magic;
  int32_t shift;
};
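
// For reference, calcMagic(7) yields magic == 0x92492493 and shift == 2
// (the constants Hacker's Delight lists for division by 7); divide(35) then
// computes __umulhi(0x92492493, 35) >> 2 == 20 >> 2 == 5.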


template <typename T, bool div_gt_one>
static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE T operator / (const T& numerator, const TensorIntDivisor<T, div_gt_one>& divisor) {
  return divisor.divide(numerator);
}
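
// A minimal usage sketch (illustrative only; the names stride, indices and
// n are placeholders): hoist the divisor construction out of a hot loop so
// the magic-number setup runs once, while each iteration pays only a
// multiply and a couple of shifts.
//
//   const internal::TensorIntDivisor<int> fast_div(stride);  // stride > 0
//   for (int i = 0; i < n; ++i) {
//     int q = indices[i] / fast_div;   // same result as indices[i] / stride
//   }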


} // end namespace internal
} // end namespace Eigen

#endif // EIGEN_CXX11_TENSOR_TENSOR_INTDIV_H