/// @ref core
/// @file glm/detail/func_integer.inl

#include "type_vec2.hpp"
#include "type_vec3.hpp"
#include "type_vec4.hpp"
#include "type_int.hpp"
#include "_vectorize.hpp"
#if(GLM_ARCH & GLM_ARCH_X86 && GLM_COMPILER & GLM_COMPILER_VC)
#	include <intrin.h>
#	pragma intrinsic(_BitScanReverse)
#endif//(GLM_ARCH & GLM_ARCH_X86 && GLM_COMPILER & GLM_COMPILER_VC)
#include <limits>

#if !GLM_HAS_EXTENDED_INTEGER_TYPE
#	if GLM_COMPILER & GLM_COMPILER_GCC
#		pragma GCC diagnostic ignored "-Wlong-long"
#	endif
#	if (GLM_COMPILER & GLM_COMPILER_CLANG)
#		pragma clang diagnostic ignored "-Wc++11-long-long"
#	endif
#endif

namespace glm{
namespace detail
{
	template <typename T>
	GLM_FUNC_QUALIFIER T mask(T Bits)
	{
		return Bits >= sizeof(T) * 8 ? ~static_cast<T>(0) : (static_cast<T>(1) << Bits) - static_cast<T>(1);
	}

	template <typename T, glm::precision P, template <typename, glm::precision> class vecType, bool Aligned, bool EXEC>
	struct compute_bitfieldReverseStep
	{
		GLM_FUNC_QUALIFIER static vecType<T, P> call(vecType<T, P> const & v, T, T)
		{
			return v;
		}
	};

	template <typename T, glm::precision P, template <typename, glm::precision> class vecType, bool Aligned>
	struct compute_bitfieldReverseStep<T, P, vecType, Aligned, true>
	{
		GLM_FUNC_QUALIFIER static vecType<T, P> call(vecType<T, P> const & v, T Mask, T Shift)
		{
			return (v & Mask) << Shift | (v & (~Mask)) >> Shift;
		}
	};

	template <typename T, glm::precision P, template <typename, glm::precision> class vecType, bool Aligned, bool EXEC>
	struct compute_bitfieldBitCountStep
	{
		GLM_FUNC_QUALIFIER static vecType<T, P> call(vecType<T, P> const & v, T, T)
		{
			return v;
		}
	};

	template <typename T, glm::precision P, template <typename, glm::precision> class vecType, bool Aligned>
	struct compute_bitfieldBitCountStep<T, P, vecType, Aligned, true>
	{
		GLM_FUNC_QUALIFIER static vecType<T, P> call(vecType<T, P> const & v, T Mask, T Shift)
		{
			return (v & Mask) + ((v >> Shift) & Mask);
		}
	};

	template <typename genIUType, size_t Bits>
	struct compute_findLSB
	{
		GLM_FUNC_QUALIFIER static int call(genIUType Value)
		{
			if(Value == 0)
				return -1;

			return glm::bitCount(~Value & (Value - static_cast<genIUType>(1)));
		}
	};

#	if GLM_HAS_BITSCAN_WINDOWS
		template <typename genIUType>
		struct compute_findLSB<genIUType, 32>
		{
			GLM_FUNC_QUALIFIER static int call(genIUType Value)
			{
				unsigned long Result(0);
				unsigned char IsNotNull = _BitScanForward(&Result, *reinterpret_cast<unsigned long*>(&Value));
				return IsNotNull ? int(Result) : -1;
			}
		};

#		if !((GLM_COMPILER & GLM_COMPILER_VC) && (GLM_MODEL == GLM_MODEL_32))
		template <typename genIUType>
		struct compute_findLSB<genIUType, 64>
		{
			GLM_FUNC_QUALIFIER static int call(genIUType Value)
			{
				unsigned long Result(0);
				unsigned char IsNotNull = _BitScanForward64(&Result, *reinterpret_cast<unsigned __int64*>(&Value));
				return IsNotNull ? int(Result) : -1;
			}
		};
#		endif
#	endif//GLM_HAS_BITSCAN_WINDOWS
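
	// The generic compute_findLSB fallback above isolates the zero bits below the
	// lowest set bit with ~Value & (Value - 1) and counts them: for 0b01101000 the
	// expression yields 0b00000111, so bitCount returns 3, the index of the lowest
	// set bit. The compute_findMSB_* helpers below take the opposite route and smear
	// the highest set bit downward (x |= x >> 1, x |= x >> 2, ...) so its index can
	// be recovered with a population count; an input of zero stays zero after the
	// smear and naturally produces the required result of -1.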

	template <typename T, glm::precision P, template <class, glm::precision> class vecType, bool EXEC = true>
	struct compute_findMSB_step_vec
	{
		GLM_FUNC_QUALIFIER static vecType<T, P> call(vecType<T, P> const & x, T Shift)
		{
			return x | (x >> Shift);
		}
	};

	template <typename T, glm::precision P, template <typename, glm::precision> class vecType>
	struct compute_findMSB_step_vec<T, P, vecType, false>
	{
		GLM_FUNC_QUALIFIER static vecType<T, P> call(vecType<T, P> const & x, T)
		{
			return x;
		}
	};

	template <typename T, glm::precision P, template <typename, glm::precision> class vecType, int>
	struct compute_findMSB_vec
	{
		GLM_FUNC_QUALIFIER static vecType<int, P> call(vecType<T, P> const & vec)
		{
			vecType<T, P> x(vec);
			x = compute_findMSB_step_vec<T, P, vecType, sizeof(T) * 8 >=  8>::call(x, static_cast<T>( 1));
			x = compute_findMSB_step_vec<T, P, vecType, sizeof(T) * 8 >=  8>::call(x, static_cast<T>( 2));
			x = compute_findMSB_step_vec<T, P, vecType, sizeof(T) * 8 >=  8>::call(x, static_cast<T>( 4));
			x = compute_findMSB_step_vec<T, P, vecType, sizeof(T) * 8 >= 16>::call(x, static_cast<T>( 8));
			x = compute_findMSB_step_vec<T, P, vecType, sizeof(T) * 8 >= 32>::call(x, static_cast<T>(16));
			x = compute_findMSB_step_vec<T, P, vecType, sizeof(T) * 8 >= 64>::call(x, static_cast<T>(32));
			return vecType<int, P>(sizeof(T) * 8 - 1) - glm::bitCount(~x);
		}
	};

#	if GLM_HAS_BITSCAN_WINDOWS
		template <typename genIUType>
		GLM_FUNC_QUALIFIER int compute_findMSB_32(genIUType Value)
		{
			unsigned long Result(0);
			unsigned char IsNotNull = _BitScanReverse(&Result, *reinterpret_cast<unsigned long*>(&Value));
			return IsNotNull ? int(Result) : -1;
		}

		template <typename T, glm::precision P, template <typename, glm::precision> class vecType>
		struct compute_findMSB_vec<T, P, vecType, 32>
		{
			GLM_FUNC_QUALIFIER static vecType<int, P> call(vecType<T, P> const & x)
			{
				return detail::functor1<int, T, P, vecType>::call(compute_findMSB_32, x);
			}
		};

#		if !((GLM_COMPILER & GLM_COMPILER_VC) && (GLM_MODEL == GLM_MODEL_32))
		template <typename genIUType>
		GLM_FUNC_QUALIFIER int compute_findMSB_64(genIUType Value)
		{
			unsigned long Result(0);
			unsigned char IsNotNull = _BitScanReverse64(&Result, *reinterpret_cast<unsigned __int64*>(&Value));
			return IsNotNull ? int(Result) : -1;
		}

		template <typename T, glm::precision P, template <class, glm::precision> class vecType>
		struct compute_findMSB_vec<T, P, vecType, 64>
		{
			GLM_FUNC_QUALIFIER static vecType<int, P> call(vecType<T, P> const & x)
			{
				return detail::functor1<int, T, P, vecType>::call(compute_findMSB_64, x);
			}
		};
#		endif
#	endif//GLM_HAS_BITSCAN_WINDOWS
}//namespace detail
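
	// uaddCarry, usubBorrow, umulExtended and imulExtended below widen their 32-bit
	// operands to 64 bits so the carry, the wrapped difference or the high half of
	// the product can be computed without overflow. For example
	// uaddCarry(0xFFFFFFFFu, 2u, Carry) forms the 64-bit sum 0x100000001, returns
	// 1u (the sum modulo 2^32) and sets Carry to 1.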

	// uaddCarry
	GLM_FUNC_QUALIFIER uint uaddCarry(uint const & x, uint const & y, uint & Carry)
	{
		uint64 const Value64(static_cast<uint64>(x) + static_cast<uint64>(y));
		uint64 const Max32((static_cast<uint64>(1) << static_cast<uint64>(32)) - static_cast<uint64>(1));
		Carry = Value64 > Max32 ? 1u : 0u;
		return static_cast<uint32>(Value64 % (Max32 + static_cast<uint64>(1)));
	}

	template <precision P, template <typename, precision> class vecType>
	GLM_FUNC_QUALIFIER vecType<uint, P> uaddCarry(vecType<uint, P> const & x, vecType<uint, P> const & y, vecType<uint, P> & Carry)
	{
		vecType<uint64, P> Value64(vecType<uint64, P>(x) + vecType<uint64, P>(y));
		vecType<uint64, P> Max32((static_cast<uint64>(1) << static_cast<uint64>(32)) - static_cast<uint64>(1));
		Carry = mix(vecType<uint32, P>(0), vecType<uint32, P>(1), greaterThan(Value64, Max32));
		return vecType<uint32, P>(Value64 % (Max32 + static_cast<uint64>(1)));
	}

	// usubBorrow
	GLM_FUNC_QUALIFIER uint usubBorrow(uint const & x, uint const & y, uint & Borrow)
	{
		GLM_STATIC_ASSERT(sizeof(uint) == sizeof(uint32), "uint and uint32 size mismatch");

		Borrow = x >= y ? static_cast<uint32>(0) : static_cast<uint32>(1);
		if(y >= x)
			return y - x;
		else
			return static_cast<uint32>((static_cast<int64>(1) << static_cast<int64>(32)) + (static_cast<int64>(y) - static_cast<int64>(x)));
	}

	template <precision P, template <typename, precision> class vecType>
	GLM_FUNC_QUALIFIER vecType<uint, P> usubBorrow(vecType<uint, P> const & x, vecType<uint, P> const & y, vecType<uint, P> & Borrow)
	{
		Borrow = mix(vecType<uint, P>(1), vecType<uint, P>(0), greaterThanEqual(x, y));
		vecType<uint, P> const YgeX(y - x);
		vecType<uint, P> const XgeY(vecType<uint32, P>((static_cast<int64>(1) << static_cast<int64>(32)) + (vecType<int64, P>(y) - vecType<int64, P>(x))));
		return mix(XgeY, YgeX, greaterThanEqual(y, x));
	}

	// umulExtended
	GLM_FUNC_QUALIFIER void umulExtended(uint const & x, uint const & y, uint & msb, uint & lsb)
	{
		GLM_STATIC_ASSERT(sizeof(uint) == sizeof(uint32), "uint and uint32 size mismatch");

		uint64 Value64 = static_cast<uint64>(x) * static_cast<uint64>(y);
		msb = static_cast<uint>(Value64 >> static_cast<uint64>(32));
		lsb = static_cast<uint>(Value64);
	}

	template <precision P, template <typename, precision> class vecType>
	GLM_FUNC_QUALIFIER void umulExtended(vecType<uint, P> const & x, vecType<uint, P> const & y, vecType<uint, P> & msb, vecType<uint, P> & lsb)
	{
		GLM_STATIC_ASSERT(sizeof(uint) == sizeof(uint32), "uint and uint32 size mismatch");

		vecType<uint64, P> Value64(vecType<uint64, P>(x) * vecType<uint64, P>(y));
		msb = vecType<uint32, P>(Value64 >> static_cast<uint64>(32));
		lsb = vecType<uint32, P>(Value64);
	}

	// imulExtended
	GLM_FUNC_QUALIFIER void imulExtended(int x, int y, int & msb, int & lsb)
	{
		GLM_STATIC_ASSERT(sizeof(int) == sizeof(int32), "int and int32 size mismatch");

		int64 Value64 = static_cast<int64>(x) * static_cast<int64>(y);
		msb = static_cast<int>(Value64 >> static_cast<int64>(32));
		lsb = static_cast<int>(Value64);
	}

	template <precision P, template <typename, precision> class vecType>
	GLM_FUNC_QUALIFIER void imulExtended(vecType<int, P> const & x, vecType<int, P> const & y, vecType<int, P> & msb, vecType<int, P> & lsb)
	{
		GLM_STATIC_ASSERT(sizeof(int) == sizeof(int32), "int and int32 size mismatch");

		vecType<int64, P> Value64(vecType<int64, P>(x) * vecType<int64, P>(y));
		lsb = vecType<int32, P>(Value64 & static_cast<int64>(0xFFFFFFFF));
		msb = vecType<int32, P>((Value64 >> static_cast<int64>(32)) & static_cast<int64>(0xFFFFFFFF));
	}
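
	// bitfieldExtract reads Bits bits starting at Offset, i.e. (Value >> Offset)
	// masked with detail::mask(Bits); bitfieldExtract(0x0000AB00u, 8, 8) yields 0xABu.
	// bitfieldInsert builds the window mask detail::mask(Bits) << Offset, keeps Base
	// outside the window and Insert inside it, so with this implementation
	// bitfieldInsert(0x000000FFu, 0x0000AB00u, 8, 8) yields 0x0000ABFFu.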

	// bitfieldExtract
	template <typename genIUType>
	GLM_FUNC_QUALIFIER genIUType bitfieldExtract(genIUType Value, int Offset, int Bits)
	{
		return bitfieldExtract(tvec1<genIUType>(Value), Offset, Bits).x;
	}

	template <typename T, precision P, template <typename, precision> class vecType>
	GLM_FUNC_QUALIFIER vecType<T, P> bitfieldExtract(vecType<T, P> const & Value, int Offset, int Bits)
	{
		GLM_STATIC_ASSERT(std::numeric_limits<T>::is_integer, "'bitfieldExtract' only accept integer inputs");

		return (Value >> static_cast<T>(Offset)) & static_cast<T>(detail::mask(Bits));
	}

	// bitfieldInsert
	template <typename genIUType>
	GLM_FUNC_QUALIFIER genIUType bitfieldInsert(genIUType const & Base, genIUType const & Insert, int Offset, int Bits)
	{
		return bitfieldInsert(tvec1<genIUType>(Base), tvec1<genIUType>(Insert), Offset, Bits).x;
	}

	template <typename T, precision P, template <typename, precision> class vecType>
	GLM_FUNC_QUALIFIER vecType<T, P> bitfieldInsert(vecType<T, P> const & Base, vecType<T, P> const & Insert, int Offset, int Bits)
	{
		GLM_STATIC_ASSERT(std::numeric_limits<T>::is_integer, "'bitfieldInsert' only accept integer values");

		T const Mask = static_cast<T>(detail::mask(Bits) << Offset);
		return (Base & ~Mask) | (Insert & Mask);
	}
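
	// bitfieldReverse and bitCount share the same divide-and-conquer mask cascade
	// (0x5555..., 0x3333..., 0x0F0F..., ...): bitfieldReverse swaps the two halves of
	// every 2-, 4-, 8-, ... bit group, which reverses the whole value once the group
	// size reaches the type width, while bitCount adds the two halves of each group
	// so every group ends up holding its own population count. Steps wider than the
	// element type select the pass-through primary template through the boolean
	// condition passed as the last template argument.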

	// bitfieldReverse
	template <typename genType>
	GLM_FUNC_QUALIFIER genType bitfieldReverse(genType x)
	{
		return bitfieldReverse(glm::tvec1<genType, glm::defaultp>(x)).x;
	}

	template <typename T, glm::precision P, template <typename, glm::precision> class vecType>
	GLM_FUNC_QUALIFIER vecType<T, P> bitfieldReverse(vecType<T, P> const & v)
	{
		vecType<T, P> x(v);
		x = detail::compute_bitfieldReverseStep<T, P, vecType, detail::is_aligned<P>::value, sizeof(T) * 8 >=  2>::call(x, T(0x5555555555555555ull), static_cast<T>( 1));
		x = detail::compute_bitfieldReverseStep<T, P, vecType, detail::is_aligned<P>::value, sizeof(T) * 8 >=  4>::call(x, T(0x3333333333333333ull), static_cast<T>( 2));
		x = detail::compute_bitfieldReverseStep<T, P, vecType, detail::is_aligned<P>::value, sizeof(T) * 8 >=  8>::call(x, T(0x0F0F0F0F0F0F0F0Full), static_cast<T>( 4));
		x = detail::compute_bitfieldReverseStep<T, P, vecType, detail::is_aligned<P>::value, sizeof(T) * 8 >= 16>::call(x, T(0x00FF00FF00FF00FFull), static_cast<T>( 8));
		x = detail::compute_bitfieldReverseStep<T, P, vecType, detail::is_aligned<P>::value, sizeof(T) * 8 >= 32>::call(x, T(0x0000FFFF0000FFFFull), static_cast<T>(16));
		x = detail::compute_bitfieldReverseStep<T, P, vecType, detail::is_aligned<P>::value, sizeof(T) * 8 >= 64>::call(x, T(0x00000000FFFFFFFFull), static_cast<T>(32));
		return x;
	}

	// bitCount
	template <typename genType>
	GLM_FUNC_QUALIFIER int bitCount(genType x)
	{
		return bitCount(glm::tvec1<genType, glm::defaultp>(x)).x;
	}

	template <typename T, glm::precision P, template <typename, glm::precision> class vecType>
	GLM_FUNC_QUALIFIER vecType<int, P> bitCount(vecType<T, P> const & v)
	{
		vecType<typename detail::make_unsigned<T>::type, P> x(*reinterpret_cast<vecType<typename detail::make_unsigned<T>::type, P> const *>(&v));
		x = detail::compute_bitfieldBitCountStep<typename detail::make_unsigned<T>::type, P, vecType, detail::is_aligned<P>::value, sizeof(T) * 8 >=  2>::call(x, typename detail::make_unsigned<T>::type(0x5555555555555555ull), typename detail::make_unsigned<T>::type( 1));
		x = detail::compute_bitfieldBitCountStep<typename detail::make_unsigned<T>::type, P, vecType, detail::is_aligned<P>::value, sizeof(T) * 8 >=  4>::call(x, typename detail::make_unsigned<T>::type(0x3333333333333333ull), typename detail::make_unsigned<T>::type( 2));
		x = detail::compute_bitfieldBitCountStep<typename detail::make_unsigned<T>::type, P, vecType, detail::is_aligned<P>::value, sizeof(T) * 8 >=  8>::call(x, typename detail::make_unsigned<T>::type(0x0F0F0F0F0F0F0F0Full), typename detail::make_unsigned<T>::type( 4));
		x = detail::compute_bitfieldBitCountStep<typename detail::make_unsigned<T>::type, P, vecType, detail::is_aligned<P>::value, sizeof(T) * 8 >= 16>::call(x, typename detail::make_unsigned<T>::type(0x00FF00FF00FF00FFull), typename detail::make_unsigned<T>::type( 8));
		x = detail::compute_bitfieldBitCountStep<typename detail::make_unsigned<T>::type, P, vecType, detail::is_aligned<P>::value, sizeof(T) * 8 >= 32>::call(x, typename detail::make_unsigned<T>::type(0x0000FFFF0000FFFFull), typename detail::make_unsigned<T>::type(16));
		x = detail::compute_bitfieldBitCountStep<typename detail::make_unsigned<T>::type, P, vecType, detail::is_aligned<P>::value, sizeof(T) * 8 >= 64>::call(x, typename detail::make_unsigned<T>::type(0x00000000FFFFFFFFull), typename detail::make_unsigned<T>::type(32));
		return vecType<int, P>(x);
	}

	// findLSB
	template <typename genIUType>
	GLM_FUNC_QUALIFIER int findLSB(genIUType Value)
	{
		GLM_STATIC_ASSERT(std::numeric_limits<genIUType>::is_integer, "'findLSB' only accept integer values");

		return detail::compute_findLSB<genIUType, sizeof(genIUType) * 8>::call(Value);
	}

	template <typename T, precision P, template <typename, precision> class vecType>
	GLM_FUNC_QUALIFIER vecType<int, P> findLSB(vecType<T, P> const & x)
	{
		GLM_STATIC_ASSERT(std::numeric_limits<T>::is_integer, "'findLSB' only accept integer values");

		return detail::functor1<int, T, P, vecType>::call(findLSB, x);
	}

	// findMSB
	template <typename genIUType>
	GLM_FUNC_QUALIFIER int findMSB(genIUType x)
	{
		GLM_STATIC_ASSERT(std::numeric_limits<genIUType>::is_integer, "'findMSB' only accept integer values");

		return findMSB(tvec1<genIUType>(x)).x;
	}

	template <typename T, precision P, template <typename, precision> class vecType>
	GLM_FUNC_QUALIFIER vecType<int, P> findMSB(vecType<T, P> const & x)
	{
		GLM_STATIC_ASSERT(std::numeric_limits<T>::is_integer, "'findMSB' only accept integer values");

		return detail::compute_findMSB_vec<T, P, vecType, sizeof(T) * 8>::call(x);
	}
}//namespace glm

#if GLM_ARCH != GLM_ARCH_PURE && GLM_HAS_UNRESTRICTED_UNIONS
#	include "func_integer_simd.inl"
#endif