/**************************************************************************
 *
 * Copyright 2008 VMware, Inc.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL VMWARE AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/


/**
 * Math utilities and approximations for common math functions.
 * Reduced precision is usually acceptable in shaders...
 *
 * "fast" is used in the names of functions which are low-precision,
 * or at least lower-precision than the normal C lib functions.
 */


#ifndef U_MATH_H
#define U_MATH_H


#include "c99_compat.h"
#include <assert.h>
#include <float.h>
#include <stdarg.h>
#include <math.h>

#include "bitscan.h"
#include "u_endian.h" /* for UTIL_ARCH_BIG_ENDIAN */

#ifdef __cplusplus
extern "C" {
#endif


#ifndef M_SQRT2
#define M_SQRT2 1.41421356237309504880
#endif

/**
 * Initialize math module. This should be called before using any
 * other functions in this module.
 */
extern void
util_init_math(void);


union fi {
   float f;
   int32_t i;
   uint32_t ui;
};


union di {
   double d;
   int64_t i;
   uint64_t ui;
};


/**
 * Extract the IEEE float32 exponent.
 */
static inline signed
util_get_float32_exponent(float x)
{
   union fi f;

   f.f = x;

   return ((f.ui >> 23) & 0xff) - 127;
}

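/*
 * Illustrative values for util_get_float32_exponent().  Note this is a raw
 * field extraction, not ilogb(): zero and denormals report -127, and
 * Inf/NaN report 128.
 *
 *    util_get_float32_exponent(8.0f)    ->  3
 *    util_get_float32_exponent(0.75f)   -> -1
 */
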
#define LOG2_TABLE_SIZE_LOG2 8
#define LOG2_TABLE_SCALE (1 << LOG2_TABLE_SIZE_LOG2)
#define LOG2_TABLE_SIZE (LOG2_TABLE_SCALE + 1)
extern float log2_table[LOG2_TABLE_SIZE];


/**
 * Fast approximation to log2(x).
 */
static inline float
util_fast_log2(float x)
{
   union fi num;
   float epart, mpart;
   num.f = x;
   epart = (float)(((num.i & 0x7f800000) >> 23) - 127);
   /* mpart = log2_table[mantissa*LOG2_TABLE_SCALE + 0.5] */
   mpart = log2_table[((num.i & 0x007fffff) + (1 << (22 - LOG2_TABLE_SIZE_LOG2))) >> (23 - LOG2_TABLE_SIZE_LOG2)];
   return epart + mpart;
}

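/*
 * Usage sketch for util_fast_log2().  The result is approximate, bounded by
 * the resolution of the 2^LOG2_TABLE_SIZE_LOG2-entry table, and is intended
 * for positive, finite inputs (the sign bit is simply ignored):
 *
 *    util_fast_log2(8.0f);    // exactly 3.0f, assuming log2_table[0] == 0
 *    util_fast_log2(10.0f);   // ~3.32f, within table precision
 */
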
/**
 * Floor(x), returned as int.
 */
static inline int
util_ifloor(float f)
{
#if defined(USE_X86_ASM) && defined(__GNUC__) && defined(__i386__)
   /*
    * IEEE floor for computers that round to nearest or even.
    * 'f' must be between -4194304 and 4194303.
    * This floor operation is done by "(iround(f + .5) + iround(f - .5)) >> 1",
    * but uses some IEEE specific tricks for better speed.
    * Contributed by Josh Vanderhoof
    */
   int ai, bi;
   double af, bf;
   af = (3 << 22) + 0.5 + (double)f;
   bf = (3 << 22) + 0.5 - (double)f;
   /* GCC generates an extra fstp/fld without this. */
   __asm__ ("fstps %0" : "=m" (ai) : "t" (af) : "st");
   __asm__ ("fstps %0" : "=m" (bi) : "t" (bf) : "st");
   return (ai - bi) >> 1;
#else
   int ai, bi;
   double af, bf;
   union fi u;
   af = (3 << 22) + 0.5 + (double) f;
   bf = (3 << 22) + 0.5 - (double) f;
   u.f = (float) af; ai = u.i;
   u.f = (float) bf; bi = u.i;
   return (ai - bi) >> 1;
#endif
}

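/*
 * How the bias trick above works (hedged sketch, assuming IEEE-754
 * round-to-nearest-even): adding (3 << 22) + 0.5 pushes both sums into
 * [2^23, 2^24), where float addition itself rounds to a whole number and
 * the mantissa bits encode that integer directly, so ai - bi comes out as
 * 2*floor(f) or 2*floor(f) + 1 and the arithmetic shift floors it.
 *
 *    util_ifloor(1.9f)    ->  1
 *    util_ifloor(-1.1f)   -> -2
 */
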
/**
 * Round float to nearest int.
 */
static inline int
util_iround(float f)
{
#if defined(PIPE_CC_GCC) && defined(PIPE_ARCH_X86)
   int r;
   __asm__ ("fistpl %0" : "=m" (r) : "t" (f) : "st");
   return r;
#elif defined(PIPE_CC_MSVC) && defined(PIPE_ARCH_X86)
   int r;
   _asm {
      fld f
      fistp r
   }
   return r;
#else
   if (f >= 0.0f)
      return (int) (f + 0.5f);
   else
      return (int) (f - 0.5f);
#endif
}


/**
 * Approximate floating point comparison
 */
static inline bool
util_is_approx(float a, float b, float tol)
{
   return fabsf(b - a) <= tol;
}


/**
 * util_is_X_inf_or_nan = test if x is NaN or +/- Inf
 * util_is_X_nan = test if x is NaN
 * util_X_inf_sign = return +1 for +Inf, -1 for -Inf, or 0 for not Inf
 *
 * NaN can be checked with x != x, but that test fails when fast-math
 * flags are enabled.
 **/


/**
 * Single-float
 */
static inline bool
util_is_inf_or_nan(float x)
{
   union fi tmp;
   tmp.f = x;
   return (tmp.ui & 0x7f800000) == 0x7f800000;
}


static inline bool
util_is_nan(float x)
{
   union fi tmp;
   tmp.f = x;
   return (tmp.ui & 0x7fffffff) > 0x7f800000;
}


static inline int
util_inf_sign(float x)
{
   union fi tmp;
   tmp.f = x;
   if ((tmp.ui & 0x7fffffff) != 0x7f800000) {
      return 0;
   }

   return (x < 0) ? -1 : 1;
}

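/*
 * Bit-pattern examples for the single-float classifiers (illustrative;
 * exact results assume IEEE-754 binary32 and uif() from below):
 *
 *    util_is_inf_or_nan(uif(0x7f800000))   -> true   (+Inf)
 *    util_is_nan(uif(0x7fc00000))          -> true   (quiet NaN)
 *    util_inf_sign(-INFINITY)              -> -1
 *    util_inf_sign(1.0f)                   ->  0
 */
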
/**
 * Double-float
 */
static inline bool
util_is_double_inf_or_nan(double x)
{
   union di tmp;
   tmp.d = x;
   return (tmp.ui & 0x7ff0000000000000ULL) == 0x7ff0000000000000ULL;
}


static inline bool
util_is_double_nan(double x)
{
   union di tmp;
   tmp.d = x;
   return (tmp.ui & 0x7fffffffffffffffULL) > 0x7ff0000000000000ULL;
}


static inline int
util_double_inf_sign(double x)
{
   union di tmp;
   tmp.d = x;
   if ((tmp.ui & 0x7fffffffffffffffULL) != 0x7ff0000000000000ULL) {
      return 0;
   }

   return (x < 0) ? -1 : 1;
}


/**
 * Half-float
 */
static inline bool
util_is_half_inf_or_nan(int16_t x)
{
   return (x & 0x7c00) == 0x7c00;
}


static inline bool
util_is_half_nan(int16_t x)
{
   return (x & 0x7fff) > 0x7c00;
}


static inline int
util_half_inf_sign(int16_t x)
{
   if ((x & 0x7fff) != 0x7c00) {
      return 0;
   }

   return (x < 0) ? -1 : 1;
}


/**
 * Return float bits.
 */
static inline unsigned
fui( float f )
{
   union fi fi;
   fi.f = f;
   return fi.ui;
}

static inline float
uif(uint32_t ui)
{
   union fi fi;
   fi.ui = ui;
   return fi.f;
}

/**
 * Convert uint8_t to float in [0, 1].
 */
static inline float
ubyte_to_float(uint8_t ub)
{
   return (float) ub * (1.0f / 255.0f);
}


/**
 * Convert float in [0,1] to uint8_t in [0,255] with clamping.
 */
static inline uint8_t
float_to_ubyte(float f)
{
   /* return 0 for NaN too */
   if (!(f > 0.0f)) {
      return (uint8_t) 0;
   }
   else if (f >= 1.0f) {
      return (uint8_t) 255;
   }
   else {
      union fi tmp;
      tmp.f = f;
      tmp.f = tmp.f * (255.0f/256.0f) + 32768.0f;
      return (uint8_t) tmp.i;
   }
}

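/*
 * Why the float_to_ubyte() trick works (hedged sketch): for f in (0, 1),
 * f * 255/256 + 32768.0f lands in [2^15, 2^15 + 255/256), a range where
 * the low mantissa bits of the float hold the fractional offset in 1/256
 * steps, so the low byte of the raw integer view is f * 255 as rounded by
 * the float add.  For example:
 *
 *    float_to_ubyte(0.5f)   -> 128
 *    float_to_ubyte(NAN)    -> 0     (the !(f > 0.0f) test catches NaN)
 */
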
/**
 * Convert uint16_t to float in [0, 1].
 */
static inline float
ushort_to_float(uint16_t us)
{
   return (float) us * (1.0f / 65535.0f);
}


/**
 * Convert float in [0,1] to uint16_t in [0,65535] with clamping.
 */
static inline uint16_t
float_to_ushort(float f)
{
   /* return 0 for NaN too */
   if (!(f > 0.0f)) {
      return (uint16_t) 0;
   }
   else if (f >= 1.0f) {
      return (uint16_t) 65535;
   }
   else {
      union fi tmp;
      tmp.f = f;
      tmp.f = tmp.f * (65535.0f/65536.0f) + 128.0f;
      return (uint16_t) tmp.i;
   }
}

static inline float
byte_to_float_tex(int8_t b)
{
   return (b == -128) ? -1.0F : b * 1.0F / 127.0F;
}

static inline int8_t
float_to_byte_tex(float f)
{
   return (int8_t) (127.0F * f);
}

/**
 * Calc log base 2
 */
static inline unsigned
util_logbase2(unsigned n)
{
#if defined(HAVE___BUILTIN_CLZ)
   return ((sizeof(unsigned) * 8 - 1) - __builtin_clz(n | 1));
#else
   unsigned pos = 0;
   if (n >= 1<<16) { n >>= 16; pos += 16; }
   if (n >= 1<< 8) { n >>=  8; pos +=  8; }
   if (n >= 1<< 4) { n >>=  4; pos +=  4; }
   if (n >= 1<< 2) { n >>=  2; pos +=  2; }
   if (n >= 1<< 1) { pos +=  1; }
   return pos;
#endif
}

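/*
 * util_logbase2() returns floor(log2(n)); the n | 1 in the builtin path
 * makes util_logbase2(0) return 0 rather than being undefined.  Examples:
 *
 *    util_logbase2(1)      -> 0
 *    util_logbase2(1023)   -> 9
 *    util_logbase2(1024)   -> 10
 */
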
static inline uint64_t
util_logbase2_64(uint64_t n)
{
#if defined(HAVE___BUILTIN_CLZLL)
   return ((sizeof(uint64_t) * 8 - 1) - __builtin_clzll(n | 1));
#else
   uint64_t pos = 0ull;
   if (n >= 1ull<<32) { n >>= 32; pos += 32; }
   if (n >= 1ull<<16) { n >>= 16; pos += 16; }
   if (n >= 1ull<< 8) { n >>=  8; pos +=  8; }
   if (n >= 1ull<< 4) { n >>=  4; pos +=  4; }
   if (n >= 1ull<< 2) { n >>=  2; pos +=  2; }
   if (n >= 1ull<< 1) { pos +=  1; }
   return pos;
#endif
}

/**
 * Returns the ceiling of log n base 2, and 0 when n == 0. Equivalently,
 * returns the smallest x such that n <= 2**x.
 */
static inline unsigned
util_logbase2_ceil(unsigned n)
{
   if (n <= 1)
      return 0;

   return 1 + util_logbase2(n - 1);
}

static inline uint64_t
util_logbase2_ceil64(uint64_t n)
{
   if (n <= 1)
      return 0;

   return 1ull + util_logbase2_64(n - 1);
}

/**
 * Returns the smallest power of two >= x
 */
static inline unsigned
util_next_power_of_two(unsigned x)
{
#if defined(HAVE___BUILTIN_CLZ)
   if (x <= 1)
      return 1;

   return (1 << ((sizeof(unsigned) * 8) - __builtin_clz(x - 1)));
#else
   unsigned val = x;

   if (x <= 1)
      return 1;

   if (util_is_power_of_two_or_zero(x))
      return x;

   val--;
   val = (val >> 1) | val;
   val = (val >> 2) | val;
   val = (val >> 4) | val;
   val = (val >> 8) | val;
   val = (val >> 16) | val;
   val++;
   return val;
#endif
}

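/*
 * The fallback path above smears the highest set bit of x - 1 into every
 * lower position, so val + 1 is the next power of two.  Examples:
 *
 *    util_next_power_of_two(0)    -> 1
 *    util_next_power_of_two(16)   -> 16
 *    util_next_power_of_two(17)   -> 32
 */
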
static inline uint64_t
util_next_power_of_two64(uint64_t x)
{
#if defined(HAVE___BUILTIN_CLZLL)
   if (x <= 1)
      return 1;

   return (1ull << ((sizeof(uint64_t) * 8) - __builtin_clzll(x - 1)));
#else
   uint64_t val = x;

   if (x <= 1)
      return 1;

   if (util_is_power_of_two_or_zero64(x))
      return x;

   val--;
   val = (val >> 1) | val;
   val = (val >> 2) | val;
   val = (val >> 4) | val;
   val = (val >> 8) | val;
   val = (val >> 16) | val;
   val = (val >> 32) | val;
   val++;
   return val;
#endif
}

/**
 * Reverse bits in n
 * Algorithm taken from:
 * http://stackoverflow.com/questions/9144800/c-reverse-bits-in-unsigned-integer
 */
static inline unsigned
util_bitreverse(unsigned n)
{
   n = ((n >> 1) & 0x55555555u) | ((n & 0x55555555u) << 1);
   n = ((n >> 2) & 0x33333333u) | ((n & 0x33333333u) << 2);
   n = ((n >> 4) & 0x0f0f0f0fu) | ((n & 0x0f0f0f0fu) << 4);
   n = ((n >> 8) & 0x00ff00ffu) | ((n & 0x00ff00ffu) << 8);
   n = ((n >> 16) & 0xffffu) | ((n & 0xffffu) << 16);
   return n;
}

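/*
 * util_bitreverse() swaps progressively larger bit groups (single bits,
 * pairs, nibbles, bytes, halfwords), mirroring all 32 bits in five steps:
 *
 *    util_bitreverse(0x00000001)   -> 0x80000000
 *    util_bitreverse(0x000000f0)   -> 0x0f000000
 */
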
/**
 * Convert from little endian to CPU byte order.
 */

#if UTIL_ARCH_BIG_ENDIAN
#define util_le64_to_cpu(x) util_bswap64(x)
#define util_le32_to_cpu(x) util_bswap32(x)
#define util_le16_to_cpu(x) util_bswap16(x)
#else
#define util_le64_to_cpu(x) (x)
#define util_le32_to_cpu(x) (x)
#define util_le16_to_cpu(x) (x)
#endif

#define util_cpu_to_le64(x) util_le64_to_cpu(x)
#define util_cpu_to_le32(x) util_le32_to_cpu(x)
#define util_cpu_to_le16(x) util_le16_to_cpu(x)

/**
 * Reverse byte order of a 32 bit word.
 */
static inline uint32_t
util_bswap32(uint32_t n)
{
#if defined(HAVE___BUILTIN_BSWAP32)
   return __builtin_bswap32(n);
#else
   return (n >> 24) |
          ((n >> 8) & 0x0000ff00) |
          ((n << 8) & 0x00ff0000) |
          (n << 24);
#endif
}

/**
 * Reverse byte order of a 64bit word.
 */
static inline uint64_t
util_bswap64(uint64_t n)
{
#if defined(HAVE___BUILTIN_BSWAP64)
   return __builtin_bswap64(n);
#else
   return ((uint64_t)util_bswap32((uint32_t)n) << 32) |
          util_bswap32((n >> 32));
#endif
}


/**
 * Reverse byte order of a 16 bit word.
 */
static inline uint16_t
util_bswap16(uint16_t n)
{
   return (n >> 8) |
          (n << 8);
}

/**
 * Mask and sign-extend a number
 *
 * The bit at position `width - 1` is replicated to all the higher bits.
 * This makes no assumptions about the high bits of the value and will
 * overwrite them with the sign bit.
 */
static inline int64_t
util_mask_sign_extend(uint64_t val, unsigned width)
{
   assert(width > 0 && width <= 64);
   unsigned shift = 64 - width;
   return (int64_t)(val << shift) >> shift;
}

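/*
 * util_mask_sign_extend() shifts the value up so that bit width - 1 becomes
 * the sign bit, then shifts back down arithmetically.  Examples:
 *
 *    util_mask_sign_extend(0xff, 8)   -> -1
 *    util_mask_sign_extend(0x7f, 8)   -> 127
 */
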
/**
 * Sign-extend a number
 *
 * The bit at position `width - 1` is replicated to all the higher bits.
 * This assumes and asserts that the value fits into `width` bits.
 */
static inline int64_t
util_sign_extend(uint64_t val, unsigned width)
{
   assert(width == 64 || val < (UINT64_C(1) << width));
   return util_mask_sign_extend(val, width);
}

static inline void*
util_memcpy_cpu_to_le32(void * restrict dest, const void * restrict src, size_t n)
{
#if UTIL_ARCH_BIG_ENDIAN
   size_t i, e;
   assert(n % 4 == 0);

   for (i = 0, e = n / 4; i < e; i++) {
      uint32_t * restrict d = (uint32_t* restrict)dest;
      const uint32_t * restrict s = (const uint32_t* restrict)src;
      d[i] = util_bswap32(s[i]);
   }
   return dest;
#else
   return memcpy(dest, src, n);
#endif
}

/**
 * Clamp X to [MIN, MAX].
 * This is a macro to allow float, int, uint, etc. types.
 * We arbitrarily turn NaN into MIN.
 */
#define CLAMP( X, MIN, MAX )  ( (X)>(MIN) ? ((X)>(MAX) ? (MAX) : (X)) : (MIN) )

/* Syntax sugar occurring frequently in graphics code */
#define SATURATE( X ) CLAMP(X, 0.0f, 1.0f)

#define MIN2( A, B )   ( (A)<(B) ? (A) : (B) )
#define MAX2( A, B )   ( (A)>(B) ? (A) : (B) )

#define MIN3( A, B, C ) ((A) < (B) ? MIN2(A, C) : MIN2(B, C))
#define MAX3( A, B, C ) ((A) > (B) ? MAX2(A, C) : MAX2(B, C))

#define MIN4( A, B, C, D ) ((A) < (B) ? MIN3(A, C, D) : MIN3(B, C, D))
#define MAX4( A, B, C, D ) ((A) > (B) ? MAX3(A, C, D) : MAX3(B, C, D))


/**
 * Align a value up to an alignment value
 *
 * If \c value is not already aligned to the requested alignment value, it
 * will be rounded up.
 *
 * \param value      Value to be rounded
 * \param alignment  Alignment value to be used.  This must be a power of two.
 *
 * \sa ROUND_DOWN_TO()
 */

#if defined(ALIGN)
#undef ALIGN
#endif
static inline uintptr_t
ALIGN(uintptr_t value, int32_t alignment)
{
   assert(util_is_power_of_two_nonzero(alignment));
   return (((value) + (alignment) - 1) & ~((alignment) - 1));
}

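/*
 * ALIGN() examples (power-of-two alignments only, as asserted above):
 *
 *    ALIGN(13, 16)   -> 16
 *    ALIGN(32, 16)   -> 32
 */
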
/**
 * Like ALIGN(), but works with a non-power-of-two alignment.
 */
static inline uintptr_t
ALIGN_NPOT(uintptr_t value, int32_t alignment)
{
   assert(alignment > 0);
   return (value + alignment - 1) / alignment * alignment;
}

/**
 * Align a value down to an alignment value
 *
 * If \c value is not already aligned to the requested alignment value, it
 * will be rounded down.
 *
 * \param value      Value to be rounded
 * \param alignment  Alignment value to be used.  This must be a power of two.
 *
 * \sa ALIGN()
 */
static inline uint64_t
ROUND_DOWN_TO(uint64_t value, int32_t alignment)
{
   assert(util_is_power_of_two_nonzero(alignment));
   return ((value) & ~(alignment - 1));
}

/**
 * Align a value; only works with power-of-two alignments.
 */
static inline int
align(int value, int alignment)
{
   return (value + alignment - 1) & ~(alignment - 1);
}

static inline uint64_t
align64(uint64_t value, unsigned alignment)
{
   return (value + alignment - 1) & ~((uint64_t)alignment - 1);
}

/**
 * Works like align but on npot alignments.
 */
static inline size_t
util_align_npot(size_t value, size_t alignment)
{
   if (value % alignment)
      return value + (alignment - (value % alignment));
   return value;
}

static inline unsigned
u_minify(unsigned value, unsigned levels)
{
   return MAX2(1, value >> levels);
}

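/*
 * u_minify() computes mipmap level dimensions, clamped so the smallest
 * level is never 0.  For a 100-texel dimension:
 *
 *    u_minify(100, 1)   -> 50
 *    u_minify(100, 3)   -> 12
 *    u_minify(100, 7)   -> 1    (100 >> 7 == 0, clamped to 1)
 */
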
#ifndef COPY_4V
#define COPY_4V( DST, SRC )      \
do {                             \
   (DST)[0] = (SRC)[0];          \
   (DST)[1] = (SRC)[1];          \
   (DST)[2] = (SRC)[2];          \
   (DST)[3] = (SRC)[3];          \
} while (0)
#endif


#ifndef COPY_4FV
#define COPY_4FV( DST, SRC )  COPY_4V(DST, SRC)
#endif


#ifndef ASSIGN_4V
#define ASSIGN_4V( DST, V0, V1, V2, V3 ) \
do {                                     \
   (DST)[0] = (V0);                      \
   (DST)[1] = (V1);                      \
   (DST)[2] = (V2);                      \
   (DST)[3] = (V3);                      \
} while (0)
#endif


static inline uint32_t
util_unsigned_fixed(float value, unsigned frac_bits)
{
   return value < 0 ? 0 : (uint32_t)(value * (1<<frac_bits));
}

static inline int32_t
util_signed_fixed(float value, unsigned frac_bits)
{
   return (int32_t)(value * (1<<frac_bits));
}

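/*
 * Fixed-point conversion examples (value * 2^frac_bits, truncated toward
 * zero by the cast):
 *
 *    util_signed_fixed(1.5f, 8)      -> 384    (1.5 * 256)
 *    util_signed_fixed(-0.25f, 4)    -> -4
 *    util_unsigned_fixed(-1.0f, 8)   -> 0      (negative input clamps to 0)
 */
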
unsigned
util_fpstate_get(void);
unsigned
util_fpstate_set_denorms_to_zero(unsigned current_fpstate);
void
util_fpstate_set(unsigned fpstate);

/**
 * For indexed draw calls, return true if the vertex count to be drawn is
 * much lower than the vertex count that has to be uploaded, meaning
 * that the driver should flatten indices instead of trying to upload
 * too large a range.
 *
 * This is used by vertex upload code in u_vbuf and glthread.
 */
static inline bool
util_is_vbo_upload_ratio_too_large(unsigned draw_vertex_count,
                                   unsigned upload_vertex_count)
{
   if (draw_vertex_count > 1024)
      return upload_vertex_count > draw_vertex_count * 4;
   else if (draw_vertex_count > 32)
      return upload_vertex_count > draw_vertex_count * 8;
   else
      return upload_vertex_count > draw_vertex_count * 16;
}

bool util_invert_mat4x4(float *out, const float *m);

/* Quantize the lod bias value to reduce the number of sampler state
 * variants in gallium because apps use it for smooth mipmap transitions,
 * thrashing cso_cache and degrading performance.
 *
 * This quantization matches the AMD hw specification, so having more
 * precision would have no effect anyway.
 */
static inline float
util_quantize_lod_bias(float lod)
{
   lod = CLAMP(lod, -16, 16);
   return roundf(lod * 256) / 256;
}

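/*
 * util_quantize_lod_bias() snaps to 1/256 steps after clamping to [-16, 16]:
 *
 *    util_quantize_lod_bias(0.3f)    -> 77.0f / 256   (~0.30078f)
 *    util_quantize_lod_bias(20.0f)   -> 16.0f
 */
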
#ifdef __cplusplus
}
#endif

#endif /* U_MATH_H */