1 /*
2 * Copyright © 2018 Intel Corporation
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21 * IN THE SOFTWARE.
22 */
23 #include "nir_range_analysis.h"
24 #include <float.h>
25 #include <math.h>
26 #include "util/hash_table.h"
27 #include "util/u_dynarray.h"
28 #include "util/u_math.h"
29 #include "c99_alloca.h"
30 #include "nir.h"
31
32 /**
33 * Analyzes a sequence of operations to determine some aspects of the range of
34 * the result.
35 */
36
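/* The analysis runs iteratively instead of recursively: pending questions are
 * kept on a query stack and partial answers on a parallel result stack.
 * Each query records how many sub-queries it pushed (pushed_queries) and
 * where its own answer lives on the result stack (result_index).
 */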
37 struct analysis_query {
38 uint32_t pushed_queries;
39 uint32_t result_index;
40 };
41
42 struct analysis_state {
43 nir_shader *shader;
44 const nir_unsigned_upper_bound_config *config;
45 struct hash_table *range_ht;
46
47 struct util_dynarray query_stack;
48 struct util_dynarray result_stack;
49
50 size_t query_size;
51 uintptr_t (*get_key)(struct analysis_query *q);
52 void (*process_query)(struct analysis_state *state, struct analysis_query *q,
53 uint32_t *result, const uint32_t *src);
54 };
55
56 static void *
57 push_analysis_query(struct analysis_state *state, size_t size)
58 {
59 struct analysis_query *q = util_dynarray_grow_bytes(&state->query_stack, 1, size);
60 q->pushed_queries = 0;
61 q->result_index = util_dynarray_num_elements(&state->result_stack, uint32_t);
62
63 util_dynarray_append(&state->result_stack, uint32_t, 0);
64
65 return q;
66 }
67
68 /* Helper for performing range analysis without recursion. */
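/* On each iteration the query at the top of query_stack is examined. If a
 * cached entry exists in range_ht, its value is copied into the query's
 * result slot and the query is popped. Otherwise process_query() either
 * consumes the sub-results already sitting on result_stack and writes the
 * final answer, or pushes new sub-queries; in that case pushed_queries is
 * updated so the sub-results can be collected on a later visit.
 */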
69 static uint32_t
70 perform_analysis(struct analysis_state *state)
71 {
72 while (state->query_stack.size) {
73 struct analysis_query *cur =
74 (struct analysis_query *)((char *)util_dynarray_end(&state->query_stack) - state->query_size);
75 uint32_t *result = util_dynarray_element(&state->result_stack, uint32_t, cur->result_index);
76
77 uintptr_t key = state->get_key(cur);
78 struct hash_entry *he = NULL;
79 /* There might be a cycle-resolving entry for loop header phis. Ignore this when finishing
80 * them by testing pushed_queries.
81 */
82 if (cur->pushed_queries == 0 && key &&
83 (he = _mesa_hash_table_search(state->range_ht, (void *)key))) {
84 *result = (uintptr_t)he->data;
85 state->query_stack.size -= state->query_size;
86 continue;
87 }
88
89 uint32_t *src = (uint32_t *)util_dynarray_end(&state->result_stack) - cur->pushed_queries;
90 state->result_stack.size -= sizeof(uint32_t) * cur->pushed_queries;
91
92 uint32_t prev_num_queries = state->query_stack.size;
93 state->process_query(state, cur, result, src);
94
95 uint32_t num_queries = state->query_stack.size;
96 if (num_queries > prev_num_queries) {
97 cur = (struct analysis_query *)util_dynarray_element(&state->query_stack, char,
98 prev_num_queries - state->query_size);
99 cur->pushed_queries = (num_queries - prev_num_queries) / state->query_size;
100 continue;
101 }
102
103 if (key)
104 _mesa_hash_table_insert(state->range_ht, (void *)key, (void *)(uintptr_t)*result);
105
106 state->query_stack.size -= state->query_size;
107 }
108
109 assert(state->result_stack.size == sizeof(uint32_t));
110
111 uint32_t res = util_dynarray_top(&state->result_stack, uint32_t);
112 util_dynarray_fini(&state->query_stack);
113 util_dynarray_fini(&state->result_stack);
114
115 return res;
116 }
117
118 static bool
119 is_not_negative(enum ssa_ranges r)
120 {
121 return r == gt_zero || r == ge_zero || r == eq_zero;
122 }
123
124 static bool
125 is_not_zero(enum ssa_ranges r)
126 {
127 return r == gt_zero || r == lt_zero || r == ne_zero;
128 }
129
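/* Packed encoding of struct ssa_result_range in a uint32_t, shared by
 * pack_data() and unpack_data() below:
 *
 *   bits 7:0  enum ssa_ranges value
 *   bit  8    is_integral
 *   bit  9    is_finite
 *   bit  10   is_a_number
 */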
130 static uint32_t
131 pack_data(const struct ssa_result_range r)
132 {
133 return r.range | r.is_integral << 8 | r.is_finite << 9 | r.is_a_number << 10;
134 }
135
136 static struct ssa_result_range
137 unpack_data(uint32_t v)
138 {
139 return (struct ssa_result_range){
140 .range = v & 0xff,
141 .is_integral = (v & 0x00100) != 0,
142 .is_finite = (v & 0x00200) != 0,
143 .is_a_number = (v & 0x00400) != 0
144 };
145 }
146
147 static nir_alu_type
148 nir_alu_src_type(const nir_alu_instr *instr, unsigned src)
149 {
150 return nir_alu_type_get_base_type(nir_op_infos[instr->op].input_types[src]) |
151 nir_src_bit_size(instr->src[src].src);
152 }
153
154 static struct ssa_result_range
155 analyze_constant(const struct nir_alu_instr *instr, unsigned src,
156 nir_alu_type use_type)
157 {
158 uint8_t swizzle[NIR_MAX_VEC_COMPONENTS] = { 0, 1, 2, 3,
159 4, 5, 6, 7,
160 8, 9, 10, 11,
161 12, 13, 14, 15 };
162
163 /* If the source is an explicitly sized source, then we need to reset
164 * both the number of components and the swizzle.
165 */
166 const unsigned num_components = nir_ssa_alu_instr_src_components(instr, src);
167
168 for (unsigned i = 0; i < num_components; ++i)
169 swizzle[i] = instr->src[src].swizzle[i];
170
171 const nir_load_const_instr *const load =
172 nir_instr_as_load_const(instr->src[src].src.ssa->parent_instr);
173
174 struct ssa_result_range r = { unknown, false, false, false };
175
176 switch (nir_alu_type_get_base_type(use_type)) {
177 case nir_type_float: {
178 double min_value = NAN;
179 double max_value = NAN;
180 bool any_zero = false;
181 bool all_zero = true;
182
183 r.is_integral = true;
184 r.is_a_number = true;
185 r.is_finite = true;
186
187 for (unsigned i = 0; i < num_components; ++i) {
188 const double v = nir_const_value_as_float(load->value[swizzle[i]],
189 load->def.bit_size);
190
191 if (floor(v) != v)
192 r.is_integral = false;
193
194 if (isnan(v))
195 r.is_a_number = false;
196
197 if (!isfinite(v))
198 r.is_finite = false;
199
200 any_zero = any_zero || (v == 0.0);
201 all_zero = all_zero && (v == 0.0);
202 min_value = fmin(min_value, v);
203 max_value = fmax(max_value, v);
204 }
205
206 assert(any_zero >= all_zero);
207 assert(isnan(max_value) || max_value >= min_value);
208
209 if (all_zero)
210 r.range = eq_zero;
211 else if (min_value > 0.0)
212 r.range = gt_zero;
213 else if (min_value == 0.0)
214 r.range = ge_zero;
215 else if (max_value < 0.0)
216 r.range = lt_zero;
217 else if (max_value == 0.0)
218 r.range = le_zero;
219 else if (!any_zero)
220 r.range = ne_zero;
221 else
222 r.range = unknown;
223
224 return r;
225 }
226
227 case nir_type_int:
228 case nir_type_bool: {
229 int64_t min_value = INT_MAX;
230 int64_t max_value = INT_MIN;
231 bool any_zero = false;
232 bool all_zero = true;
233
234 for (unsigned i = 0; i < num_components; ++i) {
235 const int64_t v = nir_const_value_as_int(load->value[swizzle[i]],
236 load->def.bit_size);
237
238 any_zero = any_zero || (v == 0);
239 all_zero = all_zero && (v == 0);
240 min_value = MIN2(min_value, v);
241 max_value = MAX2(max_value, v);
242 }
243
244 assert(any_zero >= all_zero);
245 assert(max_value >= min_value);
246
247 if (all_zero)
248 r.range = eq_zero;
249 else if (min_value > 0)
250 r.range = gt_zero;
251 else if (min_value == 0)
252 r.range = ge_zero;
253 else if (max_value < 0)
254 r.range = lt_zero;
255 else if (max_value == 0)
256 r.range = le_zero;
257 else if (!any_zero)
258 r.range = ne_zero;
259 else
260 r.range = unknown;
261
262 return r;
263 }
264
265 case nir_type_uint: {
266 bool any_zero = false;
267 bool all_zero = true;
268
269 for (unsigned i = 0; i < num_components; ++i) {
270 const uint64_t v = nir_const_value_as_uint(load->value[swizzle[i]],
271 load->def.bit_size);
272
273 any_zero = any_zero || (v == 0);
274 all_zero = all_zero && (v == 0);
275 }
276
277 assert(any_zero >= all_zero);
278
279 if (all_zero)
280 r.range = eq_zero;
281 else if (any_zero)
282 r.range = ge_zero;
283 else
284 r.range = gt_zero;
285
286 return r;
287 }
288
289 default:
290 unreachable("Invalid alu source type");
291 }
292 }
293
294 /**
295 * Short-hand name for use in the tables in process_fp_query. If this name
296 * becomes a problem on some compiler, we can change it to _.
297 */
298 #define _______ unknown
299
300 #if defined(__clang__)
301 /* clang wants _Pragma("unroll X") */
302 #define pragma_unroll_5 _Pragma("unroll 5")
303 #define pragma_unroll_7 _Pragma("unroll 7")
304 /* gcc wants _Pragma("GCC unroll X") */
305 #elif defined(__GNUC__)
306 #if __GNUC__ >= 8
307 #define pragma_unroll_5 _Pragma("GCC unroll 5")
308 #define pragma_unroll_7 _Pragma("GCC unroll 7")
309 #else
310 #pragma GCC optimize("unroll-loops")
311 #define pragma_unroll_5
312 #define pragma_unroll_7
313 #endif
314 #else
315 /* MSVC doesn't have C99's _Pragma() */
316 #define pragma_unroll_5
317 #define pragma_unroll_7
318 #endif
319
320 #ifndef NDEBUG
321 #define ASSERT_TABLE_IS_COMMUTATIVE(t) \
322 do { \
323 static bool first = true; \
324 if (first) { \
325 first = false; \
326 pragma_unroll_7 for (unsigned r = 0; r < ARRAY_SIZE(t); r++) \
327 { \
328 pragma_unroll_7 for (unsigned c = 0; c < ARRAY_SIZE(t[0]); c++) \
329 assert(t[r][c] == t[c][r]); \
330 } \
331 } \
332 } while (false)
333
334 #define ASSERT_TABLE_IS_DIAGONAL(t) \
335 do { \
336 static bool first = true; \
337 if (first) { \
338 first = false; \
339 pragma_unroll_7 for (unsigned r = 0; r < ARRAY_SIZE(t); r++) \
340 assert(t[r][r] == r); \
341 } \
342 } while (false)
343
344 #else
345 #define ASSERT_TABLE_IS_COMMUTATIVE(t)
346 #define ASSERT_TABLE_IS_DIAGONAL(t)
347 #endif /* !defined(NDEBUG) */
348
349 static enum ssa_ranges
350 union_ranges(enum ssa_ranges a, enum ssa_ranges b)
351 {
352 static const enum ssa_ranges union_table[last_range + 1][last_range + 1] = {
353 /* left\right unknown lt_zero le_zero gt_zero ge_zero ne_zero eq_zero */
354 /* unknown */ { _______, _______, _______, _______, _______, _______, _______ },
355 /* lt_zero */ { _______, lt_zero, le_zero, ne_zero, _______, ne_zero, le_zero },
356 /* le_zero */ { _______, le_zero, le_zero, _______, _______, _______, le_zero },
357 /* gt_zero */ { _______, ne_zero, _______, gt_zero, ge_zero, ne_zero, ge_zero },
358 /* ge_zero */ { _______, _______, _______, ge_zero, ge_zero, _______, ge_zero },
359 /* ne_zero */ { _______, ne_zero, _______, ne_zero, _______, ne_zero, _______ },
360 /* eq_zero */ { _______, le_zero, le_zero, ge_zero, ge_zero, _______, eq_zero },
361 };
362
363 ASSERT_TABLE_IS_COMMUTATIVE(union_table);
364 ASSERT_TABLE_IS_DIAGONAL(union_table);
365
366 return union_table[a][b];
367 }
368
369 #ifndef NDEBUG
370 /* Verify that the 'unknown' entry in each row (or column) of the table is the
371 * union of all the other values in the row (or column).
372 */
373 #define ASSERT_UNION_OF_OTHERS_MATCHES_UNKNOWN_2_SOURCE(t) \
374 do { \
375 static bool first = true; \
376 if (first) { \
377 first = false; \
378 pragma_unroll_7 for (unsigned i = 0; i < last_range; i++) \
379 { \
380 enum ssa_ranges col_range = t[i][unknown + 1]; \
381 enum ssa_ranges row_range = t[unknown + 1][i]; \
382 \
383 pragma_unroll_5 for (unsigned j = unknown + 2; j < last_range; j++) \
384 { \
385 col_range = union_ranges(col_range, t[i][j]); \
386 row_range = union_ranges(row_range, t[j][i]); \
387 } \
388 \
389 assert(col_range == t[i][unknown]); \
390 assert(row_range == t[unknown][i]); \
391 } \
392 } \
393 } while (false)
394
395 /* For most operations, the union of ranges for a strict inequality and
396 * equality should be the range of the non-strict inequality (e.g.,
397 * union_ranges(range(op(lt_zero), range(op(eq_zero))) == range(op(le_zero)).
398 *
399 * Does not apply to selection-like opcodes (bcsel, fmin, fmax, etc.).
400 */
401 #define ASSERT_UNION_OF_EQ_AND_STRICT_INEQ_MATCHES_NONSTRICT_1_SOURCE(t) \
402 do { \
403 assert(union_ranges(t[lt_zero], t[eq_zero]) == t[le_zero]); \
404 assert(union_ranges(t[gt_zero], t[eq_zero]) == t[ge_zero]); \
405 } while (false)
406
407 #define ASSERT_UNION_OF_EQ_AND_STRICT_INEQ_MATCHES_NONSTRICT_2_SOURCE(t) \
408 do { \
409 static bool first = true; \
410 if (first) { \
411 first = false; \
412 pragma_unroll_7 for (unsigned i = 0; i < last_range; i++) \
413 { \
414 assert(union_ranges(t[i][lt_zero], t[i][eq_zero]) == t[i][le_zero]); \
415 assert(union_ranges(t[i][gt_zero], t[i][eq_zero]) == t[i][ge_zero]); \
416 assert(union_ranges(t[lt_zero][i], t[eq_zero][i]) == t[le_zero][i]); \
417 assert(union_ranges(t[gt_zero][i], t[eq_zero][i]) == t[ge_zero][i]); \
418 } \
419 } \
420 } while (false)
421
422 /* Several other unordered tuples span the range of "everything." Each should
423 * have the same value as unknown: (lt_zero, ge_zero), (le_zero, gt_zero), and
424 * (eq_zero, ne_zero). union_ranges is already commutative, so only one
425 * ordering needs to be checked.
426 *
427 * Does not apply to selection-like opcodes (bcsel, fmin, fmax, etc.).
428 *
429 * In cases where this can be used, it is unnecessary to also use
430 * ASSERT_UNION_OF_OTHERS_MATCHES_UNKNOWN_*_SOURCE. For any range X,
431 * union_ranges(X, X) == X. The disjoint ranges cover all of the non-unknown
432 * possibilities, so the union of all the unions of disjoint ranges is
433 * equivalent to the union of "others."
434 */
435 #define ASSERT_UNION_OF_DISJOINT_MATCHES_UNKNOWN_1_SOURCE(t) \
436 do { \
437 assert(union_ranges(t[lt_zero], t[ge_zero]) == t[unknown]); \
438 assert(union_ranges(t[le_zero], t[gt_zero]) == t[unknown]); \
439 assert(union_ranges(t[eq_zero], t[ne_zero]) == t[unknown]); \
440 } while (false)
441
442 #define ASSERT_UNION_OF_DISJOINT_MATCHES_UNKNOWN_2_SOURCE(t) \
443 do { \
444 static bool first = true; \
445 if (first) { \
446 first = false; \
447 pragma_unroll_7 for (unsigned i = 0; i < last_range; i++) \
448 { \
449 assert(union_ranges(t[i][lt_zero], t[i][ge_zero]) == \
450 t[i][unknown]); \
451 assert(union_ranges(t[i][le_zero], t[i][gt_zero]) == \
452 t[i][unknown]); \
453 assert(union_ranges(t[i][eq_zero], t[i][ne_zero]) == \
454 t[i][unknown]); \
455 \
456 assert(union_ranges(t[lt_zero][i], t[ge_zero][i]) == \
457 t[unknown][i]); \
458 assert(union_ranges(t[le_zero][i], t[gt_zero][i]) == \
459 t[unknown][i]); \
460 assert(union_ranges(t[eq_zero][i], t[ne_zero][i]) == \
461 t[unknown][i]); \
462 } \
463 } \
464 } while (false)
465
466 #else
467 #define ASSERT_UNION_OF_OTHERS_MATCHES_UNKNOWN_2_SOURCE(t)
468 #define ASSERT_UNION_OF_EQ_AND_STRICT_INEQ_MATCHES_NONSTRICT_1_SOURCE(t)
469 #define ASSERT_UNION_OF_EQ_AND_STRICT_INEQ_MATCHES_NONSTRICT_2_SOURCE(t)
470 #define ASSERT_UNION_OF_DISJOINT_MATCHES_UNKNOWN_1_SOURCE(t)
471 #define ASSERT_UNION_OF_DISJOINT_MATCHES_UNKNOWN_2_SOURCE(t)
472 #endif /* !defined(NDEBUG) */
473
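/* A floating-point range query: what can be said about source 'src' of ALU
 * instruction 'instr' when its bits are interpreted as 'use_type'?
 */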
474 struct fp_query {
475 struct analysis_query head;
476 const nir_alu_instr *instr;
477 unsigned src;
478 nir_alu_type use_type;
479 };
480
481 static void
482 push_fp_query(struct analysis_state *state, const nir_alu_instr *alu, unsigned src, nir_alu_type type)
483 {
484 struct fp_query *pushed_q = push_analysis_query(state, sizeof(struct fp_query));
485 pushed_q->instr = alu;
486 pushed_q->src = src;
487 pushed_q->use_type = type == nir_type_invalid ? nir_alu_src_type(alu, src) : type;
488 }
489
490 static uintptr_t
491 get_fp_key(struct analysis_query *q)
492 {
493 struct fp_query *fp_q = (struct fp_query *)q;
494 const nir_src *src = &fp_q->instr->src[fp_q->src].src;
495
496 if (src->ssa->parent_instr->type != nir_instr_type_alu)
497 return 0;
498
499 uintptr_t type_encoding;
500 uintptr_t ptr = (uintptr_t)nir_instr_as_alu(src->ssa->parent_instr);
501
502 /* The low 2 bits have to be zero or this whole scheme falls apart. */
503 assert((ptr & 0x3) == 0);
504
505 /* NIR is typeless in the sense that sequences of bits have whatever
506 * meaning is attached to them by the instruction that consumes them.
507 * However, the number of bits must match between producer and consumer.
508 * As a result, the number of bits does not need to be encoded here.
509 */
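/* Sketch of the resulting key, assuming the encoding chosen below: an ALU
 * producer at address 0x1000 whose result is consumed as a float yields the
 * key 0x1000 | 3, while the same producer consumed as an int yields
 * 0x1000 | 0, so each use type gets its own cache entry.
 */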
510 switch (nir_alu_type_get_base_type(fp_q->use_type)) {
511 case nir_type_int:
512 type_encoding = 0;
513 break;
514 case nir_type_uint:
515 type_encoding = 1;
516 break;
517 case nir_type_bool:
518 type_encoding = 2;
519 break;
520 case nir_type_float:
521 type_encoding = 3;
522 break;
523 default:
524 unreachable("Invalid base type.");
525 }
526
527 return ptr | type_encoding;
528 }
529
530 /**
531 * Analyze an expression to determine the range of its result
532 *
533 * The end result of this analysis is a token that communicates something
534 * about the range of values. There's an implicit grammar that produces
535 * tokens from sequences of literal values, other tokens, and operations.
536 * This function implements this grammar, iteratively rather than recursively.
537 * Some (but not all) of the grammar is listed in-line in the function.
538 */
539 static void
540 process_fp_query(struct analysis_state *state, struct analysis_query *aq, uint32_t *result,
541 const uint32_t *src_res)
542 {
543 /* Ensure that the _Pragma("GCC unroll 7") annotations above are correct. */
544 STATIC_ASSERT(last_range + 1 == 7);
545
546 struct fp_query q = *(struct fp_query *)aq;
547 const nir_alu_instr *instr = q.instr;
548 unsigned src = q.src;
549 nir_alu_type use_type = q.use_type;
550
551 if (nir_src_is_const(instr->src[src].src)) {
552 *result = pack_data(analyze_constant(instr, src, use_type));
553 return;
554 }
555
556 if (instr->src[src].src.ssa->parent_instr->type != nir_instr_type_alu) {
557 *result = pack_data((struct ssa_result_range){ unknown, false, false, false });
558 return;
559 }
560
561 const struct nir_alu_instr *const alu =
562 nir_instr_as_alu(instr->src[src].src.ssa->parent_instr);
563
564 /* Bail if the type of the instruction generating the value does not match
565 * the type the value will be interpreted as. int/uint/bool can be
566 * reinterpreted trivially. The most important cases are between float and
567 * non-float.
568 */
569 if (alu->op != nir_op_mov && alu->op != nir_op_bcsel) {
570 const nir_alu_type use_base_type =
571 nir_alu_type_get_base_type(use_type);
572 const nir_alu_type src_base_type =
573 nir_alu_type_get_base_type(nir_op_infos[alu->op].output_type);
574
575 if (use_base_type != src_base_type &&
576 (use_base_type == nir_type_float ||
577 src_base_type == nir_type_float)) {
578 *result = pack_data((struct ssa_result_range){ unknown, false, false, false });
579 return;
580 }
581 }
582
583 if (!aq->pushed_queries) {
584 switch (alu->op) {
585 case nir_op_bcsel:
586 push_fp_query(state, alu, 1, use_type);
587 push_fp_query(state, alu, 2, use_type);
588 return;
589 case nir_op_mov:
590 push_fp_query(state, alu, 0, use_type);
591 return;
592 case nir_op_i2f32:
593 case nir_op_u2f32:
594 case nir_op_fabs:
595 case nir_op_fexp2:
596 case nir_op_frcp:
597 case nir_op_frsq:
598 case nir_op_fneg:
599 case nir_op_fsat:
600 case nir_op_fsign:
601 case nir_op_ffloor:
602 case nir_op_fceil:
603 case nir_op_ftrunc:
604 case nir_op_fdot2:
605 case nir_op_fdot3:
606 case nir_op_fdot4:
607 case nir_op_fdot8:
608 case nir_op_fdot16:
609 case nir_op_fdot2_replicated:
610 case nir_op_fdot3_replicated:
611 case nir_op_fdot4_replicated:
612 case nir_op_fdot8_replicated:
613 case nir_op_fdot16_replicated:
614 push_fp_query(state, alu, 0, nir_type_invalid);
615 return;
616 case nir_op_fadd:
617 case nir_op_fmax:
618 case nir_op_fmin:
619 case nir_op_fmul:
620 case nir_op_fmulz:
621 case nir_op_fpow:
622 push_fp_query(state, alu, 0, nir_type_invalid);
623 push_fp_query(state, alu, 1, nir_type_invalid);
624 return;
625 case nir_op_ffma:
626 case nir_op_flrp:
627 push_fp_query(state, alu, 0, nir_type_invalid);
628 push_fp_query(state, alu, 1, nir_type_invalid);
629 push_fp_query(state, alu, 2, nir_type_invalid);
630 return;
631 default:
632 break;
633 }
634 }
635
636 struct ssa_result_range r = { unknown, false, false, false };
637
638 /* ge_zero: ge_zero + ge_zero
639 *
640 * gt_zero: gt_zero + eq_zero
641 * | gt_zero + ge_zero
642 * | eq_zero + gt_zero # Addition is commutative
643 * | ge_zero + gt_zero # Addition is commutative
644 * | gt_zero + gt_zero
645 * ;
646 *
647 * le_zero: le_zero + le_zero
648 *
649 * lt_zero: lt_zero + eq_zero
650 * | lt_zero + le_zero
651 * | eq_zero + lt_zero # Addition is commutative
652 * | le_zero + lt_zero # Addition is commutative
653 * | lt_zero + lt_zero
654 * ;
655 *
656 * ne_zero: eq_zero + ne_zero
657 * | ne_zero + eq_zero # Addition is commutative
658 * ;
659 *
660 * eq_zero: eq_zero + eq_zero
661 * ;
662 *
663 * All other cases are 'unknown'. The seemingly odd entry is (ne_zero,
664 * ne_zero): the operands could be -5 and +5, whose sum is zero, not ne_zero.
665 */
666 static const enum ssa_ranges fadd_table[last_range + 1][last_range + 1] = {
667 /* left\right unknown lt_zero le_zero gt_zero ge_zero ne_zero eq_zero */
668 /* unknown */ { _______, _______, _______, _______, _______, _______, _______ },
669 /* lt_zero */ { _______, lt_zero, lt_zero, _______, _______, _______, lt_zero },
670 /* le_zero */ { _______, lt_zero, le_zero, _______, _______, _______, le_zero },
671 /* gt_zero */ { _______, _______, _______, gt_zero, gt_zero, _______, gt_zero },
672 /* ge_zero */ { _______, _______, _______, gt_zero, ge_zero, _______, ge_zero },
673 /* ne_zero */ { _______, _______, _______, _______, _______, _______, ne_zero },
674 /* eq_zero */ { _______, lt_zero, le_zero, gt_zero, ge_zero, ne_zero, eq_zero },
675 };
676
677 ASSERT_TABLE_IS_COMMUTATIVE(fadd_table);
678 ASSERT_UNION_OF_DISJOINT_MATCHES_UNKNOWN_2_SOURCE(fadd_table);
679 ASSERT_UNION_OF_EQ_AND_STRICT_INEQ_MATCHES_NONSTRICT_2_SOURCE(fadd_table);
680
681 /* Due to the flush-to-zero semantics of floating-point numbers with very
682 * small magnitudes, we can never really be sure a result will be
683 * non-zero.
684 *
685 * ge_zero: ge_zero * ge_zero
686 * | ge_zero * gt_zero
687 * | ge_zero * eq_zero
688 * | le_zero * lt_zero
689 * | lt_zero * le_zero # Multiplication is commutative
690 * | le_zero * le_zero
691 * | gt_zero * ge_zero # Multiplication is commutative
692 * | eq_zero * ge_zero # Multiplication is commutative
693 * | a * a # Left source == right source
694 * | gt_zero * gt_zero
695 * | lt_zero * lt_zero
696 * ;
697 *
698 * le_zero: ge_zero * le_zero
699 * | ge_zero * lt_zero
700 * | lt_zero * ge_zero # Multiplication is commutative
701 * | le_zero * ge_zero # Multiplication is commutative
702 * | le_zero * gt_zero
703 * | lt_zero * gt_zero
704 * | gt_zero * lt_zero # Multiplication is commutative
705 * ;
706 *
707 * eq_zero: eq_zero * <any>
708 * <any> * eq_zero # Multiplication is commutative
709 *
710 * All other cases are 'unknown'.
711 */
712 static const enum ssa_ranges fmul_table[last_range + 1][last_range + 1] = {
713 /* left\right unknown lt_zero le_zero gt_zero ge_zero ne_zero eq_zero */
714 /* unknown */ { _______, _______, _______, _______, _______, _______, eq_zero },
715 /* lt_zero */ { _______, ge_zero, ge_zero, le_zero, le_zero, _______, eq_zero },
716 /* le_zero */ { _______, ge_zero, ge_zero, le_zero, le_zero, _______, eq_zero },
717 /* gt_zero */ { _______, le_zero, le_zero, ge_zero, ge_zero, _______, eq_zero },
718 /* ge_zero */ { _______, le_zero, le_zero, ge_zero, ge_zero, _______, eq_zero },
719 /* ne_zero */ { _______, _______, _______, _______, _______, _______, eq_zero },
720 /* eq_zero */ { eq_zero, eq_zero, eq_zero, eq_zero, eq_zero, eq_zero, eq_zero }
721 };
722
723 ASSERT_TABLE_IS_COMMUTATIVE(fmul_table);
724 ASSERT_UNION_OF_DISJOINT_MATCHES_UNKNOWN_2_SOURCE(fmul_table);
725 ASSERT_UNION_OF_EQ_AND_STRICT_INEQ_MATCHES_NONSTRICT_2_SOURCE(fmul_table);
726
727 static const enum ssa_ranges fneg_table[last_range + 1] = {
728 /* unknown lt_zero le_zero gt_zero ge_zero ne_zero eq_zero */
729 _______, gt_zero, ge_zero, lt_zero, le_zero, ne_zero, eq_zero
730 };
731
732 ASSERT_UNION_OF_DISJOINT_MATCHES_UNKNOWN_1_SOURCE(fneg_table);
733 ASSERT_UNION_OF_EQ_AND_STRICT_INEQ_MATCHES_NONSTRICT_1_SOURCE(fneg_table);
734
735 switch (alu->op) {
736 case nir_op_b2f32:
737 case nir_op_b2i32:
738 /* b2f32 will generate either 0.0 or 1.0. This case is trivial.
739 *
740 * b2i32 will generate either 0x00000000 or 0x00000001. When those bit
741 * patterns are interpreted as floating point, they are 0.0 and
742 * 1.401298464324817e-45. The latter is subnormal, but it is finite and
743 * a number.
744 */
745 r = (struct ssa_result_range){ ge_zero, alu->op == nir_op_b2f32, true, true };
746 break;
747
748 case nir_op_bcsel: {
749 const struct ssa_result_range left = unpack_data(src_res[0]);
750 const struct ssa_result_range right = unpack_data(src_res[1]);
751
752 r.is_integral = left.is_integral && right.is_integral;
753
754 /* This could be better, but it would require a lot of work. For
755 * example, the result of the following is a number:
756 *
757 * bcsel(a > 0.0, a, 38.6)
758 *
759 * If the result of 'a > 0.0' is true, then the use of 'a' in the true
760 * part of the bcsel must be a number.
761 *
762 * Other cases are even more challenging.
763 *
764 * bcsel(a > 0.5, a - 0.5, 0.0)
765 */
766 r.is_a_number = left.is_a_number && right.is_a_number;
767 r.is_finite = left.is_finite && right.is_finite;
768
769 r.range = union_ranges(left.range, right.range);
770 break;
771 }
772
773 case nir_op_i2f32:
774 case nir_op_u2f32:
775 r = unpack_data(src_res[0]);
776
777 r.is_integral = true;
778 r.is_a_number = true;
779 r.is_finite = true;
780
781 if (r.range == unknown && alu->op == nir_op_u2f32)
782 r.range = ge_zero;
783
784 break;
785
786 case nir_op_fabs:
787 r = unpack_data(src_res[0]);
788
789 switch (r.range) {
790 case unknown:
791 case le_zero:
792 case ge_zero:
793 r.range = ge_zero;
794 break;
795
796 case lt_zero:
797 case gt_zero:
798 case ne_zero:
799 r.range = gt_zero;
800 break;
801
802 case eq_zero:
803 break;
804 }
805
806 break;
807
808 case nir_op_fadd: {
809 const struct ssa_result_range left = unpack_data(src_res[0]);
810 const struct ssa_result_range right = unpack_data(src_res[1]);
811
812 r.is_integral = left.is_integral && right.is_integral;
813 r.range = fadd_table[left.range][right.range];
814
815 /* X + Y is NaN if either operand is NaN or if one operand is +Inf and
816 * the other is -Inf. If neither operand is NaN and at least one of the
817 * operands is finite, then the result cannot be NaN.
818 */
819 r.is_a_number = left.is_a_number && right.is_a_number &&
820 (left.is_finite || right.is_finite);
821 break;
822 }
823
824 case nir_op_fexp2: {
825 /* If the parameter might be less than zero, the mathematical result
826 * will be on (0, 1). For negative parameters of sufficiently large
827 * magnitude, the result will flush to zero.
828 */
829 static const enum ssa_ranges table[last_range + 1] = {
830 /* unknown lt_zero le_zero gt_zero ge_zero ne_zero eq_zero */
831 ge_zero, ge_zero, ge_zero, gt_zero, gt_zero, ge_zero, gt_zero
832 };
833
834 r = unpack_data(src_res[0]);
835
836 ASSERT_UNION_OF_DISJOINT_MATCHES_UNKNOWN_1_SOURCE(table);
837 ASSERT_UNION_OF_EQ_AND_STRICT_INEQ_MATCHES_NONSTRICT_1_SOURCE(table);
838
839 r.is_integral = r.is_integral && is_not_negative(r.range);
840 r.range = table[r.range];
841
842 /* Various cases can result in NaN, so assume the worst. */
843 r.is_finite = false;
844 r.is_a_number = false;
845 break;
846 }
847
848 case nir_op_fmax: {
849 const struct ssa_result_range left = unpack_data(src_res[0]);
850 const struct ssa_result_range right = unpack_data(src_res[1]);
851
852 r.is_integral = left.is_integral && right.is_integral;
853
854 /* This is conservative. It may be possible to determine that the
855 * result must be finite in more cases, but it would take some effort to
856 * work out all the corners. For example, fmax({lt_zero, finite},
857 * {lt_zero}) should result in {lt_zero, finite}.
858 */
859 r.is_finite = left.is_finite && right.is_finite;
860
861 /* If one source is NaN, fmax always picks the other source. */
862 r.is_a_number = left.is_a_number || right.is_a_number;
863
864 /* gt_zero: fmax(gt_zero, *)
865 * | fmax(*, gt_zero) # Treat fmax as commutative
866 * ;
867 *
868 * ge_zero: fmax(ge_zero, ne_zero)
869 * | fmax(ge_zero, lt_zero)
870 * | fmax(ge_zero, le_zero)
871 * | fmax(ge_zero, eq_zero)
872 * | fmax(ne_zero, ge_zero) # Treat fmax as commutative
873 * | fmax(lt_zero, ge_zero) # Treat fmax as commutative
874 * | fmax(le_zero, ge_zero) # Treat fmax as commutative
875 * | fmax(eq_zero, ge_zero) # Treat fmax as commutative
876 * | fmax(ge_zero, ge_zero)
877 * ;
878 *
879 * le_zero: fmax(le_zero, lt_zero)
880 * | fmax(lt_zero, le_zero) # Treat fmax as commutative
881 * | fmax(le_zero, le_zero)
882 * ;
883 *
884 * lt_zero: fmax(lt_zero, lt_zero)
885 * ;
886 *
887 * ne_zero: fmax(ne_zero, lt_zero)
888 * | fmax(lt_zero, ne_zero) # Treat fmax as commutative
889 * | fmax(ne_zero, ne_zero)
890 * ;
891 *
892 * eq_zero: fmax(eq_zero, le_zero)
893 * | fmax(eq_zero, lt_zero)
894 * | fmax(le_zero, eq_zero) # Treat fmax as commutative
895 * | fmax(lt_zero, eq_zero) # Treat fmax as commutative
896 * | fmax(eq_zero, eq_zero)
897 * ;
898 *
899 * All other cases are 'unknown'.
900 */
901 static const enum ssa_ranges table[last_range + 1][last_range + 1] = {
902 /* left\right unknown lt_zero le_zero gt_zero ge_zero ne_zero eq_zero */
903 /* unknown */ { _______, _______, _______, gt_zero, ge_zero, _______, ge_zero },
904 /* lt_zero */ { _______, lt_zero, le_zero, gt_zero, ge_zero, ne_zero, eq_zero },
905 /* le_zero */ { _______, le_zero, le_zero, gt_zero, ge_zero, _______, eq_zero },
906 /* gt_zero */ { gt_zero, gt_zero, gt_zero, gt_zero, gt_zero, gt_zero, gt_zero },
907 /* ge_zero */ { ge_zero, ge_zero, ge_zero, gt_zero, ge_zero, ge_zero, ge_zero },
908 /* ne_zero */ { _______, ne_zero, _______, gt_zero, ge_zero, ne_zero, ge_zero },
909 /* eq_zero */ { ge_zero, eq_zero, eq_zero, gt_zero, ge_zero, ge_zero, eq_zero }
910 };
911
912 /* Treat fmax as commutative. */
913 ASSERT_TABLE_IS_COMMUTATIVE(table);
914 ASSERT_TABLE_IS_DIAGONAL(table);
915 ASSERT_UNION_OF_OTHERS_MATCHES_UNKNOWN_2_SOURCE(table);
916
917 r.range = table[left.range][right.range];
918
919 /* Recall that when either value is NaN, fmax will pick the other value.
920 * This means the result range of the fmax will either be the "ideal"
921 * result range (calculated above) or the range of the non-NaN value.
922 */
923 if (!left.is_a_number)
924 r.range = union_ranges(r.range, right.range);
925
926 if (!right.is_a_number)
927 r.range = union_ranges(r.range, left.range);
928
929 break;
930 }
931
932 case nir_op_fmin: {
933 const struct ssa_result_range left = unpack_data(src_res[0]);
934 const struct ssa_result_range right = unpack_data(src_res[1]);
935
936 r.is_integral = left.is_integral && right.is_integral;
937
938 /* This is conservative. It may be possible to determine that the
939 * result must be finite in more cases, but it would take some effort to
940 * work out all the corners. For example, fmin({gt_zero, finite},
941 * {gt_zero}) should result in {gt_zero, finite}.
942 */
943 r.is_finite = left.is_finite && right.is_finite;
944
945 /* If one source is NaN, fmin always picks the other source. */
946 r.is_a_number = left.is_a_number || right.is_a_number;
947
948 /* lt_zero: fmin(lt_zero, *)
949 * | fmin(*, lt_zero) # Treat fmin as commutative
950 * ;
951 *
952 * le_zero: fmin(le_zero, ne_zero)
953 * | fmin(le_zero, gt_zero)
954 * | fmin(le_zero, ge_zero)
955 * | fmin(le_zero, eq_zero)
956 * | fmin(ne_zero, le_zero) # Treat fmin as commutative
957 * | fmin(gt_zero, le_zero) # Treat fmin as commutative
958 * | fmin(ge_zero, le_zero) # Treat fmin as commutative
959 * | fmin(eq_zero, le_zero) # Treat fmin as commutative
960 * | fmin(le_zero, le_zero)
961 * ;
962 *
963 * ge_zero: fmin(ge_zero, gt_zero)
964 * | fmin(gt_zero, ge_zero) # Treat fmin as commutative
965 * | fmin(ge_zero, ge_zero)
966 * ;
967 *
968 * gt_zero: fmin(gt_zero, gt_zero)
969 * ;
970 *
971 * ne_zero: fmin(ne_zero, gt_zero)
972 * | fmin(gt_zero, ne_zero) # Treat fmin as commutative
973 * | fmin(ne_zero, ne_zero)
974 * ;
975 *
976 * eq_zero: fmin(eq_zero, ge_zero)
977 * | fmin(eq_zero, gt_zero)
978 * | fmin(ge_zero, eq_zero) # Treat fmin as commutative
979 * | fmin(gt_zero, eq_zero) # Treat fmin as commutative
980 * | fmin(eq_zero, eq_zero)
981 * ;
982 *
983 * All other cases are 'unknown'.
984 */
985 static const enum ssa_ranges table[last_range + 1][last_range + 1] = {
986 /* left\right unknown lt_zero le_zero gt_zero ge_zero ne_zero eq_zero */
987 /* unknown */ { _______, lt_zero, le_zero, _______, _______, _______, le_zero },
988 /* lt_zero */ { lt_zero, lt_zero, lt_zero, lt_zero, lt_zero, lt_zero, lt_zero },
989 /* le_zero */ { le_zero, lt_zero, le_zero, le_zero, le_zero, le_zero, le_zero },
990 /* gt_zero */ { _______, lt_zero, le_zero, gt_zero, ge_zero, ne_zero, eq_zero },
991 /* ge_zero */ { _______, lt_zero, le_zero, ge_zero, ge_zero, _______, eq_zero },
992 /* ne_zero */ { _______, lt_zero, le_zero, ne_zero, _______, ne_zero, le_zero },
993 /* eq_zero */ { le_zero, lt_zero, le_zero, eq_zero, eq_zero, le_zero, eq_zero }
994 };
995
996 /* Treat fmin as commutative. */
997 ASSERT_TABLE_IS_COMMUTATIVE(table);
998 ASSERT_TABLE_IS_DIAGONAL(table);
999 ASSERT_UNION_OF_OTHERS_MATCHES_UNKNOWN_2_SOURCE(table);
1000
1001 r.range = table[left.range][right.range];
1002
1003 /* Recall that when either value is NaN, fmin will pick the other value.
1004 * This means the result range of the fmin will either be the "ideal"
1005 * result range (calculated above) or the range of the non-NaN value.
1006 */
1007 if (!left.is_a_number)
1008 r.range = union_ranges(r.range, right.range);
1009
1010 if (!right.is_a_number)
1011 r.range = union_ranges(r.range, left.range);
1012
1013 break;
1014 }
1015
1016 case nir_op_fmul:
1017 case nir_op_fmulz: {
1018 const struct ssa_result_range left = unpack_data(src_res[0]);
1019 const struct ssa_result_range right = unpack_data(src_res[1]);
1020
1021 r.is_integral = left.is_integral && right.is_integral;
1022
1023 /* x * x => ge_zero */
1024 if (left.range != eq_zero && nir_alu_srcs_equal(alu, alu, 0, 1)) {
1025 /* Even if x > 0, the result of x*x can be zero when x is, for
1026 * example, a subnormal number.
1027 */
1028 r.range = ge_zero;
1029 } else if (left.range != eq_zero && nir_alu_srcs_negative_equal(alu, alu, 0, 1)) {
1030 /* -x * x => le_zero. */
1031 r.range = le_zero;
1032 } else
1033 r.range = fmul_table[left.range][right.range];
1034
1035 if (alu->op == nir_op_fmul) {
1036 /* Multiplication produces NaN for X * NaN and for 0 * ±Inf. If both
1037 * operands are numbers and either both are finite or one is finite and
1038 * the other cannot be zero, then the result must be a number.
1039 */
1040 r.is_a_number = (left.is_a_number && right.is_a_number) &&
1041 ((left.is_finite && right.is_finite) ||
1042 (!is_not_zero(left.range) && right.is_finite) ||
1043 (left.is_finite && !is_not_zero(right.range)));
1044 } else {
1045 /* nir_op_fmulz: unlike nir_op_fmul, 0 * ±Inf is a number. */
1046 r.is_a_number = left.is_a_number && right.is_a_number;
1047 }
1048
1049 break;
1050 }
1051
1052 case nir_op_frcp: {
1053 const struct ssa_result_range left = unpack_data(src_res[0]);
1054
1055 /* Only rcp(NaN) is NaN. */
1056 r.is_a_number = left.is_a_number;
1057
1058 /* rcp can be zero for large values if denorms are flushed, or for Inf.
1059 * Also, rcp(-0) is -Inf and rcp(+0) is Inf.
1060 */
1061 if (left.range == gt_zero)
1062 r.range = ge_zero;
1063 else if (left.range == lt_zero)
1064 r.range = le_zero;
1065
1066 if (left.range == gt_zero || left.range == lt_zero || left.range == ne_zero)
1067 r.is_finite = left.is_a_number;
1068
1069 break;
1070 }
1071
1072 case nir_op_mov:
1073 r = unpack_data(src_res[0]);
1074 break;
1075
1076 case nir_op_fneg:
1077 r = unpack_data(src_res[0]);
1078 r.range = fneg_table[r.range];
1079 break;
1080
1081 case nir_op_fsat: {
1082 const struct ssa_result_range left = unpack_data(src_res[0]);
1083
1084 /* fsat(NaN) = 0. */
1085 r.is_a_number = true;
1086 r.is_finite = true;
1087
1088 switch (left.range) {
1089 case le_zero:
1090 case lt_zero:
1091 case eq_zero:
1092 r.range = eq_zero;
1093 r.is_integral = true;
1094 break;
1095
1096 case gt_zero:
1097 /* fsat is equivalent to fmin(fmax(X, 0.0), 1.0), so if X is not a
1098 * number, the result will be 0.
1099 */
1100 r.range = left.is_a_number ? gt_zero : ge_zero;
1101 r.is_integral = left.is_integral;
1102 break;
1103
1104 case ge_zero:
1105 case ne_zero:
1106 case unknown:
1107 /* Since the result must be in [0, 1], the value must be >= 0. */
1108 r.range = ge_zero;
1109 r.is_integral = left.is_integral;
1110 break;
1111 }
1112 break;
1113 }
1114
1115 case nir_op_fsign:
1116 r = (struct ssa_result_range){
1117 unpack_data(src_res[0]).range,
1118 true,
1119 true, /* fsign is -1, 0, or 1, even for NaN, so it must be a number. */
1120 true /* fsign is -1, 0, or 1, even for NaN, so it must be finite. */
1121 };
1122 break;
1123
1124 case nir_op_fsqrt:
1125 r = (struct ssa_result_range){ ge_zero, false, false, false };
1126 break;
1127
1128 case nir_op_frsq: {
1129 const struct ssa_result_range left = unpack_data(src_res[0]);
1130
1131 /* rsq(NaN) and rsq(< 0) are NaN. */
1132 if (left.range == eq_zero || left.range == ge_zero || left.range == gt_zero)
1133 r.is_a_number = left.is_a_number;
1134
1135 /* rsq(-0) is -Inf and rsq(+0) is +Inf */
1136 if (left.range == gt_zero || left.range == ne_zero) {
1137 if (left.is_finite)
1138 r.range = gt_zero;
1139 else
1140 r.range = ge_zero;
1141 r.is_finite = r.is_a_number;
1142 }
1143
1144 break;
1145 }
1146
1147 case nir_op_ffloor: {
1148 const struct ssa_result_range left = unpack_data(src_res[0]);
1149
1150 r.is_integral = true;
1151
1152 /* In IEEE 754, floor(NaN) is NaN, and floor(±Inf) is ±Inf. See
1153 * https://pubs.opengroup.org/onlinepubs/9699919799.2016edition/functions/floor.html
1154 */
1155 r.is_a_number = left.is_a_number;
1156 r.is_finite = left.is_finite;
1157
1158 if (left.is_integral || left.range == le_zero || left.range == lt_zero)
1159 r.range = left.range;
1160 else if (left.range == ge_zero || left.range == gt_zero)
1161 r.range = ge_zero;
1162 else if (left.range == ne_zero)
1163 r.range = unknown;
1164
1165 break;
1166 }
1167
1168 case nir_op_fceil: {
1169 const struct ssa_result_range left = unpack_data(src_res[0]);
1170
1171 r.is_integral = true;
1172
1173 /* In IEEE 754, ceil(NaN) is NaN, and ceil(±Inf) is ±Inf. See
1174 * https://pubs.opengroup.org/onlinepubs/9699919799.2016edition/functions/ceil.html
1175 */
1176 r.is_a_number = left.is_a_number;
1177 r.is_finite = left.is_finite;
1178
1179 if (left.is_integral || left.range == ge_zero || left.range == gt_zero)
1180 r.range = left.range;
1181 else if (left.range == le_zero || left.range == lt_zero)
1182 r.range = le_zero;
1183 else if (left.range == ne_zero)
1184 r.range = unknown;
1185
1186 break;
1187 }
1188
1189 case nir_op_ftrunc: {
1190 const struct ssa_result_range left = unpack_data(src_res[0]);
1191
1192 r.is_integral = true;
1193
1194 /* In IEEE 754, trunc(NaN) is NaN, and trunc(±Inf) is ±Inf. See
1195 * https://pubs.opengroup.org/onlinepubs/9699919799.2016edition/functions/trunc.html
1196 */
1197 r.is_a_number = left.is_a_number;
1198 r.is_finite = left.is_finite;
1199
1200 if (left.is_integral)
1201 r.range = left.range;
1202 else if (left.range == ge_zero || left.range == gt_zero)
1203 r.range = ge_zero;
1204 else if (left.range == le_zero || left.range == lt_zero)
1205 r.range = le_zero;
1206 else if (left.range == ne_zero)
1207 r.range = unknown;
1208
1209 break;
1210 }
1211
1212 case nir_op_flt:
1213 case nir_op_fge:
1214 case nir_op_feq:
1215 case nir_op_fneu:
1216 case nir_op_ilt:
1217 case nir_op_ige:
1218 case nir_op_ieq:
1219 case nir_op_ine:
1220 case nir_op_ult:
1221 case nir_op_uge:
1222 /* Boolean results are 0 or -1. */
1223 r = (struct ssa_result_range){ le_zero, false, true, false };
1224 break;
1225
1226 case nir_op_fdot2:
1227 case nir_op_fdot3:
1228 case nir_op_fdot4:
1229 case nir_op_fdot8:
1230 case nir_op_fdot16:
1231 case nir_op_fdot2_replicated:
1232 case nir_op_fdot3_replicated:
1233 case nir_op_fdot4_replicated:
1234 case nir_op_fdot8_replicated:
1235 case nir_op_fdot16_replicated: {
1236 const struct ssa_result_range left = unpack_data(src_res[0]);
1237
1238 /* If the two sources are the same SSA value, then the result is either
1239 * NaN or some number >= 0. If one source is the negation of the other,
1240 * the result is either NaN or some number <= 0.
1241 *
1242 * In either of these two cases, if one source is a number, then the
1243 * other must also be a number. Since it should not be possible to get
1244 * Inf-Inf in the dot-product, the result must also be a number.
1245 */
1246 if (nir_alu_srcs_equal(alu, alu, 0, 1)) {
1247 r = (struct ssa_result_range){ ge_zero, false, left.is_a_number, false };
1248 } else if (nir_alu_srcs_negative_equal(alu, alu, 0, 1)) {
1249 r = (struct ssa_result_range){ le_zero, false, left.is_a_number, false };
1250 } else {
1251 r = (struct ssa_result_range){ unknown, false, false, false };
1252 }
1253 break;
1254 }
1255
1256 case nir_op_fpow: {
1257 /* Due to the flush-to-zero semantics of floating-point numbers with very
1258 * small magnitudes, we can never really be sure a result will be
1259 * non-zero.
1260 *
1261 * NIR uses pow() and powf() to constant evaluate nir_op_fpow. The man
1262 * page for that function says:
1263 *
1264 * If y is 0, the result is 1.0 (even if x is a NaN).
1265 *
1266 * gt_zero: pow(*, eq_zero)
1267 * | pow(eq_zero, lt_zero) # 0^-y = +inf
1268 * | pow(eq_zero, le_zero) # 0^-y = +inf or 0^0 = 1.0
1269 * ;
1270 *
1271 * eq_zero: pow(eq_zero, gt_zero)
1272 * ;
1273 *
1274 * ge_zero: pow(gt_zero, gt_zero)
1275 * | pow(gt_zero, ge_zero)
1276 * | pow(gt_zero, lt_zero)
1277 * | pow(gt_zero, le_zero)
1278 * | pow(gt_zero, ne_zero)
1279 * | pow(gt_zero, unknown)
1280 * | pow(ge_zero, gt_zero)
1281 * | pow(ge_zero, ge_zero)
1282 * | pow(ge_zero, lt_zero)
1283 * | pow(ge_zero, le_zero)
1284 * | pow(ge_zero, ne_zero)
1285 * | pow(ge_zero, unknown)
1286 * | pow(eq_zero, ge_zero) # 0^0 = 1.0 or 0^+y = 0.0
1287 * | pow(eq_zero, ne_zero) # 0^-y = +inf or 0^+y = 0.0
1288 * | pow(eq_zero, unknown) # union of all other y cases
1289 * ;
1290 *
1291 * All other cases are unknown.
1292 *
1293 * We could do better if the right operand is a constant, integral
1294 * value.
1295 */
1296 static const enum ssa_ranges table[last_range + 1][last_range + 1] = {
1297 /* left\right unknown lt_zero le_zero gt_zero ge_zero ne_zero eq_zero */
1298 /* unknown */ { _______, _______, _______, _______, _______, _______, gt_zero },
1299 /* lt_zero */ { _______, _______, _______, _______, _______, _______, gt_zero },
1300 /* le_zero */ { _______, _______, _______, _______, _______, _______, gt_zero },
1301 /* gt_zero */ { ge_zero, ge_zero, ge_zero, ge_zero, ge_zero, ge_zero, gt_zero },
1302 /* ge_zero */ { ge_zero, ge_zero, ge_zero, ge_zero, ge_zero, ge_zero, gt_zero },
1303 /* ne_zero */ { _______, _______, _______, _______, _______, _______, gt_zero },
1304 /* eq_zero */ { ge_zero, gt_zero, gt_zero, eq_zero, ge_zero, ge_zero, gt_zero },
1305 };
1306
1307 const struct ssa_result_range left = unpack_data(src_res[0]);
1308 const struct ssa_result_range right = unpack_data(src_res[1]);
1309
1310 ASSERT_UNION_OF_DISJOINT_MATCHES_UNKNOWN_2_SOURCE(table);
1311 ASSERT_UNION_OF_EQ_AND_STRICT_INEQ_MATCHES_NONSTRICT_2_SOURCE(table);
1312
1313 r.is_integral = left.is_integral && right.is_integral &&
1314 is_not_negative(right.range);
1315 r.range = table[left.range][right.range];
1316
1317 /* Various cases can result in NaN, so assume the worst. */
1318 r.is_a_number = false;
1319
1320 break;
1321 }
1322
1323 case nir_op_ffma: {
1324 const struct ssa_result_range first = unpack_data(src_res[0]);
1325 const struct ssa_result_range second = unpack_data(src_res[1]);
1326 const struct ssa_result_range third = unpack_data(src_res[2]);
1327
1328 r.is_integral = first.is_integral && second.is_integral &&
1329 third.is_integral;
1330
1331 /* Various cases can result in NaN, so assume the worst. */
1332 r.is_a_number = false;
1333
1334 enum ssa_ranges fmul_range;
1335
1336 if (first.range != eq_zero && nir_alu_srcs_equal(alu, alu, 0, 1)) {
1337 /* See handling of nir_op_fmul for explanation of why ge_zero is the
1338 * range.
1339 */
1340 fmul_range = ge_zero;
1341 } else if (first.range != eq_zero && nir_alu_srcs_negative_equal(alu, alu, 0, 1)) {
1342 /* -x * x => le_zero */
1343 fmul_range = le_zero;
1344 } else
1345 fmul_range = fmul_table[first.range][second.range];
1346
1347 r.range = fadd_table[fmul_range][third.range];
1348 break;
1349 }
1350
1351 case nir_op_flrp: {
1352 const struct ssa_result_range first = unpack_data(src_res[0]);
1353 const struct ssa_result_range second = unpack_data(src_res[1]);
1354 const struct ssa_result_range third = unpack_data(src_res[2]);
1355
1356 r.is_integral = first.is_integral && second.is_integral &&
1357 third.is_integral;
1358
1359 /* Various cases can result in NaN, so assume the worst. */
1360 r.is_a_number = false;
1361
1362 /* Decompose the flrp to first + third * (second + -first) */
1363 const enum ssa_ranges inner_fadd_range =
1364 fadd_table[second.range][fneg_table[first.range]];
1365
1366 const enum ssa_ranges fmul_range =
1367 fmul_table[third.range][inner_fadd_range];
1368
1369 r.range = fadd_table[first.range][fmul_range];
1370 break;
1371 }
1372
1373 default:
1374 r = (struct ssa_result_range){ unknown, false, false, false };
1375 break;
1376 }
1377
1378 if (r.range == eq_zero)
1379 r.is_integral = true;
1380
1381 /* Just like isfinite(), the is_finite flag implies the value is a number. */
1382 assert((int)r.is_finite <= (int)r.is_a_number);
1383
1384 *result = pack_data(r);
1385 }
1386
1387 #undef _______
1388
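/* Public entry point: determine the range of source 'src' of 'alu'. The
 * query and result stacks start in fixed-size stack storage; util_dynarray
 * falls back to heap allocation if a deeply nested expression overflows it.
 */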
1389 struct ssa_result_range
1390 nir_analyze_range(struct hash_table *range_ht,
1391 const nir_alu_instr *alu, unsigned src)
1392 {
1393 struct fp_query query_alloc[64];
1394 uint32_t result_alloc[64];
1395
1396 struct analysis_state state;
1397 state.range_ht = range_ht;
1398 util_dynarray_init_from_stack(&state.query_stack, query_alloc, sizeof(query_alloc));
1399 util_dynarray_init_from_stack(&state.result_stack, result_alloc, sizeof(result_alloc));
1400 state.query_size = sizeof(struct fp_query);
1401 state.get_key = &get_fp_key;
1402 state.process_query = &process_fp_query;
1403
1404 push_fp_query(&state, alu, src, nir_type_invalid);
1405
1406 return unpack_data(perform_analysis(&state));
1407 }
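/* A minimal usage sketch (hypothetical caller and 'fmul_instr' variable):
 * ask for the range of the first source of an fmul and check whether it is
 * known to be non-negative.
 *
 *    struct hash_table *ht = _mesa_pointer_hash_table_create(NULL);
 *    struct ssa_result_range rr = nir_analyze_range(ht, fmul_instr, 0);
 *    bool non_negative = rr.range == ge_zero || rr.range == gt_zero ||
 *                        rr.range == eq_zero;
 */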
1408
1409 static uint32_t
1410 bitmask(uint32_t size)
1411 {
1412 return size >= 32 ? 0xffffffffu : ((uint32_t)1 << size) - 1u;
1413 }
1414
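/* Multiply two 32-bit bounds, returning UINT32_MAX + 1 as a saturating
 * sentinel when the product overflows 32 bits.
 */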
1415 static uint64_t
1416 mul_clamp(uint32_t a, uint32_t b)
1417 {
1418 if (a != 0 && (a * b) / a != b)
1419 return (uint64_t)UINT32_MAX + 1;
1420 else
1421 return a * b;
1422 }
1423
1424 /* recursively gather at most "buf_size" phi/bcsel sources */
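/* The 'visited' set guards against cycles through loop-header phis. When a
 * phi or bcsel has more sources than fit in the remaining buffer space, the
 * scalar itself is stored as a single entry instead of being expanded.
 */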
1425 static unsigned
1426 search_phi_bcsel(nir_scalar scalar, nir_scalar *buf, unsigned buf_size, struct set *visited)
1427 {
1428 if (_mesa_set_search(visited, scalar.def))
1429 return 0;
1430 _mesa_set_add(visited, scalar.def);
1431
1432 if (scalar.def->parent_instr->type == nir_instr_type_phi) {
1433 nir_phi_instr *phi = nir_instr_as_phi(scalar.def->parent_instr);
1434 unsigned num_sources_left = exec_list_length(&phi->srcs);
1435 if (buf_size >= num_sources_left) {
1436 unsigned total_added = 0;
1437 nir_foreach_phi_src(src, phi) {
1438 num_sources_left--;
1439 unsigned added = search_phi_bcsel(nir_get_scalar(src->src.ssa, scalar.comp),
1440 buf + total_added, buf_size - num_sources_left, visited);
1441 assert(added <= buf_size);
1442 buf_size -= added;
1443 total_added += added;
1444 }
1445 return total_added;
1446 }
1447 }
1448
1449 if (nir_scalar_is_alu(scalar)) {
1450 nir_op op = nir_scalar_alu_op(scalar);
1451
1452 if ((op == nir_op_bcsel || op == nir_op_b32csel) && buf_size >= 2) {
1453 nir_scalar src1 = nir_scalar_chase_alu_src(scalar, 1);
1454 nir_scalar src2 = nir_scalar_chase_alu_src(scalar, 2);
1455
1456 unsigned added = search_phi_bcsel(src1, buf, buf_size - 1, visited);
1457 buf_size -= added;
1458 added += search_phi_bcsel(src2, buf + added, buf_size, visited);
1459 return added;
1460 }
1461 }
1462
1463 buf[0] = scalar;
1464 return 1;
1465 }
1466
1467 static nir_variable *
1468 lookup_input(nir_shader *shader, unsigned driver_location)
1469 {
1470 return nir_find_variable_with_driver_location(shader, nir_var_shader_in,
1471 driver_location);
1472 }
1473
1474 /* The config here should be generic enough to be correct on any HW. */
1475 static const nir_unsigned_upper_bound_config default_ub_config = {
1476 .min_subgroup_size = 1u,
1477 .max_subgroup_size = UINT16_MAX,
1478 .max_workgroup_invocations = UINT16_MAX,
1479
1480 /* max_workgroup_count represents the maximum compute shader / kernel
1481 * dispatchable work size. On most hardware, this is essentially
1482 * unbounded. On some hardware max_workgroup_count[1] and
1483 * max_workgroup_count[2] may be smaller.
1484 */
1485 .max_workgroup_count = { UINT32_MAX, UINT32_MAX, UINT32_MAX },
1486
1487 /* max_workgroup_size is the local invocation maximum. This is generally
1488 * small; the OpenGL 4.2 minimum maximum is 1024.
1489 */
1490 .max_workgroup_size = { UINT16_MAX, UINT16_MAX, UINT16_MAX },
1491
1492 .vertex_attrib_max = {
1493 UINT32_MAX,
1494 UINT32_MAX,
1495 UINT32_MAX,
1496 UINT32_MAX,
1497 UINT32_MAX,
1498 UINT32_MAX,
1499 UINT32_MAX,
1500 UINT32_MAX,
1501 UINT32_MAX,
1502 UINT32_MAX,
1503 UINT32_MAX,
1504 UINT32_MAX,
1505 UINT32_MAX,
1506 UINT32_MAX,
1507 UINT32_MAX,
1508 UINT32_MAX,
1509 UINT32_MAX,
1510 UINT32_MAX,
1511 UINT32_MAX,
1512 UINT32_MAX,
1513 UINT32_MAX,
1514 UINT32_MAX,
1515 UINT32_MAX,
1516 UINT32_MAX,
1517 UINT32_MAX,
1518 UINT32_MAX,
1519 UINT32_MAX,
1520 UINT32_MAX,
1521 UINT32_MAX,
1522 UINT32_MAX,
1523 UINT32_MAX,
1524 UINT32_MAX,
1525 },
1526 };
1527
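/* An unsigned-upper-bound query for a single SSA scalar component. */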
1528 struct uub_query {
1529 struct analysis_query head;
1530 nir_scalar scalar;
1531 };
1532
1533 static void
1534 push_uub_query(struct analysis_state *state, nir_scalar scalar)
1535 {
1536 struct uub_query *pushed_q = push_analysis_query(state, sizeof(struct uub_query));
1537 pushed_q->scalar = scalar;
1538 }
1539
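/* Cache key for a scalar: (def->index + 1) shifted left to make room for the
 * component index in the low bits. Constants return 0, i.e. no caching.
 */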
1540 static uintptr_t
1541 get_uub_key(struct analysis_query *q)
1542 {
1543 nir_scalar scalar = ((struct uub_query *)q)->scalar;
1544 /* keys can't be 0, so we have to add 1 to the index */
1545 unsigned shift_amount = ffs(NIR_MAX_VEC_COMPONENTS) - 1;
1546 return nir_scalar_is_const(scalar)
1547 ? 0
1548 : ((uintptr_t)(scalar.def->index + 1) << shift_amount) | scalar.comp;
1549 }
1550
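/* Upper bounds for intrinsics with well-known limits (workgroup and subgroup
 * sizes, vertex attributes, format-limited loads, ...). Intrinsics whose
 * bound depends on one of their sources push a sub-query on the first visit
 * and combine the sub-result when revisited (q.head.pushed_queries != 0).
 */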
1551 static void
1552 get_intrinsic_uub(struct analysis_state *state, struct uub_query q, uint32_t *result,
1553 const uint32_t *src)
1554 {
1555 nir_shader *shader = state->shader;
1556 const nir_unsigned_upper_bound_config *config = state->config;
1557
1558 nir_intrinsic_instr *intrin = nir_instr_as_intrinsic(q.scalar.def->parent_instr);
1559 switch (intrin->intrinsic) {
1560 case nir_intrinsic_load_local_invocation_index:
1561 /* The local invocation index is used under the hood by RADV for
1562 * some non-compute-like shaders (e.g. LS and NGG). These technically
1563 * run in workgroups on the HW, even though this fact is not exposed
1564 * by the API.
1565 * They can safely use the same code path here as variable-sized
1566 * compute-like shader stages.
1567 */
1568 if (!gl_shader_stage_uses_workgroup(shader->info.stage) ||
1569 shader->info.workgroup_size_variable) {
1570 *result = config->max_workgroup_invocations - 1;
1571 } else {
1572 *result = (shader->info.workgroup_size[0] *
1573 shader->info.workgroup_size[1] *
1574 shader->info.workgroup_size[2]) -
1575 1u;
1576 }
1577 break;
1578 case nir_intrinsic_load_local_invocation_id:
1579 if (shader->info.workgroup_size_variable)
1580 *result = config->max_workgroup_size[q.scalar.comp] - 1u;
1581 else
1582 *result = shader->info.workgroup_size[q.scalar.comp] - 1u;
1583 break;
1584 case nir_intrinsic_load_workgroup_id:
1585 *result = config->max_workgroup_count[q.scalar.comp] - 1u;
1586 break;
1587 case nir_intrinsic_load_num_workgroups:
1588 *result = config->max_workgroup_count[q.scalar.comp];
1589 break;
1590 case nir_intrinsic_load_global_invocation_id:
1591 if (shader->info.workgroup_size_variable) {
1592 *result = mul_clamp(config->max_workgroup_size[q.scalar.comp],
1593 config->max_workgroup_count[q.scalar.comp]) -
1594 1u;
1595 } else {
1596 *result = (shader->info.workgroup_size[q.scalar.comp] *
1597 config->max_workgroup_count[q.scalar.comp]) -
1598 1u;
1599 }
1600 break;
1601 case nir_intrinsic_load_invocation_id:
1602 if (shader->info.stage == MESA_SHADER_TESS_CTRL)
1603 *result = shader->info.tess.tcs_vertices_out
1604 ? (shader->info.tess.tcs_vertices_out - 1)
1605 : 511; /* Generous maximum output patch size of 512 */
1606 break;
1607 case nir_intrinsic_load_subgroup_invocation:
1608 case nir_intrinsic_first_invocation:
1609 *result = config->max_subgroup_size - 1;
1610 break;
1611 case nir_intrinsic_mbcnt_amd: {
1612 if (!q.head.pushed_queries) {
1613 push_uub_query(state, nir_get_scalar(intrin->src[1].ssa, 0));
1614 return;
1615 } else {
1616 uint32_t src0 = config->max_subgroup_size - 1;
1617 uint32_t src1 = src[0];
1618 if (src0 + src1 >= src0) /* check overflow */
1619 *result = src0 + src1;
1620 }
1621 break;
1622 }
1623 case nir_intrinsic_load_subgroup_size:
1624 *result = config->max_subgroup_size;
1625 break;
1626 case nir_intrinsic_load_subgroup_id:
1627 case nir_intrinsic_load_num_subgroups: {
1628 uint32_t workgroup_size = config->max_workgroup_invocations;
1629 if (gl_shader_stage_uses_workgroup(shader->info.stage) &&
1630 !shader->info.workgroup_size_variable) {
1631 workgroup_size = shader->info.workgroup_size[0] *
1632 shader->info.workgroup_size[1] *
1633 shader->info.workgroup_size[2];
1634 }
1635 *result = DIV_ROUND_UP(workgroup_size, config->min_subgroup_size);
1636 if (intrin->intrinsic == nir_intrinsic_load_subgroup_id)
1637 (*result)--;
1638 break;
1639 }
1640 case nir_intrinsic_load_input: {
1641 if (shader->info.stage == MESA_SHADER_VERTEX && nir_src_is_const(intrin->src[0])) {
1642 nir_variable *var = lookup_input(shader, nir_intrinsic_base(intrin));
1643 if (var) {
1644 int loc = var->data.location - VERT_ATTRIB_GENERIC0;
1645 if (loc >= 0)
1646 *result = config->vertex_attrib_max[loc];
1647 }
1648 }
1649 break;
1650 }
   case nir_intrinsic_reduce:
   case nir_intrinsic_inclusive_scan:
   case nir_intrinsic_exclusive_scan: {
      nir_op op = nir_intrinsic_reduction_op(intrin);
      if (op == nir_op_umin || op == nir_op_umax || op == nir_op_imin || op == nir_op_imax) {
         if (!q.head.pushed_queries) {
            push_uub_query(state, nir_get_scalar(intrin->src[0].ssa, q.scalar.comp));
            return;
         } else {
            *result = src[0];
         }
      }
      break;
   }
   case nir_intrinsic_read_first_invocation:
   case nir_intrinsic_read_invocation:
   case nir_intrinsic_shuffle:
   case nir_intrinsic_shuffle_xor:
   case nir_intrinsic_shuffle_up:
   case nir_intrinsic_shuffle_down:
   case nir_intrinsic_quad_broadcast:
   case nir_intrinsic_quad_swap_horizontal:
   case nir_intrinsic_quad_swap_vertical:
   case nir_intrinsic_quad_swap_diagonal:
   case nir_intrinsic_quad_swizzle_amd:
   case nir_intrinsic_masked_swizzle_amd:
      if (!q.head.pushed_queries) {
         push_uub_query(state, nir_get_scalar(intrin->src[0].ssa, q.scalar.comp));
         return;
      } else {
         *result = src[0];
      }
      break;
   case nir_intrinsic_write_invocation_amd:
      if (!q.head.pushed_queries) {
         push_uub_query(state, nir_get_scalar(intrin->src[0].ssa, q.scalar.comp));
         push_uub_query(state, nir_get_scalar(intrin->src[1].ssa, q.scalar.comp));
         return;
      } else {
         *result = MAX2(src[0], src[1]);
      }
      break;
   case nir_intrinsic_load_tess_rel_patch_id_amd:
   case nir_intrinsic_load_tcs_num_patches_amd:
      /* Very generous maximum: TCS/TES executed by largest possible workgroup */
      *result = config->max_workgroup_invocations / MAX2(shader->info.tess.tcs_vertices_out, 1u);
      break;
   case nir_intrinsic_load_typed_buffer_amd: {
      const enum pipe_format format = nir_intrinsic_format(intrin);
      if (format == PIPE_FORMAT_NONE)
         break;

      const struct util_format_description *desc = util_format_description(format);
      if (desc->channel[q.scalar.comp].type != UTIL_FORMAT_TYPE_UNSIGNED)
         break;

      if (desc->channel[q.scalar.comp].normalized) {
         *result = fui(1.0);
         break;
      }

      const uint32_t chan_max = u_uintN_max(desc->channel[q.scalar.comp].size);
      *result = desc->channel[q.scalar.comp].pure_integer ? chan_max : fui(chan_max);
      break;
   }
   case nir_intrinsic_load_scalar_arg_amd:
   case nir_intrinsic_load_vector_arg_amd: {
      uint32_t upper_bound = nir_intrinsic_arg_upper_bound_u32_amd(intrin);
      if (upper_bound)
         *result = upper_bound;
      break;
   }
   default:
      break;
   }
}

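/* Upper bound for the result of an ALU instruction. On the first visit the
 * sources are pushed as queries; on the second visit src[i] holds the upper
 * bound of ALU source i and the per-opcode arithmetic below combines them.
 * Opcodes not handled here return early and keep the conservative all-ones
 * bound set by process_uub_query().
 */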
static void
get_alu_uub(struct analysis_state *state, struct uub_query q, uint32_t *result, const uint32_t *src)
{
   nir_op op = nir_scalar_alu_op(q.scalar);

   /* Early exit for unsupported ALU opcodes. */
   switch (op) {
   case nir_op_umin:
   case nir_op_imin:
   case nir_op_imax:
   case nir_op_umax:
   case nir_op_iand:
   case nir_op_ior:
   case nir_op_ixor:
   case nir_op_ishl:
   case nir_op_imul:
   case nir_op_ushr:
   case nir_op_ishr:
   case nir_op_iadd:
   case nir_op_umod:
   case nir_op_udiv:
   case nir_op_bcsel:
   case nir_op_b32csel:
   case nir_op_ubfe:
   case nir_op_bfm:
   case nir_op_extract_u8:
   case nir_op_extract_i8:
   case nir_op_extract_u16:
   case nir_op_extract_i16:
   case nir_op_b2i8:
   case nir_op_b2i16:
   case nir_op_b2i32:
      break;
   case nir_op_u2u1:
   case nir_op_u2u8:
   case nir_op_u2u16:
   case nir_op_u2u32:
      if (nir_scalar_chase_alu_src(q.scalar, 0).def->bit_size > 32) {
         /* If src is >32 bits, return max */
         return;
      }
      break;
   case nir_op_fsat:
   case nir_op_fmul:
   case nir_op_fmulz:
   case nir_op_f2u32:
   case nir_op_f2i32:
      if (nir_scalar_chase_alu_src(q.scalar, 0).def->bit_size != 32) {
         /* Only 32-bit floats are supported for now, return max. */
         return;
      }
      break;
   default:
      return;
   }

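   /* First visit: queue a query for every source, then revisit this query
    * once their bounds are available in src[].
    */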
   if (!q.head.pushed_queries) {
      for (unsigned i = 0; i < nir_op_infos[op].num_inputs; i++)
         push_uub_query(state, nir_scalar_chase_alu_src(q.scalar, i));
      return;
   }

   uint32_t max = bitmask(q.scalar.def->bit_size);
   switch (op) {
   case nir_op_umin:
      *result = src[0] < src[1] ? src[0] : src[1];
      break;
   case nir_op_imin:
   case nir_op_imax:
   case nir_op_umax:
      *result = src[0] > src[1] ? src[0] : src[1];
      break;
   case nir_op_iand:
      *result = bitmask(util_last_bit64(src[0])) & bitmask(util_last_bit64(src[1]));
      break;
   case nir_op_ior:
   case nir_op_ixor:
      *result = bitmask(util_last_bit64(src[0])) | bitmask(util_last_bit64(src[1]));
      break;
   case nir_op_ishl: {
      uint32_t src1 = MIN2(src[1], q.scalar.def->bit_size - 1u);
      if (util_last_bit64(src[0]) + src1 <= q.scalar.def->bit_size) /* check overflow */
         *result = src[0] << src1;
      break;
   }
   case nir_op_imul:
      if (src[0] == 0 || (src[0] * src[1]) / src[0] == src[1]) /* check overflow */
         *result = src[0] * src[1];
      break;
   case nir_op_ushr: {
      nir_scalar src1_scalar = nir_scalar_chase_alu_src(q.scalar, 1);
      uint32_t mask = q.scalar.def->bit_size - 1u;
      if (nir_scalar_is_const(src1_scalar))
         *result = src[0] >> (nir_scalar_as_uint(src1_scalar) & mask);
      else
         *result = src[0];
      break;
   }
   case nir_op_ishr: {
      nir_scalar src1_scalar = nir_scalar_chase_alu_src(q.scalar, 1);
      uint32_t mask = q.scalar.def->bit_size - 1u;
      if (src[0] <= 2147483647 && nir_scalar_is_const(src1_scalar))
         *result = src[0] >> (nir_scalar_as_uint(src1_scalar) & mask);
      else
         *result = src[0];
      break;
   }
   case nir_op_iadd:
      if (src[0] + src[1] >= src[0]) /* check overflow */
         *result = src[0] + src[1];
      break;
   case nir_op_umod:
      *result = src[1] ? src[1] - 1 : 0;
      break;
   case nir_op_udiv: {
      nir_scalar src1_scalar = nir_scalar_chase_alu_src(q.scalar, 1);
      if (nir_scalar_is_const(src1_scalar))
         *result = nir_scalar_as_uint(src1_scalar)
                      ? src[0] / nir_scalar_as_uint(src1_scalar)
                      : 0;
      else
         *result = src[0];
      break;
   }
   case nir_op_bcsel:
   case nir_op_b32csel:
      *result = src[1] > src[2] ? src[1] : src[2];
      break;
   case nir_op_ubfe:
      *result = bitmask(MIN2(src[2], q.scalar.def->bit_size));
      break;
   case nir_op_bfm: {
      nir_scalar src1_scalar = nir_scalar_chase_alu_src(q.scalar, 1);
      if (nir_scalar_is_const(src1_scalar)) {
         uint32_t src0 = MIN2(src[0], 31);
         uint32_t src1 = nir_scalar_as_uint(src1_scalar) & 0x1fu;
         *result = bitmask(src0) << src1;
      } else {
         uint32_t src0 = MIN2(src[0], 31);
         uint32_t src1 = MIN2(src[1], 31);
         *result = bitmask(MIN2(src0 + src1, 32));
      }
      break;
   }
   /* limited floating-point support for f2u32(fmul(load_input(), <constant>)) */
   case nir_op_f2i32:
   case nir_op_f2u32:
      /* infinity/NaN starts at 0x7f800000u, negative numbers at 0x80000000 */
      if (src[0] < 0x7f800000u) {
         float val;
         memcpy(&val, &src[0], 4);
         *result = (uint32_t)val;
      }
      break;
   case nir_op_fmul:
   case nir_op_fmulz:
      /* infinity/NaN starts at 0x7f800000u, negative numbers at 0x80000000 */
      if (src[0] < 0x7f800000u && src[1] < 0x7f800000u) {
         float src0_f, src1_f;
         memcpy(&src0_f, &src[0], 4);
         memcpy(&src1_f, &src[1], 4);
         /* not a proper rounding-up multiplication, but should be good enough */
         float max_f = ceilf(src0_f) * ceilf(src1_f);
         memcpy(result, &max_f, 4);
      }
      break;
   case nir_op_fsat:
      *result = 0x3f800000u;
      break;
   case nir_op_u2u1:
   case nir_op_u2u8:
   case nir_op_u2u16:
   case nir_op_u2u32:
      *result = MIN2(src[0], max);
      break;
   case nir_op_b2i8:
   case nir_op_b2i16:
   case nir_op_b2i32:
      *result = 1;
      break;
   case nir_op_msad_4x8:
      *result = MIN2((uint64_t)src[2] + 4 * 255, UINT32_MAX);
      break;
   case nir_op_extract_u8:
      *result = MIN2(src[0], UINT8_MAX);
      break;
   case nir_op_extract_i8:
      *result = (src[0] >= 0x80) ? max : MIN2(src[0], INT8_MAX);
      break;
   case nir_op_extract_u16:
      *result = MIN2(src[0], UINT16_MAX);
      break;
   case nir_op_extract_i16:
      *result = (src[0] >= 0x8000) ? max : MIN2(src[0], INT16_MAX);
      break;
   default:
      break;
   }
}

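/* Upper bound for a phi: the maximum of the bounds of its sources. Phis that
 * can be part of a cycle (loop header phis) are first given a conservative
 * all-ones entry in range_ht so that revisiting them terminates, and
 * search_phi_bcsel() gathers the defs feeding the cycle instead of following
 * the phi sources directly.
 */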
static void
get_phi_uub(struct analysis_state *state, struct uub_query q, uint32_t *result, const uint32_t *src)
{
   nir_phi_instr *phi = nir_instr_as_phi(q.scalar.def->parent_instr);

   if (exec_list_is_empty(&phi->srcs))
      return;

   if (q.head.pushed_queries) {
      *result = src[0];
      for (unsigned i = 1; i < q.head.pushed_queries; i++)
         *result = MAX2(*result, src[i]);
      return;
   }

   nir_cf_node *prev = nir_cf_node_prev(&phi->instr.block->cf_node);
   if (!prev || prev->type == nir_cf_node_block) {
      /* Resolve cycles by inserting max into range_ht. */
      uint32_t max = bitmask(q.scalar.def->bit_size);
      _mesa_hash_table_insert(state->range_ht, (void *)get_uub_key(&q.head), (void *)(uintptr_t)max);

      struct set *visited = _mesa_pointer_set_create(NULL);
      nir_scalar *defs = alloca(sizeof(nir_scalar) * 64);
      unsigned def_count = search_phi_bcsel(q.scalar, defs, 64, visited);
      _mesa_set_destroy(visited, NULL);

      for (unsigned i = 0; i < def_count; i++)
         push_uub_query(state, defs[i]);
   } else {
      nir_foreach_phi_src(src, phi)
         push_uub_query(state, nir_get_scalar(src->src.ssa, q.scalar.comp));
   }
}

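/* analysis_state::process_query callback for unsigned upper bound queries.
 * The default answer is the all-ones value for the def's bit size; constants
 * are answered directly and everything else is dispatched to the
 * intrinsic/ALU/phi helpers above.
 */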
static void
process_uub_query(struct analysis_state *state, struct analysis_query *aq, uint32_t *result,
                  const uint32_t *src)
{
   struct uub_query q = *(struct uub_query *)aq;

   *result = bitmask(q.scalar.def->bit_size);
   if (nir_scalar_is_const(q.scalar))
      *result = nir_scalar_as_uint(q.scalar);
   else if (nir_scalar_is_intrinsic(q.scalar))
      get_intrinsic_uub(state, q, result, src);
   else if (nir_scalar_is_alu(q.scalar))
      get_alu_uub(state, q, result, src);
   else if (q.scalar.def->parent_instr->type == nir_instr_type_phi)
      get_phi_uub(state, q, result, src);
}

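/* Entry point: returns a conservative upper bound on the unsigned value of
 * `scalar`, caching results in `range_ht`. Passing a NULL config selects
 * default_ub_config.
 *
 * A minimal usage sketch (the `index` scalar below is hypothetical, not part
 * of this file):
 *
 *    uint32_t ub = nir_unsigned_upper_bound(shader, range_ht, index, NULL);
 *    if (ub <= UINT16_MAX) {
 *       ...the value provably fits in 16 bits...
 *    }
 */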
uint32_t
nir_unsigned_upper_bound(nir_shader *shader, struct hash_table *range_ht,
                         nir_scalar scalar,
                         const nir_unsigned_upper_bound_config *config)
{
   if (!config)
      config = &default_ub_config;

   struct uub_query query_alloc[16];
   uint32_t result_alloc[16];

   struct analysis_state state;
   state.shader = shader;
   state.config = config;
   state.range_ht = range_ht;
   util_dynarray_init_from_stack(&state.query_stack, query_alloc, sizeof(query_alloc));
   util_dynarray_init_from_stack(&state.result_stack, result_alloc, sizeof(result_alloc));
   state.query_size = sizeof(struct uub_query);
   state.get_key = &get_uub_key;
   state.process_query = &process_uub_query;

   push_uub_query(&state, scalar);

   return perform_analysis(&state);
}

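/* Returns true if `ssa + const_val` might wrap around 32 bits. Two common
 * addressing patterns are proven safe without a full bound query:
 * iadd(imul(a, #stride), #const) cannot overflow if const_val fits in the
 * slack above the largest 32-bit multiple of the stride, and
 * iadd(iand(a, #mask), #const) cannot overflow if const_val is below the
 * lowest set bit of the mask. Otherwise the computed upper bound decides.
 */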
bool
nir_addition_might_overflow(nir_shader *shader, struct hash_table *range_ht,
                            nir_scalar ssa, unsigned const_val,
                            const nir_unsigned_upper_bound_config *config)
{
   if (nir_scalar_is_alu(ssa)) {
      nir_op alu_op = nir_scalar_alu_op(ssa);

      /* iadd(imul(a, #b), #c) */
      if (alu_op == nir_op_imul || alu_op == nir_op_ishl) {
         nir_scalar mul_src0 = nir_scalar_chase_alu_src(ssa, 0);
         nir_scalar mul_src1 = nir_scalar_chase_alu_src(ssa, 1);
         uint32_t stride = 1;
         if (nir_scalar_is_const(mul_src0))
            stride = nir_scalar_as_uint(mul_src0);
         else if (nir_scalar_is_const(mul_src1))
            stride = nir_scalar_as_uint(mul_src1);

         if (alu_op == nir_op_ishl)
            stride = 1u << (stride % 32u);

         if (!stride || const_val <= UINT32_MAX - (UINT32_MAX / stride * stride))
            return false;
      }

      /* iadd(iand(a, #b), #c) */
      if (alu_op == nir_op_iand) {
         nir_scalar and_src0 = nir_scalar_chase_alu_src(ssa, 0);
         nir_scalar and_src1 = nir_scalar_chase_alu_src(ssa, 1);
         uint32_t mask = 0xffffffff;
         if (nir_scalar_is_const(and_src0))
            mask = nir_scalar_as_uint(and_src0);
         else if (nir_scalar_is_const(and_src1))
            mask = nir_scalar_as_uint(and_src1);
         if (mask == 0 || const_val < (1u << (ffs(mask) - 1)))
            return false;
      }
   }

   uint32_t ub = nir_unsigned_upper_bound(shader, range_ht, ssa, config);
   return const_val + ub < const_val;
}

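/* Walks the uses of `def` to determine which of its bits can affect any
 * consumer. Returns the conservative all-bits mask whenever a use is not
 * understood; `recur` bounds how deep the walk follows phis and subgroup
 * intrinsics back into ssa_def_bits_used().
 */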
static uint64_t
ssa_def_bits_used(const nir_def *def, int recur)
{
   uint64_t bits_used = 0;
   uint64_t all_bits = BITFIELD64_MASK(def->bit_size);

   /* Querying the bits used from a vector is too hard of a question to
    * answer. Return the conservative answer that all bits are used. To
    * handle this, the function would need to be extended to be a query of a
    * single component of the vector. That would also be necessary to fully
    * handle the 'num_components > 1' case inside the loop below.
    *
    * FINISHME: This restriction will eventually need to be lifted to be
    * useful for hardware that uses u16vec2 as the native 16-bit integer type.
    */
   if (def->num_components > 1)
      return all_bits;

   /* Limit recursion */
   if (recur-- <= 0)
      return all_bits;

   nir_foreach_use(src, def) {
      switch (nir_src_parent_instr(src)->type) {
      case nir_instr_type_alu: {
         nir_alu_instr *use_alu = nir_instr_as_alu(nir_src_parent_instr(src));
         unsigned src_idx = container_of(src, nir_alu_src, src) - use_alu->src;

         /* If a user of the value produces a vector result, return the
          * conservative answer that all bits are used. It is possible to
          * answer this query by looping over the components used. For example,
          *
          *    vec4 32 ssa_5 = load_const(0x0000f000, 0x00000f00, 0x000000f0, 0x0000000f)
          *    ...
          *    vec4 32 ssa_8 = iand ssa_7.xxxx, ssa_5
          *
          * could conceivably return 0x0000ffff when querying the bits used of
          * ssa_7. This is unlikely to be worth the effort because the
          * question can eventually be answered after the shader has been
          * scalarized.
          */
         if (use_alu->def.num_components > 1)
            return all_bits;

         switch (use_alu->op) {
         case nir_op_u2u8:
         case nir_op_i2i8:
            bits_used |= 0xff;
            break;

         case nir_op_u2u16:
         case nir_op_i2i16:
            bits_used |= all_bits & 0xffff;
            break;

         case nir_op_u2u32:
         case nir_op_i2i32:
            bits_used |= all_bits & 0xffffffff;
            break;

         case nir_op_extract_u8:
         case nir_op_extract_i8:
            if (src_idx == 0 && nir_src_is_const(use_alu->src[1].src)) {
               unsigned chunk = nir_src_comp_as_uint(use_alu->src[1].src,
                                                     use_alu->src[1].swizzle[0]);
               bits_used |= 0xffull << (chunk * 8);
               break;
            } else {
               return all_bits;
            }

         case nir_op_extract_u16:
         case nir_op_extract_i16:
            if (src_idx == 0 && nir_src_is_const(use_alu->src[1].src)) {
               unsigned chunk = nir_src_comp_as_uint(use_alu->src[1].src,
                                                     use_alu->src[1].swizzle[0]);
               bits_used |= 0xffffull << (chunk * 16);
               break;
            } else {
               return all_bits;
            }

         case nir_op_ishl:
         case nir_op_ishr:
         case nir_op_ushr:
            if (src_idx == 1) {
               bits_used |= (nir_src_bit_size(use_alu->src[0].src) - 1);
               break;
            } else {
               return all_bits;
            }

         case nir_op_iand:
            assert(src_idx < 2);
            if (nir_src_is_const(use_alu->src[1 - src_idx].src)) {
               uint64_t u64 = nir_src_comp_as_uint(use_alu->src[1 - src_idx].src,
                                                   use_alu->src[1 - src_idx].swizzle[0]);
               bits_used |= u64;
               break;
            } else {
               return all_bits;
            }

         case nir_op_ior:
            assert(src_idx < 2);
            if (nir_src_is_const(use_alu->src[1 - src_idx].src)) {
               uint64_t u64 = nir_src_comp_as_uint(use_alu->src[1 - src_idx].src,
                                                   use_alu->src[1 - src_idx].swizzle[0]);
               bits_used |= all_bits & ~u64;
               break;
            } else {
               return all_bits;
            }

         default:
            /* We don't know what this op does */
            return all_bits;
         }
         break;
      }

      case nir_instr_type_intrinsic: {
         nir_intrinsic_instr *use_intrin =
            nir_instr_as_intrinsic(nir_src_parent_instr(src));
         unsigned src_idx = src - use_intrin->src;

         switch (use_intrin->intrinsic) {
         case nir_intrinsic_read_invocation:
         case nir_intrinsic_shuffle:
         case nir_intrinsic_shuffle_up:
         case nir_intrinsic_shuffle_down:
         case nir_intrinsic_shuffle_xor:
         case nir_intrinsic_quad_broadcast:
         case nir_intrinsic_quad_swap_horizontal:
         case nir_intrinsic_quad_swap_vertical:
         case nir_intrinsic_quad_swap_diagonal:
            if (src_idx == 0) {
               bits_used |= ssa_def_bits_used(&use_intrin->def, recur);
            } else {
               if (use_intrin->intrinsic == nir_intrinsic_quad_broadcast) {
                  bits_used |= 3;
               } else {
                  /* Subgroups larger than 128 are not a thing */
                  bits_used |= 127;
               }
            }
            break;

         case nir_intrinsic_reduce:
         case nir_intrinsic_inclusive_scan:
         case nir_intrinsic_exclusive_scan:
            assert(src_idx == 0);
            switch (nir_intrinsic_reduction_op(use_intrin)) {
            case nir_op_iadd:
            case nir_op_imul:
            case nir_op_ior:
            case nir_op_iand:
            case nir_op_ixor:
               bits_used |= ssa_def_bits_used(&use_intrin->def, recur);
               break;

            default:
               return all_bits;
            }
            break;

         default:
            /* We don't know what this op does */
            return all_bits;
         }
         break;
      }

      case nir_instr_type_phi: {
         nir_phi_instr *use_phi = nir_instr_as_phi(nir_src_parent_instr(src));
         bits_used |= ssa_def_bits_used(&use_phi->def, recur);
         break;
      }

      default:
         return all_bits;
      }

      /* If we've somehow shown that all our bits are used, we're done */
      assert((bits_used & ~all_bits) == 0);
      if (bits_used == all_bits)
         return all_bits;
   }

   return bits_used;
}

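/* Public wrapper around ssa_def_bits_used() with a small fixed recursion
 * budget.
 */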
uint64_t
nir_def_bits_used(const nir_def *def)
{
   return ssa_def_bits_used(def, 2);
}
