/*
 * Copyright © 2014 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include "nir_search.h"
#include <inttypes.h>
#include "util/half_float.h"
#include "nir_builder.h"
#include "nir_worklist.h"

/* This should be the same as nir_search_max_comm_ops in nir_algebraic.py. */
#define NIR_SEARCH_MAX_COMM_OPS 8

struct match_state {
   bool inexact_match;
   bool has_exact_alu;
   uint8_t comm_op_direction;
   unsigned variables_seen;

   /* Used for running the automaton on newly-constructed instructions. */
   struct util_dynarray *states;
   const struct per_op_table *pass_op_table;
   const nir_algebraic_table *table;

   nir_alu_src variables[NIR_SEARCH_MAX_VARIABLES];
   struct hash_table *range_ht;
};

static bool
match_expression(const nir_algebraic_table *table, const nir_search_expression *expr, nir_alu_instr *instr,
                 unsigned num_components, const uint8_t *swizzle,
                 struct match_state *state);
static bool
nir_algebraic_automaton(nir_instr *instr, struct util_dynarray *states,
                        const struct per_op_table *pass_op_table);
static const uint8_t identity_swizzle[NIR_MAX_VEC_COMPONENTS] = {
   0, 1, 2, 3,
   4, 5, 6, 7,
   8, 9, 10, 11,
   12, 13, 14, 15,
};

/**
 * Check if a source produces a value of the given type.
 *
 * Used for satisfying 'a@type' constraints.
 */
static bool
src_is_type(nir_src src, nir_alu_type type)
{
   assert(type != nir_type_invalid);

   if (src.ssa->parent_instr->type == nir_instr_type_alu) {
      nir_alu_instr *src_alu = nir_instr_as_alu(src.ssa->parent_instr);
      nir_alu_type output_type = nir_op_infos[src_alu->op].output_type;

      if (type == nir_type_bool) {
         switch (src_alu->op) {
         case nir_op_iand:
         case nir_op_ior:
         case nir_op_ixor:
            return src_is_type(src_alu->src[0].src, nir_type_bool) &&
                   src_is_type(src_alu->src[1].src, nir_type_bool);
         case nir_op_inot:
            return src_is_type(src_alu->src[0].src, nir_type_bool);
         default:
            break;
         }
      }

      return nir_alu_type_get_base_type(output_type) == type;
   } else if (src.ssa->parent_instr->type == nir_instr_type_intrinsic) {
      nir_intrinsic_instr *intr = nir_instr_as_intrinsic(src.ssa->parent_instr);

      if (type == nir_type_bool) {
         return intr->intrinsic == nir_intrinsic_load_front_face ||
                intr->intrinsic == nir_intrinsic_load_helper_invocation;
      }
   }

   /* don't know */
   return false;
}

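/* Check whether a concrete NIR opcode satisfies a search opcode. Search
 * opcodes above nir_last_opcode are bit-size-agnostic conversions (e.g.
 * nir_search_op_i2f matches nir_op_i2f16/32/64); anything else must match
 * exactly.
 */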
static bool
nir_op_matches_search_op(nir_op nop, uint16_t sop)
{
   if (sop <= nir_last_opcode)
      return nop == sop;

#define MATCH_FCONV_CASE(op)          \
   case nir_search_op_##op:           \
      return nop == nir_op_##op##16 || \
             nop == nir_op_##op##32 || \
             nop == nir_op_##op##64;

#define MATCH_ICONV_CASE(op)          \
   case nir_search_op_##op:           \
      return nop == nir_op_##op##8 ||  \
             nop == nir_op_##op##16 || \
             nop == nir_op_##op##32 || \
             nop == nir_op_##op##64;

   switch (sop) {
      MATCH_FCONV_CASE(i2f)
      MATCH_FCONV_CASE(u2f)
      MATCH_FCONV_CASE(f2f)
      MATCH_ICONV_CASE(f2u)
      MATCH_ICONV_CASE(f2i)
      MATCH_ICONV_CASE(u2u)
      MATCH_ICONV_CASE(i2i)
      MATCH_FCONV_CASE(b2f)
      MATCH_ICONV_CASE(b2i)
   default:
      unreachable("Invalid nir_search_op");
   }

#undef MATCH_FCONV_CASE
#undef MATCH_ICONV_CASE
}

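/* Map a sized conversion opcode (e.g. nir_op_f2f32) to its bit-size-agnostic
 * search opcode; non-conversion opcodes are returned unchanged.
 */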
uint16_t
nir_search_op_for_nir_op(nir_op nop)
{
#define MATCH_FCONV_CASE(op) \
   case nir_op_##op##16:     \
   case nir_op_##op##32:     \
   case nir_op_##op##64:     \
      return nir_search_op_##op;

#define MATCH_ICONV_CASE(op) \
   case nir_op_##op##8:      \
   case nir_op_##op##16:     \
   case nir_op_##op##32:     \
   case nir_op_##op##64:     \
      return nir_search_op_##op;

   switch (nop) {
      MATCH_FCONV_CASE(i2f)
      MATCH_FCONV_CASE(u2f)
      MATCH_FCONV_CASE(f2f)
      MATCH_ICONV_CASE(f2u)
      MATCH_ICONV_CASE(f2i)
      MATCH_ICONV_CASE(u2u)
      MATCH_ICONV_CASE(i2i)
      MATCH_FCONV_CASE(b2f)
      MATCH_ICONV_CASE(b2i)
   default:
      return nop;
   }

#undef MATCH_FCONV_CASE
#undef MATCH_ICONV_CASE
}

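/* Inverse of nir_search_op_for_nir_op: resolve a search opcode to the
 * concrete NIR conversion opcode for the given destination bit size.
 */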
static nir_op
nir_op_for_search_op(uint16_t sop, unsigned bit_size)
{
   if (sop <= nir_last_opcode)
      return sop;

#define RET_FCONV_CASE(op)                  \
   case nir_search_op_##op:                 \
      switch (bit_size) {                   \
      case 16:                              \
         return nir_op_##op##16;            \
      case 32:                              \
         return nir_op_##op##32;            \
      case 64:                              \
         return nir_op_##op##64;            \
      default:                              \
         unreachable("Invalid bit size");   \
      }

#define RET_ICONV_CASE(op)                  \
   case nir_search_op_##op:                 \
      switch (bit_size) {                   \
      case 8:                               \
         return nir_op_##op##8;             \
      case 16:                              \
         return nir_op_##op##16;            \
      case 32:                              \
         return nir_op_##op##32;            \
      case 64:                              \
         return nir_op_##op##64;            \
      default:                              \
         unreachable("Invalid bit size");   \
      }

   switch (sop) {
      RET_FCONV_CASE(i2f)
      RET_FCONV_CASE(u2f)
      RET_FCONV_CASE(f2f)
      RET_ICONV_CASE(f2u)
      RET_ICONV_CASE(f2i)
      RET_ICONV_CASE(u2u)
      RET_ICONV_CASE(i2i)
      RET_FCONV_CASE(b2f)
      RET_ICONV_CASE(b2i)
   default:
      unreachable("Invalid nir_search_op");
   }

#undef RET_FCONV_CASE
#undef RET_ICONV_CASE
}

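/* Try to match a single search value (expression, variable, or constant)
 * against source 'src' of an ALU instruction, recording variable bindings
 * in 'state'. Returns false as soon as any constraint fails.
 */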
static bool
match_value(const nir_algebraic_table *table,
            const nir_search_value *value, nir_alu_instr *instr, unsigned src,
            unsigned num_components, const uint8_t *swizzle,
            struct match_state *state)
{
   uint8_t new_swizzle[NIR_MAX_VEC_COMPONENTS];

   /* If the source is an explicitly sized source, then we need to reset
    * both the number of components and the swizzle.
    */
   if (nir_op_infos[instr->op].input_sizes[src] != 0) {
      num_components = nir_op_infos[instr->op].input_sizes[src];
      swizzle = identity_swizzle;
   }

   for (unsigned i = 0; i < num_components; ++i)
      new_swizzle[i] = instr->src[src].swizzle[swizzle[i]];

   /* If the value has a specific bit size and it doesn't match, bail */
   if (value->bit_size > 0 &&
       nir_src_bit_size(instr->src[src].src) != value->bit_size)
      return false;

   switch (value->type) {
   case nir_search_value_expression:
      if (instr->src[src].src.ssa->parent_instr->type != nir_instr_type_alu)
         return false;

      return match_expression(table, nir_search_value_as_expression(value),
                              nir_instr_as_alu(instr->src[src].src.ssa->parent_instr),
                              num_components, new_swizzle, state);

   case nir_search_value_variable: {
      nir_search_variable *var = nir_search_value_as_variable(value);
      assert(var->variable < NIR_SEARCH_MAX_VARIABLES);

      if (state->variables_seen & (1 << var->variable)) {
         if (state->variables[var->variable].src.ssa != instr->src[src].src.ssa)
            return false;

         for (unsigned i = 0; i < num_components; ++i) {
            if (state->variables[var->variable].swizzle[i] != new_swizzle[i])
               return false;
         }

         return true;
      } else {
         if (var->is_constant &&
             instr->src[src].src.ssa->parent_instr->type != nir_instr_type_load_const)
            return false;

         if (var->cond_index != -1 &&
             !table->variable_cond[var->cond_index](state->range_ht, instr,
                                                    src, num_components,
                                                    new_swizzle))
            return false;

         if (var->type != nir_type_invalid &&
             !src_is_type(instr->src[src].src, var->type))
            return false;

         state->variables_seen |= (1 << var->variable);
         state->variables[var->variable].src = instr->src[src].src;

         for (unsigned i = 0; i < NIR_MAX_VEC_COMPONENTS; ++i) {
            if (i < num_components)
               state->variables[var->variable].swizzle[i] = new_swizzle[i];
            else
               state->variables[var->variable].swizzle[i] = 0;
         }

         return true;
      }
   }

   case nir_search_value_constant: {
      nir_search_constant *const_val = nir_search_value_as_constant(value);

      if (!nir_src_is_const(instr->src[src].src))
         return false;

      switch (const_val->type) {
      case nir_type_float: {
         nir_load_const_instr *const load =
            nir_instr_as_load_const(instr->src[src].src.ssa->parent_instr);

         /* There are 8-bit and 1-bit integer types, but there are no 8-bit or
          * 1-bit float types. This prevents potential assertion failures in
          * nir_src_comp_as_float.
          */
         if (load->def.bit_size < 16)
            return false;

         for (unsigned i = 0; i < num_components; ++i) {
            double val = nir_src_comp_as_float(instr->src[src].src,
                                               new_swizzle[i]);
            if (val != const_val->data.d)
               return false;
         }
         return true;
      }

      case nir_type_int:
      case nir_type_uint:
      case nir_type_bool: {
         unsigned bit_size = nir_src_bit_size(instr->src[src].src);
         uint64_t mask = u_uintN_max(bit_size);
         for (unsigned i = 0; i < num_components; ++i) {
            uint64_t val = nir_src_comp_as_uint(instr->src[src].src,
                                                new_swizzle[i]);
            if ((val & mask) != (const_val->data.u & mask))
               return false;
         }
         return true;
      }

      default:
         unreachable("Invalid alu source type");
      }
   }

   default:
      unreachable("Invalid search value type");
   }
}

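/* Match an entire search expression tree against an ALU instruction. This
 * checks the opcode, bit size, exact/float-controls constraints, and any
 * expression condition, then recursively matches each source, flipping the
 * first two sources of commutative ops according to
 * state->comm_op_direction.
 */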
static bool
match_expression(const nir_algebraic_table *table, const nir_search_expression *expr, nir_alu_instr *instr,
                 unsigned num_components, const uint8_t *swizzle,
                 struct match_state *state)
{
   if (expr->cond_index != -1 && !table->expression_cond[expr->cond_index](instr))
      return false;

   if (expr->nsz && nir_alu_instr_is_signed_zero_preserve(instr))
      return false;

   if (expr->nnan && nir_alu_instr_is_nan_preserve(instr))
      return false;

   if (expr->ninf && nir_alu_instr_is_inf_preserve(instr))
      return false;

   if (!nir_op_matches_search_op(instr->op, expr->opcode))
      return false;

   if (expr->value.bit_size > 0 &&
       instr->def.bit_size != expr->value.bit_size)
      return false;

   state->inexact_match = expr->inexact || state->inexact_match;
   state->has_exact_alu = (instr->exact && !expr->ignore_exact) || state->has_exact_alu;
   if (state->inexact_match && state->has_exact_alu)
      return false;

   assert(nir_op_infos[instr->op].num_inputs > 0);

   /* If we have an explicitly sized destination, we can only handle the
    * identity swizzle. While dot(vec3(a, b, c).zxy) is a valid
    * expression, we don't have the information right now to propagate that
    * swizzle through. We can only properly propagate swizzles if the
    * instruction is vectorized.
    *
    * The only exception is swizzle_y, for which we have a special condition,
    * so that we can do pack64_2x32_split(unpack(a).x, unpack(a).y) --> a.
    */
   if (expr->swizzle_y) {
      if (num_components != 1 || swizzle[0] != 1)
         return false;
   } else {
      if (nir_op_infos[instr->op].output_size != 0) {
         for (unsigned i = 0; i < num_components; i++) {
            if (swizzle[i] != i)
               return false;
         }
      }
   }

   /* If this is a commutative expression and it's one of the first few, look
    * up its direction for the current search operation. We'll use that value
    * to possibly flip the sources for the match.
    */
   unsigned comm_op_flip =
      (expr->comm_expr_idx >= 0 &&
       expr->comm_expr_idx < NIR_SEARCH_MAX_COMM_OPS)
         ? ((state->comm_op_direction >> expr->comm_expr_idx) & 1)
         : 0;

   bool matched = true;
   for (unsigned i = 0; i < nir_op_infos[instr->op].num_inputs; i++) {
      /* 2src_commutative instructions that have 3 sources are only commutative
       * in the first two sources. Source 2 is always source 2.
       */
      if (!match_value(table, &state->table->values[expr->srcs[i]].value, instr,
                       i < 2 ? i ^ comm_op_flip : i,
                       num_components, swizzle, state)) {
         matched = false;
         break;
      }
   }

   return matched;
}

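/* Resolve the destination bit size for a replacement value: an explicit
 * positive bit_size wins, a negative bit_size refers to the bit size of a
 * matched variable's source, and zero inherits the bit size of the matched
 * expression root.
 */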
static unsigned
replace_bitsize(const nir_search_value *value, unsigned search_bitsize,
                struct match_state *state)
{
   if (value->bit_size > 0)
      return value->bit_size;
   if (value->bit_size < 0)
      return nir_src_bit_size(state->variables[-value->bit_size - 1].src);
   return search_bitsize;
}

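/* Build the NIR for one node of the replacement expression tree at the
 * builder's cursor and return it as an ALU source. Newly created
 * instructions are immediately run through the automaton so that later
 * iterations of the pass can match them.
 */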
static nir_alu_src
construct_value(nir_builder *build,
                const nir_search_value *value,
                unsigned num_components, unsigned search_bitsize,
                struct match_state *state,
                nir_instr *instr)
{
   switch (value->type) {
   case nir_search_value_expression: {
      const nir_search_expression *expr = nir_search_value_as_expression(value);
      unsigned dst_bit_size = replace_bitsize(value, search_bitsize, state);
      nir_op op = nir_op_for_search_op(expr->opcode, dst_bit_size);

      if (nir_op_infos[op].output_size != 0)
         num_components = nir_op_infos[op].output_size;

      nir_alu_instr *alu = nir_alu_instr_create(build->shader, op);
      nir_def_init(&alu->instr, &alu->def, num_components,
                   dst_bit_size);

      /* We have no way of knowing what values in a given search expression
       * map to a particular replacement value. Therefore, if the
       * expression we are replacing has any exact values, the entire
       * replacement should be exact.
       */
      alu->exact = state->has_exact_alu || expr->exact;
      alu->fp_fast_math = nir_instr_as_alu(instr)->fp_fast_math;

      for (unsigned i = 0; i < nir_op_infos[op].num_inputs; i++) {
         /* If the source is an explicitly sized source, then we need to reset
          * the number of components to match.
          */
         if (nir_op_infos[alu->op].input_sizes[i] != 0)
            num_components = nir_op_infos[alu->op].input_sizes[i];

         alu->src[i] = construct_value(build, &state->table->values[expr->srcs[i]].value,
                                       num_components, search_bitsize,
                                       state, instr);
      }

      nir_builder_instr_insert(build, &alu->instr);

      assert(alu->def.index ==
             util_dynarray_num_elements(state->states, uint16_t));
      util_dynarray_append(state->states, uint16_t, 0);
      nir_algebraic_automaton(&alu->instr, state->states, state->pass_op_table);

      nir_alu_src val;
      val.src = nir_src_for_ssa(&alu->def);
      memcpy(val.swizzle, identity_swizzle, sizeof val.swizzle);

      return val;
   }

   case nir_search_value_variable: {
      const nir_search_variable *var = nir_search_value_as_variable(value);
      assert(state->variables_seen & (1 << var->variable));

      nir_alu_src val = { NIR_SRC_INIT };
      nir_alu_src_copy(&val, &state->variables[var->variable]);
      assert(!var->is_constant);

      for (unsigned i = 0; i < NIR_MAX_VEC_COMPONENTS; i++)
         val.swizzle[i] = state->variables[var->variable].swizzle[var->swizzle[i]];

      return val;
   }

   case nir_search_value_constant: {
      const nir_search_constant *c = nir_search_value_as_constant(value);
      unsigned bit_size = replace_bitsize(value, search_bitsize, state);

      nir_def *cval;
      switch (c->type) {
      case nir_type_float:
         cval = nir_imm_floatN_t(build, c->data.d, bit_size);
         break;

      case nir_type_int:
      case nir_type_uint:
         cval = nir_imm_intN_t(build, c->data.i, bit_size);
         break;

      case nir_type_bool:
         cval = nir_imm_boolN_t(build, c->data.u, bit_size);
         break;

      default:
         unreachable("Invalid alu source type");
      }

      assert(cval->index ==
             util_dynarray_num_elements(state->states, uint16_t));
      util_dynarray_append(state->states, uint16_t, 0);
      nir_algebraic_automaton(cval->parent_instr, state->states,
                              state->pass_op_table);

      nir_alu_src val;
      val.src = nir_src_for_ssa(cval);
      memset(val.swizzle, 0, sizeof val.swizzle);

      return val;
   }

   default:
      unreachable("Invalid search value type");
   }
}

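/* Print a search value to stderr, roughly in the syntax used by
 * nir_algebraic patterns. For debugging only.
 */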
UNUSED static void
dump_value(const nir_algebraic_table *table, const nir_search_value *val)
{
   switch (val->type) {
   case nir_search_value_constant: {
      const nir_search_constant *sconst = nir_search_value_as_constant(val);
      switch (sconst->type) {
      case nir_type_float:
         fprintf(stderr, "%f", sconst->data.d);
         break;
      case nir_type_int:
         fprintf(stderr, "%" PRId64, sconst->data.i);
         break;
      case nir_type_uint:
         fprintf(stderr, "0x%" PRIx64, sconst->data.u);
         break;
      case nir_type_bool:
         fprintf(stderr, "%s", sconst->data.u != 0 ? "True" : "False");
         break;
      default:
         unreachable("bad const type");
      }
      break;
   }

   case nir_search_value_variable: {
      const nir_search_variable *var = nir_search_value_as_variable(val);
      if (var->is_constant)
         fprintf(stderr, "#");
      fprintf(stderr, "%c", var->variable + 'a');
      break;
   }

   case nir_search_value_expression: {
      const nir_search_expression *expr = nir_search_value_as_expression(val);
      fprintf(stderr, "(");
      if (expr->inexact)
         fprintf(stderr, "~");
      switch (expr->opcode) {
#define CASE(n)              \
   case nir_search_op_##n:   \
      fprintf(stderr, #n);   \
      break;
         CASE(b2f)
         CASE(b2i)
         CASE(i2i)
         CASE(f2i)
         CASE(i2f)
#undef CASE
      default:
         fprintf(stderr, "%s", nir_op_infos[expr->opcode].name);
      }

      unsigned num_srcs = 1;
      if (expr->opcode <= nir_last_opcode)
         num_srcs = nir_op_infos[expr->opcode].num_inputs;

      for (unsigned i = 0; i < num_srcs; i++) {
         fprintf(stderr, " ");
         dump_value(table, &table->values[expr->srcs[i]].value);
      }

      fprintf(stderr, ")");
      break;
   }
   }

   if (val->bit_size > 0)
      fprintf(stderr, "@%d", val->bit_size);
}

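/* Re-run the automaton on every user of 'instr' and push the users whose
 * state changed onto the worklist.
 */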
static void
add_uses_to_worklist(nir_instr *instr,
                     nir_instr_worklist *worklist,
                     struct util_dynarray *states,
                     const struct per_op_table *pass_op_table)
{
   nir_def *def = nir_instr_def(instr);

   nir_foreach_use_safe(use_src, def) {
      if (nir_algebraic_automaton(nir_src_parent_instr(use_src), states, pass_op_table))
         nir_instr_worklist_push_tail(worklist, nir_src_parent_instr(use_src));
   }
}

static void
nir_algebraic_update_automaton(nir_instr *new_instr,
                               nir_instr_worklist *algebraic_worklist,
                               struct util_dynarray *states,
                               const struct per_op_table *pass_op_table)
{
   nir_instr_worklist *automaton_worklist = nir_instr_worklist_create();

   /* Walk through the tree of uses of our new instruction's SSA value,
    * recursively updating the automaton state until it stabilizes.
    */
   add_uses_to_worklist(new_instr, automaton_worklist, states, pass_op_table);

   nir_instr *instr;
   while ((instr = nir_instr_worklist_pop_head(automaton_worklist))) {
      nir_instr_worklist_push_tail(algebraic_worklist, instr);
      add_uses_to_worklist(instr, automaton_worklist, states, pass_op_table);
   }

   nir_instr_worklist_destroy(automaton_worklist);
}

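/* Try to match 'search' against 'instr', iterating over all commutative
 * source orderings. On success, build the replacement expression, rewrite
 * all uses, update the automaton, and return the new SSA value; on failure,
 * return NULL.
 */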
static nir_def *
nir_replace_instr(nir_builder *build, nir_alu_instr *instr,
                  struct hash_table *range_ht,
                  struct util_dynarray *states,
                  const nir_algebraic_table *table,
                  const nir_search_expression *search,
                  const nir_search_value *replace,
                  nir_instr_worklist *algebraic_worklist,
                  struct exec_list *dead_instrs)
{
   uint8_t swizzle[NIR_MAX_VEC_COMPONENTS] = { 0 };

   for (unsigned i = 0; i < instr->def.num_components; ++i)
      swizzle[i] = i;

   struct match_state state;
   state.inexact_match = false;
   state.has_exact_alu = false;
   state.range_ht = range_ht;
   state.pass_op_table = table->pass_op_table;
   state.table = table;

   STATIC_ASSERT(sizeof(state.comm_op_direction) * 8 >= NIR_SEARCH_MAX_COMM_OPS);

   unsigned comm_expr_combinations =
      1 << MIN2(search->comm_exprs, NIR_SEARCH_MAX_COMM_OPS);

   bool found = false;
   for (unsigned comb = 0; comb < comm_expr_combinations; comb++) {
      /* The bitfield of directions is just the current iteration. Hooray for
       * binary.
       */
      state.comm_op_direction = comb;
      state.variables_seen = 0;

      if (match_expression(table, search, instr,
                           instr->def.num_components,
                           swizzle, &state)) {
         found = true;
         break;
      }
   }
   if (!found)
      return NULL;

#if 0
   fprintf(stderr, "matched: ");
   dump_value(table, &search->value);
   fprintf(stderr, " -> ");
   dump_value(table, replace);
   fprintf(stderr, " ssa_%d\n", instr->def.index);
#endif

   /* If the instruction at the root of the expression tree being replaced is
    * a unary operation, insert the replacement instructions at the location
    * of the source of the unary operation. Otherwise, insert the replacement
    * instructions at the location of the expression tree root.
    *
    * For the unary operation case, this is done to prevent some spurious code
    * motion that can dramatically extend live ranges. Imagine an expression
    * like -(A+B) where the addition and the negation are separated by flow
    * control and thousands of instructions. If this expression is replaced
    * with -A+-B, inserting the new instructions at the site of the negation
    * could extend the live ranges of A and B dramatically. This could
    * increase register pressure and cause spilling.
    *
    * It may well be that moving instructions around is a good thing, but
    * keeping algebraic optimizations and code motion optimizations separate
    * seems safest.
    */
   nir_alu_instr *const src_instr = nir_src_as_alu_instr(instr->src[0].src);
   if (src_instr != NULL &&
       (instr->op == nir_op_fneg || instr->op == nir_op_fabs ||
        instr->op == nir_op_ineg || instr->op == nir_op_iabs ||
        instr->op == nir_op_inot)) {
      /* Insert new instructions *after*. Otherwise a hypothetical
       * replacement fneg(X) -> fabs(X) would insert the fabs() instruction
       * before X! This can also occur for things like fneg(X.wzyx) -> X.wzyx
       * in vector mode. A move instruction to handle the swizzle will get
       * inserted before X.
       *
       * This manifested in a single OpenGL ES 2.0 CTS vertex shader test on
       * older Intel GPUs that use vector-mode vertex processing.
       */
      build->cursor = nir_after_instr(&src_instr->instr);
   } else {
      build->cursor = nir_before_instr(&instr->instr);
   }

   state.states = states;

   nir_alu_src val = construct_value(build, replace,
                                     instr->def.num_components,
                                     instr->def.bit_size,
                                     &state, &instr->instr);

   /* Note that the NIR builder will elide the MOV if it's a no-op, which may
    * allow more work to be done in a single pass through algebraic.
    */
   nir_def *ssa_val =
      nir_mov_alu(build, val, instr->def.num_components);
   if (ssa_val->index == util_dynarray_num_elements(states, uint16_t)) {
      util_dynarray_append(states, uint16_t, 0);
      nir_algebraic_automaton(ssa_val->parent_instr, states, table->pass_op_table);
   }

   /* Rewrite the uses of the old SSA value to the new one, and recurse
    * through the uses updating the automaton's state.
    */
   nir_def_rewrite_uses(&instr->def, ssa_val);
   nir_algebraic_update_automaton(ssa_val->parent_instr, algebraic_worklist,
                                  states, table->pass_op_table);

   /* Nothing uses the instr any more, so drop it out of the program. Note
    * that the instr may be in the worklist still, so we can't free it
    * directly.
    */
   assert(instr->instr.pass_flags == 0);
   instr->instr.pass_flags = 1;
   nir_instr_remove(&instr->instr);
   exec_list_push_tail(dead_instrs, &instr->instr.node);

   return ssa_val;
}

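/* Advance the automaton state for one instruction from the states of its
 * sources, using the pass's transition tables. Returns true if the
 * instruction's state changed, i.e. its users need to be revisited.
 */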
static bool
nir_algebraic_automaton(nir_instr *instr, struct util_dynarray *states,
                        const struct per_op_table *pass_op_table)
{
   switch (instr->type) {
   case nir_instr_type_alu: {
      nir_alu_instr *alu = nir_instr_as_alu(instr);
      nir_op op = alu->op;
      uint16_t search_op = nir_search_op_for_nir_op(op);
      const struct per_op_table *tbl = &pass_op_table[search_op];
      if (tbl->num_filtered_states == 0)
         return false;

      /* Calculate the index into the transition table. Note the index
       * calculated must match the iteration order of Python's
       * itertools.product(), which was used to emit the transition
       * table.
       */
      unsigned index = 0;
      for (unsigned i = 0; i < nir_op_infos[op].num_inputs; i++) {
         index *= tbl->num_filtered_states;
         if (tbl->filter)
            index += tbl->filter[*util_dynarray_element(states, uint16_t,
                                                        alu->src[i].src.ssa->index)];
      }

      uint16_t *state = util_dynarray_element(states, uint16_t,
                                              alu->def.index);
      if (*state != tbl->table[index]) {
         *state = tbl->table[index];
         return true;
      }
      return false;
   }

   case nir_instr_type_load_const: {
      nir_load_const_instr *load_const = nir_instr_as_load_const(instr);
      uint16_t *state = util_dynarray_element(states, uint16_t,
                                              load_const->def.index);
      if (*state != CONST_STATE) {
         *state = CONST_STATE;
         return true;
      }
      return false;
   }

   default:
      return false;
   }
}

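/* Attempt every transform associated with this ALU instruction's automaton
 * state, stopping at the first one whose condition holds and whose search
 * pattern matches.
 */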
static bool
nir_algebraic_instr(nir_builder *build, nir_instr *instr,
                    struct hash_table *range_ht,
                    const bool *condition_flags,
                    const nir_algebraic_table *table,
                    struct util_dynarray *states,
                    nir_instr_worklist *worklist,
                    struct exec_list *dead_instrs)
{
   if (instr->type != nir_instr_type_alu)
      return false;

   nir_alu_instr *alu = nir_instr_as_alu(instr);

   unsigned bit_size = alu->def.bit_size;
   const unsigned execution_mode =
      build->shader->info.float_controls_execution_mode;
   const bool ignore_inexact =
      nir_alu_instr_is_signed_zero_inf_nan_preserve(alu) ||
      nir_is_denorm_flush_to_zero(execution_mode, bit_size);

   int xform_idx = *util_dynarray_element(states, uint16_t,
                                          alu->def.index);
   for (const struct transform *xform = &table->transforms[table->transform_offsets[xform_idx]];
        xform->condition_offset != ~0;
        xform++) {
      if (condition_flags[xform->condition_offset] &&
          !(table->values[xform->search].expression.inexact && ignore_inexact) &&
          nir_replace_instr(build, alu, range_ht, states, table,
                            &table->values[xform->search].expression,
                            &table->values[xform->replace].value, worklist, dead_instrs)) {
         _mesa_hash_table_clear(range_ht, NULL);
         return true;
      }
   }

   return false;
}

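/* Run a generated algebraic pass (one table of transforms) over a function
 * implementation. Returns true if any transform made progress.
 */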
bool
nir_algebraic_impl(nir_function_impl *impl,
                   const bool *condition_flags,
                   const nir_algebraic_table *table)
{
   bool progress = false;

   nir_builder build = nir_builder_create(impl);

   /* Note: it's important here that we're allocating a zeroed array, since
    * state 0 is the default state, which means we don't have to visit
    * anything other than constants and ALU instructions.
    */
   struct util_dynarray states = { 0 };
   if (!util_dynarray_resize(&states, uint16_t, impl->ssa_alloc)) {
      nir_metadata_preserve(impl, nir_metadata_all);
      return false;
   }
   memset(states.data, 0, states.size);

   struct hash_table *range_ht = _mesa_pointer_hash_table_create(NULL);

   nir_instr_worklist *worklist = nir_instr_worklist_create();

   /* Walk top-to-bottom setting up the automaton state. */
   nir_foreach_block(block, impl) {
      nir_foreach_instr(instr, block) {
         nir_algebraic_automaton(instr, &states, table->pass_op_table);
      }
   }

   /* Put our instrs in the worklist such that we're popping the last instr
    * first. This will encourage us to match the biggest source patterns when
    * possible.
    */
   nir_foreach_block_reverse(block, impl) {
      nir_foreach_instr_reverse(instr, block) {
         instr->pass_flags = 0;
         if (instr->type == nir_instr_type_alu)
            nir_instr_worklist_push_tail(worklist, instr);
      }
   }

   struct exec_list dead_instrs;
   exec_list_make_empty(&dead_instrs);

   nir_instr *instr;
   while ((instr = nir_instr_worklist_pop_head(worklist))) {
      /* The worklist can have an instr pushed to it multiple times if it was
       * the src of multiple instrs that also got optimized, so make sure that
       * we don't try to re-optimize an instr we already handled.
       */
      if (instr->pass_flags)
         continue;

      progress |= nir_algebraic_instr(&build, instr,
                                      range_ht, condition_flags,
                                      table, &states, worklist, &dead_instrs);
   }

   nir_instr_free_list(&dead_instrs);

   nir_instr_worklist_destroy(worklist);
   ralloc_free(range_ht);
   util_dynarray_fini(&states);

   if (progress) {
      nir_metadata_preserve(impl, nir_metadata_control_flow);
   } else {
      nir_metadata_preserve(impl, nir_metadata_all);
   }

   return progress;
}