/*
 * Copyright © 2014 Connor Abbott
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include "nir_instr_set.h"
#include "nir_vla.h"
#include "util/half_float.h"

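/* nir_foreach_src()/nir_foreach_dest() callbacks used by the assertion
 * helper below to verify that an instruction is entirely in SSA form.
 */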
static bool
src_is_ssa(nir_src *src, void *data)
{
   (void) data;
   return src->is_ssa;
}

static bool
dest_is_ssa(nir_dest *dest, void *data)
{
   (void) data;
   return dest->is_ssa;
}

ASSERTED static inline bool
instr_each_src_and_dest_is_ssa(const nir_instr *instr)
{
   if (!nir_foreach_dest((nir_instr *)instr, dest_is_ssa, NULL) ||
       !nir_foreach_src((nir_instr *)instr, src_is_ssa, NULL))
      return false;

   return true;
}

/* This function determines if uses of an instruction can safely be rewritten
 * to use another identical instruction instead. Note that this function must
 * be kept in sync with hash_instr() and nir_instrs_equal() -- only
 * instructions that pass this test will be handed on to those functions, and
 * conversely they must handle everything that this function returns true for.
 */
static bool
instr_can_rewrite(const nir_instr *instr)
{
   /* We only handle SSA. */
   assert(instr_each_src_and_dest_is_ssa(instr));

   switch (instr->type) {
   case nir_instr_type_alu:
   case nir_instr_type_deref:
   case nir_instr_type_tex:
   case nir_instr_type_load_const:
   case nir_instr_type_phi:
      return true;
   case nir_instr_type_intrinsic:
      return nir_intrinsic_can_reorder(nir_instr_as_intrinsic(instr));
   case nir_instr_type_call:
   case nir_instr_type_jump:
   case nir_instr_type_ssa_undef:
      return false;
   case nir_instr_type_parallel_copy:
   default:
      unreachable("Invalid instruction type");
   }

   return false;
}

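/* Mixes `data` into the running xxHash32 value `hash`. The hash_*() helpers
 * below must stay consistent with nir_instrs_equal(): two instructions that
 * compare equal have to produce the same hash.
 */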
#define HASH(hash, data) XXH32(&(data), sizeof(data), hash)

static uint32_t
hash_src(uint32_t hash, const nir_src *src)
{
   assert(src->is_ssa);
   hash = HASH(hash, src->ssa);
   return hash;
}

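/* Hashes an ALU source: its abs/negate modifiers, the swizzle entries that
 * are actually read, and the SSA def it points at.
 */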
static uint32_t
hash_alu_src(uint32_t hash, const nir_alu_src *src, unsigned num_components)
{
   hash = HASH(hash, src->abs);
   hash = HASH(hash, src->negate);

   for (unsigned i = 0; i < num_components; i++)
      hash = HASH(hash, src->swizzle[i]);

   hash = hash_src(hash, &src->src);
   return hash;
}

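/* Hashes an ALU instruction: opcode, wrap flags, destination size, and all
 * sources. instr->exact is deliberately left out (see nir_instrs_equal()),
 * and for 2-src-commutative opcodes the first two source hashes are combined
 * commutatively so that, e.g., a + b and b + a hash the same.
 */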
static uint32_t
hash_alu(uint32_t hash, const nir_alu_instr *instr)
{
   hash = HASH(hash, instr->op);

   /* We explicitly don't hash instr->exact. */
   uint8_t flags = instr->no_signed_wrap |
                   instr->no_unsigned_wrap << 1;
   hash = HASH(hash, flags);

   hash = HASH(hash, instr->dest.dest.ssa.num_components);
   hash = HASH(hash, instr->dest.dest.ssa.bit_size);

   if (nir_op_infos[instr->op].algebraic_properties & NIR_OP_IS_2SRC_COMMUTATIVE) {
      assert(nir_op_infos[instr->op].num_inputs >= 2);

      uint32_t hash0 = hash_alu_src(hash, &instr->src[0],
                                    nir_ssa_alu_instr_src_components(instr, 0));
      uint32_t hash1 = hash_alu_src(hash, &instr->src[1],
                                    nir_ssa_alu_instr_src_components(instr, 1));
      /* For commutative operations, we need some commutative way of
       * combining the hashes. One option would be to XOR them but that
       * means that anything with two identical sources will hash to 0 and
       * that's common enough we probably don't want the guaranteed
       * collision. Either addition or multiplication will also work.
       */
      hash = hash0 * hash1;

      for (unsigned i = 2; i < nir_op_infos[instr->op].num_inputs; i++) {
         hash = hash_alu_src(hash, &instr->src[i],
                             nir_ssa_alu_instr_src_components(instr, i));
      }
   } else {
      for (unsigned i = 0; i < nir_op_infos[instr->op].num_inputs; i++) {
         hash = hash_alu_src(hash, &instr->src[i],
                             nir_ssa_alu_instr_src_components(instr, i));
      }
   }

   return hash;
}

static uint32_t
hash_deref(uint32_t hash, const nir_deref_instr *instr)
{
   hash = HASH(hash, instr->deref_type);
   hash = HASH(hash, instr->modes);
   hash = HASH(hash, instr->type);

   if (instr->deref_type == nir_deref_type_var)
      return HASH(hash, instr->var);

   hash = hash_src(hash, &instr->parent);

   switch (instr->deref_type) {
   case nir_deref_type_struct:
      hash = HASH(hash, instr->strct.index);
      break;

   case nir_deref_type_array:
   case nir_deref_type_ptr_as_array:
      hash = hash_src(hash, &instr->arr.index);
      hash = HASH(hash, instr->arr.in_bounds);
      break;

   case nir_deref_type_cast:
      hash = HASH(hash, instr->cast.ptr_stride);
      hash = HASH(hash, instr->cast.align_mul);
      hash = HASH(hash, instr->cast.align_offset);
      break;

   case nir_deref_type_var:
   case nir_deref_type_array_wildcard:
      /* Nothing to do */
      break;

   default:
      unreachable("Invalid instruction deref type");
   }

   return hash;
}

static uint32_t
hash_load_const(uint32_t hash, const nir_load_const_instr *instr)
{
   hash = HASH(hash, instr->def.num_components);

   if (instr->def.bit_size == 1) {
      for (unsigned i = 0; i < instr->def.num_components; i++) {
         uint8_t b = instr->value[i].b;
         hash = HASH(hash, b);
      }
   } else {
      unsigned size = instr->def.num_components * sizeof(*instr->value);
      hash = XXH32(instr->value, size, hash);
   }

   return hash;
}

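/* qsort() comparator used by hash_phi() to order phi sources by predecessor
 * block, so that the hash does not depend on the order of the sources.
 */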
static int
cmp_phi_src(const void *data1, const void *data2)
{
   nir_phi_src *src1 = *(nir_phi_src **)data1;
   nir_phi_src *src2 = *(nir_phi_src **)data2;
   return src1->pred > src2->pred ? 1 : (src1->pred == src2->pred ? 0 : -1);
}

static uint32_t
hash_phi(uint32_t hash, const nir_phi_instr *instr)
{
   hash = HASH(hash, instr->instr.block);

   /* sort sources by predecessor, since the order shouldn't matter */
   unsigned num_preds = instr->instr.block->predecessors->entries;
   NIR_VLA(nir_phi_src *, srcs, num_preds);
   unsigned i = 0;
   nir_foreach_phi_src(src, instr) {
      srcs[i++] = src;
   }

   qsort(srcs, num_preds, sizeof(nir_phi_src *), cmp_phi_src);

   for (i = 0; i < num_preds; i++) {
      hash = hash_src(hash, &srcs[i]->src);
      hash = HASH(hash, srcs[i]->pred);
   }

   return hash;
}

static uint32_t
hash_intrinsic(uint32_t hash, const nir_intrinsic_instr *instr)
{
   const nir_intrinsic_info *info = &nir_intrinsic_infos[instr->intrinsic];
   hash = HASH(hash, instr->intrinsic);

   if (info->has_dest) {
      hash = HASH(hash, instr->dest.ssa.num_components);
      hash = HASH(hash, instr->dest.ssa.bit_size);
   }

   hash = XXH32(instr->const_index, info->num_indices * sizeof(instr->const_index[0]), hash);

   for (unsigned i = 0; i < nir_intrinsic_infos[instr->intrinsic].num_srcs; i++)
      hash = hash_src(hash, &instr->src[i]);

   return hash;
}

static uint32_t
hash_tex(uint32_t hash, const nir_tex_instr *instr)
{
   hash = HASH(hash, instr->op);
   hash = HASH(hash, instr->num_srcs);

   for (unsigned i = 0; i < instr->num_srcs; i++) {
      hash = HASH(hash, instr->src[i].src_type);
      hash = hash_src(hash, &instr->src[i].src);
   }

   hash = HASH(hash, instr->coord_components);
   hash = HASH(hash, instr->sampler_dim);
   hash = HASH(hash, instr->is_array);
   hash = HASH(hash, instr->is_shadow);
   hash = HASH(hash, instr->is_new_style_shadow);
   hash = HASH(hash, instr->is_sparse);
   unsigned component = instr->component;
   hash = HASH(hash, component);
   for (unsigned i = 0; i < 4; ++i)
      for (unsigned j = 0; j < 2; ++j)
         hash = HASH(hash, instr->tg4_offsets[i][j]);
   hash = HASH(hash, instr->texture_index);
   hash = HASH(hash, instr->sampler_index);
   hash = HASH(hash, instr->texture_non_uniform);
   hash = HASH(hash, instr->sampler_non_uniform);

   return hash;
}

/* Computes a hash of an instruction for use in a hash table. Note that this
 * will only work for instructions where instr_can_rewrite() returns true, and
 * it should return identical hashes for two instructions that are the same
 * according to nir_instrs_equal().
 */

static uint32_t
hash_instr(const void *data)
{
   const nir_instr *instr = data;
   uint32_t hash = 0;

   switch (instr->type) {
   case nir_instr_type_alu:
      hash = hash_alu(hash, nir_instr_as_alu(instr));
      break;
   case nir_instr_type_deref:
      hash = hash_deref(hash, nir_instr_as_deref(instr));
      break;
   case nir_instr_type_load_const:
      hash = hash_load_const(hash, nir_instr_as_load_const(instr));
      break;
   case nir_instr_type_phi:
      hash = hash_phi(hash, nir_instr_as_phi(instr));
      break;
   case nir_instr_type_intrinsic:
      hash = hash_intrinsic(hash, nir_instr_as_intrinsic(instr));
      break;
   case nir_instr_type_tex:
      hash = hash_tex(hash, nir_instr_as_tex(instr));
      break;
   default:
      unreachable("Invalid instruction type");
   }

   return hash;
}

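/* Returns true if the two nir_srcs refer to the same value: the same SSA
 * def, or the same register with equal base offsets and (recursively) equal
 * indirects.
 */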
bool
nir_srcs_equal(nir_src src1, nir_src src2)
{
   if (src1.is_ssa) {
      if (src2.is_ssa) {
         return src1.ssa == src2.ssa;
      } else {
         return false;
      }
   } else {
      if (src2.is_ssa) {
         return false;
      } else {
         if ((src1.reg.indirect == NULL) != (src2.reg.indirect == NULL))
            return false;

         if (src1.reg.indirect) {
            if (!nir_srcs_equal(*src1.reg.indirect, *src2.reg.indirect))
               return false;
         }

         return src1.reg.reg == src2.reg.reg &&
                src1.reg.base_offset == src2.reg.base_offset;
      }
   }
}

/**
 * If \p s is an SSA value that was generated by a negation instruction,
 * that instruction is returned as a \c nir_alu_instr. Otherwise \c NULL is
 * returned.
 */
static nir_alu_instr *
get_neg_instr(nir_src s)
{
   nir_alu_instr *alu = nir_src_as_alu_instr(s);

   return alu != NULL && (alu->op == nir_op_fneg || alu->op == nir_op_ineg)
             ? alu : NULL;
}

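/* Returns true if \p c1 is the negation of \p c2 when both are interpreted
 * as values of type \p full_type. Integer types use two's-complement
 * negation; types not listed below (e.g. booleans) always compare false.
 */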
bool
nir_const_value_negative_equal(nir_const_value c1,
                               nir_const_value c2,
                               nir_alu_type full_type)
{
   assert(nir_alu_type_get_base_type(full_type) != nir_type_invalid);
   assert(nir_alu_type_get_type_size(full_type) != 0);

   switch (full_type) {
   case nir_type_float16:
      return _mesa_half_to_float(c1.u16) == -_mesa_half_to_float(c2.u16);

   case nir_type_float32:
      return c1.f32 == -c2.f32;

   case nir_type_float64:
      return c1.f64 == -c2.f64;

   case nir_type_int8:
   case nir_type_uint8:
      return c1.i8 == -c2.i8;

   case nir_type_int16:
   case nir_type_uint16:
      return c1.i16 == -c2.i16;

   case nir_type_int32:
   case nir_type_uint32:
      return c1.i32 == -c2.i32;

   case nir_type_int64:
   case nir_type_uint64:
      return c1.i64 == -c2.i64;

   default:
      break;
   }

   return false;
}

/**
 * Shallow compare of ALU srcs to determine if one is the negation of the other
 *
 * This function detects cases where \p alu1 is a constant and \p alu2 is a
 * constant that is its negation. It will also detect cases where \p alu2 is
 * an SSA value that is a \c nir_op_fneg applied to \p alu1 (and vice versa).
 *
 * This function does not detect the general case when \p alu1 and \p alu2 are
 * SSA values that are the negations of each other (e.g., \p alu1 represents
 * (a * b) and \p alu2 represents (-a * b)).
 *
 * \warning
 * It is the responsibility of the caller to ensure that the component counts,
 * write masks, and base types of the sources being compared are compatible.
 */
bool
nir_alu_srcs_negative_equal(const nir_alu_instr *alu1,
                            const nir_alu_instr *alu2,
                            unsigned src1, unsigned src2)
{
#ifndef NDEBUG
   for (unsigned i = 0; i < NIR_MAX_VEC_COMPONENTS; i++) {
      assert(nir_alu_instr_channel_used(alu1, src1, i) ==
             nir_alu_instr_channel_used(alu2, src2, i));
   }

   if (nir_alu_type_get_base_type(nir_op_infos[alu1->op].input_types[src1]) == nir_type_float) {
      assert(nir_op_infos[alu1->op].input_types[src1] ==
             nir_op_infos[alu2->op].input_types[src2]);
   } else {
      assert(nir_op_infos[alu1->op].input_types[src1] == nir_type_int);
      assert(nir_op_infos[alu2->op].input_types[src2] == nir_type_int);
   }
#endif

   if (alu1->src[src1].abs != alu2->src[src2].abs)
      return false;

   bool parity = alu1->src[src1].negate != alu2->src[src2].negate;

   /* Handling load_const instructions is tricky. */

   const nir_const_value *const const1 =
      nir_src_as_const_value(alu1->src[src1].src);

   if (const1 != NULL) {
      /* Assume that constant folding will eliminate source mods and unary
       * ops.
       */
      if (parity)
         return false;

      const nir_const_value *const const2 =
         nir_src_as_const_value(alu2->src[src2].src);

      if (const2 == NULL)
         return false;

      if (nir_src_bit_size(alu1->src[src1].src) !=
          nir_src_bit_size(alu2->src[src2].src))
         return false;

      const nir_alu_type full_type = nir_op_infos[alu1->op].input_types[src1] |
                                     nir_src_bit_size(alu1->src[src1].src);
      for (unsigned i = 0; i < NIR_MAX_VEC_COMPONENTS; i++) {
         if (nir_alu_instr_channel_used(alu1, src1, i) &&
             !nir_const_value_negative_equal(const1[alu1->src[src1].swizzle[i]],
                                             const2[alu2->src[src2].swizzle[i]],
                                             full_type))
            return false;
      }

      return true;
   }

   uint8_t alu1_swizzle[NIR_MAX_VEC_COMPONENTS] = {0};
   nir_src alu1_actual_src;
   nir_alu_instr *neg1 = get_neg_instr(alu1->src[src1].src);

   if (neg1) {
      parity = !parity;
      alu1_actual_src = neg1->src[0].src;

      for (unsigned i = 0; i < nir_ssa_alu_instr_src_components(neg1, 0); i++)
         alu1_swizzle[i] = neg1->src[0].swizzle[i];
   } else {
      alu1_actual_src = alu1->src[src1].src;

      for (unsigned i = 0; i < nir_ssa_alu_instr_src_components(alu1, src1); i++)
         alu1_swizzle[i] = i;
   }

   uint8_t alu2_swizzle[NIR_MAX_VEC_COMPONENTS] = {0};
   nir_src alu2_actual_src;
   nir_alu_instr *neg2 = get_neg_instr(alu2->src[src2].src);

   if (neg2) {
      parity = !parity;
      alu2_actual_src = neg2->src[0].src;

      for (unsigned i = 0; i < nir_ssa_alu_instr_src_components(neg2, 0); i++)
         alu2_swizzle[i] = neg2->src[0].swizzle[i];
   } else {
      alu2_actual_src = alu2->src[src2].src;

      for (unsigned i = 0; i < nir_ssa_alu_instr_src_components(alu2, src2); i++)
         alu2_swizzle[i] = i;
   }

   for (unsigned i = 0; i < nir_ssa_alu_instr_src_components(alu1, src1); i++) {
      if (alu1_swizzle[alu1->src[src1].swizzle[i]] !=
          alu2_swizzle[alu2->src[src2].swizzle[i]])
         return false;
   }

   return parity && nir_srcs_equal(alu1_actual_src, alu2_actual_src);
}

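/* Returns true if the two ALU sources are identical: same abs/negate
 * modifiers, same swizzle on every channel that is read, and the same
 * underlying nir_src.
 */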
bool
nir_alu_srcs_equal(const nir_alu_instr *alu1, const nir_alu_instr *alu2,
                   unsigned src1, unsigned src2)
{
   if (alu1->src[src1].abs != alu2->src[src2].abs ||
       alu1->src[src1].negate != alu2->src[src2].negate)
      return false;

   for (unsigned i = 0; i < nir_ssa_alu_instr_src_components(alu1, src1); i++) {
      if (alu1->src[src1].swizzle[i] != alu2->src[src2].swizzle[i])
         return false;
   }

   return nir_srcs_equal(alu1->src[src1].src, alu2->src[src2].src);
}

/* Returns "true" if two instructions are equal. Note that this will only
 * work for the subset of instructions defined by instr_can_rewrite(). Also,
 * it should only return "true" for instructions that hash_instr() will return
 * the same hash for (ignoring collisions, of course).
 */

bool
nir_instrs_equal(const nir_instr *instr1, const nir_instr *instr2)
{
   assert(instr_can_rewrite(instr1) && instr_can_rewrite(instr2));

   if (instr1->type != instr2->type)
      return false;

   switch (instr1->type) {
   case nir_instr_type_alu: {
      nir_alu_instr *alu1 = nir_instr_as_alu(instr1);
      nir_alu_instr *alu2 = nir_instr_as_alu(instr2);

      if (alu1->op != alu2->op)
         return false;

      /* We explicitly don't compare instr->exact. */

      if (alu1->no_signed_wrap != alu2->no_signed_wrap)
         return false;

      if (alu1->no_unsigned_wrap != alu2->no_unsigned_wrap)
         return false;

      /* TODO: We can probably actually do something more intelligent such
       * as allowing different numbers and taking a maximum or something
       * here. */
      if (alu1->dest.dest.ssa.num_components != alu2->dest.dest.ssa.num_components)
         return false;

      if (alu1->dest.dest.ssa.bit_size != alu2->dest.dest.ssa.bit_size)
         return false;

      if (nir_op_infos[alu1->op].algebraic_properties & NIR_OP_IS_2SRC_COMMUTATIVE) {
         if ((!nir_alu_srcs_equal(alu1, alu2, 0, 0) ||
              !nir_alu_srcs_equal(alu1, alu2, 1, 1)) &&
             (!nir_alu_srcs_equal(alu1, alu2, 0, 1) ||
              !nir_alu_srcs_equal(alu1, alu2, 1, 0)))
            return false;

         for (unsigned i = 2; i < nir_op_infos[alu1->op].num_inputs; i++) {
            if (!nir_alu_srcs_equal(alu1, alu2, i, i))
               return false;
         }
      } else {
         for (unsigned i = 0; i < nir_op_infos[alu1->op].num_inputs; i++) {
            if (!nir_alu_srcs_equal(alu1, alu2, i, i))
               return false;
         }
      }
      return true;
   }
   case nir_instr_type_deref: {
      nir_deref_instr *deref1 = nir_instr_as_deref(instr1);
      nir_deref_instr *deref2 = nir_instr_as_deref(instr2);

      if (deref1->deref_type != deref2->deref_type ||
          deref1->modes != deref2->modes ||
          deref1->type != deref2->type)
         return false;

      if (deref1->deref_type == nir_deref_type_var)
         return deref1->var == deref2->var;

      if (!nir_srcs_equal(deref1->parent, deref2->parent))
         return false;

      switch (deref1->deref_type) {
      case nir_deref_type_struct:
         if (deref1->strct.index != deref2->strct.index)
            return false;
         break;

      case nir_deref_type_array:
      case nir_deref_type_ptr_as_array:
         if (!nir_srcs_equal(deref1->arr.index, deref2->arr.index))
            return false;
         if (deref1->arr.in_bounds != deref2->arr.in_bounds)
            return false;
         break;

      case nir_deref_type_cast:
         if (deref1->cast.ptr_stride != deref2->cast.ptr_stride ||
             deref1->cast.align_mul != deref2->cast.align_mul ||
             deref1->cast.align_offset != deref2->cast.align_offset)
            return false;
         break;

      case nir_deref_type_var:
      case nir_deref_type_array_wildcard:
         /* Nothing to do */
         break;

      default:
         unreachable("Invalid instruction deref type");
      }
      return true;
   }
   case nir_instr_type_tex: {
      nir_tex_instr *tex1 = nir_instr_as_tex(instr1);
      nir_tex_instr *tex2 = nir_instr_as_tex(instr2);

      if (tex1->op != tex2->op)
         return false;

      if (tex1->num_srcs != tex2->num_srcs)
         return false;
      for (unsigned i = 0; i < tex1->num_srcs; i++) {
         if (tex1->src[i].src_type != tex2->src[i].src_type ||
             !nir_srcs_equal(tex1->src[i].src, tex2->src[i].src)) {
            return false;
         }
      }

      if (tex1->coord_components != tex2->coord_components ||
          tex1->sampler_dim != tex2->sampler_dim ||
          tex1->is_array != tex2->is_array ||
          tex1->is_shadow != tex2->is_shadow ||
          tex1->is_new_style_shadow != tex2->is_new_style_shadow ||
          tex1->component != tex2->component ||
          tex1->texture_index != tex2->texture_index ||
          tex1->sampler_index != tex2->sampler_index) {
         return false;
      }

      if (memcmp(tex1->tg4_offsets, tex2->tg4_offsets,
                 sizeof(tex1->tg4_offsets)))
         return false;

      return true;
   }
   case nir_instr_type_load_const: {
      nir_load_const_instr *load1 = nir_instr_as_load_const(instr1);
      nir_load_const_instr *load2 = nir_instr_as_load_const(instr2);

      if (load1->def.num_components != load2->def.num_components)
         return false;

      if (load1->def.bit_size != load2->def.bit_size)
         return false;

      if (load1->def.bit_size == 1) {
         for (unsigned i = 0; i < load1->def.num_components; ++i) {
            if (load1->value[i].b != load2->value[i].b)
               return false;
         }
      } else {
         unsigned size = load1->def.num_components * sizeof(*load1->value);
         if (memcmp(load1->value, load2->value, size) != 0)
            return false;
      }
      return true;
   }
   case nir_instr_type_phi: {
      nir_phi_instr *phi1 = nir_instr_as_phi(instr1);
      nir_phi_instr *phi2 = nir_instr_as_phi(instr2);

      if (phi1->instr.block != phi2->instr.block)
         return false;

      nir_foreach_phi_src(src1, phi1) {
         nir_foreach_phi_src(src2, phi2) {
            if (src1->pred == src2->pred) {
               if (!nir_srcs_equal(src1->src, src2->src))
                  return false;

               break;
            }
         }
      }

      return true;
   }
   case nir_instr_type_intrinsic: {
      nir_intrinsic_instr *intrinsic1 = nir_instr_as_intrinsic(instr1);
      nir_intrinsic_instr *intrinsic2 = nir_instr_as_intrinsic(instr2);
      const nir_intrinsic_info *info =
         &nir_intrinsic_infos[intrinsic1->intrinsic];

      if (intrinsic1->intrinsic != intrinsic2->intrinsic ||
          intrinsic1->num_components != intrinsic2->num_components)
         return false;

      if (info->has_dest && intrinsic1->dest.ssa.num_components !=
                            intrinsic2->dest.ssa.num_components)
         return false;

      if (info->has_dest && intrinsic1->dest.ssa.bit_size !=
                            intrinsic2->dest.ssa.bit_size)
         return false;

      for (unsigned i = 0; i < info->num_srcs; i++) {
         if (!nir_srcs_equal(intrinsic1->src[i], intrinsic2->src[i]))
            return false;
      }

      for (unsigned i = 0; i < info->num_indices; i++) {
         if (intrinsic1->const_index[i] != intrinsic2->const_index[i])
            return false;
      }

      return true;
   }
   case nir_instr_type_call:
   case nir_instr_type_jump:
   case nir_instr_type_ssa_undef:
   case nir_instr_type_parallel_copy:
   default:
      unreachable("Invalid instruction type");
   }

   unreachable("All cases in the above switch should return");
}

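/* Returns the SSA destination of an instruction handled by this file,
 * asserting that the destination really is in SSA form.
 */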
static nir_ssa_def *
nir_instr_get_dest_ssa_def(nir_instr *instr)
{
   switch (instr->type) {
   case nir_instr_type_alu:
      assert(nir_instr_as_alu(instr)->dest.dest.is_ssa);
      return &nir_instr_as_alu(instr)->dest.dest.ssa;
   case nir_instr_type_deref:
      assert(nir_instr_as_deref(instr)->dest.is_ssa);
      return &nir_instr_as_deref(instr)->dest.ssa;
   case nir_instr_type_load_const:
      return &nir_instr_as_load_const(instr)->def;
   case nir_instr_type_phi:
      assert(nir_instr_as_phi(instr)->dest.is_ssa);
      return &nir_instr_as_phi(instr)->dest.ssa;
   case nir_instr_type_intrinsic:
      assert(nir_instr_as_intrinsic(instr)->dest.is_ssa);
      return &nir_instr_as_intrinsic(instr)->dest.ssa;
   case nir_instr_type_tex:
      assert(nir_instr_as_tex(instr)->dest.is_ssa);
      return &nir_instr_as_tex(instr)->dest.ssa;
   default:
      unreachable("We never ask for any of these");
   }
}

static bool
cmp_func(const void *data1, const void *data2)
{
   return nir_instrs_equal(data1, data2);
}

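/* The functions below are the public interface to the instruction set.  A
 * simplified sketch of how a user might drive them (a real pass such as
 * nir_opt_cse walks the dominance tree rather than a flat block loop, so
 * that a matching instruction always dominates the one being rewritten):
 *
 *    struct set *set = nir_instr_set_create(NULL);
 *    bool progress = false;
 *    nir_foreach_block(block, impl) {
 *       nir_foreach_instr_safe(instr, block)
 *          progress |= nir_instr_set_add_or_rewrite(set, instr, NULL);
 *    }
 *    nir_instr_set_destroy(set);
 */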
struct set *
nir_instr_set_create(void *mem_ctx)
{
   return _mesa_set_create(mem_ctx, hash_instr, cmp_func);
}

void
nir_instr_set_destroy(struct set *instr_set)
{
   _mesa_set_destroy(instr_set, NULL);
}

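/* Adds \p instr to the set unless an equivalent instruction is already
 * present; instructions rejected by instr_can_rewrite() are ignored.  If an
 * equivalent instruction exists and \p cond_function (when non-NULL)
 * approves the pair, all uses of \p instr's destination are rewritten to the
 * existing instruction, \p instr is removed, and true is returned.
 * Otherwise the set keeps (or switches to) \p instr as the representative
 * and false is returned.
 */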
bool
nir_instr_set_add_or_rewrite(struct set *instr_set, nir_instr *instr,
                             bool (*cond_function) (const nir_instr *a,
                                                    const nir_instr *b))
{
   if (!instr_can_rewrite(instr))
      return false;

   struct set_entry *e = _mesa_set_search_or_add(instr_set, instr, NULL);
   nir_instr *match = (nir_instr *) e->key;
   if (match == instr)
      return false;

   if (!cond_function || cond_function(match, instr)) {
      /* rewrite instruction if condition is matched */
      nir_ssa_def *def = nir_instr_get_dest_ssa_def(instr);
      nir_ssa_def *new_def = nir_instr_get_dest_ssa_def(match);

      /* It's safe to replace an exact instruction with an inexact one as
       * long as we make it exact. If we got here, the two instructions are
       * exactly identical in every other way so, once we've set the exact
       * bit, they are the same.
       */
      if (instr->type == nir_instr_type_alu && nir_instr_as_alu(instr)->exact)
         nir_instr_as_alu(match)->exact = true;

      nir_ssa_def_rewrite_uses(def, new_def);

      nir_instr_remove(instr);

      return true;
   } else {
      /* otherwise, replace hashed instruction */
      e->key = instr;
      return false;
   }
}

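/* Removes the entry matching \p instr from the set, if one is present. */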
void
nir_instr_set_remove(struct set *instr_set, nir_instr *instr)
{
   if (!instr_can_rewrite(instr))
      return;

   struct set_entry *entry = _mesa_set_search(instr_set, instr);
   if (entry)
      _mesa_set_remove(instr_set, entry);
}