• Home
  • Line#
  • Scopes#
  • Navigate#
  • Raw
  • Download
1 /*
2  * Copyright © 2018 Intel Corporation
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice (including the next
12  * paragraph) shall be included in all copies or substantial portions of the
13  * Software.
14  *
15  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
18  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
21  * DEALINGS IN THE SOFTWARE.
22  */
23 
24 #include <gtest/gtest.h>
25 
26 #include "nir.h"
27 #include "nir_builder.h"
28 
29 namespace {
30 
31 class nir_vars_test : public ::testing::Test {
32 protected:
33    nir_vars_test();
34    ~nir_vars_test();
35 
create_var(nir_variable_mode mode,const glsl_type * type,const char * name)36    nir_variable *create_var(nir_variable_mode mode, const glsl_type *type,
37                             const char *name) {
38       if (mode == nir_var_function_temp)
39          return nir_local_variable_create(b->impl, type, name);
40       else
41          return nir_variable_create(b->shader, mode, type, name);
42    }
43 
create_int(nir_variable_mode mode,const char * name)44    nir_variable *create_int(nir_variable_mode mode, const char *name) {
45       return create_var(mode, glsl_int_type(), name);
46    }
47 
create_ivec2(nir_variable_mode mode,const char * name)48    nir_variable *create_ivec2(nir_variable_mode mode, const char *name) {
49       return create_var(mode, glsl_vector_type(GLSL_TYPE_INT, 2), name);
50    }
51 
create_ivec4(nir_variable_mode mode,const char * name)52    nir_variable *create_ivec4(nir_variable_mode mode, const char *name) {
53       return create_var(mode, glsl_vector_type(GLSL_TYPE_INT, 4), name);
54    }
55 
create_many_int(nir_variable_mode mode,const char * prefix,unsigned count)56    nir_variable **create_many_int(nir_variable_mode mode, const char *prefix, unsigned count) {
57       nir_variable **result = (nir_variable **)linear_alloc_child(lin_ctx, sizeof(nir_variable *) * count);
58       for (unsigned i = 0; i < count; i++)
59          result[i] = create_int(mode, linear_asprintf(lin_ctx, "%s%u", prefix, i));
60       return result;
61    }
62 
create_many_ivec2(nir_variable_mode mode,const char * prefix,unsigned count)63    nir_variable **create_many_ivec2(nir_variable_mode mode, const char *prefix, unsigned count) {
64       nir_variable **result = (nir_variable **)linear_alloc_child(lin_ctx, sizeof(nir_variable *) * count);
65       for (unsigned i = 0; i < count; i++)
66          result[i] = create_ivec2(mode, linear_asprintf(lin_ctx, "%s%u", prefix, i));
67       return result;
68    }
69 
create_many_ivec4(nir_variable_mode mode,const char * prefix,unsigned count)70    nir_variable **create_many_ivec4(nir_variable_mode mode, const char *prefix, unsigned count) {
71       nir_variable **result = (nir_variable **)linear_alloc_child(lin_ctx, sizeof(nir_variable *) * count);
72       for (unsigned i = 0; i < count; i++)
73          result[i] = create_ivec4(mode, linear_asprintf(lin_ctx, "%s%u", prefix, i));
74       return result;
75    }
76 
77    unsigned count_derefs(nir_deref_type deref_type);
78    unsigned count_intrinsics(nir_intrinsic_op intrinsic);
count_function_temp_vars(void)79    unsigned count_function_temp_vars(void) {
80       return exec_list_length(&b->impl->locals);
81    }
82 
count_shader_temp_vars(void)83    unsigned count_shader_temp_vars(void) {
84       unsigned count = 0;
85       nir_foreach_variable_with_modes(var, b->shader, nir_var_shader_temp)
86          count++;
87       return count;
88    }
89 
90    nir_intrinsic_instr *get_intrinsic(nir_intrinsic_op intrinsic,
91                                       unsigned index);
92 
93    nir_deref_instr *get_deref(nir_deref_type deref_type,
94                               unsigned index);
95    void *mem_ctx;
96    void *lin_ctx;
97 
98    nir_builder *b;
99 };
100 
nir_vars_test::nir_vars_test()
{
   /* Take a reference on the glsl_type singleton before any types are
    * created; paired with the decref in the destructor.
    */
   glsl_type_singleton_init_or_ref();

   /* Everything allocated by a test hangs off mem_ctx, so teardown is a
    * single ralloc_free.
    */
   mem_ctx = ralloc_context(NULL);
   lin_ctx = linear_alloc_parent(mem_ctx, 0);
   static const nir_shader_compiler_options options = { };
   b = rzalloc(mem_ctx, nir_builder);
   nir_builder_init_simple_shader(b, mem_ctx, MESA_SHADER_COMPUTE, &options);
}
111 
nir_vars_test::~nir_vars_test()
{
   /* Dump the shader on failure to make test diagnosis easier; must happen
    * before mem_ctx (which owns the shader) is freed.
    */
   if (HasFailure()) {
      printf("\nShader from the failed test:\n\n");
      nir_print_shader(b->shader, stdout);
   }

   ralloc_free(mem_ctx);

   /* Drop the reference taken in the constructor. */
   glsl_type_singleton_decref();
}
123 
124 unsigned
count_intrinsics(nir_intrinsic_op intrinsic)125 nir_vars_test::count_intrinsics(nir_intrinsic_op intrinsic)
126 {
127    unsigned count = 0;
128    nir_foreach_block(block, b->impl) {
129       nir_foreach_instr(instr, block) {
130          if (instr->type != nir_instr_type_intrinsic)
131             continue;
132          nir_intrinsic_instr *intrin = nir_instr_as_intrinsic(instr);
133          if (intrin->intrinsic == intrinsic)
134             count++;
135       }
136    }
137    return count;
138 }
139 
140 unsigned
count_derefs(nir_deref_type deref_type)141 nir_vars_test::count_derefs(nir_deref_type deref_type)
142 {
143    unsigned count = 0;
144    nir_foreach_block(block, b->impl) {
145       nir_foreach_instr(instr, block) {
146          if (instr->type != nir_instr_type_deref)
147             continue;
148          nir_deref_instr *intrin = nir_instr_as_deref(instr);
149          if (intrin->deref_type == deref_type)
150             count++;
151       }
152    }
153    return count;
154 }
155 
156 nir_intrinsic_instr *
get_intrinsic(nir_intrinsic_op intrinsic,unsigned index)157 nir_vars_test::get_intrinsic(nir_intrinsic_op intrinsic,
158                              unsigned index)
159 {
160    nir_foreach_block(block, b->impl) {
161       nir_foreach_instr(instr, block) {
162          if (instr->type != nir_instr_type_intrinsic)
163             continue;
164          nir_intrinsic_instr *intrin = nir_instr_as_intrinsic(instr);
165          if (intrin->intrinsic == intrinsic) {
166             if (index == 0)
167                return intrin;
168             index--;
169          }
170       }
171    }
172    return NULL;
173 }
174 
175 nir_deref_instr *
get_deref(nir_deref_type deref_type,unsigned index)176 nir_vars_test::get_deref(nir_deref_type deref_type,
177                          unsigned index)
178 {
179    nir_foreach_block(block, b->impl) {
180       nir_foreach_instr(instr, block) {
181          if (instr->type != nir_instr_type_deref)
182             continue;
183          nir_deref_instr *deref = nir_instr_as_deref(instr);
184          if (deref->deref_type == deref_type) {
185             if (index == 0)
186                return deref;
187             index--;
188          }
189       }
190    }
191    return NULL;
192 }
193 
194 /* Allow grouping the tests while still sharing the helpers. */
/* Each subclass is deliberately empty: it only gives related TEST_F cases a
 * distinct fixture name in gtest output while reusing nir_vars_test helpers.
 */
class nir_redundant_load_vars_test : public nir_vars_test {};
class nir_copy_prop_vars_test : public nir_vars_test {};
class nir_dead_write_vars_test : public nir_vars_test {};
class nir_combine_stores_test : public nir_vars_test {};
class nir_split_vars_test : public nir_vars_test {};
class nir_remove_dead_variables_test : public nir_vars_test {};
201 
202 } // namespace
203 
204 static nir_ssa_def *
nir_load_var_volatile(nir_builder * b,nir_variable * var)205 nir_load_var_volatile(nir_builder *b, nir_variable *var)
206 {
207    return nir_load_deref_with_access(b, nir_build_deref_var(b, var),
208                                      ACCESS_VOLATILE);
209 }
210 
211 static void
nir_store_var_volatile(nir_builder * b,nir_variable * var,nir_ssa_def * value,nir_component_mask_t writemask)212 nir_store_var_volatile(nir_builder *b, nir_variable *var,
213                        nir_ssa_def *value, nir_component_mask_t writemask)
214 {
215    nir_store_deref_with_access(b, nir_build_deref_var(b, var),
216                                value, writemask, ACCESS_VOLATILE);
217 }
218 
TEST_F(nir_redundant_load_vars_test, duplicated_load)
{
   /* Two loads of the same input in a single block; copy-prop should
    * combine them, leaving exactly one load.
    */
   nir_variable *input = create_int(nir_var_shader_in, "in");
   nir_variable **outputs = create_many_int(nir_var_shader_out, "out", 2);

   nir_store_var(b, outputs[0], nir_load_var(b, input), 1);
   nir_store_var(b, outputs[1], nir_load_var(b, input), 1);

   nir_validate_shader(b->shader, NULL);

   ASSERT_EQ(count_intrinsics(nir_intrinsic_load_deref), 2);

   const bool did_progress = nir_opt_copy_prop_vars(b->shader);
   EXPECT_TRUE(did_progress);

   nir_validate_shader(b->shader, NULL);

   ASSERT_EQ(count_intrinsics(nir_intrinsic_load_deref), 1);
}
240 
TEST_F(nir_redundant_load_vars_test, duplicated_load_volatile)
{
   /* Load a variable three times in the same block, the middle load being
    * volatile.  Only the non-volatile loads may be combined.
    */

   nir_variable *in = create_int(nir_var_shader_in, "in");
   nir_variable **out = create_many_int(nir_var_shader_out, "out", 3);

   /* Volatile prevents us from eliminating a load by combining it with
    * another.  It shouldn't however, prevent us from combining other
    * non-volatile loads.
    */
   nir_store_var(b, out[0], nir_load_var(b, in), 1);
   nir_store_var(b, out[1], nir_load_var_volatile(b, in), 1);
   nir_store_var(b, out[2], nir_load_var(b, in), 1);

   nir_validate_shader(b->shader, NULL);

   ASSERT_EQ(count_intrinsics(nir_intrinsic_load_deref), 3);

   bool progress = nir_opt_copy_prop_vars(b->shader);
   EXPECT_TRUE(progress);

   nir_validate_shader(b->shader, NULL);

   /* The volatile load survives; the third load was combined with the first. */
   ASSERT_EQ(count_intrinsics(nir_intrinsic_load_deref), 2);

   nir_intrinsic_instr *first_store = get_intrinsic(nir_intrinsic_store_deref, 0);
   ASSERT_TRUE(first_store->src[1].is_ssa);

   nir_intrinsic_instr *third_store = get_intrinsic(nir_intrinsic_store_deref, 2);
   ASSERT_TRUE(third_store->src[1].is_ssa);

   /* Stores 0 and 2 read the same SSA def, proving the loads were combined. */
   EXPECT_EQ(first_store->src[1].ssa, third_store->src[1].ssa);
}
275 
TEST_F(nir_redundant_load_vars_test, duplicated_load_in_two_blocks)
{
   /* Two loads of the same input in different blocks; the value from the
    * first load is still valid in the second block, so one load goes away.
    */
   nir_variable *input = create_int(nir_var_shader_in, "in");
   nir_variable **outputs = create_many_int(nir_var_shader_out, "out", 2);

   nir_store_var(b, outputs[0], nir_load_var(b, input), 1);

   /* An empty if statement forces a block boundary between the stores. */
   nir_pop_if(b, nir_push_if(b, nir_imm_int(b, 0)));

   nir_store_var(b, outputs[1], nir_load_var(b, input), 1);

   nir_validate_shader(b->shader, NULL);

   ASSERT_EQ(count_intrinsics(nir_intrinsic_load_deref), 2);

   const bool did_progress = nir_opt_copy_prop_vars(b->shader);
   EXPECT_TRUE(did_progress);

   nir_validate_shader(b->shader, NULL);

   ASSERT_EQ(count_intrinsics(nir_intrinsic_load_deref), 1);
}
301 
TEST_F(nir_redundant_load_vars_test, invalidate_inside_if_block)
{
   /* Load variables, then write to some of them in different branches of the
    * if statement.  They should be invalidated accordingly.
    */

   nir_variable **g = create_many_int(nir_var_shader_temp, "g", 3);
   nir_variable **out = create_many_int(nir_var_shader_out, "out", 3);

   /* Prime copy-prop's value tracking with one load of each variable. */
   nir_load_var(b, g[0]);
   nir_load_var(b, g[1]);
   nir_load_var(b, g[2]);

   /* g[0] is written in the then-branch and g[1] in the else-branch;
    * g[2] is untouched by the if.
    */
   nir_if *if_stmt = nir_push_if(b, nir_imm_int(b, 0));
   nir_store_var(b, g[0], nir_imm_int(b, 10), 1);

   nir_push_else(b, if_stmt);
   nir_store_var(b, g[1], nir_imm_int(b, 20), 1);

   nir_pop_if(b, if_stmt);

   nir_store_var(b, out[0], nir_load_var(b, g[0]), 1);
   nir_store_var(b, out[1], nir_load_var(b, g[1]), 1);
   nir_store_var(b, out[2], nir_load_var(b, g[2]), 1);

   nir_validate_shader(b->shader, NULL);

   bool progress = nir_opt_copy_prop_vars(b->shader);
   EXPECT_TRUE(progress);

   /* There are 3 initial loads, plus 2 loads for the values invalidated
    * inside the if statement.
    */
   ASSERT_EQ(count_intrinsics(nir_intrinsic_load_deref), 5);

   /* We only load g[2] once. */
   unsigned g2_load_count = 0;
   for (int i = 0; i < 5; i++) {
         nir_intrinsic_instr *load = get_intrinsic(nir_intrinsic_load_deref, i);
         if (nir_intrinsic_get_var(load, 0) == g[2])
            g2_load_count++;
   }
   EXPECT_EQ(g2_load_count, 1);
}
346 
TEST_F(nir_redundant_load_vars_test, invalidate_live_load_in_the_end_of_loop)
{
   /* Invalidating a load in the end of loop body will apply to the whole loop
    * body.
    */

   nir_variable *v = create_int(nir_var_mem_ssbo, "v");

   nir_load_var(b, v);

   nir_loop *loop = nir_push_loop(b);

   nir_if *if_stmt = nir_push_if(b, nir_imm_int(b, 0));
   nir_jump(b, nir_jump_break);
   nir_pop_if(b, if_stmt);

   /* The store after this load invalidates v for the next iteration, so
    * the in-loop load cannot be combined with the one before the loop.
    */
   nir_load_var(b, v);
   nir_store_var(b, v, nir_imm_int(b, 10), 1);

   nir_pop_loop(b, loop);

   bool progress = nir_opt_copy_prop_vars(b->shader);
   ASSERT_FALSE(progress);
}
371 
TEST_F(nir_copy_prop_vars_test, simple_copies)
{
   /* Chain of copies in -> temp -> out.  After copy propagation both copy
    * instructions should read from the same source deref.
    */
   nir_variable *input  = create_int(nir_var_shader_in,     "in");
   nir_variable *middle = create_int(nir_var_function_temp, "temp");
   nir_variable *output = create_int(nir_var_shader_out,    "out");

   nir_copy_var(b, middle, input);
   nir_copy_var(b, output, middle);

   nir_validate_shader(b->shader, NULL);

   const bool did_progress = nir_opt_copy_prop_vars(b->shader);
   EXPECT_TRUE(did_progress);

   nir_validate_shader(b->shader, NULL);

   ASSERT_EQ(count_intrinsics(nir_intrinsic_copy_deref), 2);

   nir_intrinsic_instr *copies[2];
   for (unsigned i = 0; i < 2; i++) {
      copies[i] = get_intrinsic(nir_intrinsic_copy_deref, i);
      ASSERT_TRUE(copies[i]->src[1].is_ssa);
   }

   /* Both copies now read the same source. */
   EXPECT_EQ(copies[0]->src[1].ssa, copies[1]->src[1].ssa);
}
398 
TEST_F(nir_copy_prop_vars_test, self_copy)
{
   /* Copying a variable onto itself is a no-op; the copy should be deleted. */
   nir_variable *var = create_int(nir_var_mem_ssbo, "v");

   nir_copy_var(b, var, var);

   nir_validate_shader(b->shader, NULL);

   const bool did_progress = nir_opt_copy_prop_vars(b->shader);
   EXPECT_TRUE(did_progress);

   nir_validate_shader(b->shader, NULL);

   ASSERT_EQ(count_intrinsics(nir_intrinsic_copy_deref), 0);
}
414 
TEST_F(nir_copy_prop_vars_test, simple_store_load)
{
   /* Store a value to v[0], load it back, and store the result to v[1].
    * After copy propagation both stores should use the original SSA value.
    */
   nir_variable **vars = create_many_ivec2(nir_var_function_temp, "v", 2);
   const unsigned writemask = 0x3;  /* both components */

   nir_ssa_def *stored_value = nir_imm_ivec2(b, 10, 20);
   nir_store_var(b, vars[0], stored_value, writemask);

   nir_ssa_def *read_value = nir_load_var(b, vars[0]);
   nir_store_var(b, vars[1], read_value, writemask);

   nir_validate_shader(b->shader, NULL);

   const bool did_progress = nir_opt_copy_prop_vars(b->shader);
   EXPECT_TRUE(did_progress);

   nir_validate_shader(b->shader, NULL);

   ASSERT_EQ(count_intrinsics(nir_intrinsic_store_deref), 2);

   for (unsigned i = 0; i < 2; i++) {
      nir_intrinsic_instr *store = get_intrinsic(nir_intrinsic_store_deref, i);
      ASSERT_TRUE(store->src[1].is_ssa);
      EXPECT_EQ(store->src[1].ssa, stored_value);
   }
}
441 
TEST_F(nir_copy_prop_vars_test, store_store_load)
{
   /* Two full stores to v[0] followed by a load: the load should see the
    * second (most recent) stored value.
    */
   nir_variable **v = create_many_ivec2(nir_var_function_temp, "v", 2);
   unsigned mask = 1 | 2;

   nir_ssa_def *first_value = nir_imm_ivec2(b, 10, 20);
   nir_store_var(b, v[0], first_value, mask);

   nir_ssa_def *second_value = nir_imm_ivec2(b, 30, 40);
   nir_store_var(b, v[0], second_value, mask);

   nir_ssa_def *read_value = nir_load_var(b, v[0]);
   nir_store_var(b, v[1], read_value, mask);

   nir_validate_shader(b->shader, NULL);

   bool progress = nir_opt_copy_prop_vars(b->shader);
   EXPECT_TRUE(progress);

   nir_validate_shader(b->shader, NULL);

   ASSERT_EQ(count_intrinsics(nir_intrinsic_store_deref), 3);

   /* Store to v[1] should use second_value directly. */
   nir_intrinsic_instr *store_to_v1 = get_intrinsic(nir_intrinsic_store_deref, 2);
   ASSERT_EQ(nir_intrinsic_get_var(store_to_v1, 0), v[1]);
   ASSERT_TRUE(store_to_v1->src[1].is_ssa);
   EXPECT_EQ(store_to_v1->src[1].ssa, second_value);
}
471 
TEST_F(nir_copy_prop_vars_test, store_store_load_different_components)
{
   /* Two stores that write disjoint components of v[0]: the later store
    * must not shadow the component written by the earlier one.
    */
   nir_variable **v = create_many_ivec2(nir_var_function_temp, "v", 2);

   /* first_value writes only component 1 (y). */
   nir_ssa_def *first_value = nir_imm_ivec2(b, 10, 20);
   nir_store_var(b, v[0], first_value, 1 << 1);

   /* second_value writes only component 0 (x). */
   nir_ssa_def *second_value = nir_imm_ivec2(b, 30, 40);
   nir_store_var(b, v[0], second_value, 1 << 0);

   /* The final store reads component 1, which still holds first_value.y. */
   nir_ssa_def *read_value = nir_load_var(b, v[0]);
   nir_store_var(b, v[1], read_value, 1 << 1);

   nir_validate_shader(b->shader, NULL);

   bool progress = nir_opt_copy_prop_vars(b->shader);
   EXPECT_TRUE(progress);

   nir_validate_shader(b->shader, NULL);

   /* Fold the immediates so the component value can be inspected below. */
   nir_opt_constant_folding(b->shader);
   nir_validate_shader(b->shader, NULL);

   ASSERT_EQ(count_intrinsics(nir_intrinsic_store_deref), 3);

   /* Store to v[1] should use first_value directly.  The write of
    * second_value did not overwrite the component it uses.
    */
   nir_intrinsic_instr *store_to_v1 = get_intrinsic(nir_intrinsic_store_deref, 2);
   ASSERT_EQ(nir_intrinsic_get_var(store_to_v1, 0), v[1]);
   ASSERT_EQ(nir_src_comp_as_uint(store_to_v1->src[1], 1), 20);
}
504 
TEST_F(nir_copy_prop_vars_test, store_store_load_different_components_in_many_blocks)
{
   /* Same as store_store_load_different_components, but with block
    * boundaries between the stores to exercise cross-block tracking.
    */
   nir_variable **v = create_many_ivec2(nir_var_function_temp, "v", 2);

   /* first_value writes only component 1 (y). */
   nir_ssa_def *first_value = nir_imm_ivec2(b, 10, 20);
   nir_store_var(b, v[0], first_value, 1 << 1);

   /* Adding an if statement will cause blocks to be created. */
   nir_pop_if(b, nir_push_if(b, nir_imm_int(b, 0)));

   /* second_value writes only component 0 (x). */
   nir_ssa_def *second_value = nir_imm_ivec2(b, 30, 40);
   nir_store_var(b, v[0], second_value, 1 << 0);

   /* Adding an if statement will cause blocks to be created. */
   nir_pop_if(b, nir_push_if(b, nir_imm_int(b, 0)));

   nir_ssa_def *read_value = nir_load_var(b, v[0]);
   nir_store_var(b, v[1], read_value, 1 << 1);

   nir_validate_shader(b->shader, NULL);

   bool progress = nir_opt_copy_prop_vars(b->shader);
   EXPECT_TRUE(progress);

   nir_validate_shader(b->shader, NULL);

   /* Fold the immediates so the component value can be inspected below. */
   nir_opt_constant_folding(b->shader);
   nir_validate_shader(b->shader, NULL);

   ASSERT_EQ(count_intrinsics(nir_intrinsic_store_deref), 3);

   /* Store to v[1] should use first_value directly.  The write of
    * second_value did not overwrite the component it uses.
    */
   nir_intrinsic_instr *store_to_v1 = get_intrinsic(nir_intrinsic_store_deref, 2);
   ASSERT_EQ(nir_intrinsic_get_var(store_to_v1, 0), v[1]);
   ASSERT_EQ(nir_src_comp_as_uint(store_to_v1->src[1], 1), 20);
}
543 
TEST_F(nir_copy_prop_vars_test, store_volatile)
{
   /* Three full stores to v[0], the middle one volatile; the volatile store
    * must act as a barrier for store combining.
    */
   nir_variable **v = create_many_ivec2(nir_var_function_temp, "v", 2);
   unsigned mask = 1 | 2;

   nir_ssa_def *first_value = nir_imm_ivec2(b, 10, 20);
   nir_store_var(b, v[0], first_value, mask);

   nir_ssa_def *second_value = nir_imm_ivec2(b, 30, 40);
   nir_store_var_volatile(b, v[0], second_value, mask);

   nir_ssa_def *third_value = nir_imm_ivec2(b, 50, 60);
   nir_store_var(b, v[0], third_value, mask);

   nir_ssa_def *read_value = nir_load_var(b, v[0]);
   nir_store_var(b, v[1], read_value, mask);

   nir_validate_shader(b->shader, NULL);

   bool progress = nir_opt_copy_prop_vars(b->shader);
   EXPECT_TRUE(progress);

   nir_validate_shader(b->shader, NULL);

   ASSERT_EQ(count_intrinsics(nir_intrinsic_store_deref), 4);

   /* Our approach here is a bit scorched-earth.  We expect the volatile store
    * in the middle to cause both that store and the one before it to be kept.
    * Technically, volatile only prevents combining the volatile store with
    * another store and one could argue that the store before the volatile and
    * the one after it could be combined.  However, it seems safer to just
    * treat a volatile store like an atomic and prevent any combining across
    * it.
    */
   nir_intrinsic_instr *store_to_v1 = get_intrinsic(nir_intrinsic_store_deref, 3);
   ASSERT_EQ(nir_intrinsic_get_var(store_to_v1, 0), v[1]);
   ASSERT_TRUE(store_to_v1->src[1].is_ssa);
   EXPECT_EQ(store_to_v1->src[1].ssa, third_value);
}
583 
TEST_F(nir_copy_prop_vars_test, self_copy_volatile)
{
   /* Four self-copies of v: only the two non-volatile ones may be removed;
    * copies with a volatile source or destination must be preserved.
    */
   nir_variable *v = create_int(nir_var_mem_ssbo, "v");

   nir_copy_var(b, v, v);
   /* nir_copy_deref_with_access(b, dst, src, dst_access, src_access):
    * second copy has a volatile source, third a volatile destination.
    */
   nir_copy_deref_with_access(b, nir_build_deref_var(b, v),
                                 nir_build_deref_var(b, v),
                                 (gl_access_qualifier)0, ACCESS_VOLATILE);
   nir_copy_deref_with_access(b, nir_build_deref_var(b, v),
                                 nir_build_deref_var(b, v),
                                 ACCESS_VOLATILE, (gl_access_qualifier)0);
   nir_copy_var(b, v, v);

   nir_validate_shader(b->shader, NULL);

   bool progress = nir_opt_copy_prop_vars(b->shader);
   EXPECT_TRUE(progress);

   nir_validate_shader(b->shader, NULL);

   ASSERT_EQ(count_intrinsics(nir_intrinsic_copy_deref), 2);

   /* The two volatile copies remain, in their original order, with their
    * access flags intact.
    */
   nir_intrinsic_instr *first = get_intrinsic(nir_intrinsic_copy_deref, 0);
   nir_intrinsic_instr *second = get_intrinsic(nir_intrinsic_copy_deref, 1);
   ASSERT_EQ(nir_intrinsic_src_access(first), ACCESS_VOLATILE);
   ASSERT_EQ(nir_intrinsic_dst_access(first), (gl_access_qualifier)0);
   ASSERT_EQ(nir_intrinsic_src_access(second), (gl_access_qualifier)0);
   ASSERT_EQ(nir_intrinsic_dst_access(second), ACCESS_VOLATILE);
}
614 
TEST_F(nir_copy_prop_vars_test, memory_barrier_in_two_blocks)
{
   /* A memory barrier between a store and its load must keep that load;
    * loads of values stored before the barrier and loaded before it can
    * still be propagated.
    */
   nir_variable **v = create_many_int(nir_var_mem_ssbo, "v", 4);

   nir_store_var(b, v[0], nir_imm_int(b, 1), 1);
   nir_store_var(b, v[1], nir_imm_int(b, 2), 1);

   /* Split into many blocks. */
   nir_pop_if(b, nir_push_if(b, nir_imm_int(b, 0)));

   /* This load of v[0] happens before the barrier, so the stored value
    * can be propagated into it.
    */
   nir_store_var(b, v[2], nir_load_var(b, v[0]), 1);

   nir_scoped_memory_barrier(b, NIR_SCOPE_DEVICE, NIR_MEMORY_ACQ_REL,
                             nir_var_mem_ssbo);

   /* This load of v[1] is after the barrier and must remain. */
   nir_store_var(b, v[3], nir_load_var(b, v[1]), 1);

   bool progress = nir_opt_copy_prop_vars(b->shader);
   ASSERT_TRUE(progress);

   /* Only the second load will remain after the optimization. */
   ASSERT_EQ(1, count_intrinsics(nir_intrinsic_load_deref));
   nir_intrinsic_instr *load = get_intrinsic(nir_intrinsic_load_deref, 0);
   ASSERT_EQ(nir_intrinsic_get_var(load, 0), v[1]);
}
640 
TEST_F(nir_redundant_load_vars_test, acquire_barrier_prevents_load_removal)
{
   /* An acquire barrier can make new SSBO contents visible, so the load
    * after the barrier must not be combined with the one before it.
    */
   nir_variable **ssbo = create_many_int(nir_var_mem_ssbo, "x", 1);

   nir_load_var(b, ssbo[0]);

   nir_scoped_memory_barrier(b, NIR_SCOPE_DEVICE, NIR_MEMORY_ACQUIRE,
                             nir_var_mem_ssbo);

   nir_load_var(b, ssbo[0]);

   const bool did_progress = nir_opt_copy_prop_vars(b->shader);
   ASSERT_FALSE(did_progress);

   ASSERT_EQ(2, count_intrinsics(nir_intrinsic_load_deref));
}
657 
TEST_F(nir_redundant_load_vars_test, acquire_barrier_prevents_same_mode_load_removal)
{
   /* The acquire barrier invalidates every variable of the barrier's mode,
    * so none of the re-loads after it may be removed.
    */
   nir_variable **ssbo = create_many_int(nir_var_mem_ssbo, "x", 2);

   nir_load_var(b, ssbo[0]);
   nir_load_var(b, ssbo[1]);

   nir_scoped_memory_barrier(b, NIR_SCOPE_DEVICE, NIR_MEMORY_ACQUIRE,
                             nir_var_mem_ssbo);

   nir_load_var(b, ssbo[0]);
   nir_load_var(b, ssbo[1]);

   const bool did_progress = nir_opt_copy_prop_vars(b->shader);
   ASSERT_FALSE(did_progress);

   ASSERT_EQ(4, count_intrinsics(nir_intrinsic_load_deref));
}
676 
TEST_F(nir_redundant_load_vars_test, acquire_barrier_allows_different_mode_load_removal)
{
   /* The barrier only covers nir_var_mem_ssbo, so re-loads of the shared
    * variables may still be removed while the SSBO re-loads must stay.
    */
   nir_variable **x = create_many_int(nir_var_mem_ssbo, "x", 2);
   nir_variable **y = create_many_int(nir_var_mem_shared, "y", 2);

   nir_load_var(b, x[0]);
   nir_load_var(b, x[1]);
   nir_load_var(b, y[0]);
   nir_load_var(b, y[1]);

   nir_scoped_memory_barrier(b, NIR_SCOPE_DEVICE, NIR_MEMORY_ACQUIRE,
                             nir_var_mem_ssbo);

   nir_load_var(b, x[0]);
   nir_load_var(b, x[1]);
   nir_load_var(b, y[0]);
   nir_load_var(b, y[1]);

   bool progress = nir_opt_copy_prop_vars(b->shader);
   ASSERT_TRUE(progress);

   /* 8 loads minus the two removed shared-variable re-loads. */
   ASSERT_EQ(6, count_intrinsics(nir_intrinsic_load_deref));

   nir_intrinsic_instr *load;

   /* Loads before the barrier are all intact... */
   load = get_intrinsic(nir_intrinsic_load_deref, 0);
   ASSERT_EQ(nir_intrinsic_get_var(load, 0), x[0]);
   load = get_intrinsic(nir_intrinsic_load_deref, 1);
   ASSERT_EQ(nir_intrinsic_get_var(load, 0), x[1]);

   load = get_intrinsic(nir_intrinsic_load_deref, 2);
   ASSERT_EQ(nir_intrinsic_get_var(load, 0), y[0]);
   load = get_intrinsic(nir_intrinsic_load_deref, 3);
   ASSERT_EQ(nir_intrinsic_get_var(load, 0), y[1]);

   /* ...while after the barrier only the SSBO loads remain. */
   load = get_intrinsic(nir_intrinsic_load_deref, 4);
   ASSERT_EQ(nir_intrinsic_get_var(load, 0), x[0]);
   load = get_intrinsic(nir_intrinsic_load_deref, 5);
   ASSERT_EQ(nir_intrinsic_get_var(load, 0), x[1]);
}
717 
TEST_F(nir_redundant_load_vars_test, release_barrier_allows_load_removal)
{
   /* A release barrier publishes our writes but makes nothing new visible,
    * so the re-load after it can still be combined with the first load.
    */
   nir_variable **ssbo = create_many_int(nir_var_mem_ssbo, "x", 1);

   nir_load_var(b, ssbo[0]);

   nir_scoped_memory_barrier(b, NIR_SCOPE_DEVICE, NIR_MEMORY_RELEASE,
                             nir_var_mem_ssbo);

   nir_load_var(b, ssbo[0]);

   const bool did_progress = nir_opt_copy_prop_vars(b->shader);
   ASSERT_TRUE(did_progress);

   ASSERT_EQ(1, count_intrinsics(nir_intrinsic_load_deref));
}
734 
TEST_F(nir_redundant_load_vars_test, release_barrier_allows_same_mode_load_removal)
{
   /* A release barrier does not invalidate loaded values, even of its own
    * mode, so both re-loads may be removed.
    */
   nir_variable **ssbo = create_many_int(nir_var_mem_ssbo, "x", 2);

   nir_load_var(b, ssbo[0]);
   nir_load_var(b, ssbo[1]);

   nir_scoped_memory_barrier(b, NIR_SCOPE_DEVICE, NIR_MEMORY_RELEASE,
                             nir_var_mem_ssbo);

   nir_load_var(b, ssbo[0]);
   nir_load_var(b, ssbo[1]);

   const bool did_progress = nir_opt_copy_prop_vars(b->shader);
   ASSERT_TRUE(did_progress);

   ASSERT_EQ(2, count_intrinsics(nir_intrinsic_load_deref));
}
753 
TEST_F(nir_redundant_load_vars_test, release_barrier_allows_different_mode_load_removal)
{
   /* A release barrier invalidates nothing, so every re-load — of both the
    * barrier's mode and other modes — may be removed.
    */
   nir_variable **x = create_many_int(nir_var_mem_ssbo, "x", 2);
   nir_variable **y = create_many_int(nir_var_mem_shared, "y", 2);

   nir_load_var(b, x[0]);
   nir_load_var(b, x[1]);
   nir_load_var(b, y[0]);
   nir_load_var(b, y[1]);

   nir_scoped_memory_barrier(b, NIR_SCOPE_DEVICE, NIR_MEMORY_RELEASE,
                             nir_var_mem_ssbo);

   nir_load_var(b, x[0]);
   nir_load_var(b, x[1]);
   nir_load_var(b, y[0]);
   nir_load_var(b, y[1]);

   bool progress = nir_opt_copy_prop_vars(b->shader);
   ASSERT_TRUE(progress);

   /* Only the four original loads remain. */
   ASSERT_EQ(4, count_intrinsics(nir_intrinsic_load_deref));

   nir_intrinsic_instr *load;

   load = get_intrinsic(nir_intrinsic_load_deref, 0);
   ASSERT_EQ(nir_intrinsic_get_var(load, 0), x[0]);
   load = get_intrinsic(nir_intrinsic_load_deref, 1);
   ASSERT_EQ(nir_intrinsic_get_var(load, 0), x[1]);

   load = get_intrinsic(nir_intrinsic_load_deref, 2);
   ASSERT_EQ(nir_intrinsic_get_var(load, 0), y[0]);
   load = get_intrinsic(nir_intrinsic_load_deref, 3);
   ASSERT_EQ(nir_intrinsic_get_var(load, 0), y[1]);
}
789 
TEST_F(nir_copy_prop_vars_test, acquire_barrier_prevents_propagation)
{
   /* The acquire barrier may make another writer's value visible, so the
    * stored value must not be propagated into the load after the barrier.
    */
   nir_variable **ssbo = create_many_int(nir_var_mem_ssbo, "x", 1);

   nir_store_var(b, ssbo[0], nir_imm_int(b, 10), 1);

   nir_scoped_memory_barrier(b, NIR_SCOPE_DEVICE, NIR_MEMORY_ACQUIRE,
                             nir_var_mem_ssbo);

   nir_load_var(b, ssbo[0]);

   const bool did_progress = nir_opt_copy_prop_vars(b->shader);
   ASSERT_FALSE(did_progress);

   ASSERT_EQ(1, count_intrinsics(nir_intrinsic_store_deref));
   ASSERT_EQ(1, count_intrinsics(nir_intrinsic_load_deref));
}
807 
TEST_F(nir_copy_prop_vars_test, acquire_barrier_prevents_same_mode_propagation)
{
   /* The acquire barrier invalidates every stored value of its mode, so
    * neither store may be propagated into the loads after the barrier.
    */
   nir_variable **ssbo = create_many_int(nir_var_mem_ssbo, "x", 2);

   nir_store_var(b, ssbo[0], nir_imm_int(b, 10), 1);
   nir_store_var(b, ssbo[1], nir_imm_int(b, 20), 1);

   nir_scoped_memory_barrier(b, NIR_SCOPE_DEVICE, NIR_MEMORY_ACQUIRE,
                             nir_var_mem_ssbo);

   nir_load_var(b, ssbo[0]);
   nir_load_var(b, ssbo[1]);

   const bool did_progress = nir_opt_copy_prop_vars(b->shader);
   ASSERT_FALSE(did_progress);

   ASSERT_EQ(2, count_intrinsics(nir_intrinsic_store_deref));
   ASSERT_EQ(2, count_intrinsics(nir_intrinsic_load_deref));
}
827 
/* The acquire barrier only covers SSBO memory, so values stored to shared
 * variables may still be forwarded to their loads; the SSBO loads must
 * survive. */
TEST_F(nir_copy_prop_vars_test, acquire_barrier_allows_different_mode_propagation)
{
   nir_variable **ssbo = create_many_int(nir_var_mem_ssbo, "x", 2);
   nir_variable **shared = create_many_int(nir_var_mem_shared, "y", 2);
   nir_variable *const vars[] = { ssbo[0], ssbo[1], shared[0], shared[1] };

   for (int i = 0; i < 4; i++)
      nir_store_var(b, vars[i], nir_imm_int(b, 10 * (i + 1)), 1);

   nir_scoped_memory_barrier(b, NIR_SCOPE_DEVICE, NIR_MEMORY_ACQUIRE,
                             nir_var_mem_ssbo);

   for (int i = 0; i < 4; i++)
      nir_load_var(b, vars[i]);

   ASSERT_TRUE(nir_opt_copy_prop_vars(b->shader));

   /* All stores stay; only the shared loads are propagated away. */
   ASSERT_EQ(4, count_intrinsics(nir_intrinsic_store_deref));
   ASSERT_EQ(2, count_intrinsics(nir_intrinsic_load_deref));

   for (int i = 0; i < 4; i++) {
      nir_intrinsic_instr *store = get_intrinsic(nir_intrinsic_store_deref, i);
      ASSERT_EQ(nir_intrinsic_get_var(store, 0), vars[i]);
   }

   /* The surviving loads are the SSBO ones. */
   for (int i = 0; i < 2; i++) {
      nir_intrinsic_instr *load = get_intrinsic(nir_intrinsic_load_deref, i);
      ASSERT_EQ(nir_intrinsic_get_var(load, 0), ssbo[i]);
   }
}
871 
/* A release barrier publishes our own writes but does not invalidate what
 * we already know about them, so the load can be satisfied by the store
 * before the barrier. */
TEST_F(nir_copy_prop_vars_test, release_barrier_allows_propagation)
{
   nir_variable **x = create_many_int(nir_var_mem_ssbo, "x", 1);

   nir_store_var(b, x[0], nir_imm_int(b, 10), 1);

   nir_scoped_memory_barrier(b, NIR_SCOPE_DEVICE, NIR_MEMORY_RELEASE,
                             nir_var_mem_ssbo);

   nir_load_var(b, x[0]);

   bool progress = nir_opt_copy_prop_vars(b->shader);
   ASSERT_TRUE(progress);

   ASSERT_EQ(1, count_intrinsics(nir_intrinsic_store_deref));
   /* The stored value is forwarded to the load, removing it -- mirrors the
    * check in release_barrier_allows_same_mode_propagation. */
   ASSERT_EQ(0, count_intrinsics(nir_intrinsic_load_deref));
}
888 
/* A release barrier on SSBO memory does not make the pass forget the
 * values it stored, so both loads after the barrier can be replaced. */
TEST_F(nir_copy_prop_vars_test, release_barrier_allows_same_mode_propagation)
{
   nir_variable **v = create_many_int(nir_var_mem_ssbo, "x", 2);

   nir_store_var(b, v[0], nir_imm_int(b, 10), 1);
   nir_store_var(b, v[1], nir_imm_int(b, 20), 1);

   nir_scoped_memory_barrier(b, NIR_SCOPE_DEVICE, NIR_MEMORY_RELEASE,
                             nir_var_mem_ssbo);

   nir_load_var(b, v[0]);
   nir_load_var(b, v[1]);

   ASSERT_TRUE(nir_opt_copy_prop_vars(b->shader));

   /* Stores remain; both loads were satisfied from the stored values. */
   ASSERT_EQ(2, count_intrinsics(nir_intrinsic_store_deref));
   ASSERT_EQ(0, count_intrinsics(nir_intrinsic_load_deref));
}
908 
/* A release barrier, even though limited to SSBO memory, does not block
 * propagation for any mode: every load is satisfied by its store. */
TEST_F(nir_copy_prop_vars_test, release_barrier_allows_different_mode_propagation)
{
   nir_variable **ssbo = create_many_int(nir_var_mem_ssbo, "x", 2);
   nir_variable **shared = create_many_int(nir_var_mem_shared, "y", 2);
   nir_variable *const vars[] = { ssbo[0], ssbo[1], shared[0], shared[1] };

   for (int i = 0; i < 4; i++)
      nir_store_var(b, vars[i], nir_imm_int(b, 10 * (i + 1)), 1);

   nir_scoped_memory_barrier(b, NIR_SCOPE_DEVICE, NIR_MEMORY_RELEASE,
                             nir_var_mem_ssbo);

   for (int i = 0; i < 4; i++)
      nir_load_var(b, vars[i]);

   ASSERT_TRUE(nir_opt_copy_prop_vars(b->shader));

   /* All stores stay, in emission order, and no load survives. */
   ASSERT_EQ(4, count_intrinsics(nir_intrinsic_store_deref));
   ASSERT_EQ(0, count_intrinsics(nir_intrinsic_load_deref));

   for (int i = 0; i < 4; i++) {
      nir_intrinsic_instr *store = get_intrinsic(nir_intrinsic_store_deref, i);
      ASSERT_EQ(nir_intrinsic_get_var(store, 0), vars[i]);
   }
}
945 
/* After an SSBO acquire barrier the pass must forget the copy x[1] = x[0]:
 * the second copy has to keep reading x[1] instead of being rewritten to
 * copy from x[0]. */
TEST_F(nir_copy_prop_vars_test, acquire_barrier_prevents_propagation_from_copy)
{
   nir_variable **x = create_many_int(nir_var_mem_ssbo, "x", 3);

   nir_copy_var(b, x[1], x[0]);

   nir_scoped_memory_barrier(b, NIR_SCOPE_DEVICE, NIR_MEMORY_ACQUIRE,
                             nir_var_mem_ssbo);

   nir_copy_var(b, x[2], x[1]);

   bool progress = nir_opt_copy_prop_vars(b->shader);
   ASSERT_FALSE(progress);

   ASSERT_EQ(2, count_intrinsics(nir_intrinsic_copy_deref));

   nir_intrinsic_instr *copy;

   /* The source (src[1]) of each copy is unchanged. */
   copy = get_intrinsic(nir_intrinsic_copy_deref, 0);
   ASSERT_EQ(nir_intrinsic_get_var(copy, 1), x[0]);

   copy = get_intrinsic(nir_intrinsic_copy_deref, 1);
   ASSERT_EQ(nir_intrinsic_get_var(copy, 1), x[1]);
}
970 
/* The known copy goes through a shared-memory intermediate, but the SSBO
 * acquire barrier still covers the original source, so the second copy
 * may not be rewritten to read straight from x[0]. */
TEST_F(nir_copy_prop_vars_test, acquire_barrier_prevents_propagation_from_copy_to_different_mode)
{
   nir_variable **x = create_many_int(nir_var_mem_ssbo, "x", 2);
   nir_variable **y = create_many_int(nir_var_mem_shared, "y", 1);

   nir_copy_var(b, y[0], x[0]);

   nir_scoped_memory_barrier(b, NIR_SCOPE_DEVICE, NIR_MEMORY_ACQUIRE,
                             nir_var_mem_ssbo);

   nir_copy_var(b, x[1], y[0]);

   bool progress = nir_opt_copy_prop_vars(b->shader);
   ASSERT_FALSE(progress);

   ASSERT_EQ(2, count_intrinsics(nir_intrinsic_copy_deref));

   nir_intrinsic_instr *copy;

   /* The sources (src[1]) of both copies are untouched. */
   copy = get_intrinsic(nir_intrinsic_copy_deref, 0);
   ASSERT_EQ(nir_intrinsic_get_var(copy, 1), x[0]);

   copy = get_intrinsic(nir_intrinsic_copy_deref, 1);
   ASSERT_EQ(nir_intrinsic_get_var(copy, 1), y[0]);
}
996 
/* A release barrier does not invalidate the copy x[1] = x[0], so the
 * second copy can be rewritten to read from the original source x[0]. */
TEST_F(nir_copy_prop_vars_test, release_barrier_allows_propagation_from_copy)
{
   nir_variable **x = create_many_int(nir_var_mem_ssbo, "x", 3);

   nir_copy_var(b, x[1], x[0]);

   nir_scoped_memory_barrier(b, NIR_SCOPE_DEVICE, NIR_MEMORY_RELEASE,
                             nir_var_mem_ssbo);

   nir_copy_var(b, x[2], x[1]);

   bool progress = nir_opt_copy_prop_vars(b->shader);
   ASSERT_TRUE(progress);

   ASSERT_EQ(2, count_intrinsics(nir_intrinsic_copy_deref));

   nir_intrinsic_instr *copy;

   copy = get_intrinsic(nir_intrinsic_copy_deref, 0);
   ASSERT_EQ(nir_intrinsic_get_var(copy, 1), x[0]);

   /* The second copy's source (src[1]) was rewritten from x[1] to x[0]. */
   copy = get_intrinsic(nir_intrinsic_copy_deref, 1);
   ASSERT_EQ(nir_intrinsic_get_var(copy, 1), x[0]);
}
1021 
/* As above, but the intermediate lives in shared memory: the release
 * barrier still lets the second copy be redirected to the original
 * SSBO source x[0]. */
TEST_F(nir_copy_prop_vars_test, release_barrier_allows_propagation_from_copy_to_different_mode)
{
   nir_variable **x = create_many_int(nir_var_mem_ssbo, "x", 2);
   nir_variable **y = create_many_int(nir_var_mem_shared, "y", 1);

   nir_copy_var(b, y[0], x[0]);

   nir_scoped_memory_barrier(b, NIR_SCOPE_DEVICE, NIR_MEMORY_RELEASE,
                             nir_var_mem_ssbo);

   nir_copy_var(b, x[1], y[0]);

   bool progress = nir_opt_copy_prop_vars(b->shader);
   ASSERT_TRUE(progress);

   ASSERT_EQ(2, count_intrinsics(nir_intrinsic_copy_deref));

   nir_intrinsic_instr *copy;

   copy = get_intrinsic(nir_intrinsic_copy_deref, 0);
   ASSERT_EQ(nir_intrinsic_get_var(copy, 1), x[0]);

   /* The second copy's source (src[1]) was rewritten from y[0] to x[0]. */
   copy = get_intrinsic(nir_intrinsic_copy_deref, 1);
   ASSERT_EQ(nir_intrinsic_get_var(copy, 1), x[0]);
}
1047 
/* Store-to-load forwarding must work across basic-block boundaries when
 * no interfering write exists on any path between store and load. */
TEST_F(nir_copy_prop_vars_test, simple_store_load_in_two_blocks)
{
   nir_variable **v = create_many_ivec2(nir_var_function_temp, "v", 2);
   unsigned mask = 1 | 2;

   nir_ssa_def *stored_value = nir_imm_ivec2(b, 10, 20);
   nir_store_var(b, v[0], stored_value, mask);

   /* Adding an if statement will cause blocks to be created. */
   nir_pop_if(b, nir_push_if(b, nir_imm_int(b, 0)));

   nir_ssa_def *read_value = nir_load_var(b, v[0]);
   nir_store_var(b, v[1], read_value, mask);

   nir_validate_shader(b->shader, NULL);

   bool progress = nir_opt_copy_prop_vars(b->shader);
   EXPECT_TRUE(progress);

   nir_validate_shader(b->shader, NULL);

   ASSERT_EQ(count_intrinsics(nir_intrinsic_store_deref), 2);

   /* Both stores should now write the original immediate value. */
   for (int i = 0; i < 2; i++) {
      nir_intrinsic_instr *store = get_intrinsic(nir_intrinsic_store_deref, i);
      ASSERT_TRUE(store->src[1].is_ssa);
      EXPECT_EQ(store->src[1].ssa, stored_value);
   }
}
1077 
/* Writing vec.x and vec.y via component-masked whole-variable stores gives
 * the pass full knowledge of the vector, so a later load of vec[1] can be
 * replaced by the value that was stored to the y component. */
TEST_F(nir_copy_prop_vars_test, load_direct_array_deref_on_vector_reuses_previous_load)
{
   nir_variable *in0 = create_ivec2(nir_var_mem_ssbo, "in0");
   nir_variable *in1 = create_ivec2(nir_var_mem_ssbo, "in1");
   nir_variable *vec = create_ivec2(nir_var_mem_ssbo, "vec");
   nir_variable *out = create_int(nir_var_mem_ssbo, "out");

   nir_store_var(b, vec, nir_load_var(b, in0), 1 << 0);
   nir_store_var(b, vec, nir_load_var(b, in1), 1 << 1);

   /* This load will be dropped, as vec.y (or vec[1]) is already known. */
   nir_deref_instr *deref =
      nir_build_deref_array_imm(b, nir_build_deref_var(b, vec), 1);
   nir_ssa_def *loaded_from_deref = nir_load_deref(b, deref);

   /* This store should use the value loaded from in1. */
   nir_store_var(b, out, loaded_from_deref, 1 << 0);

   nir_validate_shader(b->shader, NULL);
   ASSERT_EQ(count_intrinsics(nir_intrinsic_load_deref), 3);
   ASSERT_EQ(count_intrinsics(nir_intrinsic_store_deref), 3);

   bool progress = nir_opt_copy_prop_vars(b->shader);
   EXPECT_TRUE(progress);

   nir_validate_shader(b->shader, NULL);
   ASSERT_EQ(count_intrinsics(nir_intrinsic_load_deref), 2);
   ASSERT_EQ(count_intrinsics(nir_intrinsic_store_deref), 3);

   /* Last store is the one to "out". */
   nir_intrinsic_instr *store = get_intrinsic(nir_intrinsic_store_deref, 2);
   ASSERT_TRUE(store->src[1].is_ssa);

   /* NOTE: The ALU instruction is how we get the vec.y. */
   ASSERT_TRUE(nir_src_as_alu_instr(store->src[1]));
}
1113 
/* A whole-vector copy lets a later load of one element be redirected to
 * the matching element of the copy's source variable. */
TEST_F(nir_copy_prop_vars_test, load_direct_array_deref_on_vector_reuses_previous_copy)
{
   nir_variable *in0 = create_ivec2(nir_var_mem_ssbo, "in0");
   nir_variable *vec = create_ivec2(nir_var_mem_ssbo, "vec");

   nir_copy_var(b, vec, in0);

   /* This load will be replaced with one from in0. */
   nir_deref_instr *deref =
      nir_build_deref_array_imm(b, nir_build_deref_var(b, vec), 1);
   nir_load_deref(b, deref);

   nir_validate_shader(b->shader, NULL);

   bool progress = nir_opt_copy_prop_vars(b->shader);
   EXPECT_TRUE(progress);

   nir_validate_shader(b->shader, NULL);
   ASSERT_EQ(count_intrinsics(nir_intrinsic_load_deref), 1);

   /* The remaining load reads from the copy source, not from vec. */
   nir_intrinsic_instr *load = get_intrinsic(nir_intrinsic_load_deref, 0);
   ASSERT_EQ(nir_intrinsic_get_var(load, 0), in0);
}
1137 
/* A load through a direct array deref records knowledge of that element,
 * which can later be combined with a component store to satisfy a full
 * vector load. */
TEST_F(nir_copy_prop_vars_test, load_direct_array_deref_on_vector_gets_reused)
{
   nir_variable *in0 = create_ivec2(nir_var_mem_ssbo, "in0");
   nir_variable *vec = create_ivec2(nir_var_mem_ssbo, "vec");
   nir_variable *out = create_ivec2(nir_var_mem_ssbo, "out");

   /* Loading "vec[1]" deref will save the information about vec.y. */
   nir_deref_instr *deref =
      nir_build_deref_array_imm(b, nir_build_deref_var(b, vec), 1);
   nir_load_deref(b, deref);

   /* Store to vec.x. */
   nir_store_var(b, vec, nir_load_var(b, in0), 1 << 0);

   /* This load will be dropped, since both vec.x and vec.y are known. */
   nir_ssa_def *loaded_from_vec = nir_load_var(b, vec);
   nir_store_var(b, out, loaded_from_vec, 0x3);

   nir_validate_shader(b->shader, NULL);
   ASSERT_EQ(count_intrinsics(nir_intrinsic_load_deref), 3);
   ASSERT_EQ(count_intrinsics(nir_intrinsic_store_deref), 2);

   bool progress = nir_opt_copy_prop_vars(b->shader);
   EXPECT_TRUE(progress);

   nir_validate_shader(b->shader, NULL);
   ASSERT_EQ(count_intrinsics(nir_intrinsic_load_deref), 2);
   ASSERT_EQ(count_intrinsics(nir_intrinsic_store_deref), 2);

   /* The store to "out" now composes the known values via an ALU vec. */
   nir_intrinsic_instr *store = get_intrinsic(nir_intrinsic_store_deref, 1);
   ASSERT_TRUE(store->src[1].is_ssa);
   ASSERT_TRUE(nir_src_as_alu_instr(store->src[1]));
}
1171 
/* Per-element stores through direct array derefs make both components of
 * the vector known, so later loads (element or full vector) are dropped
 * and replaced by the stored values. */
TEST_F(nir_copy_prop_vars_test, store_load_direct_array_deref_on_vector)
{
   nir_variable *vec = create_ivec2(nir_var_mem_ssbo, "vec");
   nir_variable *out0 = create_int(nir_var_mem_ssbo, "out0");
   nir_variable *out1 = create_ivec2(nir_var_mem_ssbo, "out1");

   /* Store to "vec[1]" and "vec[0]". */
   nir_deref_instr *store_deref_y =
      nir_build_deref_array_imm(b, nir_build_deref_var(b, vec), 1);
   nir_store_deref(b, store_deref_y, nir_imm_int(b, 20), 1);

   nir_deref_instr *store_deref_x =
      nir_build_deref_array_imm(b, nir_build_deref_var(b, vec), 0);
   nir_store_deref(b, store_deref_x, nir_imm_int(b, 10), 1);

   /* Both loads below will be dropped, because the values are already known. */
   nir_deref_instr *load_deref_y =
      nir_build_deref_array_imm(b, nir_build_deref_var(b, vec), 1);
   nir_store_var(b, out0, nir_load_deref(b, load_deref_y), 1);

   nir_store_var(b, out1, nir_load_var(b, vec), 1);

   nir_validate_shader(b->shader, NULL);
   ASSERT_EQ(count_intrinsics(nir_intrinsic_load_deref), 2);
   ASSERT_EQ(count_intrinsics(nir_intrinsic_store_deref), 4);

   bool progress = nir_opt_copy_prop_vars(b->shader);
   EXPECT_TRUE(progress);

   nir_validate_shader(b->shader, NULL);
   ASSERT_EQ(count_intrinsics(nir_intrinsic_load_deref), 0);
   ASSERT_EQ(count_intrinsics(nir_intrinsic_store_deref), 4);

   /* Third store will just use the value from first store. */
   nir_intrinsic_instr *first_store = get_intrinsic(nir_intrinsic_store_deref, 0);
   nir_intrinsic_instr *third_store = get_intrinsic(nir_intrinsic_store_deref, 2);
   ASSERT_TRUE(third_store->src[1].is_ssa);
   EXPECT_EQ(third_store->src[1].ssa, first_store->src[1].ssa);

   /* Fourth store will compose first and second store values. */
   nir_intrinsic_instr *fourth_store = get_intrinsic(nir_intrinsic_store_deref, 3);
   ASSERT_TRUE(fourth_store->src[1].is_ssa);
   EXPECT_TRUE(nir_src_as_alu_instr(fourth_store->src[1]));
}
1216 
/* A store to vec[idx] followed by a load of vec[idx] with the *same* index
 * SSA value can be forwarded even though the index is not a constant. */
TEST_F(nir_copy_prop_vars_test, store_load_indirect_array_deref_on_vector)
{
   nir_variable *vec = create_ivec2(nir_var_mem_ssbo, "vec");
   nir_variable *idx = create_int(nir_var_mem_ssbo, "idx");
   nir_variable *out = create_int(nir_var_mem_ssbo, "out");

   nir_ssa_def *idx_ssa = nir_load_var(b, idx);

   /* Store to vec[idx]. */
   nir_deref_instr *store_deref =
      nir_build_deref_array(b, nir_build_deref_var(b, vec), idx_ssa);
   nir_store_deref(b, store_deref, nir_imm_int(b, 20), 1);

   /* Load from vec[idx] to store in out. This load should be dropped. */
   nir_deref_instr *load_deref =
      nir_build_deref_array(b, nir_build_deref_var(b, vec), idx_ssa);
   nir_store_var(b, out, nir_load_deref(b, load_deref), 1);

   nir_validate_shader(b->shader, NULL);
   ASSERT_EQ(count_intrinsics(nir_intrinsic_load_deref), 2);
   ASSERT_EQ(count_intrinsics(nir_intrinsic_store_deref), 2);

   bool progress = nir_opt_copy_prop_vars(b->shader);
   EXPECT_TRUE(progress);

   nir_validate_shader(b->shader, NULL);
   ASSERT_EQ(count_intrinsics(nir_intrinsic_load_deref), 1);
   ASSERT_EQ(count_intrinsics(nir_intrinsic_store_deref), 2);

   /* Store to vec[idx] propagated to out. */
   nir_intrinsic_instr *first = get_intrinsic(nir_intrinsic_store_deref, 0);
   nir_intrinsic_instr *second = get_intrinsic(nir_intrinsic_store_deref, 1);
   ASSERT_TRUE(first->src[1].is_ssa);
   ASSERT_TRUE(second->src[1].is_ssa);
   EXPECT_EQ(first->src[1].ssa, second->src[1].ssa);
}
1253 
/* After a whole-vector store, the first indirect load vec[idx] is kept
 * (the pass does not fold a dynamic index into the known constant vector),
 * but the second identical indirect load can reuse the first one. */
TEST_F(nir_copy_prop_vars_test, store_load_direct_and_indirect_array_deref_on_vector)
{
   nir_variable *vec = create_ivec2(nir_var_mem_ssbo, "vec");
   nir_variable *idx = create_int(nir_var_mem_ssbo, "idx");
   nir_variable **out = create_many_int(nir_var_mem_ssbo, "out", 2);

   nir_ssa_def *idx_ssa = nir_load_var(b, idx);

   /* Store to vec. */
   nir_store_var(b, vec, nir_imm_ivec2(b, 10, 10), 1 | 2);

   /* Load from vec[idx]. This load is currently not dropped. */
   nir_deref_instr *indirect =
      nir_build_deref_array(b, nir_build_deref_var(b, vec), idx_ssa);
   nir_store_var(b, out[0], nir_load_deref(b, indirect), 1);

   /* Load from vec[idx] again. This load should be dropped. */
   nir_store_var(b, out[1], nir_load_deref(b, indirect), 1);

   nir_validate_shader(b->shader, NULL);
   ASSERT_EQ(count_intrinsics(nir_intrinsic_load_deref), 3);
   ASSERT_EQ(count_intrinsics(nir_intrinsic_store_deref), 3);

   bool progress = nir_opt_copy_prop_vars(b->shader);
   EXPECT_TRUE(progress);

   nir_validate_shader(b->shader, NULL);
   ASSERT_EQ(count_intrinsics(nir_intrinsic_load_deref), 2);
   ASSERT_EQ(count_intrinsics(nir_intrinsic_store_deref), 3);

   /* Store to vec[idx] propagated to out. */
   nir_intrinsic_instr *second = get_intrinsic(nir_intrinsic_store_deref, 1);
   nir_intrinsic_instr *third = get_intrinsic(nir_intrinsic_store_deref, 2);
   ASSERT_TRUE(second->src[1].is_ssa);
   ASSERT_TRUE(third->src[1].is_ssa);
   EXPECT_EQ(second->src[1].ssa, third->src[1].ssa);
}
1291 
/* Indirect deref on a real array: a load of arr[idx] right after a store
 * to arr[idx] using the same index SSA value can reuse the stored value. */
TEST_F(nir_copy_prop_vars_test, store_load_indirect_array_deref)
{
   nir_variable *arr = create_var(nir_var_mem_ssbo,
                                  glsl_array_type(glsl_int_type(), 10, 0),
                                  "arr");
   nir_variable *idx = create_int(nir_var_mem_ssbo, "idx");
   nir_variable *out = create_int(nir_var_mem_ssbo, "out");

   nir_ssa_def *index = nir_load_var(b, idx);

   /* arr[idx] = 20; */
   nir_deref_instr *write_elem =
      nir_build_deref_array(b, nir_build_deref_var(b, arr), index);
   nir_store_deref(b, write_elem, nir_imm_int(b, 20), 1);

   /* out = arr[idx]; -- this load should be dropped. */
   nir_deref_instr *read_elem =
      nir_build_deref_array(b, nir_build_deref_var(b, arr), index);
   nir_store_var(b, out, nir_load_deref(b, read_elem), 1);

   nir_validate_shader(b->shader, NULL);
   ASSERT_EQ(count_intrinsics(nir_intrinsic_load_deref), 2);
   ASSERT_EQ(count_intrinsics(nir_intrinsic_store_deref), 2);

   EXPECT_TRUE(nir_opt_copy_prop_vars(b->shader));

   nir_validate_shader(b->shader, NULL);
   ASSERT_EQ(count_intrinsics(nir_intrinsic_load_deref), 1);
   ASSERT_EQ(count_intrinsics(nir_intrinsic_store_deref), 2);

   /* Both stores now write the same SSA value: the immediate 20. */
   nir_intrinsic_instr *store_arr = get_intrinsic(nir_intrinsic_store_deref, 0);
   nir_intrinsic_instr *store_out = get_intrinsic(nir_intrinsic_store_deref, 1);
   ASSERT_TRUE(store_arr->src[1].is_ssa);
   ASSERT_TRUE(store_out->src[1].is_ssa);
   EXPECT_EQ(store_arr->src[1].ssa, store_out->src[1].ssa);
}
1330 
/* A single store that is never overwritten is not dead. */
TEST_F(nir_dead_write_vars_test, no_dead_writes_in_block)
{
   nir_variable **vars = create_many_int(nir_var_mem_ssbo, "v", 2);

   nir_store_var(b, vars[0], nir_load_var(b, vars[1]), 1);

   ASSERT_FALSE(nir_opt_dead_write_vars(b->shader));
}
1340 
/* Writes to different components of the same vector do not overwrite each
 * other, so neither store is dead. */
TEST_F(nir_dead_write_vars_test, no_dead_writes_different_components_in_block)
{
   nir_variable **vars = create_many_ivec2(nir_var_mem_ssbo, "v", 3);

   nir_store_var(b, vars[0], nir_load_var(b, vars[1]), 1 << 0);
   nir_store_var(b, vars[0], nir_load_var(b, vars[2]), 1 << 1);

   ASSERT_FALSE(nir_opt_dead_write_vars(b->shader));
}
1351 
/* A volatile store must never be removed, and it also keeps the store
 * before it alive. */
TEST_F(nir_dead_write_vars_test, volatile_write)
{
   nir_variable *v = create_int(nir_var_mem_ssbo, "v");

   nir_store_var(b, v, nir_imm_int(b, 0), 0x1);
   nir_store_var_volatile(b, v, nir_imm_int(b, 1), 0x1);
   nir_store_var(b, v, nir_imm_int(b, 2), 0x1);

   /* Our approach here is a bit scorched-earth.  We expect the volatile store
    * in the middle to cause both that store and the one before it to be kept.
    * Technically, volatile only prevents combining the volatile store with
    * another store and one could argue that the store before the volatile and
    * the one after it could be combined.  However, it seems safer to just
    * treat a volatile store like an atomic and prevent any combining across
    * it.
    */
   bool progress = nir_opt_dead_write_vars(b->shader);
   ASSERT_FALSE(progress);
}
1371 
/* Same as volatile_write, but using copy_deref instructions instead of
 * stores. */
TEST_F(nir_dead_write_vars_test, volatile_copies)
{
   nir_variable **v = create_many_int(nir_var_mem_ssbo, "v", 2);

   nir_copy_var(b, v[0], v[1]);
   nir_copy_deref_with_access(b, nir_build_deref_var(b, v[0]),
                                 nir_build_deref_var(b, v[1]),
                                 ACCESS_VOLATILE, (gl_access_qualifier)0);
   nir_copy_var(b, v[0], v[1]);

   /* Our approach here is a bit scorched-earth.  We expect the volatile copy
    * in the middle to cause both that copy and the one before it to be kept.
    * Technically, volatile only prevents combining the volatile copy with
    * another write and one could argue that the copy before the volatile and
    * the one after it could be combined.  However, it seems safer to just
    * treat a volatile write like an atomic and prevent any combining across
    * it.
    */
   bool progress = nir_opt_dead_write_vars(b->shader);
   ASSERT_FALSE(progress);
}
1393 
/* A store that is only overwritten on one arm of an if is not dead: the
 * other path still observes the original value. */
TEST_F(nir_dead_write_vars_test, no_dead_writes_in_if_statement)
{
   nir_variable **v = create_many_int(nir_var_mem_ssbo, "v", 6);

   nir_store_var(b, v[2], nir_load_var(b, v[0]), 1);
   nir_store_var(b, v[3], nir_load_var(b, v[1]), 1);

   /* Each arm of the if statement will overwrite one store. */
   nir_if *if_stmt = nir_push_if(b, nir_imm_int(b, 0));
   nir_store_var(b, v[2], nir_load_var(b, v[4]), 1);

   nir_push_else(b, if_stmt);
   nir_store_var(b, v[3], nir_load_var(b, v[5]), 1);

   nir_pop_if(b, if_stmt);

   bool progress = nir_opt_dead_write_vars(b->shader);
   ASSERT_FALSE(progress);
}
1413 
/* A write inside a loop body does not make a write before the loop dead,
 * because the loop body may never execute. */
TEST_F(nir_dead_write_vars_test, no_dead_writes_in_loop_statement)
{
   nir_variable **v = create_many_int(nir_var_mem_ssbo, "v", 3);

   nir_store_var(b, v[0], nir_load_var(b, v[1]), 1);

   /* Loop will write other value.  Since it might not be executed, it doesn't
    * kill the first write.
    */
   nir_loop *loop = nir_push_loop(b);

   /* Conditional break so the loop is not trivially infinite. */
   nir_if *if_stmt = nir_push_if(b, nir_imm_int(b, 0));
   nir_jump(b, nir_jump_break);
   nir_pop_if(b, if_stmt);

   nir_store_var(b, v[0], nir_load_var(b, v[2]), 1);
   nir_pop_loop(b, loop);

   bool progress = nir_opt_dead_write_vars(b->shader);
   ASSERT_FALSE(progress);
}
1435 
/* The first write to v[0] is fully overwritten before anything reads it,
 * so the pass removes it and keeps only the second store. */
TEST_F(nir_dead_write_vars_test, dead_write_in_block)
{
   nir_variable **vars = create_many_int(nir_var_mem_ssbo, "v", 3);

   nir_store_var(b, vars[0], nir_load_var(b, vars[1]), 1);
   nir_ssa_def *second_value = nir_load_var(b, vars[2]);
   nir_store_var(b, vars[0], second_value, 1);

   ASSERT_TRUE(nir_opt_dead_write_vars(b->shader));

   EXPECT_EQ(1, count_intrinsics(nir_intrinsic_store_deref));

   /* The surviving store writes the second value. */
   nir_intrinsic_instr *store = get_intrinsic(nir_intrinsic_store_deref, 0);
   ASSERT_TRUE(store->src[1].is_ssa);
   EXPECT_EQ(store->src[1].ssa, second_value);
}
1453 
/* Two stores to the same component (x) of the same vector: the first is
 * dead and must be removed. */
TEST_F(nir_dead_write_vars_test, dead_write_components_in_block)
{
   nir_variable **vars = create_many_ivec2(nir_var_mem_ssbo, "v", 3);

   nir_store_var(b, vars[0], nir_load_var(b, vars[1]), 1 << 0);
   nir_ssa_def *second_value = nir_load_var(b, vars[2]);
   nir_store_var(b, vars[0], second_value, 1 << 0);

   ASSERT_TRUE(nir_opt_dead_write_vars(b->shader));

   EXPECT_EQ(1, count_intrinsics(nir_intrinsic_store_deref));

   /* The surviving store writes the second value. */
   nir_intrinsic_instr *store = get_intrinsic(nir_intrinsic_store_deref, 0);
   ASSERT_TRUE(store->src[1].is_ssa);
   EXPECT_EQ(store->src[1].ssa, second_value);
}
1471 
1472 
1473 /* TODO: The DISABLED tests below depend on the dead write removal be able to
1474  * identify dead writes between multiple blocks.  This is still not
1475  * implemented.
1476  */
1477 
/* Disabled: needs cross-block dead-write detection (not yet implemented).
 * The first store is fully overwritten in a later block with no
 * intervening read, so it should be removable. */
TEST_F(nir_dead_write_vars_test, DISABLED_dead_write_in_two_blocks)
{
   nir_variable **v = create_many_int(nir_var_mem_ssbo, "v", 3);

   nir_store_var(b, v[0], nir_load_var(b, v[1]), 1);
   nir_ssa_def *load_v2 = nir_load_var(b, v[2]);

   /* Causes the stores to be in different blocks. */
   nir_pop_if(b, nir_push_if(b, nir_imm_int(b, 0)));

   nir_store_var(b, v[0], load_v2, 1);

   bool progress = nir_opt_dead_write_vars(b->shader);
   ASSERT_TRUE(progress);

   EXPECT_EQ(1, count_intrinsics(nir_intrinsic_store_deref));

   nir_intrinsic_instr *store = get_intrinsic(nir_intrinsic_store_deref, 0);
   ASSERT_TRUE(store->src[1].is_ssa);
   EXPECT_EQ(store->src[1].ssa, load_v2);
}
1499 
/* Disabled: needs cross-block dead-write detection (not yet implemented).
 * Same as DISABLED_dead_write_in_two_blocks but writing only the x
 * component. */
TEST_F(nir_dead_write_vars_test, DISABLED_dead_write_components_in_two_blocks)
{
   nir_variable **v = create_many_ivec2(nir_var_mem_ssbo, "v", 3);

   nir_store_var(b, v[0], nir_load_var(b, v[1]), 1 << 0);

   /* Causes the stores to be in different blocks. */
   nir_pop_if(b, nir_push_if(b, nir_imm_int(b, 0)));

   nir_ssa_def *load_v2 = nir_load_var(b, v[2]);
   nir_store_var(b, v[0], load_v2, 1 << 0);

   bool progress = nir_opt_dead_write_vars(b->shader);
   ASSERT_TRUE(progress);

   EXPECT_EQ(1, count_intrinsics(nir_intrinsic_store_deref));

   nir_intrinsic_instr *store = get_intrinsic(nir_intrinsic_store_deref, 0);
   ASSERT_TRUE(store->src[1].is_ssa);
   EXPECT_EQ(store->src[1].ssa, load_v2);
}
1521 
/* Disabled: needs cross-block dead-write detection (not yet implemented).
 * Unlike no_dead_writes_in_if_statement, here BOTH arms overwrite the
 * same variable, so the store before the if should be removable. */
TEST_F(nir_dead_write_vars_test, DISABLED_dead_writes_in_if_statement)
{
   nir_variable **v = create_many_int(nir_var_mem_ssbo, "v", 4);

   /* Both branches will overwrite, making the previous store dead. */
   nir_store_var(b, v[0], nir_load_var(b, v[1]), 1);

   nir_if *if_stmt = nir_push_if(b, nir_imm_int(b, 0));
   nir_ssa_def *load_v2 = nir_load_var(b, v[2]);
   nir_store_var(b, v[0], load_v2, 1);

   nir_push_else(b, if_stmt);
   nir_ssa_def *load_v3 = nir_load_var(b, v[3]);
   nir_store_var(b, v[0], load_v3, 1);

   nir_pop_if(b, if_stmt);

   bool progress = nir_opt_dead_write_vars(b->shader);
   ASSERT_TRUE(progress);
   EXPECT_EQ(2, count_intrinsics(nir_intrinsic_store_deref));

   /* Only the two stores inside the if/else remain. */
   nir_intrinsic_instr *first_store = get_intrinsic(nir_intrinsic_store_deref, 0);
   ASSERT_TRUE(first_store->src[1].is_ssa);
   EXPECT_EQ(first_store->src[1].ssa, load_v2);

   nir_intrinsic_instr *second_store = get_intrinsic(nir_intrinsic_store_deref, 1);
   ASSERT_TRUE(second_store->src[1].is_ssa);
   EXPECT_EQ(second_store->src[1].ssa, load_v3);
}
1551 
/* Disabled: needs cross-block dead-write detection (not yet implemented).
 * A write overwritten before a barrier is dead; a write before the
 * barrier that is only overwritten after it must be kept. */
TEST_F(nir_dead_write_vars_test, DISABLED_memory_barrier_in_two_blocks)
{
   nir_variable **v = create_many_int(nir_var_mem_ssbo, "v", 2);

   nir_store_var(b, v[0], nir_imm_int(b, 1), 1);
   nir_store_var(b, v[1], nir_imm_int(b, 2), 1);

   /* Split into many blocks. */
   nir_pop_if(b, nir_push_if(b, nir_imm_int(b, 0)));

   /* Because it is before the barrier, this will kill the previous store to that target. */
   nir_store_var(b, v[0], nir_imm_int(b, 3), 1);

   nir_scoped_memory_barrier(b, NIR_SCOPE_DEVICE, NIR_MEMORY_ACQ_REL,
                             nir_var_mem_ssbo);

   /* The barrier makes the earlier store to v[1] observable, so it stays. */
   nir_store_var(b, v[1], nir_imm_int(b, 4), 1);

   bool progress = nir_opt_dead_write_vars(b->shader);
   ASSERT_TRUE(progress);

   EXPECT_EQ(3, count_intrinsics(nir_intrinsic_store_deref));
}
1575 
/* emit_vertex acts as a barrier only for shader_out variables: it keeps the
 * first write to 'out' alive, while the first (overwritten) SSBO write to
 * v[0] is still dead and must be removed.
 * (DISABLED_ prefix: gtest skips this test by default.)
 */
TEST_F(nir_dead_write_vars_test, DISABLED_unrelated_barrier_in_two_blocks)
{
   nir_variable **v = create_many_int(nir_var_mem_ssbo, "v", 3);
   nir_variable *out = create_int(nir_var_shader_out, "out");

   nir_store_var(b, out, nir_load_var(b, v[1]), 1);
   nir_store_var(b, v[0], nir_load_var(b, v[1]), 1);

   /* Split into many blocks. */
   nir_pop_if(b, nir_push_if(b, nir_imm_int(b, 0)));

   /* Emit vertex will ensure writes to output variables are considered used,
    * but should not affect other types of variables. */

   nir_builder_instr_insert(b, &nir_intrinsic_instr_create(b->shader, nir_intrinsic_emit_vertex)->instr);

   nir_store_var(b, out, nir_load_var(b, v[2]), 1);
   nir_store_var(b, v[0], nir_load_var(b, v[2]), 1);

   bool progress = nir_opt_dead_write_vars(b->shader);
   ASSERT_TRUE(progress);

   /* Verify the first write to v[0] was removed. */
   EXPECT_EQ(3, count_intrinsics(nir_intrinsic_store_deref));

   /* Remaining stores, in emission order: out, out, v[0]. */
   nir_intrinsic_instr *first_store = get_intrinsic(nir_intrinsic_store_deref, 0);
   EXPECT_EQ(nir_intrinsic_get_var(first_store, 0), out);

   nir_intrinsic_instr *second_store = get_intrinsic(nir_intrinsic_store_deref, 1);
   EXPECT_EQ(nir_intrinsic_get_var(second_store, 0), out);

   nir_intrinsic_instr *third_store = get_intrinsic(nir_intrinsic_store_deref, 2);
   EXPECT_EQ(nir_intrinsic_get_var(third_store, 0), v[0]);
}
1610 
/* Four single-component stores to 'out' with disjoint write masks (x, y, z,
 * w) must be combined into a single store with mask 0xf whose value is a
 * vec gathering component i of the result from v[i]. */
TEST_F(nir_combine_stores_test, non_overlapping_stores)
{
   nir_variable **v = create_many_ivec4(nir_var_mem_ssbo, "v", 4);
   nir_variable *out = create_ivec4(nir_var_shader_out, "out");

   /* out.<i> = v[i].<i>, one component per store. */
   for (int i = 0; i < 4; i++)
      nir_store_var(b, out, nir_load_var(b, v[i]), 1 << i);

   nir_validate_shader(b->shader, NULL);

   bool progress = nir_opt_combine_stores(b->shader, nir_var_shader_out);
   ASSERT_TRUE(progress);

   nir_validate_shader(b->shader, NULL);

   /* Clean up to verify from where the values in combined store are coming. */
   nir_copy_prop(b->shader);
   nir_opt_dce(b->shader);

   ASSERT_EQ(count_intrinsics(nir_intrinsic_store_deref), 1);
   nir_intrinsic_instr *combined = get_intrinsic(nir_intrinsic_store_deref, 0);
   ASSERT_EQ(nir_intrinsic_write_mask(combined), 0xf);
   ASSERT_EQ(nir_intrinsic_get_var(combined, 0), out);

   /* The stored value must be a vec whose i-th source is the load of v[i],
    * taking that load's i-th component. */
   nir_alu_instr *vec = nir_src_as_alu_instr(combined->src[1]);
   ASSERT_TRUE(vec);
   for (int i = 0; i < 4; i++) {
      nir_intrinsic_instr *load = nir_src_as_intrinsic(vec->src[i].src);
      ASSERT_EQ(load->intrinsic, nir_intrinsic_load_deref);
      ASSERT_EQ(nir_intrinsic_get_var(load, 0), v[i])
         << "Source value for component " << i << " of store is wrong";
      ASSERT_EQ(vec->src[i].swizzle[0], i)
         << "Source component for component " << i << " of store is wrong";
   }
}
1646 
/* Three stores with mutually overlapping masks (xy, yz, zw) combine into a
 * single 0xf-masked store; the later store wins on each overlapped
 * component, so y comes from v[1] and z/w from v[2]. */
TEST_F(nir_combine_stores_test, overlapping_stores)
{
   nir_variable **v = create_many_ivec4(nir_var_mem_ssbo, "v", 3);
   nir_variable *out = create_ivec4(nir_var_shader_out, "out");

   /* Make stores with xy, yz and zw masks. */
   for (int i = 0; i < 3; i++) {
      nir_component_mask_t mask = (1 << i) | (1 << (i + 1));
      nir_store_var(b, out, nir_load_var(b, v[i]), mask);
   }

   nir_validate_shader(b->shader, NULL);

   bool progress = nir_opt_combine_stores(b->shader, nir_var_shader_out);
   ASSERT_TRUE(progress);

   nir_validate_shader(b->shader, NULL);

   /* Clean up to verify from where the values in combined store are coming. */
   nir_copy_prop(b->shader);
   nir_opt_dce(b->shader);

   ASSERT_EQ(count_intrinsics(nir_intrinsic_store_deref), 1);
   nir_intrinsic_instr *combined = get_intrinsic(nir_intrinsic_store_deref, 0);
   ASSERT_EQ(nir_intrinsic_write_mask(combined), 0xf);
   ASSERT_EQ(nir_intrinsic_get_var(combined, 0), out);

   nir_alu_instr *vec = nir_src_as_alu_instr(combined->src[1]);
   ASSERT_TRUE(vec);

   /* Component x comes from v[0]. */
   nir_intrinsic_instr *load_for_x = nir_src_as_intrinsic(vec->src[0].src);
   ASSERT_EQ(nir_intrinsic_get_var(load_for_x, 0), v[0]);
   ASSERT_EQ(vec->src[0].swizzle[0], 0);

   /* Component y comes from v[1]. */
   nir_intrinsic_instr *load_for_y = nir_src_as_intrinsic(vec->src[1].src);
   ASSERT_EQ(nir_intrinsic_get_var(load_for_y, 0), v[1]);
   ASSERT_EQ(vec->src[1].swizzle[0], 1);

   /* Components z and w come from v[2], and must share a single load. */
   nir_intrinsic_instr *load_for_z = nir_src_as_intrinsic(vec->src[2].src);
   nir_intrinsic_instr *load_for_w = nir_src_as_intrinsic(vec->src[3].src);
   ASSERT_EQ(load_for_z, load_for_w);
   ASSERT_EQ(nir_intrinsic_get_var(load_for_z, 0), v[2]);
   ASSERT_EQ(vec->src[2].swizzle[0], 2);
   ASSERT_EQ(vec->src[3].swizzle[0], 3);
}
1695 
/* Stores through the whole-vector deref and through constant-indexed
 * component derefs (out[2], out[3]) of the same ivec4 must all combine into
 * one 0xf-masked store.  Expected final value: x <- v[0], y <- v[1].y,
 * z <- s[0] (overwriting v[1].z), w <- s[1]. */
TEST_F(nir_combine_stores_test, direct_array_derefs)
{
   nir_variable **v = create_many_ivec4(nir_var_mem_ssbo, "vec", 2);
   nir_variable **s = create_many_int(nir_var_mem_ssbo, "scalar", 2);
   nir_variable *out = create_ivec4(nir_var_mem_ssbo, "out");

   nir_deref_instr *out_deref = nir_build_deref_var(b, out);

   /* Store to vector with mask x. */
   nir_store_deref(b, out_deref, nir_load_var(b, v[0]),
                   1 << 0);

   /* Store to vector with mask yz. */
   nir_store_deref(b, out_deref, nir_load_var(b, v[1]),
                   (1 << 2) | (1 << 1));

   /* Store to vector[2], overlapping with previous store. */
   nir_store_deref(b,
                   nir_build_deref_array_imm(b, out_deref, 2),
                   nir_load_var(b, s[0]),
                   1 << 0);

   /* Store to vector[3], no overlap. */
   nir_store_deref(b,
                   nir_build_deref_array_imm(b, out_deref, 3),
                   nir_load_var(b, s[1]),
                   1 << 0);

   nir_validate_shader(b->shader, NULL);

   bool progress = nir_opt_combine_stores(b->shader, nir_var_mem_ssbo);
   ASSERT_TRUE(progress);

   nir_validate_shader(b->shader, NULL);

   /* Clean up to verify from where the values in combined store are coming. */
   nir_copy_prop(b->shader);
   nir_opt_dce(b->shader);

   ASSERT_EQ(count_intrinsics(nir_intrinsic_store_deref), 1);
   nir_intrinsic_instr *combined = get_intrinsic(nir_intrinsic_store_deref, 0);
   ASSERT_EQ(nir_intrinsic_write_mask(combined), 0xf);
   ASSERT_EQ(nir_intrinsic_get_var(combined, 0), out);

   nir_alu_instr *vec = nir_src_as_alu_instr(combined->src[1]);
   ASSERT_TRUE(vec);

   /* Component x comes from v[0]. */
   nir_intrinsic_instr *load_for_x = nir_src_as_intrinsic(vec->src[0].src);
   ASSERT_EQ(nir_intrinsic_get_var(load_for_x, 0), v[0]);
   ASSERT_EQ(vec->src[0].swizzle[0], 0);

   /* Component y comes from v[1]. */
   nir_intrinsic_instr *load_for_y = nir_src_as_intrinsic(vec->src[1].src);
   ASSERT_EQ(nir_intrinsic_get_var(load_for_y, 0), v[1]);
   ASSERT_EQ(vec->src[1].swizzle[0], 1);

   /* Components z comes from s[0]. */
   nir_intrinsic_instr *load_for_z = nir_src_as_intrinsic(vec->src[2].src);
   ASSERT_EQ(nir_intrinsic_get_var(load_for_z, 0), s[0]);
   /* Scalars have a single component, hence swizzle 0. */
   ASSERT_EQ(vec->src[2].swizzle[0], 0);

   /* Component w comes from s[1]. */
   nir_intrinsic_instr *load_for_w = nir_src_as_intrinsic(vec->src[3].src);
   ASSERT_EQ(nir_intrinsic_get_var(load_for_w, 0), s[1]);
   ASSERT_EQ(vec->src[3].swizzle[0], 0);
}
1763 
/* Helper: return component `comp` of a store source as an integer.
 *
 * Works both when the source is a constant and when it is the result of a
 * vec* ALU instruction, in which case the scalar feeding the requested
 * component is chased through the vec.
 */
static int64_t
vec_src_comp_as_int(nir_src src, unsigned comp)
{
   if (nir_src_is_const(src))
      return nir_src_comp_as_int(src, comp);

   assert(src.is_ssa);
   /* Non-constant: must be a vec*; read the scalar it takes for `comp`. */
   nir_ssa_scalar s = { src.ssa, comp };
   assert(nir_op_is_vec(nir_ssa_scalar_alu_op(s)));
   return nir_ssa_scalar_as_int(nir_ssa_scalar_chase_alu_src(s, comp));
}
1775 
/* A volatile store must not be combined with its neighbors: the two stores
 * before it merge (mask 0x3), the volatile store survives untouched
 * (mask 0xf), and the two stores after it merge (mask 0xc). */
TEST_F(nir_combine_stores_test, store_volatile)
{
   nir_variable *out = create_ivec4(nir_var_shader_out, "out");

   nir_store_var(b, out, nir_imm_ivec4(b, 0, 0, 0, 0), 1 << 0);
   nir_store_var(b, out, nir_imm_ivec4(b, 1, 1, 1, 1), 1 << 1);
   nir_store_var_volatile(b, out, nir_imm_ivec4(b, -1, -2, -3, -4), 0xf);
   nir_store_var(b, out, nir_imm_ivec4(b, 2, 2, 2, 2), 1 << 2);
   nir_store_var(b, out, nir_imm_ivec4(b, 3, 3, 3, 3), 1 << 3);

   nir_validate_shader(b->shader, NULL);

   bool progress = nir_opt_combine_stores(b->shader, nir_var_shader_out);
   ASSERT_TRUE(progress);

   nir_validate_shader(b->shader, NULL);

   /* Clean up the stored values */
   nir_opt_constant_folding(b->shader);
   nir_opt_dce(b->shader);

   ASSERT_EQ(count_intrinsics(nir_intrinsic_store_deref), 3);

   /* Pre-volatile stores combined: x=0, y=1. */
   nir_intrinsic_instr *first = get_intrinsic(nir_intrinsic_store_deref, 0);
   ASSERT_EQ(nir_intrinsic_write_mask(first), 0x3);
   ASSERT_EQ(vec_src_comp_as_int(first->src[1], 0), 0);
   ASSERT_EQ(vec_src_comp_as_int(first->src[1], 1), 1);

   /* The volatile store keeps its full mask and original values. */
   nir_intrinsic_instr *second = get_intrinsic(nir_intrinsic_store_deref, 1);
   ASSERT_EQ(nir_intrinsic_write_mask(second), 0xf);
   ASSERT_EQ(vec_src_comp_as_int(second->src[1], 0), -1);
   ASSERT_EQ(vec_src_comp_as_int(second->src[1], 1), -2);
   ASSERT_EQ(vec_src_comp_as_int(second->src[1], 2), -3);
   ASSERT_EQ(vec_src_comp_as_int(second->src[1], 3), -4);

   /* Post-volatile stores combined: z=2, w=3. */
   nir_intrinsic_instr *third = get_intrinsic(nir_intrinsic_store_deref, 2);
   ASSERT_EQ(nir_intrinsic_write_mask(third), 0xc);
   ASSERT_EQ(vec_src_comp_as_int(third->src[1], 2), 2);
   ASSERT_EQ(vec_src_comp_as_int(third->src[1], 3), 3);
}
1816 
/* An int[4] function temp accessed only through constant indices must be
 * split into four scalar variables, removing every array deref. */
TEST_F(nir_split_vars_test, simple_split)
{
   nir_variable **in = create_many_int(nir_var_shader_in, "in", 4);

   const glsl_type *arr_type = glsl_array_type(glsl_int_type(), 4, 0);
   nir_variable *arr = create_var(nir_var_function_temp, arr_type, "temp");
   nir_deref_instr *arr_deref = nir_build_deref_var(b, arr);

   /* temp[i] = in[i], every index an immediate. */
   for (unsigned i = 0; i < 4; i++) {
      nir_deref_instr *elem = nir_build_deref_array_imm(b, arr_deref, i);
      nir_store_deref(b, elem, nir_load_var(b, in[i]), 1);
   }

   nir_validate_shader(b->shader, NULL);
   ASSERT_EQ(count_derefs(nir_deref_type_array), 4);
   ASSERT_EQ(count_function_temp_vars(), 1);

   bool progress = nir_split_array_vars(b->shader, nir_var_function_temp);
   EXPECT_TRUE(progress);

   nir_validate_shader(b->shader, NULL);

   /* No array derefs remain and each element became its own variable. */
   ASSERT_EQ(count_derefs(nir_deref_type_array), 0);
   ASSERT_EQ(count_function_temp_vars(), 4);
}
1837 
/* Only the plain int[4] variable (temp2) may be split; the array-of-struct
 * variable (temp) must be left intact.  Afterwards the 4 remaining array
 * derefs all point at struct-typed elements of temp, and the variable count
 * is 5 (temp + the 4 scalars split out of temp2). */
TEST_F(nir_split_vars_test, simple_no_split_array_struct)
{
   nir_variable **in = create_many_int(nir_var_shader_in, "in", 4);
   /* NOTE(review): only type/name/location/offset are assigned below; the
    * remaining glsl_struct_field members rely on default initialization —
    * confirm the C++ default constructor covers them. */
   struct glsl_struct_field field;

   field.type = glsl_float_type();
   field.name = ralloc_asprintf(b, "field1");
   field.location = -1;
   field.offset = 0;

   const struct glsl_type *st_type = glsl_struct_type(&field, 1, "struct", false);
   nir_variable *temp = create_var(nir_var_function_temp, glsl_array_type(st_type, 4, 0),
                                   "temp");

   nir_variable *temp2 = create_var(nir_var_function_temp, glsl_array_type(glsl_int_type(), 4, 0), "temp2");

   nir_deref_instr *temp_deref = nir_build_deref_var(b, temp);
   nir_deref_instr *temp2_deref = nir_build_deref_var(b, temp2);
   /* temp2[i] = in[i]: a splittable int array. */
   for (int i = 0; i < 4; i++)
      nir_store_deref(b, nir_build_deref_array_imm(b, temp2_deref, i), nir_load_var(b, in[i]), 1);

   /* temp[i].field1 = in[i]: array of structs, not splittable. */
   for (int i = 0; i < 4; i++)
      nir_store_deref(b, nir_build_deref_struct(b, nir_build_deref_array_imm(b, temp_deref, i), 0), nir_load_var(b, in[i]), 1);

   nir_validate_shader(b->shader, NULL);
   ASSERT_EQ(count_derefs(nir_deref_type_array), 8);
   ASSERT_EQ(count_derefs(nir_deref_type_struct), 4);
   ASSERT_EQ(count_function_temp_vars(), 2);

   bool progress = nir_split_array_vars(b->shader, nir_var_function_temp);
   EXPECT_TRUE(progress);

   nir_validate_shader(b->shader, NULL);

   /* Only temp2's 4 array derefs disappeared. */
   ASSERT_EQ(count_derefs(nir_deref_type_array), 4);
   ASSERT_EQ(count_derefs(nir_deref_type_struct), 4);
   for (int i = 0; i < 4; i++) {
      nir_deref_instr *deref = get_deref(nir_deref_type_array, i);
      ASSERT_TRUE(deref);
      /* The surviving array derefs must belong to the struct array. */
      ASSERT_TRUE(glsl_type_is_struct(deref->type));
   }

   ASSERT_EQ(count_function_temp_vars(), 5);
}
1882 
/* Same as simple_split but for shader_temp storage: an int[4] shader temp
 * with constant-only indexing is split into four scalars. */
TEST_F(nir_split_vars_test, simple_split_shader_temp)
{
   nir_variable **in = create_many_int(nir_var_shader_in, "in", 4);

   const glsl_type *arr_type = glsl_array_type(glsl_int_type(), 4, 0);
   nir_variable *arr = create_var(nir_var_shader_temp, arr_type, "temp");
   nir_deref_instr *arr_deref = nir_build_deref_var(b, arr);

   /* temp[i] = in[i], all indices immediate. */
   for (unsigned i = 0; i < 4; i++) {
      nir_deref_instr *elem = nir_build_deref_array_imm(b, arr_deref, i);
      nir_store_deref(b, elem, nir_load_var(b, in[i]), 1);
   }

   nir_validate_shader(b->shader, NULL);
   ASSERT_EQ(count_derefs(nir_deref_type_array), 4);
   ASSERT_EQ(count_shader_temp_vars(), 1);

   bool progress = nir_split_array_vars(b->shader, nir_var_shader_temp);
   EXPECT_TRUE(progress);

   nir_validate_shader(b->shader, NULL);
   ASSERT_EQ(count_derefs(nir_deref_type_array), 0);
   ASSERT_EQ(count_shader_temp_vars(), 4);
}
1904 
/* Constant out-of-bounds writes (indices 4 and 5 into an int[4]) must not
 * prevent the split: afterwards no array derefs remain and only the four
 * in-bounds elements exist as scalars. */
TEST_F(nir_split_vars_test, simple_oob)
{
   nir_variable **in = create_many_int(nir_var_shader_in, "in", 6);

   const glsl_type *arr_type = glsl_array_type(glsl_int_type(), 4, 0);
   nir_variable *arr = create_var(nir_var_function_temp, arr_type, "temp");
   nir_deref_instr *arr_deref = nir_build_deref_var(b, arr);

   /* Six stores into a four-element array: the last two are out of bounds. */
   for (unsigned i = 0; i < 6; i++) {
      nir_deref_instr *elem = nir_build_deref_array_imm(b, arr_deref, i);
      nir_store_deref(b, elem, nir_load_var(b, in[i]), 1);
   }

   nir_validate_shader(b->shader, NULL);
   ASSERT_EQ(count_derefs(nir_deref_type_array), 6);
   ASSERT_EQ(count_function_temp_vars(), 1);

   bool progress = nir_split_array_vars(b->shader, nir_var_function_temp);
   EXPECT_TRUE(progress);

   nir_validate_shader(b->shader, NULL);
   ASSERT_EQ(count_derefs(nir_deref_type_array), 0);
   ASSERT_EQ(count_function_temp_vars(), 4);
}
1926 
/* Only two of the four elements are ever written; the pass still splits
 * the whole array into four scalars — it does not delete the unused ones. */
TEST_F(nir_split_vars_test, simple_unused)
{
   nir_variable **in = create_many_int(nir_var_shader_in, "in", 2);

   const glsl_type *arr_type = glsl_array_type(glsl_int_type(), 4, 0);
   nir_variable *arr = create_var(nir_var_function_temp, arr_type, "temp");
   nir_deref_instr *arr_deref = nir_build_deref_var(b, arr);

   /* Write just temp[0] and temp[1]. */
   for (unsigned i = 0; i < 2; i++) {
      nir_deref_instr *elem = nir_build_deref_array_imm(b, arr_deref, i);
      nir_store_deref(b, elem, nir_load_var(b, in[i]), 1);
   }

   nir_validate_shader(b->shader, NULL);
   ASSERT_EQ(count_derefs(nir_deref_type_array), 2);
   ASSERT_EQ(count_function_temp_vars(), 1);

   bool progress = nir_split_array_vars(b->shader, nir_var_function_temp);
   EXPECT_TRUE(progress);

   nir_validate_shader(b->shader, NULL);
   ASSERT_EQ(count_derefs(nir_deref_type_array), 0);
   /* this pass doesn't remove the unused ones */
   ASSERT_EQ(count_function_temp_vars(), 4);
}
1949 
/* An int[4][4] temp accessed with constant indices at both levels is split
 * all the way down into 16 scalar variables. */
TEST_F(nir_split_vars_test, two_level_split)
{
   nir_variable **in = create_many_int(nir_var_shader_in, "in", 4);
   nir_variable *temp = create_var(nir_var_function_temp, glsl_array_type(glsl_array_type(glsl_int_type(), 4, 0), 4, 0),
                                   "temp");
   nir_deref_instr *temp_deref = nir_build_deref_var(b, temp);
   /* temp[i][j] = in[i] for every (i, j) pair, all immediate indices. */
   for (int i = 0; i < 4; i++) {
      nir_deref_instr *level0 = nir_build_deref_array_imm(b, temp_deref, i);
      for (int j = 0; j < 4; j++) {
         nir_deref_instr *level1 = nir_build_deref_array_imm(b, level0, j);
         nir_store_deref(b, level1, nir_load_var(b, in[i]), 1);
      }
   }

   nir_validate_shader(b->shader, NULL);
   /* 4 outer derefs + 16 inner derefs. */
   ASSERT_EQ(count_derefs(nir_deref_type_array), 20);
   ASSERT_EQ(count_function_temp_vars(), 1);

   bool progress = nir_split_array_vars(b->shader, nir_var_function_temp);
   EXPECT_TRUE(progress);

   nir_validate_shader(b->shader, NULL);
   ASSERT_EQ(count_derefs(nir_deref_type_array), 0);
   ASSERT_EQ(count_function_temp_vars(), 16);
}
1975 
/* Indirect (non-constant) indexing via the 'ind' input must prevent any
 * split: no progress, IR unchanged. */
TEST_F(nir_split_vars_test, simple_dont_split)
{
   nir_variable **in = create_many_int(nir_var_shader_in, "in", 4);
   nir_variable *temp = create_var(nir_var_function_temp, glsl_array_type(glsl_int_type(), 4, 0),
                                   "temp");
   nir_variable *ind = create_int(nir_var_shader_in, "ind");

   nir_deref_instr *ind_deref = nir_build_deref_var(b, ind);
   nir_deref_instr *temp_deref = nir_build_deref_var(b, temp);

   /* Every store uses the SSA value of 'ind' as the array index. */
   for (int i = 0; i < 4; i++)
      nir_store_deref(b, nir_build_deref_array(b, temp_deref, &ind_deref->dest.ssa), nir_load_var(b, in[i]), 1);

   nir_validate_shader(b->shader, NULL);
   ASSERT_EQ(count_derefs(nir_deref_type_array), 4);
   ASSERT_EQ(count_function_temp_vars(), 1);

   bool progress = nir_split_array_vars(b->shader, nir_var_function_temp);
   EXPECT_FALSE(progress);

   nir_validate_shader(b->shader, NULL);
   ASSERT_EQ(count_derefs(nir_deref_type_array), 4);
   ASSERT_EQ(count_function_temp_vars(), 1);
}
2000 
/* int[4][6] temp where the OUTER index is indirect and the inner is
 * constant: only the inner dimension can be split, producing 6 variables
 * while the 24 indirect outer accesses remain as array derefs. */
TEST_F(nir_split_vars_test, twolevel_dont_split_lvl_0)
{
   nir_variable **in = create_many_int(nir_var_shader_in, "in", 4);
   nir_variable *temp = create_var(nir_var_function_temp, glsl_array_type(glsl_array_type(glsl_int_type(), 6, 0), 4, 0),
                                   "temp");
   nir_variable *ind = create_int(nir_var_shader_in, "ind");

   nir_deref_instr *ind_deref = nir_build_deref_var(b, ind);
   nir_deref_instr *temp_deref = nir_build_deref_var(b, temp);

   /* temp[ind][j] = in[i]: indirect outer index, constant inner index. */
   for (int i = 0; i < 4; i++) {
      nir_deref_instr *level0 = nir_build_deref_array(b, temp_deref, &ind_deref->dest.ssa);
      for (int j = 0; j < 6; j++) {
         nir_deref_instr *level1 = nir_build_deref_array_imm(b, level0, j);
         nir_store_deref(b, level1, nir_load_var(b, in[i]), 1);
      }
   }

   nir_validate_shader(b->shader, NULL);
   /* 4 outer derefs + 24 inner derefs. */
   ASSERT_EQ(count_derefs(nir_deref_type_array), 28);
   ASSERT_EQ(count_function_temp_vars(), 1);

   bool progress = nir_split_array_vars(b->shader, nir_var_function_temp);
   EXPECT_TRUE(progress);

   nir_validate_shader(b->shader, NULL);
   ASSERT_EQ(count_derefs(nir_deref_type_array), 24);
   ASSERT_EQ(count_function_temp_vars(), 6);
}
2030 
/* int[4][6] temp where the outer index is constant and the INNER index is
 * indirect: only the outer dimension is split, producing 4 variables while
 * the 24 indirect inner accesses remain as array derefs. */
TEST_F(nir_split_vars_test, twolevel_dont_split_lvl_1)
{
   nir_variable **in = create_many_int(nir_var_shader_in, "in", 6);
   nir_variable *temp = create_var(nir_var_function_temp, glsl_array_type(glsl_array_type(glsl_int_type(), 6, 0), 4, 0),
                                   "temp");
   nir_variable *ind = create_int(nir_var_shader_in, "ind");

   nir_deref_instr *ind_deref = nir_build_deref_var(b, ind);
   nir_deref_instr *temp_deref = nir_build_deref_var(b, temp);

   for (int i = 0; i < 4; i++) {
      nir_deref_instr *level0 = nir_build_deref_array_imm(b, temp_deref, i);
      for (int j = 0; j < 6; j++) {
         /* just add the inner index to get some different derefs */
         nir_deref_instr *level1 = nir_build_deref_array(b, level0, nir_iadd(b, &ind_deref->dest.ssa, nir_imm_int(b, j)));
         nir_store_deref(b, level1, nir_load_var(b, in[i]), 1);
      }
   }

   nir_validate_shader(b->shader, NULL);
   /* 4 outer derefs + 24 inner derefs. */
   ASSERT_EQ(count_derefs(nir_deref_type_array), 28);
   ASSERT_EQ(count_function_temp_vars(), 1);

   bool progress = nir_split_array_vars(b->shader, nir_var_function_temp);
   EXPECT_TRUE(progress);

   nir_validate_shader(b->shader, NULL);
   ASSERT_EQ(count_derefs(nir_deref_type_array), 24);
   ASSERT_EQ(count_function_temp_vars(), 4);
}
2061 
/* Two int[4] temps, each written only through constant indices, must both
 * be split: eight scalar temps, zero array derefs. */
TEST_F(nir_split_vars_test, split_multiple_store)
{
   nir_variable **in = create_many_int(nir_var_shader_in, "in", 4);

   const glsl_type *arr_type = glsl_array_type(glsl_int_type(), 4, 0);
   nir_variable *arr0 = create_var(nir_var_function_temp, arr_type, "temp");
   nir_variable *arr1 = create_var(nir_var_function_temp, arr_type, "temp2");

   nir_deref_instr *arr0_deref = nir_build_deref_var(b, arr0);
   nir_deref_instr *arr1_deref = nir_build_deref_var(b, arr1);

   /* Fill both arrays from the inputs, immediate indices only. */
   for (unsigned i = 0; i < 4; i++)
      nir_store_deref(b, nir_build_deref_array_imm(b, arr0_deref, i), nir_load_var(b, in[i]), 1);

   for (unsigned i = 0; i < 4; i++)
      nir_store_deref(b, nir_build_deref_array_imm(b, arr1_deref, i), nir_load_var(b, in[i]), 1);

   nir_validate_shader(b->shader, NULL);
   ASSERT_EQ(count_derefs(nir_deref_type_array), 8);
   ASSERT_EQ(count_function_temp_vars(), 2);

   bool progress = nir_split_array_vars(b->shader, nir_var_function_temp);
   EXPECT_TRUE(progress);

   nir_validate_shader(b->shader, NULL);
   ASSERT_EQ(count_derefs(nir_deref_type_array), 0);
   ASSERT_EQ(count_function_temp_vars(), 8);
}
2090 
/* Loads and stores between two int[4] temps with constant indices: both
 * variables are split and all 12 array derefs (4 fills + 4 load/store
 * pairs) are eliminated. */
TEST_F(nir_split_vars_test, split_load_store)
{
   nir_variable **in = create_many_int(nir_var_shader_in, "in", 4);
   nir_variable *temp = create_var(nir_var_function_temp, glsl_array_type(glsl_int_type(), 4, 0),
                                   "temp");
   nir_variable *temp2 = create_var(nir_var_function_temp, glsl_array_type(glsl_int_type(), 4, 0),
                                    "temp2");

   nir_deref_instr *temp_deref = nir_build_deref_var(b, temp);
   nir_deref_instr *temp2_deref = nir_build_deref_var(b, temp2);

   /* temp[i] = in[i]. */
   for (int i = 0; i < 4; i++)
      nir_store_deref(b, nir_build_deref_array_imm(b, temp_deref, i), nir_load_var(b, in[i]), 1);

   /* temp2[i] = temp[i]. */
   for (int i = 0; i < 4; i++) {
      nir_deref_instr *store_deref = nir_build_deref_array_imm(b, temp2_deref, i);
      nir_deref_instr *load_deref = nir_build_deref_array_imm(b, temp_deref, i);
      nir_store_deref(b, store_deref, nir_load_deref(b, load_deref), 1);
   }

   nir_validate_shader(b->shader, NULL);
   ASSERT_EQ(count_derefs(nir_deref_type_array), 12);
   ASSERT_EQ(count_function_temp_vars(), 2);

   bool progress = nir_split_array_vars(b->shader, nir_var_function_temp);
   EXPECT_TRUE(progress);

   nir_validate_shader(b->shader, NULL);
   ASSERT_EQ(count_derefs(nir_deref_type_array), 0);
   ASSERT_EQ(count_function_temp_vars(), 8);
}
2122 
/* Element-wise copy_deref between two int[4] temps with constant indices:
 * both variables are split and every array deref disappears. */
TEST_F(nir_split_vars_test, split_copy)
{
   nir_variable **in = create_many_int(nir_var_shader_in, "in", 4);
   nir_variable *temp = create_var(nir_var_function_temp, glsl_array_type(glsl_int_type(), 4, 0),
                                   "temp");
   nir_variable *temp2 = create_var(nir_var_function_temp, glsl_array_type(glsl_int_type(), 4, 0),
                                    "temp2");

   nir_deref_instr *temp_deref = nir_build_deref_var(b, temp);
   nir_deref_instr *temp2_deref = nir_build_deref_var(b, temp2);

   /* temp[i] = in[i]. */
   for (int i = 0; i < 4; i++)
      nir_store_deref(b, nir_build_deref_array_imm(b, temp_deref, i), nir_load_var(b, in[i]), 1);

   /* copy temp[i] -> temp2[i] using copy_deref rather than load/store. */
   for (int i = 0; i < 4; i++) {
      nir_deref_instr *store_deref = nir_build_deref_array_imm(b, temp2_deref, i);
      nir_deref_instr *load_deref = nir_build_deref_array_imm(b, temp_deref, i);
      nir_copy_deref(b, store_deref, load_deref);
   }

   nir_validate_shader(b->shader, NULL);
   ASSERT_EQ(count_derefs(nir_deref_type_array), 12);
   ASSERT_EQ(count_function_temp_vars(), 2);

   bool progress = nir_split_array_vars(b->shader, nir_var_function_temp);
   EXPECT_TRUE(progress);

   nir_validate_shader(b->shader, NULL);
   ASSERT_EQ(count_derefs(nir_deref_type_array), 0);
   ASSERT_EQ(count_function_temp_vars(), 8);
}
2154 
/* A wildcard-to-wildcard copy between two int[4] temps must be expanded by
 * the split into one copy_deref per element (4 total), with both variables
 * split into scalars and no array or wildcard derefs left. */
TEST_F(nir_split_vars_test, split_wildcard_copy)
{
   nir_variable **in = create_many_int(nir_var_shader_in, "in", 4);
   nir_variable *temp = create_var(nir_var_function_temp, glsl_array_type(glsl_int_type(), 4, 0),
                                   "temp");
   nir_variable *temp2 = create_var(nir_var_function_temp, glsl_array_type(glsl_int_type(), 4, 0),
                                    "temp2");

   nir_deref_instr *temp_deref = nir_build_deref_var(b, temp);
   nir_deref_instr *temp2_deref = nir_build_deref_var(b, temp2);

   /* temp[i] = in[i]. */
   for (int i = 0; i < 4; i++)
      nir_store_deref(b, nir_build_deref_array_imm(b, temp_deref, i), nir_load_var(b, in[i]), 1);

   /* temp2[*] = temp[*]: one copy covering the whole array. */
   nir_deref_instr *src_wildcard = nir_build_deref_array_wildcard(b, temp_deref);
   nir_deref_instr *dst_wildcard = nir_build_deref_array_wildcard(b, temp2_deref);

   nir_copy_deref(b, dst_wildcard, src_wildcard);

   nir_validate_shader(b->shader, NULL);
   ASSERT_EQ(count_derefs(nir_deref_type_array), 4);
   ASSERT_EQ(count_derefs(nir_deref_type_array_wildcard), 2);
   ASSERT_EQ(count_function_temp_vars(), 2);
   ASSERT_EQ(count_intrinsics(nir_intrinsic_copy_deref), 1);

   bool progress = nir_split_array_vars(b->shader, nir_var_function_temp);
   EXPECT_TRUE(progress);

   nir_validate_shader(b->shader, NULL);
   ASSERT_EQ(count_derefs(nir_deref_type_array), 0);
   ASSERT_EQ(count_derefs(nir_deref_type_array_wildcard), 0);
   ASSERT_EQ(count_function_temp_vars(), 8);
   /* The single wildcard copy became one copy per element. */
   ASSERT_EQ(count_intrinsics(nir_intrinsic_copy_deref), 4);
}
2189 
/* A shader temp that is never referenced directly but serves as the
 * pointer_initializer of a used variable must be kept alive by
 * nir_remove_dead_variables. */
TEST_F(nir_remove_dead_variables_test, pointer_initializer_used)
{
   nir_variable *init_src = create_int(nir_var_shader_temp, "x");
   nir_variable *initialized = create_int(nir_var_shader_temp, "y");
   initialized->pointer_initializer = init_src;
   nir_variable *out = create_int(nir_var_shader_out, "out");

   nir_validate_shader(b->shader, NULL);

   /* Only 'y' is referenced directly; 'x' is reachable solely through the
    * pointer-initializer link. */
   nir_copy_var(b, out, initialized);

   bool progress = nir_remove_dead_variables(b->shader, nir_var_all, NULL);
   EXPECT_FALSE(progress);

   nir_validate_shader(b->shader, NULL);

   unsigned num_vars = 0;
   nir_foreach_variable_in_shader(var, b->shader)
      num_vars++;

   /* x, y and out must all survive. */
   ASSERT_EQ(num_vars, 3);
}
2212 
/* A chain of shader temps linked only through pointer_initializer, with no
 * actual uses anywhere, is entirely dead and must be removed. */
TEST_F(nir_remove_dead_variables_test, pointer_initializer_dead)
{
   nir_variable *first = create_int(nir_var_shader_temp, "x");
   nir_variable *second = create_int(nir_var_shader_temp, "y");
   nir_variable *third = create_int(nir_var_shader_temp, "z");
   second->pointer_initializer = first;
   third->pointer_initializer = second;

   nir_validate_shader(b->shader, NULL);

   bool progress = nir_remove_dead_variables(b->shader, nir_var_all, NULL);
   EXPECT_TRUE(progress);

   nir_validate_shader(b->shader, NULL);

   unsigned num_vars = 0;
   nir_foreach_variable_in_shader(var, b->shader)
      num_vars++;

   /* Nothing references the chain, so every variable goes away. */
   ASSERT_EQ(num_vars, 0);
}
2234 
2235 
2236