/*
 * Copyright © 2020 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include "anv_nir.h"
#include "nir_builder.h"

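/* Lowers a single load_global_constant_offset/bounded intrinsic.  Constant
 * offsets become 64B block loads (load_global_const_block_intel); dynamic
 * offsets become regular global constant loads, guarded by the bound when
 * one is present.
 */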
static bool
lower_ubo_load_instr(nir_builder *b, nir_instr *instr, UNUSED void *_data)
{
   if (instr->type != nir_instr_type_intrinsic)
      return false;

   nir_intrinsic_instr *load = nir_instr_as_intrinsic(instr);
   if (load->intrinsic != nir_intrinsic_load_global_constant_offset &&
       load->intrinsic != nir_intrinsic_load_global_constant_bounded)
      return false;

   b->cursor = nir_before_instr(instr);

   nir_ssa_def *base_addr = load->src[0].ssa;
   nir_ssa_def *bound = NULL;
   if (load->intrinsic == nir_intrinsic_load_global_constant_bounded)
      bound = load->src[2].ssa;

   unsigned bit_size = load->dest.ssa.bit_size;
   assert(bit_size >= 8 && bit_size % 8 == 0);
   unsigned byte_size = bit_size / 8;

   nir_ssa_def *val;
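   /* Constant offsets are lowered to 64B-aligned block loads; dynamic
    * offsets fall back to a regular global constant load, guarded by the
    * bound when one is provided.
    */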
   if (nir_src_is_const(load->src[1])) {
      uint32_t offset = nir_src_as_uint(load->src[1]);

      /* Things should be component-aligned. */
      assert(offset % byte_size == 0);

      assert(ANV_UBO_ALIGNMENT == 64);

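      /* Split the constant offset into a 64B-aligned part and a suboffset
       * so the loads below can be issued as aligned 64B blocks.
       */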
      unsigned suboffset = offset % 64;
      uint64_t aligned_offset = offset - suboffset;

      /* Load two just in case we go over a 64B boundary */
      nir_ssa_def *data[2];
      for (unsigned i = 0; i < 2; i++) {
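         /* When a bound is provided, only enable the block load if the
          * entire 64B block lies within that bound.
          */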
         nir_ssa_def *pred;
         if (bound) {
            pred = nir_ilt(b, nir_imm_int(b, aligned_offset + i * 64 + 63),
                           bound);
         } else {
            pred = nir_imm_true(b);
         }

         nir_ssa_def *addr = nir_iadd_imm(b, base_addr,
                                          aligned_offset + i * 64);

         data[i] = nir_load_global_const_block_intel(b, 16, addr, pred);
      }

      val = nir_extract_bits(b, data, 2, suboffset * 8,
                             load->num_components, bit_size);
   } else {
      nir_ssa_def *offset = load->src[1].ssa;
      nir_ssa_def *addr = nir_iadd(b, base_addr, nir_u2u64(b, offset));

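      /* For bounded loads with a dynamic offset, wrap the load in an
       * in-bounds check and return zero when the access is out of bounds.
       */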
      if (bound) {
         nir_ssa_def *zero = nir_imm_zero(b, load->num_components, bit_size);

         unsigned load_size = byte_size * load->num_components;
         nir_ssa_def *in_bounds =
            nir_ilt(b, nir_iadd_imm(b, offset, load_size - 1), bound);

         nir_push_if(b, in_bounds);

         nir_ssa_def *load_val =
            nir_build_load_global_constant(b, load->dest.ssa.num_components,
                                           load->dest.ssa.bit_size, addr,
                                           .access = nir_intrinsic_access(load),
                                           .align_mul = nir_intrinsic_align_mul(load),
                                           .align_offset = nir_intrinsic_align_offset(load));

         nir_pop_if(b, NULL);

         val = nir_if_phi(b, load_val, zero);
      } else {
         val = nir_build_load_global_constant(b, load->dest.ssa.num_components,
                                              load->dest.ssa.bit_size, addr,
                                              .access = nir_intrinsic_access(load),
                                              .align_mul = nir_intrinsic_align_mul(load),
                                              .align_offset = nir_intrinsic_align_offset(load));
      }
   }

   nir_ssa_def_rewrite_uses(&load->dest.ssa, val);
   nir_instr_remove(&load->instr);

   return true;
}

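/* Runs lower_ubo_load_instr over every instruction in the shader.  The
 * lowering can insert new control flow for bounded loads, so no metadata is
 * preserved.
 */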
bool
anv_nir_lower_ubo_loads(nir_shader *shader)
{
   return nir_shader_instructions_pass(shader, lower_ubo_load_instr,
                                       nir_metadata_none,
                                       NULL);
}