/*
 * Copyright © 2022 Imagination Technologies Ltd.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include "compiler/spirv/nir_spirv.h"
#include "nir/nir.h"
#include "nir/nir_schedule.h"
#include "rogue_nir.h"
#include "rogue_operand.h"

/**
 * \file rogue_nir.c
 *
 * \brief Contains NIR-specific functions.
 */

/**
 * \brief SPIR-V to NIR compilation options.
 */
static const struct spirv_to_nir_options spirv_options = {
   .environment = NIR_SPIRV_VULKAN,

   /* Buffer address: (descriptor_set, binding), offset. */
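   /* I.e. a 32-bit vec2 index identifying the buffer plus a 32-bit byte
    * offset into it. */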
   .ubo_addr_format = nir_address_format_vec2_index_32bit_offset,
};
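
/**
 * \brief NIR shader compiler options.
 *
 * fsat is lowered to an fmin/fmax pair, and 32-bit fmul/fadd pairs are fused
 * into ffma.
 */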
static const nir_shader_compiler_options nir_options = {
   .lower_fsat = true,
   .fuse_ffma32 = true,
};

const struct spirv_to_nir_options *
rogue_get_spirv_options(const struct rogue_compiler *compiler)
{
   return &spirv_options;
}

const nir_shader_compiler_options *
rogue_get_compiler_options(const struct rogue_compiler *compiler)
{
   return &nir_options;
}
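
/**
 * \brief Type size callback for nir_lower_io; returns the number of vec4
 * attribute slots occupied by \p type.
 */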
static int rogue_glsl_type_size(const struct glsl_type *type, bool bindless)
{
   return glsl_count_attribute_slots(type, false);
}

/**
 * \brief Applies optimizations and passes required to lower the NIR shader
 * into a form suitable for lowering to Rogue IR.
 *
 * \param[in] ctx Shared multi-stage build context.
 * \param[in] nir NIR shader.
 * \param[in] stage Shader stage.
 * \return true if successful, otherwise false.
 */
bool rogue_nir_passes(struct rogue_build_ctx *ctx,
                      nir_shader *nir,
                      gl_shader_stage stage)
{
   bool progress;

   nir_validate_shader(nir, "after spirv_to_nir");

   /* Splitting. */
   NIR_PASS_V(nir, nir_split_var_copies);
   NIR_PASS_V(nir, nir_split_per_member_structs);

   /* Ensure fs outputs are in the [0.0f...1.0f] range. */
   NIR_PASS_V(nir, nir_lower_clamp_color_outputs);

   /* Replace references to I/O variables with intrinsics. */
   NIR_PASS_V(nir,
              nir_lower_io,
              nir_var_shader_in | nir_var_shader_out,
              rogue_glsl_type_size,
              (nir_lower_io_options)0);

   /* Lower input loads to scalars (they map to single registers later). */
   NIR_PASS_V(nir, nir_lower_io_to_scalar, nir_var_shader_in);

   /* Optimize GL access qualifiers. */
   const nir_opt_access_options opt_access_options = {
      .is_vulkan = true,
      .infer_non_readable = true,
   };
   NIR_PASS_V(nir, nir_opt_access, &opt_access_options);

   /* Apply PFO code to the fragment shader output. */
   if (nir->info.stage == MESA_SHADER_FRAGMENT)
      NIR_PASS_V(nir, rogue_nir_pfo);

   /* Lower output stores to scalars (they map to single registers later). */
   NIR_PASS_V(nir, nir_lower_io_to_scalar, nir_var_shader_out);

   /* Lower ALU operations to scalars. */
   NIR_PASS_V(nir, nir_lower_alu_to_scalar, NULL, NULL);

   /* Algebraic opts. */
   do {
      progress = false;

      NIR_PASS(progress, nir, nir_copy_prop);
      NIR_PASS(progress, nir, nir_opt_cse);
      NIR_PASS(progress, nir, nir_opt_algebraic);
      NIR_PASS(progress, nir, nir_opt_constant_folding);
      NIR_PASS(progress, nir, nir_opt_dce);
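      /* Global code motion; the false argument disables value-numbering
       * within the pass. */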
      NIR_PASS_V(nir, nir_opt_gcm, false);
   } while (progress);

   /* Additional I/O lowering. */
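   /* Lower UBO derefs to explicit address arithmetic in the
    * (descriptor set, binding) + offset format chosen in spirv_options. */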
   NIR_PASS_V(nir,
              nir_lower_explicit_io,
              nir_var_mem_ubo,
              spirv_options.ubo_addr_format);
   NIR_PASS_V(nir, rogue_nir_lower_io, NULL);

   /* Late algebraic opts. */
   do {
      progress = false;

      NIR_PASS(progress, nir, nir_opt_algebraic_late);
      NIR_PASS_V(nir, nir_opt_constant_folding);
      NIR_PASS_V(nir, nir_copy_prop);
      NIR_PASS_V(nir, nir_opt_dce);
      NIR_PASS_V(nir, nir_opt_cse);
   } while (progress);

   /* Replace SSA constant references with a register that loads the value. */
   NIR_PASS_V(nir, rogue_nir_constreg);
   /* Remove unused constant registers. */
   NIR_PASS_V(nir, nir_opt_dce);

   /* Move loads to just before they're needed. */
   NIR_PASS_V(nir, nir_opt_move, nir_move_load_ubo | nir_move_load_input);

   /* Convert vecNs to movs so we can sequentially allocate them later. */
   NIR_PASS_V(nir, nir_lower_vec_to_movs, NULL, NULL);

   /* Out of SSA pass. */
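   /* The false argument is phi_webs_only: everything is taken out of SSA,
    * not just phi webs. */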
   NIR_PASS_V(nir, nir_convert_from_ssa, false);

   /* TODO: Re-enable scheduling after register pressure tweaks. */
#if 0
   /* Instruction scheduling. */
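   /* threshold is roughly the register pressure at which the scheduler
    * starts favouring reduced liveness over parallelism; half the
    * temporary register file is a heuristic choice. */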
   struct nir_schedule_options schedule_options = {
      .threshold = ROGUE_MAX_REG_TEMP / 2,
   };
   NIR_PASS_V(nir, nir_schedule, &schedule_options);
#endif

   /* Assign I/O locations. */
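   /* This packs each variable's driver_location contiguously per stage and
    * updates the shader's input/output counts. */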
   nir_assign_io_var_locations(nir,
                               nir_var_shader_in,
                               &nir->num_inputs,
                               nir->info.stage);
   nir_assign_io_var_locations(nir,
                               nir_var_shader_out,
                               &nir->num_outputs,
                               nir->info.stage);

   /* Gather info into nir shader struct. */
   nir_shader_gather_info(nir, nir_shader_get_entrypoint(nir));

   /* Clean-up after passes. */
   nir_sweep(nir);

   nir_validate_shader(nir, "after passes");

   return true;
}