/*
 * Copyright © 2015 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#pragma once

#include "brw_reg.h"
#include "compiler/nir/nir.h"
#include "brw_compiler.h"
#include "nir_builder.h"

#ifdef __cplusplus
extern "C" {
#endif

extern const struct nir_shader_compiler_options brw_scalar_nir_options;

int type_size_vec4(const struct glsl_type *type, bool bindless);
int type_size_dvec4(const struct glsl_type *type, bool bindless);

static inline int
type_size_scalar_bytes(const struct glsl_type *type, bool bindless)
{
   return glsl_count_dword_slots(type, bindless) * 4;
}

static inline int
type_size_vec4_bytes(const struct glsl_type *type, bool bindless)
{
   return type_size_vec4(type, bindless) * 16;
}
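
/* Both helpers above match the type-size callback signature expected by
 * nir_lower_io(). A minimal usage sketch (hypothetical, not taken from a
 * specific driver) lowering shader I/O to byte offsets with scalar slot
 * sizes:
 *
 *    nir_lower_io(nir, nir_var_shader_in | nir_var_shader_out,
 *                 type_size_scalar_bytes, (nir_lower_io_options)0);
 */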

struct brw_nir_compiler_opts {
   /* Soft floating point implementation shader */
   const nir_shader *softfp64;

   /* Whether robust image access is enabled */
   bool robust_image_access;

   /* Input vertices for TCS stage (0 means dynamic) */
   unsigned input_vertices;
};

/* A UBO surface index can come in two flavors:
 *    - nir_intrinsic_resource_intel
 *    - anything else
 *
 * In the first case, checking that the surface index is constant requires
 * checking resource_intel::src[1]. In any other case it's a simple
 * nir_src_is_const().
 *
 * This function should only be called on src[0] of load_ubo intrinsics.
 */
static inline bool
brw_nir_ubo_surface_index_is_pushable(nir_src src)
{
   nir_intrinsic_instr *intrin =
      src.ssa->parent_instr->type == nir_instr_type_intrinsic ?
      nir_instr_as_intrinsic(src.ssa->parent_instr) : NULL;

   if (intrin && intrin->intrinsic == nir_intrinsic_resource_intel) {
      return (nir_intrinsic_resource_access_intel(intrin) &
              nir_resource_intel_pushable);
   }

   return nir_src_is_const(src);
}

static inline unsigned
brw_nir_ubo_surface_index_get_push_block(nir_src src)
{
   if (nir_src_is_const(src))
      return nir_src_as_uint(src);

   if (!brw_nir_ubo_surface_index_is_pushable(src))
      return UINT32_MAX;

   assert(src.ssa->parent_instr->type == nir_instr_type_intrinsic);

   nir_intrinsic_instr *intrin = nir_instr_as_intrinsic(src.ssa->parent_instr);
   assert(intrin->intrinsic == nir_intrinsic_resource_intel);

   return nir_intrinsic_resource_block_intel(intrin);
}
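
/* Usage sketch (hypothetical): classifying the surface index of a UBO load,
 * where "load" is assumed to be a nir_intrinsic_load_ubo instruction:
 *
 *    if (brw_nir_ubo_surface_index_is_pushable(load->src[0])) {
 *       unsigned block =
 *          brw_nir_ubo_surface_index_get_push_block(load->src[0]);
 *       // record "block" as a push constant candidate
 *    }
 */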

/* This helper returns the binding table index of a surface access (any
 * buffer/image/etc...). It works off the source of one of the intrinsics
 * (load_ubo, load_ssbo, store_ssbo, load_image, store_image, etc...).
 *
 * If the source is constant, then this is the binding table index. If we're
 * going through a resource_intel intrinsic, then we need to check src[1] of
 * that intrinsic.
 */
static inline unsigned
brw_nir_ubo_surface_index_get_bti(nir_src src)
{
   if (nir_src_is_const(src))
      return nir_src_as_uint(src);

   assert(src.ssa->parent_instr->type == nir_instr_type_intrinsic);

   nir_intrinsic_instr *intrin = nir_instr_as_intrinsic(src.ssa->parent_instr);
   if (!intrin || intrin->intrinsic != nir_intrinsic_resource_intel)
      return UINT32_MAX;

   /* In practice we could even drop this check because bindless accesses
    * always operate from a base offset coming from a push constant, so
    * their index can never be constant.
    */
   if (nir_intrinsic_resource_access_intel(intrin) &
       nir_resource_intel_bindless)
      return UINT32_MAX;

   if (!nir_src_is_const(intrin->src[1]))
      return UINT32_MAX;

   return nir_src_as_uint(intrin->src[1]);
}
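
/* Usage sketch (hypothetical): resolving the binding table index of an SSBO
 * load, where "load" is assumed to be a nir_intrinsic_load_ssbo; UINT32_MAX
 * means the index is not known at compile time:
 *
 *    unsigned bti = brw_nir_ubo_surface_index_get_bti(load->src[0]);
 *    if (bti != UINT32_MAX) {
 *       // the surface has a fixed binding table entry
 *    }
 */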

/* Returns true if a fragment shader needs at least one render target */
static inline bool
brw_nir_fs_needs_null_rt(const struct intel_device_info *devinfo,
                         nir_shader *nir,
                         bool multisample_fbo, bool alpha_to_coverage)
{
   assert(nir->info.stage == MESA_SHADER_FRAGMENT);

   /* Null-RT bit in the render target write extended descriptor is only
    * available on Gfx11+.
    */
   if (devinfo->ver < 11)
      return true;

   uint64_t relevant_outputs = 0;
   if (multisample_fbo)
      relevant_outputs |= BITFIELD64_BIT(FRAG_RESULT_SAMPLE_MASK);

   return (alpha_to_coverage ||
           (nir->info.outputs_written & relevant_outputs) != 0);
}

void brw_preprocess_nir(const struct brw_compiler *compiler,
                        nir_shader *nir,
                        const struct brw_nir_compiler_opts *opts);
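
/* Usage sketch (hypothetical driver code): filling in the options declared
 * above and running the common preprocessing on a freshly parsed shader:
 *
 *    struct brw_nir_compiler_opts opts = {
 *       .softfp64 = NULL,
 *       .robust_image_access = true,
 *       .input_vertices = 0,   // dynamic for TCS
 *    };
 *    brw_preprocess_nir(compiler, nir, &opts);
 */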

void
brw_nir_link_shaders(const struct brw_compiler *compiler,
                     nir_shader *producer, nir_shader *consumer);

bool brw_nir_lower_cs_intrinsics(nir_shader *nir,
                                 const struct intel_device_info *devinfo,
                                 struct brw_cs_prog_data *prog_data);
bool brw_nir_lower_alpha_to_coverage(nir_shader *shader,
                                     const struct brw_wm_prog_key *key,
                                     const struct brw_wm_prog_data *prog_data);
void brw_nir_lower_vs_inputs(nir_shader *nir);
void brw_nir_lower_vue_inputs(nir_shader *nir,
                              const struct intel_vue_map *vue_map);
void brw_nir_lower_tes_inputs(nir_shader *nir, const struct intel_vue_map *vue);
void brw_nir_lower_fs_inputs(nir_shader *nir,
                             const struct intel_device_info *devinfo,
                             const struct brw_wm_prog_key *key);
void brw_nir_lower_vue_outputs(nir_shader *nir);
void brw_nir_lower_tcs_outputs(nir_shader *nir, const struct intel_vue_map *vue,
                               enum tess_primitive_mode tes_primitive_mode);
void brw_nir_lower_fs_outputs(nir_shader *nir);

bool brw_nir_lower_cmat(nir_shader *nir, unsigned subgroup_size);

struct brw_nir_lower_storage_image_opts {
   const struct intel_device_info *devinfo;

   bool lower_loads;
   bool lower_stores;
};

bool brw_nir_lower_storage_image(nir_shader *nir,
                                 const struct brw_nir_lower_storage_image_opts *opts);

bool brw_nir_lower_mem_access_bit_sizes(nir_shader *shader,
                                        const struct intel_device_info *devinfo);

bool brw_nir_lower_simd(nir_shader *nir, unsigned dispatch_width);

void brw_postprocess_nir(nir_shader *nir,
                         const struct brw_compiler *compiler,
                         bool debug_enabled,
                         enum brw_robustness_flags robust_flags);

bool brw_nir_apply_attribute_workarounds(nir_shader *nir,
                                         const uint8_t *attrib_wa_flags);

bool brw_nir_apply_trig_workarounds(nir_shader *nir);

bool brw_nir_limit_trig_input_range_workaround(nir_shader *nir);

bool brw_nir_lower_fsign(nir_shader *nir);

bool brw_nir_opt_fsat(nir_shader *);

void brw_nir_apply_key(nir_shader *nir,
                       const struct brw_compiler *compiler,
                       const struct brw_base_prog_key *key,
                       unsigned max_subgroup_size);

unsigned brw_nir_api_subgroup_size(const nir_shader *nir,
                                   unsigned hw_subgroup_size);

enum brw_conditional_mod brw_cmod_for_nir_comparison(nir_op op);
enum lsc_opcode lsc_op_for_nir_intrinsic(const nir_intrinsic_instr *intrin);
enum brw_reg_type brw_type_for_nir_type(const struct intel_device_info *devinfo,
                                        nir_alu_type type);

bool brw_nir_should_vectorize_mem(unsigned align_mul, unsigned align_offset,
                                  unsigned bit_size,
                                  unsigned num_components,
                                  int64_t hole_size,
                                  nir_intrinsic_instr *low,
                                  nir_intrinsic_instr *high,
                                  void *data);
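
/* Usage sketch (hypothetical): brw_nir_should_vectorize_mem matches the
 * callback type used by nir_opt_load_store_vectorize(), e.g.:
 *
 *    const nir_load_store_vectorize_options options = {
 *       .modes = nir_var_mem_ubo | nir_var_mem_ssbo,
 *       .callback = brw_nir_should_vectorize_mem,
 *    };
 *    nir_opt_load_store_vectorize(nir, &options);
 */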

void brw_nir_analyze_ubo_ranges(const struct brw_compiler *compiler,
                                nir_shader *nir,
                                struct brw_ubo_range out_ranges[4]);

void brw_nir_optimize(nir_shader *nir,
                      const struct intel_device_info *devinfo);

nir_shader *brw_nir_create_passthrough_tcs(void *mem_ctx,
                                           const struct brw_compiler *compiler,
                                           const struct brw_tcs_prog_key *key);

#define BRW_NIR_FRAG_OUTPUT_INDEX_SHIFT 0
#define BRW_NIR_FRAG_OUTPUT_INDEX_MASK INTEL_MASK(0, 0)
#define BRW_NIR_FRAG_OUTPUT_LOCATION_SHIFT 1
#define BRW_NIR_FRAG_OUTPUT_LOCATION_MASK INTEL_MASK(31, 1)
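
/* Packing sketch (hypothetical): the fields above combine a fragment output's
 * location with its output index (the dual-source blend index), e.g. via the
 * SET_FIELD/GET_FIELD helpers from brw_reg.h:
 *
 *    unsigned packed = SET_FIELD(location, BRW_NIR_FRAG_OUTPUT_LOCATION) |
 *                      SET_FIELD(index, BRW_NIR_FRAG_OUTPUT_INDEX);
 *    unsigned location = GET_FIELD(packed, BRW_NIR_FRAG_OUTPUT_LOCATION);
 */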

bool brw_nir_move_interpolation_to_top(nir_shader *nir);
nir_def *brw_nir_load_global_const(nir_builder *b,
                                   nir_intrinsic_instr *load_uniform,
                                   nir_def *base_addr,
                                   unsigned off);

const struct glsl_type *brw_nir_get_var_type(const struct nir_shader *nir,
                                             nir_variable *var);

void brw_nir_adjust_payload(nir_shader *shader);

static inline nir_variable_mode
brw_nir_no_indirect_mask(const struct brw_compiler *compiler,
                         gl_shader_stage stage)
{
   nir_variable_mode indirect_mask = (nir_variable_mode) 0;

   switch (stage) {
   case MESA_SHADER_VERTEX:
   case MESA_SHADER_FRAGMENT:
      indirect_mask |= nir_var_shader_in;
      break;

   default:
      /* Everything else can handle indirect inputs */
      break;
   }

   if (stage != MESA_SHADER_TESS_CTRL &&
       stage != MESA_SHADER_TASK &&
       stage != MESA_SHADER_MESH)
      indirect_mask |= nir_var_shader_out;

   return indirect_mask;
}
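
/* Usage sketch (hypothetical): the returned mask is the set of modes whose
 * indirect addressing should be lowered away, typically by feeding it to
 * nir_lower_indirect_derefs():
 *
 *    nir_variable_mode indirects =
 *       brw_nir_no_indirect_mask(compiler, nir->info.stage);
 *    nir_lower_indirect_derefs(nir, indirects, UINT32_MAX);
 */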

bool brw_nir_uses_inline_data(nir_shader *shader);

#ifdef __cplusplus
}
#endif