/*
 * Copyright © 2020 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include "intel_nir.h"
#include "brw_nir_rt.h"
#include "brw_nir_rt_builder.h"

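/* Retypes a deref to the given number of components and bit size.  As a
 * sketch (hedged; not the literal NIR syntax), retyping an array deref with
 * a 32-bit index to 1x64 also rewrites the index to match:
 *
 *    %idx64 = i2i64 %idx32
 *    %deref = deref_array &arr[%idx64]
 */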
static bool
resize_deref(nir_builder *b, nir_deref_instr *deref,
             unsigned num_components, unsigned bit_size)
{
   if (deref->def.num_components == num_components &&
       deref->def.bit_size == bit_size)
      return false;

   /* NIR requires array indices to match the deref bit size */
   if (deref->def.bit_size != bit_size &&
       (deref->deref_type == nir_deref_type_array ||
        deref->deref_type == nir_deref_type_ptr_as_array)) {
      b->cursor = nir_before_instr(&deref->instr);
      nir_def *idx;
      if (nir_src_is_const(deref->arr.index)) {
         idx = nir_imm_intN_t(b, nir_src_as_int(deref->arr.index), bit_size);
      } else {
         idx = nir_i2iN(b, deref->arr.index.ssa, bit_size);
      }
      nir_src_rewrite(&deref->arr.index, idx);
   }

   deref->def.num_components = num_components;
   deref->def.bit_size = bit_size;

   return true;
}

static bool
lower_rt_io_derefs(nir_shader *shader)
{
   nir_function_impl *impl = nir_shader_get_entrypoint(shader);

   bool progress = false;

   unsigned num_shader_call_vars = 0;
   nir_foreach_variable_with_modes(var, shader, nir_var_shader_call_data)
      num_shader_call_vars++;

   unsigned num_ray_hit_attrib_vars = 0;
   nir_foreach_variable_with_modes(var, shader, nir_var_ray_hit_attrib)
      num_ray_hit_attrib_vars++;

   /* At most one payload is allowed because it's an input.  Technically, this
    * is also true for hit attribute variables.  However, after we inline an
    * any-hit shader into an intersection shader, we can end up with multiple
    * hit attribute variables.  They'll end up mapping to a cast from the same
    * base pointer so this is fine.
    */
   assert(num_shader_call_vars <= 1);

   nir_builder b = nir_builder_at(nir_before_impl(impl));

   nir_def *call_data_addr = NULL;
   if (num_shader_call_vars > 0) {
      assert(shader->scratch_size >= BRW_BTD_STACK_CALLEE_DATA_SIZE);
      call_data_addr =
         brw_nir_rt_load_scratch(&b, BRW_BTD_STACK_CALL_DATA_PTR_OFFSET, 8,
                                 1, 64);
      progress = true;
   }

   gl_shader_stage stage = shader->info.stage;
   nir_def *hit_attrib_addr = NULL;
   if (num_ray_hit_attrib_vars > 0) {
      assert(stage == MESA_SHADER_ANY_HIT ||
             stage == MESA_SHADER_CLOSEST_HIT ||
             stage == MESA_SHADER_INTERSECTION);
      nir_def *hit_addr =
         brw_nir_rt_mem_hit_addr(&b, stage == MESA_SHADER_CLOSEST_HIT);
      /* The vec2 barycentrics are in the 2nd and 3rd dwords of MemHit */
      nir_def *bary_addr = nir_iadd_imm(&b, hit_addr, 4);
      hit_attrib_addr = nir_bcsel(&b, nir_load_leaf_procedural_intel(&b),
                                  brw_nir_rt_hit_attrib_data_addr(&b),
                                  bary_addr);
      progress = true;
   }
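
   /* In C-like pseudocode, the address selection above is roughly
    * (a sketch of the bcsel, not additional lowering):
    *
    *    hit_attrib_addr = leaf_is_procedural ? hit_attrib_data_addr
    *                                         : mem_hit_addr + 4;  // &u/v
    */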

   nir_foreach_block(block, impl) {
      nir_foreach_instr_safe(instr, block) {
         if (instr->type != nir_instr_type_deref)
            continue;

         nir_deref_instr *deref = nir_instr_as_deref(instr);
         if (nir_deref_mode_is(deref, nir_var_shader_call_data)) {
            deref->modes = nir_var_function_temp;
            if (deref->deref_type == nir_deref_type_var) {
               b.cursor = nir_before_instr(&deref->instr);
               nir_deref_instr *cast =
                  nir_build_deref_cast(&b, call_data_addr,
                                       nir_var_function_temp,
                                       deref->var->type, 0);
               nir_def_rewrite_uses(&deref->def, &cast->def);
               nir_instr_remove(&deref->instr);
               progress = true;
            }
         } else if (nir_deref_mode_is(deref, nir_var_ray_hit_attrib)) {
            deref->modes = nir_var_function_temp;
            if (deref->deref_type == nir_deref_type_var) {
               b.cursor = nir_before_instr(&deref->instr);
               nir_deref_instr *cast =
                  nir_build_deref_cast(&b, hit_attrib_addr,
                                       nir_var_function_temp,
                                       deref->type, 0);
               nir_def_rewrite_uses(&deref->def, &cast->def);
               nir_instr_remove(&deref->instr);
               progress = true;
            }
         }

         /* We're going to lower all function_temp memory to scratch using
          * 64-bit addresses.  We need to resize all our derefs first or else
          * nir_lower_explicit_io will have a fit.
          */
         if (nir_deref_mode_is(deref, nir_var_function_temp) &&
             resize_deref(&b, deref, 1, 64))
            progress = true;
      }
   }

   if (progress) {
      nir_metadata_preserve(impl, nir_metadata_block_index |
                                  nir_metadata_dominance);
   } else {
      nir_metadata_preserve(impl, nir_metadata_all);
   }

   return progress;
}

/** Lowers ray-tracing shader I/O and scratch access
 *
 * SPV_KHR_ray_tracing adds three new types of I/O, each of which needs its
 * own bit of special care:
 *
 *  - Shader payload data:  This is represented by the IncomingCallableData
 *    and IncomingRayPayload storage classes, which are both represented by
 *    nir_var_shader_call_data in NIR.  There is at most one of these per
 *    shader and they contain payload data passed down the stack from the
 *    parent shader when it calls executeCallable() or traceRay().  In our
 *    implementation, the actual storage lives in the calling shader's
 *    scratch space and we're passed a pointer to it.
 *
 *  - Hit attribute data:  This is represented by the HitAttribute storage
 *    class in SPIR-V and nir_var_ray_hit_attrib in NIR.  For triangle
 *    geometry, it's supposed to contain two floats which are the barycentric
 *    coordinates.  For AABB/procedural geometry, it contains the hit data
 *    written out by the intersection shader.  In our implementation, it's a
 *    64-bit pointer which points either to the u/v area of the relevant
 *    MemHit data structure or the space right after the HW ray stack entry.
 *
 *  - Shader record buffer data:  This allows read-only access to the data
 *    stored in the SBT right after the bindless shader handles.  It's
 *    effectively a UBO with a magic address.  Coming out of spirv_to_nir,
 *    we get a nir_intrinsic_load_shader_record_ptr which is cast to a
 *    nir_var_mem_global deref and all access happens through that.  The
 *    shader_record_ptr system value is handled in brw_nir_lower_rt_intrinsics
 *    and we assume nir_lower_explicit_io is called elsewhere thanks to
 *    VK_KHR_buffer_device_address so there's really nothing to do here.
 *
 * We also handle lowering any remaining function_temp variables to scratch at
 * this point.  This gets rid of any remaining arrays and also takes care of
 * the sending side of ray payloads where we pass pointers to a function_temp
 * variable down the call stack.
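 *
 * For illustration, a write to a payload member such as
 *
 *    payload.color = vec4(1.0);
 *
 * ends up, after lower_rt_io_derefs and nir_lower_explicit_io below, as
 * roughly (a sketch, not the literal NIR):
 *
 *    addr = load_scratch(BRW_BTD_STACK_CALL_DATA_PTR_OFFSET)   // 1x64
 *    store_global(addr + offsetof(payload, color), ...)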
 */
static void
lower_rt_io_and_scratch(nir_shader *nir)
{
   /* First, we need to ensure all the I/O variables have explicit types.
    * Because these are shader-internal and don't come in from outside, they
    * don't have an explicit memory layout and we have to assign them one.
    */
   NIR_PASS_V(nir, nir_lower_vars_to_explicit_types,
              nir_var_function_temp |
              nir_var_shader_call_data |
              nir_var_ray_hit_attrib,
              glsl_get_natural_size_align_bytes);

   /* Now patch any derefs to I/O vars */
   NIR_PASS_V(nir, lower_rt_io_derefs);

   /* Finally, lower any remaining function_temp, mem_constant, or
    * ray_hit_attrib access to 64-bit global memory access.
    */
   NIR_PASS_V(nir, nir_lower_explicit_io,
              nir_var_function_temp |
              nir_var_mem_constant |
              nir_var_ray_hit_attrib,
              nir_address_format_64bit_global);
}

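/* Builds the "terminate ray" sequence.  In C-like pseudocode, the NIR
 * emitted below is roughly (a sketch):
 *
 *    if (ray_flags & BRW_RT_RAY_FLAG_SKIP_CLOSEST_HIT_SHADER) {
 *       btd_return();   // nothing can observe the committed hit
 *    } else {
 *       commit_hit();
 *       btd_spawn(shader_record_ptr - BRW_RT_SBT_HANDLE_SIZE);  // closest-hit
 *    }
 */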
static void
build_terminate_ray(nir_builder *b)
{
   nir_def *skip_closest_hit = nir_test_mask(b, nir_load_ray_flags(b),
      BRW_RT_RAY_FLAG_SKIP_CLOSEST_HIT_SHADER);
   nir_push_if(b, skip_closest_hit);
   {
      /* The shader that calls traceRay() is unable to access any ray hit
       * information except for that which is explicitly written into the ray
       * payload by shaders invoked during the trace.  If there's no closest-
       * hit shader, then accepting the hit has no observable effect; it's
       * just extra memory traffic for no reason.
       */
      brw_nir_btd_return(b);
      nir_jump(b, nir_jump_halt);
   }
   nir_push_else(b, NULL);
   {
      /* The closest-hit shader is in the same shader group as the any-hit
       * shader that we're currently in.  We can get the address of its SBT
       * handle by looking at the shader record pointer and subtracting the
       * size of an SBT handle.  The BINDLESS_SHADER_RECORD for a closest-hit
       * shader is the first one in the SBT handle.
       */
      nir_def *closest_hit =
         nir_iadd_imm(b, nir_load_shader_record_ptr(b),
                      -BRW_RT_SBT_HANDLE_SIZE);

      brw_nir_rt_commit_hit(b);
      brw_nir_btd_spawn(b, closest_hit);
      nir_jump(b, nir_jump_halt);
   }
   nir_pop_if(b, NULL);
}

/** Lowers away ray walk intrinsics
 *
 * This lowers terminate_ray, ignore_ray_intersection, and the NIR-specific
 * accept_ray_intersection intrinsics to the appropriate Intel-specific
 * intrinsics.
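 *
 * As a rough sketch of the mapping (see the code below for details):
 *
 *    ignore_ray_intersection -> trace_ray_intel(CONTINUE); halt
 *    accept_ray_intersection -> TERMINATE_ON_FIRST_HIT ray flag set
 *                                  ? terminate-ray sequence
 *                                  : trace_ray_intel(COMMIT); halt
 *    terminate_ray           -> terminate-ray sequence (build_terminate_ray)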
 */
static bool
lower_ray_walk_intrinsics(nir_shader *shader,
                          const struct intel_device_info *devinfo)
{
   assert(shader->info.stage == MESA_SHADER_ANY_HIT ||
          shader->info.stage == MESA_SHADER_INTERSECTION);

   nir_function_impl *impl = nir_shader_get_entrypoint(shader);

   nir_builder b = nir_builder_create(impl);

   bool progress = false;
   nir_foreach_block_safe(block, impl) {
      nir_foreach_instr_safe(instr, block) {
         if (instr->type != nir_instr_type_intrinsic)
            continue;

         nir_intrinsic_instr *intrin = nir_instr_as_intrinsic(instr);

         switch (intrin->intrinsic) {
         case nir_intrinsic_ignore_ray_intersection: {
            b.cursor = nir_instr_remove(&intrin->instr);

            /* We put the newly emitted code inside a dummy if because it's
             * going to contain a jump instruction and we don't want to deal
             * with that mess here.  It'll get dealt with by our control-flow
             * optimization passes.
             */
            nir_push_if(&b, nir_imm_true(&b));
            nir_trace_ray_intel(&b,
                                nir_load_btd_global_arg_addr_intel(&b),
                                nir_imm_int(&b, BRW_RT_BVH_LEVEL_OBJECT),
                                nir_imm_int(&b, GEN_RT_TRACE_RAY_CONTINUE),
                                .synchronous = false);
            nir_jump(&b, nir_jump_halt);
            nir_pop_if(&b, NULL);
            progress = true;
            break;
         }

         case nir_intrinsic_accept_ray_intersection: {
            b.cursor = nir_instr_remove(&intrin->instr);

            nir_def *terminate = nir_test_mask(&b, nir_load_ray_flags(&b),
               BRW_RT_RAY_FLAG_TERMINATE_ON_FIRST_HIT);
            nir_push_if(&b, terminate);
            {
               build_terminate_ray(&b);
            }
            nir_push_else(&b, NULL);
            {
               nir_trace_ray_intel(&b,
                                   nir_load_btd_global_arg_addr_intel(&b),
                                   nir_imm_int(&b, BRW_RT_BVH_LEVEL_OBJECT),
                                   nir_imm_int(&b, GEN_RT_TRACE_RAY_COMMIT),
                                   .synchronous = false);
               nir_jump(&b, nir_jump_halt);
            }
            nir_pop_if(&b, NULL);
            progress = true;
            break;
         }

         case nir_intrinsic_terminate_ray: {
            b.cursor = nir_instr_remove(&intrin->instr);
            build_terminate_ray(&b);
            progress = true;
            break;
         }

         default:
            break;
         }
      }
   }

   if (progress) {
      nir_metadata_preserve(impl, nir_metadata_none);
   } else {
      nir_metadata_preserve(impl, nir_metadata_all);
   }

   return progress;
}

void
brw_nir_lower_raygen(nir_shader *nir)
{
   assert(nir->info.stage == MESA_SHADER_RAYGEN);
   NIR_PASS_V(nir, brw_nir_lower_shader_returns);
   lower_rt_io_and_scratch(nir);
}

void
brw_nir_lower_any_hit(nir_shader *nir, const struct intel_device_info *devinfo)
{
   assert(nir->info.stage == MESA_SHADER_ANY_HIT);
   NIR_PASS_V(nir, brw_nir_lower_shader_returns);
   NIR_PASS_V(nir, lower_ray_walk_intrinsics, devinfo);
   lower_rt_io_and_scratch(nir);
}

void
brw_nir_lower_closest_hit(nir_shader *nir)
{
   assert(nir->info.stage == MESA_SHADER_CLOSEST_HIT);
   NIR_PASS_V(nir, brw_nir_lower_shader_returns);
   lower_rt_io_and_scratch(nir);
}

void
brw_nir_lower_miss(nir_shader *nir)
{
   assert(nir->info.stage == MESA_SHADER_MISS);
   NIR_PASS_V(nir, brw_nir_lower_shader_returns);
   lower_rt_io_and_scratch(nir);
}

void
brw_nir_lower_callable(nir_shader *nir)
{
   assert(nir->info.stage == MESA_SHADER_CALLABLE);
   NIR_PASS_V(nir, brw_nir_lower_shader_returns);
   lower_rt_io_and_scratch(nir);
}

void
brw_nir_lower_combined_intersection_any_hit(nir_shader *intersection,
                                            const nir_shader *any_hit,
                                            const struct intel_device_info *devinfo)
{
   assert(intersection->info.stage == MESA_SHADER_INTERSECTION);
   assert(any_hit == NULL || any_hit->info.stage == MESA_SHADER_ANY_HIT);
   NIR_PASS_V(intersection, brw_nir_lower_shader_returns);
   NIR_PASS_V(intersection, brw_nir_lower_intersection_shader,
              any_hit, devinfo);
   NIR_PASS_V(intersection, lower_ray_walk_intrinsics, devinfo);
   lower_rt_io_and_scratch(intersection);
}

static nir_def *
build_load_uniform(nir_builder *b, unsigned offset,
                   unsigned num_components, unsigned bit_size)
{
   return nir_load_uniform(b, num_components, bit_size, nir_imm_int(b, 0),
                           .base = offset,
                           .range = num_components * bit_size / 8);
}

#define load_trampoline_param(b, name, num_components, bit_size) \
   build_load_uniform((b), offsetof(struct brw_rt_raygen_trampoline_params, name), \
                      (num_components), (bit_size))
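
/* A plausible layout for the push-constant parameters, consistent with the
 * loads below and the 32-byte STATIC_ASSERT (the real definition lives
 * elsewhere; field order and padding here are assumptions):
 *
 *    struct brw_rt_raygen_trampoline_params {
 *       uint64_t rt_disp_globals_addr;
 *       uint64_t raygen_bsr_addr;
 *       uint8_t  is_indirect;
 *       uint8_t  local_group_size_log2[3];
 *       uint8_t  pad[12];
 *    };
 */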

nir_shader *
brw_nir_create_raygen_trampoline(const struct brw_compiler *compiler,
                                 void *mem_ctx)
{
   const struct intel_device_info *devinfo = compiler->devinfo;
   const nir_shader_compiler_options *nir_options =
      compiler->nir_options[MESA_SHADER_COMPUTE];

   STATIC_ASSERT(sizeof(struct brw_rt_raygen_trampoline_params) == 32);

   nir_builder b = nir_builder_init_simple_shader(MESA_SHADER_COMPUTE,
                                                  nir_options,
                                                  "RT Ray-Gen Trampoline");
   ralloc_steal(mem_ctx, b.shader);

   b.shader->info.workgroup_size_variable = true;

   /* The RT global data and raygen BINDLESS_SHADER_RECORD addresses are
    * passed in as push constants in the first register.  We deal with the
    * raygen BSR address here; the global data we'll deal with later.
    */
   b.shader->num_uniforms = 32;
   nir_def *raygen_param_bsr_addr =
      load_trampoline_param(&b, raygen_bsr_addr, 1, 64);
   nir_def *is_indirect =
      nir_i2b(&b, load_trampoline_param(&b, is_indirect, 1, 8));
   nir_def *local_shift =
      nir_u2u32(&b, load_trampoline_param(&b, local_group_size_log2, 3, 8));

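   /* For an indirect trace, the push-constant BSR address is a pointer to
    * the real BINDLESS_SHADER_RECORD address, so add one level of
    * indirection.
    */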
   nir_def *raygen_indirect_bsr_addr;
   nir_push_if(&b, is_indirect);
   {
      raygen_indirect_bsr_addr =
         nir_load_global_constant(&b, raygen_param_bsr_addr,
                                  8 /* align */,
                                  1 /* components */,
                                  64 /* bit_size */);
   }
   nir_pop_if(&b, NULL);

   nir_def *raygen_bsr_addr =
      nir_if_phi(&b, raygen_indirect_bsr_addr, raygen_param_bsr_addr);

   nir_def *global_id = nir_load_workgroup_id_zero_base(&b);
   nir_def *simd_channel = nir_load_subgroup_invocation(&b);
   nir_def *local_x =
      nir_ubfe(&b, simd_channel, nir_imm_int(&b, 0),
               nir_channel(&b, local_shift, 0));
   nir_def *local_y =
      nir_ubfe(&b, simd_channel, nir_channel(&b, local_shift, 0),
               nir_channel(&b, local_shift, 1));
   nir_def *local_z =
      nir_ubfe(&b, simd_channel,
               nir_iadd(&b, nir_channel(&b, local_shift, 0),
                        nir_channel(&b, local_shift, 1)),
               nir_channel(&b, local_shift, 2));
   nir_def *launch_id =
      nir_iadd(&b, nir_ishl(&b, global_id, local_shift),
               nir_vec3(&b, local_x, local_y, local_z));
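
   /* Worked example (a sketch): with a local group size of 8x4x1,
    * local_group_size_log2 = (3, 2, 0), so SIMD channel 13 = 0b01101
    * unpacks as x = bits[0:3] = 5, y = bits[3:5] = 1, z = 0, and
    * launch_id = (workgroup_id << (3, 2, 0)) + (5, 1, 0).
    */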

   nir_def *launch_size = nir_load_ray_launch_size(&b);
   nir_push_if(&b, nir_ball(&b, nir_ult(&b, launch_id, launch_size)));
   {
      nir_store_global(&b, brw_nir_rt_sw_hotzone_addr(&b, devinfo), 16,
                       nir_vec4(&b, nir_imm_int(&b, 0), /* Stack ptr */
                                nir_channel(&b, launch_id, 0),
                                nir_channel(&b, launch_id, 1),
                                nir_channel(&b, launch_id, 2)),
                       0xf /* write mask */);

      brw_nir_btd_spawn(&b, raygen_bsr_addr);
   }
   nir_push_else(&b, NULL);
   {
      /* Even though these invocations aren't being used for anything, the
       * hardware allocated stack IDs for them, so they need to be retired.
       */
      brw_nir_btd_retire(&b);
   }
   nir_pop_if(&b, NULL);

   nir_shader *nir = b.shader;
   nir->info.name = ralloc_strdup(nir, "RT: TraceRay trampoline");
   nir_validate_shader(nir, "in brw_nir_create_raygen_trampoline");

   struct brw_nir_compiler_opts opts = {};
   brw_preprocess_nir(compiler, nir, &opts);

   NIR_PASS_V(nir, brw_nir_lower_rt_intrinsics, devinfo);

   b = nir_builder_create(nir_shader_get_entrypoint(b.shader));
   /* brw_nir_lower_rt_intrinsics will leave us with a btd_global_arg_addr
    * intrinsic which doesn't exist in compute shaders.  We also created one
    * above when we generated the BTD spawn intrinsic.  Now we go through and
    * replace them with a uniform load.
    */
   nir_foreach_block(block, b.impl) {
      nir_foreach_instr_safe(instr, block) {
         if (instr->type != nir_instr_type_intrinsic)
            continue;

         nir_intrinsic_instr *intrin = nir_instr_as_intrinsic(instr);
         if (intrin->intrinsic != nir_intrinsic_load_btd_global_arg_addr_intel)
            continue;

         b.cursor = nir_before_instr(&intrin->instr);
         nir_def *global_arg_addr =
            load_trampoline_param(&b, rt_disp_globals_addr, 1, 64);
         nir_def_rewrite_uses(&intrin->def, global_arg_addr);
         nir_instr_remove(instr);
      }
   }

   NIR_PASS_V(nir, brw_nir_lower_cs_intrinsics, devinfo, NULL);

   brw_nir_optimize(nir, devinfo);

   return nir;
}
536