/*
 * Copyright © 2021 Google
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include "radv_debug.h"
#include "radv_rt_common.h"
#include "radv_acceleration_structure.h"

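/* Ray tracing requires GFX10.3+ hardware or the software emulation path
 * (RADV_PERFTEST_EMULATE_RT) and is not supported with the LLVM backend.
 * Ray queries only need those base requirements; full ray tracing pipelines
 * are additionally gated behind RADV_PERFTEST_RT. */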
bool
radv_enable_rt(const struct radv_physical_device *pdevice, bool rt_pipelines)
{
   if ((pdevice->rad_info.gfx_level < GFX10_3 && !radv_emulate_rt(pdevice)) || pdevice->use_llvm)
      return false;

   if (rt_pipelines)
      return pdevice->instance->perftest_flags & RADV_PERFTEST_RT;

   return true;
}

bool
radv_emulate_rt(const struct radv_physical_device *pdevice)
{
   return pdevice->instance->perftest_flags & RADV_PERFTEST_EMULATE_RT;
}

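/* Compare-and-swap step: if distances[chan_2] < distances[chan_1], swap the two distance
 * channels and the corresponding index channels. This is the building block of the small
 * sorting network used for the four box children below. */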
void
nir_sort_hit_pair(nir_builder *b, nir_variable *var_distances, nir_variable *var_indices,
                  uint32_t chan_1, uint32_t chan_2)
{
   nir_ssa_def *ssa_distances = nir_load_var(b, var_distances);
   nir_ssa_def *ssa_indices = nir_load_var(b, var_indices);
   /* if (distances[chan_2] < distances[chan_1]) { */
   nir_push_if(
      b, nir_flt(b, nir_channel(b, ssa_distances, chan_2), nir_channel(b, ssa_distances, chan_1)));
   {
      /* swap(distances[chan_2], distances[chan_1]); */
      nir_ssa_def *new_distances[4] = {nir_ssa_undef(b, 1, 32), nir_ssa_undef(b, 1, 32),
                                       nir_ssa_undef(b, 1, 32), nir_ssa_undef(b, 1, 32)};
      nir_ssa_def *new_indices[4] = {nir_ssa_undef(b, 1, 32), nir_ssa_undef(b, 1, 32),
                                     nir_ssa_undef(b, 1, 32), nir_ssa_undef(b, 1, 32)};
      new_distances[chan_2] = nir_channel(b, ssa_distances, chan_1);
      new_distances[chan_1] = nir_channel(b, ssa_distances, chan_2);
      new_indices[chan_2] = nir_channel(b, ssa_indices, chan_1);
      new_indices[chan_1] = nir_channel(b, ssa_indices, chan_2);
      nir_store_var(b, var_distances, nir_vec(b, new_distances, 4),
                    (1u << chan_1) | (1u << chan_2));
      nir_store_var(b, var_indices, nir_vec(b, new_indices, 4), (1u << chan_1) | (1u << chan_2));
   }
   /* } */
   nir_pop_if(b, NULL);
}

nir_ssa_def *
intersect_ray_amd_software_box(struct radv_device *device, nir_builder *b, nir_ssa_def *bvh_node,
                               nir_ssa_def *ray_tmax, nir_ssa_def *origin, nir_ssa_def *dir,
                               nir_ssa_def *inv_dir)
{
   const struct glsl_type *vec4_type = glsl_vector_type(GLSL_TYPE_FLOAT, 4);
   const struct glsl_type *uvec4_type = glsl_vector_type(GLSL_TYPE_UINT, 4);

   nir_ssa_def *node_addr = build_node_to_addr(device, b, bvh_node);

   /* vec4 distances = vec4(INF, INF, INF, INF); */
   nir_variable *distances =
      nir_variable_create(b->shader, nir_var_shader_temp, vec4_type, "distances");
   nir_store_var(b, distances, nir_imm_vec4(b, INFINITY, INFINITY, INFINITY, INFINITY), 0xf);

   /* uvec4 child_indices = uvec4(0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff); */
   nir_variable *child_indices =
      nir_variable_create(b->shader, nir_var_shader_temp, uvec4_type, "child_indices");
   nir_store_var(b, child_indices,
                 nir_imm_ivec4(b, 0xffffffffu, 0xffffffffu, 0xffffffffu, 0xffffffffu), 0xf);

   /* Need to remove infinities here because otherwise we get nasty NaN propagation
    * if the direction has 0s in it. */
   /* inv_dir = clamp(inv_dir, -FLT_MAX, FLT_MAX); */
   inv_dir = nir_fclamp(b, inv_dir, nir_imm_float(b, -FLT_MAX), nir_imm_float(b, FLT_MAX));
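   /* Example: if dir.x == 0 then inv_dir.x is +/-INF. When the ray origin lies exactly on
    * a slab plane, (coord - origin.x) is 0 and 0 * INF yields NaN, which would poison the
    * min/max chain below; with the clamp, 0 * FLT_MAX is 0 instead. */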

   for (int i = 0; i < 4; i++) {
      const uint32_t child_offset = offsetof(struct radv_bvh_box32_node, children[i]);
      const uint32_t coord_offsets[2] = {
         offsetof(struct radv_bvh_box32_node, coords[i][0][0]),
         offsetof(struct radv_bvh_box32_node, coords[i][1][0]),
      };

      /* node->children[i] -> uint */
      nir_ssa_def *child_index =
         nir_build_load_global(b, 1, 32, nir_iadd_imm(b, node_addr, child_offset), .align_mul = 64,
                               .align_offset = child_offset % 64);
      /* node->coords[i][0], node->coords[i][1] -> vec3 */
      nir_ssa_def *node_coords[2] = {
         nir_build_load_global(b, 3, 32, nir_iadd_imm(b, node_addr, coord_offsets[0]),
                               .align_mul = 64, .align_offset = coord_offsets[0] % 64),
         nir_build_load_global(b, 3, 32, nir_iadd_imm(b, node_addr, coord_offsets[1]),
                               .align_mul = 64, .align_offset = coord_offsets[1] % 64),
      };

      /* If x of the aabb min is NaN, then this is an inactive aabb.
       * We don't need to care about any other components being NaN as that is UB.
       * https://www.khronos.org/registry/vulkan/specs/1.2-extensions/html/chap36.html#VkAabbPositionsKHR
       */
      nir_ssa_def *min_x = nir_channel(b, node_coords[0], 0);
      nir_ssa_def *min_x_is_not_nan =
         nir_inot(b, nir_fneu(b, min_x, min_x)); /* NaN != NaN -> true */

      /* vec3 bound0 = (node->coords[i][0] - origin) * inv_dir; */
      nir_ssa_def *bound0 = nir_fmul(b, nir_fsub(b, node_coords[0], origin), inv_dir);
      /* vec3 bound1 = (node->coords[i][1] - origin) * inv_dir; */
      nir_ssa_def *bound1 = nir_fmul(b, nir_fsub(b, node_coords[1], origin), inv_dir);

      /* float tmin = max(max(min(bound0.x, bound1.x), min(bound0.y, bound1.y)), min(bound0.z,
       * bound1.z)); */
      nir_ssa_def *tmin =
         nir_fmax(b,
                  nir_fmax(b, nir_fmin(b, nir_channel(b, bound0, 0), nir_channel(b, bound1, 0)),
                           nir_fmin(b, nir_channel(b, bound0, 1), nir_channel(b, bound1, 1))),
                  nir_fmin(b, nir_channel(b, bound0, 2), nir_channel(b, bound1, 2)));

      /* float tmax = min(min(max(bound0.x, bound1.x), max(bound0.y, bound1.y)), max(bound0.z,
       * bound1.z)); */
      nir_ssa_def *tmax =
         nir_fmin(b,
                  nir_fmin(b, nir_fmax(b, nir_channel(b, bound0, 0), nir_channel(b, bound1, 0)),
                           nir_fmax(b, nir_channel(b, bound0, 1), nir_channel(b, bound1, 1))),
                  nir_fmax(b, nir_channel(b, bound0, 2), nir_channel(b, bound1, 2)));
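      /* Slab test: tmin is the distance at which the ray has entered all three slabs and
       * tmax the distance at which it leaves the first of them. The box is hit when that
       * interval is non-empty, not entirely behind the origin, and starts before the
       * current closest hit, which is exactly the condition tested below. */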

      /* if (!isnan(node->coords[i][0].x) && tmax >= max(0.0f, tmin) && tmin < ray_tmax) { */
      nir_push_if(b,
                  nir_iand(b, min_x_is_not_nan,
                           nir_iand(b, nir_fge(b, tmax, nir_fmax(b, nir_imm_float(b, 0.0f), tmin)),
                                    nir_flt(b, tmin, ray_tmax))));
      {
         /* child_indices[i] = node->children[i]; */
         nir_ssa_def *new_child_indices[4] = {child_index, child_index, child_index, child_index};
         nir_store_var(b, child_indices, nir_vec(b, new_child_indices, 4), 1u << i);

         /* distances[i] = tmin; */
         nir_ssa_def *new_distances[4] = {tmin, tmin, tmin, tmin};
         nir_store_var(b, distances, nir_vec(b, new_distances, 4), 1u << i);
      }
      /* } */
      nir_pop_if(b, NULL);
   }

   /* Sort our distances with a sorting network. */
   nir_sort_hit_pair(b, distances, child_indices, 0, 1);
   nir_sort_hit_pair(b, distances, child_indices, 2, 3);
   nir_sort_hit_pair(b, distances, child_indices, 0, 2);
   nir_sort_hit_pair(b, distances, child_indices, 1, 3);
   nir_sort_hit_pair(b, distances, child_indices, 1, 2);
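   /* The five compare-and-swap steps (0,1) (2,3) (0,2) (1,3) (1,2) form a minimal sorting
    * network for four elements, so the children end up ordered by increasing hit distance,
    * with missed children (distance = INF) at the end. */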

   return nir_load_var(b, child_indices);
}

nir_ssa_def *
intersect_ray_amd_software_tri(struct radv_device *device, nir_builder *b, nir_ssa_def *bvh_node,
                               nir_ssa_def *ray_tmax, nir_ssa_def *origin, nir_ssa_def *dir,
                               nir_ssa_def *inv_dir)
{
   const struct glsl_type *vec4_type = glsl_vector_type(GLSL_TYPE_FLOAT, 4);

   nir_ssa_def *node_addr = build_node_to_addr(device, b, bvh_node);

   const uint32_t coord_offsets[3] = {
      offsetof(struct radv_bvh_triangle_node, coords[0]),
      offsetof(struct radv_bvh_triangle_node, coords[1]),
      offsetof(struct radv_bvh_triangle_node, coords[2]),
   };

   /* node->coords[0], node->coords[1], node->coords[2] -> vec3 */
   nir_ssa_def *node_coords[3] = {
      nir_build_load_global(b, 3, 32, nir_iadd_imm(b, node_addr, coord_offsets[0]), .align_mul = 64,
                            .align_offset = coord_offsets[0] % 64),
      nir_build_load_global(b, 3, 32, nir_iadd_imm(b, node_addr, coord_offsets[1]), .align_mul = 64,
                            .align_offset = coord_offsets[1] % 64),
      nir_build_load_global(b, 3, 32, nir_iadd_imm(b, node_addr, coord_offsets[2]), .align_mul = 64,
                            .align_offset = coord_offsets[2] % 64),
   };

   nir_variable *result = nir_variable_create(b->shader, nir_var_shader_temp, vec4_type, "result");
   nir_store_var(b, result, nir_imm_vec4(b, INFINITY, 1.0f, 0.0f, 0.0f), 0xf);

   /* Based on watertight Ray/Triangle intersection from
    * http://jcgt.org/published/0002/01/05/paper.pdf */

   /* Calculate the dimension where the ray direction is largest */
   nir_ssa_def *abs_dir = nir_fabs(b, dir);

   nir_ssa_def *abs_dirs[3] = {
      nir_channel(b, abs_dir, 0),
      nir_channel(b, abs_dir, 1),
      nir_channel(b, abs_dir, 2),
   };
   /* Find index of greatest value of abs_dir and put that as kz. */
   nir_ssa_def *kz = nir_bcsel(
      b, nir_fge(b, abs_dirs[0], abs_dirs[1]),
      nir_bcsel(b, nir_fge(b, abs_dirs[0], abs_dirs[2]), nir_imm_int(b, 0), nir_imm_int(b, 2)),
      nir_bcsel(b, nir_fge(b, abs_dirs[1], abs_dirs[2]), nir_imm_int(b, 1), nir_imm_int(b, 2)));
   nir_ssa_def *kx = nir_imod(b, nir_iadd_imm(b, kz, 1), nir_imm_int(b, 3));
   nir_ssa_def *ky = nir_imod(b, nir_iadd_imm(b, kx, 1), nir_imm_int(b, 3));
   nir_ssa_def *k_indices[3] = {kx, ky, kz};
   nir_ssa_def *k = nir_vec(b, k_indices, 3);

   /* Swap kx and ky dimensions to preserve winding order */
   unsigned swap_xy_swizzle[4] = {1, 0, 2, 3};
   k = nir_bcsel(b, nir_flt(b, nir_vector_extract(b, dir, kz), nir_imm_float(b, 0.0f)),
                 nir_swizzle(b, k, swap_xy_swizzle, 3), k);

   kx = nir_channel(b, k, 0);
   ky = nir_channel(b, k, 1);
   kz = nir_channel(b, k, 2);

   /* Calculate shear constants */
   nir_ssa_def *sz = nir_frcp(b, nir_vector_extract(b, dir, kz));
   nir_ssa_def *sx = nir_fmul(b, nir_vector_extract(b, dir, kx), sz);
   nir_ssa_def *sy = nir_fmul(b, nir_vector_extract(b, dir, ky), sz);

   /* Calculate vertices relative to ray origin */
   nir_ssa_def *v_a = nir_fsub(b, node_coords[0], origin);
   nir_ssa_def *v_b = nir_fsub(b, node_coords[1], origin);
   nir_ssa_def *v_c = nir_fsub(b, node_coords[2], origin);

   /* Perform shear and scale */
   nir_ssa_def *ax =
      nir_fsub(b, nir_vector_extract(b, v_a, kx), nir_fmul(b, sx, nir_vector_extract(b, v_a, kz)));
   nir_ssa_def *ay =
      nir_fsub(b, nir_vector_extract(b, v_a, ky), nir_fmul(b, sy, nir_vector_extract(b, v_a, kz)));
   nir_ssa_def *bx =
      nir_fsub(b, nir_vector_extract(b, v_b, kx), nir_fmul(b, sx, nir_vector_extract(b, v_b, kz)));
   nir_ssa_def *by =
      nir_fsub(b, nir_vector_extract(b, v_b, ky), nir_fmul(b, sy, nir_vector_extract(b, v_b, kz)));
   nir_ssa_def *cx =
      nir_fsub(b, nir_vector_extract(b, v_c, kx), nir_fmul(b, sx, nir_vector_extract(b, v_c, kz)));
   nir_ssa_def *cy =
      nir_fsub(b, nir_vector_extract(b, v_c, ky), nir_fmul(b, sy, nir_vector_extract(b, v_c, kz)));

   nir_ssa_def *u = nir_fsub(b, nir_fmul(b, cx, by), nir_fmul(b, cy, bx));
   nir_ssa_def *v = nir_fsub(b, nir_fmul(b, ax, cy), nir_fmul(b, ay, cx));
   nir_ssa_def *w = nir_fsub(b, nir_fmul(b, bx, ay), nir_fmul(b, by, ax));
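   /* u, v and w are 2D edge functions (cross products) of the sheared and projected
    * vertices; their signs tell on which side of each triangle edge the ray passes, and
    * they double as scaled barycentric coordinates of the hit. */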

   nir_variable *u_var =
      nir_variable_create(b->shader, nir_var_shader_temp, glsl_float_type(), "u");
   nir_variable *v_var =
      nir_variable_create(b->shader, nir_var_shader_temp, glsl_float_type(), "v");
   nir_variable *w_var =
      nir_variable_create(b->shader, nir_var_shader_temp, glsl_float_type(), "w");
   nir_store_var(b, u_var, u, 0x1);
   nir_store_var(b, v_var, v, 0x1);
   nir_store_var(b, w_var, w, 0x1);

   /* Fall back to testing edges with double precision...
    *
    * The Vulkan spec states it only needs single precision watertightness
    * but we fail dEQP-VK.ray_tracing_pipeline.watertightness.closedFan2.1024 with
    * failures = 1 without doing this. :( */
   nir_ssa_def *cond_retest = nir_ior(
      b, nir_ior(b, nir_feq(b, u, nir_imm_float(b, 0.0f)), nir_feq(b, v, nir_imm_float(b, 0.0f))),
      nir_feq(b, w, nir_imm_float(b, 0.0f)));

   nir_push_if(b, cond_retest);
   {
      ax = nir_f2f64(b, ax);
      ay = nir_f2f64(b, ay);
      bx = nir_f2f64(b, bx);
      by = nir_f2f64(b, by);
      cx = nir_f2f64(b, cx);
      cy = nir_f2f64(b, cy);

      nir_store_var(b, u_var, nir_f2f32(b, nir_fsub(b, nir_fmul(b, cx, by), nir_fmul(b, cy, bx))),
                    0x1);
      nir_store_var(b, v_var, nir_f2f32(b, nir_fsub(b, nir_fmul(b, ax, cy), nir_fmul(b, ay, cx))),
                    0x1);
      nir_store_var(b, w_var, nir_f2f32(b, nir_fsub(b, nir_fmul(b, bx, ay), nir_fmul(b, by, ax))),
                    0x1);
   }
   nir_pop_if(b, NULL);

   u = nir_load_var(b, u_var);
   v = nir_load_var(b, v_var);
   w = nir_load_var(b, w_var);

   /* Perform edge tests. */
   nir_ssa_def *cond_back = nir_ior(
      b, nir_ior(b, nir_flt(b, u, nir_imm_float(b, 0.0f)), nir_flt(b, v, nir_imm_float(b, 0.0f))),
      nir_flt(b, w, nir_imm_float(b, 0.0f)));

   nir_ssa_def *cond_front = nir_ior(
      b, nir_ior(b, nir_flt(b, nir_imm_float(b, 0.0f), u), nir_flt(b, nir_imm_float(b, 0.0f), v)),
      nir_flt(b, nir_imm_float(b, 0.0f), w));

   nir_ssa_def *cond = nir_inot(b, nir_iand(b, cond_back, cond_front));

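   /* The edge test passes when u, v and w do not have mixed signs (zeros are allowed on
    * either side): all non-negative means a front-facing hit, all non-positive a
    * back-facing one, and mixed signs mean the ray misses the triangle. */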
   nir_push_if(b, cond);
   {
      nir_ssa_def *det = nir_fadd(b, u, nir_fadd(b, v, w));
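      /* det = u + v + w is the determinant of the watertight test. The t, v and w values
       * stored in the result below are still scaled by det; presumably the caller divides
       * them through by det (result channel 1) to obtain the actual hit distance and
       * barycentrics. */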

      nir_ssa_def *az = nir_fmul(b, sz, nir_vector_extract(b, v_a, kz));
      nir_ssa_def *bz = nir_fmul(b, sz, nir_vector_extract(b, v_b, kz));
      nir_ssa_def *cz = nir_fmul(b, sz, nir_vector_extract(b, v_c, kz));

      nir_ssa_def *t =
         nir_fadd(b, nir_fadd(b, nir_fmul(b, u, az), nir_fmul(b, v, bz)), nir_fmul(b, w, cz));

      nir_ssa_def *t_signed = nir_fmul(b, nir_fsign(b, det), t);

      nir_ssa_def *det_cond_front = nir_inot(b, nir_flt(b, t_signed, nir_imm_float(b, 0.0f)));

      nir_push_if(b, det_cond_front);
      {
         nir_ssa_def *indices[4] = {t, det, v, w};
         nir_store_var(b, result, nir_vec(b, indices, 4), 0xf);
      }
      nir_pop_if(b, NULL);
   }
   nir_pop_if(b, NULL);

   return nir_load_var(b, result);
}

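/* BVH node ids are the node's 64-byte aligned GPU address shifted right by 3; the low 3
 * bits of the id are left clear so they can carry the node type (build_node_to_addr below
 * masks them off again). The mask also strips any sign-extended upper address bits,
 * assuming acceleration structures fit in a 2^42-byte window. */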
nir_ssa_def *
build_addr_to_node(nir_builder *b, nir_ssa_def *addr)
{
   const uint64_t bvh_size = 1ull << 42;
   nir_ssa_def *node = nir_ushr_imm(b, addr, 3);
   return nir_iand_imm(b, node, (bvh_size - 1) << 3);
}

nir_ssa_def *
build_node_to_addr(struct radv_device *device, nir_builder *b, nir_ssa_def *node)
{
   nir_ssa_def *addr = nir_iand_imm(b, node, ~7ull);
   addr = nir_ishl_imm(b, addr, 3);
   /* Assumes everything is in the top half of address space, which is true in
    * GFX9+ for now. */
   return device->physical_device->rad_info.gfx_level >= GFX9
             ? nir_ior_imm(b, addr, 0xffffull << 48)
             : addr;
}

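/* Multiplies a vec3 by the 3x3 part of a row-major 3x4 matrix (three vec4 rows with the
 * translation in the .w component). When 'translation' is set, the translation column is
 * added to the result as well. */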
nir_ssa_def *
nir_build_vec3_mat_mult(nir_builder *b, nir_ssa_def *vec, nir_ssa_def *matrix[], bool translation)
{
   nir_ssa_def *result_components[3] = {
      nir_channel(b, matrix[0], 3),
      nir_channel(b, matrix[1], 3),
      nir_channel(b, matrix[2], 3),
   };
   for (unsigned i = 0; i < 3; ++i) {
      for (unsigned j = 0; j < 3; ++j) {
         nir_ssa_def *v =
            nir_fmul(b, nir_channels(b, vec, 1 << j), nir_channels(b, matrix[i], 1 << j));
         result_components[i] = (translation || j) ? nir_fadd(b, result_components[i], v) : v;
      }
   }
   return nir_vec(b, result_components, 3);
}

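/* Variant that subtracts the matrix's translation column from the vector first and then
 * applies only the 3x3 part, i.e. it computes M3x3 * (vec - translation). */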
nir_ssa_def *
nir_build_vec3_mat_mult_pre(nir_builder *b, nir_ssa_def *vec, nir_ssa_def *matrix[])
{
   nir_ssa_def *result_components[3] = {
      nir_channel(b, matrix[0], 3),
      nir_channel(b, matrix[1], 3),
      nir_channel(b, matrix[2], 3),
   };
   return nir_build_vec3_mat_mult(b, nir_fsub(b, vec, nir_vec(b, result_components, 3)), matrix,
                                  false);
}

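/* Loads the world-to-object matrix of an instance node as three vec4 rows (a row-major
 * 3x4 matrix) into out[0..2]. */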
void
nir_build_wto_matrix_load(nir_builder *b, nir_ssa_def *instance_addr, nir_ssa_def **out)
{
   unsigned offset = offsetof(struct radv_bvh_instance_node, wto_matrix);
   for (unsigned i = 0; i < 3; ++i) {
      out[i] = nir_build_load_global(b, 4, 32, nir_iadd_imm(b, instance_addr, offset + i * 16),
                                     .align_mul = 64, .align_offset = offset + i * 16);
   }
}

/* When a hit is opaque the any_hit shader is skipped for this hit and the hit
 * is assumed to be an actual hit. */
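/* Precedence, from lowest to highest: the per-geometry opaque flag is the default, the
 * per-instance force-opaque/force-no-opaque flags override it, and the per-ray
 * opaque/no-opaque flags override everything; each later nir_bcsel wins. */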
nir_ssa_def *
hit_is_opaque(nir_builder *b, nir_ssa_def *sbt_offset_and_flags, nir_ssa_def *flags,
              nir_ssa_def *geometry_id_and_flags)
{
   nir_ssa_def *geom_force_opaque =
      nir_test_mask(b, geometry_id_and_flags, VK_GEOMETRY_OPAQUE_BIT_KHR << 28);
   nir_ssa_def *instance_force_opaque =
      nir_test_mask(b, sbt_offset_and_flags, VK_GEOMETRY_INSTANCE_FORCE_OPAQUE_BIT_KHR << 24);
   nir_ssa_def *instance_force_non_opaque =
      nir_test_mask(b, sbt_offset_and_flags, VK_GEOMETRY_INSTANCE_FORCE_NO_OPAQUE_BIT_KHR << 24);

   nir_ssa_def *opaque = geom_force_opaque;
   opaque = nir_bcsel(b, instance_force_opaque, nir_imm_bool(b, true), opaque);
   opaque = nir_bcsel(b, instance_force_non_opaque, nir_imm_bool(b, false), opaque);

   nir_ssa_def *ray_force_opaque = nir_test_mask(b, flags, SpvRayFlagsOpaqueKHRMask);
   nir_ssa_def *ray_force_non_opaque = nir_test_mask(b, flags, SpvRayFlagsNoOpaqueKHRMask);

   opaque = nir_bcsel(b, ray_force_opaque, nir_imm_bool(b, true), opaque);
   opaque = nir_bcsel(b, ray_force_non_opaque, nir_imm_bool(b, false), opaque);
   return opaque;
}

nir_ssa_def *
create_bvh_descriptor(nir_builder *b)
{
   /* We create a BVH descriptor that covers the entire memory range. That way we can always
    * use the same descriptor, which avoids divergence when different rays hit different
    * instances at the cost of having to use 64-bit node ids. */
   const uint64_t bvh_size = 1ull << 42;
   return nir_imm_ivec4(
      b, 0, 1u << 31 /* Enable box sorting */, (bvh_size - 1) & 0xFFFFFFFFu,
      ((bvh_size - 1) >> 32) | (1u << 24 /* Return IJ for triangles */) | (1u << 31));
}