/*
 * Copyright © 2022 Bas Nieuwenhuizen
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#version 460

#extension GL_GOOGLE_include_directive : require

#extension GL_EXT_shader_explicit_arithmetic_types_int8 : require
#extension GL_EXT_shader_explicit_arithmetic_types_int16 : require
#extension GL_EXT_shader_explicit_arithmetic_types_int32 : require
#extension GL_EXT_shader_explicit_arithmetic_types_int64 : require
#extension GL_EXT_shader_explicit_arithmetic_types_float16 : require
#extension GL_EXT_scalar_block_layout : require
#extension GL_EXT_buffer_reference : require
#extension GL_EXT_buffer_reference2 : require
#extension GL_KHR_memory_scope_semantics : require
#extension GL_KHR_shader_subgroup_vote : require
#extension GL_KHR_shader_subgroup_arithmetic : require
#extension GL_KHR_shader_subgroup_ballot : require

layout(local_size_x = 1024, local_size_y = 1, local_size_z = 1) in;

#define USE_GLOBAL_SYNC
#include "build_interface.h"

TYPE(ploc_prefix_scan_partition, 4);

layout(push_constant) uniform CONSTS
{
   ploc_args args;
};

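/* Scratch for the prefix scan: the exclusive sum contributed by all previous
 * partitions, plus one partial sum per subgroup (the array size assumes a
 * subgroup size of 64, i.e. wave64).
 */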
shared uint32_t exclusive_prefix_sum;
shared uint32_t aggregate_sums[PLOC_WORKGROUP_SIZE / 64];

/*
 * Global prefix scan over all workgroups to determine the index at which the collapsed node is written.
 * See https://research.nvidia.com/sites/default/files/publications/nvr-2016-002.pdf
 * ("Single-pass Parallel Prefix Scan with Decoupled Look-back").
 * One partition = one workgroup in this case.
 */
uint32_t
prefix_scan(uvec4 ballot, REF(ploc_prefix_scan_partition) partitions, uint32_t task_index)
{
   if (gl_LocalInvocationIndex == 0) {
      exclusive_prefix_sum = 0;
      if (task_index >= gl_WorkGroupSize.x) {
         REF(ploc_prefix_scan_partition) current_partition =
            REF(ploc_prefix_scan_partition)(INDEX(ploc_prefix_scan_partition, partitions, task_index / gl_WorkGroupSize.x));

         REF(ploc_prefix_scan_partition) previous_partition = current_partition - 1;

         while (true) {
            /* See if this previous workgroup has already set its inclusive sum */
            if (atomicLoad(DEREF(previous_partition).inclusive_sum, gl_ScopeDevice,
                           gl_StorageSemanticsBuffer,
                           gl_SemanticsAcquire | gl_SemanticsMakeVisible) != 0xFFFFFFFF) {
               atomicAdd(exclusive_prefix_sum, DEREF(previous_partition).inclusive_sum);
               break;
            } else {
               atomicAdd(exclusive_prefix_sum, DEREF(previous_partition).aggregate);
               previous_partition -= 1;
            }
         }
         /* Set the inclusive sum for the next workgroups */
         atomicStore(DEREF(current_partition).inclusive_sum,
                     DEREF(current_partition).aggregate + exclusive_prefix_sum, gl_ScopeDevice,
                     gl_StorageSemanticsBuffer, gl_SemanticsRelease | gl_SemanticsMakeAvailable);
      }
   }

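   /* Workgroup-local part of the scan: one ballot bit count per subgroup, an
    * exclusive scan over the per-subgroup counts (offset by the global
    * exclusive prefix), plus each invocation's rank within its own subgroup.
    */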
   if (subgroupElect())
      aggregate_sums[gl_SubgroupID] = subgroupBallotBitCount(ballot);
   barrier();

   if (gl_LocalInvocationID.x < PLOC_WORKGROUP_SIZE / 64) {
      aggregate_sums[gl_LocalInvocationID.x] =
         exclusive_prefix_sum + subgroupExclusiveAdd(aggregate_sums[gl_LocalInvocationID.x]);
   }
   barrier();

   return aggregate_sums[gl_SubgroupID] + subgroupBallotExclusiveBitCount(ballot);
}

/* Relative cost of increasing the BVH depth. Deep BVHs will require more backtracking. */
#define BVH_LEVEL_COST 0.2

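/* Allocates a new internal (box) node, fills in its two children and their
 * combined bounds, and returns the packed IR node id.
 */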
uint32_t
push_node(uint32_t children[2], radv_aabb bounds[2])
{
   uint32_t internal_node_index = atomicAdd(DEREF(args.header).ir_internal_node_count, 1);
   uint32_t dst_offset = args.internal_node_offset + internal_node_index * SIZEOF(radv_ir_box_node);
   uint32_t dst_id = pack_ir_node_id(dst_offset, radv_ir_node_internal);
   REF(radv_ir_box_node) dst_node = REF(radv_ir_box_node)(OFFSET(args.bvh, dst_offset));

   radv_aabb total_bounds;
   total_bounds.min = vec3(INFINITY);
   total_bounds.max = vec3(-INFINITY);

   for (uint i = 0; i < 2; ++i) {
      VOID_REF node = OFFSET(args.bvh, ir_id_to_offset(children[i]));
      REF(radv_ir_node) child = REF(radv_ir_node)(node);

      total_bounds.min = min(total_bounds.min, bounds[i].min);
      total_bounds.max = max(total_bounds.max, bounds[i].max);

      DEREF(dst_node).children[i] = children[i];
   }

   DEREF(dst_node).base.aabb = total_bounds;
   DEREF(dst_node).bvh_offset = RADV_UNKNOWN_BVH_OFFSET;
   return dst_id;
}

#define PLOC_NEIGHBOURHOOD 16
#define PLOC_OFFSET_MASK   ((1 << 5) - 1)

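/* Nearest-neighbour candidates are encoded as (SAH bits | relative offset):
 * the upper 27 bits are the bit pattern of the (non-negative) combined surface
 * area, and the lower 5 bits hold the offset j - i, which lies in
 * [-PLOC_NEIGHBOURHOOD, PLOC_NEIGHBOURHOOD] excluding 0 and is remapped to
 * 0..31 (e.g. -3 encodes to 13, +2 encodes to 17). Since non-negative floats
 * compare the same as their bit patterns, min()/atomicMin() on the encoded
 * value selects the candidate with the lowest SAH cost.
 */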
uint32_t
encode_neighbour_offset(float sah, uint32_t i, uint32_t j)
{
   int32_t offset = int32_t(j) - int32_t(i);
   uint32_t encoded_offset = offset + PLOC_NEIGHBOURHOOD - (offset > 0 ? 1 : 0);
   return (floatBitsToUint(sah) & (~PLOC_OFFSET_MASK)) | encoded_offset;
}

int32_t
decode_neighbour_offset(uint32_t encoded_offset)
{
   int32_t offset = int32_t(encoded_offset & PLOC_OFFSET_MASK) - PLOC_NEIGHBOURHOOD;
   return offset + (offset >= 0 ? 1 : 0);
}

#define NUM_PLOC_LDS_ITEMS PLOC_WORKGROUP_SIZE + 4 * PLOC_NEIGHBOURHOOD

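/* LDS caches of node bounds and encoded nearest-neighbour candidates. They
 * cover this workgroup's tasks plus 2 * PLOC_NEIGHBOURHOOD entries of overlap
 * on either side, so searches near the workgroup boundary also see nodes
 * handled by neighbouring workgroups.
 */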
shared radv_aabb shared_bounds[NUM_PLOC_LDS_ITEMS];
shared uint32_t nearest_neighbour_indices[NUM_PLOC_LDS_ITEMS];

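/* On the first PLOC iteration the ids are read from the (presumably sorted)
 * key/id pairs; later iterations read the plain ids written by the compaction
 * phase.
 */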
uint32_t
load_id(VOID_REF ids, uint32_t iter, uint32_t index)
{
   if (iter == 0)
      return DEREF(REF(key_id_pair)(INDEX(key_id_pair, ids, index))).id;
   else
      return DEREF(REF(uint32_t)(INDEX(uint32_t, ids, index)));
}

void
load_bounds(VOID_REF ids, uint32_t iter, uint32_t task_index, uint32_t lds_base,
            uint32_t neighbourhood_overlap, uint32_t search_bound)
{
   for (uint32_t i = task_index - 2 * neighbourhood_overlap; i < search_bound;
        i += gl_WorkGroupSize.x) {
      uint32_t id = load_id(ids, iter, i);
      if (id == RADV_BVH_INVALID_NODE)
         continue;

      VOID_REF addr = OFFSET(args.bvh, ir_id_to_offset(id));
      REF(radv_ir_node) node = REF(radv_ir_node)(addr);

      shared_bounds[i - lds_base] = DEREF(node).aabb;
   }
}

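/* SAH-style merge cost: surface area of the combined bounds of nodes i and j. */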
float
combined_node_cost(uint32_t lds_base, uint32_t i, uint32_t j)
{
   radv_aabb combined_bounds;
   combined_bounds.min = min(shared_bounds[i - lds_base].min, shared_bounds[j - lds_base].min);
   combined_bounds.max = max(shared_bounds[i - lds_base].max, shared_bounds[j - lds_base].max);
   return aabb_surface_area(combined_bounds);
}

shared uint32_t shared_aggregate_sum;

void
main(void)
{
   VOID_REF src_ids = args.ids_0;
   VOID_REF dst_ids = args.ids_1;

   /* We try to use LBVH for BVHs where we know there will be fewer than 5 leaves,
    * but sometimes all leaves might be inactive. */
   if (DEREF(args.header).active_leaf_count <= 2) {
      if (gl_GlobalInvocationID.x == 0) {
         uint32_t internal_node_index = atomicAdd(DEREF(args.header).ir_internal_node_count, 1);
         uint32_t dst_offset = args.internal_node_offset + internal_node_index * SIZEOF(radv_ir_box_node);
         REF(radv_ir_box_node) dst_node = REF(radv_ir_box_node)(OFFSET(args.bvh, dst_offset));

         radv_aabb total_bounds;
         total_bounds.min = vec3(INFINITY);
         total_bounds.max = vec3(-INFINITY);

         uint32_t i = 0;
         for (; i < DEREF(args.header).active_leaf_count; i++) {
            uint32_t child_id = DEREF(INDEX(key_id_pair, src_ids, i)).id;

            if (child_id != RADV_BVH_INVALID_NODE) {
               VOID_REF node = OFFSET(args.bvh, ir_id_to_offset(child_id));
               REF(radv_ir_node) child = REF(radv_ir_node)(node);
               radv_aabb bounds = DEREF(child).aabb;

               total_bounds.min = min(total_bounds.min, bounds.min);
               total_bounds.max = max(total_bounds.max, bounds.max);
            }

            DEREF(dst_node).children[i] = child_id;
         }
         for (; i < 2; i++)
            DEREF(dst_node).children[i] = RADV_BVH_INVALID_NODE;

         DEREF(dst_node).base.aabb = total_bounds;
         DEREF(dst_node).bvh_offset = RADV_UNKNOWN_BVH_OFFSET;
      }
      return;
   }

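   /* Seed the global sync data: these compare-swaps only take effect while the
    * fields still hold their 0xFFFFFFFF sentinel, setting the initial task
    * count and (presumably) the number of workgroups that must finish each
    * phase.
    */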
   atomicCompSwap(DEREF(args.header).sync_data.task_counts[0], 0xFFFFFFFF,
                  DEREF(args.header).active_leaf_count);
   atomicCompSwap(DEREF(args.header).sync_data.current_phase_end_counter, 0xFFFFFFFF,
                  DIV_ROUND_UP(DEREF(args.header).active_leaf_count, gl_WorkGroupSize.x));
   REF(ploc_prefix_scan_partition)
   partitions = REF(ploc_prefix_scan_partition)(args.prefix_scan_partitions);

   uint32_t task_index = fetch_task(args.header, false);

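   /* Each PLOC iteration runs two globally synchronized phases (the PHASE
    * blocks below, presumably implemented by the USE_GLOBAL_SYNC helpers in
    * build_interface.h): one finds and merges nearest-neighbour pairs, the
    * other compacts the surviving node ids via the prefix scan.
    */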
   for (uint iter = 0;; ++iter) {
      uint32_t current_task_count = task_count(args.header);
      if (task_index == TASK_INDEX_INVALID)
         break;

      /* Find preferred partners and merge them */
      PHASE (args.header) {
         uint32_t base_index = task_index - gl_LocalInvocationID.x;
         uint32_t neighbourhood_overlap = min(PLOC_NEIGHBOURHOOD, base_index);
         uint32_t double_neighbourhood_overlap = min(2 * PLOC_NEIGHBOURHOOD, base_index);
         /* Exclusive upper bound on the indices for which nearest-node entries are written. */
         uint32_t write_bound =
            min(current_task_count, base_index + gl_WorkGroupSize.x + PLOC_NEIGHBOURHOOD);
         /* Exclusive upper bound on the indices that are searched for nearest nodes. */
         uint32_t search_bound =
            min(current_task_count, base_index + gl_WorkGroupSize.x + 2 * PLOC_NEIGHBOURHOOD);
         uint32_t lds_base = base_index - double_neighbourhood_overlap;

         load_bounds(src_ids, iter, task_index, lds_base, neighbourhood_overlap, search_bound);

         for (uint32_t i = gl_LocalInvocationID.x; i < NUM_PLOC_LDS_ITEMS; i += gl_WorkGroupSize.x)
            nearest_neighbour_indices[i] = 0xFFFFFFFF;
         barrier();

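         /* For every node, consider up to PLOC_NEIGHBOURHOOD candidates on
          * either side and keep the partner with the smallest combined surface
          * area; atomicMin on the encoded (SAH | offset) values resolves the
          * minimum for both endpoints.
          */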
         for (uint32_t i = task_index - double_neighbourhood_overlap; i < write_bound;
              i += gl_WorkGroupSize.x) {
            uint32_t right_bound = min(search_bound - 1 - i, PLOC_NEIGHBOURHOOD);

            uint32_t fallback_pair = i == 0 ? (i + 1) : (i - 1);
            uint32_t min_offset = encode_neighbour_offset(INFINITY, i, fallback_pair);

            for (uint32_t j = max(i + 1, base_index - neighbourhood_overlap); j <= i + right_bound;
                 ++j) {

               float sah = combined_node_cost(lds_base, i, j);
               uint32_t i_encoded_offset = encode_neighbour_offset(sah, i, j);
               uint32_t j_encoded_offset = encode_neighbour_offset(sah, j, i);
               min_offset = min(min_offset, i_encoded_offset);
               atomicMin(nearest_neighbour_indices[j - lds_base], j_encoded_offset);
            }
            if (i >= base_index - neighbourhood_overlap)
               atomicMin(nearest_neighbour_indices[i - lds_base], min_offset);
         }

         if (gl_LocalInvocationID.x == 0)
            shared_aggregate_sum = 0;
         barrier();

         for (uint32_t i = task_index - neighbourhood_overlap; i < write_bound;
              i += gl_WorkGroupSize.x) {
            uint32_t left_bound = min(i, PLOC_NEIGHBOURHOOD);
            uint32_t right_bound = min(search_bound - 1 - i, PLOC_NEIGHBOURHOOD);
            /*
             * Workaround for a worst-case scenario in PLOC: If the combined area of
             * all nodes (in the neighbourhood) is the same, then the chosen nearest
             * neighbour is the first neighbour. However, this means that no nodes
             * except the first two will find each other as nearest neighbour. Therefore,
             * only one node is contained in each BVH level. By first testing if the immediate
             * neighbour on one side is the nearest, all immediate neighbours will be merged
             * on every step.
             */
            uint32_t preferred_pair;
            if ((i & 1) != 0)
               preferred_pair = i - min(left_bound, 1);
            else
               preferred_pair = i + min(right_bound, 1);

            if (preferred_pair != i) {
               uint32_t encoded_min_sah =
                  nearest_neighbour_indices[i - lds_base] & (~PLOC_OFFSET_MASK);
               float sah = combined_node_cost(lds_base, i, preferred_pair);
               uint32_t encoded_sah = floatBitsToUint(sah) & (~PLOC_OFFSET_MASK);
               uint32_t encoded_offset = encode_neighbour_offset(sah, i, preferred_pair);
               if (encoded_sah <= encoded_min_sah) {
                  nearest_neighbour_indices[i - lds_base] = encoded_offset;
               }
            }
         }
         barrier();

         bool has_valid_node = true;

         if (task_index < current_task_count) {
            uint32_t base_index = task_index - gl_LocalInvocationID.x;

            uint32_t neighbour_index =
               task_index +
               decode_neighbour_offset(nearest_neighbour_indices[task_index - lds_base]);
            uint32_t other_neighbour_index =
               neighbour_index +
               decode_neighbour_offset(nearest_neighbour_indices[neighbour_index - lds_base]);
            uint32_t id = load_id(src_ids, iter, task_index);
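            /* Two nodes that picked each other as nearest neighbour are
             * merged; the invocation with the lower index creates the parent
             * and invalidates its partner's slot in dst_ids.
             */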
            if (other_neighbour_index == task_index) {
               if (task_index < neighbour_index) {
                  uint32_t neighbour_id = load_id(src_ids, iter, neighbour_index);
                  uint32_t children[2] = {id, neighbour_id};
                  radv_aabb bounds[2] = {shared_bounds[task_index - lds_base], shared_bounds[neighbour_index - lds_base]};

                  DEREF(REF(uint32_t)(INDEX(uint32_t, dst_ids, task_index))) = push_node(children, bounds);
                  DEREF(REF(uint32_t)(INDEX(uint32_t, dst_ids, neighbour_index))) =
                     RADV_BVH_INVALID_NODE;
               } else {
                  /* We still store in the other case so we don't destroy the node id needed to
                   * create the internal node */
                  has_valid_node = false;
               }
            } else {
               DEREF(REF(uint32_t)(INDEX(uint32_t, dst_ids, task_index))) = id;
            }

            /* Compact - prepare prefix scan */
            uvec4 ballot = subgroupBallot(has_valid_node);

            uint32_t aggregate_sum = subgroupBallotBitCount(ballot);
            if (subgroupElect())
               atomicAdd(shared_aggregate_sum, aggregate_sum);
         }

         barrier();
         /*
          * The paper proposes initializing all partitions to an invalid state
          * and only computing aggregates afterwards. We skip that step and
          * initialize the partitions to a valid state. This also simplifies
          * the look-back, as there will never be any blocking due to invalid
          * partitions.
          */
         if (gl_LocalInvocationIndex == 0) {
            REF(ploc_prefix_scan_partition)
            current_partition = REF(ploc_prefix_scan_partition)(
               INDEX(ploc_prefix_scan_partition, partitions, task_index / gl_WorkGroupSize.x));
            DEREF(current_partition).aggregate = shared_aggregate_sum;
            if (task_index < gl_WorkGroupSize.x) {
               DEREF(current_partition).inclusive_sum = shared_aggregate_sum;
            } else {
               DEREF(current_partition).inclusive_sum = 0xFFFFFFFF;
            }
         }

         if (task_index == 0)
            set_next_task_count(args.header, task_count(args.header));
      }

      /* Compact - prefix scan and update */
      PHASE (args.header) {
         uint32_t current_task_count = task_count(args.header);

         uint32_t id = task_index < current_task_count
                          ? DEREF(REF(uint32_t)(INDEX(uint32_t, dst_ids, task_index)))
                          : RADV_BVH_INVALID_NODE;
         uvec4 ballot = subgroupBallot(id != RADV_BVH_INVALID_NODE);

         uint32_t new_offset = prefix_scan(ballot, partitions, task_index);
         if (task_index >= current_task_count)
            continue;

         if (id != RADV_BVH_INVALID_NODE) {
            DEREF(REF(uint32_t)(INDEX(uint32_t, src_ids, new_offset))) = id;
            ++new_offset;
         }

         if (task_index == current_task_count - 1) {
            set_next_task_count(args.header, new_offset);
            if (new_offset == 1)
               DEREF(args.header).sync_data.next_phase_exit_flag = 1;
         }
      }
   }
}