1 /*
2  * Copyright 2016 Red Hat Inc.
3  * Based on anv:
4  * Copyright © 2015 Intel Corporation
5  *
6  * Permission is hereby granted, free of charge, to any person obtaining a
7  * copy of this software and associated documentation files (the "Software"),
8  * to deal in the Software without restriction, including without limitation
9  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
10  * and/or sell copies of the Software, and to permit persons to whom the
11  * Software is furnished to do so, subject to the following conditions:
12  *
13  * The above copyright notice and this permission notice (including the next
14  * paragraph) shall be included in all copies or substantial portions of the
15  * Software.
16  *
17  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
18  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
19  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
20  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
21  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
22  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
23  * IN THE SOFTWARE.
24  */
25 
26 #include <assert.h>
27 #include <stdbool.h>
28 #include <string.h>
29 #include <unistd.h>
30 #include <fcntl.h>
31 
32 #include "nir/nir_builder.h"
33 #include "radv_meta.h"
34 #include "radv_private.h"
35 #include "radv_cs.h"
36 #include "sid.h"
37 
38 
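/* Each pipeline-statistics query slot holds two snapshots (begin/end) of the
 * 11 hardware counters sampled by SAMPLE_PIPELINESTAT, 8 bytes each, hence
 * pipelinestat_block_size = 11 * 8.  pipeline_statistics_indices[] maps the
 * VkQueryPipelineStatisticFlagBits bit order to the position of the matching
 * counter inside such a block (see its use in radv_GetQueryPoolResults()).
 */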
39 static const int pipelinestat_block_size = 11 * 8;
40 static const unsigned pipeline_statistics_indices[] = {7, 6, 3, 4, 5, 2, 1, 0, 8, 9, 10};
41 
42 static unsigned get_max_db(struct radv_device *device)
43 {
44 	unsigned num_db = device->physical_device->rad_info.num_render_backends;
45 	MAYBE_UNUSED unsigned rb_mask = device->physical_device->rad_info.enabled_rb_mask;
46 
47 	/* Otherwise we need to change the query reset procedure */
48 	assert(rb_mask == ((1ull << num_db) - 1));
49 
50 	return num_db;
51 }
52 
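/* Helper used by the query shaders below: emits NIR that breaks out of the
 * innermost loop once *var has reached count, and otherwise increments *var.
 */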
53 static void radv_break_on_count(nir_builder *b, nir_variable *var, nir_ssa_def *count)
54 {
55 	nir_ssa_def *counter = nir_load_var(b, var);
56 
57 	nir_if *if_stmt = nir_if_create(b->shader);
58 	if_stmt->condition = nir_src_for_ssa(nir_uge(b, counter, count));
59 	nir_cf_node_insert(b->cursor, &if_stmt->cf_node);
60 
61 	b->cursor = nir_after_cf_list(&if_stmt->then_list);
62 
63 	nir_jump_instr *instr = nir_jump_instr_create(b->shader, nir_jump_break);
64 	nir_builder_instr_insert(b, &instr->instr);
65 
66 	b->cursor = nir_after_cf_node(&if_stmt->cf_node);
67 	counter = nir_iadd(b, counter, nir_imm_int(b, 1));
68 	nir_store_var(b, var, counter, 0x1);
69 }
70 
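/* Loads one 32-bit push constant from byte offset `offset`.  All query shaders
 * share a single 16-byte push-constant block (see the pipeline layout created
 * in radv_device_init_meta_query_state()).
 */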
71 static struct nir_ssa_def *
72 radv_load_push_int(nir_builder *b, unsigned offset, const char *name)
73 {
74 	nir_intrinsic_instr *flags = nir_intrinsic_instr_create(b->shader, nir_intrinsic_load_push_constant);
75 	nir_intrinsic_set_base(flags, 0);
76 	nir_intrinsic_set_range(flags, 16);
77 	flags->src[0] = nir_src_for_ssa(nir_imm_int(b, offset));
78 	flags->num_components = 1;
79 	nir_ssa_dest_init(&flags->instr, &flags->dest, 1, 32, name);
80 	nir_builder_instr_insert(b, &flags->instr);
81 	return &flags->dest.ssa;
82 }
83 
84 static nir_shader *
85 build_occlusion_query_shader(struct radv_device *device) {
86 	/* the shader this builds is roughly
87 	 *
88 	 * push constants {
89 	 * 	uint32_t flags;
90 	 * 	uint32_t dst_stride;
91 	 * };
92 	 *
93 	 * uint32_t src_stride = 16 * db_count;
94 	 *
95 	 * location(binding = 0) buffer dst_buf;
96 	 * location(binding = 1) buffer src_buf;
97 	 *
98 	 * void main() {
99 	 * 	uint64_t result = 0;
100 	 * 	uint64_t src_offset = src_stride * global_id.x;
101 	 * 	uint64_t dst_offset = dst_stride * global_id.x;
102 	 * 	bool available = true;
103 	 * 	for (int i = 0; i < db_count; ++i) {
104 	 * 		uint64_t start = src_buf[src_offset + 16 * i];
105 	 * 		uint64_t end = src_buf[src_offset + 16 * i + 8];
106 	 * 		if ((start & (1ull << 63)) && (end & (1ull << 63)))
107 	 * 			result += end - start;
108 	 * 		else
109 	 * 			available = false;
110 	 * 	}
111 	 * 	uint32_t elem_size = flags & VK_QUERY_RESULT_64_BIT ? 8 : 4;
112 	 * 	if ((flags & VK_QUERY_RESULT_PARTIAL_BIT) || available) {
113 	 * 		if (flags & VK_QUERY_RESULT_64_BIT)
114 	 * 			dst_buf[dst_offset] = result;
115 	 * 		else
116 	 * 			dst_buf[dst_offset] = (uint32_t)result;
117 	 * 	}
118 	 * 	if (flags & VK_QUERY_RESULT_WITH_AVAILABILITY_BIT) {
119 	 * 		dst_buf[dst_offset + elem_size] = available;
120 	 * 	}
121 	 * }
122 	 */
123 	nir_builder b;
124 	nir_builder_init_simple_shader(&b, NULL, MESA_SHADER_COMPUTE, NULL);
125 	b.shader->info.name = ralloc_strdup(b.shader, "occlusion_query");
126 	b.shader->info.cs.local_size[0] = 64;
127 	b.shader->info.cs.local_size[1] = 1;
128 	b.shader->info.cs.local_size[2] = 1;
129 
130 	nir_variable *result = nir_local_variable_create(b.impl, glsl_uint64_t_type(), "result");
131 	nir_variable *outer_counter = nir_local_variable_create(b.impl, glsl_int_type(), "outer_counter");
132 	nir_variable *start = nir_local_variable_create(b.impl, glsl_uint64_t_type(), "start");
133 	nir_variable *end = nir_local_variable_create(b.impl, glsl_uint64_t_type(), "end");
134 	nir_variable *available = nir_local_variable_create(b.impl, glsl_int_type(), "available");
135 	unsigned db_count = get_max_db(device);
136 
137 	nir_ssa_def *flags = radv_load_push_int(&b, 0, "flags");
138 
139 	nir_intrinsic_instr *dst_buf = nir_intrinsic_instr_create(b.shader,
140 	                                                          nir_intrinsic_vulkan_resource_index);
141 	dst_buf->src[0] = nir_src_for_ssa(nir_imm_int(&b, 0));
142 	nir_intrinsic_set_desc_set(dst_buf, 0);
143 	nir_intrinsic_set_binding(dst_buf, 0);
144 	nir_ssa_dest_init(&dst_buf->instr, &dst_buf->dest, 1, 32, NULL);
145 	nir_builder_instr_insert(&b, &dst_buf->instr);
146 
147 	nir_intrinsic_instr *src_buf = nir_intrinsic_instr_create(b.shader,
148 	                                                          nir_intrinsic_vulkan_resource_index);
149 	src_buf->src[0] = nir_src_for_ssa(nir_imm_int(&b, 0));
150 	nir_intrinsic_set_desc_set(src_buf, 0);
151 	nir_intrinsic_set_binding(src_buf, 1);
152 	nir_ssa_dest_init(&src_buf->instr, &src_buf->dest, 1, 32, NULL);
153 	nir_builder_instr_insert(&b, &src_buf->instr);
154 
155 	nir_ssa_def *invoc_id = nir_load_system_value(&b, nir_intrinsic_load_local_invocation_id, 0);
156 	nir_ssa_def *wg_id = nir_load_system_value(&b, nir_intrinsic_load_work_group_id, 0);
157 	nir_ssa_def *block_size = nir_imm_ivec4(&b,
158 	                                        b.shader->info.cs.local_size[0],
159 	                                        b.shader->info.cs.local_size[1],
160 	                                        b.shader->info.cs.local_size[2], 0);
161 	nir_ssa_def *global_id = nir_iadd(&b, nir_imul(&b, wg_id, block_size), invoc_id);
162 	global_id = nir_channel(&b, global_id, 0); // We only care about x here.
163 
164 	nir_ssa_def *input_stride = nir_imm_int(&b, db_count * 16);
165 	nir_ssa_def *input_base = nir_imul(&b, input_stride, global_id);
166 	nir_ssa_def *output_stride = radv_load_push_int(&b, 4, "output_stride");
167 	nir_ssa_def *output_base = nir_imul(&b, output_stride, global_id);
168 
169 
170 	nir_store_var(&b, result, nir_imm_int64(&b, 0), 0x1);
171 	nir_store_var(&b, outer_counter, nir_imm_int(&b, 0), 0x1);
172 	nir_store_var(&b, available, nir_imm_int(&b, 1), 0x1);
173 
174 	nir_loop *outer_loop = nir_loop_create(b.shader);
175 	nir_builder_cf_insert(&b, &outer_loop->cf_node);
176 	b.cursor = nir_after_cf_list(&outer_loop->body);
177 
178 	nir_ssa_def *current_outer_count = nir_load_var(&b, outer_counter);
179 	radv_break_on_count(&b, outer_counter, nir_imm_int(&b, db_count));
180 
181 	nir_ssa_def *load_offset = nir_imul(&b, current_outer_count, nir_imm_int(&b, 16));
182 	load_offset = nir_iadd(&b, input_base, load_offset);
183 
184 	nir_intrinsic_instr *load = nir_intrinsic_instr_create(b.shader, nir_intrinsic_load_ssbo);
185 	load->src[0] = nir_src_for_ssa(&src_buf->dest.ssa);
186 	load->src[1] = nir_src_for_ssa(load_offset);
187 	nir_ssa_dest_init(&load->instr, &load->dest, 2, 64, NULL);
188 	load->num_components = 2;
189 	nir_builder_instr_insert(&b, &load->instr);
190 
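	/* Each 2 x 64-bit load fetches the (begin, end) Z-pass counter pair written
	 * by one render backend; bit 63 of each value is the "result written" flag
	 * tested below.
	 */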
191 	const unsigned swizzle0[] = {0,0,0,0};
192 	const unsigned swizzle1[] = {1,1,1,1};
193 	nir_store_var(&b, start, nir_swizzle(&b, &load->dest.ssa, swizzle0, 1, false), 0x1);
194 	nir_store_var(&b, end, nir_swizzle(&b, &load->dest.ssa, swizzle1, 1, false), 0x1);
195 
196 	nir_ssa_def *start_done = nir_ilt(&b, nir_load_var(&b, start), nir_imm_int64(&b, 0));
197 	nir_ssa_def *end_done = nir_ilt(&b, nir_load_var(&b, end), nir_imm_int64(&b, 0));
198 
199 	nir_if *update_if = nir_if_create(b.shader);
200 	update_if->condition = nir_src_for_ssa(nir_iand(&b, start_done, end_done));
201 	nir_cf_node_insert(b.cursor, &update_if->cf_node);
202 
203 	b.cursor = nir_after_cf_list(&update_if->then_list);
204 
205 	nir_store_var(&b, result,
206 	              nir_iadd(&b, nir_load_var(&b, result),
207 	                           nir_isub(&b, nir_load_var(&b, end),
208 	                                        nir_load_var(&b, start))), 0x1);
209 
210 	b.cursor = nir_after_cf_list(&update_if->else_list);
211 
212 	nir_store_var(&b, available, nir_imm_int(&b, 0), 0x1);
213 
214 	b.cursor = nir_after_cf_node(&outer_loop->cf_node);
215 
216 	/* Store the result if complete or if partial results have been requested. */
217 
218 	nir_ssa_def *result_is_64bit = nir_iand(&b, flags,
219 	                                        nir_imm_int(&b, VK_QUERY_RESULT_64_BIT));
220 	nir_ssa_def *result_size = nir_bcsel(&b, result_is_64bit, nir_imm_int(&b, 8), nir_imm_int(&b, 4));
221 
222 	nir_if *store_if = nir_if_create(b.shader);
223 	store_if->condition = nir_src_for_ssa(nir_ior(&b, nir_iand(&b, flags, nir_imm_int(&b, VK_QUERY_RESULT_PARTIAL_BIT)), nir_load_var(&b, available)));
224 	nir_cf_node_insert(b.cursor, &store_if->cf_node);
225 
226 	b.cursor = nir_after_cf_list(&store_if->then_list);
227 
228 	nir_if *store_64bit_if = nir_if_create(b.shader);
229 	store_64bit_if->condition = nir_src_for_ssa(result_is_64bit);
230 	nir_cf_node_insert(b.cursor, &store_64bit_if->cf_node);
231 
232 	b.cursor = nir_after_cf_list(&store_64bit_if->then_list);
233 
234 	nir_intrinsic_instr *store = nir_intrinsic_instr_create(b.shader, nir_intrinsic_store_ssbo);
235 	store->src[0] = nir_src_for_ssa(nir_load_var(&b, result));
236 	store->src[1] = nir_src_for_ssa(&dst_buf->dest.ssa);
237 	store->src[2] = nir_src_for_ssa(output_base);
238 	nir_intrinsic_set_write_mask(store, 0x1);
239 	store->num_components = 1;
240 	nir_builder_instr_insert(&b, &store->instr);
241 
242 	b.cursor = nir_after_cf_list(&store_64bit_if->else_list);
243 
244 	store = nir_intrinsic_instr_create(b.shader, nir_intrinsic_store_ssbo);
245 	store->src[0] = nir_src_for_ssa(nir_u2u32(&b, nir_load_var(&b, result)));
246 	store->src[1] = nir_src_for_ssa(&dst_buf->dest.ssa);
247 	store->src[2] = nir_src_for_ssa(output_base);
248 	nir_intrinsic_set_write_mask(store, 0x1);
249 	store->num_components = 1;
250 	nir_builder_instr_insert(&b, &store->instr);
251 
252 	b.cursor = nir_after_cf_node(&store_if->cf_node);
253 
254 	/* Store the availability bit if requested. */
255 
256 	nir_if *availability_if = nir_if_create(b.shader);
257 	availability_if->condition = nir_src_for_ssa(nir_iand(&b, flags, nir_imm_int(&b, VK_QUERY_RESULT_WITH_AVAILABILITY_BIT)));
258 	nir_cf_node_insert(b.cursor, &availability_if->cf_node);
259 
260 	b.cursor = nir_after_cf_list(&availability_if->then_list);
261 
262 	store = nir_intrinsic_instr_create(b.shader, nir_intrinsic_store_ssbo);
263 	store->src[0] = nir_src_for_ssa(nir_load_var(&b, available));
264 	store->src[1] = nir_src_for_ssa(&dst_buf->dest.ssa);
265 	store->src[2] = nir_src_for_ssa(nir_iadd(&b, result_size, output_base));
266 	nir_intrinsic_set_write_mask(store, 0x1);
267 	store->num_components = 1;
268 	nir_builder_instr_insert(&b, &store->instr);
269 
270 	return b.shader;
271 }
272 
273 static nir_shader *
274 build_pipeline_statistics_query_shader(struct radv_device *device) {
275 	/* the shader this builds is roughly
276 	 *
277 	 * push constants {
278 	 * 	uint32_t flags;
279 	 * 	uint32_t dst_stride;
280 	 * 	uint32_t stats_mask;
281 	 * 	uint32_t avail_offset;
282 	 * };
283 	 *
284 	 * uint32_t src_stride = pipelinestat_block_size * 2;
285 	 *
286 	 * location(binding = 0) buffer dst_buf;
287 	 * location(binding = 1) buffer src_buf;
288 	 *
289 	 * void main() {
290 	 * 	uint64_t src_offset = src_stride * global_id.x;
291 	 * 	uint64_t dst_base = dst_stride * global_id.x;
292 	 * 	uint64_t dst_offset = dst_base;
293 	 * 	uint32_t elem_size = flags & VK_QUERY_RESULT_64_BIT ? 8 : 4;
294 	 * 	uint32_t elem_count = stats_mask >> 16;
295 	 * 	uint32_t available = src_buf[avail_offset + 4 * global_id.x];
296 	 * 	if (flags & VK_QUERY_RESULT_WITH_AVAILABILITY_BIT) {
297 	 * 		dst_buf[dst_offset + elem_count * elem_size] = available;
298 	 * 	}
299 	 * 	if (available) {
300 	 * 		// repeat 11 times:
301 	 * 		if (stats_mask & (1 << 0)) {
302 	 * 			uint64_t start = src_buf[src_offset + 8 * indices[0]];
303 	 * 			uint64_t end = src_buf[src_offset + 8 * indices[0] + pipelinestat_block_size];
304 	 * 			uint64_t result = end - start;
305 	 * 			if (flags & VK_QUERY_RESULT_64_BIT)
306 	 * 				dst_buf[dst_offset] = result;
307 	 * 			else
308 	 * 				dst_buf[dst_offset] = (uint32_t)result;
309 	 * 			dst_offset += elem_size;
310 	 * 		}
311 	 * 	} else if (flags & VK_QUERY_RESULT_PARTIAL_BIT) {
312 	 *              // Set everything to 0 as we don't know what is valid.
313 	 * 		for (int i = 0; i < elem_count; ++i)
314 	 * 			dst_buf[dst_base + elem_size * i] = 0;
315 	 * 	}
316 	 * }
317 	 */
318 	nir_builder b;
319 	nir_builder_init_simple_shader(&b, NULL, MESA_SHADER_COMPUTE, NULL);
320 	b.shader->info.name = ralloc_strdup(b.shader, "pipeline_statistics_query");
321 	b.shader->info.cs.local_size[0] = 64;
322 	b.shader->info.cs.local_size[1] = 1;
323 	b.shader->info.cs.local_size[2] = 1;
324 
325 	nir_variable *output_offset = nir_local_variable_create(b.impl, glsl_int_type(), "output_offset");
326 
327 	nir_ssa_def *flags = radv_load_push_int(&b, 0, "flags");
328 	nir_ssa_def *stats_mask = radv_load_push_int(&b, 8, "stats_mask");
329 	nir_ssa_def *avail_offset = radv_load_push_int(&b, 12, "avail_offset");
330 
331 	nir_intrinsic_instr *dst_buf = nir_intrinsic_instr_create(b.shader,
332 	                                                          nir_intrinsic_vulkan_resource_index);
333 	dst_buf->src[0] = nir_src_for_ssa(nir_imm_int(&b, 0));
334 	nir_intrinsic_set_desc_set(dst_buf, 0);
335 	nir_intrinsic_set_binding(dst_buf, 0);
336 	nir_ssa_dest_init(&dst_buf->instr, &dst_buf->dest, 1, 32, NULL);
337 	nir_builder_instr_insert(&b, &dst_buf->instr);
338 
339 	nir_intrinsic_instr *src_buf = nir_intrinsic_instr_create(b.shader,
340 	                                                          nir_intrinsic_vulkan_resource_index);
341 	src_buf->src[0] = nir_src_for_ssa(nir_imm_int(&b, 0));
342 	nir_intrinsic_set_desc_set(src_buf, 0);
343 	nir_intrinsic_set_binding(src_buf, 1);
344 	nir_ssa_dest_init(&src_buf->instr, &src_buf->dest, 1, 32, NULL);
345 	nir_builder_instr_insert(&b, &src_buf->instr);
346 
347 	nir_ssa_def *invoc_id = nir_load_system_value(&b, nir_intrinsic_load_local_invocation_id, 0);
348 	nir_ssa_def *wg_id = nir_load_system_value(&b, nir_intrinsic_load_work_group_id, 0);
349 	nir_ssa_def *block_size = nir_imm_ivec4(&b,
350 	                                        b.shader->info.cs.local_size[0],
351 	                                        b.shader->info.cs.local_size[1],
352 	                                        b.shader->info.cs.local_size[2], 0);
353 	nir_ssa_def *global_id = nir_iadd(&b, nir_imul(&b, wg_id, block_size), invoc_id);
354 	global_id = nir_channel(&b, global_id, 0); // We only care about x here.
355 
356 	nir_ssa_def *input_stride = nir_imm_int(&b, pipelinestat_block_size * 2);
357 	nir_ssa_def *input_base = nir_imul(&b, input_stride, global_id);
358 	nir_ssa_def *output_stride = radv_load_push_int(&b, 4, "output_stride");
359 	nir_ssa_def *output_base = nir_imul(&b, output_stride, global_id);
360 
361 
362 	avail_offset = nir_iadd(&b, avail_offset,
363 	                            nir_imul(&b, global_id, nir_imm_int(&b, 4)));
364 
365 	nir_intrinsic_instr *load = nir_intrinsic_instr_create(b.shader, nir_intrinsic_load_ssbo);
366 	load->src[0] = nir_src_for_ssa(&src_buf->dest.ssa);
367 	load->src[1] = nir_src_for_ssa(avail_offset);
368 	nir_ssa_dest_init(&load->instr, &load->dest, 1, 32, NULL);
369 	load->num_components = 1;
370 	nir_builder_instr_insert(&b, &load->instr);
371 	nir_ssa_def *available = &load->dest.ssa;
372 
373 	nir_ssa_def *result_is_64bit = nir_iand(&b, flags,
374 	                                        nir_imm_int(&b, VK_QUERY_RESULT_64_BIT));
375 	nir_ssa_def *elem_size = nir_bcsel(&b, result_is_64bit, nir_imm_int(&b, 8), nir_imm_int(&b, 4));
376 	nir_ssa_def *elem_count = nir_ushr(&b, stats_mask, nir_imm_int(&b, 16));
377 
378 	/* Store the availability bit if requested. */
379 
380 	nir_if *availability_if = nir_if_create(b.shader);
381 	availability_if->condition = nir_src_for_ssa(nir_iand(&b, flags, nir_imm_int(&b, VK_QUERY_RESULT_WITH_AVAILABILITY_BIT)));
382 	nir_cf_node_insert(b.cursor, &availability_if->cf_node);
383 
384 	b.cursor = nir_after_cf_list(&availability_if->then_list);
385 
386 	nir_intrinsic_instr *store = nir_intrinsic_instr_create(b.shader, nir_intrinsic_store_ssbo);
387 	store->src[0] = nir_src_for_ssa(available);
388 	store->src[1] = nir_src_for_ssa(&dst_buf->dest.ssa);
389 	store->src[2] = nir_src_for_ssa(nir_iadd(&b, output_base, nir_imul(&b, elem_count, elem_size)));
390 	nir_intrinsic_set_write_mask(store, 0x1);
391 	store->num_components = 1;
392 	nir_builder_instr_insert(&b, &store->instr);
393 
394 	b.cursor = nir_after_cf_node(&availability_if->cf_node);
395 
396 	nir_if *available_if = nir_if_create(b.shader);
397 	available_if->condition = nir_src_for_ssa(available);
398 	nir_cf_node_insert(b.cursor, &available_if->cf_node);
399 
400 	b.cursor = nir_after_cf_list(&available_if->then_list);
401 
402 	nir_store_var(&b, output_offset, output_base, 0x1);
403 	for (int i = 0; i < 11; ++i) {
404 		nir_if *store_if = nir_if_create(b.shader);
405 		store_if->condition = nir_src_for_ssa(nir_iand(&b, stats_mask, nir_imm_int(&b, 1u << i)));
406 		nir_cf_node_insert(b.cursor, &store_if->cf_node);
407 
408 		b.cursor = nir_after_cf_list(&store_if->then_list);
409 
410 		load = nir_intrinsic_instr_create(b.shader, nir_intrinsic_load_ssbo);
411 		load->src[0] = nir_src_for_ssa(&src_buf->dest.ssa);
412 		load->src[1] = nir_src_for_ssa(nir_iadd(&b, input_base,
413 		                                            nir_imm_int(&b, pipeline_statistics_indices[i] * 8)));
414 		nir_ssa_dest_init(&load->instr, &load->dest, 1, 64, NULL);
415 		load->num_components = 1;
416 		nir_builder_instr_insert(&b, &load->instr);
417 		nir_ssa_def *start = &load->dest.ssa;
418 
419 		load = nir_intrinsic_instr_create(b.shader, nir_intrinsic_load_ssbo);
420 		load->src[0] = nir_src_for_ssa(&src_buf->dest.ssa);
421 		load->src[1] = nir_src_for_ssa(nir_iadd(&b, input_base,
422 		                                            nir_imm_int(&b, pipeline_statistics_indices[i] * 8 + pipelinestat_block_size)));
423 		nir_ssa_dest_init(&load->instr, &load->dest, 1, 64, NULL);
424 		load->num_components = 1;
425 		nir_builder_instr_insert(&b, &load->instr);
426 		nir_ssa_def *end = &load->dest.ssa;
427 
428 		nir_ssa_def *result = nir_isub(&b, end, start);
429 
430 		/* Store result */
431 		nir_if *store_64bit_if = nir_if_create(b.shader);
432 		store_64bit_if->condition = nir_src_for_ssa(result_is_64bit);
433 		nir_cf_node_insert(b.cursor, &store_64bit_if->cf_node);
434 
435 		b.cursor = nir_after_cf_list(&store_64bit_if->then_list);
436 
437 		nir_intrinsic_instr *store = nir_intrinsic_instr_create(b.shader, nir_intrinsic_store_ssbo);
438 		store->src[0] = nir_src_for_ssa(result);
439 		store->src[1] = nir_src_for_ssa(&dst_buf->dest.ssa);
440 		store->src[2] = nir_src_for_ssa(nir_load_var(&b, output_offset));
441 		nir_intrinsic_set_write_mask(store, 0x1);
442 		store->num_components = 1;
443 		nir_builder_instr_insert(&b, &store->instr);
444 
445 		b.cursor = nir_after_cf_list(&store_64bit_if->else_list);
446 
447 		store = nir_intrinsic_instr_create(b.shader, nir_intrinsic_store_ssbo);
448 		store->src[0] = nir_src_for_ssa(nir_u2u32(&b, result));
449 		store->src[1] = nir_src_for_ssa(&dst_buf->dest.ssa);
450 		store->src[2] = nir_src_for_ssa(nir_load_var(&b, output_offset));
451 		nir_intrinsic_set_write_mask(store, 0x1);
452 		store->num_components = 1;
453 		nir_builder_instr_insert(&b, &store->instr);
454 
455 		b.cursor = nir_after_cf_node(&store_64bit_if->cf_node);
456 
457 		nir_store_var(&b, output_offset,
458 		                  nir_iadd(&b, nir_load_var(&b, output_offset),
459 		                               elem_size), 0x1);
460 
461 		b.cursor = nir_after_cf_node(&store_if->cf_node);
462 	}
463 
464 	b.cursor = nir_after_cf_list(&available_if->else_list);
465 
466 	available_if = nir_if_create(b.shader);
467 	available_if->condition = nir_src_for_ssa(nir_iand(&b, flags,
468 	                                                       nir_imm_int(&b, VK_QUERY_RESULT_PARTIAL_BIT)));
469 	nir_cf_node_insert(b.cursor, &available_if->cf_node);
470 
471 	b.cursor = nir_after_cf_list(&available_if->then_list);
472 
473 	/* Stores zeros in all outputs. */
474 
475 	nir_variable *counter = nir_local_variable_create(b.impl, glsl_int_type(), "counter");
476 	nir_store_var(&b, counter, nir_imm_int(&b, 0), 0x1);
477 
478 	nir_loop *loop = nir_loop_create(b.shader);
479 	nir_builder_cf_insert(&b, &loop->cf_node);
480 	b.cursor = nir_after_cf_list(&loop->body);
481 
482 	nir_ssa_def *current_counter = nir_load_var(&b, counter);
483 	radv_break_on_count(&b, counter, elem_count);
484 
485 	nir_ssa_def *output_elem = nir_iadd(&b, output_base,
486 	                                        nir_imul(&b, elem_size, current_counter));
487 
488 	nir_if *store_64bit_if = nir_if_create(b.shader);
489 	store_64bit_if->condition = nir_src_for_ssa(result_is_64bit);
490 	nir_cf_node_insert(b.cursor, &store_64bit_if->cf_node);
491 
492 	b.cursor = nir_after_cf_list(&store_64bit_if->then_list);
493 
494 	store = nir_intrinsic_instr_create(b.shader, nir_intrinsic_store_ssbo);
495 	store->src[0] = nir_src_for_ssa(nir_imm_int64(&b, 0));
496 	store->src[1] = nir_src_for_ssa(&dst_buf->dest.ssa);
497 	store->src[2] = nir_src_for_ssa(output_elem);
498 	nir_intrinsic_set_write_mask(store, 0x1);
499 	store->num_components = 1;
500 	nir_builder_instr_insert(&b, &store->instr);
501 
502 	b.cursor = nir_after_cf_list(&store_64bit_if->else_list);
503 
504 	store = nir_intrinsic_instr_create(b.shader, nir_intrinsic_store_ssbo);
505 	store->src[0] = nir_src_for_ssa(nir_imm_int(&b, 0));
506 	store->src[1] = nir_src_for_ssa(&dst_buf->dest.ssa);
507 	store->src[2] = nir_src_for_ssa(output_elem);
508 	nir_intrinsic_set_write_mask(store, 0x1);
509 	store->num_components = 1;
510 	nir_builder_instr_insert(&b, &store->instr);
511 
512 	b.cursor = nir_after_cf_node(&loop->cf_node);
513 	return b.shader;
514 }
515 
516 VkResult radv_device_init_meta_query_state(struct radv_device *device)
517 {
518 	VkResult result;
519 	struct radv_shader_module occlusion_cs = { .nir = NULL };
520 	struct radv_shader_module pipeline_statistics_cs = { .nir = NULL };
521 
522 	occlusion_cs.nir = build_occlusion_query_shader(device);
523 	pipeline_statistics_cs.nir = build_pipeline_statistics_query_shader(device);
524 
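	/* Both query pipelines share the same interface: a push-descriptor set with
	 * the destination buffer at binding 0 and the source (query pool) buffer at
	 * binding 1, plus the 16-byte push-constant range declared below.
	 */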
525 	VkDescriptorSetLayoutCreateInfo occlusion_ds_create_info = {
526 		.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_SET_LAYOUT_CREATE_INFO,
527 		.flags = VK_DESCRIPTOR_SET_LAYOUT_CREATE_PUSH_DESCRIPTOR_BIT_KHR,
528 		.bindingCount = 2,
529 		.pBindings = (VkDescriptorSetLayoutBinding[]) {
530 			{
531 				.binding = 0,
532 				.descriptorType = VK_DESCRIPTOR_TYPE_STORAGE_BUFFER,
533 				.descriptorCount = 1,
534 				.stageFlags = VK_SHADER_STAGE_COMPUTE_BIT,
535 				.pImmutableSamplers = NULL
536 			},
537 			{
538 				.binding = 1,
539 				.descriptorType = VK_DESCRIPTOR_TYPE_STORAGE_BUFFER,
540 				.descriptorCount = 1,
541 				.stageFlags = VK_SHADER_STAGE_COMPUTE_BIT,
542 				.pImmutableSamplers = NULL
543 			},
544 		}
545 	};
546 
547 	result = radv_CreateDescriptorSetLayout(radv_device_to_handle(device),
548 						&occlusion_ds_create_info,
549 						&device->meta_state.alloc,
550 						&device->meta_state.query.ds_layout);
551 	if (result != VK_SUCCESS)
552 		goto fail;
553 
554 	VkPipelineLayoutCreateInfo occlusion_pl_create_info = {
555 		.sType = VK_STRUCTURE_TYPE_PIPELINE_LAYOUT_CREATE_INFO,
556 		.setLayoutCount = 1,
557 		.pSetLayouts = &device->meta_state.query.ds_layout,
558 		.pushConstantRangeCount = 1,
559 		.pPushConstantRanges = &(VkPushConstantRange){VK_SHADER_STAGE_COMPUTE_BIT, 0, 16},
560 	};
561 
562 	result = radv_CreatePipelineLayout(radv_device_to_handle(device),
563 					  &occlusion_pl_create_info,
564 					  &device->meta_state.alloc,
565 					  &device->meta_state.query.p_layout);
566 	if (result != VK_SUCCESS)
567 		goto fail;
568 
569 	VkPipelineShaderStageCreateInfo occlusion_pipeline_shader_stage = {
570 		.sType = VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO,
571 		.stage = VK_SHADER_STAGE_COMPUTE_BIT,
572 		.module = radv_shader_module_to_handle(&occlusion_cs),
573 		.pName = "main",
574 		.pSpecializationInfo = NULL,
575 	};
576 
577 	VkComputePipelineCreateInfo occlusion_vk_pipeline_info = {
578 		.sType = VK_STRUCTURE_TYPE_COMPUTE_PIPELINE_CREATE_INFO,
579 		.stage = occlusion_pipeline_shader_stage,
580 		.flags = 0,
581 		.layout = device->meta_state.query.p_layout,
582 	};
583 
584 	result = radv_CreateComputePipelines(radv_device_to_handle(device),
585 					     radv_pipeline_cache_to_handle(&device->meta_state.cache),
586 					     1, &occlusion_vk_pipeline_info, NULL,
587 					     &device->meta_state.query.occlusion_query_pipeline);
588 	if (result != VK_SUCCESS)
589 		goto fail;
590 
591 	VkPipelineShaderStageCreateInfo pipeline_statistics_pipeline_shader_stage = {
592 		.sType = VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO,
593 		.stage = VK_SHADER_STAGE_COMPUTE_BIT,
594 		.module = radv_shader_module_to_handle(&pipeline_statistics_cs),
595 		.pName = "main",
596 		.pSpecializationInfo = NULL,
597 	};
598 
599 	VkComputePipelineCreateInfo pipeline_statistics_vk_pipeline_info = {
600 		.sType = VK_STRUCTURE_TYPE_COMPUTE_PIPELINE_CREATE_INFO,
601 		.stage = pipeline_statistics_pipeline_shader_stage,
602 		.flags = 0,
603 		.layout = device->meta_state.query.p_layout,
604 	};
605 
606 	result = radv_CreateComputePipelines(radv_device_to_handle(device),
607 					     radv_pipeline_cache_to_handle(&device->meta_state.cache),
608 					     1, &pipeline_statistics_vk_pipeline_info, NULL,
609 					     &device->meta_state.query.pipeline_statistics_query_pipeline);
610 
611 fail:
612 	if (result != VK_SUCCESS)
613 		radv_device_finish_meta_query_state(device);
614 	ralloc_free(occlusion_cs.nir);
615 	ralloc_free(pipeline_statistics_cs.nir);
616 	return result;
617 }
618 
619 void radv_device_finish_meta_query_state(struct radv_device *device)
620 {
621 	if (device->meta_state.query.pipeline_statistics_query_pipeline)
622 		radv_DestroyPipeline(radv_device_to_handle(device),
623 				     device->meta_state.query.pipeline_statistics_query_pipeline,
624 				     &device->meta_state.alloc);
625 
626 	if (device->meta_state.query.occlusion_query_pipeline)
627 		radv_DestroyPipeline(radv_device_to_handle(device),
628 				     device->meta_state.query.occlusion_query_pipeline,
629 				     &device->meta_state.alloc);
630 
631 	if (device->meta_state.query.p_layout)
632 		radv_DestroyPipelineLayout(radv_device_to_handle(device),
633 					   device->meta_state.query.p_layout,
634 					   &device->meta_state.alloc);
635 
636 	if (device->meta_state.query.ds_layout)
637 		radv_DestroyDescriptorSetLayout(radv_device_to_handle(device),
638 						device->meta_state.query.ds_layout,
639 						&device->meta_state.alloc);
640 }
641 
642 static void radv_query_shader(struct radv_cmd_buffer *cmd_buffer,
643                               VkPipeline pipeline,
644                               struct radeon_winsys_bo *src_bo,
645                               struct radeon_winsys_bo *dst_bo,
646                               uint64_t src_offset, uint64_t dst_offset,
647                               uint32_t src_stride, uint32_t dst_stride,
648                               uint32_t count, uint32_t flags,
649                               uint32_t pipeline_stats_mask, uint32_t avail_offset)
650 {
651 	struct radv_device *device = cmd_buffer->device;
652 	struct radv_meta_saved_state saved_state;
653 
654 	radv_meta_save(&saved_state, cmd_buffer,
655 		       RADV_META_SAVE_COMPUTE_PIPELINE |
656 		       RADV_META_SAVE_CONSTANTS |
657 		       RADV_META_SAVE_DESCRIPTORS);
658 
659 	struct radv_buffer dst_buffer = {
660 		.bo = dst_bo,
661 		.offset = dst_offset,
662 		.size = dst_stride * count
663 	};
664 
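	/* The source range has to cover both the per-query data and, for pipeline
	 * statistics queries, the availability dwords at avail_offset, hence the
	 * MAX2() below.
	 */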
665 	struct radv_buffer src_buffer = {
666 		.bo = src_bo,
667 		.offset = src_offset,
668 		.size = MAX2(src_stride * count, avail_offset + 4 * count - src_offset)
669 	};
670 
671 	radv_CmdBindPipeline(radv_cmd_buffer_to_handle(cmd_buffer),
672 			     VK_PIPELINE_BIND_POINT_COMPUTE, pipeline);
673 
674 	radv_meta_push_descriptor_set(cmd_buffer,
675 				      VK_PIPELINE_BIND_POINT_COMPUTE,
676 				      device->meta_state.query.p_layout,
677 				      0, /* set */
678 				      2, /* descriptorWriteCount */
679 				      (VkWriteDescriptorSet[]) {
680 				              {
681 				                      .sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET,
682 				                      .dstBinding = 0,
683 				                      .dstArrayElement = 0,
684 				                      .descriptorCount = 1,
685 				                      .descriptorType = VK_DESCRIPTOR_TYPE_STORAGE_BUFFER,
686 				                      .pBufferInfo = &(VkDescriptorBufferInfo) {
687 				                              .buffer = radv_buffer_to_handle(&dst_buffer),
688 				                              .offset = 0,
689 				                              .range = VK_WHOLE_SIZE
690 				                      }
691 				              },
692 				              {
693 				                      .sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET,
694 				                      .dstBinding = 1,
695 				                      .dstArrayElement = 0,
696 				                      .descriptorCount = 1,
697 				                      .descriptorType = VK_DESCRIPTOR_TYPE_STORAGE_BUFFER,
698 				                      .pBufferInfo = &(VkDescriptorBufferInfo) {
699 				                              .buffer = radv_buffer_to_handle(&src_buffer),
700 				                              .offset = 0,
701 				                              .range = VK_WHOLE_SIZE
702 				                      }
703 				              }
704 				      });
705 
706 	/* Encode the number of elements for easy access by the shader. */
707 	pipeline_stats_mask &= 0x7ff;
708 	pipeline_stats_mask |= util_bitcount(pipeline_stats_mask) << 16;
709 
710 	avail_offset -= src_offset;
711 
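	/* This struct must match the 16-byte push-constant block read via
	 * radv_load_push_int() in the shaders above: flags at offset 0, dst_stride
	 * at 4, stats_mask at 8 and avail_offset at 12.
	 */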
712 	struct {
713 		uint32_t flags;
714 		uint32_t dst_stride;
715 		uint32_t pipeline_stats_mask;
716 		uint32_t avail_offset;
717 	} push_constants = {
718 		flags,
719 		dst_stride,
720 		pipeline_stats_mask,
721 		avail_offset
722 	};
723 
724 	radv_CmdPushConstants(radv_cmd_buffer_to_handle(cmd_buffer),
725 				      device->meta_state.query.p_layout,
726 				      VK_SHADER_STAGE_COMPUTE_BIT, 0, sizeof(push_constants),
727 				      &push_constants);
728 
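	/* Make the query data written by the GFX pipe visible to the compute
	 * shader; with VK_QUERY_RESULT_WAIT_BIT the DB results also have to be
	 * flushed to memory before the shader polls them.
	 */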
729 	cmd_buffer->state.flush_bits |= RADV_CMD_FLAG_INV_GLOBAL_L2 |
730 	                                RADV_CMD_FLAG_INV_VMEM_L1;
731 
732 	if (flags & VK_QUERY_RESULT_WAIT_BIT)
733 		cmd_buffer->state.flush_bits |= RADV_CMD_FLUSH_AND_INV_FRAMEBUFFER;
734 
735 	radv_unaligned_dispatch(cmd_buffer, count, 1, 1);
736 
737 	cmd_buffer->state.flush_bits |= RADV_CMD_FLAG_INV_GLOBAL_L2 |
738 	                                RADV_CMD_FLAG_INV_VMEM_L1 |
739 	                                RADV_CMD_FLAG_CS_PARTIAL_FLUSH;
740 
741 	radv_meta_restore(&saved_state, cmd_buffer);
742 }
743 
744 VkResult radv_CreateQueryPool(
745 	VkDevice                                    _device,
746 	const VkQueryPoolCreateInfo*                pCreateInfo,
747 	const VkAllocationCallbacks*                pAllocator,
748 	VkQueryPool*                                pQueryPool)
749 {
750 	RADV_FROM_HANDLE(radv_device, device, _device);
751 	uint64_t size;
752 	struct radv_query_pool *pool = vk_alloc2(&device->alloc, pAllocator,
753 					       sizeof(*pool), 8,
754 					       VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
755 
756 	if (!pool)
757 		return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
758 
759 
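	/* Per-query slot size: occlusion queries store a (begin, end) pair of 64-bit
	 * counters for every render backend, pipeline statistics queries store two
	 * blocks of 11 64-bit counters (begin and end), and timestamp queries a
	 * single 64-bit value.
	 */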
760 	switch(pCreateInfo->queryType) {
761 	case VK_QUERY_TYPE_OCCLUSION:
762 		pool->stride = 16 * get_max_db(device);
763 		break;
764 	case VK_QUERY_TYPE_PIPELINE_STATISTICS:
765 		pool->stride = pipelinestat_block_size * 2;
766 		break;
767 	case VK_QUERY_TYPE_TIMESTAMP:
768 		pool->stride = 8;
769 		break;
770 	default:
771 		unreachable("creating unhandled query type");
772 	}
773 
774 	pool->type = pCreateInfo->queryType;
775 	pool->pipeline_stats_mask = pCreateInfo->pipelineStatistics;
776 	pool->availability_offset = pool->stride * pCreateInfo->queryCount;
777 	size = pool->availability_offset;
778 	if (pCreateInfo->queryType == VK_QUERY_TYPE_TIMESTAMP ||
779 	    pCreateInfo->queryType == VK_QUERY_TYPE_PIPELINE_STATISTICS)
780 		size += 4 * pCreateInfo->queryCount;
781 
782 	pool->bo = device->ws->buffer_create(device->ws, size,
783 					     64, RADEON_DOMAIN_GTT, RADEON_FLAG_NO_INTERPROCESS_SHARING);
784 
785 	if (!pool->bo) {
786 		vk_free2(&device->alloc, pAllocator, pool);
787 		return vk_error(VK_ERROR_OUT_OF_DEVICE_MEMORY);
788 	}
789 
790 	pool->ptr = device->ws->buffer_map(pool->bo);
791 
792 	if (!pool->ptr) {
793 		device->ws->buffer_destroy(pool->bo);
794 		vk_free2(&device->alloc, pAllocator, pool);
795 		return vk_error(VK_ERROR_OUT_OF_DEVICE_MEMORY);
796 	}
797 	memset(pool->ptr, 0, size);
798 
799 	*pQueryPool = radv_query_pool_to_handle(pool);
800 	return VK_SUCCESS;
801 }
802 
803 void radv_DestroyQueryPool(
804 	VkDevice                                    _device,
805 	VkQueryPool                                 _pool,
806 	const VkAllocationCallbacks*                pAllocator)
807 {
808 	RADV_FROM_HANDLE(radv_device, device, _device);
809 	RADV_FROM_HANDLE(radv_query_pool, pool, _pool);
810 
811 	if (!pool)
812 		return;
813 
814 	device->ws->buffer_destroy(pool->bo);
815 	vk_free2(&device->alloc, pAllocator, pool);
816 }
817 
818 VkResult radv_GetQueryPoolResults(
819 	VkDevice                                    _device,
820 	VkQueryPool                                 queryPool,
821 	uint32_t                                    firstQuery,
822 	uint32_t                                    queryCount,
823 	size_t                                      dataSize,
824 	void*                                       pData,
825 	VkDeviceSize                                stride,
826 	VkQueryResultFlags                          flags)
827 {
828 	RADV_FROM_HANDLE(radv_device, device, _device);
829 	RADV_FROM_HANDLE(radv_query_pool, pool, queryPool);
830 	char *data = pData;
831 	VkResult result = VK_SUCCESS;
832 
833 	for(unsigned i = 0; i < queryCount; ++i, data += stride) {
834 		char *dest = data;
835 		unsigned query = firstQuery + i;
836 		char *src = pool->ptr + query * pool->stride;
837 		uint32_t available;
838 
839 		if (pool->type != VK_QUERY_TYPE_OCCLUSION) {
840 			if (flags & VK_QUERY_RESULT_WAIT_BIT)
841 				while(!*(volatile uint32_t*)(pool->ptr + pool->availability_offset + 4 * query))
842 					;
843 			available = *(uint32_t*)(pool->ptr + pool->availability_offset + 4 * query);
844 		}
845 
846 		switch (pool->type) {
847 		case VK_QUERY_TYPE_TIMESTAMP: {
848 			if (!available && !(flags & VK_QUERY_RESULT_PARTIAL_BIT)) {
849 				result = VK_NOT_READY;
850 				break;
851 
852 			}
853 
854 			if (flags & VK_QUERY_RESULT_64_BIT) {
855 				*(uint64_t*)dest = *(uint64_t*)src;
856 				dest += 8;
857 			} else {
858 				*(uint32_t*)dest = *(uint32_t*)src;
859 				dest += 4;
860 			}
861 			break;
862 		}
863 		case VK_QUERY_TYPE_OCCLUSION: {
864 			volatile uint64_t const *src64 = (volatile uint64_t const *)src;
865 			uint64_t sample_count = 0;
866 			int db_count = get_max_db(device);
867 			available = 1;
868 
869 			for (int i = 0; i < db_count; ++i) {
870 				uint64_t start, end;
871 				do {
872 					start = src64[2 * i];
873 					end = src64[2 * i + 1];
874 				} while ((!(start & (1ull << 63)) || !(end & (1ull << 63))) && (flags & VK_QUERY_RESULT_WAIT_BIT));
875 
876 				if (!(start & (1ull << 63)) || !(end & (1ull << 63)))
877 					available = 0;
878 				else {
879 					sample_count += end - start;
880 				}
881 			}
882 
883 			if (!available && !(flags & VK_QUERY_RESULT_PARTIAL_BIT)) {
884 				result = VK_NOT_READY;
885 				break;
886 
887 			}
888 
889 			if (flags & VK_QUERY_RESULT_64_BIT) {
890 				*(uint64_t*)dest = sample_count;
891 				dest += 8;
892 			} else {
893 				*(uint32_t*)dest = sample_count;
894 				dest += 4;
895 			}
896 			break;
897 		}
898 		case VK_QUERY_TYPE_PIPELINE_STATISTICS: {
899 			if (!available && !(flags & VK_QUERY_RESULT_PARTIAL_BIT)) {
900 				result = VK_NOT_READY;
901 				break;
902 
903 			}
904 
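			/* The begin snapshot sits at the start of the slot and the end snapshot
			 * pipelinestat_block_size bytes later; each enabled statistic is reported
			 * as end - begin, in Vulkan bit order.
			 */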
905 			const uint64_t *start = (uint64_t*)src;
906 			const uint64_t *stop = (uint64_t*)(src + pipelinestat_block_size);
907 			if (flags & VK_QUERY_RESULT_64_BIT) {
908 				uint64_t *dst = (uint64_t*)dest;
909 				dest += util_bitcount(pool->pipeline_stats_mask) * 8;
910 				for(int i = 0; i < 11; ++i)
911 					if(pool->pipeline_stats_mask & (1u << i))
912 						*dst++ = stop[pipeline_statistics_indices[i]] -
913 						         start[pipeline_statistics_indices[i]];
914 
915 			} else {
916 				uint32_t *dst = (uint32_t*)dest;
917 				dest += util_bitcount(pool->pipeline_stats_mask) * 4;
918 				for(int i = 0; i < 11; ++i)
919 					if(pool->pipeline_stats_mask & (1u << i))
920 						*dst++ = stop[pipeline_statistics_indices[i]] -
921 						         start[pipeline_statistics_indices[i]];
922 			}
923 			break;
924 		}
925 		default:
926 			unreachable("trying to get results of unhandled query type");
927 		}
928 
929 		if (flags & VK_QUERY_RESULT_WITH_AVAILABILITY_BIT) {
930 			if (flags & VK_QUERY_RESULT_64_BIT) {
931 				*(uint64_t*)dest = available;
932 			} else {
933 				*(uint32_t*)dest = available;
934 			}
935 		}
936 	}
937 
938 	return result;
939 }
940 
941 void radv_CmdCopyQueryPoolResults(
942     VkCommandBuffer                             commandBuffer,
943     VkQueryPool                                 queryPool,
944     uint32_t                                    firstQuery,
945     uint32_t                                    queryCount,
946     VkBuffer                                    dstBuffer,
947     VkDeviceSize                                dstOffset,
948     VkDeviceSize                                stride,
949     VkQueryResultFlags                          flags)
950 {
951 	RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
952 	RADV_FROM_HANDLE(radv_query_pool, pool, queryPool);
953 	RADV_FROM_HANDLE(radv_buffer, dst_buffer, dstBuffer);
954 	struct radeon_winsys_cs *cs = cmd_buffer->cs;
955 	unsigned elem_size = (flags & VK_QUERY_RESULT_64_BIT) ? 8 : 4;
956 	uint64_t va = radv_buffer_get_va(pool->bo);
957 	uint64_t dest_va = radv_buffer_get_va(dst_buffer->bo);
958 	dest_va += dst_buffer->offset + dstOffset;
959 
960 	radv_cs_add_buffer(cmd_buffer->device->ws, cmd_buffer->cs, pool->bo, 8);
961 	radv_cs_add_buffer(cmd_buffer->device->ws, cmd_buffer->cs, dst_buffer->bo, 8);
962 
963 	switch (pool->type) {
964 	case VK_QUERY_TYPE_OCCLUSION:
965 		if (flags & VK_QUERY_RESULT_WAIT_BIT) {
966 			for(unsigned i = 0; i < queryCount; ++i, dest_va += stride) {
967 				unsigned query = firstQuery + i;
968 				uint64_t src_va = va + query * pool->stride + pool->stride - 4;
969 
970 				/* Waits on the upper word of the last DB entry */
971 				radeon_emit(cs, PKT3(PKT3_WAIT_REG_MEM, 5, 0));
972 				radeon_emit(cs, 5 | WAIT_REG_MEM_MEM_SPACE(1));
973 				radeon_emit(cs, src_va);
974 				radeon_emit(cs, src_va >> 32);
975 				radeon_emit(cs, 0x80000000); /* reference value */
976 				radeon_emit(cs, 0xffffffff); /* mask */
977 				radeon_emit(cs, 4); /* poll interval */
978 			}
979 		}
980 		radv_query_shader(cmd_buffer, cmd_buffer->device->meta_state.query.occlusion_query_pipeline,
981 		                  pool->bo, dst_buffer->bo, firstQuery * pool->stride,
982 		                  dst_buffer->offset + dstOffset,
983 		                  get_max_db(cmd_buffer->device) * 16, stride,
984 		                  queryCount, flags, 0, 0);
985 		break;
986 	case VK_QUERY_TYPE_PIPELINE_STATISTICS:
987 		if (flags & VK_QUERY_RESULT_WAIT_BIT) {
988 			for(unsigned i = 0; i < queryCount; ++i, dest_va += stride) {
989 				unsigned query = firstQuery + i;
990 
991 				radeon_check_space(cmd_buffer->device->ws, cs, 7);
992 
993 				uint64_t avail_va = va + pool->availability_offset + 4 * query;
994 
995 				/* This waits on the ME. All copies below are done on the ME */
996 				si_emit_wait_fence(cs, false, avail_va, 1, 0xffffffff);
997 			}
998 		}
999 		radv_query_shader(cmd_buffer, cmd_buffer->device->meta_state.query.pipeline_statistics_query_pipeline,
1000 		                  pool->bo, dst_buffer->bo, firstQuery * pool->stride,
1001 		                  dst_buffer->offset + dstOffset,
1002 		                  pipelinestat_block_size * 2, stride, queryCount, flags,
1003 		                  pool->pipeline_stats_mask,
1004 		                  pool->availability_offset + 4 * firstQuery);
1005 		break;
1006 	case VK_QUERY_TYPE_TIMESTAMP:
1007 		for(unsigned i = 0; i < queryCount; ++i, dest_va += stride) {
1008 			unsigned query = firstQuery + i;
1009 			uint64_t local_src_va = va  + query * pool->stride;
1010 
1011 			MAYBE_UNUSED unsigned cdw_max = radeon_check_space(cmd_buffer->device->ws, cs, 19);
1012 
1013 
1014 			if (flags & VK_QUERY_RESULT_WAIT_BIT) {
1015 				/* TODO: not sure if there is any case where the result won't already be available here. */
1016 				uint64_t avail_va = va + pool->availability_offset + 4 * query;
1017 
1018 				/* This waits on the ME. All copies below are done on the ME */
1019 				si_emit_wait_fence(cs, false, avail_va, 1, 0xffffffff);
1020 			}
1021 			if (flags & VK_QUERY_RESULT_WITH_AVAILABILITY_BIT) {
1022 				uint64_t avail_va = va + pool->availability_offset + 4 * query;
1023 				uint64_t avail_dest_va = dest_va + elem_size;
1024 
1025 				radeon_emit(cs, PKT3(PKT3_COPY_DATA, 4, 0));
1026 				radeon_emit(cs, COPY_DATA_SRC_SEL(COPY_DATA_MEM) |
1027 						COPY_DATA_DST_SEL(COPY_DATA_MEM));
1028 				radeon_emit(cs, avail_va);
1029 				radeon_emit(cs, avail_va >> 32);
1030 				radeon_emit(cs, avail_dest_va);
1031 				radeon_emit(cs, avail_dest_va >> 32);
1032 			}
1033 
1034 			radeon_emit(cs, PKT3(PKT3_COPY_DATA, 4, 0));
1035 			radeon_emit(cs, COPY_DATA_SRC_SEL(COPY_DATA_MEM) |
1036 					COPY_DATA_DST_SEL(COPY_DATA_MEM) |
1037 					((flags & VK_QUERY_RESULT_64_BIT) ? COPY_DATA_COUNT_SEL : 0));
1038 			radeon_emit(cs, local_src_va);
1039 			radeon_emit(cs, local_src_va >> 32);
1040 			radeon_emit(cs, dest_va);
1041 			radeon_emit(cs, dest_va >> 32);
1042 
1043 
1044 			assert(cs->cdw <= cdw_max);
1045 		}
1046 		break;
1047 	default:
1048 		unreachable("trying to get results of unhandled query type");
1049 	}
1050 
1051 }
1052 
1053 void radv_CmdResetQueryPool(
1054 	VkCommandBuffer                             commandBuffer,
1055 	VkQueryPool                                 queryPool,
1056 	uint32_t                                    firstQuery,
1057 	uint32_t                                    queryCount)
1058 {
1059 	RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
1060 	RADV_FROM_HANDLE(radv_query_pool, pool, queryPool);
1061 	uint32_t flush_bits = 0;
1062 
1063 	flush_bits |= radv_fill_buffer(cmd_buffer, pool->bo,
1064 				       firstQuery * pool->stride,
1065 				       queryCount * pool->stride, 0);
1066 
1067 	if (pool->type == VK_QUERY_TYPE_TIMESTAMP ||
1068 	    pool->type == VK_QUERY_TYPE_PIPELINE_STATISTICS) {
1069 		flush_bits |= radv_fill_buffer(cmd_buffer, pool->bo,
1070 					       pool->availability_offset + firstQuery * 4,
1071 					       queryCount * 4, 0);
1072 	}
1073 
1074 	if (flush_bits) {
1075 		/* Only need to flush caches for the compute shader path. */
1076 		cmd_buffer->pending_reset_query = true;
1077 		cmd_buffer->state.flush_bits |= flush_bits;
1078 	}
1079 }
1080 
1081 void radv_CmdBeginQuery(
1082     VkCommandBuffer                             commandBuffer,
1083     VkQueryPool                                 queryPool,
1084     uint32_t                                    query,
1085     VkQueryControlFlags                         flags)
1086 {
1087 	RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
1088 	RADV_FROM_HANDLE(radv_query_pool, pool, queryPool);
1089 	struct radeon_winsys_cs *cs = cmd_buffer->cs;
1090 	uint64_t va = radv_buffer_get_va(pool->bo);
1091 	va += pool->stride * query;
1092 
1093 	radv_cs_add_buffer(cmd_buffer->device->ws, cs, pool->bo, 8);
1094 
1095 	if (cmd_buffer->pending_reset_query) {
1096 		/* Make sure to flush caches if the query pool has been
1097 		 * previously reset using the compute shader path.
1098 		 */
1099 		si_emit_cache_flush(cmd_buffer);
1100 		cmd_buffer->pending_reset_query = false;
1101 	}
1102 
1103 	switch (pool->type) {
1104 	case VK_QUERY_TYPE_OCCLUSION:
1105 		radeon_check_space(cmd_buffer->device->ws, cs, 7);
1106 
1107 		++cmd_buffer->state.active_occlusion_queries;
1108 		if (cmd_buffer->state.active_occlusion_queries == 1)
1109 			radv_set_db_count_control(cmd_buffer);
1110 
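		/* ZPASS_DONE makes every render backend write its current Z-pass count to
		 * the query slot at va; radv_CmdEndQuery() emits the same event again at
		 * va + 8 so each slot ends up with a (begin, end) pair per backend.
		 */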
1111 		radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 2, 0));
1112 		radeon_emit(cs, EVENT_TYPE(V_028A90_ZPASS_DONE) | EVENT_INDEX(1));
1113 		radeon_emit(cs, va);
1114 		radeon_emit(cs, va >> 32);
1115 		break;
1116 	case VK_QUERY_TYPE_PIPELINE_STATISTICS:
1117 		radeon_check_space(cmd_buffer->device->ws, cs, 4);
1118 
1119 		radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 2, 0));
1120 		radeon_emit(cs, EVENT_TYPE(V_028A90_SAMPLE_PIPELINESTAT) | EVENT_INDEX(2));
1121 		radeon_emit(cs, va);
1122 		radeon_emit(cs, va >> 32);
1123 		break;
1124 	default:
1125 		unreachable("beginning unhandled query type");
1126 	}
1127 }
1128 
1129 
1130 void radv_CmdEndQuery(
1131     VkCommandBuffer                             commandBuffer,
1132     VkQueryPool                                 queryPool,
1133     uint32_t                                    query)
1134 {
1135 	RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
1136 	RADV_FROM_HANDLE(radv_query_pool, pool, queryPool);
1137 	struct radeon_winsys_cs *cs = cmd_buffer->cs;
1138 	uint64_t va = radv_buffer_get_va(pool->bo);
1139 	uint64_t avail_va = va + pool->availability_offset + 4 * query;
1140 	va += pool->stride * query;
1141 
1142 	/* Do not need to add the pool BO to the list because the query must
1143 	 * currently be active, which means the BO is already in the list.
1144 	 */
1145 
1146 	switch (pool->type) {
1147 	case VK_QUERY_TYPE_OCCLUSION:
1148 		radeon_check_space(cmd_buffer->device->ws, cs, 14);
1149 
1150 		cmd_buffer->state.active_occlusion_queries--;
1151 		if (cmd_buffer->state.active_occlusion_queries == 0)
1152 			radv_set_db_count_control(cmd_buffer);
1153 
1154 		radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 2, 0));
1155 		radeon_emit(cs, EVENT_TYPE(V_028A90_ZPASS_DONE) | EVENT_INDEX(1));
1156 		radeon_emit(cs, va + 8);
1157 		radeon_emit(cs, (va + 8) >> 32);
1158 
1159 		break;
1160 	case VK_QUERY_TYPE_PIPELINE_STATISTICS:
1161 		radeon_check_space(cmd_buffer->device->ws, cs, 16);
1162 
1163 		va += pipelinestat_block_size;
1164 
1165 		radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 2, 0));
1166 		radeon_emit(cs, EVENT_TYPE(V_028A90_SAMPLE_PIPELINESTAT) | EVENT_INDEX(2));
1167 		radeon_emit(cs, va);
1168 		radeon_emit(cs, va >> 32);
1169 
1170 		si_cs_emit_write_event_eop(cs,
1171 					   false,
1172 					   cmd_buffer->device->physical_device->rad_info.chip_class,
1173 					   radv_cmd_buffer_uses_mec(cmd_buffer),
1174 					   V_028A90_BOTTOM_OF_PIPE_TS, 0,
1175 					   1, avail_va, 0, 1);
1176 		break;
1177 	default:
1178 		unreachable("ending unhandled query type");
1179 	}
1180 }
1181 
1182 void radv_CmdWriteTimestamp(
1183     VkCommandBuffer                             commandBuffer,
1184     VkPipelineStageFlagBits                     pipelineStage,
1185     VkQueryPool                                 queryPool,
1186     uint32_t                                    query)
1187 {
1188 	RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
1189 	RADV_FROM_HANDLE(radv_query_pool, pool, queryPool);
1190 	bool mec = radv_cmd_buffer_uses_mec(cmd_buffer);
1191 	struct radeon_winsys_cs *cs = cmd_buffer->cs;
1192 	uint64_t va = radv_buffer_get_va(pool->bo);
1193 	uint64_t avail_va = va + pool->availability_offset + 4 * query;
1194 	uint64_t query_va = va + pool->stride * query;
1195 
1196 	radv_cs_add_buffer(cmd_buffer->device->ws, cs, pool->bo, 5);
1197 
1198 	MAYBE_UNUSED unsigned cdw_max = radeon_check_space(cmd_buffer->device->ws, cs, 28);
1199 
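	/* For top-of-pipe timestamps the CP copies the current timestamp and sets
	 * the availability dword directly; every other stage falls back to a
	 * bottom-of-pipe EOP event that writes both values once the pipe drains.
	 */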
1200 	switch(pipelineStage) {
1201 	case VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT:
1202 		radeon_emit(cs, PKT3(PKT3_COPY_DATA, 4, 0));
1203 		radeon_emit(cs, COPY_DATA_COUNT_SEL | COPY_DATA_WR_CONFIRM |
1204 		                COPY_DATA_SRC_SEL(COPY_DATA_TIMESTAMP) |
1205 		                COPY_DATA_DST_SEL(V_370_MEM_ASYNC));
1206 		radeon_emit(cs, 0);
1207 		radeon_emit(cs, 0);
1208 		radeon_emit(cs, query_va);
1209 		radeon_emit(cs, query_va >> 32);
1210 
1211 		radeon_emit(cs, PKT3(PKT3_WRITE_DATA, 3, 0));
1212 		radeon_emit(cs, S_370_DST_SEL(V_370_MEM_ASYNC) |
1213 		                S_370_WR_CONFIRM(1) |
1214 		                S_370_ENGINE_SEL(V_370_ME));
1215 		radeon_emit(cs, avail_va);
1216 		radeon_emit(cs, avail_va >> 32);
1217 		radeon_emit(cs, 1);
1218 		break;
1219 	default:
1220 		si_cs_emit_write_event_eop(cs,
1221 					   false,
1222 					   cmd_buffer->device->physical_device->rad_info.chip_class,
1223 					   mec,
1224 					   V_028A90_BOTTOM_OF_PIPE_TS, 0,
1225 					   3, query_va, 0, 0);
1226 		si_cs_emit_write_event_eop(cs,
1227 					   false,
1228 					   cmd_buffer->device->physical_device->rad_info.chip_class,
1229 					   mec,
1230 					   V_028A90_BOTTOM_OF_PIPE_TS, 0,
1231 					   1, avail_va, 0, 1);
1232 		break;
1233 	}
1234 
1235 	assert(cmd_buffer->cs->cdw <= cdw_max);
1236 }
1237