/*
 * Copyright © 2016 Red Hat Inc.
 * Based on anv:
 * Copyright © 2015 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include <assert.h>
#include <stdbool.h>
#include <string.h>
#include <unistd.h>
#include <fcntl.h>

#include "radv_private.h"
#include "radv_cs.h"
#include "sid.h"

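/* Number of DB (render backend) slots an occlusion query has to provide space
 * for: SI always reports 8, newer chips use at least 8. The assert below
 * documents that the query reset code assumes every render backend is enabled.
 */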
static unsigned get_max_db(struct radv_device *device)
{
	unsigned num_db = device->physical_device->rad_info.num_render_backends;
	MAYBE_UNUSED unsigned rb_mask = device->physical_device->rad_info.enabled_rb_mask;

	if (device->physical_device->rad_info.chip_class == SI)
		num_db = 8;
	else
		num_db = MAX2(8, num_db);

	/* Otherwise we need to change the query reset procedure */
	assert(rb_mask == ((1ull << num_db) - 1));

	return num_db;
}

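/* Pool layout: queryCount result slots of pool->stride bytes each, followed at
 * pool->availability_offset by one 32-bit availability word per query. The
 * buffer lives in GTT, stays mapped for CPU readback and is zero-initialized
 * on creation.
 */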
VkResult radv_CreateQueryPool(
	VkDevice                                    _device,
	const VkQueryPoolCreateInfo*                pCreateInfo,
	const VkAllocationCallbacks*                pAllocator,
	VkQueryPool*                                pQueryPool)
{
	RADV_FROM_HANDLE(radv_device, device, _device);
	uint64_t size;
	struct radv_query_pool *pool = vk_alloc2(&device->alloc, pAllocator,
					       sizeof(*pool), 8,
					       VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);

	if (!pool)
		return VK_ERROR_OUT_OF_HOST_MEMORY;

	switch(pCreateInfo->queryType) {
	case VK_QUERY_TYPE_OCCLUSION:
		/* 16 bytes of temporary buffer, as the compute packet writes
		 * 64 bits but the application may only have 32 bits of space. */
		pool->stride = 16 * get_max_db(device) + 16;
		break;
	case VK_QUERY_TYPE_PIPELINE_STATISTICS:
		pool->stride = 16 * 11;
		break;
	case VK_QUERY_TYPE_TIMESTAMP:
		pool->stride = 8;
		break;
	default:
		unreachable("creating unhandled query type");
	}

	pool->type = pCreateInfo->queryType;
	pool->availability_offset = pool->stride * pCreateInfo->queryCount;
	size = pool->availability_offset + 4 * pCreateInfo->queryCount;

	pool->bo = device->ws->buffer_create(device->ws, size,
					     64, RADEON_DOMAIN_GTT, 0);

	if (!pool->bo) {
		vk_free2(&device->alloc, pAllocator, pool);
		return VK_ERROR_OUT_OF_DEVICE_MEMORY;
	}

	pool->ptr = device->ws->buffer_map(pool->bo);

	if (!pool->ptr) {
		device->ws->buffer_destroy(pool->bo);
		vk_free2(&device->alloc, pAllocator, pool);
		return VK_ERROR_OUT_OF_DEVICE_MEMORY;
	}
	memset(pool->ptr, 0, size);

	*pQueryPool = radv_query_pool_to_handle(pool);
	return VK_SUCCESS;
}

void radv_DestroyQueryPool(
	VkDevice                                    _device,
	VkQueryPool                                 _pool,
	const VkAllocationCallbacks*                pAllocator)
{
	RADV_FROM_HANDLE(radv_device, device, _device);
	RADV_FROM_HANDLE(radv_query_pool, pool, _pool);

	if (!pool)
		return;

	device->ws->buffer_destroy(pool->bo);
	vk_free2(&device->alloc, pAllocator, pool);
}

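/* CPU readback from the mapped pool. VK_QUERY_RESULT_WAIT_BIT busy-waits
 * until the query is available, VK_QUERY_RESULT_PARTIAL_BIT returns whatever
 * has been written so far, and VK_QUERY_RESULT_WITH_AVAILABILITY_BIT appends
 * an availability word after each result.
 */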
VkResult radv_GetQueryPoolResults(
	VkDevice                                    _device,
	VkQueryPool                                 queryPool,
	uint32_t                                    firstQuery,
	uint32_t                                    queryCount,
	size_t                                      dataSize,
	void*                                       pData,
	VkDeviceSize                                stride,
	VkQueryResultFlags                          flags)
{
	RADV_FROM_HANDLE(radv_device, device, _device);
	RADV_FROM_HANDLE(radv_query_pool, pool, queryPool);
	char *data = pData;
	VkResult result = VK_SUCCESS;

	for(unsigned i = 0; i < queryCount; ++i, data += stride) {
		char *dest = data;
		unsigned query = firstQuery + i;
		char *src = pool->ptr + query * pool->stride;
		uint32_t available;

		switch (pool->type) {
		case VK_QUERY_TYPE_TIMESTAMP: {
			if (flags & VK_QUERY_RESULT_WAIT_BIT) {
				while(!*(volatile uint32_t*)(pool->ptr + pool->availability_offset + 4 * query))
					;
			}

			available = *(uint32_t*)(pool->ptr + pool->availability_offset + 4 * query);
			if (!available && !(flags & VK_QUERY_RESULT_PARTIAL_BIT)) {
				result = VK_NOT_READY;
				break;
			}

			if (flags & VK_QUERY_RESULT_64_BIT) {
				*(uint64_t*)dest = *(uint64_t*)src;
				dest += 8;
			} else {
				*(uint32_t*)dest = *(uint32_t*)src;
				dest += 4;
			}
			break;
		}
		case VK_QUERY_TYPE_OCCLUSION: {
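			/* Each render backend writes a (begin, end) pair of
			 * Z-pass counters to the query slot; bit 63 of each
			 * value is set once the counter has actually been
			 * written, so it doubles as a per-DB "valid" flag.
			 */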
			volatile uint64_t const *src64 = (volatile uint64_t const *)src;
			uint64_t sample_count = 0;
			int db_count = get_max_db(device);
			available = 1;

			for (int j = 0; j < db_count; ++j) {
				uint64_t start, end;
				do {
					start = src64[2 * j];
					end = src64[2 * j + 1];
				} while ((!(start & (1ull << 63)) || !(end & (1ull << 63))) && (flags & VK_QUERY_RESULT_WAIT_BIT));

				if (!(start & (1ull << 63)) || !(end & (1ull << 63)))
					available = 0;
				else
					sample_count += end - start;
			}

			if (!available && !(flags & VK_QUERY_RESULT_PARTIAL_BIT)) {
				result = VK_NOT_READY;
				break;
			}

			if (flags & VK_QUERY_RESULT_64_BIT) {
				*(uint64_t*)dest = sample_count;
				dest += 8;
			} else {
				*(uint32_t*)dest = sample_count;
				dest += 4;
			}
			break;
		}
		default:
			unreachable("trying to get results of unhandled query type");
		}

		if (flags & VK_QUERY_RESULT_WITH_AVAILABILITY_BIT) {
			if (flags & VK_QUERY_RESULT_64_BIT) {
				*(uint64_t*)dest = available;
			} else {
				*(uint32_t*)dest = available;
			}
		}
	}

	return result;
}

void radv_CmdCopyQueryPoolResults(
    VkCommandBuffer                             commandBuffer,
    VkQueryPool                                 queryPool,
    uint32_t                                    firstQuery,
    uint32_t                                    queryCount,
    VkBuffer                                    dstBuffer,
    VkDeviceSize                                dstOffset,
    VkDeviceSize                                stride,
    VkQueryResultFlags                          flags)
{
	RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
	RADV_FROM_HANDLE(radv_query_pool, pool, queryPool);
	RADV_FROM_HANDLE(radv_buffer, dst_buffer, dstBuffer);
	struct radeon_winsys_cs *cs = cmd_buffer->cs;
	uint64_t va = cmd_buffer->device->ws->buffer_get_va(pool->bo);
	uint64_t dest_va = cmd_buffer->device->ws->buffer_get_va(dst_buffer->bo);
	dest_va += dst_buffer->offset + dstOffset;

	cmd_buffer->device->ws->cs_add_buffer(cmd_buffer->cs, pool->bo, 8);
	cmd_buffer->device->ws->cs_add_buffer(cmd_buffer->cs, dst_buffer->bo, 8);

	for(unsigned i = 0; i < queryCount; ++i, dest_va += stride) {
		unsigned query = firstQuery + i;
		uint64_t local_src_va = va + query * pool->stride;
		unsigned elem_size = (flags & VK_QUERY_RESULT_64_BIT) ? 8 : 4;

		MAYBE_UNUSED unsigned cdw_max = radeon_check_space(cmd_buffer->device->ws, cs, 26);

		if (flags & VK_QUERY_RESULT_WAIT_BIT) {
			/* TODO: not sure there is any case where the result
			 * would not already be available here. */
			uint64_t avail_va = va + pool->availability_offset + 4 * query;

			/* This waits on the ME; all copies below are also done on the ME. */
			radeon_emit(cs, PKT3(PKT3_WAIT_REG_MEM, 5, 0));
			radeon_emit(cs, WAIT_REG_MEM_EQUAL | WAIT_REG_MEM_MEM_SPACE(1));
			radeon_emit(cs, avail_va);
			radeon_emit(cs, avail_va >> 32);
			radeon_emit(cs, 1); /* reference value */
			radeon_emit(cs, 0xffffffff); /* mask */
			radeon_emit(cs, 4); /* poll interval */
		}

		switch (pool->type) {
		case VK_QUERY_TYPE_OCCLUSION:
			local_src_va += pool->stride - 16;
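			/* The accumulated result lives in the last 16 bytes of
			 * the occlusion slot (see radv_CmdEndQuery), so after
			 * adjusting the source address the copy is the same as
			 * for timestamps. Fall through. */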
265 
266 		case VK_QUERY_TYPE_TIMESTAMP:
267 			radeon_emit(cs, PKT3(PKT3_COPY_DATA, 4, 0));
268 			radeon_emit(cs, COPY_DATA_SRC_SEL(COPY_DATA_MEM) |
269 					COPY_DATA_DST_SEL(COPY_DATA_MEM) |
270 					((flags & VK_QUERY_RESULT_64_BIT) ? COPY_DATA_COUNT_SEL : 0));
271 			radeon_emit(cs, local_src_va);
272 			radeon_emit(cs, local_src_va >> 32);
273 			radeon_emit(cs, dest_va);
274 			radeon_emit(cs, dest_va >> 32);
275 			break;
276 		default:
277 			unreachable("trying to get results of unhandled query type");
278 		}
279 
280 		/* The flag could be still changed while the data copy is busy and we
281 		 * then might have invalid data, but a ready flag. However, the availability
282 		 * writes happen on the ME too, so they should be synchronized. Might need to
283 		 * revisit this with multiple queues.
284 		 */
285 		if (flags & VK_QUERY_RESULT_WITH_AVAILABILITY_BIT) {
286 			uint64_t avail_va = va + pool->availability_offset + 4 * query;
287 			uint64_t avail_dest_va = dest_va;
288 			if (pool->type != VK_QUERY_TYPE_PIPELINE_STATISTICS)
289 				avail_dest_va += elem_size;
290 			else
291 				abort();
292 
293 			radeon_emit(cs, PKT3(PKT3_COPY_DATA, 4, 0));
294 			radeon_emit(cs, COPY_DATA_SRC_SEL(COPY_DATA_MEM) |
295 					COPY_DATA_DST_SEL(COPY_DATA_MEM));
296 			radeon_emit(cs, avail_va);
297 			radeon_emit(cs, avail_va >> 32);
298 			radeon_emit(cs, avail_dest_va);
299 			radeon_emit(cs, avail_dest_va >> 32);
300 		}
301 
302 		assert(cs->cdw <= cdw_max);
303 	}
304 
305 }

void radv_CmdResetQueryPool(
	VkCommandBuffer                             commandBuffer,
	VkQueryPool                                 queryPool,
	uint32_t                                    firstQuery,
	uint32_t                                    queryCount)
{
	RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
	RADV_FROM_HANDLE(radv_query_pool, pool, queryPool);
	uint64_t va = cmd_buffer->device->ws->buffer_get_va(pool->bo);

	cmd_buffer->device->ws->cs_add_buffer(cmd_buffer->cs, pool->bo, 8);

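	/* Clear both the per-query result slots and the availability words, so
	 * the queries read back as unavailable until they are written again.
	 */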
	si_cp_dma_clear_buffer(cmd_buffer, va + firstQuery * pool->stride,
			       queryCount * pool->stride, 0);
	si_cp_dma_clear_buffer(cmd_buffer, va + pool->availability_offset + firstQuery * 4,
			       queryCount * 4, 0);
}

void radv_CmdBeginQuery(
    VkCommandBuffer                             commandBuffer,
    VkQueryPool                                 queryPool,
    uint32_t                                    query,
    VkQueryControlFlags                         flags)
{
	RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
	RADV_FROM_HANDLE(radv_query_pool, pool, queryPool);
	struct radeon_winsys_cs *cs = cmd_buffer->cs;
	uint64_t va = cmd_buffer->device->ws->buffer_get_va(pool->bo);
	va += pool->stride * query;

	cmd_buffer->device->ws->cs_add_buffer(cs, pool->bo, 8);

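	/* For occlusion queries, ZPASS_DONE makes every render backend dump its
	 * current Z-pass counter to the query slot: the begin samples go at
	 * offset 0 of each 16-byte pair, the end samples (written in
	 * radv_CmdEndQuery) at offset 8.
	 */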
	switch (pool->type) {
	case VK_QUERY_TYPE_OCCLUSION:
		radeon_check_space(cmd_buffer->device->ws, cs, 7);

		++cmd_buffer->state.active_occlusion_queries;
		if (cmd_buffer->state.active_occlusion_queries == 1)
			radv_set_db_count_control(cmd_buffer);

		radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 2, 0));
		radeon_emit(cs, EVENT_TYPE(V_028A90_ZPASS_DONE) | EVENT_INDEX(1));
		radeon_emit(cs, va);
		radeon_emit(cs, va >> 32);
		break;
	default:
		unreachable("beginning unhandled query type");
	}
}


void radv_CmdEndQuery(
    VkCommandBuffer                             commandBuffer,
    VkQueryPool                                 queryPool,
    uint32_t                                    query)
{
	RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
	RADV_FROM_HANDLE(radv_query_pool, pool, queryPool);
	struct radeon_winsys_cs *cs = cmd_buffer->cs;
	uint64_t va = cmd_buffer->device->ws->buffer_get_va(pool->bo);
	uint64_t avail_va = va + pool->availability_offset + 4 * query;
	va += pool->stride * query;

	cmd_buffer->device->ws->cs_add_buffer(cs, pool->bo, 8);

	switch (pool->type) {
	case VK_QUERY_TYPE_OCCLUSION:
		radeon_check_space(cmd_buffer->device->ws, cs, 14);

		cmd_buffer->state.active_occlusion_queries--;
		if (cmd_buffer->state.active_occlusion_queries == 0)
			radv_set_db_count_control(cmd_buffer);

		radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 2, 0));
		radeon_emit(cs, EVENT_TYPE(V_028A90_ZPASS_DONE) | EVENT_INDEX(1));
		radeon_emit(cs, va + 8);
		radeon_emit(cs, (va + 8) >> 32);

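		/* PKT3_OCCLUSION_QUERY asks the CP to accumulate the per-DB
		 * begin/end counter pairs starting at va into the 16-byte
		 * result slot at the end of the query's storage.
		 */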
		/* Emitting this hangs for VK_COMMAND_BUFFER_LEVEL_SECONDARY
		 * command buffers, so only do it for primary ones. */
		if (cmd_buffer->level == VK_COMMAND_BUFFER_LEVEL_PRIMARY) {
			radeon_emit(cs, PKT3(PKT3_OCCLUSION_QUERY, 3, 0));
			radeon_emit(cs, va);
			radeon_emit(cs, va >> 32);
			radeon_emit(cs, va + pool->stride - 16);
			radeon_emit(cs, (va + pool->stride - 16) >> 32);
		}

		break;
	default:
		unreachable("ending unhandled query type");
	}

	radeon_check_space(cmd_buffer->device->ws, cs, 5);

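	/* Mark the query as available. Like the result copies in
	 * radv_CmdCopyQueryPoolResults, this write is performed on the ME.
	 */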
	radeon_emit(cs, PKT3(PKT3_WRITE_DATA, 3, 0));
	radeon_emit(cs, S_370_DST_SEL(V_370_MEMORY_SYNC) |
		    S_370_WR_CONFIRM(1) |
		    S_370_ENGINE_SEL(V_370_ME));
	radeon_emit(cs, avail_va);
	radeon_emit(cs, avail_va >> 32);
	radeon_emit(cs, 1);
}

void radv_CmdWriteTimestamp(
    VkCommandBuffer                             commandBuffer,
    VkPipelineStageFlagBits                     pipelineStage,
    VkQueryPool                                 queryPool,
    uint32_t                                    query)
{
	RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
	RADV_FROM_HANDLE(radv_query_pool, pool, queryPool);
	bool mec = radv_cmd_buffer_uses_mec(cmd_buffer);
	struct radeon_winsys_cs *cs = cmd_buffer->cs;
	uint64_t va = cmd_buffer->device->ws->buffer_get_va(pool->bo);
	uint64_t avail_va = va + pool->availability_offset + 4 * query;
	uint64_t query_va = va + pool->stride * query;

	cmd_buffer->device->ws->cs_add_buffer(cs, pool->bo, 5);

	MAYBE_UNUSED unsigned cdw_max = radeon_check_space(cmd_buffer->device->ws, cs, 12);

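	/* Write a bottom-of-pipe timestamp. Compute queues (MEC) need the
	 * RELEASE_MEM packet while the graphics ME uses EVENT_WRITE_EOP; in
	 * both cases the 3 << 29 data-select field requests the 64-bit GPU
	 * clock value to be written to query_va.
	 */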
	if (mec) {
		radeon_emit(cs, PKT3(PKT3_RELEASE_MEM, 5, 0));
		radeon_emit(cs, EVENT_TYPE(V_028A90_BOTTOM_OF_PIPE_TS) | EVENT_INDEX(5));
		radeon_emit(cs, 3 << 29);
		radeon_emit(cs, query_va);
		radeon_emit(cs, query_va >> 32);
		radeon_emit(cs, 0);
		radeon_emit(cs, 0);
	} else {
		radeon_emit(cs, PKT3(PKT3_EVENT_WRITE_EOP, 4, 0));
		radeon_emit(cs, EVENT_TYPE(V_028A90_BOTTOM_OF_PIPE_TS) | EVENT_INDEX(5));
		radeon_emit(cs, query_va);
		radeon_emit(cs, (3 << 29) | ((query_va >> 32) & 0xFFFF));
		radeon_emit(cs, 0);
		radeon_emit(cs, 0);
	}

	radeon_emit(cs, PKT3(PKT3_WRITE_DATA, 3, 0));
	radeon_emit(cs, S_370_DST_SEL(mec ? V_370_MEM_ASYNC : V_370_MEMORY_SYNC) |
		    S_370_WR_CONFIRM(1) |
		    S_370_ENGINE_SEL(V_370_ME));
	radeon_emit(cs, avail_va);
	radeon_emit(cs, avail_va >> 32);
	radeon_emit(cs, 1);

	assert(cmd_buffer->cs->cdw <= cdw_max);
}