/*
 * Copyright 2010 Jerome Glisse <glisse@freedesktop.org>
 * Copyright 2014 Marek Olšák <marek.olsak@amd.com>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * on the rights to use, copy, modify, merge, publish, distribute, sub
 * license, and/or sell copies of the Software, and to permit persons to whom
 * the Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 */

#include "r600_query.h"
#include "r600_pipe.h"
#include "r600_cs.h"
#include "util/u_memory.h"
#include "util/u_upload_mgr.h"
#include "util/os_time.h"
#include "tgsi/tgsi_text.h"

#define R600_MAX_STREAMS 4

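/* Layout of one hardware query result within its buffer, as filled in by
 * r600_get_hw_query_params() and consumed by the result-gathering compute
 * shader: start_offset/end_offset locate the begin/end values of one
 * measurement, fence_offset locates the "result written" fence, and
 * pair_stride/pair_count describe the per-RB (or per-stream) begin/end
 * pairs.
 */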
struct r600_hw_query_params {
	unsigned start_offset;
	unsigned end_offset;
	unsigned fence_offset;
	unsigned pair_stride;
	unsigned pair_count;
};

/* Queries without buffer handling or suspend/resume. */
struct r600_query_sw {
	struct r600_query b;

	uint64_t begin_result;
	uint64_t end_result;

	uint64_t begin_time;
	uint64_t end_time;

	/* Fence for GPU_FINISHED. */
	struct pipe_fence_handle *fence;
};

static void r600_query_sw_destroy(struct r600_common_screen *rscreen,
				  struct r600_query *rquery)
{
	struct r600_query_sw *query = (struct r600_query_sw *)rquery;

	rscreen->b.fence_reference(&rscreen->b, &query->fence, NULL);
	FREE(query);
}

static enum radeon_value_id winsys_id_from_type(unsigned type)
{
	switch (type) {
	case R600_QUERY_REQUESTED_VRAM: return RADEON_REQUESTED_VRAM_MEMORY;
	case R600_QUERY_REQUESTED_GTT: return RADEON_REQUESTED_GTT_MEMORY;
	case R600_QUERY_MAPPED_VRAM: return RADEON_MAPPED_VRAM;
	case R600_QUERY_MAPPED_GTT: return RADEON_MAPPED_GTT;
	case R600_QUERY_BUFFER_WAIT_TIME: return RADEON_BUFFER_WAIT_TIME_NS;
	case R600_QUERY_NUM_MAPPED_BUFFERS: return RADEON_NUM_MAPPED_BUFFERS;
	case R600_QUERY_NUM_GFX_IBS: return RADEON_NUM_GFX_IBS;
	case R600_QUERY_NUM_SDMA_IBS: return RADEON_NUM_SDMA_IBS;
	case R600_QUERY_GFX_BO_LIST_SIZE: return RADEON_GFX_BO_LIST_COUNTER;
	case R600_QUERY_NUM_BYTES_MOVED: return RADEON_NUM_BYTES_MOVED;
	case R600_QUERY_NUM_EVICTIONS: return RADEON_NUM_EVICTIONS;
	case R600_QUERY_NUM_VRAM_CPU_PAGE_FAULTS: return RADEON_NUM_VRAM_CPU_PAGE_FAULTS;
	case R600_QUERY_VRAM_USAGE: return RADEON_VRAM_USAGE;
	case R600_QUERY_VRAM_VIS_USAGE: return RADEON_VRAM_VIS_USAGE;
	case R600_QUERY_GTT_USAGE: return RADEON_GTT_USAGE;
	case R600_QUERY_GPU_TEMPERATURE: return RADEON_GPU_TEMPERATURE;
	case R600_QUERY_CURRENT_GPU_SCLK: return RADEON_CURRENT_SCLK;
	case R600_QUERY_CURRENT_GPU_MCLK: return RADEON_CURRENT_MCLK;
	case R600_QUERY_CS_THREAD_BUSY: return RADEON_CS_THREAD_TIME;
	default: unreachable("query type does not correspond to winsys id");
	}
}

static bool r600_query_sw_begin(struct r600_common_context *rctx,
				struct r600_query *rquery)
{
	struct r600_query_sw *query = (struct r600_query_sw *)rquery;
	enum radeon_value_id ws_id;

	switch(query->b.type) {
	case PIPE_QUERY_TIMESTAMP_DISJOINT:
	case PIPE_QUERY_GPU_FINISHED:
		break;
	case R600_QUERY_DRAW_CALLS:
		query->begin_result = rctx->num_draw_calls;
		break;
	case R600_QUERY_DECOMPRESS_CALLS:
		query->begin_result = rctx->num_decompress_calls;
		break;
	case R600_QUERY_MRT_DRAW_CALLS:
		query->begin_result = rctx->num_mrt_draw_calls;
		break;
	case R600_QUERY_PRIM_RESTART_CALLS:
		query->begin_result = rctx->num_prim_restart_calls;
		break;
	case R600_QUERY_SPILL_DRAW_CALLS:
		query->begin_result = rctx->num_spill_draw_calls;
		break;
	case R600_QUERY_COMPUTE_CALLS:
		query->begin_result = rctx->num_compute_calls;
		break;
	case R600_QUERY_SPILL_COMPUTE_CALLS:
		query->begin_result = rctx->num_spill_compute_calls;
		break;
	case R600_QUERY_DMA_CALLS:
		query->begin_result = rctx->num_dma_calls;
		break;
	case R600_QUERY_CP_DMA_CALLS:
		query->begin_result = rctx->num_cp_dma_calls;
		break;
	case R600_QUERY_NUM_VS_FLUSHES:
		query->begin_result = rctx->num_vs_flushes;
		break;
	case R600_QUERY_NUM_PS_FLUSHES:
		query->begin_result = rctx->num_ps_flushes;
		break;
	case R600_QUERY_NUM_CS_FLUSHES:
		query->begin_result = rctx->num_cs_flushes;
		break;
	case R600_QUERY_NUM_CB_CACHE_FLUSHES:
		query->begin_result = rctx->num_cb_cache_flushes;
		break;
	case R600_QUERY_NUM_DB_CACHE_FLUSHES:
		query->begin_result = rctx->num_db_cache_flushes;
		break;
	case R600_QUERY_NUM_RESIDENT_HANDLES:
		query->begin_result = rctx->num_resident_handles;
		break;
	case R600_QUERY_TC_OFFLOADED_SLOTS:
		query->begin_result = rctx->tc ? rctx->tc->num_offloaded_slots : 0;
		break;
	case R600_QUERY_TC_DIRECT_SLOTS:
		query->begin_result = rctx->tc ? rctx->tc->num_direct_slots : 0;
		break;
	case R600_QUERY_TC_NUM_SYNCS:
		query->begin_result = rctx->tc ? rctx->tc->num_syncs : 0;
		break;
	case R600_QUERY_REQUESTED_VRAM:
	case R600_QUERY_REQUESTED_GTT:
	case R600_QUERY_MAPPED_VRAM:
	case R600_QUERY_MAPPED_GTT:
	case R600_QUERY_VRAM_USAGE:
	case R600_QUERY_VRAM_VIS_USAGE:
	case R600_QUERY_GTT_USAGE:
	case R600_QUERY_GPU_TEMPERATURE:
	case R600_QUERY_CURRENT_GPU_SCLK:
	case R600_QUERY_CURRENT_GPU_MCLK:
	case R600_QUERY_NUM_MAPPED_BUFFERS:
		query->begin_result = 0;
		break;
	case R600_QUERY_BUFFER_WAIT_TIME:
	case R600_QUERY_NUM_GFX_IBS:
	case R600_QUERY_NUM_SDMA_IBS:
	case R600_QUERY_NUM_BYTES_MOVED:
	case R600_QUERY_NUM_EVICTIONS:
	case R600_QUERY_NUM_VRAM_CPU_PAGE_FAULTS: {
		enum radeon_value_id ws_id = winsys_id_from_type(query->b.type);
		query->begin_result = rctx->ws->query_value(rctx->ws, ws_id);
		break;
	}
	case R600_QUERY_GFX_BO_LIST_SIZE:
		ws_id = winsys_id_from_type(query->b.type);
		query->begin_result = rctx->ws->query_value(rctx->ws, ws_id);
		query->begin_time = rctx->ws->query_value(rctx->ws,
							  RADEON_NUM_GFX_IBS);
		break;
	case R600_QUERY_CS_THREAD_BUSY:
		ws_id = winsys_id_from_type(query->b.type);
		query->begin_result = rctx->ws->query_value(rctx->ws, ws_id);
		query->begin_time = os_time_get_nano();
		break;
	case R600_QUERY_GALLIUM_THREAD_BUSY:
		query->begin_result =
			rctx->tc ? util_queue_get_thread_time_nano(&rctx->tc->queue, 0) : 0;
		query->begin_time = os_time_get_nano();
		break;
	case R600_QUERY_GPU_LOAD:
	case R600_QUERY_GPU_SHADERS_BUSY:
	case R600_QUERY_GPU_TA_BUSY:
	case R600_QUERY_GPU_GDS_BUSY:
	case R600_QUERY_GPU_VGT_BUSY:
	case R600_QUERY_GPU_IA_BUSY:
	case R600_QUERY_GPU_SX_BUSY:
	case R600_QUERY_GPU_WD_BUSY:
	case R600_QUERY_GPU_BCI_BUSY:
	case R600_QUERY_GPU_SC_BUSY:
	case R600_QUERY_GPU_PA_BUSY:
	case R600_QUERY_GPU_DB_BUSY:
	case R600_QUERY_GPU_CP_BUSY:
	case R600_QUERY_GPU_CB_BUSY:
	case R600_QUERY_GPU_SDMA_BUSY:
	case R600_QUERY_GPU_PFP_BUSY:
	case R600_QUERY_GPU_MEQ_BUSY:
	case R600_QUERY_GPU_ME_BUSY:
	case R600_QUERY_GPU_SURF_SYNC_BUSY:
	case R600_QUERY_GPU_CP_DMA_BUSY:
	case R600_QUERY_GPU_SCRATCH_RAM_BUSY:
		query->begin_result = r600_begin_counter(rctx->screen,
							 query->b.type);
		break;
	case R600_QUERY_NUM_COMPILATIONS:
		query->begin_result = p_atomic_read(&rctx->screen->num_compilations);
		break;
	case R600_QUERY_NUM_SHADERS_CREATED:
		query->begin_result = p_atomic_read(&rctx->screen->num_shaders_created);
		break;
	case R600_QUERY_NUM_SHADER_CACHE_HITS:
		query->begin_result =
			p_atomic_read(&rctx->screen->num_shader_cache_hits);
		break;
	case R600_QUERY_GPIN_ASIC_ID:
	case R600_QUERY_GPIN_NUM_SIMD:
	case R600_QUERY_GPIN_NUM_RB:
	case R600_QUERY_GPIN_NUM_SPI:
	case R600_QUERY_GPIN_NUM_SE:
		break;
	default:
		unreachable("r600_query_sw_begin: bad query type");
	}

	return true;
}

static bool r600_query_sw_end(struct r600_common_context *rctx,
			      struct r600_query *rquery)
{
	struct r600_query_sw *query = (struct r600_query_sw *)rquery;
	enum radeon_value_id ws_id;

	switch(query->b.type) {
	case PIPE_QUERY_TIMESTAMP_DISJOINT:
		break;
	case PIPE_QUERY_GPU_FINISHED:
		rctx->b.flush(&rctx->b, &query->fence, PIPE_FLUSH_DEFERRED);
		break;
	case R600_QUERY_DRAW_CALLS:
		query->end_result = rctx->num_draw_calls;
		break;
	case R600_QUERY_DECOMPRESS_CALLS:
		query->end_result = rctx->num_decompress_calls;
		break;
	case R600_QUERY_MRT_DRAW_CALLS:
		query->end_result = rctx->num_mrt_draw_calls;
		break;
	case R600_QUERY_PRIM_RESTART_CALLS:
		query->end_result = rctx->num_prim_restart_calls;
		break;
	case R600_QUERY_SPILL_DRAW_CALLS:
		query->end_result = rctx->num_spill_draw_calls;
		break;
	case R600_QUERY_COMPUTE_CALLS:
		query->end_result = rctx->num_compute_calls;
		break;
	case R600_QUERY_SPILL_COMPUTE_CALLS:
		query->end_result = rctx->num_spill_compute_calls;
		break;
	case R600_QUERY_DMA_CALLS:
		query->end_result = rctx->num_dma_calls;
		break;
	case R600_QUERY_CP_DMA_CALLS:
		query->end_result = rctx->num_cp_dma_calls;
		break;
	case R600_QUERY_NUM_VS_FLUSHES:
		query->end_result = rctx->num_vs_flushes;
		break;
	case R600_QUERY_NUM_PS_FLUSHES:
		query->end_result = rctx->num_ps_flushes;
		break;
	case R600_QUERY_NUM_CS_FLUSHES:
		query->end_result = rctx->num_cs_flushes;
		break;
	case R600_QUERY_NUM_CB_CACHE_FLUSHES:
		query->end_result = rctx->num_cb_cache_flushes;
		break;
	case R600_QUERY_NUM_DB_CACHE_FLUSHES:
		query->end_result = rctx->num_db_cache_flushes;
		break;
	case R600_QUERY_NUM_RESIDENT_HANDLES:
		query->end_result = rctx->num_resident_handles;
		break;
	case R600_QUERY_TC_OFFLOADED_SLOTS:
		query->end_result = rctx->tc ? rctx->tc->num_offloaded_slots : 0;
		break;
	case R600_QUERY_TC_DIRECT_SLOTS:
		query->end_result = rctx->tc ? rctx->tc->num_direct_slots : 0;
		break;
	case R600_QUERY_TC_NUM_SYNCS:
		query->end_result = rctx->tc ? rctx->tc->num_syncs : 0;
		break;
	case R600_QUERY_REQUESTED_VRAM:
	case R600_QUERY_REQUESTED_GTT:
	case R600_QUERY_MAPPED_VRAM:
	case R600_QUERY_MAPPED_GTT:
	case R600_QUERY_VRAM_USAGE:
	case R600_QUERY_VRAM_VIS_USAGE:
	case R600_QUERY_GTT_USAGE:
	case R600_QUERY_GPU_TEMPERATURE:
	case R600_QUERY_CURRENT_GPU_SCLK:
	case R600_QUERY_CURRENT_GPU_MCLK:
	case R600_QUERY_BUFFER_WAIT_TIME:
	case R600_QUERY_NUM_MAPPED_BUFFERS:
	case R600_QUERY_NUM_GFX_IBS:
	case R600_QUERY_NUM_SDMA_IBS:
	case R600_QUERY_NUM_BYTES_MOVED:
	case R600_QUERY_NUM_EVICTIONS:
	case R600_QUERY_NUM_VRAM_CPU_PAGE_FAULTS: {
		enum radeon_value_id ws_id = winsys_id_from_type(query->b.type);
		query->end_result = rctx->ws->query_value(rctx->ws, ws_id);
		break;
	}
	case R600_QUERY_GFX_BO_LIST_SIZE:
		ws_id = winsys_id_from_type(query->b.type);
		query->end_result = rctx->ws->query_value(rctx->ws, ws_id);
		query->end_time = rctx->ws->query_value(rctx->ws,
							RADEON_NUM_GFX_IBS);
		break;
	case R600_QUERY_CS_THREAD_BUSY:
		ws_id = winsys_id_from_type(query->b.type);
		query->end_result = rctx->ws->query_value(rctx->ws, ws_id);
		query->end_time = os_time_get_nano();
		break;
	case R600_QUERY_GALLIUM_THREAD_BUSY:
		query->end_result =
			rctx->tc ? util_queue_get_thread_time_nano(&rctx->tc->queue, 0) : 0;
		query->end_time = os_time_get_nano();
		break;
	case R600_QUERY_GPU_LOAD:
	case R600_QUERY_GPU_SHADERS_BUSY:
	case R600_QUERY_GPU_TA_BUSY:
	case R600_QUERY_GPU_GDS_BUSY:
	case R600_QUERY_GPU_VGT_BUSY:
	case R600_QUERY_GPU_IA_BUSY:
	case R600_QUERY_GPU_SX_BUSY:
	case R600_QUERY_GPU_WD_BUSY:
	case R600_QUERY_GPU_BCI_BUSY:
	case R600_QUERY_GPU_SC_BUSY:
	case R600_QUERY_GPU_PA_BUSY:
	case R600_QUERY_GPU_DB_BUSY:
	case R600_QUERY_GPU_CP_BUSY:
	case R600_QUERY_GPU_CB_BUSY:
	case R600_QUERY_GPU_SDMA_BUSY:
	case R600_QUERY_GPU_PFP_BUSY:
	case R600_QUERY_GPU_MEQ_BUSY:
	case R600_QUERY_GPU_ME_BUSY:
	case R600_QUERY_GPU_SURF_SYNC_BUSY:
	case R600_QUERY_GPU_CP_DMA_BUSY:
	case R600_QUERY_GPU_SCRATCH_RAM_BUSY:
		query->end_result = r600_end_counter(rctx->screen,
						     query->b.type,
						     query->begin_result);
		query->begin_result = 0;
		break;
	case R600_QUERY_NUM_COMPILATIONS:
		query->end_result = p_atomic_read(&rctx->screen->num_compilations);
		break;
	case R600_QUERY_NUM_SHADERS_CREATED:
		query->end_result = p_atomic_read(&rctx->screen->num_shaders_created);
		break;
	case R600_QUERY_NUM_SHADER_CACHE_HITS:
		query->end_result =
			p_atomic_read(&rctx->screen->num_shader_cache_hits);
		break;
	case R600_QUERY_GPIN_ASIC_ID:
	case R600_QUERY_GPIN_NUM_SIMD:
	case R600_QUERY_GPIN_NUM_RB:
	case R600_QUERY_GPIN_NUM_SPI:
	case R600_QUERY_GPIN_NUM_SE:
		break;
	default:
		unreachable("r600_query_sw_end: bad query type");
	}

	return true;
}

static bool r600_query_sw_get_result(struct r600_common_context *rctx,
				     struct r600_query *rquery,
				     bool wait,
				     union pipe_query_result *result)
{
	struct r600_query_sw *query = (struct r600_query_sw *)rquery;

	switch (query->b.type) {
	case PIPE_QUERY_TIMESTAMP_DISJOINT:
		/* Convert from cycles per millisecond to cycles per second (Hz). */
		result->timestamp_disjoint.frequency =
			(uint64_t)rctx->screen->info.clock_crystal_freq * 1000;
		result->timestamp_disjoint.disjoint = false;
		return true;
	case PIPE_QUERY_GPU_FINISHED: {
		struct pipe_screen *screen = rctx->b.screen;
		struct pipe_context *ctx = rquery->b.flushed ? NULL : &rctx->b;

		result->b = screen->fence_finish(screen, ctx, query->fence,
						 wait ? PIPE_TIMEOUT_INFINITE : 0);
		return result->b;
	}

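	/* Average gfx BO-list size per IB over the query window: the value
	 * delta divided by the delta in the number of gfx IBs (stored in
	 * begin_time/end_time above). */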
	case R600_QUERY_GFX_BO_LIST_SIZE:
		result->u64 = (query->end_result - query->begin_result) /
			      (query->end_time - query->begin_time);
		return true;
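	/* Thread busy-time delta over wall-clock delta, as a percentage. */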
	case R600_QUERY_CS_THREAD_BUSY:
	case R600_QUERY_GALLIUM_THREAD_BUSY:
		result->u64 = (query->end_result - query->begin_result) * 100 /
			      (query->end_time - query->begin_time);
		return true;
	case R600_QUERY_GPIN_ASIC_ID:
		result->u32 = 0;
		return true;
	case R600_QUERY_GPIN_NUM_SIMD:
		result->u32 = rctx->screen->info.num_good_compute_units;
		return true;
	case R600_QUERY_GPIN_NUM_RB:
		result->u32 = rctx->screen->info.num_render_backends;
		return true;
	case R600_QUERY_GPIN_NUM_SPI:
		result->u32 = 1; /* all supported chips have one SPI per SE */
		return true;
	case R600_QUERY_GPIN_NUM_SE:
		result->u32 = rctx->screen->info.max_se;
		return true;
	}

	result->u64 = query->end_result - query->begin_result;

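	/* Adjust to the expected units. The winsys (presumably) reports
	 * buffer wait time in ns and temperature in millidegrees, hence
	 * / 1000, and clocks in MHz, hence * 1000000. */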
	switch (query->b.type) {
	case R600_QUERY_BUFFER_WAIT_TIME:
	case R600_QUERY_GPU_TEMPERATURE:
		result->u64 /= 1000;
		break;
	case R600_QUERY_CURRENT_GPU_SCLK:
	case R600_QUERY_CURRENT_GPU_MCLK:
		result->u64 *= 1000000;
		break;
	}

	return true;
}


static struct r600_query_ops sw_query_ops = {
	.destroy = r600_query_sw_destroy,
	.begin = r600_query_sw_begin,
	.end = r600_query_sw_end,
	.get_result = r600_query_sw_get_result,
	.get_result_resource = NULL
};

static struct pipe_query *r600_query_sw_create(unsigned query_type)
{
	struct r600_query_sw *query;

	query = CALLOC_STRUCT(r600_query_sw);
	if (!query)
		return NULL;

	query->b.type = query_type;
	query->b.ops = &sw_query_ops;

	return (struct pipe_query *)query;
}

void r600_query_hw_destroy(struct r600_common_screen *rscreen,
			   struct r600_query *rquery)
{
	struct r600_query_hw *query = (struct r600_query_hw *)rquery;
	struct r600_query_buffer *prev = query->buffer.previous;

	/* Release all query buffers. */
	while (prev) {
		struct r600_query_buffer *qbuf = prev;
		prev = prev->previous;
		r600_resource_reference(&qbuf->buf, NULL);
		FREE(qbuf);
	}

	r600_resource_reference(&query->buffer.buf, NULL);
	FREE(rquery);
}

static struct r600_resource *r600_new_query_buffer(struct r600_common_screen *rscreen,
						   struct r600_query_hw *query)
{
	unsigned buf_size = MAX2(query->result_size,
				 rscreen->info.min_alloc_size);

	/* Queries are normally read by the CPU after
	 * being written by the GPU, hence staging is probably a good
	 * usage pattern.
	 */
	struct r600_resource *buf = (struct r600_resource*)
		pipe_buffer_create(&rscreen->b, 0,
				   PIPE_USAGE_STAGING, buf_size);
	if (!buf)
		return NULL;

	if (!query->ops->prepare_buffer(rscreen, query, buf)) {
		r600_resource_reference(&buf, NULL);
		return NULL;
	}

	return buf;
}

static bool r600_query_hw_prepare_buffer(struct r600_common_screen *rscreen,
					 struct r600_query_hw *query,
					 struct r600_resource *buffer)
{
	/* Callers ensure that the buffer is currently unused by the GPU. */
	uint32_t *results = rscreen->ws->buffer_map(buffer->buf, NULL,
						    PIPE_MAP_WRITE |
						    PIPE_MAP_UNSYNCHRONIZED);
	if (!results)
		return false;

	memset(results, 0, buffer->b.b.width0);

	if (query->b.type == PIPE_QUERY_OCCLUSION_COUNTER ||
	    query->b.type == PIPE_QUERY_OCCLUSION_PREDICATE ||
	    query->b.type == PIPE_QUERY_OCCLUSION_PREDICATE_CONSERVATIVE) {
		unsigned max_rbs = rscreen->info.num_render_backends;
		unsigned enabled_rb_mask = rscreen->info.enabled_rb_mask;
		unsigned num_results;
		unsigned i, j;

		/* Set the top (valid) bits of the begin/end values of
		 * disabled backends, so they pass the status check in
		 * r600_query_read_result(). */
		num_results = buffer->b.b.width0 / query->result_size;
		for (j = 0; j < num_results; j++) {
			for (i = 0; i < max_rbs; i++) {
				if (!(enabled_rb_mask & (1<<i))) {
					results[(i * 4)+1] = 0x80000000;
					results[(i * 4)+3] = 0x80000000;
				}
			}
			results += 4 * max_rbs;
		}
	}

	return true;
}

static void r600_query_hw_get_result_resource(struct r600_common_context *rctx,
					      struct r600_query *rquery,
					      bool wait,
					      enum pipe_query_value_type result_type,
					      int index,
					      struct pipe_resource *resource,
					      unsigned offset);

static struct r600_query_ops query_hw_ops = {
	.destroy = r600_query_hw_destroy,
	.begin = r600_query_hw_begin,
	.end = r600_query_hw_end,
	.get_result = r600_query_hw_get_result,
	.get_result_resource = r600_query_hw_get_result_resource,
};

static void r600_query_hw_do_emit_start(struct r600_common_context *ctx,
					struct r600_query_hw *query,
					struct r600_resource *buffer,
					uint64_t va);
static void r600_query_hw_do_emit_stop(struct r600_common_context *ctx,
				       struct r600_query_hw *query,
				       struct r600_resource *buffer,
				       uint64_t va);
static void r600_query_hw_add_result(struct r600_common_screen *rscreen,
				     struct r600_query_hw *, void *buffer,
				     union pipe_query_result *result);
static void r600_query_hw_clear_result(struct r600_query_hw *,
				       union pipe_query_result *);

static struct r600_query_hw_ops query_hw_default_hw_ops = {
	.prepare_buffer = r600_query_hw_prepare_buffer,
	.emit_start = r600_query_hw_do_emit_start,
	.emit_stop = r600_query_hw_do_emit_stop,
	.clear_result = r600_query_hw_clear_result,
	.add_result = r600_query_hw_add_result,
};

bool r600_query_hw_init(struct r600_common_screen *rscreen,
			struct r600_query_hw *query)
{
	query->buffer.buf = r600_new_query_buffer(rscreen, query);
	if (!query->buffer.buf)
		return false;

	return true;
}

static struct pipe_query *r600_query_hw_create(struct r600_common_screen *rscreen,
					       unsigned query_type,
					       unsigned index)
{
	struct r600_query_hw *query = CALLOC_STRUCT(r600_query_hw);
	if (!query)
		return NULL;

	query->b.type = query_type;
	query->b.ops = &query_hw_ops;
	query->ops = &query_hw_default_hw_ops;

	switch (query_type) {
	case PIPE_QUERY_OCCLUSION_COUNTER:
	case PIPE_QUERY_OCCLUSION_PREDICATE:
	case PIPE_QUERY_OCCLUSION_PREDICATE_CONSERVATIVE:
		query->result_size = 16 * rscreen->info.num_render_backends;
		query->result_size += 16; /* for the fence + alignment */
		query->num_cs_dw_begin = 6;
		query->num_cs_dw_end = 6 + r600_gfx_write_fence_dwords(rscreen);
		break;
	case PIPE_QUERY_TIME_ELAPSED:
		query->result_size = 24;
		query->num_cs_dw_begin = 8;
		query->num_cs_dw_end = 8 + r600_gfx_write_fence_dwords(rscreen);
		break;
	case PIPE_QUERY_TIMESTAMP:
		query->result_size = 16;
		query->num_cs_dw_end = 8 + r600_gfx_write_fence_dwords(rscreen);
		query->flags = R600_QUERY_HW_FLAG_NO_START;
		break;
	case PIPE_QUERY_PRIMITIVES_EMITTED:
	case PIPE_QUERY_PRIMITIVES_GENERATED:
	case PIPE_QUERY_SO_STATISTICS:
	case PIPE_QUERY_SO_OVERFLOW_PREDICATE:
		/* NumPrimitivesWritten, PrimitiveStorageNeeded. */
		query->result_size = 32;
		query->num_cs_dw_begin = 6;
		query->num_cs_dw_end = 6;
		query->stream = index;
		break;
	case PIPE_QUERY_SO_OVERFLOW_ANY_PREDICATE:
		/* NumPrimitivesWritten, PrimitiveStorageNeeded. */
		query->result_size = 32 * R600_MAX_STREAMS;
		query->num_cs_dw_begin = 6 * R600_MAX_STREAMS;
		query->num_cs_dw_end = 6 * R600_MAX_STREAMS;
		break;
	case PIPE_QUERY_PIPELINE_STATISTICS:
		/* 11 values on EG, 8 on R600. */
		query->result_size = (rscreen->chip_class >= EVERGREEN ? 11 : 8) * 16;
		query->result_size += 8; /* for the fence + alignment */
		query->num_cs_dw_begin = 6;
		query->num_cs_dw_end = 6 + r600_gfx_write_fence_dwords(rscreen);
		break;
	default:
		assert(0);
		FREE(query);
		return NULL;
	}

	if (!r600_query_hw_init(rscreen, query)) {
		FREE(query);
		return NULL;
	}

	return (struct pipe_query *)query;
}

static void r600_update_occlusion_query_state(struct r600_common_context *rctx,
					      unsigned type, int diff)
{
	if (type == PIPE_QUERY_OCCLUSION_COUNTER ||
	    type == PIPE_QUERY_OCCLUSION_PREDICATE ||
	    type == PIPE_QUERY_OCCLUSION_PREDICATE_CONSERVATIVE) {
		bool old_enable = rctx->num_occlusion_queries != 0;
		bool old_perfect_enable =
			rctx->num_perfect_occlusion_queries != 0;
		bool enable, perfect_enable;

		rctx->num_occlusion_queries += diff;
		assert(rctx->num_occlusion_queries >= 0);

		if (type != PIPE_QUERY_OCCLUSION_PREDICATE_CONSERVATIVE) {
			rctx->num_perfect_occlusion_queries += diff;
			assert(rctx->num_perfect_occlusion_queries >= 0);
		}

		enable = rctx->num_occlusion_queries != 0;
		perfect_enable = rctx->num_perfect_occlusion_queries != 0;

		if (enable != old_enable || perfect_enable != old_perfect_enable) {
			struct r600_context *ctx = (struct r600_context*)rctx;
			r600_mark_atom_dirty(ctx, &ctx->db_misc_state.atom);
		}
	}
}

static unsigned event_type_for_stream(unsigned stream)
{
	switch (stream) {
	default:
	case 0: return EVENT_TYPE_SAMPLE_STREAMOUTSTATS;
	case 1: return EVENT_TYPE_SAMPLE_STREAMOUTSTATS1;
	case 2: return EVENT_TYPE_SAMPLE_STREAMOUTSTATS2;
	case 3: return EVENT_TYPE_SAMPLE_STREAMOUTSTATS3;
	}
}

static void emit_sample_streamout(struct radeon_cmdbuf *cs, uint64_t va,
				  unsigned stream)
{
	radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 2, 0));
	radeon_emit(cs, EVENT_TYPE(event_type_for_stream(stream)) | EVENT_INDEX(3));
	radeon_emit(cs, va);
	radeon_emit(cs, va >> 32);
}

static void r600_query_hw_do_emit_start(struct r600_common_context *ctx,
					struct r600_query_hw *query,
					struct r600_resource *buffer,
					uint64_t va)
{
	struct radeon_cmdbuf *cs = ctx->gfx.cs;

	switch (query->b.type) {
	case PIPE_QUERY_OCCLUSION_COUNTER:
	case PIPE_QUERY_OCCLUSION_PREDICATE:
	case PIPE_QUERY_OCCLUSION_PREDICATE_CONSERVATIVE:
		radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 2, 0));
		radeon_emit(cs, EVENT_TYPE(EVENT_TYPE_ZPASS_DONE) | EVENT_INDEX(1));
		radeon_emit(cs, va);
		radeon_emit(cs, va >> 32);
		break;
	case PIPE_QUERY_PRIMITIVES_EMITTED:
	case PIPE_QUERY_PRIMITIVES_GENERATED:
	case PIPE_QUERY_SO_STATISTICS:
	case PIPE_QUERY_SO_OVERFLOW_PREDICATE:
		emit_sample_streamout(cs, va, query->stream);
		break;
	case PIPE_QUERY_SO_OVERFLOW_ANY_PREDICATE:
		for (unsigned stream = 0; stream < R600_MAX_STREAMS; ++stream)
			emit_sample_streamout(cs, va + 32 * stream, stream);
		break;
	case PIPE_QUERY_TIME_ELAPSED:
		/* Write the timestamp after the last draw is done.
		 * (bottom-of-pipe)
		 */
		r600_gfx_write_event_eop(ctx, EVENT_TYPE_BOTTOM_OF_PIPE_TS,
					 0, EOP_DATA_SEL_TIMESTAMP,
					 NULL, va, 0, query->b.type);
		break;
	case PIPE_QUERY_PIPELINE_STATISTICS:
		radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 2, 0));
		radeon_emit(cs, EVENT_TYPE(EVENT_TYPE_SAMPLE_PIPELINESTAT) | EVENT_INDEX(2));
		radeon_emit(cs, va);
		radeon_emit(cs, va >> 32);
		break;
	default:
		assert(0);
	}
	r600_emit_reloc(ctx, &ctx->gfx, query->buffer.buf, RADEON_USAGE_WRITE,
			RADEON_PRIO_QUERY);
}

static void r600_query_hw_emit_start(struct r600_common_context *ctx,
				     struct r600_query_hw *query)
{
	uint64_t va;

	if (!query->buffer.buf)
		return; // previous buffer allocation failure

	r600_update_occlusion_query_state(ctx, query->b.type, 1);
	r600_update_prims_generated_query_state(ctx, query->b.type, 1);

	ctx->need_gfx_cs_space(&ctx->b, query->num_cs_dw_begin + query->num_cs_dw_end,
			       true);

	/* Get a new query buffer if needed. */
	if (query->buffer.results_end + query->result_size > query->buffer.buf->b.b.width0) {
		struct r600_query_buffer *qbuf = MALLOC_STRUCT(r600_query_buffer);
		*qbuf = query->buffer;
		query->buffer.results_end = 0;
		query->buffer.previous = qbuf;
		query->buffer.buf = r600_new_query_buffer(ctx->screen, query);
		if (!query->buffer.buf)
			return;
	}

	/* emit begin query */
	va = query->buffer.buf->gpu_address + query->buffer.results_end;

	query->ops->emit_start(ctx, query, query->buffer.buf, va);

	ctx->num_cs_dw_queries_suspend += query->num_cs_dw_end;
}

static void r600_query_hw_do_emit_stop(struct r600_common_context *ctx,
				       struct r600_query_hw *query,
				       struct r600_resource *buffer,
				       uint64_t va)
{
	struct radeon_cmdbuf *cs = ctx->gfx.cs;
	uint64_t fence_va = 0;

	switch (query->b.type) {
	case PIPE_QUERY_OCCLUSION_COUNTER:
	case PIPE_QUERY_OCCLUSION_PREDICATE:
	case PIPE_QUERY_OCCLUSION_PREDICATE_CONSERVATIVE:
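		/* The end counts are written 8 bytes after the begin counts
		 * within each RB's 16-byte pair. */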
		va += 8;
		radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 2, 0));
		radeon_emit(cs, EVENT_TYPE(EVENT_TYPE_ZPASS_DONE) | EVENT_INDEX(1));
		radeon_emit(cs, va);
		radeon_emit(cs, va >> 32);

		fence_va = va + ctx->screen->info.num_render_backends * 16 - 8;
		break;
	case PIPE_QUERY_PRIMITIVES_EMITTED:
	case PIPE_QUERY_PRIMITIVES_GENERATED:
	case PIPE_QUERY_SO_STATISTICS:
	case PIPE_QUERY_SO_OVERFLOW_PREDICATE:
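		/* The end sample follows the 16-byte begin sample. */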
		va += 16;
		emit_sample_streamout(cs, va, query->stream);
		break;
	case PIPE_QUERY_SO_OVERFLOW_ANY_PREDICATE:
		va += 16;
		for (unsigned stream = 0; stream < R600_MAX_STREAMS; ++stream)
			emit_sample_streamout(cs, va + 32 * stream, stream);
		break;
	case PIPE_QUERY_TIME_ELAPSED:
		va += 8;
		/* fall through */
	case PIPE_QUERY_TIMESTAMP:
		r600_gfx_write_event_eop(ctx, EVENT_TYPE_BOTTOM_OF_PIPE_TS,
					 0, EOP_DATA_SEL_TIMESTAMP, NULL, va,
					 0, query->b.type);
		fence_va = va + 8;
		break;
	case PIPE_QUERY_PIPELINE_STATISTICS: {
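		/* The result size minus the 8-byte fence, split into equal
		 * begin/end sample blocks. */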
		unsigned sample_size = (query->result_size - 8) / 2;

		va += sample_size;
		radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 2, 0));
		radeon_emit(cs, EVENT_TYPE(EVENT_TYPE_SAMPLE_PIPELINESTAT) | EVENT_INDEX(2));
		radeon_emit(cs, va);
		radeon_emit(cs, va >> 32);

		fence_va = va + sample_size;
		break;
	}
	default:
		assert(0);
	}
	r600_emit_reloc(ctx, &ctx->gfx, query->buffer.buf, RADEON_USAGE_WRITE,
			RADEON_PRIO_QUERY);

	if (fence_va)
		r600_gfx_write_event_eop(ctx, EVENT_TYPE_BOTTOM_OF_PIPE_TS, 0,
					 EOP_DATA_SEL_VALUE_32BIT,
					 query->buffer.buf, fence_va, 0x80000000,
					 query->b.type);
}

static void r600_query_hw_emit_stop(struct r600_common_context *ctx,
				    struct r600_query_hw *query)
{
	uint64_t va;

	if (!query->buffer.buf)
		return; // previous buffer allocation failure

	/* The queries which need begin already called this in begin_query. */
	if (query->flags & R600_QUERY_HW_FLAG_NO_START) {
		ctx->need_gfx_cs_space(&ctx->b, query->num_cs_dw_end, false);
	}

	/* emit end query */
	va = query->buffer.buf->gpu_address + query->buffer.results_end;

	query->ops->emit_stop(ctx, query, query->buffer.buf, va);

	query->buffer.results_end += query->result_size;

	if (!(query->flags & R600_QUERY_HW_FLAG_NO_START))
		ctx->num_cs_dw_queries_suspend -= query->num_cs_dw_end;

	r600_update_occlusion_query_state(ctx, query->b.type, -1);
	r600_update_prims_generated_query_state(ctx, query->b.type, -1);
}

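/* Emit one SET_PREDICATION packet: the low 32 bits of the result VA,
 * then the predication flags OR'd with the high bits of the VA.
 */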
static void emit_set_predicate(struct r600_common_context *ctx,
			       struct r600_resource *buf, uint64_t va,
			       uint32_t op)
{
	struct radeon_cmdbuf *cs = ctx->gfx.cs;

	radeon_emit(cs, PKT3(PKT3_SET_PREDICATION, 1, 0));
	radeon_emit(cs, va);
	radeon_emit(cs, op | ((va >> 32) & 0xFF));
	r600_emit_reloc(ctx, &ctx->gfx, buf, RADEON_USAGE_READ,
			RADEON_PRIO_QUERY);
}

static void r600_emit_query_predication(struct r600_common_context *ctx,
					struct r600_atom *atom)
{
	struct r600_query_hw *query = (struct r600_query_hw *)ctx->render_cond;
	struct r600_query_buffer *qbuf;
	uint32_t op;
	bool flag_wait, invert;

	if (!query)
		return;

	invert = ctx->render_cond_invert;
	flag_wait = ctx->render_cond_mode == PIPE_RENDER_COND_WAIT ||
		    ctx->render_cond_mode == PIPE_RENDER_COND_BY_REGION_WAIT;

	switch (query->b.type) {
	case PIPE_QUERY_OCCLUSION_COUNTER:
	case PIPE_QUERY_OCCLUSION_PREDICATE:
	case PIPE_QUERY_OCCLUSION_PREDICATE_CONSERVATIVE:
		op = PRED_OP(PREDICATION_OP_ZPASS);
		break;
	case PIPE_QUERY_SO_OVERFLOW_PREDICATE:
	case PIPE_QUERY_SO_OVERFLOW_ANY_PREDICATE:
		op = PRED_OP(PREDICATION_OP_PRIMCOUNT);
		invert = !invert;
		break;
	default:
		assert(0);
		return;
	}

	/* if true then invert, see GL_ARB_conditional_render_inverted */
	if (invert)
		op |= PREDICATION_DRAW_NOT_VISIBLE; /* Draw if not visible or overflow */
	else
		op |= PREDICATION_DRAW_VISIBLE; /* Draw if visible or no overflow */

	op |= flag_wait ? PREDICATION_HINT_WAIT : PREDICATION_HINT_NOWAIT_DRAW;

	/* emit predicate packets for all data blocks */
	for (qbuf = &query->buffer; qbuf; qbuf = qbuf->previous) {
		unsigned results_base = 0;
		uint64_t va_base = qbuf->buf->gpu_address;

		while (results_base < qbuf->results_end) {
			uint64_t va = va_base + results_base;

			if (query->b.type == PIPE_QUERY_SO_OVERFLOW_ANY_PREDICATE) {
				for (unsigned stream = 0; stream < R600_MAX_STREAMS; ++stream) {
					emit_set_predicate(ctx, qbuf->buf, va + 32 * stream, op);

					/* set CONTINUE bit for all packets except the first */
					op |= PREDICATION_CONTINUE;
				}
			} else {
				emit_set_predicate(ctx, qbuf->buf, va, op);
				op |= PREDICATION_CONTINUE;
			}

			results_base += query->result_size;
		}
	}
}

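/* Illustrative sketch only: the hooks below implement the generic Gallium
 * query interface, which a state tracker drives roughly like this
 * (error handling omitted):
 *
 *    struct pipe_query *q =
 *        ctx->create_query(ctx, PIPE_QUERY_OCCLUSION_COUNTER, 0);
 *    ctx->begin_query(ctx, q);
 *    ... draw calls ...
 *    ctx->end_query(ctx, q);
 *    union pipe_query_result result;
 *    ctx->get_query_result(ctx, q, true, &result);
 *    ctx->destroy_query(ctx, q);
 */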
static struct pipe_query *r600_create_query(struct pipe_context *ctx, unsigned query_type, unsigned index)
{
	struct r600_common_screen *rscreen =
		(struct r600_common_screen *)ctx->screen;

	if (query_type == PIPE_QUERY_TIMESTAMP_DISJOINT ||
	    query_type == PIPE_QUERY_GPU_FINISHED ||
	    query_type >= PIPE_QUERY_DRIVER_SPECIFIC)
		return r600_query_sw_create(query_type);

	return r600_query_hw_create(rscreen, query_type, index);
}

static void r600_destroy_query(struct pipe_context *ctx, struct pipe_query *query)
{
	struct r600_common_context *rctx = (struct r600_common_context *)ctx;
	struct r600_query *rquery = (struct r600_query *)query;

	rquery->ops->destroy(rctx->screen, rquery);
}

static bool r600_begin_query(struct pipe_context *ctx,
			     struct pipe_query *query)
{
	struct r600_common_context *rctx = (struct r600_common_context *)ctx;
	struct r600_query *rquery = (struct r600_query *)query;

	return rquery->ops->begin(rctx, rquery);
}

void r600_query_hw_reset_buffers(struct r600_common_context *rctx,
				 struct r600_query_hw *query)
{
	struct r600_query_buffer *prev = query->buffer.previous;

	/* Discard the old query buffers. */
	while (prev) {
		struct r600_query_buffer *qbuf = prev;
		prev = prev->previous;
		r600_resource_reference(&qbuf->buf, NULL);
		FREE(qbuf);
	}

	query->buffer.results_end = 0;
	query->buffer.previous = NULL;

	/* Obtain a new buffer if the current one can't be mapped without a stall. */
	if (r600_rings_is_buffer_referenced(rctx, query->buffer.buf->buf, RADEON_USAGE_READWRITE) ||
	    !rctx->ws->buffer_wait(query->buffer.buf->buf, 0, RADEON_USAGE_READWRITE)) {
		r600_resource_reference(&query->buffer.buf, NULL);
		query->buffer.buf = r600_new_query_buffer(rctx->screen, query);
	} else {
		if (!query->ops->prepare_buffer(rctx->screen, query, query->buffer.buf))
			r600_resource_reference(&query->buffer.buf, NULL);
	}
}

bool r600_query_hw_begin(struct r600_common_context *rctx,
			 struct r600_query *rquery)
{
	struct r600_query_hw *query = (struct r600_query_hw *)rquery;

	if (query->flags & R600_QUERY_HW_FLAG_NO_START) {
		assert(0);
		return false;
	}

	if (!(query->flags & R600_QUERY_HW_FLAG_BEGIN_RESUMES))
		r600_query_hw_reset_buffers(rctx, query);

	r600_query_hw_emit_start(rctx, query);
	if (!query->buffer.buf)
		return false;

	list_addtail(&query->list, &rctx->active_queries);
	return true;
}

static bool r600_end_query(struct pipe_context *ctx, struct pipe_query *query)
{
	struct r600_common_context *rctx = (struct r600_common_context *)ctx;
	struct r600_query *rquery = (struct r600_query *)query;

	return rquery->ops->end(rctx, rquery);
}

bool r600_query_hw_end(struct r600_common_context *rctx,
		       struct r600_query *rquery)
{
	struct r600_query_hw *query = (struct r600_query_hw *)rquery;

	if (query->flags & R600_QUERY_HW_FLAG_NO_START)
		r600_query_hw_reset_buffers(rctx, query);

	r600_query_hw_emit_stop(rctx, query);

	if (!(query->flags & R600_QUERY_HW_FLAG_NO_START))
		list_delinit(&query->list);

	if (!query->buffer.buf)
		return false;

	return true;
}

static void r600_get_hw_query_params(struct r600_common_context *rctx,
				     struct r600_query_hw *rquery, int index,
				     struct r600_hw_query_params *params)
{
	unsigned max_rbs = rctx->screen->info.num_render_backends;

	params->pair_stride = 0;
	params->pair_count = 1;

	switch (rquery->b.type) {
	case PIPE_QUERY_OCCLUSION_COUNTER:
	case PIPE_QUERY_OCCLUSION_PREDICATE:
	case PIPE_QUERY_OCCLUSION_PREDICATE_CONSERVATIVE:
		params->start_offset = 0;
		params->end_offset = 8;
		params->fence_offset = max_rbs * 16;
		params->pair_stride = 16;
		params->pair_count = max_rbs;
		break;
	case PIPE_QUERY_TIME_ELAPSED:
		params->start_offset = 0;
		params->end_offset = 8;
		params->fence_offset = 16;
		break;
	case PIPE_QUERY_TIMESTAMP:
		params->start_offset = 0;
		params->end_offset = 0;
		params->fence_offset = 8;
		break;
	case PIPE_QUERY_PRIMITIVES_EMITTED:
		params->start_offset = 8;
		params->end_offset = 24;
		params->fence_offset = params->end_offset + 4;
		break;
	case PIPE_QUERY_PRIMITIVES_GENERATED:
		params->start_offset = 0;
		params->end_offset = 16;
		params->fence_offset = params->end_offset + 4;
		break;
	case PIPE_QUERY_SO_STATISTICS:
		params->start_offset = 8 - index * 8;
		params->end_offset = 24 - index * 8;
		params->fence_offset = params->end_offset + 4;
		break;
	case PIPE_QUERY_SO_OVERFLOW_ANY_PREDICATE:
		params->pair_count = R600_MAX_STREAMS;
		params->pair_stride = 32;
		/* fallthrough */
	case PIPE_QUERY_SO_OVERFLOW_PREDICATE:
		params->start_offset = 0;
		params->end_offset = 16;

		/* We can re-use the high dword of the last 64-bit value as a
		 * fence: it is initialized as 0, and the high bit is set by
		 * the write of the streamout stats event.
		 */
		params->fence_offset = rquery->result_size - 4;
		break;
	case PIPE_QUERY_PIPELINE_STATISTICS:
	{
		/* Offsets apply to EG+ */
		static const unsigned offsets[] = {56, 48, 24, 32, 40, 16, 8, 0, 64, 72, 80};
		params->start_offset = offsets[index];
		params->end_offset = 88 + offsets[index];
		params->fence_offset = 2 * 88;
		break;
	}
	default:
		unreachable("r600_get_hw_query_params unsupported");
	}
}

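/* Read a (start, end) pair of 64-bit values (each split into two dwords)
 * from the mapped result buffer and return their difference. If
 * test_status_bit is set, the top bit of both values must be set (the GPU
 * sets it once the value has been written), otherwise 0 is returned.
 */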
static unsigned r600_query_read_result(void *map, unsigned start_index, unsigned end_index,
				       bool test_status_bit)
{
	uint32_t *current_result = (uint32_t*)map;
	uint64_t start, end;

	start = (uint64_t)current_result[start_index] |
		(uint64_t)current_result[start_index+1] << 32;
	end = (uint64_t)current_result[end_index] |
	      (uint64_t)current_result[end_index+1] << 32;

	if (!test_status_bit ||
	    ((start & 0x8000000000000000UL) && (end & 0x8000000000000000UL))) {
		return end - start;
	}
	return 0;
}

static void r600_query_hw_add_result(struct r600_common_screen *rscreen,
				     struct r600_query_hw *query,
				     void *buffer,
				     union pipe_query_result *result)
{
	unsigned max_rbs = rscreen->info.num_render_backends;

	switch (query->b.type) {
	case PIPE_QUERY_OCCLUSION_COUNTER: {
		for (unsigned i = 0; i < max_rbs; ++i) {
			unsigned results_base = i * 16;
			result->u64 +=
				r600_query_read_result(buffer + results_base, 0, 2, true);
		}
		break;
	}
	case PIPE_QUERY_OCCLUSION_PREDICATE:
	case PIPE_QUERY_OCCLUSION_PREDICATE_CONSERVATIVE: {
		for (unsigned i = 0; i < max_rbs; ++i) {
			unsigned results_base = i * 16;
			result->b = result->b ||
				r600_query_read_result(buffer + results_base, 0, 2, true) != 0;
		}
		break;
	}
	case PIPE_QUERY_TIME_ELAPSED:
		result->u64 += r600_query_read_result(buffer, 0, 2, false);
		break;
	case PIPE_QUERY_TIMESTAMP:
		result->u64 = *(uint64_t*)buffer;
		break;
	case PIPE_QUERY_PRIMITIVES_EMITTED:
		/* SAMPLE_STREAMOUTSTATS stores this structure:
		 * {
		 *    u64 NumPrimitivesWritten;
		 *    u64 PrimitiveStorageNeeded;
		 * }
		 * We only need NumPrimitivesWritten here. */
		result->u64 += r600_query_read_result(buffer, 2, 6, true);
		break;
	case PIPE_QUERY_PRIMITIVES_GENERATED:
		/* Here we read PrimitiveStorageNeeded. */
		result->u64 += r600_query_read_result(buffer, 0, 4, true);
		break;
	case PIPE_QUERY_SO_STATISTICS:
		result->so_statistics.num_primitives_written +=
			r600_query_read_result(buffer, 2, 6, true);
		result->so_statistics.primitives_storage_needed +=
			r600_query_read_result(buffer, 0, 4, true);
		break;
	case PIPE_QUERY_SO_OVERFLOW_PREDICATE:
		result->b = result->b ||
			r600_query_read_result(buffer, 2, 6, true) !=
			r600_query_read_result(buffer, 0, 4, true);
		break;
	case PIPE_QUERY_SO_OVERFLOW_ANY_PREDICATE:
		for (unsigned stream = 0; stream < R600_MAX_STREAMS; ++stream) {
			result->b = result->b ||
				r600_query_read_result(buffer, 2, 6, true) !=
				r600_query_read_result(buffer, 0, 4, true);
			buffer = (char *)buffer + 32;
		}
		break;
	case PIPE_QUERY_PIPELINE_STATISTICS:
		if (rscreen->chip_class >= EVERGREEN) {
			result->pipeline_statistics.ps_invocations +=
				r600_query_read_result(buffer, 0, 22, false);
			result->pipeline_statistics.c_primitives +=
				r600_query_read_result(buffer, 2, 24, false);
			result->pipeline_statistics.c_invocations +=
				r600_query_read_result(buffer, 4, 26, false);
			result->pipeline_statistics.vs_invocations +=
				r600_query_read_result(buffer, 6, 28, false);
			result->pipeline_statistics.gs_invocations +=
				r600_query_read_result(buffer, 8, 30, false);
			result->pipeline_statistics.gs_primitives +=
				r600_query_read_result(buffer, 10, 32, false);
			result->pipeline_statistics.ia_primitives +=
				r600_query_read_result(buffer, 12, 34, false);
			result->pipeline_statistics.ia_vertices +=
				r600_query_read_result(buffer, 14, 36, false);
			result->pipeline_statistics.hs_invocations +=
				r600_query_read_result(buffer, 16, 38, false);
			result->pipeline_statistics.ds_invocations +=
				r600_query_read_result(buffer, 18, 40, false);
			result->pipeline_statistics.cs_invocations +=
				r600_query_read_result(buffer, 20, 42, false);
		} else {
			result->pipeline_statistics.ps_invocations +=
				r600_query_read_result(buffer, 0, 16, false);
			result->pipeline_statistics.c_primitives +=
				r600_query_read_result(buffer, 2, 18, false);
			result->pipeline_statistics.c_invocations +=
				r600_query_read_result(buffer, 4, 20, false);
			result->pipeline_statistics.vs_invocations +=
				r600_query_read_result(buffer, 6, 22, false);
			result->pipeline_statistics.gs_invocations +=
				r600_query_read_result(buffer, 8, 24, false);
			result->pipeline_statistics.gs_primitives +=
				r600_query_read_result(buffer, 10, 26, false);
			result->pipeline_statistics.ia_primitives +=
				r600_query_read_result(buffer, 12, 28, false);
			result->pipeline_statistics.ia_vertices +=
				r600_query_read_result(buffer, 14, 30, false);
		}
#if 0 /* for testing */
		printf("Pipeline stats: IA verts=%llu, IA prims=%llu, VS=%llu, HS=%llu, "
		       "DS=%llu, GS=%llu, GS prims=%llu, Clipper=%llu, "
		       "Clipper prims=%llu, PS=%llu, CS=%llu\n",
		       result->pipeline_statistics.ia_vertices,
		       result->pipeline_statistics.ia_primitives,
		       result->pipeline_statistics.vs_invocations,
		       result->pipeline_statistics.hs_invocations,
		       result->pipeline_statistics.ds_invocations,
		       result->pipeline_statistics.gs_invocations,
		       result->pipeline_statistics.gs_primitives,
		       result->pipeline_statistics.c_invocations,
		       result->pipeline_statistics.c_primitives,
		       result->pipeline_statistics.ps_invocations,
		       result->pipeline_statistics.cs_invocations);
#endif
		break;
	default:
		assert(0);
	}
}

static bool r600_get_query_result(struct pipe_context *ctx,
				  struct pipe_query *query, bool wait,
				  union pipe_query_result *result)
{
	struct r600_common_context *rctx = (struct r600_common_context *)ctx;
	struct r600_query *rquery = (struct r600_query *)query;

	return rquery->ops->get_result(rctx, rquery, wait, result);
}

static void r600_get_query_result_resource(struct pipe_context *ctx,
					   struct pipe_query *query,
					   bool wait,
					   enum pipe_query_value_type result_type,
					   int index,
					   struct pipe_resource *resource,
					   unsigned offset)
{
	struct r600_common_context *rctx = (struct r600_common_context *)ctx;
	struct r600_query *rquery = (struct r600_query *)query;

	rquery->ops->get_result_resource(rctx, rquery, wait, result_type, index,
					 resource, offset);
}

static void r600_query_hw_clear_result(struct r600_query_hw *query,
				       union pipe_query_result *result)
{
	util_query_clear_result(result, query->b.type);
}

bool r600_query_hw_get_result(struct r600_common_context *rctx,
			      struct r600_query *rquery,
			      bool wait, union pipe_query_result *result)
{
	struct r600_common_screen *rscreen = rctx->screen;
	struct r600_query_hw *query = (struct r600_query_hw *)rquery;
	struct r600_query_buffer *qbuf;

	query->ops->clear_result(query, result);

	for (qbuf = &query->buffer; qbuf; qbuf = qbuf->previous) {
		unsigned usage = PIPE_MAP_READ |
				 (wait ? 0 : PIPE_MAP_DONTBLOCK);
		unsigned results_base = 0;
		void *map;

		if (rquery->b.flushed)
			map = rctx->ws->buffer_map(qbuf->buf->buf, NULL, usage);
		else
			map = r600_buffer_map_sync_with_rings(rctx, qbuf->buf, usage);

		if (!map)
			return false;

		while (results_base != qbuf->results_end) {
			query->ops->add_result(rscreen, query, map + results_base,
					       result);
			results_base += query->result_size;
		}
	}

	/* Convert the time from clock cycles to the expected units
	 * (nanoseconds). */
	if (rquery->type == PIPE_QUERY_TIME_ELAPSED ||
	    rquery->type == PIPE_QUERY_TIMESTAMP) {
		result->u64 = (1000000 * result->u64) / rscreen->info.clock_crystal_freq;
	}
	return true;
}

/* Create the compute shader that is used to collect the results.
 *
 * One compute grid with a single thread is launched for every query result
 * buffer. The thread (optionally) reads a previous summary buffer, then
 * accumulates data from the query result buffer, and writes the result either
 * to a summary buffer to be consumed by the next grid invocation or to the
 * user-supplied buffer.
 *
 * Data layout:
 *
 * CONST
 *  0.x = end_offset
 *  0.y = result_stride
 *  0.z = result_count
 *  0.w = bit field:
 *          1: read previously accumulated values
 *          2: write accumulated values for chaining
 *          4: write result available
 *          8: convert result to boolean (0/1)
 *         16: only read one dword and use that as result
 *         32: apply timestamp conversion
 *         64: store full 64 bits result
 *        128: store signed 32 bits result
 *        256: SO_OVERFLOW mode: take the difference of two successive half-pairs
 *  1.x = fence_offset
 *  1.y = pair_stride
 *  1.z = pair_count
 *  1.w = result_offset
 *  2.x = buffer0 offset
 *
 * BUFFER[0] = query result buffer
 * BUFFER[1] = previous summary buffer
 * BUFFER[2] = next summary buffer or user-supplied buffer
 */
static void r600_create_query_result_shader(struct r600_common_context *rctx)
{
	/* TEMP[0].xy = accumulated result so far
	 * TEMP[0].z = result not available
	 *
	 * TEMP[1].x = current result index
	 * TEMP[1].y = current pair index
	 */
	static const char text_tmpl[] =
		"COMP\n"
		"PROPERTY CS_FIXED_BLOCK_WIDTH 1\n"
		"PROPERTY CS_FIXED_BLOCK_HEIGHT 1\n"
		"PROPERTY CS_FIXED_BLOCK_DEPTH 1\n"
		"DCL BUFFER[0]\n"
		"DCL BUFFER[1]\n"
		"DCL BUFFER[2]\n"
		"DCL CONST[0][0..2]\n"
		"DCL TEMP[0..5]\n"
		"IMM[0] UINT32 {0, 31, 2147483647, 4294967295}\n"
		"IMM[1] UINT32 {1, 2, 4, 8}\n"
		"IMM[2] UINT32 {16, 32, 64, 128}\n"
		"IMM[3] UINT32 {1000000, 0, %u, 0}\n" /* for timestamp conversion */
		"IMM[4] UINT32 {256, 0, 0, 0}\n"

		"AND TEMP[5], CONST[0][0].wwww, IMM[2].xxxx\n"
		"UIF TEMP[5]\n"
			/* Check result availability. */
			"UADD TEMP[1].x, CONST[0][1].xxxx, CONST[0][2].xxxx\n"
			"LOAD TEMP[1].x, BUFFER[0], TEMP[1].xxxx\n"
			"ISHR TEMP[0].z, TEMP[1].xxxx, IMM[0].yyyy\n"
			"MOV TEMP[1], TEMP[0].zzzz\n"
			"NOT TEMP[0].z, TEMP[0].zzzz\n"

			/* Load result if available. */
			"UIF TEMP[1]\n"
				"UADD TEMP[0].x, IMM[0].xxxx, CONST[0][2].xxxx\n"
				"LOAD TEMP[0].xy, BUFFER[0], TEMP[0].xxxx\n"
			"ENDIF\n"
		"ELSE\n"
			/* Load previously accumulated result if requested. */
			"MOV TEMP[0], IMM[0].xxxx\n"
			"AND TEMP[4], CONST[0][0].wwww, IMM[1].xxxx\n"
			"UIF TEMP[4]\n"
				"LOAD TEMP[0].xyz, BUFFER[1], IMM[0].xxxx\n"
			"ENDIF\n"

			"MOV TEMP[1].x, IMM[0].xxxx\n"
			"BGNLOOP\n"
				/* Break if accumulated result so far is not available. */
				"UIF TEMP[0].zzzz\n"
					"BRK\n"
				"ENDIF\n"

				/* Break if result_index >= result_count. */
				"USGE TEMP[5], TEMP[1].xxxx, CONST[0][0].zzzz\n"
				"UIF TEMP[5]\n"
					"BRK\n"
				"ENDIF\n"

				/* Load fence and check result availability */
				"UMAD TEMP[5].x, TEMP[1].xxxx, CONST[0][0].yyyy, CONST[0][1].xxxx\n"
				"UADD TEMP[5].x, TEMP[5].xxxx, CONST[0][2].xxxx\n"
				"LOAD TEMP[5].x, BUFFER[0], TEMP[5].xxxx\n"
				"ISHR TEMP[0].z, TEMP[5].xxxx, IMM[0].yyyy\n"
				"NOT TEMP[0].z, TEMP[0].zzzz\n"
				"UIF TEMP[0].zzzz\n"
					"BRK\n"
				"ENDIF\n"

				"MOV TEMP[1].y, IMM[0].xxxx\n"
				"BGNLOOP\n"
					/* Load start and end. */
					"UMUL TEMP[5].x, TEMP[1].xxxx, CONST[0][0].yyyy\n"
					"UMAD TEMP[5].x, TEMP[1].yyyy, CONST[0][1].yyyy, TEMP[5].xxxx\n"
					"UADD TEMP[5].x, TEMP[5].xxxx, CONST[0][2].xxxx\n"
					"LOAD TEMP[2].xy, BUFFER[0], TEMP[5].xxxx\n"

					"UADD TEMP[5].y, TEMP[5].xxxx, CONST[0][0].xxxx\n"
					"LOAD TEMP[3].xy, BUFFER[0], TEMP[5].yyyy\n"

					"U64ADD TEMP[4].xy, TEMP[3], -TEMP[2]\n"

					"AND TEMP[5].z, CONST[0][0].wwww, IMM[4].xxxx\n"
					"UIF TEMP[5].zzzz\n"
						/* Load second start/end half-pair and
						 * take the difference
						 */
						"UADD TEMP[5].xy, TEMP[5], IMM[1].wwww\n"
						"LOAD TEMP[2].xy, BUFFER[0], TEMP[5].xxxx\n"
						"LOAD TEMP[3].xy, BUFFER[0], TEMP[5].yyyy\n"

						"U64ADD TEMP[3].xy, TEMP[3], -TEMP[2]\n"
						"U64ADD TEMP[4].xy, TEMP[4], -TEMP[3]\n"
					"ENDIF\n"

					"U64ADD TEMP[0].xy, TEMP[0], TEMP[4]\n"

					/* Increment pair index */
					"UADD TEMP[1].y, TEMP[1].yyyy, IMM[1].xxxx\n"
					"USGE TEMP[5], TEMP[1].yyyy, CONST[0][1].zzzz\n"
					"UIF TEMP[5]\n"
						"BRK\n"
					"ENDIF\n"
				"ENDLOOP\n"

				/* Increment result index */
				"UADD TEMP[1].x, TEMP[1].xxxx, IMM[1].xxxx\n"
			"ENDLOOP\n"
		"ENDIF\n"

		"AND TEMP[4], CONST[0][0].wwww, IMM[1].yyyy\n"
		"UIF TEMP[4]\n"
			/* Store accumulated data for chaining. */
			"STORE BUFFER[2].xyz, CONST[0][1].wwww, TEMP[0]\n"
		"ELSE\n"
			"AND TEMP[4], CONST[0][0].wwww, IMM[1].zzzz\n"
			"UIF TEMP[4]\n"
				/* Store result availability. */
				"NOT TEMP[0].z, TEMP[0]\n"
				"AND TEMP[0].z, TEMP[0].zzzz, IMM[1].xxxx\n"
				"STORE BUFFER[2].x, CONST[0][1].wwww, TEMP[0].zzzz\n"

				"AND TEMP[4], CONST[0][0].wwww, IMM[2].zzzz\n"
				"UIF TEMP[4]\n"
					"STORE BUFFER[2].y, CONST[0][1].wwww, IMM[0].xxxx\n"
				"ENDIF\n"
			"ELSE\n"
				/* Store result if it is available. */
				"NOT TEMP[4], TEMP[0].zzzz\n"
				"UIF TEMP[4]\n"
					/* Apply timestamp conversion */
					"AND TEMP[4], CONST[0][0].wwww, IMM[2].yyyy\n"
					"UIF TEMP[4]\n"
						"U64MUL TEMP[0].xy, TEMP[0], IMM[3].xyxy\n"
						"U64DIV TEMP[0].xy, TEMP[0], IMM[3].zwzw\n"
					"ENDIF\n"

					/* Convert to boolean */
					"AND TEMP[4], CONST[0][0].wwww, IMM[1].wwww\n"
					"UIF TEMP[4]\n"
						"U64SNE TEMP[0].x, TEMP[0].xyxy, IMM[4].zwzw\n"
						"AND TEMP[0].x, TEMP[0].xxxx, IMM[1].xxxx\n"
						"MOV TEMP[0].y, IMM[0].xxxx\n"
					"ENDIF\n"

					"AND TEMP[4], CONST[0][0].wwww, IMM[2].zzzz\n"
					"UIF TEMP[4]\n"
						"STORE BUFFER[2].xy, CONST[0][1].wwww, TEMP[0].xyxy\n"
					"ELSE\n"
						/* Clamping */
						"UIF TEMP[0].yyyy\n"
							"MOV TEMP[0].x, IMM[0].wwww\n"
						"ENDIF\n"

						"AND TEMP[4], CONST[0][0].wwww, IMM[2].wwww\n"
						"UIF TEMP[4]\n"
							"UMIN TEMP[0].x, TEMP[0].xxxx, IMM[0].zzzz\n"
						"ENDIF\n"

						"STORE BUFFER[2].x, CONST[0][1].wwww, TEMP[0].xxxx\n"
					"ENDIF\n"
				"ENDIF\n"
			"ENDIF\n"
		"ENDIF\n"

		"END\n";

	char text[sizeof(text_tmpl) + 32];
	struct tgsi_token tokens[1024];
	struct pipe_compute_state state = {};

	/* Hard code the frequency into the shader so that the backend can
	 * use the full range of optimizations for divide-by-constant.
	 */
	snprintf(text, sizeof(text), text_tmpl,
		 rctx->screen->info.clock_crystal_freq);

	if (!tgsi_text_translate(text, tokens, ARRAY_SIZE(tokens))) {
		assert(false);
		return;
	}

	state.ir_type = PIPE_SHADER_IR_TGSI;
	state.prog = tokens;

	rctx->query_result_shader = rctx->b.create_compute_state(&rctx->b, &state);
}

static void r600_restore_qbo_state(struct r600_common_context *rctx,
				   struct r600_qbo_state *st)
{
	rctx->b.bind_compute_state(&rctx->b, st->saved_compute);

	rctx->b.set_constant_buffer(&rctx->b, PIPE_SHADER_COMPUTE, 0, &st->saved_const0);
	pipe_resource_reference(&st->saved_const0.buffer, NULL);

	rctx->b.set_shader_buffers(&rctx->b, PIPE_SHADER_COMPUTE, 0, 3, st->saved_ssbo, ~0);
	for (unsigned i = 0; i < 3; ++i)
		pipe_resource_reference(&st->saved_ssbo[i].buffer, NULL);
}

r600_query_hw_get_result_resource(struct r600_common_context * rctx,struct r600_query * rquery,bool wait,enum pipe_query_value_type result_type,int index,struct pipe_resource * resource,unsigned offset)1603 static void r600_query_hw_get_result_resource(struct r600_common_context *rctx,
1604 struct r600_query *rquery,
1605 bool wait,
1606 enum pipe_query_value_type result_type,
1607 int index,
1608 struct pipe_resource *resource,
1609 unsigned offset)
1610 {
1611 struct r600_query_hw *query = (struct r600_query_hw *)rquery;
1612 struct r600_query_buffer *qbuf;
1613 struct r600_query_buffer *qbuf_prev;
1614 struct pipe_resource *tmp_buffer = NULL;
1615 unsigned tmp_buffer_offset = 0;
1616 struct r600_qbo_state saved_state = {};
1617 struct pipe_grid_info grid = {};
1618 struct pipe_constant_buffer constant_buffer = {};
1619 struct pipe_shader_buffer ssbo[3];
1620 struct r600_hw_query_params params;
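	/* Uploaded as constant buffer 0; the field order must match the
	 * CONST[0][0..1] layout read by the TGSI shader above.
	 */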
	struct {
		uint32_t end_offset;
		uint32_t result_stride;
		uint32_t result_count;
		uint32_t config;
		uint32_t fence_offset;
		uint32_t pair_stride;
		uint32_t pair_count;
		uint32_t buffer_offset;
		uint32_t buffer0_offset;
	} consts;

	if (!rctx->query_result_shader) {
		r600_create_query_result_shader(rctx);
		if (!rctx->query_result_shader)
			return;
	}

	if (query->buffer.previous) {
		u_suballocator_alloc(rctx->allocator_zeroed_memory, 16, 256,
				     &tmp_buffer_offset, &tmp_buffer);
		if (!tmp_buffer)
			return;
	}
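	/* When the query spans several buffers, this zeroed scratch carries
	 * the accumulated result between the per-buffer compute dispatches;
	 * with a single buffer the shader neither reads BUFFER[1] nor chains,
	 * so no scratch is needed.
	 */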

	rctx->save_qbo_state(&rctx->b, &saved_state);

	r600_get_hw_query_params(rctx, query, index >= 0 ? index : 0, &params);
	consts.end_offset = params.end_offset - params.start_offset;
	consts.fence_offset = params.fence_offset - params.start_offset;
	consts.result_stride = query->result_size;
	consts.pair_stride = params.pair_stride;
	consts.pair_count = params.pair_count;

	constant_buffer.buffer_size = sizeof(consts);
	constant_buffer.user_buffer = &consts;

	ssbo[1].buffer = tmp_buffer;
	ssbo[1].buffer_offset = tmp_buffer_offset;
	ssbo[1].buffer_size = 16;

	ssbo[2] = ssbo[1];

	rctx->b.bind_compute_state(&rctx->b, rctx->query_result_shader);

	grid.block[0] = 1;
	grid.block[1] = 1;
	grid.block[2] = 1;
	grid.grid[0] = 1;
	grid.grid[1] = 1;
	grid.grid[2] = 1;

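	/* config is the bit-field the shader reads as CONST[0][0].wwww.
	 * Judging by the IMM masks tested above: bits 1/2 (set per buffer in
	 * the loop below) chain partial sums across query buffers, 4 requests
	 * availability only (index < 0), 8 converts the result to a boolean,
	 * 256 selects the streamout-overflow half-pair mode, 16 reads just
	 * the last value (timestamps), 32 applies the tick-to-time
	 * conversion, and 64/128 pick the stored result width.
	 */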
	consts.config = 0;
	if (index < 0)
		consts.config |= 4;
	if (query->b.type == PIPE_QUERY_OCCLUSION_PREDICATE ||
	    query->b.type == PIPE_QUERY_OCCLUSION_PREDICATE_CONSERVATIVE)
		consts.config |= 8;
	else if (query->b.type == PIPE_QUERY_SO_OVERFLOW_PREDICATE ||
		 query->b.type == PIPE_QUERY_SO_OVERFLOW_ANY_PREDICATE)
		consts.config |= 8 | 256;
	else if (query->b.type == PIPE_QUERY_TIMESTAMP ||
		 query->b.type == PIPE_QUERY_TIME_ELAPSED)
		consts.config |= 32;

	switch (result_type) {
	case PIPE_QUERY_TYPE_U64:
	case PIPE_QUERY_TYPE_I64:
		consts.config |= 64;
		break;
	case PIPE_QUERY_TYPE_I32:
		consts.config |= 128;
		break;
	case PIPE_QUERY_TYPE_U32:
		break;
	}

	rctx->flags |= rctx->screen->barrier_flags.cp_to_L2;

	for (qbuf = &query->buffer; qbuf; qbuf = qbuf_prev) {
		if (query->b.type != PIPE_QUERY_TIMESTAMP) {
			qbuf_prev = qbuf->previous;
			consts.result_count = qbuf->results_end / query->result_size;
			consts.config &= ~3;
			if (qbuf != &query->buffer)
				consts.config |= 1;
			if (qbuf->previous)
				consts.config |= 2;
		} else {
			/* Only read the last timestamp. */
			qbuf_prev = NULL;
			consts.result_count = 0;
			consts.config |= 16;
			params.start_offset += qbuf->results_end - query->result_size;
		}

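		/* Shader buffer bindings apparently must be 256-byte aligned
		 * here, so bind at the aligned base and pass the low offset
		 * bits to the shader through the constant buffer.
		 */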
		ssbo[0].buffer = &qbuf->buf->b.b;
		ssbo[0].buffer_offset = params.start_offset & ~0xff;
		ssbo[0].buffer_size = qbuf->results_end - ssbo[0].buffer_offset;
		consts.buffer0_offset = (params.start_offset & 0xff);

		if (!qbuf->previous) {
			ssbo[2].buffer = resource;
			ssbo[2].buffer_offset = offset & ~0xff;
			ssbo[2].buffer_size = offset + 8;
			consts.buffer_offset = (offset & 0xff);
		} else
			consts.buffer_offset = 0;

		rctx->b.set_constant_buffer(&rctx->b, PIPE_SHADER_COMPUTE, 0, &constant_buffer);

		rctx->b.set_shader_buffers(&rctx->b, PIPE_SHADER_COMPUTE, 0, 3, ssbo, ~0);

		if (wait && qbuf == &query->buffer) {
			uint64_t va;

			/* Wait for result availability. Wait only for readiness
			 * of the last entry, since the fence writes should be
			 * serialized in the CP.
			 */
			va = qbuf->buf->gpu_address + qbuf->results_end - query->result_size;
			va += params.fence_offset;

			r600_gfx_wait_fence(rctx, qbuf->buf, va, 0x80000000, 0x80000000);
		}

		rctx->b.launch_grid(&rctx->b, &grid);
		rctx->flags |= rctx->screen->barrier_flags.compute_to_L2;
	}

	r600_restore_qbo_state(rctx, &saved_state);
	pipe_resource_reference(&tmp_buffer, NULL);
}
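
/* For reference, a rough sketch of how a state tracker reaches the path
 * above through the gallium hooks installed in r600_query_init(); "pipe",
 * "q" and "buf" are illustrative names, the entry points are the standard
 * pipe_context ones:
 *
 *	struct pipe_query *q =
 *		pipe->create_query(pipe, PIPE_QUERY_OCCLUSION_COUNTER, 0);
 *	pipe->begin_query(pipe, q);
 *	// ... draws ...
 *	pipe->end_query(pipe, q);
 *	// GPU-side readback: write the 64-bit result into "buf" at offset 0,
 *	// waiting for availability rather than storing a partial result.
 *	pipe->get_query_result_resource(pipe, q, true, PIPE_QUERY_TYPE_U64,
 *					0, buf, 0);
 */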

static void r600_render_condition(struct pipe_context *ctx,
				  struct pipe_query *query,
				  bool condition,
				  enum pipe_render_cond_flag mode)
{
	struct r600_common_context *rctx = (struct r600_common_context *)ctx;
	struct r600_query_hw *rquery = (struct r600_query_hw *)query;
	struct r600_query_buffer *qbuf;
	struct r600_atom *atom = &rctx->render_cond_atom;

	/* Compute the size of SET_PREDICATION packets. */
	atom->num_dw = 0;
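	/* Each query result costs 5 dwords, presumably one SET_PREDICATION
	 * packet (3 dwords) plus its buffer relocation (2 dwords).
	 */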
	if (query) {
		for (qbuf = &rquery->buffer; qbuf; qbuf = qbuf->previous)
			atom->num_dw += (qbuf->results_end / rquery->result_size) * 5;

		if (rquery->b.type == PIPE_QUERY_SO_OVERFLOW_ANY_PREDICATE)
			atom->num_dw *= R600_MAX_STREAMS;
	}

	rctx->render_cond = query;
	rctx->render_cond_invert = condition;
	rctx->render_cond_mode = mode;

	rctx->set_atom_dirty(rctx, atom, query != NULL);
}

void r600_suspend_queries(struct r600_common_context *ctx)
{
	struct r600_query_hw *query;

	LIST_FOR_EACH_ENTRY(query, &ctx->active_queries, list) {
		r600_query_hw_emit_stop(ctx, query);
	}
	assert(ctx->num_cs_dw_queries_suspend == 0);
}

static unsigned r600_queries_num_cs_dw_for_resuming(struct r600_common_context *ctx,
						    struct list_head *query_list)
{
	struct r600_query_hw *query;
	unsigned num_dw = 0;

	LIST_FOR_EACH_ENTRY(query, query_list, list) {
		/* begin + end */
		num_dw += query->num_cs_dw_begin + query->num_cs_dw_end;

		/* Workaround for the fact that
		 * num_cs_dw_nontimer_queries_suspend is incremented for every
		 * resumed query, which raises the bar in need_cs_space for
		 * queries about to be resumed.
		 */
		num_dw += query->num_cs_dw_end;
	}
	/* primitives-generated query */
	num_dw += ctx->streamout.enable_atom.num_dw;
	/* guess for ZPASS enable or PERFECT_ZPASS_COUNT enable updates */
	num_dw += 13;

	return num_dw;
}

void r600_resume_queries(struct r600_common_context *ctx)
{
	struct r600_query_hw *query;
	unsigned num_cs_dw = r600_queries_num_cs_dw_for_resuming(ctx, &ctx->active_queries);

	assert(ctx->num_cs_dw_queries_suspend == 0);

	/* Check CS space here. Resuming must not be interrupted by flushes. */
	ctx->need_gfx_cs_space(&ctx->b, num_cs_dw, true);

	LIST_FOR_EACH_ENTRY(query, &ctx->active_queries, list) {
		r600_query_hw_emit_start(ctx, query);
	}
}

/* Fix radeon_info::enabled_rb_mask for R600, R700, EVERGREEN, NI. */
void r600_query_fix_enabled_rb_mask(struct r600_common_screen *rscreen)
{
	struct r600_common_context *ctx =
		(struct r600_common_context*)rscreen->aux_context;
	struct radeon_cmdbuf *cs = ctx->gfx.cs;
	struct r600_resource *buffer;
	uint32_t *results;
	unsigned i, mask = 0;
	unsigned max_rbs;

	if (ctx->family == CHIP_JUNIPER) {
		/*
		 * Fix for predication lockups: the chip can only ever have
		 * 4 RBs, but the predication logic seems to assume there are
		 * 8, trying to read results from query buffers that are never
		 * written to. By raising this number we write the status bit
		 * for the extra slots as per the normal disabled-RB logic.
		 */
		ctx->screen->info.num_render_backends = 8;
	}
	max_rbs = ctx->screen->info.num_render_backends;

	assert(rscreen->chip_class <= CAYMAN);

	/*
	 * Use the backend_map query if the kernel supports it.
	 * Note that the kernel DRM driver for a long time never filled in the
	 * associated data on eg/cm, only on r600/r700, so ignore the valid
	 * bit there if the map is zero.
	 * (Albeit some chips with just one active RB can have a valid 0 map.)
	 */
	if (rscreen->info.r600_gb_backend_map_valid &&
	    (ctx->chip_class < EVERGREEN || rscreen->info.r600_gb_backend_map != 0)) {
		unsigned num_tile_pipes = rscreen->info.num_tile_pipes;
		unsigned backend_map = rscreen->info.r600_gb_backend_map;
		unsigned item_width, item_mask;

		if (ctx->chip_class >= EVERGREEN) {
			item_width = 4;
			item_mask = 0x7;
		} else {
			item_width = 2;
			item_mask = 0x3;
		}

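		/* backend_map packs one RB index per tile pipe, item_width
		 * bits each. E.g. (hypothetical r700 values) backend_map =
		 * 0xE4 = 0b 11 10 01 00 with num_tile_pipes = 4 decodes,
		 * LSB first, to RBs 0, 1, 2, 3, i.e. mask = 0xF.
		 */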
		while (num_tile_pipes--) {
			i = backend_map & item_mask;
			mask |= (1 << i);
			backend_map >>= item_width;
		}
		if (mask != 0) {
			rscreen->info.enabled_rb_mask = mask;
			return;
		}
	}

	/* Otherwise, use the fallback path for older kernels. */

	/* Create a buffer for the event data. */
	buffer = (struct r600_resource*)
		pipe_buffer_create(ctx->b.screen, 0,
				   PIPE_USAGE_STAGING, max_rbs * 16);
	if (!buffer)
		return;

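	/* The ZPASS_DONE event below appears to make each enabled RB write a
	 * 16-byte slot (a 64-bit begin/end counter pair, which is why the
	 * buffer holds max_rbs * 16 bytes). The buffer is zero-filled first
	 * so that slots of disabled RBs stay zero and can be told apart.
	 */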
	/* Initialize the buffer with zeroes. */
	results = r600_buffer_map_sync_with_rings(ctx, buffer, PIPE_MAP_WRITE);
	if (results) {
		memset(results, 0, max_rbs * 4 * 4);

		/* emit EVENT_WRITE for ZPASS_DONE */
		radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 2, 0));
		radeon_emit(cs, EVENT_TYPE(EVENT_TYPE_ZPASS_DONE) | EVENT_INDEX(1));
		radeon_emit(cs, buffer->gpu_address);
		radeon_emit(cs, buffer->gpu_address >> 32);

		r600_emit_reloc(ctx, &ctx->gfx, buffer,
				RADEON_USAGE_WRITE, RADEON_PRIO_QUERY);

		/* analyze the results */
		results = r600_buffer_map_sync_with_rings(ctx, buffer, PIPE_MAP_READ);
		if (results) {
			for (i = 0; i < max_rbs; i++) {
				/* at least the highest bit will be set if the backend is used */
				if (results[i * 4 + 1])
					mask |= (1 << i);
			}
		}
	}

	r600_resource_reference(&buffer, NULL);

	if (mask) {
		if (rscreen->debug_flags & DBG_INFO &&
		    mask != rscreen->info.enabled_rb_mask) {
			printf("enabled_rb_mask (fixed) = 0x%x\n", mask);
		}
		rscreen->info.enabled_rb_mask = mask;
	}
}

#define XFULL(name_, query_type_, type_, result_type_, group_id_) \
	{ \
		.name = name_, \
		.query_type = R600_QUERY_##query_type_, \
		.type = PIPE_DRIVER_QUERY_TYPE_##type_, \
		.result_type = PIPE_DRIVER_QUERY_RESULT_TYPE_##result_type_, \
		.group_id = group_id_ \
	}

#define X(name_, query_type_, type_, result_type_) \
	XFULL(name_, query_type_, type_, result_type_, ~(unsigned)0)

#define XG(group_, name_, query_type_, type_, result_type_) \
	XFULL(name_, query_type_, type_, result_type_, R600_QUERY_GROUP_##group_)

static const struct pipe_driver_query_info r600_driver_query_list[] = {
	X("num-compilations", NUM_COMPILATIONS, UINT64, CUMULATIVE),
	X("num-shaders-created", NUM_SHADERS_CREATED, UINT64, CUMULATIVE),
	X("num-shader-cache-hits", NUM_SHADER_CACHE_HITS, UINT64, CUMULATIVE),
	X("draw-calls", DRAW_CALLS, UINT64, AVERAGE),
	X("decompress-calls", DECOMPRESS_CALLS, UINT64, AVERAGE),
	X("MRT-draw-calls", MRT_DRAW_CALLS, UINT64, AVERAGE),
	X("prim-restart-calls", PRIM_RESTART_CALLS, UINT64, AVERAGE),
	X("spill-draw-calls", SPILL_DRAW_CALLS, UINT64, AVERAGE),
	X("compute-calls", COMPUTE_CALLS, UINT64, AVERAGE),
	X("spill-compute-calls", SPILL_COMPUTE_CALLS, UINT64, AVERAGE),
	X("dma-calls", DMA_CALLS, UINT64, AVERAGE),
	X("cp-dma-calls", CP_DMA_CALLS, UINT64, AVERAGE),
	X("num-vs-flushes", NUM_VS_FLUSHES, UINT64, AVERAGE),
	X("num-ps-flushes", NUM_PS_FLUSHES, UINT64, AVERAGE),
	X("num-cs-flushes", NUM_CS_FLUSHES, UINT64, AVERAGE),
	X("num-CB-cache-flushes", NUM_CB_CACHE_FLUSHES, UINT64, AVERAGE),
	X("num-DB-cache-flushes", NUM_DB_CACHE_FLUSHES, UINT64, AVERAGE),
	X("num-resident-handles", NUM_RESIDENT_HANDLES, UINT64, AVERAGE),
	X("tc-offloaded-slots", TC_OFFLOADED_SLOTS, UINT64, AVERAGE),
	X("tc-direct-slots", TC_DIRECT_SLOTS, UINT64, AVERAGE),
	X("tc-num-syncs", TC_NUM_SYNCS, UINT64, AVERAGE),
	X("CS-thread-busy", CS_THREAD_BUSY, UINT64, AVERAGE),
	X("gallium-thread-busy", GALLIUM_THREAD_BUSY, UINT64, AVERAGE),
	X("requested-VRAM", REQUESTED_VRAM, BYTES, AVERAGE),
	X("requested-GTT", REQUESTED_GTT, BYTES, AVERAGE),
	X("mapped-VRAM", MAPPED_VRAM, BYTES, AVERAGE),
	X("mapped-GTT", MAPPED_GTT, BYTES, AVERAGE),
	X("buffer-wait-time", BUFFER_WAIT_TIME, MICROSECONDS, CUMULATIVE),
	X("num-mapped-buffers", NUM_MAPPED_BUFFERS, UINT64, AVERAGE),
	X("num-GFX-IBs", NUM_GFX_IBS, UINT64, AVERAGE),
	X("num-SDMA-IBs", NUM_SDMA_IBS, UINT64, AVERAGE),
	X("GFX-BO-list-size", GFX_BO_LIST_SIZE, UINT64, AVERAGE),
	X("num-bytes-moved", NUM_BYTES_MOVED, BYTES, CUMULATIVE),
	X("num-evictions", NUM_EVICTIONS, UINT64, CUMULATIVE),
	X("VRAM-CPU-page-faults", NUM_VRAM_CPU_PAGE_FAULTS, UINT64, CUMULATIVE),
	X("VRAM-usage", VRAM_USAGE, BYTES, AVERAGE),
	X("VRAM-vis-usage", VRAM_VIS_USAGE, BYTES, AVERAGE),
	X("GTT-usage", GTT_USAGE, BYTES, AVERAGE),

	/* GPIN queries are for the benefit of old versions of GPUPerfStudio,
	 * which use them as a fallback path to detect the GPU type.
	 *
	 * Note: The names of these queries are significant for GPUPerfStudio
	 * (and possibly their order as well). */
	XG(GPIN, "GPIN_000", GPIN_ASIC_ID, UINT, AVERAGE),
	XG(GPIN, "GPIN_001", GPIN_NUM_SIMD, UINT, AVERAGE),
	XG(GPIN, "GPIN_002", GPIN_NUM_RB, UINT, AVERAGE),
	XG(GPIN, "GPIN_003", GPIN_NUM_SPI, UINT, AVERAGE),
	XG(GPIN, "GPIN_004", GPIN_NUM_SE, UINT, AVERAGE),

	X("temperature", GPU_TEMPERATURE, UINT64, AVERAGE),
	X("shader-clock", CURRENT_GPU_SCLK, HZ, AVERAGE),
	X("memory-clock", CURRENT_GPU_MCLK, HZ, AVERAGE),

	/* The following queries must be at the end of the list because their
	 * availability is adjusted dynamically based on the DRM version. */
	X("GPU-load", GPU_LOAD, UINT64, AVERAGE),
	X("GPU-shaders-busy", GPU_SHADERS_BUSY, UINT64, AVERAGE),
	X("GPU-ta-busy", GPU_TA_BUSY, UINT64, AVERAGE),
	X("GPU-gds-busy", GPU_GDS_BUSY, UINT64, AVERAGE),
	X("GPU-vgt-busy", GPU_VGT_BUSY, UINT64, AVERAGE),
	X("GPU-ia-busy", GPU_IA_BUSY, UINT64, AVERAGE),
	X("GPU-sx-busy", GPU_SX_BUSY, UINT64, AVERAGE),
	X("GPU-wd-busy", GPU_WD_BUSY, UINT64, AVERAGE),
	X("GPU-bci-busy", GPU_BCI_BUSY, UINT64, AVERAGE),
	X("GPU-sc-busy", GPU_SC_BUSY, UINT64, AVERAGE),
	X("GPU-pa-busy", GPU_PA_BUSY, UINT64, AVERAGE),
	X("GPU-db-busy", GPU_DB_BUSY, UINT64, AVERAGE),
	X("GPU-cp-busy", GPU_CP_BUSY, UINT64, AVERAGE),
	X("GPU-cb-busy", GPU_CB_BUSY, UINT64, AVERAGE),
	X("GPU-sdma-busy", GPU_SDMA_BUSY, UINT64, AVERAGE),
	X("GPU-pfp-busy", GPU_PFP_BUSY, UINT64, AVERAGE),
	X("GPU-meq-busy", GPU_MEQ_BUSY, UINT64, AVERAGE),
	X("GPU-me-busy", GPU_ME_BUSY, UINT64, AVERAGE),
	X("GPU-surf-sync-busy", GPU_SURF_SYNC_BUSY, UINT64, AVERAGE),
	X("GPU-cp-dma-busy", GPU_CP_DMA_BUSY, UINT64, AVERAGE),
	X("GPU-scratch-ram-busy", GPU_SCRATCH_RAM_BUSY, UINT64, AVERAGE),
};

#undef X
#undef XG
#undef XFULL

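/* The trailing entries of r600_driver_query_list (the sensors and the
 * GPU-busy family) rely on kernel readbacks that, presumably, only DRM 2.42
 * and newer provide, so older kernels see a truncated list.
 */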
static unsigned r600_get_num_queries(struct r600_common_screen *rscreen)
{
	if (rscreen->info.drm_minor >= 42)
		return ARRAY_SIZE(r600_driver_query_list);
	else
		return ARRAY_SIZE(r600_driver_query_list) - 25;
}

static int r600_get_driver_query_info(struct pipe_screen *screen,
				      unsigned index,
				      struct pipe_driver_query_info *info)
{
	struct r600_common_screen *rscreen = (struct r600_common_screen*)screen;
	unsigned num_queries = r600_get_num_queries(rscreen);

	if (!info) {
		unsigned num_perfcounters =
			r600_get_perfcounter_info(rscreen, 0, NULL);

		return num_queries + num_perfcounters;
	}

	if (index >= num_queries)
		return r600_get_perfcounter_info(rscreen, index - num_queries, info);

	*info = r600_driver_query_list[index];

	switch (info->query_type) {
	case R600_QUERY_REQUESTED_VRAM:
	case R600_QUERY_VRAM_USAGE:
	case R600_QUERY_MAPPED_VRAM:
		info->max_value.u64 = rscreen->info.vram_size;
		break;
	case R600_QUERY_REQUESTED_GTT:
	case R600_QUERY_GTT_USAGE:
	case R600_QUERY_MAPPED_GTT:
		info->max_value.u64 = rscreen->info.gart_size;
		break;
	case R600_QUERY_GPU_TEMPERATURE:
		info->max_value.u64 = 125;
		break;
	case R600_QUERY_VRAM_VIS_USAGE:
		info->max_value.u64 = rscreen->info.vram_vis_size;
		break;
	}

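	/* Software query groups are exposed after the perfcounter groups
	 * (see r600_get_driver_query_group_info), so shift the group id
	 * past them.
	 */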
	if (info->group_id != ~(unsigned)0 && rscreen->perfcounters)
		info->group_id += rscreen->perfcounters->num_groups;

	return 1;
}

/* Note: Unfortunately, GPUPerfStudio hardcodes the order of hardware
 * performance counter groups, so be careful when changing this and related
 * functions.
 */
static int r600_get_driver_query_group_info(struct pipe_screen *screen,
					    unsigned index,
					    struct pipe_driver_query_group_info *info)
{
	struct r600_common_screen *rscreen = (struct r600_common_screen *)screen;
	unsigned num_pc_groups = 0;

	if (rscreen->perfcounters)
		num_pc_groups = rscreen->perfcounters->num_groups;

	if (!info)
		return num_pc_groups + R600_NUM_SW_QUERY_GROUPS;

	if (index < num_pc_groups)
		return r600_get_perfcounter_group_info(rscreen, index, info);

	index -= num_pc_groups;
	if (index >= R600_NUM_SW_QUERY_GROUPS)
		return 0;

	info->name = "GPIN";
	info->max_active_queries = 5;
	info->num_queries = 5;
	return 1;
}

void r600_query_init(struct r600_common_context *rctx)
{
	rctx->b.create_query = r600_create_query;
	rctx->b.create_batch_query = r600_create_batch_query;
	rctx->b.destroy_query = r600_destroy_query;
	rctx->b.begin_query = r600_begin_query;
	rctx->b.end_query = r600_end_query;
	rctx->b.get_query_result = r600_get_query_result;
	rctx->b.get_query_result_resource = r600_get_query_result_resource;
	rctx->render_cond_atom.emit = r600_emit_query_predication;

	if (((struct r600_common_screen*)rctx->b.screen)->info.num_render_backends > 0)
		rctx->b.render_condition = r600_render_condition;

	list_inithead(&rctx->active_queries);
}

void r600_init_screen_query_functions(struct r600_common_screen *rscreen)
{
	rscreen->b.get_driver_query_info = r600_get_driver_query_info;
	rscreen->b.get_driver_query_group_info = r600_get_driver_query_group_info;
}