/*
 * Copyright 2022 Alyssa Rosenzweig
 * Copyright 2019-2020 Collabora, Ltd.
 * SPDX-License-Identifier: MIT
 */

#include <stdint.h>
#include "pipe/p_defines.h"
#include "util/bitset.h"
#include "util/macros.h"
#include "util/ralloc.h"
#include "util/u_dump.h"
#include "util/u_inlines.h"
#include "agx_bo.h"
#include "agx_device.h"
#include "agx_state.h"
#include "libagx.h"
#include "libagx_dgc.h"
#include "libagx_shaders.h"

static bool
is_occlusion(struct agx_query *query)
{
   switch (query->type) {
   case PIPE_QUERY_OCCLUSION_COUNTER:
   case PIPE_QUERY_OCCLUSION_PREDICATE:
   case PIPE_QUERY_OCCLUSION_PREDICATE_CONSERVATIVE:
      return true;
   default:
      return false;
   }
}

static bool
is_timer(struct agx_query *query)
{
   switch (query->type) {
   case PIPE_QUERY_TIMESTAMP:
   case PIPE_QUERY_TIME_ELAPSED:
      return true;
   default:
      return false;
   }
}

struct agx_oq_heap {
   /* Device that owns the heap, needed to free the BO */
   struct agx_device *dev;

   /* The GPU allocation itself */
   struct agx_bo *bo;

   /* Bitset of query indices that are still available for allocation */
   BITSET_DECLARE(available, AGX_MAX_OCCLUSION_QUERIES);
};

static void
agx_destroy_oq_heap(void *heap_)
{
   struct agx_oq_heap *heap = heap_;
   agx_bo_unreference(heap->dev, heap->bo);
}

static struct agx_oq_heap *
agx_alloc_oq_heap(struct agx_context *ctx)
{
   struct agx_oq_heap *heap = rzalloc(ctx, struct agx_oq_heap);
   ralloc_set_destructor(heap, agx_destroy_oq_heap);

   heap->dev = agx_device(ctx->base.screen);
   heap->bo =
      agx_bo_create(heap->dev, AGX_MAX_OCCLUSION_QUERIES * sizeof(uint64_t), 0,
                    AGX_BO_WRITEBACK, "Occlusion query heap");

   /* At the start, everything is available */
   BITSET_ONES(heap->available);

   return heap;
}

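/* Get the context's occlusion query heap, allocating it on first use */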
static struct agx_oq_heap *
agx_get_oq_heap(struct agx_context *ctx)
{
   if (!ctx->oq)
      ctx->oq = agx_alloc_oq_heap(ctx);

   return ctx->oq;
}

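/* Allocate an 8-byte occlusion query slot from the heap. Returns a null
 * pointer if all AGX_MAX_OCCLUSION_QUERIES slots are in use.
 */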
static struct agx_ptr
agx_alloc_oq(struct agx_context *ctx)
{
   struct agx_oq_heap *heap = agx_get_oq_heap(ctx);

   /* Find first available */
   int ffs = BITSET_FFS(heap->available);
   if (!ffs)
      return (struct agx_ptr){NULL, 0};

   /* Allocate it */
   unsigned index = ffs - 1;
   BITSET_CLEAR(heap->available, index);

   unsigned offset = index * sizeof(uint64_t);

   return (struct agx_ptr){
      (uint8_t *)agx_bo_map(heap->bo) + offset,
      heap->bo->va->addr + offset,
   };
}

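/* Map an occlusion query's GPU address back to its index in the heap */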
static unsigned
agx_oq_index(struct agx_context *ctx, struct agx_query *q)
{
   assert(is_occlusion(q));

   return (q->ptr.gpu - ctx->oq->bo->va->addr) / sizeof(uint64_t);
}

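/* Return an occlusion query slot to the heap. The caller must ensure the GPU
 * is done with the slot, since an available slot is assumed to be idle.
 */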
static void
agx_free_oq(struct agx_context *ctx, struct agx_query *q)
{
   struct agx_oq_heap *heap = agx_get_oq_heap(ctx);
   unsigned index = agx_oq_index(ctx, q);

   assert(index < AGX_MAX_OCCLUSION_QUERIES);
   assert(!BITSET_TEST(heap->available, index));

   BITSET_SET(heap->available, index);
}

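/* Return the GPU address of the occlusion query heap, or 0 if the batch does
 * not use it.
 */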
uint64_t
agx_get_occlusion_heap(struct agx_batch *batch)
{
   if (!batch->ctx->oq)
      return 0;

   struct agx_bo *bo = batch->ctx->oq->bo;

   if (agx_batch_uses_bo(batch, bo))
      return bo->va->addr;
   else
      return 0;
}

static struct pipe_query *
agx_create_query(struct pipe_context *ctx, unsigned query_type, unsigned index)
{
   struct agx_query *query = calloc(1, sizeof(struct agx_query));

   query->type = query_type;
   query->index = index;

   /* Set all writer generations to a sentinel that will never match a live
    * batch generation, since a freshly created query has no writers.
    */
   for (unsigned i = 0; i < ARRAY_SIZE(query->writer_generation); ++i) {
      query->writer_generation[i] = UINT64_MAX;
   }

   if (is_occlusion(query)) {
      query->ptr = agx_alloc_oq(agx_context(ctx));
   } else {
      /* TODO: a BO for the query is wasteful, but we benefit from BO list
       * tracking / reference counting to deal with lifetimes.
       */
      query->bo = agx_bo_create(agx_device(ctx->screen), sizeof(uint64_t) * 2,
                                0, AGX_BO_WRITEBACK, "Query");
      query->ptr = (struct agx_ptr){
         .gpu = query->bo->va->addr,
         .cpu = agx_bo_map(query->bo),
      };
   }

   if (!query->ptr.gpu) {
      free(query);
      return NULL;
   }

   return (struct pipe_query *)query;
}

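/* Flush every batch whose generation still matches the query's recorded
 * writer generation, i.e. every batch that might still write this query.
 */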
static void
flush_query_writers(struct agx_context *ctx, struct agx_query *query,
                    const char *reason)
{
   STATIC_ASSERT(ARRAY_SIZE(ctx->batches.generation) == AGX_MAX_BATCHES);
   STATIC_ASSERT(ARRAY_SIZE(ctx->batches.slots) == AGX_MAX_BATCHES);
   STATIC_ASSERT(ARRAY_SIZE(query->writer_generation) == AGX_MAX_BATCHES);

   for (unsigned i = 0; i < AGX_MAX_BATCHES; ++i) {
      if (query->writer_generation[i] == ctx->batches.generation[i])
         agx_flush_batch_for_reason(ctx, &ctx->batches.slots[i], reason);
   }
}

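/* Like flush_query_writers, but also waits for the writers to complete */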
static void
sync_query_writers(struct agx_context *ctx, struct agx_query *query,
                   const char *reason)
{
   for (unsigned i = 0; i < AGX_MAX_BATCHES; ++i) {
      if (query->writer_generation[i] == ctx->batches.generation[i])
         agx_sync_batch_for_reason(ctx, &ctx->batches.slots[i], reason);
   }
}

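/* A query is busy while any batch that wrote it is still in flight */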
static bool
is_query_busy(struct agx_context *ctx, struct agx_query *query)
{
   for (unsigned i = 0; i < AGX_MAX_BATCHES; ++i) {
      if (query->writer_generation[i] == ctx->batches.generation[i])
         return true;
   }

   return false;
}

static void
agx_destroy_query(struct pipe_context *pctx, struct pipe_query *pquery)
{
   struct agx_context *ctx = agx_context(pctx);
   struct agx_query *query = (struct agx_query *)pquery;
   struct agx_device *dev = agx_device(pctx->screen);

   /* We don't reference count the occlusion query allocations, so we need to
    * sync writers before destroying: once the slot is freed, the driver
    * assumes an available query is idle and may write it from the CPU.
    *
    * For other queries, the BO itself is reference counted, so it stays alive
    * after the pipe_query is destroyed and we don't need to flush.
    */
   if (is_occlusion(query)) {
      sync_query_writers(ctx, query, "Occlusion query destroy");
      agx_free_oq(ctx, query);
   } else {
      agx_bo_unreference(dev, query->bo);
   }

   free(pquery);
}

static bool
agx_begin_query(struct pipe_context *pctx, struct pipe_query *pquery)
{
   struct agx_context *ctx = agx_context(pctx);
   struct agx_query *query = (struct agx_query *)pquery;

   ctx->dirty |= AGX_DIRTY_QUERY;

   switch (query->type) {
   case PIPE_QUERY_OCCLUSION_COUNTER:
   case PIPE_QUERY_OCCLUSION_PREDICATE:
   case PIPE_QUERY_OCCLUSION_PREDICATE_CONSERVATIVE:
      ctx->occlusion_query = query;
      break;

   case PIPE_QUERY_PRIMITIVES_GENERATED:
      ctx->prims_generated[query->index] = query;
      break;

   case PIPE_QUERY_PRIMITIVES_EMITTED:
      ctx->tf_prims_generated[query->index] = query;
      break;

   case PIPE_QUERY_SO_OVERFLOW_PREDICATE:
      ctx->tf_overflow[query->index] = query;
      break;

   case PIPE_QUERY_SO_OVERFLOW_ANY_PREDICATE:
      ctx->tf_any_overflow = query;
      break;

   case PIPE_QUERY_TIME_ELAPSED:
      ctx->time_elapsed = query;
      break;

   case PIPE_QUERY_TIMESTAMP:
      /* No-op */
      break;

   case PIPE_QUERY_PIPELINE_STATISTICS_SINGLE:
      assert(query->index < ARRAY_SIZE(ctx->pipeline_statistics));
      ctx->pipeline_statistics[query->index] = query;
      break;

   default:
      return false;
   }

   /* begin_query zeroes the result, so sync with any writers to make that CPU
    * write safe.
    */
   sync_query_writers(ctx, query, "Query overwritten");

   uint64_t *ptr = query->ptr.cpu;
   ptr[0] = 0;

   if (query->type == PIPE_QUERY_TIME_ELAPSED) {
      /* The begin timestamp lives in the second slot, the end timestamp in the
       * first. Seed begin with UINT64_MAX so the MIN2 in
       * agx_finish_batch_queries picks up the first real timestamp.
       */
      ptr[1] = UINT64_MAX;
   }

   return true;
}

static bool
agx_end_query(struct pipe_context *pctx, struct pipe_query *pquery)
{
   struct agx_context *ctx = agx_context(pctx);
   struct agx_device *dev = agx_device(pctx->screen);
   struct agx_query *query = (struct agx_query *)pquery;

   ctx->dirty |= AGX_DIRTY_QUERY;

   switch (query->type) {
   case PIPE_QUERY_OCCLUSION_COUNTER:
   case PIPE_QUERY_OCCLUSION_PREDICATE:
   case PIPE_QUERY_OCCLUSION_PREDICATE_CONSERVATIVE:
      ctx->occlusion_query = NULL;
      return true;
   case PIPE_QUERY_PRIMITIVES_GENERATED:
      ctx->prims_generated[query->index] = NULL;
      return true;
   case PIPE_QUERY_PRIMITIVES_EMITTED:
      ctx->tf_prims_generated[query->index] = NULL;
      return true;
   case PIPE_QUERY_SO_OVERFLOW_PREDICATE:
      ctx->tf_overflow[query->index] = NULL;
      return true;
   case PIPE_QUERY_SO_OVERFLOW_ANY_PREDICATE:
      ctx->tf_any_overflow = NULL;
      return true;
   case PIPE_QUERY_TIME_ELAPSED:
      ctx->time_elapsed = NULL;
      return true;
   case PIPE_QUERY_PIPELINE_STATISTICS_SINGLE:
      assert(query->index < ARRAY_SIZE(ctx->pipeline_statistics));
      ctx->pipeline_statistics[query->index] = NULL;
      return true;
   case PIPE_QUERY_TIMESTAMP: {
      /* The timestamp is logically written now. Outstanding batches will MAX
       * in their finish times; if there are no batches, the result is simply
       * the current timestamp.
       */
      agx_add_timestamp_end_query(ctx, query);

      uint64_t *value = query->ptr.cpu;
      *value = agx_get_gpu_timestamp(dev);

      return true;
   }
   default:
      return false;
   }
}

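/* How a query result needs to be post-processed when read back or copied into
 * a buffer.
 */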
enum query_copy_type {
   QUERY_COPY_NORMAL,
   QUERY_COPY_BOOL32,
   QUERY_COPY_BOOL64,
   QUERY_COPY_TIMESTAMP,
   QUERY_COPY_TIME_ELAPSED,
};

static enum query_copy_type
classify_query_type(enum pipe_query_type type)
{
   switch (type) {
   case PIPE_QUERY_OCCLUSION_PREDICATE:
   case PIPE_QUERY_OCCLUSION_PREDICATE_CONSERVATIVE:
      return QUERY_COPY_BOOL32;

   case PIPE_QUERY_SO_OVERFLOW_PREDICATE:
   case PIPE_QUERY_SO_OVERFLOW_ANY_PREDICATE:
      return QUERY_COPY_BOOL64;

   case PIPE_QUERY_TIMESTAMP:
      return QUERY_COPY_TIMESTAMP;

   case PIPE_QUERY_TIME_ELAPSED:
      return QUERY_COPY_TIME_ELAPSED;

   default:
      return QUERY_COPY_NORMAL;
   }
}

static bool
agx_get_query_result(struct pipe_context *pctx, struct pipe_query *pquery,
                     bool wait, union pipe_query_result *vresult)
{
   struct agx_query *query = (struct agx_query *)pquery;
   struct agx_context *ctx = agx_context(pctx);
   struct agx_device *dev = agx_device(pctx->screen);

   /* TODO: Honour `wait` */
   sync_query_writers(ctx, query, "Reading query results");

   uint64_t *ptr = query->ptr.cpu;
   uint64_t value = *ptr;

   switch (classify_query_type(query->type)) {
   case QUERY_COPY_BOOL32:
      vresult->b = value;
      return true;

   case QUERY_COPY_BOOL64:
      vresult->b = value > 0;
      return true;

   case QUERY_COPY_NORMAL:
      vresult->u64 = value;
      return true;

   case QUERY_COPY_TIMESTAMP:
      vresult->u64 = agx_gpu_time_to_ns(dev, value);
      return true;

   case QUERY_COPY_TIME_ELAPSED:
      /* end - begin */
      vresult->u64 = agx_gpu_time_to_ns(dev, ptr[0] - ptr[1]);
      return true;

   default:
      unreachable("Other queries not yet supported");
   }
}

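/* Size in bytes of a result written for the given query value type */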
static unsigned
result_type_size(enum pipe_query_value_type result_type)
{
   return (result_type <= PIPE_QUERY_TYPE_U32) ? 4 : 8;
}

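/* CPU fallback for query-to-buffer copies: read the result (or availability)
 * back, clamp it to the requested type, and write it with pipe_buffer_write.
 */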
static void
agx_get_query_result_resource_cpu(struct agx_context *ctx,
                                  struct agx_query *query,
                                  enum pipe_query_flags flags,
                                  enum pipe_query_value_type result_type,
                                  int index, struct pipe_resource *resource,
                                  unsigned offset)
{
   union pipe_query_result result;
   if (index < 0) {
      /* Availability: the query is available iff no batch is still writing it */
      result.u64 = !is_query_busy(ctx, query);
   } else {
      bool ready =
         agx_get_query_result(&ctx->base, (void *)query, true, &result);

      assert(ready);

      switch (classify_query_type(query->type)) {
      case QUERY_COPY_BOOL32:
      case QUERY_COPY_BOOL64:
         result.u64 = result.b;
         break;
      default:
         break;
      }
   }

   /* Clamp to the result type, as required by the arb_query_buffer_object-qbo
    * tests.
    */
   if (result_type == PIPE_QUERY_TYPE_U32) {
      result.u32 = MIN2(result.u64, u_uintN_max(32));
   } else if (result_type == PIPE_QUERY_TYPE_I32) {
      int64_t x = result.u64;
      x = MAX2(MIN2(x, u_intN_max(32)), u_intN_min(32));
      result.u32 = x;
   }

   pipe_buffer_write(&ctx->base, resource, offset,
                     result_type_size(result_type), &result.u64);
}

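/* Copy a query result into a buffer on the GPU. Returns false if this query
 * must instead be handled by the CPU path.
 */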
static bool
agx_get_query_result_resource_gpu(struct agx_context *ctx,
                                  struct agx_query *query,
                                  enum pipe_query_flags flags,
                                  enum pipe_query_value_type result_type,
                                  int index, struct pipe_resource *prsrc,
                                  unsigned offset)
{
   /* Handle availability queries on the CPU */
   if (index < 0)
      return false;

   /* TODO: timer queries on GPU */
   if (query->type == PIPE_QUERY_TIMESTAMP ||
       query->type == PIPE_QUERY_TIME_ELAPSED)
      return false;

   flush_query_writers(ctx, query, util_str_query_type(query->type, true));

   struct agx_resource *rsrc = agx_resource(prsrc);
   enum query_copy_type copy_type = classify_query_type(query->type);

   struct agx_batch *batch = agx_get_compute_batch(ctx);
   agx_batch_init_state(batch);
   agx_dirty_all(ctx);

   /* Mark the destination range as written by this batch */
   agx_batch_writes_range(batch, rsrc, offset, result_type_size(result_type));

   unsigned bool_size = copy_type == QUERY_COPY_BOOL64   ? 8
                        : copy_type == QUERY_COPY_BOOL32 ? 4
                                                         : 0;

   libagx_copy_query_gl(batch, agx_1d(1), AGX_BARRIER_ALL, query->ptr.gpu,
                        rsrc->bo->va->addr + offset, result_type, bool_size);
   return true;
}

static void
agx_get_query_result_resource(struct pipe_context *pipe, struct pipe_query *q,
                              enum pipe_query_flags flags,
                              enum pipe_query_value_type result_type, int index,
                              struct pipe_resource *resource, unsigned offset)
{
   struct agx_query *query = (struct agx_query *)q;
   struct agx_context *ctx = agx_context(pipe);

   /* Try to copy on the GPU */
   if (!agx_get_query_result_resource_gpu(ctx, query, flags, result_type, index,
                                          resource, offset)) {

      /* Otherwise, fall back to the CPU */
      agx_get_query_result_resource_cpu(ctx, query, flags, result_type, index,
                                        resource, offset);
   }
}

static void
agx_set_active_query_state(struct pipe_context *pipe, bool enable)
{
   struct agx_context *ctx = agx_context(pipe);

   ctx->active_queries = enable;
   ctx->dirty |= AGX_DIRTY_QUERY;
}

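/* Track the batch as a writer of the query: add the backing BO to the batch
 * and stamp the query with the batch's current generation.
 */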
static void
agx_add_query_to_batch(struct agx_batch *batch, struct agx_query *query)
{
   unsigned idx = agx_batch_idx(batch);
   struct agx_bo *bo = is_occlusion(query) ? batch->ctx->oq->bo : query->bo;

   agx_batch_add_bo(batch, bo);
   query->writer_generation[idx] = batch->ctx->batches.generation[idx];
}

void
agx_batch_add_timestamp_query(struct agx_batch *batch, struct agx_query *q)
{
   if (q) {
      agx_add_query_to_batch(batch, q);
      util_dynarray_append(&batch->timestamps, struct agx_ptr, q->ptr);
   }
}

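/* Get an occlusion query's heap index, tracking the batch as a writer */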
uint16_t
agx_get_oq_index(struct agx_batch *batch, struct agx_query *query)
{
   agx_add_query_to_batch(batch, query);
   return agx_oq_index(batch->ctx, query);
}

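/* Get the GPU address of a query's result, tracking the batch as a writer.
 * Returns 0 if there is no query.
 */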
uint64_t
agx_get_query_address(struct agx_batch *batch, struct agx_query *query)
{
   if (query) {
      agx_add_query_to_batch(batch, query);
      return query->ptr.gpu;
   } else {
      return 0;
   }
}

void
agx_finish_batch_queries(struct agx_batch *batch, uint64_t begin_ts,
                         uint64_t end_ts)
{
   /* Remove the batch as a writer from all queries by incrementing the
    * batch's generation.
    */
   batch->ctx->batches.generation[agx_batch_idx(batch)]++;

   /* Write out timestamps */
   util_dynarray_foreach(&batch->timestamps, struct agx_ptr, it) {
      uint64_t *ptr = it->cpu;

      ptr[0] = MAX2(ptr[0], end_ts);
      ptr[1] = MIN2(ptr[1], begin_ts);
   }
}

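/* Add a value to a query result from the CPU, syncing any GPU writers first */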
void
agx_query_increment_cpu(struct agx_context *ctx, struct agx_query *query,
                        uint64_t increment)
{
   if (!query)
      return;

   sync_query_writers(ctx, query, "CPU query increment");

   uint64_t *value = query->ptr.cpu;
   *value += increment;
}

static void
agx_render_condition(struct pipe_context *pipe, struct pipe_query *query,
                     bool condition, enum pipe_render_cond_flag mode)
{
   struct agx_context *ctx = agx_context(pipe);

   ctx->cond_query = query;
   ctx->cond_cond = condition;
   ctx->cond_mode = mode;
}

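/* Evaluate the render condition on the CPU, returning true if rendering
 * should proceed.
 */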
bool
agx_render_condition_check_inner(struct agx_context *ctx)
{
   assert(ctx->cond_query != NULL && "precondition");

   perf_debug_ctx(ctx, "Implementing conditional rendering on the CPU");

   union pipe_query_result res = {0};
   bool wait = ctx->cond_mode != PIPE_RENDER_COND_NO_WAIT &&
               ctx->cond_mode != PIPE_RENDER_COND_BY_REGION_NO_WAIT;

   struct pipe_query *pq = (struct pipe_query *)ctx->cond_query;

   if (agx_get_query_result(&ctx->base, pq, wait, &res))
      return res.u64 != ctx->cond_cond;

   return true;
}

void
agx_init_query_functions(struct pipe_context *pctx)
{
   pctx->create_query = agx_create_query;
   pctx->destroy_query = agx_destroy_query;
   pctx->begin_query = agx_begin_query;
   pctx->end_query = agx_end_query;
   pctx->get_query_result = agx_get_query_result;
   pctx->get_query_result_resource = agx_get_query_result_resource;
   pctx->set_active_query_state = agx_set_active_query_state;
   pctx->render_condition = agx_render_condition;

   /* By default queries are active */
   agx_context(pctx)->active_queries = true;
}