/**************************************************************************
 *
 * Copyright 2007 VMware, Inc.
 * Copyright 2010 VMware, Inc.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL THE AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

/* Authors:
 *    Keith Whitwell, Qicheng Christopher Li, Brian Paul
 */

#include "draw/draw_context.h"
#include "pipe/p_defines.h"
#include "util/u_memory.h"
#include "util/os_time.h"
#include "lp_context.h"
#include "lp_flush.h"
#include "lp_fence.h"
#include "lp_query.h"
#include "lp_screen.h"
#include "lp_state.h"
#include "lp_rast.h"

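/** Cast wrapper: retrieve the llvmpipe-private query struct. */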
static struct llvmpipe_query *
llvmpipe_query(struct pipe_query *p)
{
   return (struct llvmpipe_query *) p;
}

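/**
 * Allocate a new query object (pipe_context::create_query hook).
 */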
static struct pipe_query *
llvmpipe_create_query(struct pipe_context *pipe,
                      unsigned type,
                      unsigned index)
{
   assert(type < PIPE_QUERY_TYPES);

   struct llvmpipe_query *pq = CALLOC_STRUCT(llvmpipe_query);
   if (pq) {
      pq->type = type;
      pq->index = index;
   }

   return (struct pipe_query *) pq;
}

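/**
 * Destroy a query, first waiting for any scene that references it to
 * complete (pipe_context::destroy_query hook).
 */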
static void
llvmpipe_destroy_query(struct pipe_context *pipe, struct pipe_query *q)
{
   struct llvmpipe_query *pq = llvmpipe_query(q);

   /* Ideally we would refcount queries so that a query isn't destroyed
    * until the last scene referencing it has finished.
    */
   if (pq->fence) {
      if (!lp_fence_issued(pq->fence))
         llvmpipe_flush(pipe, NULL, __func__);

      if (!lp_fence_signalled(pq->fence))
         lp_fence_wait(pq->fence);

      lp_fence_reference(&pq->fence, NULL);
   }

   FREE(pq);
}

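/**
 * Fetch a query's result, combining the per-rasterizer-thread counters.
 * Returns false if 'wait' is false and the result isn't ready yet.
 */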
static bool
llvmpipe_get_query_result(struct pipe_context *pipe,
                          struct pipe_query *q,
                          bool wait,
                          union pipe_query_result *result)
{
   const struct llvmpipe_screen *screen = llvmpipe_screen(pipe->screen);
   const unsigned num_threads = MAX2(1, screen->num_threads);
   struct llvmpipe_query *pq = llvmpipe_query(q);

   if (pq->fence) {
      /* only have a fence if there was a scene */
      if (!lp_fence_signalled(pq->fence)) {
         if (!lp_fence_issued(pq->fence))
            llvmpipe_flush(pipe, NULL, __func__);

         if (!wait)
            return false;

         lp_fence_wait(pq->fence);
      }
   }

   /* Always initialize the first 64-bit result word to zero since some
    * callers don't consider whether the result is actually a 1-byte or
    * 4-byte quantity.
    */
   result->u64 = 0;

   /* Combine the per-thread results */
   switch (pq->type) {
   case PIPE_QUERY_OCCLUSION_COUNTER:
      {
         uint64_t sum = 0;
         for (unsigned i = 0; i < num_threads; i++) {
            sum += pq->end[i];
         }
         result->u64 = sum;
      }
      break;
   case PIPE_QUERY_OCCLUSION_PREDICATE:
   case PIPE_QUERY_OCCLUSION_PREDICATE_CONSERVATIVE:
      result->b = false;
      for (unsigned i = 0; i < num_threads; i++) {
         /* safer (still not guaranteed) when there's an overflow */
         if (pq->end[i] > 0) {
            result->b = true;
            break;
         }
      }
      break;
   case PIPE_QUERY_TIMESTAMP:
      {
         uint64_t max_time = 0;
         for (unsigned i = 0; i < num_threads; i++) {
            max_time = MAX2(max_time, pq->end[i]);
         }
         result->u64 = max_time;
      }
      break;
   case PIPE_QUERY_TIME_ELAPSED:
      {
         uint64_t start = UINT64_MAX, end = 0;
         for (unsigned i = 0; i < num_threads; i++) {
            if (pq->start[i]) {
               start = MIN2(start, pq->start[i]);
            }
            if (pq->end[i]) {
               end = MAX2(end, pq->end[i]);
            }
         }
         result->u64 = end - start;
      }
      break;
   case PIPE_QUERY_TIMESTAMP_DISJOINT:
      /* os_time_get_nano() returns nanoseconds */
      result->timestamp_disjoint.frequency = UINT64_C(1000000000);
      result->timestamp_disjoint.disjoint = false;
      break;
   case PIPE_QUERY_GPU_FINISHED:
      result->b = true;
      break;
   case PIPE_QUERY_PRIMITIVES_GENERATED:
      result->u64 = pq->num_primitives_generated[0];
      break;
   case PIPE_QUERY_PRIMITIVES_EMITTED:
      result->u64 = pq->num_primitives_written[0];
      break;
   case PIPE_QUERY_SO_OVERFLOW_ANY_PREDICATE:
      result->b = false;
      for (unsigned s = 0; s < PIPE_MAX_VERTEX_STREAMS; s++) {
         if (pq->num_primitives_generated[s] > pq->num_primitives_written[s]) {
            result->b = true;
            break;
         }
      }
      break;
   case PIPE_QUERY_SO_OVERFLOW_PREDICATE:
      result->b = pq->num_primitives_generated[0] > pq->num_primitives_written[0];
      break;
   case PIPE_QUERY_SO_STATISTICS:
      result->so_statistics.num_primitives_written = pq->num_primitives_written[0];
      result->so_statistics.primitives_storage_needed = pq->num_primitives_generated[0];
      break;
   case PIPE_QUERY_PIPELINE_STATISTICS:
      {
         /* only ps_invocations are per-bin/thread */
         uint64_t sum = 0;
         for (unsigned i = 0; i < num_threads; i++) {
            sum += pq->end[i];
         }
         /* The FS/PS operates on a block of pixels at a time.  The counter
          * is incremented per block so we multiply by pixels per block here.
          * This will not be a pixel-exact result.
          */
         pq->stats.ps_invocations =
            sum * LP_RASTER_BLOCK_SIZE * LP_RASTER_BLOCK_SIZE;
         result->pipeline_statistics = pq->stats;
      }
      break;
   default:
      assert(0);
      break;
   }

   return true;
}

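/**
 * Write a query's result (or, for index == -1, its availability) into a
 * buffer resource at the given offset
 * (pipe_context::get_query_result_resource hook).
 */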
static void
llvmpipe_get_query_result_resource(struct pipe_context *pipe,
                                   struct pipe_query *q,
                                   enum pipe_query_flags flags,
                                   enum pipe_query_value_type result_type,
                                   int index,
                                   struct pipe_resource *resource,
                                   unsigned offset)
{
   const struct llvmpipe_screen *screen = llvmpipe_screen(pipe->screen);
   const unsigned num_threads = MAX2(1, screen->num_threads);
   const struct llvmpipe_query *pq = llvmpipe_query(q);
   const struct llvmpipe_resource *lpr = llvmpipe_resource(resource);
   uint64_t ready;

   if (pq->fence) {
      /* only have a fence if there was a scene */
      if (!lp_fence_signalled(pq->fence)) {
         if (!lp_fence_issued(pq->fence))
            llvmpipe_flush(pipe, NULL, __func__);

         if (flags & PIPE_QUERY_WAIT)
            lp_fence_wait(pq->fence);
      }
      ready = lp_fence_signalled(pq->fence);
   } else {
      ready = 1;
   }

   uint64_t value = 0, value2 = 0;
   unsigned num_values = 1;
   if (index == -1) {
      value = ready;
   } else {
      /* don't write a value if fence hasn't signalled and partial isn't set */
      if (!ready && !(flags & PIPE_QUERY_PARTIAL))
         return;

      switch (pq->type) {
      case PIPE_QUERY_OCCLUSION_COUNTER:
         for (unsigned i = 0; i < num_threads; i++) {
            value += pq->end[i];
         }
         break;
      case PIPE_QUERY_OCCLUSION_PREDICATE:
      case PIPE_QUERY_OCCLUSION_PREDICATE_CONSERVATIVE:
         for (unsigned i = 0; i < num_threads; i++) {
            /* safer (still not guaranteed) when there's an overflow */
            value = value || pq->end[i];
         }
         break;
      case PIPE_QUERY_PRIMITIVES_GENERATED:
         value = pq->num_primitives_generated[0];
         break;
      case PIPE_QUERY_PRIMITIVES_EMITTED:
         value = pq->num_primitives_written[0];
         break;
      case PIPE_QUERY_TIMESTAMP:
         for (unsigned i = 0; i < num_threads; i++) {
            if (pq->end[i] > value) {
               value = pq->end[i];
            }
         }
         break;
      case PIPE_QUERY_TIME_ELAPSED: {
         uint64_t start = UINT64_MAX, end = 0;
         for (unsigned i = 0; i < num_threads; i++) {
            if (pq->start[i] && pq->start[i] < start)
               start = pq->start[i];
            if (pq->end[i] && pq->end[i] > end)
               end = pq->end[i];
         }
         value = end - start;
         break;
      }
      case PIPE_QUERY_SO_STATISTICS:
         value = pq->num_primitives_written[0];
         value2 = pq->num_primitives_generated[0];
         num_values = 2;
         break;
      case PIPE_QUERY_SO_OVERFLOW_ANY_PREDICATE:
         value = 0;
         for (unsigned s = 0; s < PIPE_MAX_VERTEX_STREAMS; s++)
            value |= (pq->num_primitives_generated[s] > pq->num_primitives_written[s]);
         break;
      case PIPE_QUERY_SO_OVERFLOW_PREDICATE:
         value = (pq->num_primitives_generated[0] > pq->num_primitives_written[0]);
         break;
      case PIPE_QUERY_PIPELINE_STATISTICS:
         switch ((enum pipe_statistics_query_index)index) {
         case PIPE_STAT_QUERY_IA_VERTICES:
            value = pq->stats.ia_vertices;
            break;
         case PIPE_STAT_QUERY_IA_PRIMITIVES:
            value = pq->stats.ia_primitives;
            break;
         case PIPE_STAT_QUERY_VS_INVOCATIONS:
            value = pq->stats.vs_invocations;
            break;
         case PIPE_STAT_QUERY_GS_INVOCATIONS:
            value = pq->stats.gs_invocations;
            break;
         case PIPE_STAT_QUERY_GS_PRIMITIVES:
            value = pq->stats.gs_primitives;
            break;
         case PIPE_STAT_QUERY_C_INVOCATIONS:
            value = pq->stats.c_invocations;
            break;
         case PIPE_STAT_QUERY_C_PRIMITIVES:
            value = pq->stats.c_primitives;
            break;
         case PIPE_STAT_QUERY_PS_INVOCATIONS:
            value = 0;
            for (unsigned i = 0; i < num_threads; i++) {
               value += pq->end[i];
            }
            value *= LP_RASTER_BLOCK_SIZE * LP_RASTER_BLOCK_SIZE;
            break;
         case PIPE_STAT_QUERY_HS_INVOCATIONS:
            value = pq->stats.hs_invocations;
            break;
         case PIPE_STAT_QUERY_DS_INVOCATIONS:
            value = pq->stats.ds_invocations;
            break;
         case PIPE_STAT_QUERY_CS_INVOCATIONS:
            value = pq->stats.cs_invocations;
            break;
         case PIPE_STAT_QUERY_TS_INVOCATIONS:
            value = pq->stats.ts_invocations;
            break;
         case PIPE_STAT_QUERY_MS_INVOCATIONS:
            value = pq->stats.ms_invocations;
            break;
         }
         break;
      default:
         fprintf(stderr, "Unknown query type %d\n", pq->type);
         break;
      }
   }

   uint8_t *dst = (uint8_t *) lpr->data + offset;

   /* Write 1 or 2 result values */
   for (unsigned i = 0; i < num_values; i++) {
      if (i == 1) {
         value = value2;
         /* advance dst pointer by 4 or 8 bytes */
         dst += (result_type == PIPE_QUERY_TYPE_I64 ||
                 result_type == PIPE_QUERY_TYPE_U64) ? 8 : 4;
      }
      switch (result_type) {
      case PIPE_QUERY_TYPE_I32: {
         int32_t *iptr = (int32_t *) dst;
         *iptr = (int32_t) MIN2(value, INT32_MAX);
         break;
      }
      case PIPE_QUERY_TYPE_U32: {
         uint32_t *uptr = (uint32_t *) dst;
         *uptr = (uint32_t) MIN2(value, UINT32_MAX);
         break;
      }
      case PIPE_QUERY_TYPE_I64: {
         int64_t *iptr = (int64_t *) dst;
         *iptr = (int64_t) value;
         break;
      }
      case PIPE_QUERY_TYPE_U64: {
         uint64_t *uptr = (uint64_t *) dst;
         *uptr = (uint64_t) value;
         break;
      }
      }
   }
}

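/**
 * Begin a query: snapshot the current counters so that end_query can
 * compute per-query deltas (pipe_context::begin_query hook).
 */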
static bool
llvmpipe_begin_query(struct pipe_context *pipe, struct pipe_query *q)
{
   struct llvmpipe_context *llvmpipe = llvmpipe_context(pipe);
   struct llvmpipe_query *pq = llvmpipe_query(q);

   /* Check if the query is already in the scene.  If so, we need to
    * flush the scene now.  Real apps shouldn't re-use a query in a
    * frame of rendering.
    */
   if (pq->fence && !lp_fence_issued(pq->fence)) {
      llvmpipe_finish(pipe, __func__);
   }

   memset(pq->start, 0, sizeof(pq->start));
   memset(pq->end, 0, sizeof(pq->end));
   lp_setup_begin_query(llvmpipe->setup, pq);

   switch (pq->type) {
   case PIPE_QUERY_PRIMITIVES_EMITTED:
      pq->num_primitives_written[0] = llvmpipe->so_stats[pq->index].num_primitives_written;
      break;
   case PIPE_QUERY_PRIMITIVES_GENERATED:
      pq->num_primitives_generated[0] = llvmpipe->so_stats[pq->index].primitives_storage_needed;
      llvmpipe->active_primgen_queries++;
      break;
   case PIPE_QUERY_SO_STATISTICS:
      pq->num_primitives_written[0] = llvmpipe->so_stats[pq->index].num_primitives_written;
      pq->num_primitives_generated[0] = llvmpipe->so_stats[pq->index].primitives_storage_needed;
      break;
   case PIPE_QUERY_SO_OVERFLOW_ANY_PREDICATE:
      for (unsigned s = 0; s < PIPE_MAX_VERTEX_STREAMS; s++) {
         pq->num_primitives_written[s] = llvmpipe->so_stats[s].num_primitives_written;
         pq->num_primitives_generated[s] = llvmpipe->so_stats[s].primitives_storage_needed;
      }
      break;
   case PIPE_QUERY_SO_OVERFLOW_PREDICATE:
      pq->num_primitives_written[0] = llvmpipe->so_stats[pq->index].num_primitives_written;
      pq->num_primitives_generated[0] = llvmpipe->so_stats[pq->index].primitives_storage_needed;
      break;
   case PIPE_QUERY_PIPELINE_STATISTICS:
      /* reset our cache */
      if (llvmpipe->active_statistics_queries == 0) {
         memset(&llvmpipe->pipeline_statistics, 0,
                sizeof(llvmpipe->pipeline_statistics));
      }
      memcpy(&pq->stats, &llvmpipe->pipeline_statistics, sizeof(pq->stats));
      llvmpipe->active_statistics_queries++;
      break;
   case PIPE_QUERY_OCCLUSION_COUNTER:
   case PIPE_QUERY_OCCLUSION_PREDICATE:
   case PIPE_QUERY_OCCLUSION_PREDICATE_CONSERVATIVE:
      llvmpipe->active_occlusion_queries++;
      llvmpipe->dirty |= LP_NEW_OCCLUSION_QUERY;
      break;
   default:
      break;
   }
   return true;
}

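/**
 * End a query: convert the counters snapshotted at begin_query time into
 * final per-query deltas (pipe_context::end_query hook).
 */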
static bool
llvmpipe_end_query(struct pipe_context *pipe, struct pipe_query *q)
{
   struct llvmpipe_context *llvmpipe = llvmpipe_context(pipe);
   struct llvmpipe_query *pq = llvmpipe_query(q);

   lp_setup_end_query(llvmpipe->setup, pq);

   switch (pq->type) {
   case PIPE_QUERY_PRIMITIVES_EMITTED:
      pq->num_primitives_written[0] =
         llvmpipe->so_stats[pq->index].num_primitives_written - pq->num_primitives_written[0];
      break;
   case PIPE_QUERY_PRIMITIVES_GENERATED:
      assert(llvmpipe->active_primgen_queries);
      llvmpipe->active_primgen_queries--;
      pq->num_primitives_generated[0] =
         llvmpipe->so_stats[pq->index].primitives_storage_needed - pq->num_primitives_generated[0];
      break;
   case PIPE_QUERY_SO_STATISTICS:
      pq->num_primitives_written[0] =
         llvmpipe->so_stats[pq->index].num_primitives_written - pq->num_primitives_written[0];
      pq->num_primitives_generated[0] =
         llvmpipe->so_stats[pq->index].primitives_storage_needed - pq->num_primitives_generated[0];
      break;
   case PIPE_QUERY_SO_OVERFLOW_ANY_PREDICATE:
      for (unsigned s = 0; s < PIPE_MAX_VERTEX_STREAMS; s++) {
         pq->num_primitives_written[s] =
            llvmpipe->so_stats[s].num_primitives_written - pq->num_primitives_written[s];
         pq->num_primitives_generated[s] =
            llvmpipe->so_stats[s].primitives_storage_needed - pq->num_primitives_generated[s];
      }
      break;
   case PIPE_QUERY_SO_OVERFLOW_PREDICATE:
      pq->num_primitives_written[0] =
         llvmpipe->so_stats[pq->index].num_primitives_written - pq->num_primitives_written[0];
      pq->num_primitives_generated[0] =
         llvmpipe->so_stats[pq->index].primitives_storage_needed - pq->num_primitives_generated[0];
      break;
   case PIPE_QUERY_PIPELINE_STATISTICS:
      pq->stats.ia_vertices =
         llvmpipe->pipeline_statistics.ia_vertices - pq->stats.ia_vertices;
      pq->stats.ia_primitives =
         llvmpipe->pipeline_statistics.ia_primitives - pq->stats.ia_primitives;
      pq->stats.vs_invocations =
         llvmpipe->pipeline_statistics.vs_invocations - pq->stats.vs_invocations;
      pq->stats.gs_invocations =
         llvmpipe->pipeline_statistics.gs_invocations - pq->stats.gs_invocations;
      pq->stats.gs_primitives =
         llvmpipe->pipeline_statistics.gs_primitives - pq->stats.gs_primitives;
      pq->stats.c_invocations =
         llvmpipe->pipeline_statistics.c_invocations - pq->stats.c_invocations;
      pq->stats.c_primitives =
         llvmpipe->pipeline_statistics.c_primitives - pq->stats.c_primitives;
      pq->stats.ps_invocations =
         llvmpipe->pipeline_statistics.ps_invocations - pq->stats.ps_invocations;
      pq->stats.cs_invocations =
         llvmpipe->pipeline_statistics.cs_invocations - pq->stats.cs_invocations;
      pq->stats.hs_invocations =
         llvmpipe->pipeline_statistics.hs_invocations - pq->stats.hs_invocations;
      pq->stats.ds_invocations =
         llvmpipe->pipeline_statistics.ds_invocations - pq->stats.ds_invocations;
      pq->stats.ts_invocations =
         llvmpipe->pipeline_statistics.ts_invocations - pq->stats.ts_invocations;
      pq->stats.ms_invocations =
         llvmpipe->pipeline_statistics.ms_invocations - pq->stats.ms_invocations;
      llvmpipe->active_statistics_queries--;
      break;
   case PIPE_QUERY_OCCLUSION_COUNTER:
   case PIPE_QUERY_OCCLUSION_PREDICATE:
   case PIPE_QUERY_OCCLUSION_PREDICATE_CONSERVATIVE:
      assert(llvmpipe->active_occlusion_queries);
      llvmpipe->active_occlusion_queries--;
      llvmpipe->dirty |= LP_NEW_OCCLUSION_QUERY;
      break;
   default:
      break;
   }

   return true;
}

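/**
 * Evaluate the conditional-rendering predicate.  Returns true if drawing
 * should proceed, false if it should be skipped.
 */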
bool
llvmpipe_check_render_cond(struct llvmpipe_context *lp)
{
   struct pipe_context *pipe = &lp->pipe;

   if (lp->render_cond_buffer) {
      uint32_t data = *(uint32_t *)((char *)lp->render_cond_buffer->data +
                                    lp->render_cond_offset);
      return (!data) == lp->render_cond_cond;
   }
   if (!lp->render_cond_query)
      return true; /* no query predicate, draw normally */

   bool wait = (lp->render_cond_mode == PIPE_RENDER_COND_WAIT ||
                lp->render_cond_mode == PIPE_RENDER_COND_BY_REGION_WAIT);

   uint64_t result;
   bool b = pipe->get_query_result(pipe, lp->render_cond_query, wait,
                                   (void *)&result);
   if (b)
      return ((!result) == lp->render_cond_cond);
   else
      return true;
}

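/**
 * Globally enable/disable queries (pipe_context::set_active_query_state).
 */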
static void
llvmpipe_set_active_query_state(struct pipe_context *pipe, bool enable)
{
   struct llvmpipe_context *llvmpipe = llvmpipe_context(pipe);

   llvmpipe->queries_disabled = !enable;
   /* for occlusion queries we need to regenerate the fragment shader */
   llvmpipe->dirty |= LP_NEW_OCCLUSION_QUERY;
}

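/**
 * Plug the query functions into the pipe_context vtable.
 */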
void
llvmpipe_init_query_funcs(struct llvmpipe_context *llvmpipe)
{
   llvmpipe->pipe.create_query = llvmpipe_create_query;
   llvmpipe->pipe.destroy_query = llvmpipe_destroy_query;
   llvmpipe->pipe.begin_query = llvmpipe_begin_query;
   llvmpipe->pipe.end_query = llvmpipe_end_query;
   llvmpipe->pipe.get_query_result = llvmpipe_get_query_result;
   llvmpipe->pipe.get_query_result_resource = llvmpipe_get_query_result_resource;
   llvmpipe->pipe.set_active_query_state = llvmpipe_set_active_query_state;
}
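
/* Usage sketch (illustrative only, not part of this driver): once these
 * hooks are installed, a state tracker counts passing samples roughly
 * like so, where 'ctx' is a pipe_context created on llvmpipe:
 *
 *    union pipe_query_result result;
 *    struct pipe_query *q =
 *       ctx->create_query(ctx, PIPE_QUERY_OCCLUSION_COUNTER, 0);
 *    ctx->begin_query(ctx, q);
 *    ...issue draws...
 *    ctx->end_query(ctx, q);
 *    if (ctx->get_query_result(ctx, q, true, &result))   // wait = true
 *       ...result.u64 is the number of samples that passed...
 *    ctx->destroy_query(ctx, q);
 */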