/*
 * Mesa 3-D graphics library
 *
 * Copyright (C) 2012-2013 LunarG, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included
 * in all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 *
 * Authors:
 *    Chia-I Wu <olv@lunarg.com>
 */

#include "util/u_prim.h"
#include "core/intel_winsys.h"

#include "ilo_render.h"
#include "ilo_blit.h"
#include "ilo_context.h"
#include "ilo_cp.h"
#include "ilo_query.h"
#include "ilo_shader.h"
#include "ilo_state.h"
#include "ilo_draw.h"

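/**
 * Make the draw engine the owner of the render ring.
 */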
static void
ilo_draw_set_owner(struct ilo_context *ilo)
{
   ilo_cp_set_owner(ilo->cp, INTEL_RING_RENDER, &ilo->draw.cp_owner);
}

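/**
 * Convert a raw hardware timestamp to nanoseconds, using only the lower 32
 * bits; see ilo_get_timestamp().
 */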
static uint64_t
query_timestamp_to_ns(const struct ilo_context *ilo, uint64_t timestamp)
{
   /* see ilo_get_timestamp() */
   return (timestamp & 0xffffffff) * 80;
}

/**
 * Process the bo and accumulate the result.  The bo is emptied.
 */
static void
query_process_bo(const struct ilo_context *ilo, struct ilo_query *q)
{
   const uint64_t *vals;
   uint64_t tmp;
   int i;

   if (!q->used)
      return;

   vals = intel_bo_map(q->bo, false);
   if (!vals) {
      q->used = 0;
      return;
   }

   switch (q->type) {
   case PIPE_QUERY_OCCLUSION_COUNTER:
   case PIPE_QUERY_OCCLUSION_PREDICATE:
   case PIPE_QUERY_TIME_ELAPSED:
   case PIPE_QUERY_PRIMITIVES_GENERATED:
   case PIPE_QUERY_PRIMITIVES_EMITTED:
      assert(q->stride == sizeof(*vals) * 2);

      tmp = 0;
      for (i = 0; i < q->used; i++)
         tmp += vals[2 * i + 1] - vals[2 * i];

      if (q->type == PIPE_QUERY_TIME_ELAPSED)
         tmp = query_timestamp_to_ns(ilo, tmp);

      q->result.u64 += tmp;
      break;
   case PIPE_QUERY_TIMESTAMP:
      assert(q->stride == sizeof(*vals));

      q->result.u64 = query_timestamp_to_ns(ilo, vals[q->used - 1]);
      break;
   case PIPE_QUERY_PIPELINE_STATISTICS:
      assert(q->stride == sizeof(*vals) * 22);

      for (i = 0; i < q->used; i++) {
         struct pipe_query_data_pipeline_statistics *stats =
            &q->result.pipeline_statistics;
         const uint64_t *begin = vals + 22 * i;
         const uint64_t *end = begin + 11;

         stats->ia_vertices    += end[0] - begin[0];
         stats->ia_primitives  += end[1] - begin[1];
         stats->vs_invocations += end[2] - begin[2];
         stats->gs_invocations += end[3] - begin[3];
         stats->gs_primitives  += end[4] - begin[4];
         stats->c_invocations  += end[5] - begin[5];
         stats->c_primitives   += end[6] - begin[6];
         stats->ps_invocations += end[7] - begin[7];
         stats->hs_invocations += end[8] - begin[8];
         stats->ds_invocations += end[9] - begin[9];
         stats->cs_invocations += end[10] - begin[10];
      }
      break;
   default:
      break;
   }

   intel_bo_unmap(q->bo);

   q->used = 0;
}

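/**
 * Write the beginning value of the query to its bo, processing the bo first
 * when it is full.
 */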
static void
query_begin_bo(struct ilo_context *ilo, struct ilo_query *q)
{
   /* bo is full */
   if (q->used >= q->count)
      query_process_bo(ilo, q);

   /* write the beginning value to the bo */
   if (q->in_pairs)
      ilo_render_emit_query(ilo->render, q, q->stride * q->used);
}

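/**
 * Write the ending value of the query to its bo and advance to the next
 * slot.
 */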
static void
query_end_bo(struct ilo_context *ilo, struct ilo_query *q)
{
   uint32_t offset;

   assert(q->used < q->count);

   offset = q->stride * q->used;
   if (q->in_pairs)
      offset += q->stride >> 1;

   q->used++;

   /* write the ending value to the bo */
   ilo_render_emit_query(ilo->render, q, offset);
}

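/**
 * Initialize a query for use with the 3D pipeline: decide the value stride
 * and whether values are written in begin/end pairs, and allocate a bo large
 * enough to hold q->count values.
 */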
bool
ilo_init_draw_query(struct ilo_context *ilo, struct ilo_query *q)
{
   unsigned bo_size;

   switch (q->type) {
   case PIPE_QUERY_OCCLUSION_COUNTER:
   case PIPE_QUERY_OCCLUSION_PREDICATE:
   case PIPE_QUERY_TIME_ELAPSED:
   case PIPE_QUERY_PRIMITIVES_GENERATED:
   case PIPE_QUERY_PRIMITIVES_EMITTED:
      q->stride = sizeof(uint64_t);
      q->in_pairs = true;
      break;
   case PIPE_QUERY_TIMESTAMP:
      q->stride = sizeof(uint64_t);
      q->in_pairs = false;
      break;
   case PIPE_QUERY_PIPELINE_STATISTICS:
      q->stride = sizeof(uint64_t) * 11;
      q->in_pairs = true;
      break;
   default:
      return false;
   }

   q->cmd_len = ilo_render_get_query_len(ilo->render, q->type);

   /* double cmd_len and stride if in pairs */
   q->cmd_len <<= q->in_pairs;
   q->stride <<= q->in_pairs;

   bo_size = (q->stride > 4096) ? q->stride : 4096;
   q->bo = intel_winsys_alloc_bo(ilo->winsys, "query", bo_size, false);
   if (!q->bo)
      return false;

   q->count = bo_size / q->stride;

   return true;
}

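/**
 * Begin a query.  Enough command parser space is reserved so that the query
 * can always be paused or ended.
 */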
void
ilo_begin_draw_query(struct ilo_context *ilo, struct ilo_query *q)
{
   ilo_draw_set_owner(ilo);

   /* need to submit first */
   if (!ilo_builder_validate(&ilo->cp->builder, 1, &q->bo) ||
         ilo_cp_space(ilo->cp) < q->cmd_len) {
      ilo_cp_submit(ilo->cp, "out of aperture or space");

      assert(ilo_builder_validate(&ilo->cp->builder, 1, &q->bo));
      assert(ilo_cp_space(ilo->cp) >= q->cmd_len);

      ilo_draw_set_owner(ilo);
   }

   /* reserve the space for ending/pausing the query */
   ilo->draw.cp_owner.reserve += q->cmd_len >> q->in_pairs;

   query_begin_bo(ilo, q);

   if (q->in_pairs)
      list_add(&q->list, &ilo->draw.queries);
}

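/**
 * End a query, reclaiming the space reserved for pausing/ending it.
 */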
void
ilo_end_draw_query(struct ilo_context *ilo, struct ilo_query *q)
{
   ilo_draw_set_owner(ilo);

   /* reclaim the reserved space */
   ilo->draw.cp_owner.reserve -= q->cmd_len >> q->in_pairs;
   assert(ilo->draw.cp_owner.reserve >= 0);

   query_end_bo(ilo, q);

   list_delinit(&q->list);
}

/**
 * Process the raw query data.
 */
void
ilo_process_draw_query(struct ilo_context *ilo, struct ilo_query *q)
{
   query_process_bo(ilo, q);
}

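/**
 * Called when the draw engine gains ownership of the command parser.  Resume
 * the paused queries, submitting first if there is not enough space.
 */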
static void
ilo_draw_own_cp(struct ilo_cp *cp, void *data)
{
   struct ilo_context *ilo = data;

   /* multiply by 2 for both resuming and pausing */
   if (ilo_cp_space(ilo->cp) < ilo->draw.cp_owner.reserve * 2) {
      ilo_cp_submit(ilo->cp, "out of space");
      assert(ilo_cp_space(ilo->cp) >= ilo->draw.cp_owner.reserve * 2);
   }

   while (true) {
      struct ilo_builder_snapshot snapshot;
      struct ilo_query *q;

      ilo_builder_batch_snapshot(&ilo->cp->builder, &snapshot);

      /* resume queries */
      LIST_FOR_EACH_ENTRY(q, &ilo->draw.queries, list)
         query_begin_bo(ilo, q);

      if (!ilo_builder_validate(&ilo->cp->builder, 0, NULL)) {
         ilo_builder_batch_restore(&ilo->cp->builder, &snapshot);

         if (ilo_builder_batch_used(&ilo->cp->builder)) {
            ilo_cp_submit(ilo->cp, "out of aperture");
            continue;
         }
      }

      break;
   }

   assert(ilo_cp_space(ilo->cp) >= ilo->draw.cp_owner.reserve);
}

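/**
 * Called when the draw engine is about to release the command parser.  Pause
 * the active queries using the reserved space.
 */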
static void
ilo_draw_release_cp(struct ilo_cp *cp, void *data)
{
   struct ilo_context *ilo = data;
   struct ilo_query *q;

   assert(ilo_cp_space(ilo->cp) >= ilo->draw.cp_owner.reserve);

   /* pause queries */
   LIST_FOR_EACH_ENTRY(q, &ilo->draw.queries, list)
      query_end_bo(ilo, q);
}

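/**
 * Emit the states and commands needed by a draw call, submitting and
 * retrying when the batch runs out of aperture.  Return false when the
 * commands fail to validate even with an empty batch.
 */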
static bool
draw_vbo(struct ilo_context *ilo, const struct ilo_state_vector *vec)
{
   bool need_flush = false;
   bool success = true;
   int max_len, before_space;

   /* on Gen7 and Gen7.5, we need SOL_RESET to reset the SO write offsets */
   if (ilo_dev_gen(ilo->dev) >= ILO_GEN(7) &&
       ilo_dev_gen(ilo->dev) <= ILO_GEN(7.5) &&
       (vec->dirty & ILO_DIRTY_SO) && vec->so.enabled &&
       !vec->so.append_bitmask) {
      ilo_cp_submit(ilo->cp, "SOL_RESET");
      ilo_cp_set_one_off_flags(ilo->cp, INTEL_EXEC_GEN7_SOL_RESET);
   }

   if (ilo_builder_batch_used(&ilo->cp->builder)) {
      /*
       * Without a better tracking mechanism, when the framebuffer changes, we
       * have to assume that the old framebuffer may be sampled from.  If that
       * happens in the middle of a batch buffer, we need to insert manual
       * flushes.
       */
      need_flush = (vec->dirty & ILO_DIRTY_FB);

      /* the same goes for SO target changes */
      need_flush |= (vec->dirty & ILO_DIRTY_SO);
   }

   ilo_draw_set_owner(ilo);

   /* make sure there is enough room first */
   max_len = ilo_render_get_draw_len(ilo->render, vec);
   if (need_flush)
      max_len += ilo_render_get_flush_len(ilo->render);

   if (max_len > ilo_cp_space(ilo->cp)) {
      ilo_cp_submit(ilo->cp, "out of space");
      need_flush = false;
      assert(max_len <= ilo_cp_space(ilo->cp));
   }

   /* space available before emission */
   before_space = ilo_cp_space(ilo->cp);

   if (need_flush)
      ilo_render_emit_flush(ilo->render);

   while (true) {
      struct ilo_builder_snapshot snapshot;

      ilo_builder_batch_snapshot(&ilo->cp->builder, &snapshot);

      ilo_render_emit_draw(ilo->render, vec);

      if (!ilo_builder_validate(&ilo->cp->builder, 0, NULL)) {
         ilo_builder_batch_restore(&ilo->cp->builder, &snapshot);

         /* flush and try again */
         if (ilo_builder_batch_used(&ilo->cp->builder)) {
            ilo_cp_submit(ilo->cp, "out of aperture");
            continue;
         }

         success = false;
      }

      break;
   }

   /* sanity check size estimation */
   assert(before_space - ilo_cp_space(ilo->cp) <= max_len);

   return success;
}

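/**
 * Draw a RECTLIST for the blitter, with the pipeline flushed both before and
 * after, as the workarounds below require.
 */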
void
ilo_draw_rectlist(struct ilo_context *ilo)
{
   int max_len, before_space;
   bool need_flush;

   need_flush = ilo_builder_batch_used(&ilo->cp->builder);

   ilo_draw_set_owner(ilo);

   max_len = ilo_render_get_rectlist_len(ilo->render, ilo->blitter);
   max_len += ilo_render_get_flush_len(ilo->render) * 2;

   if (max_len > ilo_cp_space(ilo->cp)) {
      ilo_cp_submit(ilo->cp, "out of space");
      need_flush = false;
      assert(max_len <= ilo_cp_space(ilo->cp));
   }

   before_space = ilo_cp_space(ilo->cp);

   /*
    * From the Sandy Bridge PRM, volume 2 part 1, page 313:
    *
    *     "If other rendering operations have preceded this clear, a
    *      PIPE_CONTROL with write cache flush enabled and Z-inhibit
    *      disabled must be issued before the rectangle primitive used for
    *      the depth buffer clear operation."
    *
    * From the Sandy Bridge PRM, volume 2 part 1, page 314:
    *
    *     "Depth buffer clear pass must be followed by a PIPE_CONTROL
    *      command with DEPTH_STALL bit set and Then followed by Depth
    *      FLUSH"
    *
    * But the pipeline has to be flushed both before and after not only
    * because of these workarounds.  We need them for reasons such as
    *
    *  - we may sample from a texture that was rendered to
    *  - we may sample from the fb shortly after
    *
    * Skip checking blitter->op and do the flushes.
    */
   if (need_flush)
      ilo_render_emit_flush(ilo->render);

   while (true) {
      struct ilo_builder_snapshot snapshot;

      ilo_builder_batch_snapshot(&ilo->cp->builder, &snapshot);

      ilo_render_emit_rectlist(ilo->render, ilo->blitter);

      if (!ilo_builder_validate(&ilo->cp->builder, 0, NULL)) {
         ilo_builder_batch_restore(&ilo->cp->builder, &snapshot);

         /* flush and try again */
         if (ilo_builder_batch_used(&ilo->cp->builder)) {
            ilo_cp_submit(ilo->cp, "out of aperture");
            continue;
         }
      }

      break;
   }

   ilo_render_invalidate_hw(ilo->render);

   ilo_render_emit_flush(ilo->render);

   /* sanity check size estimation */
   assert(before_space - ilo_cp_space(ilo->cp) <= max_len);
}

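/**
 * Perform primitive restart in software: scan the index buffer for the
 * restart index and draw each subrange with restart disabled.
 */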
static void
draw_vbo_with_sw_restart(struct ilo_context *ilo,
                         const struct pipe_draw_info *info)
{
   const struct ilo_ib_state *ib = &ilo->state_vector.ib;
   const struct ilo_vma *vma;
   union {
      const void *ptr;
      const uint8_t *u8;
      const uint16_t *u16;
      const uint32_t *u32;
   } u;

   /* we will draw with IB mapped */
   if (ib->state.buffer) {
      vma = ilo_resource_get_vma(ib->state.buffer);
      u.ptr = intel_bo_map(vma->bo, false);
      if (u.ptr)
         u.u8 += vma->bo_offset + ib->state.offset;
   } else {
      vma = NULL;
      u.ptr = ib->state.user_buffer;
   }

   if (!u.ptr)
      return;

#define DRAW_VBO_WITH_SW_RESTART(pipe, info, ptr) do {   \
   const unsigned end = (info)->start + (info)->count;   \
   struct pipe_draw_info subinfo;                        \
   unsigned i;                                           \
                                                         \
   subinfo = *(info);                                    \
   subinfo.primitive_restart = false;                    \
   for (i = (info)->start; i < end; i++) {               \
      if ((ptr)[i] == (info)->restart_index) {           \
         subinfo.count = i - subinfo.start;              \
         if (subinfo.count)                              \
            (pipe)->draw_vbo(pipe, &subinfo);            \
         subinfo.start = i + 1;                          \
      }                                                  \
   }                                                     \
   subinfo.count = i - subinfo.start;                    \
   if (subinfo.count)                                    \
      (pipe)->draw_vbo(pipe, &subinfo);                  \
} while (0)

   switch (ib->state.index_size) {
   case 1:
      DRAW_VBO_WITH_SW_RESTART(&ilo->base, info, u.u8);
      break;
   case 2:
      DRAW_VBO_WITH_SW_RESTART(&ilo->base, info, u.u16);
      break;
   case 4:
      DRAW_VBO_WITH_SW_RESTART(&ilo->base, info, u.u32);
      break;
   default:
      assert(!"unsupported index size");
      break;
   }

#undef DRAW_VBO_WITH_SW_RESTART

   if (vma)
      intel_bo_unmap(vma->bo);
}

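/**
 * Return true if the draw call needs the software fallback for primitive
 * restart.
 */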
static bool
draw_vbo_need_sw_restart(const struct ilo_context *ilo,
                         const struct pipe_draw_info *info)
{
   /* the restart index is fixed prior to GEN7.5 */
   if (ilo_dev_gen(ilo->dev) < ILO_GEN(7.5)) {
      const unsigned cut_index =
         (ilo->state_vector.ib.state.index_size == 1) ? 0xff :
         (ilo->state_vector.ib.state.index_size == 2) ? 0xffff :
         (ilo->state_vector.ib.state.index_size == 4) ? 0xffffffff : 0;

      if (info->restart_index < cut_index)
         return true;
   }

   switch (info->mode) {
   case PIPE_PRIM_POINTS:
   case PIPE_PRIM_LINES:
   case PIPE_PRIM_LINE_STRIP:
   case PIPE_PRIM_TRIANGLES:
   case PIPE_PRIM_TRIANGLE_STRIP:
      /* these never need software fallback */
      return false;
   case PIPE_PRIM_LINE_LOOP:
   case PIPE_PRIM_POLYGON:
   case PIPE_PRIM_QUAD_STRIP:
   case PIPE_PRIM_QUADS:
   case PIPE_PRIM_TRIANGLE_FAN:
      /* these need software fallback prior to GEN7.5 */
      return (ilo_dev_gen(ilo->dev) < ILO_GEN(7.5));
   default:
      /* the rest always needs software fallback */
      return true;
   }
}

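/**
 * Implement pipe_context::draw_vbo().
 */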
static void
ilo_draw_vbo(struct pipe_context *pipe, const struct pipe_draw_info *info)
{
   struct ilo_context *ilo = ilo_context(pipe);
   int vs_scratch_size, gs_scratch_size, fs_scratch_size;

   if (ilo_debug & ILO_DEBUG_DRAW) {
      if (info->indexed) {
         ilo_printf("indexed draw %s: "
               "index start %d, count %d, vertex range [%d, %d]\n",
               u_prim_name(info->mode), info->start, info->count,
               info->min_index, info->max_index);
      } else {
         ilo_printf("draw %s: vertex start %d, count %d\n",
               u_prim_name(info->mode), info->start, info->count);
      }

      ilo_state_vector_dump_dirty(&ilo->state_vector);
   }

   if (ilo_skip_rendering(ilo))
      return;

   if (info->primitive_restart && info->indexed &&
       draw_vbo_need_sw_restart(ilo, info)) {
      draw_vbo_with_sw_restart(ilo, info);
      return;
   }

   ilo_finalize_3d_states(ilo, info);

   /* upload kernels */
   ilo_shader_cache_upload(ilo->shader_cache, &ilo->cp->builder);

   /* prepare scratch spaces */
   ilo_shader_cache_get_max_scratch_sizes(ilo->shader_cache,
         &vs_scratch_size, &gs_scratch_size, &fs_scratch_size);
   ilo_render_prepare_scratch_spaces(ilo->render,
         vs_scratch_size, gs_scratch_size, fs_scratch_size);

   ilo_blit_resolve_framebuffer(ilo);

   /* If draw_vbo ever fails, return immediately. */
   if (!draw_vbo(ilo, &ilo->state_vector))
      return;

   /* clear dirty status */
   ilo->state_vector.dirty = 0x0;

   /* avoid dangling pointer reference */
   ilo->state_vector.draw = NULL;

   if (ilo_debug & ILO_DEBUG_NOCACHE)
      ilo_render_emit_flush(ilo->render);
}

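/**
 * Implement pipe_context::texture_barrier().
 */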
static void
ilo_texture_barrier(struct pipe_context *pipe, unsigned flags)
{
   struct ilo_context *ilo = ilo_context(pipe);

   if (ilo->cp->ring != INTEL_RING_RENDER)
      return;

   ilo_render_emit_flush(ilo->render);

   /* don't know why */
   if (ilo_dev_gen(ilo->dev) >= ILO_GEN(7))
      ilo_cp_submit(ilo->cp, "texture barrier");
}

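/**
 * Implement pipe_context::get_sample_position().
 */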
static void
ilo_get_sample_position(struct pipe_context *pipe,
                        unsigned sample_count,
                        unsigned sample_index,
                        float *out_value)
{
   struct ilo_context *ilo = ilo_context(pipe);

   ilo_render_get_sample_position(ilo->render,
         sample_count, sample_index,
         &out_value[0], &out_value[1]);
}

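/**
 * Initialize the draw-related fields of the context.
 */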
void
ilo_init_draw(struct ilo_context *ilo)
{
   ilo->draw.cp_owner.own = ilo_draw_own_cp;
   ilo->draw.cp_owner.release = ilo_draw_release_cp;
   ilo->draw.cp_owner.data = (void *) ilo;
   ilo->draw.cp_owner.reserve = 0;

   list_inithead(&ilo->draw.queries);
}

/**
 * Initialize 3D-related functions.
 */
void
ilo_init_draw_functions(struct ilo_context *ilo)
{
   ilo->base.draw_vbo = ilo_draw_vbo;
   ilo->base.texture_barrier = ilo_texture_barrier;
   ilo->base.get_sample_position = ilo_get_sample_position;
}