/*
 * Copyright © 2014-2017 Broadcom
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

/** @file v3d_job.c
 *
 * Functions for submitting V3D render jobs to the kernel.
 */

#include <xf86drm.h>
#include <libsync.h>
#include "v3d_context.h"
/* The OQ/semaphore packets are the same across V3D versions. */
#define V3D_VERSION 42
#include "broadcom/cle/v3dx_pack.h"
#include "broadcom/common/v3d_macros.h"
#include "util/hash_table.h"
#include "util/ralloc.h"
#include "util/set.h"
#include "broadcom/clif/clif_dump.h"

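/**
 * Frees a job and everything it holds: drops the references on its BOs,
 * removes it from the context's job-tracking hash tables, releases its
 * surface references, and destroys its CLs and tile alloc/state BOs.
 */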
void
v3d_job_free(struct v3d_context *v3d, struct v3d_job *job)
{
        set_foreach(job->bos, entry) {
                struct v3d_bo *bo = (struct v3d_bo *)entry->key;
                v3d_bo_unreference(&bo);
        }

        _mesa_hash_table_remove_key(v3d->jobs, &job->key);

        if (job->write_prscs) {
                set_foreach(job->write_prscs, entry) {
                        const struct pipe_resource *prsc = entry->key;

                        _mesa_hash_table_remove_key(v3d->write_jobs, prsc);
                }
        }

        for (int i = 0; i < job->nr_cbufs; i++) {
                if (job->cbufs[i]) {
                        _mesa_hash_table_remove_key(v3d->write_jobs,
                                                    job->cbufs[i]->texture);
                        pipe_surface_reference(&job->cbufs[i], NULL);
                }
        }
        if (job->zsbuf) {
                struct v3d_resource *rsc = v3d_resource(job->zsbuf->texture);
                if (rsc->separate_stencil)
                        _mesa_hash_table_remove_key(v3d->write_jobs,
                                                    &rsc->separate_stencil->base);

                _mesa_hash_table_remove_key(v3d->write_jobs,
                                            job->zsbuf->texture);
                pipe_surface_reference(&job->zsbuf, NULL);
        }
        if (job->bbuf)
                pipe_surface_reference(&job->bbuf, NULL);

        if (v3d->job == job)
                v3d->job = NULL;

        v3d_destroy_cl(&job->bcl);
        v3d_destroy_cl(&job->rcl);
        v3d_destroy_cl(&job->indirect);
        v3d_bo_unreference(&job->tile_alloc);
        v3d_bo_unreference(&job->tile_state);

        ralloc_free(job);
}

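/**
 * Allocates a new job for the context, initializing its CLs, resetting the
 * draw bounds to an empty box, and creating the set tracking its referenced
 * BOs.
 */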
struct v3d_job *
v3d_job_create(struct v3d_context *v3d)
{
        struct v3d_job *job = rzalloc(v3d, struct v3d_job);

        job->v3d = v3d;

        v3d_init_cl(job, &job->bcl);
        v3d_init_cl(job, &job->rcl);
        v3d_init_cl(job, &job->indirect);

        job->draw_min_x = ~0;
        job->draw_min_y = ~0;
        job->draw_max_x = 0;
        job->draw_max_y = 0;

        job->bos = _mesa_set_create(job,
                                    _mesa_hash_pointer,
                                    _mesa_key_pointer_equal);
        return job;
}

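/**
 * Adds a BO to the set referenced by the job, taking a reference that is
 * dropped in v3d_job_free(), and appends its handle to the dynamically grown
 * array handed to the kernel at submit time.
 */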
void
v3d_job_add_bo(struct v3d_job *job, struct v3d_bo *bo)
{
        if (!bo)
                return;

        if (_mesa_set_search(job->bos, bo))
                return;

        v3d_bo_reference(bo);
        _mesa_set_add(job->bos, bo);
        job->referenced_size += bo->size;

        uint32_t *bo_handles = (void *)(uintptr_t)job->submit.bo_handles;

        if (job->submit.bo_handle_count >= job->bo_handles_size) {
                job->bo_handles_size = MAX2(4, job->bo_handles_size * 2);
                bo_handles = reralloc(job, bo_handles,
                                      uint32_t, job->bo_handles_size);
                job->submit.bo_handles = (uintptr_t)(void *)bo_handles;
        }
        bo_handles[job->submit.bo_handle_count++] = bo->handle;
}

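/**
 * Records that the job writes to the resource, both in the job's own write
 * set and in the context-wide table mapping resources to the job writing
 * them.
 */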
void
v3d_job_add_write_resource(struct v3d_job *job, struct pipe_resource *prsc)
{
        struct v3d_context *v3d = job->v3d;

        if (!job->write_prscs) {
                job->write_prscs = _mesa_set_create(job,
                                                    _mesa_hash_pointer,
                                                    _mesa_key_pointer_equal);
        }

        _mesa_set_add(job->write_prscs, prsc);
        _mesa_hash_table_insert(v3d->write_jobs, prsc, job);
}

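/** Submits any in-flight job that references the given BO. */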
void
v3d_flush_jobs_using_bo(struct v3d_context *v3d, struct v3d_bo *bo)
{
        hash_table_foreach(v3d->jobs, entry) {
                struct v3d_job *job = entry->data;

                if (_mesa_set_search(job->bos, bo))
                        v3d_job_submit(v3d, job);
        }
}

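/**
 * Like v3d_job_add_write_resource(), but also records the resource as
 * written by transform feedback, so the flush logic can rely on the
 * hardware's "Wait for TF" feature instead of flushing the job.
 */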
void
v3d_job_add_tf_write_resource(struct v3d_job *job, struct pipe_resource *prsc)
{
        v3d_job_add_write_resource(job, prsc);

        if (!job->tf_write_prscs)
                job->tf_write_prscs = _mesa_pointer_set_create(job);

        _mesa_set_add(job->tf_write_prscs, prsc);
}

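/* Returns whether the job has a pending transform feedback write to prsc. */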
static bool
v3d_job_writes_resource_from_tf(struct v3d_job *job,
                                struct pipe_resource *prsc)
{
        if (!job->tf_enabled)
                return false;

        if (!job->tf_write_prscs)
                return false;

        return _mesa_set_search(job->tf_write_prscs, prsc) != NULL;
}

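/**
 * Flushes the job writing to prsc, if any, subject to flush_cond, taking
 * care of the extra synchronization needed when the write and the upcoming
 * access happen on different pipelines (graphics vs. compute).
 */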
void
v3d_flush_jobs_writing_resource(struct v3d_context *v3d,
                                struct pipe_resource *prsc,
                                enum v3d_flush_cond flush_cond,
                                bool is_compute_pipeline)
{
        struct hash_entry *entry = _mesa_hash_table_search(v3d->write_jobs,
                                                           prsc);
        if (!entry)
                return;

        struct v3d_resource *rsc = v3d_resource(prsc);

        /* We need to sync if the graphics pipeline reads a resource written
         * by the compute pipeline. The same is needed for the reverse,
         * graphics-to-compute dependency, but there we force a flush of the
         * job instead.
         */
        if (!is_compute_pipeline && rsc->bo != NULL && rsc->compute_written) {
                v3d->sync_on_last_compute_job = true;
                rsc->compute_written = false;
        }
        if (is_compute_pipeline && rsc->bo != NULL && rsc->graphics_written) {
                flush_cond = V3D_FLUSH_ALWAYS;
                rsc->graphics_written = false;
        }

        struct v3d_job *job = entry->data;

        bool needs_flush;
        switch (flush_cond) {
        case V3D_FLUSH_ALWAYS:
                needs_flush = true;
                break;
        case V3D_FLUSH_NOT_CURRENT_JOB:
                needs_flush = !v3d->job || v3d->job != job;
                break;
        case V3D_FLUSH_DEFAULT:
        default:
                /* For writes from TF in the same job we use the "Wait for TF"
                 * feature provided by the hardware, so we don't want to flush.
                 * The exception to this is when the caller is about to map the
                 * resource, since in that case we don't have a "Wait for TF"
                 * command in the command stream. In that scenario the caller
                 * is expected to pass V3D_FLUSH_ALWAYS.
                 */
                needs_flush = !v3d_job_writes_resource_from_tf(job, prsc);
        }

        if (needs_flush)
                v3d_job_submit(v3d, job);
}

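/**
 * Flushes the jobs accessing prsc: the job writing it (if any), then every
 * other job whose BO set references it. Called when the caller is about to
 * write to the resource.
 */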
void
v3d_flush_jobs_reading_resource(struct v3d_context *v3d,
                                struct pipe_resource *prsc,
                                enum v3d_flush_cond flush_cond,
                                bool is_compute_pipeline)
{
        struct v3d_resource *rsc = v3d_resource(prsc);

        /* We only need to force the flush on TF writes, which is the only
         * case where we might skip the flush to use the "Wait for TF"
         * command. Here we are flushing for a read, which means that the
         * caller intends to write to the resource, so we don't care if
         * there was a previous TF write to it.
         */
        v3d_flush_jobs_writing_resource(v3d, prsc, flush_cond,
                                        is_compute_pipeline);

        hash_table_foreach(v3d->jobs, entry) {
                struct v3d_job *job = entry->data;

                if (!_mesa_set_search(job->bos, rsc->bo))
                        continue;

                bool needs_flush;
                switch (flush_cond) {
                case V3D_FLUSH_NOT_CURRENT_JOB:
                        needs_flush = !v3d->job || v3d->job != job;
                        break;
                case V3D_FLUSH_ALWAYS:
                case V3D_FLUSH_DEFAULT:
                default:
                        needs_flush = true;
                }

                if (needs_flush)
                        v3d_job_submit(v3d, job);

                /* Reminder: v3d->jobs is safe to keep iterating even
                 * after deletion of an entry.
                 */
                continue;
        }
}

/**
 * Returns a v3d_job structure for tracking V3D rendering to a particular FBO.
 *
 * If we've already started rendering to this FBO, then return the same job,
 * otherwise make a new one. If we're beginning rendering to an FBO, make
 * sure that any previous reads of the FBO (or writes to its color/Z surfaces)
 * have been flushed.
 */
struct v3d_job *
v3d_get_job(struct v3d_context *v3d,
            uint32_t nr_cbufs,
            struct pipe_surface **cbufs,
            struct pipe_surface *zsbuf,
            struct pipe_surface *bbuf)
{
        /* Return the existing job for this FBO if we have one */
        struct v3d_job_key local_key = {
                .cbufs = {
                        cbufs[0],
                        cbufs[1],
                        cbufs[2],
                        cbufs[3],
                },
                .zsbuf = zsbuf,
                .bbuf = bbuf,
        };
        struct hash_entry *entry = _mesa_hash_table_search(v3d->jobs,
                                                           &local_key);
        if (entry)
                return entry->data;

        /* Creating a new job. Make sure that any previous jobs reading or
         * writing these buffers are flushed.
         */
        struct v3d_job *job = v3d_job_create(v3d);
        job->nr_cbufs = nr_cbufs;

        for (int i = 0; i < job->nr_cbufs; i++) {
                if (cbufs[i]) {
                        v3d_flush_jobs_reading_resource(v3d, cbufs[i]->texture,
                                                        V3D_FLUSH_DEFAULT,
                                                        false);
                        pipe_surface_reference(&job->cbufs[i], cbufs[i]);

                        if (cbufs[i]->texture->nr_samples > 1)
                                job->msaa = true;
                }
        }
        if (zsbuf) {
                v3d_flush_jobs_reading_resource(v3d, zsbuf->texture,
                                                V3D_FLUSH_DEFAULT,
                                                false);
                pipe_surface_reference(&job->zsbuf, zsbuf);
                if (zsbuf->texture->nr_samples > 1)
                        job->msaa = true;
        }
        if (bbuf) {
                pipe_surface_reference(&job->bbuf, bbuf);
                if (bbuf->texture->nr_samples > 1)
                        job->msaa = true;
        }

        for (int i = 0; i < job->nr_cbufs; i++) {
                if (cbufs[i])
                        _mesa_hash_table_insert(v3d->write_jobs,
                                                cbufs[i]->texture, job);
        }
        if (zsbuf) {
                _mesa_hash_table_insert(v3d->write_jobs, zsbuf->texture, job);

                struct v3d_resource *rsc = v3d_resource(zsbuf->texture);
                if (rsc->separate_stencil) {
                        v3d_flush_jobs_reading_resource(v3d,
                                                        &rsc->separate_stencil->base,
                                                        V3D_FLUSH_DEFAULT,
                                                        false);
                        _mesa_hash_table_insert(v3d->write_jobs,
                                                &rsc->separate_stencil->base,
                                                job);
                }
        }

        job->double_buffer = V3D_DBG(DOUBLE_BUFFER) && !job->msaa;

        memcpy(&job->key, &local_key, sizeof(local_key));
        _mesa_hash_table_insert(v3d->jobs, &job->key, job);

        return job;
}

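/**
 * Returns the job for the currently bound framebuffer state, creating one
 * (and deriving its tile setup, initial clears and draw-tile counts from
 * that state) if there is no current job.
 */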
struct v3d_job *
v3d_get_job_for_fbo(struct v3d_context *v3d)
{
        if (v3d->job)
                return v3d->job;

        uint32_t nr_cbufs = v3d->framebuffer.nr_cbufs;
        struct pipe_surface **cbufs = v3d->framebuffer.cbufs;
        struct pipe_surface *zsbuf = v3d->framebuffer.zsbuf;
        struct v3d_job *job = v3d_get_job(v3d, nr_cbufs, cbufs, zsbuf, NULL);

        if (v3d->framebuffer.samples >= 1) {
                job->msaa = true;
                job->double_buffer = false;
        }

        v3d_get_tile_buffer_size(&v3d->screen->devinfo,
                                 job->msaa, job->double_buffer,
                                 job->nr_cbufs, job->cbufs, job->bbuf,
                                 &job->tile_width,
                                 &job->tile_height,
                                 &job->internal_bpp);

        /* The dirty flags are tracking what's been updated while v3d->job has
         * been bound, so set them all to ~0 when switching between jobs. We
         * also need to reset all state at the start of rendering.
         */
        v3d->dirty = ~0;

        /* If we're binding to uninitialized buffers, no need to load their
         * contents before drawing.
         */
        for (int i = 0; i < nr_cbufs; i++) {
                if (cbufs[i]) {
                        struct v3d_resource *rsc = v3d_resource(cbufs[i]->texture);
                        if (!rsc->writes)
                                job->clear |= PIPE_CLEAR_COLOR0 << i;
                }
        }

        if (zsbuf) {
                struct v3d_resource *rsc = v3d_resource(zsbuf->texture);
                if (!rsc->writes)
                        job->clear |= PIPE_CLEAR_DEPTH;

                if (rsc->separate_stencil)
                        rsc = rsc->separate_stencil;

                if (!rsc->writes)
                        job->clear |= PIPE_CLEAR_STENCIL;
        }

        job->draw_tiles_x = DIV_ROUND_UP(v3d->framebuffer.width,
                                         job->tile_width);
        job->draw_tiles_y = DIV_ROUND_UP(v3d->framebuffer.height,
                                         job->tile_height);

        v3d->job = job;

        return job;
}

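/* If requested through V3D_DEBUG, maps the job's BOs and dumps its command
 * lists to stderr in CLIF format.
 */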
static void
v3d_clif_dump(struct v3d_context *v3d, struct v3d_job *job)
{
        if (!(V3D_DBG(CL) ||
              V3D_DBG(CL_NO_BIN) ||
              V3D_DBG(CLIF)))
                return;

        struct clif_dump *clif = clif_dump_init(&v3d->screen->devinfo,
                                                stderr,
                                                V3D_DBG(CL) ||
                                                V3D_DBG(CL_NO_BIN),
                                                V3D_DBG(CL_NO_BIN));

        set_foreach(job->bos, entry) {
                struct v3d_bo *bo = (void *)entry->key;
                char *name = ralloc_asprintf(NULL, "%s_0x%x",
                                             bo->name, bo->offset);

                v3d_bo_map(bo);
                clif_dump_add_bo(clif, name, bo->offset, bo->size, bo->map);

                ralloc_free(name);
        }

        clif_dump(clif, &job->submit);

        clif_dump_destroy(clif);
}

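/* Stalls until the GPU has written the primitive counts and accumulates them
 * into the context's query totals, advancing the stream output target
 * offsets by the number of vertices written.
 */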
static void
v3d_read_and_accumulate_primitive_counters(struct v3d_context *v3d)
{
        assert(v3d->prim_counts);

        perf_debug("stalling on TF counts readback\n");
        struct v3d_resource *rsc = v3d_resource(v3d->prim_counts);
        if (v3d_bo_wait(rsc->bo, OS_TIMEOUT_INFINITE, "prim-counts")) {
                uint32_t *map = v3d_bo_map(rsc->bo) + v3d->prim_counts_offset;
                v3d->tf_prims_generated += map[V3D_PRIM_COUNTS_TF_WRITTEN];
                /* When we only have a vertex shader with no primitive
                 * restart, we determine the primitive count on the CPU, so
                 * we don't update it here again.
                 */
                if (v3d->prog.gs || v3d->prim_restart) {
                        v3d->prims_generated += map[V3D_PRIM_COUNTS_WRITTEN];
                        uint8_t prim_mode =
                                v3d->prog.gs ? v3d->prog.gs->prog_data.gs->out_prim_type
                                             : v3d->prim_mode;
                        uint32_t vertices_written =
                                map[V3D_PRIM_COUNTS_TF_WRITTEN] * mesa_vertices_per_prim(prim_mode);
                        for (int i = 0; i < v3d->streamout.num_targets; i++) {
                                v3d_stream_output_target(v3d->streamout.targets[i])->offset +=
                                        vertices_written;
                        }
                }
        }
}

/**
 * Submits the job to the kernel and then reinitializes it.
 */
void
v3d_job_submit(struct v3d_context *v3d, struct v3d_job *job)
{
        struct v3d_screen *screen = v3d->screen;
        struct v3d_device_info *devinfo = &screen->devinfo;

        if (!job->needs_flush)
                goto done;

        /* The GL_PRIMITIVES_GENERATED query is included with
         * OES_geometry_shader.
         */
        job->needs_primitives_generated =
                v3d->n_primitives_generated_queries_in_flight > 0 &&
                v3d->prog.gs;

        if (job->needs_primitives_generated)
                v3d_ensure_prim_counts_allocated(v3d);

        v3d_X(devinfo, emit_rcl)(job);

        if (cl_offset(&job->bcl) > 0)
                v3d_X(devinfo, bcl_epilogue)(v3d, job);

        if (v3d->in_fence_fd >= 0) {
                /* PIPE_CAP_NATIVE_FENCE */
                if (drmSyncobjImportSyncFile(v3d->fd, v3d->in_syncobj,
                                             v3d->in_fence_fd)) {
                        fprintf(stderr, "Failed to import native fence.\n");
                } else {
                        job->submit.in_sync_bcl = v3d->in_syncobj;
                }
                close(v3d->in_fence_fd);
                v3d->in_fence_fd = -1;
        } else {
                /* While the RCL will implicitly depend on the last RCL to have
                 * finished, we also need to block on any previous TFU job we
                 * may have dispatched.
                 */
                job->submit.in_sync_rcl = v3d->out_sync;
        }

        /* Update the sync object for the last rendering by our context. */
        job->submit.out_sync = v3d->out_sync;

        job->submit.bcl_end = job->bcl.bo->offset + cl_offset(&job->bcl);
        job->submit.rcl_end = job->rcl.bo->offset + cl_offset(&job->rcl);

        if (v3d->active_perfmon) {
                assert(screen->has_perfmon);
                job->submit.perfmon_id = v3d->active_perfmon->kperfmon_id;
        }

        /* If we are submitting a job with a different perfmon, we need to
         * ensure the previous one fully finishes before starting this;
         * otherwise it would wrongly mix counter results.
         */
        if (v3d->active_perfmon != v3d->last_perfmon) {
                v3d->last_perfmon = v3d->active_perfmon;
                job->submit.in_sync_bcl = v3d->out_sync;
        }

        job->submit.flags = 0;
        if (job->tmu_dirty_rcl && screen->has_cache_flush)
                job->submit.flags |= DRM_V3D_SUBMIT_CL_FLUSH_CACHE;

        /* On V3D 4.1, the tile alloc/state setup moved to register writes
         * instead of binner packets.
         */
        if (devinfo->ver >= 42) {
                v3d_job_add_bo(job, job->tile_alloc);
                job->submit.qma = job->tile_alloc->offset;
                job->submit.qms = job->tile_alloc->size;

                v3d_job_add_bo(job, job->tile_state);
                job->submit.qts = job->tile_state->offset;
        }

        v3d_clif_dump(v3d, job);

        if (!V3D_DBG(NORAST)) {
                int ret;

                ret = v3d_ioctl(v3d->fd, DRM_IOCTL_V3D_SUBMIT_CL, &job->submit);
                static bool warned = false;
                if (ret && !warned) {
                        fprintf(stderr, "Draw call returned %s. "
                                "Expect corruption.\n", strerror(errno));
                        warned = true;
                } else if (!ret) {
                        if (v3d->active_perfmon)
                                v3d->active_perfmon->job_submitted = true;
                }

                /* If we are submitting a job in the middle of transform
                 * feedback or there is a primitives generated query with a
                 * geometry shader then we need to read the primitive counts
                 * and accumulate them, otherwise they will be reset at the
                 * start of the next draw when we emit the Tile Binning Mode
                 * Configuration packet.
                 *
                 * If the job doesn't have any TF draw calls, then we know
                 * the primitive count must be zero and we can skip stalling
                 * for this. This also fixes a problem because it seems that
                 * in this scenario the counters are not reset with the Tile
                 * Binning Mode Configuration packet, which would translate
                 * to us reading an obsolete (possibly non-zero) value from
                 * the GPU counters.
                 */
                if (job->needs_primitives_generated ||
                    (v3d->streamout.num_targets &&
                     job->tf_draw_calls_queued > 0))
                        v3d_read_and_accumulate_primitive_counters(v3d);
        }

done:
        v3d_job_free(v3d, job);
}

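/* Hash table callbacks for v3d->jobs, comparing and hashing the entire
 * v3d_job_key.
 */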
static bool
v3d_job_compare(const void *a, const void *b)
{
        return memcmp(a, b, sizeof(struct v3d_job_key)) == 0;
}

static uint32_t
v3d_job_hash(const void *key)
{
        return _mesa_hash_data(key, sizeof(struct v3d_job_key));
}

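/* Creates the context's job-tracking tables: v3d->jobs maps FBO keys to
 * jobs, and v3d->write_jobs maps resources to the job writing them.
 */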
void
v3d_job_init(struct v3d_context *v3d)
{
        v3d->jobs = _mesa_hash_table_create(v3d,
                                            v3d_job_hash,
                                            v3d_job_compare);
        v3d->write_jobs = _mesa_hash_table_create(v3d,
                                                  _mesa_hash_pointer,
                                                  _mesa_key_pointer_equal);
}