/*
 * Copyright (C) 2016 Rob Clark <robclark@freedesktop.org>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * Authors:
 *    Rob Clark <robclark@freedesktop.org>
 */

#include "util/hash_table.h"
#include "util/list.h"
#include "util/set.h"
#include "util/u_string.h"

#include "freedreno_batch.h"
#include "freedreno_context.h"
#include "freedreno_fence.h"
#include "freedreno_query_hw.h"
#include "freedreno_resource.h"

static struct fd_ringbuffer *
alloc_ring(struct fd_batch *batch, unsigned sz, enum fd_ringbuffer_flags flags)
{
   struct fd_context *ctx = batch->ctx;

   /* If the kernel is too old to support an unlimited # of cmd buffers, we
    * have no option but to allocate large worst-case sizes so that
    * we don't need to grow the ringbuffer.  Performance is likely to
    * suffer, but there is no good alternative.
    *
    * Otherwise, if supported, allocate a growable ring with an initial
    * size of zero.
    */
   if ((fd_device_version(ctx->screen->dev) >= FD_VERSION_UNLIMITED_CMDS) &&
       !FD_DBG(NOGROW)) {
      flags |= FD_RINGBUFFER_GROWABLE;
      sz = 0;
   }

   return fd_submit_new_ringbuffer(batch->submit, sz, flags);
}

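/* (Re)initialize per-batch state: create the submit, allocate the rings,
 * and clear the tracking/accounting fields.  Called from fd_batch_create()
 * and again from batch_reset():
 */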
static void
batch_init(struct fd_batch *batch)
{
   struct fd_context *ctx = batch->ctx;

   batch->submit = fd_submit_new(ctx->pipe);
   if (batch->nondraw) {
      batch->gmem = alloc_ring(batch, 0x1000, FD_RINGBUFFER_PRIMARY);
      batch->draw = alloc_ring(batch, 0x100000, 0);
   } else {
      batch->gmem = alloc_ring(batch, 0x100000, FD_RINGBUFFER_PRIMARY);
      batch->draw = alloc_ring(batch, 0x100000, 0);

      /* a6xx+ re-uses draw rb for both draw and binning pass: */
      if (ctx->screen->gen < 6) {
         batch->binning = alloc_ring(batch, 0x100000, 0);
      }
   }

   batch->in_fence_fd = -1;
   batch->fence = NULL;

   /* Work around problems on earlier gens with submit merging, etc.,
    * by always creating a fence to request that the submit is flushed
    * immediately:
    */
   if (ctx->screen->gen < 6)
      batch->fence = fd_fence_create(batch);

   batch->cleared = 0;
   batch->fast_cleared = 0;
   batch->invalidated = 0;
   batch->restore = batch->resolve = 0;
   batch->needs_flush = false;
   batch->flushed = false;
   batch->gmem_reason = 0;
   batch->num_draws = 0;
   batch->num_vertices = 0;
   batch->num_bins_per_pipe = 0;
   batch->prim_strm_bits = 0;
   batch->draw_strm_bits = 0;

   fd_reset_wfi(batch);

   util_dynarray_init(&batch->draw_patches, NULL);
   util_dynarray_init(&batch->fb_read_patches, NULL);

   if (is_a2xx(ctx->screen)) {
      util_dynarray_init(&batch->shader_patches, NULL);
      util_dynarray_init(&batch->gmem_patches, NULL);
   }

   if (is_a3xx(ctx->screen))
      util_dynarray_init(&batch->rbrc_patches, NULL);

   assert(batch->resources->entries == 0);

   util_dynarray_init(&batch->samples, NULL);

   u_trace_init(&batch->trace, &ctx->trace_context);
   batch->last_timestamp_cmd = NULL;
}

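/* Create a new batch.  The caller owns the initial reference: */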
struct fd_batch *
fd_batch_create(struct fd_context *ctx, bool nondraw)
{
   struct fd_batch *batch = CALLOC_STRUCT(fd_batch);

   if (!batch)
      return NULL;

   DBG("%p", batch);

   pipe_reference_init(&batch->reference, 1);
   batch->ctx = ctx;
   batch->nondraw = nondraw;

   simple_mtx_init(&batch->submit_lock, mtx_plain);

   batch->resources =
      _mesa_set_create(NULL, _mesa_hash_pointer, _mesa_key_pointer_equal);

   batch_init(batch);

   return batch;
}

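/* Release the submit and the rings/BOs that were allocated for it.  No-op
 * if the submit has already been cleaned up:
 */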
static void
cleanup_submit(struct fd_batch *batch)
{
   if (!batch->submit)
      return;

   fd_ringbuffer_del(batch->draw);
   fd_ringbuffer_del(batch->gmem);

   if (batch->binning) {
      fd_ringbuffer_del(batch->binning);
      batch->binning = NULL;
   }

   if (batch->prologue) {
      fd_ringbuffer_del(batch->prologue);
      batch->prologue = NULL;
   }

   if (batch->epilogue) {
      fd_ringbuffer_del(batch->epilogue);
      batch->epilogue = NULL;
   }

   if (batch->tile_setup) {
      fd_ringbuffer_del(batch->tile_setup);
      batch->tile_setup = NULL;
   }

   if (batch->tile_fini) {
      fd_ringbuffer_del(batch->tile_fini);
      batch->tile_fini = NULL;
   }

   if (batch->tessellation) {
      fd_bo_del(batch->tessfactor_bo);
      fd_bo_del(batch->tessparam_bo);
      fd_ringbuffer_del(batch->tess_addrs_constobj);
   }

   fd_submit_del(batch->submit);
   batch->submit = NULL;
}

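/* Tear down the batch state set up by batch_init(): */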
static void
batch_fini(struct fd_batch *batch)
{
   DBG("%p", batch);

   pipe_resource_reference(&batch->query_buf, NULL);

   if (batch->in_fence_fd != -1)
      close(batch->in_fence_fd);

   /* in case batch wasn't flushed but fence was created: */
   if (batch->fence)
      fd_fence_set_batch(batch->fence, NULL);

   fd_fence_ref(&batch->fence, NULL);

   cleanup_submit(batch);

   util_dynarray_fini(&batch->draw_patches);
   util_dynarray_fini(&batch->fb_read_patches);

   if (is_a2xx(batch->ctx->screen)) {
      util_dynarray_fini(&batch->shader_patches);
      util_dynarray_fini(&batch->gmem_patches);
   }

   if (is_a3xx(batch->ctx->screen))
      util_dynarray_fini(&batch->rbrc_patches);

   while (batch->samples.size > 0) {
      struct fd_hw_sample *samp =
         util_dynarray_pop(&batch->samples, struct fd_hw_sample *);
      fd_hw_sample_reference(batch->ctx, &samp, NULL);
   }
   util_dynarray_fini(&batch->samples);

   u_trace_fini(&batch->trace);
}

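/* Flush all batches that this batch depends on, dropping our references
 * to them:
 */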
static void
batch_flush_dependencies(struct fd_batch *batch) assert_dt
{
   struct fd_batch_cache *cache = &batch->ctx->screen->batch_cache;
   struct fd_batch *dep;

   foreach_batch (dep, cache, batch->dependents_mask) {
      fd_batch_flush(dep);
      fd_batch_reference(&dep, NULL);
   }

   batch->dependents_mask = 0;
}

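/* Drop our dependencies without flushing them: */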
static void
batch_reset_dependencies(struct fd_batch *batch)
{
   struct fd_batch_cache *cache = &batch->ctx->screen->batch_cache;
   struct fd_batch *dep;

   foreach_batch (dep, cache, batch->dependents_mask) {
      fd_batch_reference(&dep, NULL);
   }

   batch->dependents_mask = 0;
}

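/* Detach all tracked resources from the batch (screen must be locked): */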
static void
batch_reset_resources(struct fd_batch *batch)
{
   fd_screen_assert_locked(batch->ctx->screen);

   set_foreach (batch->resources, entry) {
      struct fd_resource *rsc = (struct fd_resource *)entry->key;
      _mesa_set_remove(batch->resources, entry);
      debug_assert(rsc->track->batch_mask & (1 << batch->idx));
      rsc->track->batch_mask &= ~(1 << batch->idx);
      if (rsc->track->write_batch == batch)
         fd_batch_reference_locked(&rsc->track->write_batch, NULL);
   }
}

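/* Discard everything accumulated in the batch and return it to a freshly
 * initialized state:
 */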
static void
batch_reset(struct fd_batch *batch) assert_dt
{
   DBG("%p", batch);

   batch_reset_dependencies(batch);

   fd_screen_lock(batch->ctx->screen);
   batch_reset_resources(batch);
   fd_screen_unlock(batch->ctx->screen);

   batch_fini(batch);
   batch_init(batch);
}

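/* Public wrapper, only resets batches that have pending work: */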
void
fd_batch_reset(struct fd_batch *batch)
{
   if (batch->needs_flush)
      batch_reset(batch);
}

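/* Called when the last reference to the batch is dropped.  The screen lock
 * is held on entry and must be re-acquired before returning, since the
 * teardown temporarily drops it:
 */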
void
__fd_batch_destroy(struct fd_batch *batch)
{
   struct fd_context *ctx = batch->ctx;

   DBG("%p", batch);

   fd_screen_assert_locked(batch->ctx->screen);

   fd_bc_invalidate_batch(batch, true);

   batch_reset_resources(batch);
   debug_assert(batch->resources->entries == 0);
   _mesa_set_destroy(batch->resources, NULL);

   fd_screen_unlock(ctx->screen);
   batch_reset_dependencies(batch);
   debug_assert(batch->dependents_mask == 0);

   util_copy_framebuffer_state(&batch->framebuffer, NULL);
   batch_fini(batch);

   simple_mtx_destroy(&batch->submit_lock);

   free(batch->key);
   free(batch);
   fd_screen_lock(ctx->screen);
}

void
__fd_batch_describe(char *buf, const struct fd_batch *batch)
{
   sprintf(buf, "fd_batch<%u>", batch->seqno);
}

/* Get per-batch prologue */
struct fd_ringbuffer *
fd_batch_get_prologue(struct fd_batch *batch)
{
   if (!batch->prologue)
      batch->prologue = alloc_ring(batch, 0x1000, 0);
   return batch->prologue;
}

/* Only called from fd_batch_flush() */
static void
batch_flush(struct fd_batch *batch) assert_dt
{
   DBG("%p: needs_flush=%d", batch, batch->needs_flush);

   if (!fd_batch_lock_submit(batch))
      return;

   batch->needs_flush = false;

   /* close out the draw cmds by making sure any active queries are
    * paused:
    */
   fd_batch_finish_queries(batch);

   batch_flush_dependencies(batch);

   fd_screen_lock(batch->ctx->screen);
   batch_reset_resources(batch);
   /* NOTE: remove=false removes the batch from the hashtable, so future
    * lookups won't cache-hit a flushed batch, but leaves the weak reference
    * to the batch to avoid having multiple batches with same batch->idx, as
    * that causes all sorts of hilarity.
    */
   fd_bc_invalidate_batch(batch, false);
   batch->flushed = true;

   if (batch == batch->ctx->batch)
      fd_batch_reference_locked(&batch->ctx->batch, NULL);

   fd_screen_unlock(batch->ctx->screen);

   if (batch->fence)
      fd_fence_ref(&batch->ctx->last_fence, batch->fence);

   fd_gmem_render_tiles(batch);

   debug_assert(batch->reference.count > 0);

   cleanup_submit(batch);
   fd_batch_unlock_submit(batch);
}

/* NOTE: could drop the last ref to batch
 */
void
fd_batch_flush(struct fd_batch *batch)
{
   struct fd_batch *tmp = NULL;

   /* NOTE: we need to hold an extra ref across the body of flush,
    * since the last ref to this batch could be dropped when cleaning
    * up used_resources
    */
   fd_batch_reference(&tmp, batch);
   batch_flush(tmp);
   fd_batch_reference(&tmp, NULL);
}

/* find a batch's dependents mask, including recursive dependencies: */
static uint32_t
recursive_dependents_mask(struct fd_batch *batch)
{
   struct fd_batch_cache *cache = &batch->ctx->screen->batch_cache;
   struct fd_batch *dep;
   uint32_t dependents_mask = batch->dependents_mask;

   foreach_batch (dep, cache, batch->dependents_mask)
      dependents_mask |= recursive_dependents_mask(dep);

   return dependents_mask;
}

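/* Make 'batch' depend on 'dep', so that dep gets flushed before batch: */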
void
fd_batch_add_dep(struct fd_batch *batch, struct fd_batch *dep)
{
   fd_screen_assert_locked(batch->ctx->screen);

   if (batch->dependents_mask & (1 << dep->idx))
      return;

   /* a loop should not be possible */
   debug_assert(!((1 << batch->idx) & recursive_dependents_mask(dep)));

   struct fd_batch *other = NULL;
   fd_batch_reference_locked(&other, dep);
   batch->dependents_mask |= (1 << dep->idx);
   DBG("%p: added dependency on %p", batch, dep);
}

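/* Flush the batch currently writing rsc, temporarily dropping the screen
 * lock around the flush:
 */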
static void
flush_write_batch(struct fd_resource *rsc) assert_dt
{
   struct fd_batch *b = NULL;
   fd_batch_reference_locked(&b, rsc->track->write_batch);

   fd_screen_unlock(b->ctx->screen);
   fd_batch_flush(b);
   fd_screen_lock(b->ctx->screen);

   fd_batch_reference_locked(&b, NULL);
}

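/* Add rsc to the batch's resource set and mark the batch in the resource's
 * batch_mask:
 */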
static void
fd_batch_add_resource(struct fd_batch *batch, struct fd_resource *rsc)
{
   if (likely(fd_batch_references_resource(batch, rsc))) {
      debug_assert(_mesa_set_search_pre_hashed(batch->resources, rsc->hash, rsc));
      return;
   }

   debug_assert(!_mesa_set_search(batch->resources, rsc));

   _mesa_set_add_pre_hashed(batch->resources, rsc->hash, rsc);
   rsc->track->batch_mask |= (1 << batch->idx);
}

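/* Mark rsc as written by this batch: any pending writer is flushed, and
 * other batches referencing the resource become dependencies of this one:
 */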
void
fd_batch_resource_write(struct fd_batch *batch, struct fd_resource *rsc)
{
   fd_screen_assert_locked(batch->ctx->screen);

   DBG("%p: write %p", batch, rsc);

   /* Must do this before the early out, so we unset a previous resource
    * invalidate (which may have left the write_batch state in place).
    */
   rsc->valid = true;

   if (rsc->track->write_batch == batch)
      return;

   fd_batch_write_prep(batch, rsc);

   if (rsc->stencil)
      fd_batch_resource_write(batch, rsc->stencil);

   /* note, invalidate write batch, to avoid further writes to rsc
    * resulting in a write-after-read hazard.
    */
   /* if we are pending read or write by any other batch: */
   if (unlikely(rsc->track->batch_mask & ~(1 << batch->idx))) {
      struct fd_batch_cache *cache = &batch->ctx->screen->batch_cache;
      struct fd_batch *dep;

      if (rsc->track->write_batch)
         flush_write_batch(rsc);

      foreach_batch (dep, cache, rsc->track->batch_mask) {
         struct fd_batch *b = NULL;
         if (dep == batch)
            continue;
         /* note that batch_add_dep could flush and unref dep, so
          * we need to hold a reference to keep it live for the
          * fd_bc_invalidate_batch()
          */
         fd_batch_reference(&b, dep);
         fd_batch_add_dep(batch, b);
         fd_bc_invalidate_batch(b, false);
         fd_batch_reference_locked(&b, NULL);
      }
   }
   fd_batch_reference_locked(&rsc->track->write_batch, batch);

   fd_batch_add_resource(batch, rsc);
}

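/* Mark rsc as read by this batch, flushing any other batch that has a
 * pending write to it:
 */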
void
fd_batch_resource_read_slowpath(struct fd_batch *batch, struct fd_resource *rsc)
{
   fd_screen_assert_locked(batch->ctx->screen);

   if (rsc->stencil)
      fd_batch_resource_read(batch, rsc->stencil);

   DBG("%p: read %p", batch, rsc);

   /* If reading a resource pending a write, go ahead and flush the
    * writer.  This avoids situations where we end up having to
    * flush the current batch in _resource_used()
    */
   if (unlikely(rsc->track->write_batch && rsc->track->write_batch != batch))
      flush_write_batch(rsc);

   fd_batch_add_resource(batch, rsc);
}

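/* Flush the batch if it has grown too large: the FD_DBG(FLUSH) debug option
 * forces a flush, as does exceeding the prim/draw stream limit or running
 * low on space in the draw ring:
 */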
void
fd_batch_check_size(struct fd_batch *batch)
{
   if (FD_DBG(FLUSH)) {
      fd_batch_flush(batch);
      return;
   }

   /* Place a reasonable upper bound on prim/draw stream buffer size: */
   const unsigned limit_bits = 8 * 8 * 1024 * 1024;
   if ((batch->prim_strm_bits > limit_bits) ||
       (batch->draw_strm_bits > limit_bits)) {
      fd_batch_flush(batch);
      return;
   }

   if (!fd_ringbuffer_check_size(batch->draw))
      fd_batch_flush(batch);
}

/* emit a WAIT_FOR_IDLE only if needed, i.e. if there has not already
 * been one since last draw:
 */
void
fd_wfi(struct fd_batch *batch, struct fd_ringbuffer *ring)
{
   if (batch->needs_wfi) {
      if (batch->ctx->screen->gen >= 5)
         OUT_WFI5(ring);
      else
         OUT_WFI(ring);
      batch->needs_wfi = false;
   }
}