/*
 * Copyright (C) 2016 Rob Clark <robclark@freedesktop.org>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * Authors:
 *    Rob Clark <robclark@freedesktop.org>
 */

#include "util/hash_table.h"
#include "util/list.h"
#include "util/set.h"
#include "util/u_string.h"

#include "freedreno_batch.h"
#include "freedreno_context.h"
#include "freedreno_fence.h"
#include "freedreno_query_hw.h"
#include "freedreno_resource.h"

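/* Allocate a ringbuffer for the batch's submit: */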
static struct fd_ringbuffer *
alloc_ring(struct fd_batch *batch, unsigned sz, enum fd_ringbuffer_flags flags)
{
   struct fd_context *ctx = batch->ctx;

   /* if kernel is too old to support unlimited # of cmd buffers, we
    * have no option but to allocate large worst-case sizes so that
    * we don't need to grow the ringbuffer.  Performance is likely to
    * suffer, but there is no good alternative.
    *
    * Otherwise if supported, allocate a growable ring with initial
    * size of zero.
    */
   if ((fd_device_version(ctx->screen->dev) >= FD_VERSION_UNLIMITED_CMDS) &&
       !FD_DBG(NOGROW)) {
      flags |= FD_RINGBUFFER_GROWABLE;
      sz = 0;
   }

   return fd_submit_new_ringbuffer(batch->submit, sz, flags);
}

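/* (Re)initialize per-batch state: create the submit, allocate the
 * ringbuffers and reset the various state-tracking fields.  Used both
 * when a batch is created and when it is recycled by batch_reset().
 */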
static void
batch_init(struct fd_batch *batch)
{
   struct fd_context *ctx = batch->ctx;

   batch->submit = fd_submit_new(ctx->pipe);
   if (batch->nondraw) {
      batch->gmem = alloc_ring(batch, 0x1000, FD_RINGBUFFER_PRIMARY);
      batch->draw = alloc_ring(batch, 0x100000, 0);
   } else {
      batch->gmem = alloc_ring(batch, 0x100000, FD_RINGBUFFER_PRIMARY);
      batch->draw = alloc_ring(batch, 0x100000, 0);

      /* a6xx+ re-uses draw rb for both draw and binning pass: */
      if (ctx->screen->gen < 6) {
         batch->binning = alloc_ring(batch, 0x100000, 0);
      }
   }

   batch->in_fence_fd = -1;
   batch->fence = NULL;

   /* Work around problems on earlier gens with submit merging, etc,
    * by always creating a fence to request that the submit is flushed
    * immediately:
    */
   if (ctx->screen->gen < 6)
      batch->fence = fd_fence_create(batch);

   batch->cleared = 0;
   batch->fast_cleared = 0;
   batch->invalidated = 0;
   batch->restore = batch->resolve = 0;
   batch->needs_flush = false;
   batch->flushed = false;
   batch->gmem_reason = 0;
   batch->num_draws = 0;
   batch->num_vertices = 0;
   batch->num_bins_per_pipe = 0;
   batch->prim_strm_bits = 0;
   batch->draw_strm_bits = 0;

   fd_reset_wfi(batch);

   util_dynarray_init(&batch->draw_patches, NULL);
   util_dynarray_init(&batch->fb_read_patches, NULL);

   if (is_a2xx(ctx->screen)) {
      util_dynarray_init(&batch->shader_patches, NULL);
      util_dynarray_init(&batch->gmem_patches, NULL);
   }

   if (is_a3xx(ctx->screen))
      util_dynarray_init(&batch->rbrc_patches, NULL);

   assert(batch->resources->entries == 0);

   util_dynarray_init(&batch->samples, NULL);

   u_trace_init(&batch->trace, &ctx->trace_context);
   batch->last_timestamp_cmd = NULL;
}

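/* Allocate and initialize a new batch.  Returns NULL on allocation
 * failure.
 */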
struct fd_batch *
fd_batch_create(struct fd_context *ctx, bool nondraw)
{
   struct fd_batch *batch = CALLOC_STRUCT(fd_batch);

   if (!batch)
      return NULL;

   DBG("%p", batch);

   pipe_reference_init(&batch->reference, 1);
   batch->ctx = ctx;
   batch->nondraw = nondraw;

   simple_mtx_init(&batch->submit_lock, mtx_plain);

   batch->resources =
      _mesa_set_create(NULL, _mesa_hash_pointer, _mesa_key_pointer_equal);

   batch_init(batch);

   return batch;
}

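/* Release the submit and all ringbuffers owned by the batch.  A no-op
 * if the submit has already been cleaned up.
 */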
static void
cleanup_submit(struct fd_batch *batch)
{
   if (!batch->submit)
      return;

   fd_ringbuffer_del(batch->draw);
   fd_ringbuffer_del(batch->gmem);

   if (batch->binning) {
      fd_ringbuffer_del(batch->binning);
      batch->binning = NULL;
   }

   if (batch->prologue) {
      fd_ringbuffer_del(batch->prologue);
      batch->prologue = NULL;
   }

   if (batch->epilogue) {
      fd_ringbuffer_del(batch->epilogue);
      batch->epilogue = NULL;
   }

   if (batch->tile_setup) {
      fd_ringbuffer_del(batch->tile_setup);
      batch->tile_setup = NULL;
   }

   if (batch->tile_fini) {
      fd_ringbuffer_del(batch->tile_fini);
      batch->tile_fini = NULL;
   }

   fd_submit_del(batch->submit);
   batch->submit = NULL;
}

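/* Tear down per-batch state (the inverse of batch_init()): fences, the
 * submit and its rings, patch lists and any pending hw-query samples.
 */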
static void
batch_fini(struct fd_batch *batch)
{
   DBG("%p", batch);

   pipe_resource_reference(&batch->query_buf, NULL);

   if (batch->in_fence_fd != -1)
      close(batch->in_fence_fd);

   /* in case batch wasn't flushed but fence was created: */
   if (batch->fence)
      fd_fence_set_batch(batch->fence, NULL);

   fd_fence_ref(&batch->fence, NULL);

   cleanup_submit(batch);

   util_dynarray_fini(&batch->draw_patches);
   util_dynarray_fini(&batch->fb_read_patches);

   if (is_a2xx(batch->ctx->screen)) {
      util_dynarray_fini(&batch->shader_patches);
      util_dynarray_fini(&batch->gmem_patches);
   }

   if (is_a3xx(batch->ctx->screen))
      util_dynarray_fini(&batch->rbrc_patches);

   while (batch->samples.size > 0) {
      struct fd_hw_sample *samp =
         util_dynarray_pop(&batch->samples, struct fd_hw_sample *);
      fd_hw_sample_reference(batch->ctx, &samp, NULL);
   }
   util_dynarray_fini(&batch->samples);

   u_trace_fini(&batch->trace);
}

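/* Flush all batches that this batch depends on, dropping the
 * references that were taken in fd_batch_add_dep():
 */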
static void
batch_flush_dependencies(struct fd_batch *batch) assert_dt
{
   struct fd_batch_cache *cache = &batch->ctx->screen->batch_cache;
   struct fd_batch *dep;

   foreach_batch (dep, cache, batch->dependents_mask) {
      fd_batch_flush(dep);
      fd_batch_reference(&dep, NULL);
   }

   batch->dependents_mask = 0;
}

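/* Drop dependencies without flushing them: */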
static void
batch_reset_dependencies(struct fd_batch *batch)
{
   struct fd_batch_cache *cache = &batch->ctx->screen->batch_cache;
   struct fd_batch *dep;

   foreach_batch (dep, cache, batch->dependents_mask) {
      fd_batch_reference(&dep, NULL);
   }

   batch->dependents_mask = 0;
}

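/* Detach the batch from every resource it references, clearing the
 * batch from each resource's batch_mask and write_batch tracking.
 * Caller must hold the screen lock.
 */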
static void
batch_reset_resources(struct fd_batch *batch)
{
   fd_screen_assert_locked(batch->ctx->screen);

   set_foreach (batch->resources, entry) {
      struct fd_resource *rsc = (struct fd_resource *)entry->key;
      _mesa_set_remove(batch->resources, entry);
      assert(rsc->track->batch_mask & (1 << batch->idx));
      rsc->track->batch_mask &= ~(1 << batch->idx);
      if (rsc->track->write_batch == batch)
         fd_batch_reference_locked(&rsc->track->write_batch, NULL);
   }
}

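/* Return the batch to a freshly-initialized state, dropping all
 * dependencies and resource references:
 */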
static void
batch_reset(struct fd_batch *batch) assert_dt
{
   DBG("%p", batch);

   batch_reset_dependencies(batch);

   fd_screen_lock(batch->ctx->screen);
   batch_reset_resources(batch);
   fd_screen_unlock(batch->ctx->screen);

   batch_fini(batch);
   batch_init(batch);
}

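/* Reset the batch, but only if it has unflushed rendering: */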
void
fd_batch_reset(struct fd_batch *batch)
{
   if (batch->needs_flush)
      batch_reset(batch);
}

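/* Called when the last reference to the batch is dropped.  Expects the
 * screen lock to be held; it is released temporarily while freeing
 * batch state.
 */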
void
__fd_batch_destroy(struct fd_batch *batch)
{
   struct fd_context *ctx = batch->ctx;

   DBG("%p", batch);

   fd_screen_assert_locked(batch->ctx->screen);

   fd_bc_invalidate_batch(batch, true);

   batch_reset_resources(batch);
   assert(batch->resources->entries == 0);
   _mesa_set_destroy(batch->resources, NULL);

   fd_screen_unlock(ctx->screen);
   batch_reset_dependencies(batch);
   assert(batch->dependents_mask == 0);

   util_copy_framebuffer_state(&batch->framebuffer, NULL);
   batch_fini(batch);

   simple_mtx_destroy(&batch->submit_lock);

   free(batch->key);
   free(batch);
   fd_screen_lock(ctx->screen);
}

void
__fd_batch_describe(char *buf, const struct fd_batch *batch)
{
   sprintf(buf, "fd_batch<%u>", batch->seqno);
}

/* Get per-batch prologue */
struct fd_ringbuffer *
fd_batch_get_prologue(struct fd_batch *batch)
{
   if (!batch->prologue)
      batch->prologue = alloc_ring(batch, 0x1000, 0);
   return batch->prologue;
}

/* Only called from fd_batch_flush() */
static void
batch_flush(struct fd_batch *batch) assert_dt
{
   DBG("%p: needs_flush=%d", batch, batch->needs_flush);

   if (!fd_batch_lock_submit(batch))
      return;

   batch->needs_flush = false;

   /* close out the draw cmds by making sure any active queries are
    * paused:
    */
   fd_batch_finish_queries(batch);

   batch_flush_dependencies(batch);

   fd_screen_lock(batch->ctx->screen);
   batch_reset_resources(batch);
   /* NOTE: remove=false removes the batch from the hashtable, so future
    * lookups won't cache-hit a flushed batch, but leaves the weak reference
    * to the batch to avoid having multiple batches with same batch->idx, as
    * that causes all sorts of hilarity.
    */
   fd_bc_invalidate_batch(batch, false);
   batch->flushed = true;

   if (batch == batch->ctx->batch)
      fd_batch_reference_locked(&batch->ctx->batch, NULL);

   fd_screen_unlock(batch->ctx->screen);

   if (batch->fence)
      fd_fence_ref(&batch->ctx->last_fence, batch->fence);

   fd_gmem_render_tiles(batch);

   assert(batch->reference.count > 0);

   cleanup_submit(batch);
   fd_batch_unlock_submit(batch);
}

/* NOTE: could drop the last ref to batch
 */
void
fd_batch_flush(struct fd_batch *batch)
{
   struct fd_batch *tmp = NULL;

   /* NOTE: we need to hold an extra ref across the body of flush,
    * since the last ref to this batch could be dropped when cleaning
    * up used_resources
    */
   fd_batch_reference(&tmp, batch);
   batch_flush(tmp);
   fd_batch_reference(&tmp, NULL);
}

/* find a batch's dependents mask, including recursive dependencies: */
static uint32_t
recursive_dependents_mask(struct fd_batch *batch)
{
   struct fd_batch_cache *cache = &batch->ctx->screen->batch_cache;
   struct fd_batch *dep;
   uint32_t dependents_mask = batch->dependents_mask;

   foreach_batch (dep, cache, batch->dependents_mask)
      dependents_mask |= recursive_dependents_mask(dep);

   return dependents_mask;
}

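/* Add 'dep' as a dependency of 'batch', taking a reference that is
 * dropped when the dependency is flushed or reset:
 */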
void
fd_batch_add_dep(struct fd_batch *batch, struct fd_batch *dep)
{
   fd_screen_assert_locked(batch->ctx->screen);

   if (batch->dependents_mask & (1 << dep->idx))
      return;

   /* a loop should not be possible */
   assert(!((1 << batch->idx) & recursive_dependents_mask(dep)));

   struct fd_batch *other = NULL;
   fd_batch_reference_locked(&other, dep);
   batch->dependents_mask |= (1 << dep->idx);
   DBG("%p: added dependency on %p", batch, dep);
}

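/* Flush the batch currently writing the resource, temporarily dropping
 * the screen lock around the flush:
 */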
static void
flush_write_batch(struct fd_resource *rsc) assert_dt
{
   struct fd_batch *b = NULL;
   fd_batch_reference_locked(&b, rsc->track->write_batch);

   fd_screen_unlock(b->ctx->screen);
   fd_batch_flush(b);
   fd_screen_lock(b->ctx->screen);

   fd_batch_reference_locked(&b, NULL);
}

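/* Add the resource to the batch's resource set and mark the batch in
 * the resource's batch_mask, if not already tracked:
 */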
static void
fd_batch_add_resource(struct fd_batch *batch, struct fd_resource *rsc)
{
   if (likely(fd_batch_references_resource(batch, rsc))) {
      assert(_mesa_set_search_pre_hashed(batch->resources, rsc->hash, rsc));
      return;
   }

   assert(!_mesa_set_search(batch->resources, rsc));

   _mesa_set_add_pre_hashed(batch->resources, rsc->hash, rsc);
   rsc->track->batch_mask |= (1 << batch->idx);
}

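/* Mark the resource as written by this batch.  Any other batch writing
 * the resource is flushed, and any other batch referencing it becomes
 * a dependency (and is invalidated in the batch cache) to avoid
 * hazards:
 */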
void
fd_batch_resource_write(struct fd_batch *batch, struct fd_resource *rsc)
{
   fd_screen_assert_locked(batch->ctx->screen);

   DBG("%p: write %p", batch, rsc);

   /* Must do this before the early out, so we unset a previous resource
    * invalidate (which may have left the write_batch state in place).
    */
   rsc->valid = true;

   if (rsc->track->write_batch == batch)
      return;

   fd_batch_write_prep(batch, rsc);

   if (rsc->stencil)
      fd_batch_resource_write(batch, rsc->stencil);

   /* Note: invalidate the write batch, to avoid further writes to rsc
    * resulting in a write-after-read hazard.
    */
   /* if we are pending read or write by any other batch: */
   if (unlikely(rsc->track->batch_mask & ~(1 << batch->idx))) {
      struct fd_batch_cache *cache = &batch->ctx->screen->batch_cache;
      struct fd_batch *dep;

      if (rsc->track->write_batch)
         flush_write_batch(rsc);

      foreach_batch (dep, cache, rsc->track->batch_mask) {
         struct fd_batch *b = NULL;
         if (dep == batch)
            continue;
         /* note that batch_add_dep could flush and unref dep, so
          * we need to hold a reference to keep it live for the
          * fd_bc_invalidate_batch()
          */
         fd_batch_reference(&b, dep);
         fd_batch_add_dep(batch, b);
         fd_bc_invalidate_batch(b, false);
         fd_batch_reference_locked(&b, NULL);
      }
   }
   fd_batch_reference_locked(&rsc->track->write_batch, batch);

   fd_batch_add_resource(batch, rsc);
}

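/* Slow path for marking a resource as read by this batch: flush any
 * other batch that is writing the resource before adding it to this
 * batch's tracking:
 */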
void
fd_batch_resource_read_slowpath(struct fd_batch *batch, struct fd_resource *rsc)
{
   fd_screen_assert_locked(batch->ctx->screen);

   if (rsc->stencil)
      fd_batch_resource_read(batch, rsc->stencil);

   DBG("%p: read %p", batch, rsc);

   /* If reading a resource pending a write, go ahead and flush the
    * writer.  This avoids situations where we end up having to
    * flush the current batch in _resource_used()
    */
   if (unlikely(rsc->track->write_batch && rsc->track->write_batch != batch))
      flush_write_batch(rsc);

   fd_batch_add_resource(batch, rsc);
}

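/* Flush the batch if its prim/draw streams or draw ring have grown too
 * large (or unconditionally when FD_DBG(FLUSH) is set):
 */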
void
fd_batch_check_size(struct fd_batch *batch)
{
   if (FD_DBG(FLUSH)) {
      fd_batch_flush(batch);
      return;
   }

   /* Place a reasonable upper bound on prim/draw stream buffer size: */
   const unsigned limit_bits = 8 * 8 * 1024 * 1024;
   if ((batch->prim_strm_bits > limit_bits) ||
       (batch->draw_strm_bits > limit_bits)) {
      fd_batch_flush(batch);
      return;
   }

   if (!fd_ringbuffer_check_size(batch->draw))
      fd_batch_flush(batch);
}

/* Emit a WAIT_FOR_IDLE only if needed, i.e. if there has not already
 * been one since the last draw:
 */
void
fd_wfi(struct fd_batch *batch, struct fd_ringbuffer *ring)
{
   if (batch->needs_wfi) {
      if (batch->ctx->screen->gen >= 5)
         OUT_WFI5(ring);
      else
         OUT_WFI(ring);
      batch->needs_wfi = false;
   }
}