/*
 * Copyright (C) 2016 Rob Clark <robclark@freedesktop.org>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * Authors:
 *    Rob Clark <robclark@freedesktop.org>
 */

#include "util/list.h"
#include "util/set.h"
#include "util/hash_table.h"
#include "util/u_string.h"

#include "freedreno_batch.h"
#include "freedreno_context.h"
#include "freedreno_fence.h"
#include "freedreno_resource.h"
#include "freedreno_query_hw.h"

static struct fd_ringbuffer *
alloc_ring(struct fd_batch *batch, unsigned sz, enum fd_ringbuffer_flags flags)
{
	struct fd_context *ctx = batch->ctx;

	/* if kernel is too old to support unlimited # of cmd buffers, we
	 * have no option but to allocate large worst-case sizes so that
	 * we don't need to grow the ringbuffer.  Performance is likely to
	 * suffer, but there is no good alternative.
	 *
	 * Otherwise if supported, allocate a growable ring with initial
	 * size of zero.
	 */
	if ((fd_device_version(ctx->screen->dev) >= FD_VERSION_UNLIMITED_CMDS) &&
			!(fd_mesa_debug & FD_DBG_NOGROW)) {
		flags |= FD_RINGBUFFER_GROWABLE;
		sz = 0;
	}

	return fd_submit_new_ringbuffer(batch->submit, sz, flags);
}

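/* (Re)initialize per-batch state: create the kernel submit, allocate the
 * gmem/draw (and, pre-a6xx, binning) rings, and clear all bookkeeping.
 * Shared by fd_batch_create() and batch_reset().
 */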
static void
batch_init(struct fd_batch *batch)
{
	struct fd_context *ctx = batch->ctx;

	batch->submit = fd_submit_new(ctx->pipe);
	if (batch->nondraw) {
		batch->gmem = alloc_ring(batch, 0x1000, FD_RINGBUFFER_PRIMARY);
		batch->draw = alloc_ring(batch, 0x100000, 0);
	} else {
		batch->gmem = alloc_ring(batch, 0x100000, FD_RINGBUFFER_PRIMARY);
		batch->draw = alloc_ring(batch, 0x100000, 0);

		/* a6xx+ re-uses draw rb for both draw and binning pass: */
		if (ctx->screen->gpu_id < 600) {
			batch->binning = alloc_ring(batch, 0x100000, 0);
		}
	}

	batch->in_fence_fd = -1;
	batch->fence = fd_fence_create(batch);

	batch->cleared = 0;
	batch->fast_cleared = 0;
	batch->invalidated = 0;
	batch->restore = batch->resolve = 0;
	batch->needs_flush = false;
	batch->flushed = false;
	batch->gmem_reason = 0;
	batch->num_draws = 0;
	batch->num_vertices = 0;
	batch->num_bins_per_pipe = 0;
	batch->prim_strm_bits = 0;
	batch->draw_strm_bits = 0;
	batch->stage = FD_STAGE_NULL;

	fd_reset_wfi(batch);

	util_dynarray_init(&batch->draw_patches, NULL);
	util_dynarray_init(&batch->fb_read_patches, NULL);

	if (is_a2xx(ctx->screen)) {
		util_dynarray_init(&batch->shader_patches, NULL);
		util_dynarray_init(&batch->gmem_patches, NULL);
	}

	if (is_a3xx(ctx->screen))
		util_dynarray_init(&batch->rbrc_patches, NULL);

	assert(batch->resources->entries == 0);

	util_dynarray_init(&batch->samples, NULL);

	list_inithead(&batch->log_chunks);
}

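/* Create a new batch; the caller must hold the screen lock (asserted
 * below).
 */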
struct fd_batch *
fd_batch_create(struct fd_context *ctx, bool nondraw)
{
	struct fd_batch *batch = CALLOC_STRUCT(fd_batch);

	if (!batch)
		return NULL;

	DBG("%p", batch);

	pipe_reference_init(&batch->reference, 1);
	batch->ctx = ctx;
	batch->nondraw = nondraw;

	batch->resources = _mesa_set_create(NULL, _mesa_hash_pointer,
			_mesa_key_pointer_equal);

	batch_init(batch);

	fd_screen_assert_locked(ctx->screen);
	if (BATCH_DEBUG) {
		_mesa_set_add(ctx->screen->live_batches, batch);
	}

	return batch;
}

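/* Tear down the rings, fence, and per-batch tracking allocated by
 * batch_init().
 */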
static void
batch_fini(struct fd_batch *batch)
{
	DBG("%p", batch);

	pipe_resource_reference(&batch->query_buf, NULL);

	if (batch->in_fence_fd != -1)
		close(batch->in_fence_fd);

	/* in case batch wasn't flushed but fence was created: */
	fd_fence_populate(batch->fence, 0, -1);

	fd_fence_ref(&batch->fence, NULL);

	fd_ringbuffer_del(batch->draw);
	fd_ringbuffer_del(batch->gmem);

	if (batch->binning) {
		fd_ringbuffer_del(batch->binning);
		batch->binning = NULL;
	}

	if (batch->prologue) {
		fd_ringbuffer_del(batch->prologue);
		batch->prologue = NULL;
	}

	if (batch->epilogue) {
		fd_ringbuffer_del(batch->epilogue);
		batch->epilogue = NULL;
	}

	if (batch->tile_setup) {
		fd_ringbuffer_del(batch->tile_setup);
		batch->tile_setup = NULL;
	}

	if (batch->tile_fini) {
		fd_ringbuffer_del(batch->tile_fini);
		batch->tile_fini = NULL;
	}

	if (batch->tessellation) {
		fd_bo_del(batch->tessfactor_bo);
		fd_bo_del(batch->tessparam_bo);
		fd_ringbuffer_del(batch->tess_addrs_constobj);
	}

	fd_submit_del(batch->submit);

	util_dynarray_fini(&batch->draw_patches);
	util_dynarray_fini(&batch->fb_read_patches);

	if (is_a2xx(batch->ctx->screen)) {
		util_dynarray_fini(&batch->shader_patches);
		util_dynarray_fini(&batch->gmem_patches);
	}

	if (is_a3xx(batch->ctx->screen))
		util_dynarray_fini(&batch->rbrc_patches);

	while (batch->samples.size > 0) {
		struct fd_hw_sample *samp =
			util_dynarray_pop(&batch->samples, struct fd_hw_sample *);
		fd_hw_sample_reference(batch->ctx, &samp, NULL);
	}
	util_dynarray_fini(&batch->samples);

	assert(list_is_empty(&batch->log_chunks));
}

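/* Drop this batch's references to its dependent batches, optionally
 * flushing them first.
 */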
static void
batch_flush_reset_dependencies(struct fd_batch *batch, bool flush)
{
	struct fd_batch_cache *cache = &batch->ctx->screen->batch_cache;
	struct fd_batch *dep;

	foreach_batch(dep, cache, batch->dependents_mask) {
		if (flush)
			fd_batch_flush(dep);
		fd_batch_reference(&dep, NULL);
	}

	batch->dependents_mask = 0;
}

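/* Detach all resources tracked by the batch; caller must hold the
 * screen lock.
 */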
static void
batch_reset_resources_locked(struct fd_batch *batch)
{
	fd_screen_assert_locked(batch->ctx->screen);

	set_foreach(batch->resources, entry) {
		struct fd_resource *rsc = (struct fd_resource *)entry->key;
		_mesa_set_remove(batch->resources, entry);
		debug_assert(rsc->batch_mask & (1 << batch->idx));
		rsc->batch_mask &= ~(1 << batch->idx);
		if (rsc->write_batch == batch)
			fd_batch_reference_locked(&rsc->write_batch, NULL);
	}
}

static void
batch_reset_resources(struct fd_batch *batch)
{
	fd_screen_lock(batch->ctx->screen);
	batch_reset_resources_locked(batch);
	fd_screen_unlock(batch->ctx->screen);
}

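/* Reset the batch back to a freshly initialized state without
 * destroying it.
 */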
static void
batch_reset(struct fd_batch *batch)
{
	DBG("%p", batch);

	batch_flush_reset_dependencies(batch, false);
	batch_reset_resources(batch);

	batch_fini(batch);
	batch_init(batch);
}

void
fd_batch_reset(struct fd_batch *batch)
{
	if (batch->needs_flush)
		batch_reset(batch);
}

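/* Tear down and free the batch; called once the last reference is
 * dropped.
 */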
void
__fd_batch_destroy(struct fd_batch *batch)
{
	struct fd_context *ctx = batch->ctx;

	DBG("%p", batch);

	fd_context_assert_locked(batch->ctx);

	if (BATCH_DEBUG) {
		_mesa_set_remove_key(ctx->screen->live_batches, batch);
	}

	fd_bc_invalidate_batch(batch, true);

	batch_reset_resources_locked(batch);
	debug_assert(batch->resources->entries == 0);
	_mesa_set_destroy(batch->resources, NULL);

	fd_context_unlock(ctx);
	batch_flush_reset_dependencies(batch, false);
	debug_assert(batch->dependents_mask == 0);

	util_copy_framebuffer_state(&batch->framebuffer, NULL);
	batch_fini(batch);
	free(batch);
	fd_context_lock(ctx);
}

void
__fd_batch_describe(char* buf, const struct fd_batch *batch)
{
	sprintf(buf, "fd_batch<%u>", batch->seqno);
}

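/* Internal flush: pause any active queries, flush dependent batches,
 * kick off tile rendering via fd_gmem_render_tiles(), and drop the
 * batch's resource tracking.
 */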
static void
batch_flush(struct fd_batch *batch)
{
	DBG("%p: needs_flush=%d", batch, batch->needs_flush);

	if (batch->flushed)
		return;

	batch->needs_flush = false;

	/* close out the draw cmds by making sure any active queries are
	 * paused:
	 */
	fd_batch_set_stage(batch, FD_STAGE_NULL);

	batch_flush_reset_dependencies(batch, true);

	batch->flushed = true;

	fd_fence_ref(&batch->ctx->last_fence, batch->fence);

	fd_gmem_render_tiles(batch);
	batch_reset_resources(batch);

	debug_assert(batch->reference.count > 0);

	fd_screen_lock(batch->ctx->screen);
	fd_bc_invalidate_batch(batch, false);
	fd_screen_unlock(batch->ctx->screen);
}

/* Get per-batch prologue */
struct fd_ringbuffer *
fd_batch_get_prologue(struct fd_batch *batch)
{
	if (!batch->prologue)
		batch->prologue = alloc_ring(batch, 0x1000, 0);
	return batch->prologue;
}

/* NOTE: could drop the last ref to batch */
void
fd_batch_flush(struct fd_batch *batch)
{
	struct fd_batch *tmp = NULL;

	/* NOTE: we need to hold an extra ref across the body of flush,
	 * since the last ref to this batch could be dropped when cleaning
	 * up used_resources
	 */
	fd_batch_reference(&tmp, batch);

	batch_flush(tmp);

	if (batch == batch->ctx->batch) {
		fd_batch_reference(&batch->ctx->batch, NULL);
	}

	fd_batch_reference(&tmp, NULL);
}

/* find a batch's dependents mask, including recursive dependencies: */
static uint32_t
recursive_dependents_mask(struct fd_batch *batch)
{
	struct fd_batch_cache *cache = &batch->ctx->screen->batch_cache;
	struct fd_batch *dep;
	uint32_t dependents_mask = batch->dependents_mask;

	foreach_batch(dep, cache, batch->dependents_mask)
		dependents_mask |= recursive_dependents_mask(dep);

	return dependents_mask;
}

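/* Record that batch depends on dep (taking a reference); asserts that
 * this does not create a dependency loop.
 */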
void
fd_batch_add_dep(struct fd_batch *batch, struct fd_batch *dep)
{
	fd_screen_assert_locked(batch->ctx->screen);

	if (batch->dependents_mask & (1 << dep->idx))
		return;

	/* a loop should not be possible */
	debug_assert(!((1 << batch->idx) & recursive_dependents_mask(dep)));

	struct fd_batch *other = NULL;
	fd_batch_reference_locked(&other, dep);
	batch->dependents_mask |= (1 << dep->idx);
	DBG("%p: added dependency on %p", batch, dep);
}

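/* Flush the batch currently writing rsc, temporarily dropping the
 * screen lock around the flush.
 */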
static void
flush_write_batch(struct fd_resource *rsc)
{
	struct fd_batch *b = NULL;
	fd_batch_reference_locked(&b, rsc->write_batch);

	fd_screen_unlock(b->ctx->screen);
	fd_batch_flush(b);
	fd_screen_lock(b->ctx->screen);

	fd_bc_invalidate_batch(b, false);
	fd_batch_reference_locked(&b, NULL);
}

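/* Add rsc to the batch's resource set (no-op if already tracked). */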
static void
fd_batch_add_resource(struct fd_batch *batch, struct fd_resource *rsc)
{
	if (likely(fd_batch_references_resource(batch, rsc))) {
		debug_assert(_mesa_set_search(batch->resources, rsc));
		return;
	}

	debug_assert(!_mesa_set_search(batch->resources, rsc));

	_mesa_set_add(batch->resources, rsc);
	rsc->batch_mask |= (1 << batch->idx);
}

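/* Mark rsc as written by this batch, flushing any other pending writer
 * and taking dependencies on (and invalidating) any other batches that
 * reference it.
 */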
void
fd_batch_resource_write(struct fd_batch *batch, struct fd_resource *rsc)
{
	fd_screen_assert_locked(batch->ctx->screen);

	fd_batch_write_prep(batch, rsc);

	if (rsc->stencil)
		fd_batch_resource_write(batch, rsc->stencil);

	DBG("%p: write %p", batch, rsc);

	rsc->valid = true;

	/* note, invalidate write batch, to avoid further writes to rsc
	 * resulting in a write-after-read hazard.
	 */
	/* if we are pending read or write by any other batch: */
	if (unlikely(rsc->batch_mask & ~(1 << batch->idx))) {
		struct fd_batch_cache *cache = &batch->ctx->screen->batch_cache;
		struct fd_batch *dep;

		if (rsc->write_batch && rsc->write_batch != batch)
			flush_write_batch(rsc);

		foreach_batch(dep, cache, rsc->batch_mask) {
			struct fd_batch *b = NULL;
			if (dep == batch)
				continue;
			/* note that batch_add_dep could flush and unref dep, so
			 * we need to hold a reference to keep it live for the
			 * fd_bc_invalidate_batch()
			 */
			fd_batch_reference(&b, dep);
			fd_batch_add_dep(batch, b);
			fd_bc_invalidate_batch(b, false);
			fd_batch_reference_locked(&b, NULL);
		}
	}
	fd_batch_reference_locked(&rsc->write_batch, batch);

	fd_batch_add_resource(batch, rsc);
}

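/* Mark rsc as read by this batch, flushing a pending writer first. */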
void
fd_batch_resource_read_slowpath(struct fd_batch *batch, struct fd_resource *rsc)
{
	fd_screen_assert_locked(batch->ctx->screen);

	if (rsc->stencil)
		fd_batch_resource_read(batch, rsc->stencil);

	DBG("%p: read %p", batch, rsc);

	/* If reading a resource pending a write, go ahead and flush the
	 * writer.  This avoids situations where we end up having to
	 * flush the current batch in _resource_used()
	 */
	if (unlikely(rsc->write_batch && rsc->write_batch != batch))
		flush_write_batch(rsc);

	fd_batch_add_resource(batch, rsc);
}

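/* Flush early if FD_DBG_FLUSH is set, or if the draw ring is getting
 * close to full on kernels without growable ringbuffers.
 */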
void
fd_batch_check_size(struct fd_batch *batch)
{
	debug_assert(!batch->flushed);

	if (unlikely(fd_mesa_debug & FD_DBG_FLUSH)) {
		fd_batch_flush(batch);
		return;
	}

	if (fd_device_version(batch->ctx->screen->dev) >= FD_VERSION_UNLIMITED_CMDS)
		return;

	struct fd_ringbuffer *ring = batch->draw;
	if ((ring->cur - ring->start) > (ring->size/4 - 0x1000))
		fd_batch_flush(batch);
}

/* emit a WAIT_FOR_IDLE only if needed, ie. if there has not already
 * been one since last draw:
 */
void
fd_wfi(struct fd_batch *batch, struct fd_ringbuffer *ring)
{
	if (batch->needs_wfi) {
		if (batch->ctx->screen->gpu_id >= 500)
			OUT_WFI5(ring);
		else
			OUT_WFI(ring);
		batch->needs_wfi = false;
	}
}