/*
 * Copyright (C) 2016 Rob Clark <robclark@freedesktop.org>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * Authors:
 *    Rob Clark <robclark@freedesktop.org>
 */

#include "util/list.h"
#include "util/set.h"
#include "util/hash_table.h"
#include "util/u_string.h"

#include "freedreno_batch.h"
#include "freedreno_context.h"
#include "freedreno_resource.h"
#include "freedreno_query_hw.h"

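/* (Re)initialize per-batch state: command ringbuffers, clear/restore
 * tracking, scissor bounds, and patch/sample arrays.  Called both when a
 * batch is created and when it is reset for reuse.
 */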
static void
batch_init(struct fd_batch *batch)
{
	struct fd_context *ctx = batch->ctx;
	unsigned size = 0;

	if (ctx->screen->reorder)
		util_queue_fence_init(&batch->flush_fence);

	/* if kernel is too old to support unlimited # of cmd buffers, we
	 * have no option but to allocate large worst-case sizes so that
	 * we don't need to grow the ringbuffer.  Performance is likely to
	 * suffer, but there is no good alternative.
	 */
	if ((fd_device_version(ctx->screen->dev) < FD_VERSION_UNLIMITED_CMDS) ||
			(fd_mesa_debug & FD_DBG_NOGROW)) {
		size = 0x100000;
	}

	batch->draw    = fd_ringbuffer_new(ctx->screen->pipe, size);
	batch->binning = fd_ringbuffer_new(ctx->screen->pipe, size);
	batch->gmem    = fd_ringbuffer_new(ctx->screen->pipe, size);

	fd_ringbuffer_set_parent(batch->gmem, NULL);
	fd_ringbuffer_set_parent(batch->draw, batch->gmem);
	fd_ringbuffer_set_parent(batch->binning, batch->gmem);

	batch->in_fence_fd = -1;

	batch->cleared = batch->partial_cleared = 0;
	batch->restore = batch->resolve = 0;
	batch->needs_flush = false;
	batch->gmem_reason = 0;
	batch->num_draws = 0;
	batch->stage = FD_STAGE_NULL;

	fd_reset_wfi(batch);

	/* reset maximal bounds: */
	batch->max_scissor.minx = batch->max_scissor.miny = ~0;
	batch->max_scissor.maxx = batch->max_scissor.maxy = 0;

	util_dynarray_init(&batch->draw_patches);

	if (is_a3xx(ctx->screen))
		util_dynarray_init(&batch->rbrc_patches);

	assert(batch->resources->entries == 0);

	util_dynarray_init(&batch->samples);
}

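/* Allocate and initialize a new batch.  The caller receives the initial
 * reference; batch->resources tracks which fd_resources are referenced
 * by this batch.
 */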
struct fd_batch *
fd_batch_create(struct fd_context *ctx)
{
	struct fd_batch *batch = CALLOC_STRUCT(fd_batch);

	if (!batch)
		return NULL;

	DBG("%p", batch);

	pipe_reference_init(&batch->reference, 1);
	batch->ctx = ctx;

	batch->resources = _mesa_set_create(NULL, _mesa_hash_pointer,
			_mesa_key_pointer_equal);

	batch_init(batch);

	return batch;
}

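/* Tear down per-batch state allocated in batch_init(): the query buffer,
 * in-fence fd, ringbuffers, patch arrays, any pending hw-query samples,
 * and (in reorder mode) the flush fence.
 */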
static void
batch_fini(struct fd_batch *batch)
{
	pipe_resource_reference(&batch->query_buf, NULL);

	if (batch->in_fence_fd != -1)
		close(batch->in_fence_fd);

	fd_ringbuffer_del(batch->draw);
	fd_ringbuffer_del(batch->binning);
	fd_ringbuffer_del(batch->gmem);

	util_dynarray_fini(&batch->draw_patches);

	if (is_a3xx(batch->ctx->screen))
		util_dynarray_fini(&batch->rbrc_patches);

	while (batch->samples.size > 0) {
		struct fd_hw_sample *samp =
			util_dynarray_pop(&batch->samples, struct fd_hw_sample *);
		fd_hw_sample_reference(batch->ctx, &samp, NULL);
	}
	util_dynarray_fini(&batch->samples);

	if (batch->ctx->screen->reorder)
		util_queue_fence_destroy(&batch->flush_fence);
}

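/* Drop (and optionally flush) every batch this batch depends on, then
 * clear the dependents mask.
 */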
static void
batch_flush_reset_dependencies(struct fd_batch *batch, bool flush)
{
	struct fd_batch_cache *cache = &batch->ctx->screen->batch_cache;
	struct fd_batch *dep;

	foreach_batch(dep, cache, batch->dependents_mask) {
		if (flush)
			fd_batch_flush(dep, false);
		fd_batch_reference(&dep, NULL);
	}

	batch->dependents_mask = 0;
}

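/* Detach every resource tracked by this batch: remove it from the
 * resource set, clear this batch's bit in rsc->batch_mask, and drop the
 * write_batch reference if we were the writer.  Caller must hold the
 * screen lock.
 */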
static void
batch_reset_resources_locked(struct fd_batch *batch)
{
	struct set_entry *entry;

	pipe_mutex_assert_locked(batch->ctx->screen->lock);

	set_foreach(batch->resources, entry) {
		struct fd_resource *rsc = (struct fd_resource *)entry->key;
		_mesa_set_remove(batch->resources, entry);
		debug_assert(rsc->batch_mask & (1 << batch->idx));
		rsc->batch_mask &= ~(1 << batch->idx);
		if (rsc->write_batch == batch)
			fd_batch_reference_locked(&rsc->write_batch, NULL);
	}
}

static void
batch_reset_resources(struct fd_batch *batch)
{
	pipe_mutex_lock(batch->ctx->screen->lock);
	batch_reset_resources_locked(batch);
	pipe_mutex_unlock(batch->ctx->screen->lock);
}

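/* Discard the batch contents: wait for any in-flight async flush, drop
 * dependencies and tracked resources, then re-init the batch so it can
 * be reused.
 */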
static void
batch_reset(struct fd_batch *batch)
{
	DBG("%p", batch);

	fd_batch_sync(batch);

	batch_flush_reset_dependencies(batch, false);
	batch_reset_resources(batch);

	batch_fini(batch);
	batch_init(batch);
}

void
fd_batch_reset(struct fd_batch *batch)
{
	if (batch->needs_flush)
		batch_reset(batch);
}

void
__fd_batch_destroy(struct fd_batch *batch)
{
	DBG("%p", batch);

	util_copy_framebuffer_state(&batch->framebuffer, NULL);

	pipe_mutex_lock(batch->ctx->screen->lock);
	fd_bc_invalidate_batch(batch, true);
	pipe_mutex_unlock(batch->ctx->screen->lock);

	batch_fini(batch);

	batch_reset_resources(batch);
	debug_assert(batch->resources->entries == 0);
	_mesa_set_destroy(batch->resources, NULL);

	batch_flush_reset_dependencies(batch, false);
	debug_assert(batch->dependents_mask == 0);

	free(batch);
}

void
__fd_batch_describe(char* buf, const struct fd_batch *batch)
{
	util_sprintf(buf, "fd_batch<%u>", batch->seqno);
}

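/* Wait for an asynchronously queued flush of this batch to complete.
 * No-op unless batch reordering (and therefore the flush queue) is
 * enabled.
 */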
void
fd_batch_sync(struct fd_batch *batch)
{
	if (!batch->ctx->screen->reorder)
		return;
	util_queue_job_wait(&batch->flush_fence);
}

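/* Flush-queue callbacks: batch_flush_func runs the gmem render pass on
 * the queue thread, and batch_cleanup_func drops the reference taken
 * when the job was queued.
 */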
static void
batch_flush_func(void *job, int id)
{
	struct fd_batch *batch = job;

	fd_gmem_render_tiles(batch);
	batch_reset_resources(batch);
}

static void
batch_cleanup_func(void *job, int id)
{
	struct fd_batch *batch = job;
	fd_batch_reference(&batch, NULL);
}

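/* Internal flush path: pause active queries, flush dependent batches,
 * then either hand the gmem render off to the flush queue (reorder mode)
 * or run it synchronously.
 */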
static void
batch_flush(struct fd_batch *batch)
{
	DBG("%p: needs_flush=%d", batch, batch->needs_flush);

	if (!batch->needs_flush)
		return;

	batch->needs_flush = false;

	/* close out the draw cmds by making sure any active queries are
	 * paused:
	 */
	fd_hw_query_set_stage(batch, batch->draw, FD_STAGE_NULL);

	batch->ctx->dirty = ~0;
	batch_flush_reset_dependencies(batch, true);

	if (batch->ctx->screen->reorder) {
		struct fd_batch *tmp = NULL;
		fd_batch_reference(&tmp, batch);

		if (!util_queue_is_initialized(&batch->ctx->flush_queue))
			util_queue_init(&batch->ctx->flush_queue, "flush_queue", 16, 1);

		util_queue_add_job(&batch->ctx->flush_queue,
				batch, &batch->flush_fence,
				batch_flush_func, batch_cleanup_func);
	} else {
		fd_gmem_render_tiles(batch);
		batch_reset_resources(batch);
	}

	debug_assert(batch->reference.count > 0);

	if (batch == batch->ctx->batch) {
		batch_reset(batch);
	} else {
		pipe_mutex_lock(batch->ctx->screen->lock);
		fd_bc_invalidate_batch(batch, false);
		pipe_mutex_unlock(batch->ctx->screen->lock);
	}
}

/* NOTE: could drop the last ref to batch */
void
fd_batch_flush(struct fd_batch *batch, bool sync)
{
	/* NOTE: we need to hold an extra ref across the body of flush,
	 * since the last ref to this batch could be dropped when cleaning
	 * up used_resources
	 */
	struct fd_batch *tmp = NULL;
	fd_batch_reference(&tmp, batch);
	batch_flush(tmp);
	if (sync)
		fd_batch_sync(tmp);
	fd_batch_reference(&tmp, NULL);
}

/* does 'batch' depend directly or indirectly on 'other' ? */
static bool
batch_depends_on(struct fd_batch *batch, struct fd_batch *other)
{
	struct fd_batch_cache *cache = &batch->ctx->screen->batch_cache;
	struct fd_batch *dep;

	if (batch->dependents_mask & (1 << other->idx))
		return true;

	/* recurse into our direct dependencies to catch indirect deps: */
	foreach_batch(dep, cache, batch->dependents_mask)
		if (batch_depends_on(dep, other))
			return true;

	return false;
}

static void
batch_add_dep(struct fd_batch *batch, struct fd_batch *dep)
{
	if (batch->dependents_mask & (1 << dep->idx))
		return;

	/* if the new dependency already depends on us, we need to flush
	 * to avoid a loop in the dependency graph.
	 */
	if (batch_depends_on(dep, batch)) {
		DBG("%p: flush forced on %p!", batch, dep);
		pipe_mutex_unlock(batch->ctx->screen->lock);
		fd_batch_flush(dep, false);
		pipe_mutex_lock(batch->ctx->screen->lock);
	} else {
		struct fd_batch *other = NULL;
		fd_batch_reference_locked(&other, dep);
		batch->dependents_mask |= (1 << dep->idx);
		DBG("%p: added dependency on %p", batch, dep);
	}
}

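/* Track that 'rsc' is used by this batch.  A write makes every other
 * batch currently referencing the resource a dependency of this one (and
 * invalidates it in the batch cache); a read only adds a dependency on
 * the current writer.  Callers must hold the screen lock, roughly like
 * (illustrative sketch, not a verbatim call site):
 *
 *   pipe_mutex_lock(ctx->screen->lock);
 *   fd_batch_resource_used(batch, fd_resource(prsc), true);
 *   pipe_mutex_unlock(ctx->screen->lock);
 */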
void
fd_batch_resource_used(struct fd_batch *batch, struct fd_resource *rsc, bool write)
{
	pipe_mutex_assert_locked(batch->ctx->screen->lock);

	if (rsc->stencil)
		fd_batch_resource_used(batch, rsc->stencil, write);

	DBG("%p: %s %p", batch, write ? "write" : "read", rsc);

	/* note, invalidate write batch, to avoid further writes to rsc
	 * resulting in a write-after-read hazard.
	 */

	if (write) {
		/* if we are pending read or write by any other batch: */
		if (rsc->batch_mask != (1 << batch->idx)) {
			struct fd_batch_cache *cache = &batch->ctx->screen->batch_cache;
			struct fd_batch *dep;
			foreach_batch(dep, cache, rsc->batch_mask) {
				struct fd_batch *b = NULL;
				/* note that batch_add_dep could flush and unref dep, so
				 * we need to hold a reference to keep it live for the
				 * fd_bc_invalidate_batch()
				 */
				fd_batch_reference(&b, dep);
				batch_add_dep(batch, b);
				fd_bc_invalidate_batch(b, false);
				fd_batch_reference_locked(&b, NULL);
			}
		}
		fd_batch_reference_locked(&rsc->write_batch, batch);
	} else {
		if (rsc->write_batch) {
			batch_add_dep(batch, rsc->write_batch);
			fd_bc_invalidate_batch(rsc->write_batch, false);
		}
	}

	if (rsc->batch_mask & (1 << batch->idx))
		return;

	debug_assert(!_mesa_set_search(batch->resources, rsc));

	_mesa_set_add(batch->resources, rsc);
	rsc->batch_mask |= (1 << batch->idx);
}

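/* If the kernel cannot grow cmdstream buffers, flush before the draw
 * ring gets close to full (or always, when FD_DBG_FLUSH is set).
 */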
void
fd_batch_check_size(struct fd_batch *batch)
{
	if (fd_device_version(batch->ctx->screen->dev) >= FD_VERSION_UNLIMITED_CMDS)
		return;

	struct fd_ringbuffer *ring = batch->draw;
	if (((ring->cur - ring->start) > (ring->size/4 - 0x1000)) ||
			(fd_mesa_debug & FD_DBG_FLUSH))
		fd_batch_flush(batch, true);
}

410 
411 /* emit a WAIT_FOR_IDLE only if needed, ie. if there has not already
412  * been one since last draw:
413  */
414 void
fd_wfi(struct fd_batch * batch,struct fd_ringbuffer * ring)415 fd_wfi(struct fd_batch *batch, struct fd_ringbuffer *ring)
416 {
417 	if (batch->needs_wfi) {
418 		if (batch->ctx->screen->gpu_id >= 500)
419 			OUT_WFI5(ring);
420 		else
421 			OUT_WFI(ring);
422 		batch->needs_wfi = false;
423 	}
424 }
425