/*
 * Copyright (C) 2016 Rob Clark <robclark@freedesktop.org>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * Authors:
 *    Rob Clark <robclark@freedesktop.org>
 */

#ifndef FREEDRENO_BATCH_H_
#define FREEDRENO_BATCH_H_

#include "util/u_inlines.h"
#include "util/u_queue.h"
#include "util/list.h"

#include "freedreno_util.h"

struct fd_context;
struct fd_resource;
enum fd_resource_status;

/* Bitmask of rendering stages during which a particular query is
 * active.  Queries will be automatically started/stopped (generating
 * additional fd_hw_sample_period's) on entrance/exit from stages that
 * are applicable to the query.
 *
 * NOTE: set the stage to FD_STAGE_NULL at the end of an IB to ensure no
 * query is still active.  Things aren't going to work out the way you
 * want if a query is active across IBs (or between the tile IB and the
 * draw IB).
 */
enum fd_render_stage {
	FD_STAGE_NULL     = 0x01,
	FD_STAGE_DRAW     = 0x02,
	FD_STAGE_CLEAR    = 0x04,
	/* used for driver internal draws (ie. util_blitter_blit()): */
	FD_STAGE_BLIT     = 0x08,
	FD_STAGE_ALL      = 0xff,
};
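
/* Illustration only (not part of this header's API): a hw sample provider
 * typically declares the stages it cares about as a mask of the values
 * above, and the query machinery compares that mask against the batch's
 * current stage when deciding to start/stop queries.  A hypothetical
 * occlusion counter that should ignore driver-internal blits might use:
 *
 *    .active = FD_STAGE_DRAW | FD_STAGE_CLEAR,
 *
 * so samples are only collected while drawing or clearing.
 */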

#define MAX_HW_SAMPLE_PROVIDERS 4
struct fd_hw_sample_provider;
struct fd_hw_sample;

/* A batch tracks everything about a cmdstream batch/submit, including the
 * ringbuffers used for binning, draw, and gmem cmds, list of associated
 * fd_resource-s, etc.
 */
struct fd_batch {
	struct pipe_reference reference;
	unsigned seqno;
	unsigned idx;

	int in_fence_fd;
	bool needs_out_fence_fd;

	struct fd_context *ctx;

	struct util_queue_fence flush_fence;

	/* Do we need to mem2gmem before rendering?  We don't if, for example,
	 * there was a glClear() that invalidated the entire previous buffer
	 * contents.  Keep track of which buffer(s) are cleared, or need
	 * restore.  Masks of PIPE_CLEAR_*.
	 *
	 * The 'cleared' bits will be set for buffers which are *entirely*
	 * cleared, and 'partial_cleared' bits will be set if you must
	 * check cleared_scissor.
	 */
	enum {
		/* align bitmask values w/ PIPE_CLEAR_*.. since that is convenient.. */
		FD_BUFFER_COLOR   = PIPE_CLEAR_COLOR,
		FD_BUFFER_DEPTH   = PIPE_CLEAR_DEPTH,
		FD_BUFFER_STENCIL = PIPE_CLEAR_STENCIL,
		FD_BUFFER_ALL     = FD_BUFFER_COLOR | FD_BUFFER_DEPTH | FD_BUFFER_STENCIL,
	} cleared, partial_cleared, restore, resolve;

	bool needs_flush : 1;
	bool blit : 1;
	bool back_blit : 1;      /* only blit so far is resource shadowing back-blit */

	/* Keep track if WAIT_FOR_IDLE is needed for registers we need
	 * to update via RMW:
	 */
	bool needs_wfi : 1;

	/* To decide whether to render to system memory, keep track of the
	 * number of draws, and whether any of them require multisample,
	 * depth_test (or depth write), stencil_test, blending, and
	 * color_logic_op (since those functions are disabled when
	 * bypassing GMEM).
	 */
	enum {
		FD_GMEM_CLEARS_DEPTH_STENCIL = 0x01,
		FD_GMEM_DEPTH_ENABLED        = 0x02,
		FD_GMEM_STENCIL_ENABLED      = 0x04,

		FD_GMEM_MSAA_ENABLED         = 0x08,
		FD_GMEM_BLEND_ENABLED        = 0x10,
		FD_GMEM_LOGICOP_ENABLED      = 0x20,
	} gmem_reason;
	unsigned num_draws;   /* number of draws in current batch */

	/* Track the maximal bounds of the scissor of all the draws within a
	 * batch.  Used at the tile rendering step (fd_gmem_render_tiles(),
	 * mem2gmem/gmem2mem) to avoid needlessly moving data in/out of gmem.
	 */
	struct pipe_scissor_state max_scissor;

	/* Track the cleared scissor for color/depth/stencil, so we know
	 * which, if any, tiles need to be restored (mem2gmem).  Only valid
	 * if the corresponding bit in 'cleared' (above) is set.
	 */
	struct {
		struct pipe_scissor_state color, depth, stencil;
	} cleared_scissor;

	/* Keep track of DRAW initiators that need to be patched up depending
	 * on whether or not we are using binning:
	 */
	struct util_dynarray draw_patches;

	/* Keep track of writes to RB_RENDER_CONTROL which need to be patched
	 * once we know whether or not to use GMEM, and GMEM tile pitch.
	 *
	 * (only for a3xx.. but having gen specific subclasses of fd_batch
	 * seemed overkill for now)
	 */
	struct util_dynarray rbrc_patches;

	struct pipe_framebuffer_state framebuffer;

	/** draw pass cmdstream: */
	struct fd_ringbuffer *draw;
	/** binning pass cmdstream: */
	struct fd_ringbuffer *binning;
	/** tiling/gmem (IB0) cmdstream: */
	struct fd_ringbuffer *gmem;

	/**
	 * hw query related state:
	 */
	/*@{*/
	/* next sample offset.. incremented for each sample in the batch/
	 * submit, reset to zero on next submit.
	 */
	uint32_t next_sample_offset;

	/* cached samples (in case multiple queries need to reference
	 * the same sample snapshot)
	 */
	struct fd_hw_sample *sample_cache[MAX_HW_SAMPLE_PROVIDERS];

	/* which sample providers were active in the current batch: */
	uint32_t active_providers;

	/* tracking for current stage, to know when to start/stop
	 * any active queries:
	 */
	enum fd_render_stage stage;

	/* list of samples in current batch: */
	struct util_dynarray samples;

	/* current query result bo and tile stride: */
	struct pipe_resource *query_buf;
	uint32_t query_tile_stride;
	/*@}*/


	/* Set of resources used by currently-unsubmitted batch (read or
	 * write).. does not hold a reference to the resource.
	 */
	struct set *resources;

	/** key in batch-cache (if not null): */
	const void *key;
	uint32_t hash;

	/** set of dependent batches.. holds refs to dependent batches: */
	uint32_t dependents_mask;
};
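
/* Rough sketch (illustration only, not a helper defined in this header) of
 * how the draw path is expected to maintain the tracking fields above: each
 * draw bumps num_draws, ORs in gmem_reason bits for state that rules out
 * bypassing GMEM, and grows max_scissor so that tile setup and
 * mem2gmem/gmem2mem only touch the region actually rendered:
 *
 *    batch->num_draws++;
 *    if (blend_enabled)                       // hypothetical local flag
 *        batch->gmem_reason |= FD_GMEM_BLEND_ENABLED;
 *    batch->max_scissor.maxx = MAX2(batch->max_scissor.maxx, scissor->maxx);
 *    batch->max_scissor.maxy = MAX2(batch->max_scissor.maxy, scissor->maxy);
 */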

struct fd_batch * fd_batch_create(struct fd_context *ctx);

void fd_batch_reset(struct fd_batch *batch);
void fd_batch_sync(struct fd_batch *batch);
void fd_batch_flush(struct fd_batch *batch, bool sync);
void fd_batch_resource_used(struct fd_batch *batch, struct fd_resource *rsc, bool write);
void fd_batch_check_size(struct fd_batch *batch);

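/* Hedged sketch of the typical batch lifecycle using the entry points
 * declared above (variable names are made up; in practice batches usually
 * come from the batch-cache rather than direct fd_batch_create() calls):
 *
 *    struct fd_batch *batch = fd_batch_create(ctx);
 *    ...emit draws...
 *    fd_batch_resource_used(batch, rsc, true);   // rsc is written by batch
 *    ...
 *    fd_batch_flush(batch, false);               // kick off the submit
 *    fd_batch_reference(&batch, NULL);           // drop our reference
 */
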
/* not called directly: */
void __fd_batch_describe(char* buf, const struct fd_batch *batch);
void __fd_batch_destroy(struct fd_batch *batch);

/*
 * NOTE the rule is, you need to hold the screen->lock when destroying
 * a batch..  so either use fd_batch_reference() (which grabs the lock
 * for you) if you don't hold the lock, or fd_batch_reference_locked()
 * if you do hold the lock.
 *
 * WARNING the _locked() version can briefly drop the lock.  Without
 * recursive mutexes, I'm not sure there is much else we can do (since
 * __fd_batch_destroy() needs to unref resources)
 */

static inline void
fd_batch_reference(struct fd_batch **ptr, struct fd_batch *batch)
{
	struct fd_batch *old_batch = *ptr;
	if (pipe_reference_described(&(*ptr)->reference, &batch->reference,
			(debug_reference_descriptor)__fd_batch_describe))
		__fd_batch_destroy(old_batch);
	*ptr = batch;
}
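
/* Hypothetical usage sketch: this follows the usual gallium pipe_reference
 * pattern, so swapping a stored pointer and dropping a reference look like
 *
 *    fd_batch_reference(&ctx->batch, new_batch);   // ref new, unref old
 *    fd_batch_reference(&ctx->batch, NULL);        // just unref
 *
 * where ctx->batch stands in for wherever the batch pointer happens to be
 * stored.
 */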

/* fwd-decl prototypes to untangle header dependency :-/ */
static inline void fd_context_assert_locked(struct fd_context *ctx);
static inline void fd_context_lock(struct fd_context *ctx);
static inline void fd_context_unlock(struct fd_context *ctx);

static inline void
fd_batch_reference_locked(struct fd_batch **ptr, struct fd_batch *batch)
{
	struct fd_batch *old_batch = *ptr;

	if (old_batch)
		fd_context_assert_locked(old_batch->ctx);
	else if (batch)
		fd_context_assert_locked(batch->ctx);

	if (pipe_reference_described(&(*ptr)->reference, &batch->reference,
			(debug_reference_descriptor)__fd_batch_describe)) {
		struct fd_context *ctx = old_batch->ctx;
		fd_context_unlock(ctx);
		__fd_batch_destroy(old_batch);
		fd_context_lock(ctx);
	}
	*ptr = batch;
}
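
/* Sketch of the intended calling pattern (illustrative; rsc->write_batch is
 * just an example of a batch pointer protected by the screen lock, and
 * mtx_lock()/mtx_unlock() stand in for however screen->lock is taken):
 *
 *    mtx_lock(&ctx->screen->lock);
 *    fd_batch_reference_locked(&rsc->write_batch, batch);
 *    mtx_unlock(&ctx->screen->lock);
 *
 * Use fd_batch_reference() instead when the lock is not already held (see
 * the NOTE above), and remember that the _locked() variant can briefly
 * drop the lock around __fd_batch_destroy().
 */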

#include "freedreno_context.h"

static inline void
fd_reset_wfi(struct fd_batch *batch)
{
	batch->needs_wfi = true;
}

void fd_wfi(struct fd_batch *batch, struct fd_ringbuffer *ring);

/* emit a CP_EVENT_WRITE:
 */
static inline void
fd_event_write(struct fd_batch *batch, struct fd_ringbuffer *ring,
		enum vgt_event_type evt)
{
	OUT_PKT3(ring, CP_EVENT_WRITE, 1);
	OUT_RING(ring, evt);
	fd_reset_wfi(batch);
}
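
/* Hedged example: the particular event depends on the gen and call-site,
 * but requesting a cache flush at the end of a tile could look like
 *
 *    fd_event_write(batch, ring, CACHE_FLUSH_TS);
 *
 * which emits the CP_EVENT_WRITE packet and, via fd_reset_wfi(), marks the
 * batch as needing a WAIT_FOR_IDLE before the next RMW register update.
 */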

#endif /* FREEDRENO_BATCH_H_ */