/*
 * Copyright (C) 2017 Rob Clark <robclark@freedesktop.org>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * Authors:
 *    Rob Clark <robclark@freedesktop.org>
 */
26 
27 #include "util/u_memory.h"
28 #include "util/u_inlines.h"
29 
30 #include "freedreno_query_acc.h"
31 #include "freedreno_context.h"
32 #include "freedreno_resource.h"
33 #include "freedreno_util.h"
34 
35 static void
fd_acc_destroy_query(struct fd_context * ctx,struct fd_query * q)36 fd_acc_destroy_query(struct fd_context *ctx, struct fd_query *q)
37 {
38 	struct fd_acc_query *aq = fd_acc_query(q);
39 
40 	DBG("%p", q);
41 
42 	pipe_resource_reference(&aq->prsc, NULL);
43 	list_del(&aq->node);
44 
45 	free(aq->query_data);
46 	free(aq);
47 }
48 
49 static void
realloc_query_bo(struct fd_context * ctx,struct fd_acc_query * aq)50 realloc_query_bo(struct fd_context *ctx, struct fd_acc_query *aq)
51 {
52 	struct fd_resource *rsc;
53 	void *map;
54 
55 	pipe_resource_reference(&aq->prsc, NULL);
56 
57 	aq->prsc = pipe_buffer_create(&ctx->screen->base,
58 			PIPE_BIND_QUERY_BUFFER, 0, 0x1000);
59 
60 	/* don't assume the buffer is zero-initialized: */
61 	rsc = fd_resource(aq->prsc);
62 
63 	fd_bo_cpu_prep(rsc->bo, ctx->pipe, DRM_FREEDRENO_PREP_WRITE);
64 
65 	map = fd_bo_map(rsc->bo);
66 	memset(map, 0, aq->size);
67 	fd_bo_cpu_fini(rsc->bo);
68 }
69 
70 static void
fd_acc_query_pause(struct fd_acc_query * aq)71 fd_acc_query_pause(struct fd_acc_query *aq)
72 {
73 	const struct fd_acc_sample_provider *p = aq->provider;
74 
75 	if (!aq->batch)
76 		return;
77 
78 	p->pause(aq, aq->batch);
79 	aq->batch = NULL;
80 }
81 
82 static void
fd_acc_query_resume(struct fd_acc_query * aq,struct fd_batch * batch)83 fd_acc_query_resume(struct fd_acc_query *aq, struct fd_batch *batch)
84 {
85 	const struct fd_acc_sample_provider *p = aq->provider;
86 
87 	aq->batch = batch;
88 	p->resume(aq, aq->batch);
89 
90 	fd_screen_lock(batch->ctx->screen);
91 	fd_batch_resource_write(batch, fd_resource(aq->prsc));
92 	fd_screen_unlock(batch->ctx->screen);
93 }
94 
95 static void
fd_acc_begin_query(struct fd_context * ctx,struct fd_query * q)96 fd_acc_begin_query(struct fd_context *ctx, struct fd_query *q)
97 {
98 	struct fd_acc_query *aq = fd_acc_query(q);
99 
100 	DBG("%p", q);
101 
102 	/* ->begin_query() discards previous results, so realloc bo: */
103 	realloc_query_bo(ctx, aq);
104 
105 	/* Signal that we need to update the active queries on the next draw */
106 	ctx->update_active_queries = true;
107 
108 	/* add to active list: */
109 	assert(list_is_empty(&aq->node));
110 	list_addtail(&aq->node, &ctx->acc_active_queries);
111 
112 	/* TIMESTAMP/GPU_FINISHED and don't do normal bracketing at draw time, we
113 	 * need to just emit the capture at this moment.
114 	 */
115 	if (skip_begin_query(q->type))
116 		fd_acc_query_resume(aq, fd_context_batch(ctx));
117 }
118 
119 static void
fd_acc_end_query(struct fd_context * ctx,struct fd_query * q)120 fd_acc_end_query(struct fd_context *ctx, struct fd_query *q)
121 {
122 	struct fd_acc_query *aq = fd_acc_query(q);
123 
124 	DBG("%p", q);
125 
126 	fd_acc_query_pause(aq);
127 
128 	/* remove from active list: */
129 	list_delinit(&aq->node);
130 }
131 
132 static bool
fd_acc_get_query_result(struct fd_context * ctx,struct fd_query * q,bool wait,union pipe_query_result * result)133 fd_acc_get_query_result(struct fd_context *ctx, struct fd_query *q,
134 		bool wait, union pipe_query_result *result)
135 {
136 	struct fd_acc_query *aq = fd_acc_query(q);
137 	const struct fd_acc_sample_provider *p = aq->provider;
138 	struct fd_resource *rsc = fd_resource(aq->prsc);
139 
140 	DBG("%p: wait=%d", q, wait);
141 
142 	assert(list_is_empty(&aq->node));
143 
144 	/* if !wait, then check the last sample (the one most likely to
145 	 * not be ready yet) and bail if it is not ready:
146 	 */
147 	if (!wait) {
148 		int ret;
149 
150 		if (pending(rsc, false)) {
151 			/* piglit spec@arb_occlusion_query@occlusion_query_conform
152 			 * test, and silly apps perhaps, get stuck in a loop trying
153 			 * to get  query result forever with wait==false..  we don't
154 			 * wait to flush unnecessarily but we also don't want to
155 			 * spin forever:
156 			 */
157 			if (aq->no_wait_cnt++ > 5)
158 				fd_batch_flush(rsc->write_batch);
159 			return false;
160 		}
161 
162 		ret = fd_bo_cpu_prep(rsc->bo, ctx->pipe,
163 				DRM_FREEDRENO_PREP_READ | DRM_FREEDRENO_PREP_NOSYNC);
164 		if (ret)
165 			return false;
166 
167 		fd_bo_cpu_fini(rsc->bo);
168 	}
169 
170 	if (rsc->write_batch)
171 		fd_batch_flush(rsc->write_batch);
172 
173 	/* get the result: */
174 	fd_bo_cpu_prep(rsc->bo, ctx->pipe, DRM_FREEDRENO_PREP_READ);
175 
176 	void *ptr = fd_bo_map(rsc->bo);
177 	p->result(aq, ptr, result);
178 	fd_bo_cpu_fini(rsc->bo);
179 
180 	return true;
181 }
182 
183 static const struct fd_query_funcs acc_query_funcs = {
184 		.destroy_query    = fd_acc_destroy_query,
185 		.begin_query      = fd_acc_begin_query,
186 		.end_query        = fd_acc_end_query,
187 		.get_query_result = fd_acc_get_query_result,
188 };
189 
190 struct fd_query *
fd_acc_create_query2(struct fd_context * ctx,unsigned query_type,unsigned index,const struct fd_acc_sample_provider * provider)191 fd_acc_create_query2(struct fd_context *ctx, unsigned query_type,
192 		unsigned index, const struct fd_acc_sample_provider *provider)
193 {
194 	struct fd_acc_query *aq;
195 	struct fd_query *q;
196 
197 	aq = CALLOC_STRUCT(fd_acc_query);
198 	if (!aq)
199 		return NULL;
200 
201 	DBG("%p: query_type=%u", aq, query_type);
202 
203 	aq->provider = provider;
204 	aq->size = provider->size;
205 
206 	list_inithead(&aq->node);
207 
208 	q = &aq->base;
209 	q->funcs = &acc_query_funcs;
210 	q->type = query_type;
211 	q->index = index;
212 
213 	return q;
214 }
215 
216 struct fd_query *
fd_acc_create_query(struct fd_context * ctx,unsigned query_type,unsigned index)217 fd_acc_create_query(struct fd_context *ctx, unsigned query_type,
218 		unsigned index)
219 {
220 	int idx = pidx(query_type);
221 
222 	if ((idx < 0) || !ctx->acc_sample_providers[idx])
223 		return NULL;
224 
225 	return fd_acc_create_query2(ctx, query_type, index,
226 			ctx->acc_sample_providers[idx]);
227 }
228 
/* Called at clear/draw/blit time to enable/disable the appropriate queries in
 * the batch (and transfer active querying between batches in the case of
 * batch reordering).
 */
233 void
fd_acc_query_set_stage(struct fd_batch * batch,enum fd_render_stage stage)234 fd_acc_query_set_stage(struct fd_batch *batch, enum fd_render_stage stage)
235 {
236 	struct fd_context *ctx = batch->ctx;
237 
238 	if (stage != batch->stage || ctx->update_active_queries) {
239 		struct fd_acc_query *aq;
240 		LIST_FOR_EACH_ENTRY(aq, &ctx->acc_active_queries, node) {
241 			bool batch_change = aq->batch != batch;
242 			bool was_active = aq->batch != NULL;
243 			bool now_active = stage != FD_STAGE_NULL &&
244 				(ctx->active_queries || aq->provider->always);
245 
246 			if (was_active && (!now_active || batch_change))
247 				fd_acc_query_pause(aq);
248 			if (now_active && (!was_active || batch_change))
249 				fd_acc_query_resume(aq, batch);
250 		}
251 	}
252 
253 	ctx->update_active_queries = false;
254 }
255 
256 void
fd_acc_query_register_provider(struct pipe_context * pctx,const struct fd_acc_sample_provider * provider)257 fd_acc_query_register_provider(struct pipe_context *pctx,
258 		const struct fd_acc_sample_provider *provider)
259 {
260 	struct fd_context *ctx = fd_context(pctx);
261 	int idx = pidx(provider->query_type);
262 
263 	assert((0 <= idx) && (idx < MAX_HW_SAMPLE_PROVIDERS));
264 	assert(!ctx->acc_sample_providers[idx]);
265 
266 	ctx->acc_sample_providers[idx] = provider;
267 }
268