/*
 * Copyright (C) 2017 Rob Clark <robclark@freedesktop.org>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * Authors:
 *    Rob Clark <robclark@freedesktop.org>
 */

#include "util/u_inlines.h"
#include "util/u_memory.h"

#include "freedreno_context.h"
#include "freedreno_query_acc.h"
#include "freedreno_resource.h"
#include "freedreno_util.h"

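/* Destroy the query: drop the reference to its result buffer, remove it from
 * the active-query list, and free any CPU-side result storage.
 */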
static void
fd_acc_destroy_query(struct fd_context *ctx, struct fd_query *q) assert_dt
{
   struct fd_acc_query *aq = fd_acc_query(q);

   DBG("%p", q);

   pipe_resource_reference(&aq->prsc, NULL);
   list_del(&aq->node);

   free(aq->query_data);
   free(aq);
}

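/* (Re)allocate the buffer that the GPU accumulates samples into, zeroing it
 * so stale data from a previous allocation cannot leak into the result.
 */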
static void
realloc_query_bo(struct fd_context *ctx, struct fd_acc_query *aq)
{
   struct fd_resource *rsc;
   void *map;

   pipe_resource_reference(&aq->prsc, NULL);

   aq->prsc =
      pipe_buffer_create(&ctx->screen->base, PIPE_BIND_QUERY_BUFFER, 0, 0x1000);

   /* don't assume the buffer is zero-initialized: */
   rsc = fd_resource(aq->prsc);

   fd_bo_cpu_prep(rsc->bo, ctx->pipe, FD_BO_PREP_WRITE);

   map = fd_bo_map(rsc->bo);
   memset(map, 0, aq->size);
   fd_bo_cpu_fini(rsc->bo);
}

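/* Stop sampling for a query that is currently attached to a batch: emit the
 * provider's pause commands into that batch and detach the query from it.
 */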
static void
fd_acc_query_pause(struct fd_acc_query *aq) assert_dt
{
   const struct fd_acc_sample_provider *p = aq->provider;

   if (!aq->batch)
      return;

   fd_batch_needs_flush(aq->batch);
   p->pause(aq, aq->batch);
   aq->batch = NULL;
}

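/* Attach the query to a batch and emit the provider's resume commands into
 * it, marking the query's result buffer as written by that batch.
 */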
static void
fd_acc_query_resume(struct fd_acc_query *aq, struct fd_batch *batch) assert_dt
{
   const struct fd_acc_sample_provider *p = aq->provider;

   aq->batch = batch;
   fd_batch_needs_flush(aq->batch);
   p->resume(aq, aq->batch);

   fd_screen_lock(batch->ctx->screen);
   fd_batch_resource_write(batch, fd_resource(aq->prsc));
   fd_screen_unlock(batch->ctx->screen);
}

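/* Begin the query: (re)allocate its result buffer (discarding any previous
 * results) and add it to the context's active list so subsequent draws
 * accumulate samples into it.
 */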
static void
fd_acc_begin_query(struct fd_context *ctx, struct fd_query *q) assert_dt
{
   struct fd_acc_query *aq = fd_acc_query(q);

   DBG("%p", q);

   /* ->begin_query() discards previous results, so realloc bo: */
   realloc_query_bo(ctx, aq);

   /* Signal that we need to update the active queries on the next draw */
   ctx->update_active_queries = true;

   /* add to active list: */
   assert(list_is_empty(&aq->node));
   list_addtail(&aq->node, &ctx->acc_active_queries);

   /* TIMESTAMP/GPU_FINISHED queries don't do normal bracketing at draw time;
    * we need to just emit the capture at this moment.
    */
   if (skip_begin_query(q->type)) {
      struct fd_batch *batch = fd_context_batch_locked(ctx);
      fd_acc_query_resume(aq, batch);
      fd_batch_unlock_submit(batch);
      fd_batch_reference(&batch, NULL);
   }
}

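/* End the query: pause sampling and remove the query from the active list;
 * the accumulated result can then be read back via get_query_result.
 */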
static void
fd_acc_end_query(struct fd_context *ctx, struct fd_query *q) assert_dt
{
   struct fd_acc_query *aq = fd_acc_query(q);

   DBG("%p", q);

   fd_acc_query_pause(aq);

   /* remove from active list: */
   list_delinit(&aq->node);
}

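/* Read back the result: flush any batch still writing the result buffer,
 * wait for it (or bail out if wait==false and it isn't ready yet), then let
 * the provider decode the accumulated samples into *result.
 */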
static bool
fd_acc_get_query_result(struct fd_context *ctx, struct fd_query *q, bool wait,
                        union pipe_query_result *result)
{
   struct fd_acc_query *aq = fd_acc_query(q);
   const struct fd_acc_sample_provider *p = aq->provider;
   struct fd_resource *rsc = fd_resource(aq->prsc);

   DBG("%p: wait=%d", q, wait);

   assert(list_is_empty(&aq->node));

   /* ARB_occlusion_query says:
    *
    *     "Querying the state for a given occlusion query forces that
    *      occlusion query to complete within a finite amount of time."
    *
    * So, regardless of whether we are supposed to wait or not, we do need to
    * flush now.
    */
   if (fd_get_query_result_in_driver_thread(q)) {
      tc_assert_driver_thread(ctx->tc);
      fd_context_access_begin(ctx);
      fd_bc_flush_writer(ctx, rsc);
      fd_context_access_end(ctx);
   }

   if (!wait) {
      int ret = fd_resource_wait(
         ctx, rsc, FD_BO_PREP_READ | FD_BO_PREP_NOSYNC | FD_BO_PREP_FLUSH);
      if (ret)
         return false;
   } else {
      fd_resource_wait(ctx, rsc, FD_BO_PREP_READ);
   }

   void *ptr = fd_bo_map(rsc->bo);
   p->result(aq, ptr, result);
   fd_bo_cpu_fini(rsc->bo);

   return true;
}

static const struct fd_query_funcs acc_query_funcs = {
   .destroy_query = fd_acc_destroy_query,
   .begin_query = fd_acc_begin_query,
   .end_query = fd_acc_end_query,
   .get_query_result = fd_acc_get_query_result,
};

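/* Create an accumulating query using the given sample provider directly,
 * without looking it up in the context's registered-provider table.
 */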
struct fd_query *
fd_acc_create_query2(struct fd_context *ctx, unsigned query_type,
                     unsigned index,
                     const struct fd_acc_sample_provider *provider)
{
   struct fd_acc_query *aq;
   struct fd_query *q;

   aq = CALLOC_STRUCT(fd_acc_query);
   if (!aq)
      return NULL;

   DBG("%p: query_type=%u", aq, query_type);

   aq->provider = provider;
   aq->size = provider->size;

   list_inithead(&aq->node);

   q = &aq->base;
   q->funcs = &acc_query_funcs;
   q->type = query_type;
   q->index = index;

   return q;
}

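/* Create an accumulating query for a query type that has a registered sample
 * provider; returns NULL if no provider handles the type.
 */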
struct fd_query *
fd_acc_create_query(struct fd_context *ctx, unsigned query_type, unsigned index)
{
   int idx = pidx(query_type);

   if ((idx < 0) || !ctx->acc_sample_providers[idx])
      return NULL;

   return fd_acc_create_query2(ctx, query_type, index,
                               ctx->acc_sample_providers[idx]);
}

/* Called at clear/draw/blit time to enable/disable the appropriate queries in
 * the batch (and transfer active querying between batches in the case of
 * batch reordering).
 */
void
fd_acc_query_update_batch(struct fd_batch *batch, bool disable_all)
{
   struct fd_context *ctx = batch->ctx;

   if (disable_all || ctx->update_active_queries) {
      struct fd_acc_query *aq;
      LIST_FOR_EACH_ENTRY (aq, &ctx->acc_active_queries, node) {
         bool batch_change = aq->batch != batch;
         bool was_active = aq->batch != NULL;
         bool now_active =
            !disable_all && (ctx->active_queries || aq->provider->always);

         if (was_active && (!now_active || batch_change))
            fd_acc_query_pause(aq);
         if (now_active && (!was_active || batch_change))
            fd_acc_query_resume(aq, batch);
      }
   }

   ctx->update_active_queries = false;
}

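/* Register a sample provider for its query type; at most one provider may be
 * registered per type.
 */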
void
fd_acc_query_register_provider(struct pipe_context *pctx,
                               const struct fd_acc_sample_provider *provider)
{
   struct fd_context *ctx = fd_context(pctx);
   int idx = pidx(provider->query_type);

   assert((0 <= idx) && (idx < MAX_HW_SAMPLE_PROVIDERS));
   assert(!ctx->acc_sample_providers[idx]);

   ctx->acc_sample_providers[idx] = provider;
}