• Home
  • Line#
  • Scopes#
  • Navigate#
  • Raw
  • Download
/*
 * Copyright (C) 2014 Rob Clark <robclark@freedesktop.org>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * Authors:
 *    Rob Clark <robclark@freedesktop.org>
 */
26 
27 #ifndef FREEDRENO_QUERY_HW_H_
28 #define FREEDRENO_QUERY_HW_H_
29 
30 #include "util/list.h"
31 
32 #include "freedreno_context.h"
33 #include "freedreno_query.h"
34 
/*
 * HW Queries:
 *
 * See: https://github.com/freedreno/freedreno/wiki/Queries#hardware-queries
 *
 * Hardware queries will be specific to gpu generation, but they need
 * some common infrastructure for triggering start/stop samples at
 * various points (for example, to exclude mem2gmem/gmem2mem or clear)
 * as well as per tile tracking.
 *
 * NOTE: in at least some cases hw writes sample values to memory addr
 * specified in some register.  So we don't really have the option to
 * just sample the same counter multiple times for multiple different
 * queries with the same query_type.  So we cache per sample provider
 * the most recent sample since the last draw.  This way multiple
 * sample periods for multiple queries can reference the same sample.
 *
 * fd_hw_sample_provider:
 *   - one per query type, registered/implemented by gpu generation
 *     specific code
 *   - can construct fd_hw_samples on demand
 *   - most recent sample (since last draw) cached so multiple
 *     different queries can ref the same sample
 *
 * fd_hw_sample:
 *   - abstracts one snapshot of counter value(s) across N tiles
 *   - backing object not allocated until submit time when number
 *     of samples and number of tiles is known
 *
 * fd_hw_sample_period:
 *   - consists of start and stop sample
 *   - a query accumulates a list of sample periods
 *   - the query result is the sum of the sample periods
 */
69 
/* Per-query-type backend hooks, registered by gpu-generation specific
 * code via fd_hw_query_register_provider().  A provider knows how to
 * emit cmdstream to snapshot its counter(s) and how to turn a pair of
 * start/end snapshots into a query result.
 */
struct fd_hw_sample_provider {
   /* PIPE_QUERY_x value this provider implements: */
   unsigned query_type;

   /* Set if the provider should still count while !ctx->active_queries */
   bool always;

   /* Optional hook for enabling a counter.  Guaranteed to happen
    * at least once before the first ->get_sample() in a batch.
    */
   void (*enable)(struct fd_context *ctx, struct fd_ringbuffer *ring) dt;

   /* when a new sample is required, emit appropriate cmdstream
    * and return a sample object:
    */
   struct fd_hw_sample *(*get_sample)(struct fd_batch *batch,
                                      struct fd_ringbuffer *ring) dt;

   /* accumulate the results from specified sample period: */
   void (*accumulate_result)(struct fd_context *ctx, const void *start,
                             const void *end, union pipe_query_result *result);
};
91 
/* One snapshot of counter value(s), taken once per tile.  Refcounted
 * so multiple sample periods (possibly from different queries) can
 * share the same sample; release with fd_hw_sample_reference().
 */
struct fd_hw_sample {
   struct pipe_reference reference; /* keep this first */

   /* offset and size of the sample are known at the time the
    * sample is constructed.
    */
   uint32_t size;   /* size of one per-tile snapshot, in bytes */
   uint32_t offset; /* offset of this sample within a tile's slot */

   /* backing object, offset/stride/etc are determined not when
    * the sample is constructed, but when the batch is submitted.
    * This way we can defer allocation until total # of requested
    * samples, and total # of tiles, is known.
    */
   struct pipe_resource *prsc;
   uint32_t num_tiles;
   uint32_t tile_stride; /* distance between consecutive tiles' data */
};
110 
111 struct fd_hw_sample_period;
112 
113 struct fd_hw_query {
114    struct fd_query base;
115 
116    const struct fd_hw_sample_provider *provider;
117 
118    /* list of fd_hw_sample_periods: */
119    struct list_head periods;
120 
121    /* if active and not paused, the current sample period (not
122     * yet added to current_periods):
123     */
124    struct fd_hw_sample_period *period;
125 
126    struct list_head list; /* list-node in batch->active_queries */
127 };
128 
/* Downcast the generic query to the hw-query subclass.  Valid because
 * fd_hw_query embeds fd_query as its first member.
 */
static inline struct fd_hw_query *
fd_hw_query(struct fd_query *q)
{
   return (struct fd_hw_query *)q;
}
134 
/* Create a hw query of the given type (must have a registered provider): */
struct fd_query *fd_hw_create_query(struct fd_context *ctx, unsigned query_type,
                                    unsigned index);
/* helper for sample providers: */
struct fd_hw_sample *fd_hw_sample_init(struct fd_batch *batch, uint32_t size);
/* don't call directly, use fd_hw_sample_reference() */
void __fd_hw_sample_destroy(struct fd_context *ctx, struct fd_hw_sample *samp);
/* NOTE(review): presumably allocates backing storage for the batch's
 * samples now that num_tiles is known (see fd_hw_sample comment above) --
 * confirm against the implementation.
 */
void fd_hw_query_prepare(struct fd_batch *batch, uint32_t num_tiles) assert_dt;
/* Per-tile setup, called once for tile n before its cmdstream is emitted: */
void fd_hw_query_prepare_tile(struct fd_batch *batch, uint32_t n,
                              struct fd_ringbuffer *ring) assert_dt;
void fd_hw_query_update_batch(struct fd_batch *batch, bool end_batch) assert_dt;
/* Emit providers' ->enable() hooks for the batch: */
void fd_hw_query_enable(struct fd_batch *batch,
                        struct fd_ringbuffer *ring) assert_dt;
/* Register a per-query-type sample provider (gpu-generation code): */
void
fd_hw_query_register_provider(struct pipe_context *pctx,
                              const struct fd_hw_sample_provider *provider);
void fd_hw_query_init(struct pipe_context *pctx);
void fd_hw_query_fini(struct pipe_context *pctx);
152 
/* Standard gallium-style refcounting helper: make *ptr reference samp,
 * destroying the previously referenced sample when pipe_reference()
 * reports its refcount dropped to zero.
 */
static inline void
fd_hw_sample_reference(struct fd_context *ctx, struct fd_hw_sample **ptr,
                       struct fd_hw_sample *samp)
{
   struct fd_hw_sample *old_samp = *ptr;

   /* NOTE(review): since 'reference' is the first member of
    * fd_hw_sample, &(*ptr)->reference yields NULL when *ptr is NULL,
    * which pipe_reference() is expected to tolerate -- the usual
    * gallium idiom; confirm against util/u_inlines.h.
    */
   if (pipe_reference(&(*ptr)->reference, &samp->reference))
      __fd_hw_sample_destroy(ctx, old_samp);
   *ptr = samp;
}
163 
164 #endif /* FREEDRENO_QUERY_HW_H_ */
165