• Home
  • Line#
  • Scopes#
  • Navigate#
  • Raw
  • Download
/*
 * Copyright (C) 2018 Jonathan Marek <jonathan@marek.ca>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * Authors:
 *    Jonathan Marek <jonathan@marek.ca>
 *    Rob Clark <robclark@freedesktop.org>
 */
27 
/* NOTE: perfcntrs are 48-bits but we only have 32-bit accumulate (?)
 * so we work with 32-bits only. we accumulate start/stop separately,
 * which differs from a5xx but works with only accumulate (no add/neg)
 */

#include "freedreno_query_acc.h"
#include "freedreno_resource.h"

#include "fd2_context.h"
#include "fd2_query.h"
38 
/* Layout of one accumulated sample in the query buffer object: 32-bit
 * start/stop snapshots for a single perfcntr (see NOTE at top of file
 * about why only 32 bits are used).  Must stay PACKED so the GPU-side
 * offsets computed by query_sample_idx() match the C layout.
 */
struct PACKED fd2_query_sample {
   struct fd_acc_query_sample base;
   uint32_t start;
   uint32_t stop;
};
DEFINE_CAST(fd_acc_query_sample, fd2_query_sample);
45 
/* offset of a single field of an array of fd2_query_sample: expands to
 * the (bo, offset, or-val, shift) argument list expected by OUT_RELOC(),
 * addressing element 'idx' of the sample array in the query's resource.
 */
#define query_sample_idx(aq, idx, field)                                       \
   fd_resource((aq)->prsc)->bo,                                                \
      (idx * sizeof(struct fd2_query_sample)) +                                \
         offsetof(struct fd2_query_sample, field),                             \
      0, 0

/* offset of a single field of fd2_query_sample: */
#define query_sample(aq, field) query_sample_idx(aq, 0, field)
55 
/*
 * Performance Counter (batch) queries:
 *
 * Only one of these is active at a time, per design of the gallium
 * batch_query API.  One perfcntr query tracks N query_types, each of
 * which has a 'fd_batch_query_entry' that maps it back to the
 * associated group and counter.
 */
64 
/* Maps one requested query_type back to its hw perfcntr group/countable. */
struct fd_batch_query_entry {
   uint8_t gid; /* group-id */
   uint8_t cid; /* countable-id within the group */
};

/* Per-query payload: one entry per requested query_type (flexible array
 * member, allocated with CALLOC_VARIANT_LENGTH_STRUCT in
 * fd2_create_batch_query()).
 */
struct fd_batch_query_data {
   struct fd_screen *screen;
   unsigned num_query_entries;
   struct fd_batch_query_entry query_entries[];
};
75 
76 static void
perfcntr_resume(struct fd_acc_query * aq,struct fd_batch * batch)77 perfcntr_resume(struct fd_acc_query *aq, struct fd_batch *batch) assert_dt
78 {
79    struct fd_batch_query_data *data = aq->query_data;
80    struct fd_screen *screen = data->screen;
81    struct fd_ringbuffer *ring = batch->draw;
82 
83    unsigned counters_per_group[screen->num_perfcntr_groups];
84    memset(counters_per_group, 0, sizeof(counters_per_group));
85 
86    fd_wfi(batch, ring);
87 
88    /* configure performance counters for the requested queries: */
89    for (unsigned i = 0; i < data->num_query_entries; i++) {
90       struct fd_batch_query_entry *entry = &data->query_entries[i];
91       const struct fd_perfcntr_group *g = &screen->perfcntr_groups[entry->gid];
92       unsigned counter_idx = counters_per_group[entry->gid]++;
93 
94       assert(counter_idx < g->num_counters);
95 
96       OUT_PKT0(ring, g->counters[counter_idx].select_reg, 1);
97       OUT_RING(ring, g->countables[entry->cid].selector);
98    }
99 
100    memset(counters_per_group, 0, sizeof(counters_per_group));
101 
102    /* and snapshot the start values */
103    for (unsigned i = 0; i < data->num_query_entries; i++) {
104       struct fd_batch_query_entry *entry = &data->query_entries[i];
105       const struct fd_perfcntr_group *g = &screen->perfcntr_groups[entry->gid];
106       unsigned counter_idx = counters_per_group[entry->gid]++;
107       const struct fd_perfcntr_counter *counter = &g->counters[counter_idx];
108 
109       OUT_PKT3(ring, CP_REG_TO_MEM, 2);
110       OUT_RING(ring, counter->counter_reg_lo | CP_REG_TO_MEM_0_ACCUMULATE);
111       OUT_RELOC(ring, query_sample_idx(aq, i, start));
112    }
113 }
114 
115 static void
perfcntr_pause(struct fd_acc_query * aq,struct fd_batch * batch)116 perfcntr_pause(struct fd_acc_query *aq, struct fd_batch *batch) assert_dt
117 {
118    struct fd_batch_query_data *data = aq->query_data;
119    struct fd_screen *screen = data->screen;
120    struct fd_ringbuffer *ring = batch->draw;
121 
122    unsigned counters_per_group[screen->num_perfcntr_groups];
123    memset(counters_per_group, 0, sizeof(counters_per_group));
124 
125    fd_wfi(batch, ring);
126 
127    /* TODO do we need to bother to turn anything off? */
128 
129    /* snapshot the end values: */
130    for (unsigned i = 0; i < data->num_query_entries; i++) {
131       struct fd_batch_query_entry *entry = &data->query_entries[i];
132       const struct fd_perfcntr_group *g = &screen->perfcntr_groups[entry->gid];
133       unsigned counter_idx = counters_per_group[entry->gid]++;
134       const struct fd_perfcntr_counter *counter = &g->counters[counter_idx];
135 
136       OUT_PKT3(ring, CP_REG_TO_MEM, 2);
137       OUT_RING(ring, counter->counter_reg_lo | CP_REG_TO_MEM_0_ACCUMULATE);
138       OUT_RELOC(ring, query_sample_idx(aq, i, stop));
139    }
140 }
141 
142 static void
perfcntr_accumulate_result(struct fd_acc_query * aq,struct fd_acc_query_sample * s,union pipe_query_result * result)143 perfcntr_accumulate_result(struct fd_acc_query *aq,
144                            struct fd_acc_query_sample *s,
145                            union pipe_query_result *result)
146 {
147    struct fd_batch_query_data *data = aq->query_data;
148    struct fd2_query_sample *sp = fd2_query_sample(s);
149 
150    for (unsigned i = 0; i < data->num_query_entries; i++)
151       result->batch[i].u64 = sp[i].stop - sp[i].start;
152 }
153 
/* Sample provider hooked into the generic accumulated-query machinery
 * (freedreno_query_acc).  NOTE(review): .always presumably forces
 * sampling regardless of query-type-specific gating — confirm against
 * freedreno_query_acc.h.
 */
static const struct fd_acc_sample_provider perfcntr = {
   .query_type = FD_QUERY_FIRST_PERFCNTR,
   .always = true,
   .resume = perfcntr_resume,
   .pause = perfcntr_pause,
   .result = perfcntr_accumulate_result,
};
161 
162 static struct pipe_query *
fd2_create_batch_query(struct pipe_context * pctx,unsigned num_queries,unsigned * query_types)163 fd2_create_batch_query(struct pipe_context *pctx, unsigned num_queries,
164                        unsigned *query_types)
165 {
166    struct fd_context *ctx = fd_context(pctx);
167    struct fd_screen *screen = ctx->screen;
168    struct fd_query *q;
169    struct fd_acc_query *aq;
170    struct fd_batch_query_data *data;
171 
172    data = CALLOC_VARIANT_LENGTH_STRUCT(
173       fd_batch_query_data, num_queries * sizeof(data->query_entries[0]));
174 
175    data->screen = screen;
176    data->num_query_entries = num_queries;
177 
178    /* validate the requested query_types and ensure we don't try
179     * to request more query_types of a given group than we have
180     * counters:
181     */
182    unsigned counters_per_group[screen->num_perfcntr_groups];
183    memset(counters_per_group, 0, sizeof(counters_per_group));
184 
185    for (unsigned i = 0; i < num_queries; i++) {
186       unsigned idx = query_types[i] - FD_QUERY_FIRST_PERFCNTR;
187 
188       /* verify valid query_type, ie. is it actually a perfcntr? */
189       if ((query_types[i] < FD_QUERY_FIRST_PERFCNTR) ||
190           (idx >= screen->num_perfcntr_queries)) {
191          mesa_loge("invalid batch query query_type: %u", query_types[i]);
192          goto error;
193       }
194 
195       struct fd_batch_query_entry *entry = &data->query_entries[i];
196       struct pipe_driver_query_info *pq = &screen->perfcntr_queries[idx];
197 
198       entry->gid = pq->group_id;
199 
200       /* the perfcntr_queries[] table flattens all the countables
201        * for each group in series, ie:
202        *
203        *   (G0,C0), .., (G0,Cn), (G1,C0), .., (G1,Cm), ...
204        *
205        * So to find the countable index just step back through the
206        * table to find the first entry with the same group-id.
207        */
208       while (pq > screen->perfcntr_queries) {
209          pq--;
210          if (pq->group_id == entry->gid)
211             entry->cid++;
212       }
213 
214       if (counters_per_group[entry->gid] >=
215           screen->perfcntr_groups[entry->gid].num_counters) {
216          mesa_loge("too many counters for group %u", entry->gid);
217          goto error;
218       }
219 
220       counters_per_group[entry->gid]++;
221    }
222 
223    q = fd_acc_create_query2(ctx, 0, 0, &perfcntr);
224    aq = fd_acc_query(q);
225 
226    /* sample buffer size is based on # of queries: */
227    aq->size = num_queries * sizeof(struct fd2_query_sample);
228    aq->query_data = data;
229 
230    return (struct pipe_query *)q;
231 
232 error:
233    free(data);
234    return NULL;
235 }
236 
/* Hook up the a2xx query entrypoints: generic accumulated-query paths
 * on the fd_context, and the perfcntr batch-query path on the gallium
 * context.
 */
void
fd2_query_context_init(struct pipe_context *pctx) disable_thread_safety_analysis
{
   struct fd_context *ctx = fd_context(pctx);

   ctx->create_query = fd_acc_create_query;
   ctx->query_update_batch = fd_acc_query_update_batch;

   pctx->create_batch_query = fd2_create_batch_query;
}
247