/*
 * Copyright (C) 2017 Rob Clark <robclark@freedesktop.org>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * Authors:
 *    Rob Clark <robclark@freedesktop.org>
 */

#ifndef FREEDRENO_QUERY_ACC_H_
#define FREEDRENO_QUERY_ACC_H_

#include "util/list.h"

#include "freedreno_context.h"
#include "freedreno_query.h"
#include "freedreno_resource.h"

BEGINC;

/*
 * Accumulated HW Queries:
 *
 * Unlike the original HW Queries in earlier adreno generations (see
 * freedreno_query_hw.[ch]), later generations can accumulate the per-
 * tile results of some (a4xx) or all (a5xx+?) queries in the cmdstream.
 * But we still need to handle pausing/resuming the query across stage
 * changes (in particular when switching between batches).
 *
 * fd_acc_sample_provider:
 *   - one per accumulated query type, registered/implemented by gpu
 *     generation specific code
 *   - knows how to emit cmdstream to pause/resume a query instance
 *
 * fd_acc_query:
 *   - one instance per query object
 *   - each query object has its own result buffer, which may
 *     span multiple batches, etc.
 */
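
/*
 * The rough lifetime of a query object (a sketch of the flow; see
 * freedreno_query_acc.c for the actual state handling):
 *
 *   begin_query      -> provider->resume() is emitted into the current
 *                       batch (and again whenever the query later picks
 *                       up a new batch after having been paused)
 *   end_query        -> provider->pause() is emitted into the batch the
 *                       query last resumed on
 *   get_query_result -> wait for the sample's 'avail' to become non-zero,
 *                       then provider->result() converts the accumulated
 *                       sample into a pipe_query_result
 */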

struct fd_acc_query;

/**
 * Base class for all query samples.  The GPU writes 'avail' to one
 * when the query result is available.
 */
struct PACKED fd_acc_query_sample {
   uint64_t avail;
};


/**
 * Helper to assert that a sample struct field has the required alignment
 * (i.e. to catch issues at compile time if the struct fd_acc_query_sample
 * header ever changes, and to make the hw requirements more obvious)
 */
#define ASSERT_ALIGNED(type, field, nbytes)                                    \
   STATIC_ASSERT((offsetof(type, field) % nbytes) == 0)

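/*
 * For example, a generation specific sample struct embeds struct
 * fd_acc_query_sample as its first field and asserts the alignment of
 * its 64-bit counters.  A minimal sketch (the struct and field names
 * here are purely illustrative):
 *
 *    struct PACKED fd_example_sample {
 *       struct fd_acc_query_sample base;
 *       uint64_t begin;
 *       uint64_t result;
 *    };
 *    ASSERT_ALIGNED(struct fd_example_sample, begin, 8);
 *    ASSERT_ALIGNED(struct fd_example_sample, result, 8);
 */
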
struct fd_acc_sample_provider {
   unsigned query_type;

   /* Set if the provider should still count while !ctx->active_queries */
   bool always;

   unsigned size;

   void (*resume)(struct fd_acc_query *aq, struct fd_batch *batch) dt;
   void (*pause)(struct fd_acc_query *aq, struct fd_batch *batch) dt;

   void (*result)(struct fd_acc_query *aq, struct fd_acc_query_sample *s,
                  union pipe_query_result *result);
   void (*result_resource)(struct fd_acc_query *aq, struct fd_ringbuffer *ring,
                           enum pipe_query_value_type result_type, int index,
                           struct fd_resource *dst, unsigned offset);
};
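
/*
 * A provider is declared by generation specific code and registered via
 * fd_acc_query_register_provider() when the context is set up.  A minimal
 * sketch (all 'example_*' names are hypothetical):
 *
 *    static const struct fd_acc_sample_provider example_provider = {
 *       .query_type = PIPE_QUERY_OCCLUSION_COUNTER,
 *       .size = sizeof(struct fd_example_sample),
 *       .resume = example_resume,   // emits cmdstream to start counting
 *       .pause = example_pause,     // emits cmdstream to stop/accumulate
 *       .result = example_result,   // converts the sample on the CPU
 *    };
 *
 *    ...
 *    fd_acc_query_register_provider(pctx, &example_provider);
 */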

struct fd_acc_query {
   struct fd_query base;

   const struct fd_acc_sample_provider *provider;

   struct pipe_resource *prsc;

   /* Pointer to the batch that our query has had resume() called on (if
    * any).
    */
   struct fd_batch *batch;

   /* usually the same as provider->size but for batch queries we
    * need to calculate the size dynamically when the query is
    * allocated:
    */
   unsigned size;

   struct list_head node; /* list-node in ctx->active_acc_queries */

   void *query_data; /* query specific data */
};

static inline struct fd_acc_query *
fd_acc_query(struct fd_query *q)
{
   return (struct fd_acc_query *)q;
}
123
124 struct fd_query *fd_acc_create_query(struct fd_context *ctx,
125 unsigned query_type, unsigned index);
126 struct fd_query *
127 fd_acc_create_query2(struct fd_context *ctx, unsigned query_type,
128 unsigned index,
129 const struct fd_acc_sample_provider *provider);
130 void fd_acc_query_update_batch(struct fd_batch *batch,
131 bool disable_all) assert_dt;
132 void
133 fd_acc_query_register_provider(struct pipe_context *pctx,
134 const struct fd_acc_sample_provider *provider);
135
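/* Emits a CP_MEM_TO_MEM to copy a query result value (32b, or 64b for the
 * I64/U64 result types) from one BO to another on the GPU, for use when
 * writing query results to a resource.
 */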
static inline void
copy_result(struct fd_ringbuffer *ring, enum pipe_query_value_type result_type,
            struct fd_resource *dst, unsigned dst_offset,
            struct fd_resource *src, unsigned src_offset)
{
   fd_ringbuffer_attach_bo(ring, dst->bo);
   fd_ringbuffer_attach_bo(ring, src->bo);

   OUT_PKT7(ring, CP_MEM_TO_MEM, 5);
   OUT_RING(ring, COND(result_type >= PIPE_QUERY_TYPE_I64, CP_MEM_TO_MEM_0_DOUBLE));
   OUT_RELOC(ring, dst->bo, dst_offset, 0, 0);
   OUT_RELOC(ring, src->bo, src_offset, 0, 0);
}
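
/*
 * A provider's result_resource() hook would typically use copy_result() to
 * copy the accumulated value out of the query's result buffer into the
 * caller-provided destination, e.g. (a sketch; the 'example_*' names and
 * sample layout are hypothetical):
 *
 *    static void
 *    example_result_resource(struct fd_acc_query *aq, struct fd_ringbuffer *ring,
 *                            enum pipe_query_value_type result_type, int index,
 *                            struct fd_resource *dst, unsigned offset)
 *    {
 *       copy_result(ring, result_type, dst, offset, fd_resource(aq->prsc),
 *                   offsetof(struct fd_example_sample, result));
 *    }
 */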

ENDC;

#endif /* FREEDRENO_QUERY_ACC_H_ */