• Home
  • Line#
  • Scopes#
  • Navigate#
  • Raw
  • Download
1 /*
2  * Copyright (C) 2014 Rob Clark <robclark@freedesktop.org>
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice (including the next
12  * paragraph) shall be included in all copies or substantial portions of the
13  * Software.
14  *
15  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
18  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20  * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21  * SOFTWARE.
22  *
23  * Authors:
24  *    Rob Clark <robclark@freedesktop.org>
25  */
26 
27 #ifndef FREEDRENO_QUERY_HW_H_
28 #define FREEDRENO_QUERY_HW_H_
29 
30 #include "util/list.h"
31 
32 #include "freedreno_query.h"
33 #include "freedreno_context.h"
34 
35 
36 /*
37  * HW Queries:
38  *
39  * See: https://github.com/freedreno/freedreno/wiki/Queries#hardware-queries
40  *
41  * Hardware queries will be specific to gpu generation, but they need
42  * some common infrastructure for triggering start/stop samples at
43  * various points (for example, to exclude mem2gmem/gmem2mem or clear)
44  * as well as per tile tracking.
45  *
46  * NOTE: in at least some cases hw writes sample values to memory addr
47  * specified in some register.  So we don't really have the option to
48  * just sample the same counter multiple times for multiple different
49  * queries with the same query_type.  So we cache per sample provider
50  * the most recent sample since the last draw.  This way multiple
51  * sample periods for multiple queries can reference the same sample.
52  *
53  * fd_hw_sample_provider:
54  *   - one per query type, registered/implemented by gpu generation
55  *     specific code
56  *   - can construct fd_hw_samples on demand
57  *   - most recent sample (since last draw) cached so multiple
58  *     different queries can ref the same sample
59  *
60  * fd_hw_sample:
61  *   - abstracts one snapshot of counter value(s) across N tiles
62  *   - backing object not allocated until submit time when number
63  *     of samples and number of tiles is known
64  *
65  * fd_hw_sample_period:
66  *   - consists of start and stop sample
67  *   - a query accumulates a list of sample periods
68  *   - the query result is the sum of the sample periods
69  */
70 
/* Per-query-type sample provider, registered/implemented by gpu
 * generation specific code (see the design notes at the top of this
 * header).  One provider exists per query type.
 */
struct fd_hw_sample_provider {
	/* the query type this provider implements: */
	unsigned query_type;

	/* stages applicable to the query type: */
	enum fd_render_stage active;

	/* Optional hook for enabling a counter.  Guaranteed to happen
	 * at least once before the first ->get_sample() in a batch.
	 */
	void (*enable)(struct fd_context *ctx, struct fd_ringbuffer *ring);

	/* when a new sample is required, emit appropriate cmdstream
	 * and return a sample object:
	 */
	struct fd_hw_sample *(*get_sample)(struct fd_batch *batch,
			struct fd_ringbuffer *ring);

	/* accumulate the results from specified sample period (raw
	 * start/end snapshot data) into 'result':
	 */
	void (*accumulate_result)(struct fd_context *ctx,
			const void *start, const void *end,
			union pipe_query_result *result);
};
93 
/* One snapshot of counter value(s) across N tiles (see the design
 * notes at the top of this header).  Refcounted so multiple sample
 * periods / queries can reference the same sample.
 */
struct fd_hw_sample {
	struct pipe_reference reference;  /* keep this first */

	/* offset and size of the sample are known at the time the
	 * sample is constructed.
	 */
	uint32_t size;
	uint32_t offset;

	/* backing object, offset/stride/etc are determined not when
	 * the sample is constructed, but when the batch is submitted.
	 * This way we can defer allocation until total # of requested
	 * samples, and total # of tiles, is known.
	 */
	struct pipe_resource *prsc;
	uint32_t num_tiles;
	uint32_t tile_stride;
};
112 
113 struct fd_hw_sample_period;
114 
/* HW query object: extends the base fd_query with the machinery for
 * accumulating per-tile sample periods (see notes at top of header).
 */
struct fd_hw_query {
	struct fd_query base;   /* base class, must be first for downcast */

	/* provider for this query's type, registered by gen-specific code: */
	const struct fd_hw_sample_provider *provider;

	/* list of fd_hw_sample_periods: */
	struct list_head periods;

	/* if active and not paused, the current sample period (not
	 * yet added to 'periods'):
	 */
	struct fd_hw_sample_period *period;

	struct list_head list;   /* list-node in batch->active_queries */

	int no_wait_cnt;         /* see fd_hw_get_query_result */
};
132 
/* Downcast from the base query to the hw-query subclass.  Safe
 * because fd_hw_query embeds its fd_query as the first member.
 */
static inline struct fd_hw_query *
fd_hw_query(struct fd_query *q)
{
	struct fd_hw_query *hq = (struct fd_hw_query *)q;

	return hq;
}
138 
/* create a hw query of the given type (returns NULL-able base query): */
struct fd_query * fd_hw_create_query(struct fd_context *ctx, unsigned query_type, unsigned index);
/* helper for sample providers: */
struct fd_hw_sample * fd_hw_sample_init(struct fd_batch *batch, uint32_t size);
/* don't call directly, use fd_hw_sample_reference() */
void __fd_hw_sample_destroy(struct fd_context *ctx, struct fd_hw_sample *samp);
/* called at submit time once num_tiles is known — presumably sizes the
 * deferred backing storage for the batch's samples (see fd_hw_sample
 * comment); TODO(review) confirm against the implementation:
 */
void fd_hw_query_prepare(struct fd_batch *batch, uint32_t num_tiles);
/* per-tile setup for tile 'n' of the batch: */
void fd_hw_query_prepare_tile(struct fd_batch *batch, uint32_t n,
		struct fd_ringbuffer *ring);
/* notify query machinery of render stage transitions (used to start/
 * stop sample periods, per the notes at the top of this header):
 */
void fd_hw_query_set_stage(struct fd_batch *batch, enum fd_render_stage stage);
/* invoke providers' optional ->enable() hooks for the batch: */
void fd_hw_query_enable(struct fd_batch *batch, struct fd_ringbuffer *ring);
/* register a gen-specific sample provider (one per query type): */
void fd_hw_query_register_provider(struct pipe_context *pctx,
		const struct fd_hw_sample_provider *provider);
void fd_hw_query_init(struct pipe_context *pctx);
void fd_hw_query_fini(struct pipe_context *pctx);
153 
154 static inline void
fd_hw_sample_reference(struct fd_context * ctx,struct fd_hw_sample ** ptr,struct fd_hw_sample * samp)155 fd_hw_sample_reference(struct fd_context *ctx,
156 		struct fd_hw_sample **ptr, struct fd_hw_sample *samp)
157 {
158 	struct fd_hw_sample *old_samp = *ptr;
159 
160 	if (pipe_reference(&(*ptr)->reference, &samp->reference))
161 		__fd_hw_sample_destroy(ctx, old_samp);
162 	*ptr = samp;
163 }
164 
165 #endif /* FREEDRENO_QUERY_HW_H_ */
166