/*
 * Copyright © 2008-2009 Maciej Cencora <m.cencora@gmail.com>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Maciej Cencora <m.cencora@gmail.com>
 *
 */
#include "radeon_common.h"
#include "radeon_queryobj.h"
#include "radeon_debug.h"


#include "main/queryobj.h"

#include <inttypes.h>

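/* Map the query buffer object and accumulate all 32-bit little-endian
 * partial results written by the GPU into Base.Result. */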
static void radeonQueryGetResult(struct gl_context *ctx, struct gl_query_object *q)
{
	struct radeon_query_object *query = (struct radeon_query_object *)q;
	uint32_t *result;
	int i;

	radeon_print(RADEON_STATE, RADEON_VERBOSE,
			"%s: query id %d, result %d\n",
			__func__, query->Base.Id, (int) query->Base.Result);

	radeon_bo_map(query->bo, GL_FALSE);
	result = query->bo->ptr;

	query->Base.Result = 0;
	for (i = 0; i < query->curr_offset/sizeof(uint32_t); ++i) {
		query->Base.Result += LE32_TO_CPU(result[i]);
		radeon_print(RADEON_STATE, RADEON_TRACE, "result[%d] = %d\n", i, LE32_TO_CPU(result[i]));
	}

	radeon_bo_unmap(query->bo);
}

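/* Allocate and initialize a driver query object for the given id. */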
static struct gl_query_object * radeonNewQueryObject(struct gl_context *ctx, GLuint id)
{
	struct radeon_query_object *query;

	query = calloc(1, sizeof(struct radeon_query_object));
	if (!query)
		return NULL;

	query->Base.Id = id;
	query->Base.Result = 0;
	query->Base.Active = GL_FALSE;
	query->Base.Ready = GL_TRUE;

	radeon_print(RADEON_STATE, RADEON_VERBOSE,"%s: query id %d\n", __func__, query->Base.Id);

	return &query->Base;
}

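/* Drop the reference on the query's result buffer (if any) and free the query. */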
static void radeonDeleteQuery(struct gl_context *ctx, struct gl_query_object *q)
{
	struct radeon_query_object *query = (struct radeon_query_object *)q;

	radeon_print(RADEON_STATE, RADEON_NORMAL, "%s: query id %d\n", __func__, q->Id);

	if (query->bo) {
		radeon_bo_unref(query->bo);
	}

	_mesa_delete_query(ctx, q);
}

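/* Block until the result is available: flush the command stream if it still
 * references the query BO, then read the result back and mark the query ready. */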
static void radeonWaitQuery(struct gl_context *ctx, struct gl_query_object *q)
{
	radeonContextPtr radeon = RADEON_CONTEXT(ctx);
	struct radeon_query_object *query = (struct radeon_query_object *)q;

	/* If the cmdbuf with packets for this query hasn't been flushed yet, do it now */
	if (radeon_bo_is_referenced_by_cs(query->bo, radeon->cmdbuf.cs))
		ctx->Driver.Flush(ctx);

	radeon_print(RADEON_STATE, RADEON_VERBOSE, "%s: query id %d, bo %p, offset %d\n",
			__func__, q->Id, query->bo, query->curr_offset);

	radeonQueryGetResult(ctx, q);

	query->Base.Ready = GL_TRUE;
}


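/* Start a query: flush pending DMA, (re)use a GTT buffer object for the
 * results, reset the write offset and mark the query state atom dirty so the
 * begin packet is emitted with the next state emission. */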
static void radeonBeginQuery(struct gl_context *ctx, struct gl_query_object *q)
{
	radeonContextPtr radeon = RADEON_CONTEXT(ctx);
	struct radeon_query_object *query = (struct radeon_query_object *)q;

	radeon_print(RADEON_STATE, RADEON_NORMAL, "%s: query id %d\n", __func__, q->Id);

	assert(radeon->query.current == NULL);

	if (radeon->dma.flush)
		radeon->dma.flush(&radeon->glCtx);

	if (!query->bo) {
		query->bo = radeon_bo_open(radeon->radeonScreen->bom, 0,
					   RADEON_QUERY_PAGE_SIZE, RADEON_QUERY_PAGE_SIZE,
					   RADEON_GEM_DOMAIN_GTT, 0);
	}
	query->curr_offset = 0;

	radeon->query.current = query;

	radeon->query.queryobj.dirty = GL_TRUE;
	radeon->hw.is_dirty = GL_TRUE;
}

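/* Emit the hardware packets that close the currently active query; no-op when
 * there is no active query or its begin packet has not been emitted yet. */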
void radeonEmitQueryEnd(struct gl_context *ctx)
{
	radeonContextPtr radeon = RADEON_CONTEXT(ctx);
	struct radeon_query_object *query = radeon->query.current;

	if (!query)
		return;

	if (query->emitted_begin == GL_FALSE)
		return;

	radeon_print(RADEON_STATE, RADEON_NORMAL, "%s: query id %d, bo %p, offset %d\n",
			__func__, query->Base.Id, query->bo, query->curr_offset);

	radeon_cs_space_check_with_bo(radeon->cmdbuf.cs,
				      query->bo,
				      0, RADEON_GEM_DOMAIN_GTT);

	radeon->vtbl.emit_query_finish(radeon);
}

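/* End the active query: flush pending DMA, emit the query-end packets and
 * clear the current-query pointer. */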
static void radeonEndQuery(struct gl_context *ctx, struct gl_query_object *q)
{
	radeonContextPtr radeon = RADEON_CONTEXT(ctx);

	radeon_print(RADEON_STATE, RADEON_NORMAL, "%s: query id %d\n", __func__, q->Id);

	if (radeon->dma.flush)
		radeon->dma.flush(&radeon->glCtx);
	radeonEmitQueryEnd(ctx);

	radeon->query.current = NULL;
}

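/* Non-blocking availability check: when the GEM busy ioctl is available, poll
 * the query BO and fetch the result only once it is idle; otherwise fall back
 * to a blocking wait. */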
static void radeonCheckQuery(struct gl_context *ctx, struct gl_query_object *q)
{
	radeon_print(RADEON_STATE, RADEON_TRACE, "%s: query id %d\n", __func__, q->Id);

#ifdef DRM_RADEON_GEM_BUSY
	radeonContextPtr radeon = RADEON_CONTEXT(ctx);

	struct radeon_query_object *query = (struct radeon_query_object *)q;
	uint32_t domain;

	/* Need to perform a flush, as per ARB_occlusion_query spec */
	if (radeon_bo_is_referenced_by_cs(query->bo, radeon->cmdbuf.cs)) {
		ctx->Driver.Flush(ctx);
	}

	if (radeon_bo_is_busy(query->bo, &domain) == 0) {
		radeonQueryGetResult(ctx, q);
		query->Base.Ready = GL_TRUE;
	}
#else
	radeonWaitQuery(ctx, q);
#endif
}

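/* Plug the query object hooks into the driver function table. */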
void radeonInitQueryObjFunctions(struct dd_function_table *functions)
{
	functions->NewQueryObject = radeonNewQueryObject;
	functions->DeleteQuery = radeonDeleteQuery;
	functions->BeginQuery = radeonBeginQuery;
	functions->EndQuery = radeonEndQuery;
	functions->CheckQuery = radeonCheckQuery;
	functions->WaitQuery = radeonWaitQuery;
}

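/* State atom check callback: returns the atom's command size while an active
 * query still needs its begin packet emitted, 0 otherwise. */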
int radeon_check_query_active(struct gl_context *ctx, struct radeon_state_atom *atom)
{
	radeonContextPtr radeon = RADEON_CONTEXT(ctx);
	struct radeon_query_object *query = radeon->query.current;

	if (!query || query->emitted_begin)
		return 0;
	return atom->cmd_size;
}

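/* State atom emit callback: writes the query begin command table to the
 * command stream and marks the begin as emitted. */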
void radeon_emit_queryobj(struct gl_context *ctx, struct radeon_state_atom *atom)
{
	radeonContextPtr radeon = RADEON_CONTEXT(ctx);
	BATCH_LOCALS(radeon);
	int dwords;

	dwords = atom->check(ctx, atom);

	BEGIN_BATCH(dwords);
	OUT_BATCH_TABLE(atom->cmd, dwords);
	END_BATCH();

	radeon->query.current->emitted_begin = GL_TRUE;
}