/*
 * Copyright © 2020 Google, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include "ir3/ir3_compiler.h"

#include "util/u_math.h"

#include "adreno_pm4.xml.h"
#include "adreno_common.xml.h"
#include "a6xx.xml.h"

#include "main.h"
#include "ir3_asm.h"

struct a6xx_backend {
	struct backend base;

	struct ir3_compiler *compiler;
	struct fd_device *dev;

	unsigned seqno;
	struct fd_bo *control_mem;

	struct fd_bo *query_mem;
	const struct perfcntr *perfcntrs;
	unsigned num_perfcntrs;
};
define_cast(backend, a6xx_backend);
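/* define_cast() (presumably provided by main.h) generates the
 * to_a6xx_backend() helper used below to recover the a6xx_backend from the
 * generic struct backend pointer embedded as its first member.
 */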

/*
 * Data structures shared with GPU:
 */

/* This struct defines the layout of the fd6_context::control buffer: */
struct fd6_control {
	uint32_t seqno;          /* seqno for async CP_EVENT_WRITE, etc */
	uint32_t _pad0;
	volatile uint32_t vsc_overflow;
	uint32_t _pad1;
	/* flag set from cmdstream when VSC overflow detected: */
	uint32_t vsc_scratch;
	uint32_t _pad2;
	uint32_t _pad3;
	uint32_t _pad4;

	/* scratch space for VPC_SO[i].FLUSH_BASE_LO/HI, start on 32 byte boundary. */
	struct {
		uint32_t offset;
		uint32_t pad[7];
	} flush_base[4];
};

#define control_ptr(a6xx_backend, member)  \
	(a6xx_backend)->control_mem, offsetof(struct fd6_control, member), 0, 0

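/* control_ptr() expands to the trailing arguments (bo, offset, and two zeros)
 * that OUT_RELOC() takes after the ring, so callers can reference a field of
 * the control buffer with e.g.:
 *
 *   OUT_RELOC(ring, control_ptr(a6xx_backend, seqno));
 */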

struct PACKED fd6_query_sample {
	uint64_t start;
	uint64_t result;
	uint64_t stop;
};


/* offset of a single field of an array of fd6_query_sample: */
#define query_sample_idx(a6xx_backend, idx, field)    \
	(a6xx_backend)->query_mem,                        \
	(idx * sizeof(struct fd6_query_sample)) +         \
	offsetof(struct fd6_query_sample, field),         \
	0, 0

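/* For example, query_sample_idx(b, 1, stop) resolves to an offset of
 * 1 * sizeof(struct fd6_query_sample) + offsetof(stop) = 24 + 16 = 40 bytes
 * into query_mem, again in the (bo, offset, 0, 0) form that OUT_RELOC()
 * expects.
 */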

/*
 * Backend implementation:
 */

static struct kernel *
a6xx_assemble(struct backend *b, FILE *in)
{
	struct a6xx_backend *a6xx_backend = to_a6xx_backend(b);
	struct ir3_kernel *ir3_kernel =
		ir3_asm_assemble(a6xx_backend->compiler, in);
	ir3_kernel->backend = b;
	return &ir3_kernel->base;
}

static void
a6xx_disassemble(struct kernel *kernel, FILE *out)
{
	ir3_asm_disassemble(to_ir3_kernel(kernel), out);
}

static void
cs_program_emit(struct fd_ringbuffer *ring, struct kernel *kernel)
{
	struct ir3_kernel *ir3_kernel = to_ir3_kernel(kernel);
	struct ir3_shader_variant *v = ir3_kernel->v;
	const struct ir3_info *i = &v->info;
	enum a3xx_threadsize thrsz = FOUR_QUADS;

	OUT_PKT4(ring, REG_A6XX_SP_MODE_CONTROL, 1);
	OUT_RING(ring, A6XX_SP_MODE_CONTROL_CONSTANT_DEMOTION_ENABLE | 4);

	OUT_PKT4(ring, REG_A6XX_HLSQ_INVALIDATE_CMD, 1);
	OUT_RING(ring, A6XX_HLSQ_INVALIDATE_CMD_VS_STATE |
                   A6XX_HLSQ_INVALIDATE_CMD_HS_STATE |
                   A6XX_HLSQ_INVALIDATE_CMD_DS_STATE |
                   A6XX_HLSQ_INVALIDATE_CMD_GS_STATE |
                   A6XX_HLSQ_INVALIDATE_CMD_FS_STATE |
                   A6XX_HLSQ_INVALIDATE_CMD_CS_STATE |
                   A6XX_HLSQ_INVALIDATE_CMD_CS_IBO |
                   A6XX_HLSQ_INVALIDATE_CMD_GFX_IBO);

	unsigned constlen = align(v->constlen, 4);
	OUT_PKT4(ring, REG_A6XX_HLSQ_CS_CNTL, 1);
	OUT_RING(ring, A6XX_HLSQ_CS_CNTL_CONSTLEN(constlen) |
			A6XX_HLSQ_CS_CNTL_ENABLED);

	OUT_PKT4(ring, REG_A6XX_SP_CS_CONFIG, 2);
	OUT_RING(ring, A6XX_SP_CS_CONFIG_ENABLED |
		A6XX_SP_CS_CONFIG_NIBO(kernel->num_bufs) |
		A6XX_SP_CS_CONFIG_NTEX(v->num_samp) |
		A6XX_SP_CS_CONFIG_NSAMP(v->num_samp));    /* SP_VS_CONFIG */
	OUT_RING(ring, v->instrlen);                      /* SP_VS_INSTRLEN */

	OUT_PKT4(ring, REG_A6XX_SP_CS_CTRL_REG0, 1);
	OUT_RING(ring, A6XX_SP_CS_CTRL_REG0_THREADSIZE(thrsz) |
		A6XX_SP_CS_CTRL_REG0_FULLREGFOOTPRINT(i->max_reg + 1) |
		A6XX_SP_CS_CTRL_REG0_HALFREGFOOTPRINT(i->max_half_reg + 1) |
		COND(v->mergedregs, A6XX_SP_CS_CTRL_REG0_MERGEDREGS) |
		A6XX_SP_CS_CTRL_REG0_BRANCHSTACK(v->branchstack) |
		COND(v->need_pixlod, A6XX_SP_CS_CTRL_REG0_PIXLODENABLE));

	OUT_PKT4(ring, REG_A6XX_SP_CS_UNKNOWN_A9B1, 1);
	OUT_RING(ring, 0x41);

	uint32_t local_invocation_id, work_group_id;
	local_invocation_id = ir3_find_sysval_regid(v, SYSTEM_VALUE_LOCAL_INVOCATION_ID);
	work_group_id = ir3_find_sysval_regid(v, SYSTEM_VALUE_WORK_GROUP_ID);
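	/* ir3_find_sysval_regid() returns the register the compiler assigned to
	 * each system value; regid(63, 0) appears to be the "unused" encoding,
	 * which is also what the hardcoded UNK fields below are set to.
	 */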

	OUT_PKT4(ring, REG_A6XX_HLSQ_CS_CNTL_0, 2);
	OUT_RING(ring, A6XX_HLSQ_CS_CNTL_0_WGIDCONSTID(work_group_id) |
		A6XX_HLSQ_CS_CNTL_0_UNK0(regid(63, 0)) |
		A6XX_HLSQ_CS_CNTL_0_UNK1(regid(63, 0)) |
		A6XX_HLSQ_CS_CNTL_0_LOCALIDREGID(local_invocation_id));
	OUT_RING(ring, 0x2fc);             /* HLSQ_CS_UNKNOWN_B998 */

	OUT_PKT4(ring, REG_A6XX_SP_CS_OBJ_START_LO, 2);
	OUT_RELOC(ring, v->bo, 0, 0, 0);   /* SP_CS_OBJ_START_LO/HI */

	OUT_PKT4(ring, REG_A6XX_SP_CS_INSTRLEN, 1);
	OUT_RING(ring, v->instrlen);

	OUT_PKT4(ring, REG_A6XX_SP_CS_OBJ_START_LO, 2);
	OUT_RELOC(ring, v->bo, 0, 0, 0);

	OUT_PKT7(ring, CP_LOAD_STATE6_FRAG, 3);
	OUT_RING(ring, CP_LOAD_STATE6_0_DST_OFF(0) |
		CP_LOAD_STATE6_0_STATE_TYPE(ST6_SHADER) |
		CP_LOAD_STATE6_0_STATE_SRC(SS6_INDIRECT) |
		CP_LOAD_STATE6_0_STATE_BLOCK(SB6_CS_SHADER) |
		CP_LOAD_STATE6_0_NUM_UNIT(v->instrlen));
	OUT_RELOC(ring, v->bo, 0, 0, 0);
}

static void
emit_const(struct fd_ringbuffer *ring, uint32_t regid,
		uint32_t sizedwords, const uint32_t *dwords)
{
	uint32_t align_sz;

	debug_assert((regid % 4) == 0);

	align_sz = align(sizedwords, 4);

	OUT_PKT7(ring, CP_LOAD_STATE6_FRAG, 3 + align_sz);
	OUT_RING(ring, CP_LOAD_STATE6_0_DST_OFF(regid/4) |
		CP_LOAD_STATE6_0_STATE_TYPE(ST6_CONSTANTS) |
		CP_LOAD_STATE6_0_STATE_SRC(SS6_DIRECT) |
		CP_LOAD_STATE6_0_STATE_BLOCK(SB6_CS_SHADER) |
		CP_LOAD_STATE6_0_NUM_UNIT(DIV_ROUND_UP(sizedwords, 4)));
	OUT_RING(ring, CP_LOAD_STATE6_1_EXT_SRC_ADDR(0));
	OUT_RING(ring, CP_LOAD_STATE6_2_EXT_SRC_ADDR_HI(0));

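	/* CP_LOAD_STATE6 addresses constants in vec4 units (DST_OFF and NUM_UNIT
	 * above), which is why regid must be vec4-aligned and the payload is
	 * zero-padded out to a multiple of 4 dwords below.
	 */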
	for (uint32_t i = 0; i < sizedwords; i++) {
		OUT_RING(ring, dwords[i]);
	}

	/* Zero-pad to multiple of 4 dwords */
	for (uint32_t i = sizedwords; i < align_sz; i++) {
		OUT_RING(ring, 0);
	}
}


static void
cs_const_emit(struct fd_ringbuffer *ring, struct kernel *kernel, uint32_t grid[3])
{
	struct ir3_kernel *ir3_kernel = to_ir3_kernel(kernel);
	struct ir3_shader_variant *v = ir3_kernel->v;

	const struct ir3_const_state *const_state = ir3_const_state(v);
	uint32_t base = const_state->offsets.immediate;
	int size = DIV_ROUND_UP(const_state->immediates_count, 4);

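	/* info.numwg, when valid, appears to be the vec4-aligned dword position
	 * in the const file where the shader expects the workgroup count, so the
	 * grid dimensions are patched into the immediates before upload.
	 */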
	if (ir3_kernel->info.numwg != INVALID_REG) {
		assert((ir3_kernel->info.numwg & 0x3) == 0);
		int idx = ir3_kernel->info.numwg >> 2;
		const_state->immediates[idx * 4 + 0] = grid[0];
		const_state->immediates[idx * 4 + 1] = grid[1];
		const_state->immediates[idx * 4 + 2] = grid[2];
	}

	/* truncate size to avoid writing constants that shader
	 * does not use:
	 */
	size = MIN2(size + base, v->constlen) - base;

	/* convert out of vec4: */
	base *= 4;
	size *= 4;

	if (size > 0) {
		emit_const(ring, base, size, const_state->immediates);
	}
}

static void
cs_ibo_emit(struct fd_ringbuffer *ring, struct fd_submit *submit,
		struct kernel *kernel)
{
	struct fd_ringbuffer *state =
		fd_submit_new_ringbuffer(submit,
				kernel->num_bufs * 16 * 4,
				FD_RINGBUFFER_STREAMING);

	for (unsigned i = 0; i < kernel->num_bufs; i++) {
		/* size is encoded with low 15b in WIDTH and high bits in HEIGHT,
		 * in units of elements:
		 */
		unsigned sz = kernel->buf_sizes[i];
		unsigned width  = sz & MASK(15);
		unsigned height = sz >> 15;
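		/* e.g. a buffer of 100000 elements splits into
		 * width = 100000 & 0x7fff = 1696 and height = 100000 >> 15 = 3,
		 * so the GPU reconstructs the size as (height << 15) | width.
		 */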

		OUT_RING(state, A6XX_IBO_0_FMT(FMT6_32_UINT) |
			A6XX_IBO_0_TILE_MODE(0));
		OUT_RING(state, A6XX_IBO_1_WIDTH(width) |
			A6XX_IBO_1_HEIGHT(height));
		OUT_RING(state, A6XX_IBO_2_PITCH(0) |
			A6XX_IBO_2_UNK4 | A6XX_IBO_2_UNK31 |
			A6XX_IBO_2_TYPE(A6XX_TEX_1D));
		OUT_RING(state, A6XX_IBO_3_ARRAY_PITCH(0));
		OUT_RELOC(state, kernel->bufs[i], 0, 0, 0);
		OUT_RING(state, 0x00000000);
		OUT_RING(state, 0x00000000);
		OUT_RING(state, 0x00000000);
		OUT_RING(state, 0x00000000);
		OUT_RING(state, 0x00000000);
		OUT_RING(state, 0x00000000);
		OUT_RING(state, 0x00000000);
		OUT_RING(state, 0x00000000);
		OUT_RING(state, 0x00000000);
		OUT_RING(state, 0x00000000);
	}

	OUT_PKT7(ring, CP_LOAD_STATE6_FRAG, 3);
	OUT_RING(ring, CP_LOAD_STATE6_0_DST_OFF(0) |
		CP_LOAD_STATE6_0_STATE_TYPE(ST6_IBO) |
		CP_LOAD_STATE6_0_STATE_SRC(SS6_INDIRECT) |
		CP_LOAD_STATE6_0_STATE_BLOCK(SB6_CS_SHADER) |
		CP_LOAD_STATE6_0_NUM_UNIT(kernel->num_bufs));
	OUT_RB(ring, state);

	OUT_PKT4(ring, REG_A6XX_SP_CS_IBO_LO, 2);
	OUT_RB(ring, state);

	OUT_PKT4(ring, REG_A6XX_SP_CS_IBO_COUNT, 1);
	OUT_RING(ring, kernel->num_bufs);

	fd_ringbuffer_del(state);
}

static inline unsigned
event_write(struct fd_ringbuffer *ring, struct kernel *kernel,
		enum vgt_event_type evt, bool timestamp)
{
	unsigned seqno = 0;

	OUT_PKT7(ring, CP_EVENT_WRITE, timestamp ? 4 : 1);
	OUT_RING(ring, CP_EVENT_WRITE_0_EVENT(evt));
	if (timestamp) {
		struct ir3_kernel *ir3_kernel = to_ir3_kernel(kernel);
		struct a6xx_backend *a6xx_backend = to_a6xx_backend(ir3_kernel->backend);
		seqno = ++a6xx_backend->seqno;
		OUT_RELOC(ring, control_ptr(a6xx_backend, seqno));  /* ADDR_LO/HI */
		OUT_RING(ring, seqno);
	}

	return seqno;
}

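/* cache_flush() waits for the kernel to finish and for its writes to land:
 * first poll the control buffer until the RB_DONE_TS seqno written via
 * event_write() shows up, then issue CACHE_FLUSH_TS and wait (GTE) for its
 * seqno as well.
 */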
static inline void
cache_flush(struct fd_ringbuffer *ring, struct kernel *kernel)
{
	struct ir3_kernel *ir3_kernel = to_ir3_kernel(kernel);
	struct a6xx_backend *a6xx_backend = to_a6xx_backend(ir3_kernel->backend);
	unsigned seqno;

	seqno = event_write(ring, kernel, RB_DONE_TS, true);

	OUT_PKT7(ring, CP_WAIT_REG_MEM, 6);
	OUT_RING(ring, CP_WAIT_REG_MEM_0_FUNCTION(WRITE_EQ) |
		CP_WAIT_REG_MEM_0_POLL_MEMORY);
	OUT_RELOC(ring, control_ptr(a6xx_backend, seqno));
	OUT_RING(ring, CP_WAIT_REG_MEM_3_REF(seqno));
	OUT_RING(ring, CP_WAIT_REG_MEM_4_MASK(~0));
	OUT_RING(ring, CP_WAIT_REG_MEM_5_DELAY_LOOP_CYCLES(16));

	seqno = event_write(ring, kernel, CACHE_FLUSH_TS, true);

	OUT_PKT7(ring, CP_WAIT_MEM_GTE, 4);
	OUT_RING(ring, CP_WAIT_MEM_GTE_0_RESERVED(0));
	OUT_RELOC(ring, control_ptr(a6xx_backend, seqno));
	OUT_RING(ring, CP_WAIT_MEM_GTE_3_REF(seqno));
}

static void
a6xx_emit_grid(struct kernel *kernel, uint32_t grid[3], struct fd_submit *submit)
{
	struct ir3_kernel *ir3_kernel = to_ir3_kernel(kernel);
	struct a6xx_backend *a6xx_backend = to_a6xx_backend(ir3_kernel->backend);
	struct fd_ringbuffer *ring = fd_submit_new_ringbuffer(submit, 0,
			FD_RINGBUFFER_PRIMARY | FD_RINGBUFFER_GROWABLE);

	cs_program_emit(ring, kernel);
	cs_const_emit(ring, kernel, grid);
	cs_ibo_emit(ring, submit, kernel);

	OUT_PKT7(ring, CP_SET_MARKER, 1);
	OUT_RING(ring, A6XX_CP_SET_MARKER_0_MODE(RM6_COMPUTE));

	const unsigned *local_size = kernel->local_size;
	const unsigned *num_groups = grid;

	unsigned work_dim = 0;
	for (int i = 0; i < 3; i++) {
		if (!grid[i])
			break;
		work_dim++;
	}

	OUT_PKT4(ring, REG_A6XX_HLSQ_CS_NDRANGE_0, 7);
	OUT_RING(ring, A6XX_HLSQ_CS_NDRANGE_0_KERNELDIM(work_dim) |
		A6XX_HLSQ_CS_NDRANGE_0_LOCALSIZEX(local_size[0] - 1) |
		A6XX_HLSQ_CS_NDRANGE_0_LOCALSIZEY(local_size[1] - 1) |
		A6XX_HLSQ_CS_NDRANGE_0_LOCALSIZEZ(local_size[2] - 1));
	OUT_RING(ring, A6XX_HLSQ_CS_NDRANGE_1_GLOBALSIZE_X(local_size[0] * num_groups[0]));
	OUT_RING(ring, 0);            /* HLSQ_CS_NDRANGE_2_GLOBALOFF_X */
	OUT_RING(ring, A6XX_HLSQ_CS_NDRANGE_3_GLOBALSIZE_Y(local_size[1] * num_groups[1]));
	OUT_RING(ring, 0);            /* HLSQ_CS_NDRANGE_4_GLOBALOFF_Y */
	OUT_RING(ring, A6XX_HLSQ_CS_NDRANGE_5_GLOBALSIZE_Z(local_size[2] * num_groups[2]));
	OUT_RING(ring, 0);            /* HLSQ_CS_NDRANGE_6_GLOBALOFF_Z */

	OUT_PKT4(ring, REG_A6XX_HLSQ_CS_KERNEL_GROUP_X, 3);
	OUT_RING(ring, 1);            /* HLSQ_CS_KERNEL_GROUP_X */
	OUT_RING(ring, 1);            /* HLSQ_CS_KERNEL_GROUP_Y */
	OUT_RING(ring, 1);            /* HLSQ_CS_KERNEL_GROUP_Z */

	if (a6xx_backend->num_perfcntrs > 0) {
		a6xx_backend->query_mem = fd_bo_new(a6xx_backend->dev,
			a6xx_backend->num_perfcntrs * sizeof(struct fd6_query_sample),
			DRM_FREEDRENO_GEM_TYPE_KMEM, "query");

		/* configure the performance counters to count the requested
		 * countables:
		 */
		for (unsigned i = 0; i < a6xx_backend->num_perfcntrs; i++) {
			const struct perfcntr *counter = &a6xx_backend->perfcntrs[i];

			OUT_PKT4(ring, counter->select_reg, 1);
			OUT_RING(ring, counter->selector);
		}

		OUT_PKT7(ring, CP_WAIT_FOR_IDLE, 0);

		/* and snapshot the start values: */
		for (unsigned i = 0; i < a6xx_backend->num_perfcntrs; i++) {
			const struct perfcntr *counter = &a6xx_backend->perfcntrs[i];

			OUT_PKT7(ring, CP_REG_TO_MEM, 3);
			OUT_RING(ring, CP_REG_TO_MEM_0_64B |
				CP_REG_TO_MEM_0_REG(counter->counter_reg_lo));
			OUT_RELOC(ring, query_sample_idx(a6xx_backend, i, start));
		}
	}

	OUT_PKT7(ring, CP_EXEC_CS, 4);
	OUT_RING(ring, 0x00000000);
	OUT_RING(ring, CP_EXEC_CS_1_NGROUPS_X(grid[0]));
	OUT_RING(ring, CP_EXEC_CS_2_NGROUPS_Y(grid[1]));
	OUT_RING(ring, CP_EXEC_CS_3_NGROUPS_Z(grid[2]));

	OUT_PKT7(ring, CP_WAIT_FOR_IDLE, 0);

	if (a6xx_backend->num_perfcntrs > 0) {
		/* snapshot the end values: */
		for (unsigned i = 0; i < a6xx_backend->num_perfcntrs; i++) {
			const struct perfcntr *counter = &a6xx_backend->perfcntrs[i];

			OUT_PKT7(ring, CP_REG_TO_MEM, 3);
			OUT_RING(ring, CP_REG_TO_MEM_0_64B |
				CP_REG_TO_MEM_0_REG(counter->counter_reg_lo));
			OUT_RELOC(ring, query_sample_idx(a6xx_backend, i, stop));
		}

		/* and compute the result: */
		for (unsigned i = 0; i < a6xx_backend->num_perfcntrs; i++) {
			/* result += stop - start: */
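			/* CP_MEM_TO_MEM with the DOUBLE flag operates on 64b values,
			 * and NEG_C negates the last source, i.e. dst = srcA + srcB - srcC.
			 */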
			OUT_PKT7(ring, CP_MEM_TO_MEM, 9);
			OUT_RING(ring, CP_MEM_TO_MEM_0_DOUBLE |
					CP_MEM_TO_MEM_0_NEG_C);
			OUT_RELOC(ring, query_sample_idx(a6xx_backend, i, result));  /* dst */
			OUT_RELOC(ring, query_sample_idx(a6xx_backend, i, result));  /* srcA */
			OUT_RELOC(ring, query_sample_idx(a6xx_backend, i, stop));    /* srcB */
			OUT_RELOC(ring, query_sample_idx(a6xx_backend, i, start));   /* srcC */
		}
	}

	cache_flush(ring, kernel);
}

static void
a6xx_set_perfcntrs(struct backend *b, const struct perfcntr *perfcntrs,
		unsigned num_perfcntrs)
{
	struct a6xx_backend *a6xx_backend = to_a6xx_backend(b);

	a6xx_backend->perfcntrs = perfcntrs;
	a6xx_backend->num_perfcntrs = num_perfcntrs;
}

static void
a6xx_read_perfcntrs(struct backend *b, uint64_t *results)
{
	struct a6xx_backend *a6xx_backend = to_a6xx_backend(b);

	fd_bo_cpu_prep(a6xx_backend->query_mem, NULL, DRM_FREEDRENO_PREP_READ);
	struct fd6_query_sample *samples = fd_bo_map(a6xx_backend->query_mem);

	for (unsigned i = 0; i < a6xx_backend->num_perfcntrs; i++) {
		results[i] = samples[i].result;
	}
}

struct backend *
a6xx_init(struct fd_device *dev, uint32_t gpu_id)
{
	struct a6xx_backend *a6xx_backend = calloc(1, sizeof(*a6xx_backend));

	a6xx_backend->base = (struct backend) {
		.assemble = a6xx_assemble,
		.disassemble = a6xx_disassemble,
		.emit_grid = a6xx_emit_grid,
		.set_perfcntrs = a6xx_set_perfcntrs,
		.read_perfcntrs = a6xx_read_perfcntrs,
	};

	a6xx_backend->compiler = ir3_compiler_create(dev, gpu_id);
	a6xx_backend->dev = dev;

	a6xx_backend->control_mem = fd_bo_new(dev, 0x1000,
		DRM_FREEDRENO_GEM_TYPE_KMEM, "control");

	return &a6xx_backend->base;
}
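
/* Rough usage sketch, assuming a caller along the lines of computerator's
 * main.c (not shown here), which drives the backend purely through the
 * function pointers installed above:
 *
 *   struct backend *b = a6xx_init(dev, gpu_id);
 *   struct kernel *kernel = b->assemble(b, in);
 *   b->emit_grid(kernel, grid, submit);
 *
 * where `in`, `grid`, and `submit` are supplied by the caller.
 */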