/*
 * Copyright © 2020 Google, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include "ir3/ir3_compiler.h"

#include "util/u_math.h"

#include "adreno_pm4.xml.h"
#include "adreno_common.xml.h"
#include "a6xx.xml.h"

#include "common/freedreno_dev_info.h"

#include "ir3_asm.h"
#include "main.h"

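/* Per-device backend state: the ir3 compiler instance, device handle and
 * info, plus GPU-shared buffers used for seqno writebacks (control_mem)
 * and perf-counter samples (query_mem).
 */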
struct a6xx_backend {
   struct backend base;

   struct ir3_compiler *compiler;
   struct fd_device *dev;

   const struct fd_dev_info *info;

   unsigned seqno;
   struct fd_bo *control_mem;

   struct fd_bo *query_mem;
   const struct perfcntr *perfcntrs;
   unsigned num_perfcntrs;
};
define_cast(backend, a6xx_backend);

/*
 * Data structures shared with GPU:
 */

/* This struct defines the layout of the fd6_context::control buffer: */
struct fd6_control {
   uint32_t seqno; /* seqno for async CP_EVENT_WRITE, etc */
   uint32_t _pad0;
   volatile uint32_t vsc_overflow;
   uint32_t _pad1;
   /* flag set from cmdstream when VSC overflow detected: */
   uint32_t vsc_scratch;
   uint32_t _pad2;
   uint32_t _pad3;
   uint32_t _pad4;

   /* scratch space for VPC_SO[i].FLUSH_BASE_LO/HI, start on 32 byte boundary. */
   struct {
      uint32_t offset;
      uint32_t pad[7];
   } flush_base[4];
};

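/* Expands to the bo/offset/or/shift argument list that OUT_RELOC() expects,
 * for a given member of the fd6_control buffer:
 */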
#define control_ptr(a6xx_backend, member) \
   (a6xx_backend)->control_mem, offsetof(struct fd6_control, member), 0, 0

struct PACKED fd6_query_sample {
   uint64_t start;
   uint64_t result;
   uint64_t stop;
};

/* offset of a single field of an array of fd6_query_sample: */
#define query_sample_idx(a6xx_backend, idx, field) \
   (a6xx_backend)->query_mem, \
      (idx * sizeof(struct fd6_query_sample)) + \
         offsetof(struct fd6_query_sample, field), \
      0, 0

/*
 * Backend implementation:
 */

static struct kernel *
a6xx_assemble(struct backend *b, FILE *in)
{
   struct a6xx_backend *a6xx_backend = to_a6xx_backend(b);
   struct ir3_kernel *ir3_kernel = ir3_asm_assemble(a6xx_backend->compiler, in);
   ir3_kernel->backend = b;
   return &ir3_kernel->base;
}

static void
a6xx_disassemble(struct kernel *kernel, FILE *out)
{
   ir3_asm_disassemble(to_ir3_kernel(kernel), out);
}

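/* Emit the compute program state: shader config/control registers, thread
 * size, the shader iova (SP_CS_OBJ_START), an instruction preload via
 * CP_LOAD_STATE6, and private (scratch) memory setup when the shader needs it:
 */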
static void
cs_program_emit(struct fd_ringbuffer *ring, struct kernel *kernel)
{
   struct ir3_kernel *ir3_kernel = to_ir3_kernel(kernel);
   struct a6xx_backend *a6xx_backend = to_a6xx_backend(ir3_kernel->backend);
   struct ir3_shader_variant *v = ir3_kernel->v;
   const struct ir3_info *i = &v->info;
   enum a6xx_threadsize thrsz = i->double_threadsize ? THREAD128 : THREAD64;

   OUT_PKT4(ring, REG_A6XX_SP_MODE_CONTROL, 1);
   OUT_RING(ring, A6XX_SP_MODE_CONTROL_CONSTANT_DEMOTION_ENABLE | 4);

   OUT_PKT4(ring, REG_A6XX_SP_PERFCTR_ENABLE, 1);
   OUT_RING(ring, A6XX_SP_PERFCTR_ENABLE_CS);

   OUT_PKT4(ring, REG_A6XX_SP_FLOAT_CNTL, 1);
   OUT_RING(ring, 0);

   OUT_PKT4(ring, REG_A6XX_HLSQ_INVALIDATE_CMD, 1);
   OUT_RING(
      ring,
      A6XX_HLSQ_INVALIDATE_CMD_VS_STATE | A6XX_HLSQ_INVALIDATE_CMD_HS_STATE |
         A6XX_HLSQ_INVALIDATE_CMD_DS_STATE | A6XX_HLSQ_INVALIDATE_CMD_GS_STATE |
         A6XX_HLSQ_INVALIDATE_CMD_FS_STATE | A6XX_HLSQ_INVALIDATE_CMD_CS_STATE |
         A6XX_HLSQ_INVALIDATE_CMD_CS_IBO | A6XX_HLSQ_INVALIDATE_CMD_GFX_IBO);

   unsigned constlen = align(v->constlen, 4);
   OUT_PKT4(ring, REG_A6XX_HLSQ_CS_CNTL, 1);
   OUT_RING(ring,
            A6XX_HLSQ_CS_CNTL_CONSTLEN(constlen) | A6XX_HLSQ_CS_CNTL_ENABLED);

   OUT_PKT4(ring, REG_A6XX_SP_CS_CONFIG, 2);
   OUT_RING(ring, A6XX_SP_CS_CONFIG_ENABLED |
                     A6XX_SP_CS_CONFIG_NIBO(kernel->num_bufs) |
                     A6XX_SP_CS_CONFIG_NTEX(v->num_samp) |
                     A6XX_SP_CS_CONFIG_NSAMP(v->num_samp)); /* SP_CS_CONFIG */
   OUT_RING(ring, v->instrlen);                             /* SP_CS_INSTRLEN */

   OUT_PKT4(ring, REG_A6XX_SP_CS_CTRL_REG0, 1);
   OUT_RING(ring,
            A6XX_SP_CS_CTRL_REG0_THREADSIZE(thrsz) |
               A6XX_SP_CS_CTRL_REG0_FULLREGFOOTPRINT(i->max_reg + 1) |
               A6XX_SP_CS_CTRL_REG0_HALFREGFOOTPRINT(i->max_half_reg + 1) |
               COND(v->mergedregs, A6XX_SP_CS_CTRL_REG0_MERGEDREGS) |
               COND(ir3_kernel->info.early_preamble, A6XX_SP_CS_CTRL_REG0_EARLYPREAMBLE) |
               A6XX_SP_CS_CTRL_REG0_BRANCHSTACK(ir3_shader_branchstack_hw(v)));

   OUT_PKT4(ring, REG_A6XX_SP_CS_UNKNOWN_A9B1, 1);
   OUT_RING(ring, 0x41);

   if (a6xx_backend->info->a6xx.has_lpac) {
      OUT_PKT4(ring, REG_A6XX_HLSQ_CS_UNKNOWN_B9D0, 1);
      OUT_RING(ring, A6XX_HLSQ_CS_UNKNOWN_B9D0_SHARED_SIZE(1) |
                        A6XX_HLSQ_CS_UNKNOWN_B9D0_UNK6);
   }

   uint32_t local_invocation_id, work_group_id;
   local_invocation_id =
      ir3_find_sysval_regid(v, SYSTEM_VALUE_LOCAL_INVOCATION_ID);
   work_group_id = ir3_find_sysval_regid(v, SYSTEM_VALUE_WORKGROUP_ID);

   OUT_PKT4(ring, REG_A6XX_HLSQ_CS_CNTL_0, 2);
   OUT_RING(ring, A6XX_HLSQ_CS_CNTL_0_WGIDCONSTID(work_group_id) |
                     A6XX_HLSQ_CS_CNTL_0_WGSIZECONSTID(regid(63, 0)) |
                     A6XX_HLSQ_CS_CNTL_0_WGOFFSETCONSTID(regid(63, 0)) |
                     A6XX_HLSQ_CS_CNTL_0_LOCALIDREGID(local_invocation_id));
   OUT_RING(ring, A6XX_HLSQ_CS_CNTL_1_LINEARLOCALIDREGID(regid(63, 0)) |
                     A6XX_HLSQ_CS_CNTL_1_THREADSIZE(thrsz));

   if (a6xx_backend->info->a6xx.has_lpac) {
      OUT_PKT4(ring, REG_A6XX_SP_CS_CNTL_0, 2);
      OUT_RING(ring, A6XX_SP_CS_CNTL_0_WGIDCONSTID(work_group_id) |
                        A6XX_SP_CS_CNTL_0_WGSIZECONSTID(regid(63, 0)) |
                        A6XX_SP_CS_CNTL_0_WGOFFSETCONSTID(regid(63, 0)) |
                        A6XX_SP_CS_CNTL_0_LOCALIDREGID(local_invocation_id));
      OUT_RING(ring, A6XX_SP_CS_CNTL_1_LINEARLOCALIDREGID(regid(63, 0)) |
                        A6XX_SP_CS_CNTL_1_THREADSIZE(thrsz));
   }

   OUT_PKT4(ring, REG_A6XX_SP_CS_OBJ_START, 2);
   OUT_RELOC(ring, v->bo, 0, 0, 0); /* SP_CS_OBJ_START_LO/HI */

   OUT_PKT4(ring, REG_A6XX_SP_CS_INSTRLEN, 1);
   OUT_RING(ring, v->instrlen);

   OUT_PKT4(ring, REG_A6XX_SP_CS_OBJ_START, 2);
   OUT_RELOC(ring, v->bo, 0, 0, 0);

   uint32_t shader_preload_size =
      MIN2(v->instrlen, a6xx_backend->info->a6xx.instr_cache_size);
   OUT_PKT7(ring, CP_LOAD_STATE6_FRAG, 3);
   OUT_RING(ring, CP_LOAD_STATE6_0_DST_OFF(0) |
                     CP_LOAD_STATE6_0_STATE_TYPE(ST6_SHADER) |
                     CP_LOAD_STATE6_0_STATE_SRC(SS6_INDIRECT) |
                     CP_LOAD_STATE6_0_STATE_BLOCK(SB6_CS_SHADER) |
                     CP_LOAD_STATE6_0_NUM_UNIT(shader_preload_size));
   OUT_RELOC(ring, v->bo, 0, 0, 0);

   if (v->pvtmem_size > 0) {
      uint32_t per_fiber_size = ALIGN(v->pvtmem_size, 512);
      uint32_t per_sp_size =
         ALIGN(per_fiber_size * a6xx_backend->info->a6xx.fibers_per_sp, 1 << 12);
      uint32_t total_size = per_sp_size * a6xx_backend->info->num_sp_cores;

      struct fd_bo *pvtmem = fd_bo_new(a6xx_backend->dev, total_size, 0, "pvtmem");
      OUT_PKT4(ring, REG_A6XX_SP_CS_PVT_MEM_PARAM, 4);
      OUT_RING(ring, A6XX_SP_CS_PVT_MEM_PARAM_MEMSIZEPERITEM(per_fiber_size));
      OUT_RELOC(ring, pvtmem, 0, 0, 0);
      OUT_RING(ring, A6XX_SP_CS_PVT_MEM_SIZE_TOTALPVTMEMSIZE(per_sp_size) |
                        COND(v->pvtmem_per_wave,
                             A6XX_SP_CS_PVT_MEM_SIZE_PERWAVEMEMLAYOUT));

      OUT_PKT4(ring, REG_A6XX_SP_CS_PVT_MEM_HW_STACK_OFFSET, 1);
      OUT_RING(ring, A6XX_SP_CS_PVT_MEM_HW_STACK_OFFSET_OFFSET(per_sp_size));
   }
}

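/* Upload 'sizedwords' dwords of constant data at vec4-aligned register
 * offset 'regid', using an inline (SS6_DIRECT) CP_LOAD_STATE6 payload
 * padded out to a multiple of 4 dwords:
 */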
static void
emit_const(struct fd_ringbuffer *ring, uint32_t regid, uint32_t sizedwords,
           const uint32_t *dwords)
{
   uint32_t align_sz;

   assert((regid % 4) == 0);

   align_sz = align(sizedwords, 4);

   OUT_PKT7(ring, CP_LOAD_STATE6_FRAG, 3 + align_sz);
   OUT_RING(ring, CP_LOAD_STATE6_0_DST_OFF(regid / 4) |
                     CP_LOAD_STATE6_0_STATE_TYPE(ST6_CONSTANTS) |
                     CP_LOAD_STATE6_0_STATE_SRC(SS6_DIRECT) |
                     CP_LOAD_STATE6_0_STATE_BLOCK(SB6_CS_SHADER) |
                     CP_LOAD_STATE6_0_NUM_UNIT(DIV_ROUND_UP(sizedwords, 4)));
   OUT_RING(ring, CP_LOAD_STATE6_1_EXT_SRC_ADDR(0));
   OUT_RING(ring, CP_LOAD_STATE6_2_EXT_SRC_ADDR_HI(0));

   for (uint32_t i = 0; i < sizedwords; i++) {
      OUT_RING(ring, dwords[i]);
   }

   /* Zero-pad to multiple of 4 dwords */
   for (uint32_t i = sizedwords; i < align_sz; i++) {
      OUT_RING(ring, 0);
   }
}

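/* Patch the workgroup count and the kernel buffer iovas into the shader's
 * immediate constants, then upload the portion of the immediates that fits
 * within the variant's constlen:
 */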
static void
cs_const_emit(struct fd_ringbuffer *ring, struct kernel *kernel,
              uint32_t grid[3])
{
   struct ir3_kernel *ir3_kernel = to_ir3_kernel(kernel);
   struct ir3_shader_variant *v = ir3_kernel->v;

   const struct ir3_const_state *const_state = ir3_const_state(v);
   uint32_t base = const_state->offsets.immediate;
   int size = DIV_ROUND_UP(const_state->immediates_count, 4);

   if (ir3_kernel->info.numwg != INVALID_REG) {
      assert((ir3_kernel->info.numwg & 0x3) == 0);
      int idx = ir3_kernel->info.numwg >> 2;
      const_state->immediates[idx * 4 + 0] = grid[0];
      const_state->immediates[idx * 4 + 1] = grid[1];
      const_state->immediates[idx * 4 + 2] = grid[2];
   }

   for (int i = 0; i < MAX_BUFS; i++) {
      if (kernel->buf_addr_regs[i] != INVALID_REG) {
         assert((kernel->buf_addr_regs[i] & 0x3) == 0);
         int idx = kernel->buf_addr_regs[i] >> 2;

         uint64_t iova = fd_bo_get_iova(kernel->bufs[i]);

         const_state->immediates[idx * 4 + 1] = iova >> 32;
         const_state->immediates[idx * 4 + 0] = (iova << 32) >> 32;
      }
   }

   /* truncate size to avoid writing constants that shader
    * does not use:
    */
   size = MIN2(size + base, v->constlen) - base;

   /* convert out of vec4: */
   base *= 4;
   size *= 4;

   if (size > 0) {
      emit_const(ring, base, size, const_state->immediates);
   }
}

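/* Build one 16-dword buffer descriptor per kernel buffer in a streaming
 * state buffer, and point both the CP (CP_LOAD_STATE6) and SP_CS_IBO at it
 * so the shader can access the buffers as IBOs:
 */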
static void
cs_ibo_emit(struct fd_ringbuffer *ring, struct fd_submit *submit,
            struct kernel *kernel)
{
   struct fd_ringbuffer *state = fd_submit_new_ringbuffer(
      submit, kernel->num_bufs * 16 * 4, FD_RINGBUFFER_STREAMING);

   for (unsigned i = 0; i < kernel->num_bufs; i++) {
      /* size is encoded with low 15b in WIDTH and high bits in HEIGHT,
       * in units of elements:
       */
      unsigned sz = kernel->buf_sizes[i];
      unsigned width = sz & MASK(15);
      unsigned height = sz >> 15;

      OUT_RING(state, A6XX_TEX_CONST_0_FMT(FMT6_32_UINT) | A6XX_TEX_CONST_0_TILE_MODE(0));
      OUT_RING(state, A6XX_TEX_CONST_1_WIDTH(width) | A6XX_TEX_CONST_1_HEIGHT(height));
      OUT_RING(state, A6XX_TEX_CONST_2_PITCH(0) | A6XX_TEX_CONST_2_BUFFER |
                         A6XX_TEX_CONST_2_TYPE(A6XX_TEX_BUFFER));
      OUT_RING(state, A6XX_TEX_CONST_3_ARRAY_PITCH(0));
      OUT_RELOC(state, kernel->bufs[i], 0, 0, 0);
      OUT_RING(state, 0x00000000);
      OUT_RING(state, 0x00000000);
      OUT_RING(state, 0x00000000);
      OUT_RING(state, 0x00000000);
      OUT_RING(state, 0x00000000);
      OUT_RING(state, 0x00000000);
      OUT_RING(state, 0x00000000);
      OUT_RING(state, 0x00000000);
      OUT_RING(state, 0x00000000);
      OUT_RING(state, 0x00000000);
   }

   OUT_PKT7(ring, CP_LOAD_STATE6_FRAG, 3);
   OUT_RING(ring, CP_LOAD_STATE6_0_DST_OFF(0) |
                     CP_LOAD_STATE6_0_STATE_TYPE(ST6_IBO) |
                     CP_LOAD_STATE6_0_STATE_SRC(SS6_INDIRECT) |
                     CP_LOAD_STATE6_0_STATE_BLOCK(SB6_CS_SHADER) |
                     CP_LOAD_STATE6_0_NUM_UNIT(kernel->num_bufs));
   OUT_RB(ring, state);

   OUT_PKT4(ring, REG_A6XX_SP_CS_IBO, 2);
   OUT_RB(ring, state);

   OUT_PKT4(ring, REG_A6XX_SP_CS_IBO_COUNT, 1);
   OUT_RING(ring, kernel->num_bufs);

   fd_ringbuffer_del(state);
}

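/* Emit a CP_EVENT_WRITE for 'evt'; if 'timestamp' is set, also allocate a
 * new seqno and have the event write it to the control buffer, returning
 * that seqno so the caller can wait on it:
 */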
static inline unsigned
event_write(struct fd_ringbuffer *ring, struct kernel *kernel,
            enum vgt_event_type evt, bool timestamp)
{
   unsigned seqno = 0;

   OUT_PKT7(ring, CP_EVENT_WRITE, timestamp ? 4 : 1);
   OUT_RING(ring, CP_EVENT_WRITE_0_EVENT(evt));
   if (timestamp) {
      struct ir3_kernel *ir3_kernel = to_ir3_kernel(kernel);
      struct a6xx_backend *a6xx_backend = to_a6xx_backend(ir3_kernel->backend);
      seqno = ++a6xx_backend->seqno;
      OUT_RELOC(ring, control_ptr(a6xx_backend, seqno)); /* ADDR_LO/HI */
      OUT_RING(ring, seqno);
   }

   return seqno;
}

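/* Wait for the kernel to finish and its writes to land: emit RB_DONE_TS and
 * CACHE_FLUSH_TS events with seqno writebacks, and have the CP poll the
 * control buffer until each seqno appears:
 */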
static inline void
cache_flush(struct fd_ringbuffer *ring, struct kernel *kernel)
{
   struct ir3_kernel *ir3_kernel = to_ir3_kernel(kernel);
   struct a6xx_backend *a6xx_backend = to_a6xx_backend(ir3_kernel->backend);
   unsigned seqno;

   seqno = event_write(ring, kernel, RB_DONE_TS, true);

   OUT_PKT7(ring, CP_WAIT_REG_MEM, 6);
   OUT_RING(ring, CP_WAIT_REG_MEM_0_FUNCTION(WRITE_EQ) |
                     CP_WAIT_REG_MEM_0_POLL_MEMORY);
   OUT_RELOC(ring, control_ptr(a6xx_backend, seqno));
   OUT_RING(ring, CP_WAIT_REG_MEM_3_REF(seqno));
   OUT_RING(ring, CP_WAIT_REG_MEM_4_MASK(~0));
   OUT_RING(ring, CP_WAIT_REG_MEM_5_DELAY_LOOP_CYCLES(16));

   seqno = event_write(ring, kernel, CACHE_FLUSH_TS, true);

   OUT_PKT7(ring, CP_WAIT_MEM_GTE, 4);
   OUT_RING(ring, CP_WAIT_MEM_GTE_0_RESERVED(0));
   OUT_RELOC(ring, control_ptr(a6xx_backend, seqno));
   OUT_RING(ring, CP_WAIT_MEM_GTE_3_REF(seqno));
}

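/* Top-level dispatch: emit program, constant and IBO state, the ND-range
 * registers, optional perf-counter setup/readback around CP_EXEC_CS, and a
 * final cache flush:
 */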
static void
a6xx_emit_grid(struct kernel *kernel, uint32_t grid[3],
               struct fd_submit *submit)
{
   struct ir3_kernel *ir3_kernel = to_ir3_kernel(kernel);
   struct a6xx_backend *a6xx_backend = to_a6xx_backend(ir3_kernel->backend);
   struct fd_ringbuffer *ring = fd_submit_new_ringbuffer(
      submit, 0, FD_RINGBUFFER_PRIMARY | FD_RINGBUFFER_GROWABLE);

   cs_program_emit(ring, kernel);
   cs_const_emit(ring, kernel, grid);
   cs_ibo_emit(ring, submit, kernel);

   OUT_PKT7(ring, CP_SET_MARKER, 1);
   OUT_RING(ring, A6XX_CP_SET_MARKER_0_MODE(RM6_COMPUTE));

   const unsigned *local_size = kernel->local_size;
   const unsigned *num_groups = grid;

   unsigned work_dim = 0;
   for (int i = 0; i < 3; i++) {
      if (!grid[i])
         break;
      work_dim++;
   }

   OUT_PKT4(ring, REG_A6XX_HLSQ_CS_NDRANGE_0, 7);
   OUT_RING(ring, A6XX_HLSQ_CS_NDRANGE_0_KERNELDIM(work_dim) |
                     A6XX_HLSQ_CS_NDRANGE_0_LOCALSIZEX(local_size[0] - 1) |
                     A6XX_HLSQ_CS_NDRANGE_0_LOCALSIZEY(local_size[1] - 1) |
                     A6XX_HLSQ_CS_NDRANGE_0_LOCALSIZEZ(local_size[2] - 1));
   OUT_RING(ring,
            A6XX_HLSQ_CS_NDRANGE_1_GLOBALSIZE_X(local_size[0] * num_groups[0]));
   OUT_RING(ring, 0); /* HLSQ_CS_NDRANGE_2_GLOBALOFF_X */
   OUT_RING(ring,
            A6XX_HLSQ_CS_NDRANGE_3_GLOBALSIZE_Y(local_size[1] * num_groups[1]));
   OUT_RING(ring, 0); /* HLSQ_CS_NDRANGE_4_GLOBALOFF_Y */
   OUT_RING(ring,
            A6XX_HLSQ_CS_NDRANGE_5_GLOBALSIZE_Z(local_size[2] * num_groups[2]));
   OUT_RING(ring, 0); /* HLSQ_CS_NDRANGE_6_GLOBALOFF_Z */

   OUT_PKT4(ring, REG_A6XX_HLSQ_CS_KERNEL_GROUP_X, 3);
   OUT_RING(ring, 1); /* HLSQ_CS_KERNEL_GROUP_X */
   OUT_RING(ring, 1); /* HLSQ_CS_KERNEL_GROUP_Y */
   OUT_RING(ring, 1); /* HLSQ_CS_KERNEL_GROUP_Z */

   if (a6xx_backend->num_perfcntrs > 0) {
      a6xx_backend->query_mem = fd_bo_new(
         a6xx_backend->dev,
         a6xx_backend->num_perfcntrs * sizeof(struct fd6_query_sample), 0, "query");

      /* configure the performance counters to count the requested
       * countables:
       */
      for (unsigned i = 0; i < a6xx_backend->num_perfcntrs; i++) {
         const struct perfcntr *counter = &a6xx_backend->perfcntrs[i];

         OUT_PKT4(ring, counter->select_reg, 1);
         OUT_RING(ring, counter->selector);
      }

      OUT_PKT7(ring, CP_WAIT_FOR_IDLE, 0);

      /* and snapshot the start values: */
      for (unsigned i = 0; i < a6xx_backend->num_perfcntrs; i++) {
         const struct perfcntr *counter = &a6xx_backend->perfcntrs[i];

         OUT_PKT7(ring, CP_REG_TO_MEM, 3);
         OUT_RING(ring, CP_REG_TO_MEM_0_64B |
                           CP_REG_TO_MEM_0_REG(counter->counter_reg_lo));
         OUT_RELOC(ring, query_sample_idx(a6xx_backend, i, start));
      }
   }

   OUT_PKT7(ring, CP_EXEC_CS, 4);
   OUT_RING(ring, 0x00000000);
   OUT_RING(ring, CP_EXEC_CS_1_NGROUPS_X(grid[0]));
   OUT_RING(ring, CP_EXEC_CS_2_NGROUPS_Y(grid[1]));
   OUT_RING(ring, CP_EXEC_CS_3_NGROUPS_Z(grid[2]));

   OUT_PKT7(ring, CP_WAIT_FOR_IDLE, 0);

   if (a6xx_backend->num_perfcntrs > 0) {
      /* snapshot the end values: */
      for (unsigned i = 0; i < a6xx_backend->num_perfcntrs; i++) {
         const struct perfcntr *counter = &a6xx_backend->perfcntrs[i];

         OUT_PKT7(ring, CP_REG_TO_MEM, 3);
         OUT_RING(ring, CP_REG_TO_MEM_0_64B |
                           CP_REG_TO_MEM_0_REG(counter->counter_reg_lo));
         OUT_RELOC(ring, query_sample_idx(a6xx_backend, i, stop));
      }

      /* and compute the result: */
      for (unsigned i = 0; i < a6xx_backend->num_perfcntrs; i++) {
         /* result += stop - start: */
         OUT_PKT7(ring, CP_MEM_TO_MEM, 9);
         OUT_RING(ring, CP_MEM_TO_MEM_0_DOUBLE | CP_MEM_TO_MEM_0_NEG_C);
         OUT_RELOC(ring, query_sample_idx(a6xx_backend, i, result)); /* dst */
         OUT_RELOC(ring, query_sample_idx(a6xx_backend, i, result)); /* srcA */
         OUT_RELOC(ring, query_sample_idx(a6xx_backend, i, stop));   /* srcB */
         OUT_RELOC(ring, query_sample_idx(a6xx_backend, i, start));  /* srcC */
      }
   }

   cache_flush(ring, kernel);
}

static void
a6xx_set_perfcntrs(struct backend *b, const struct perfcntr *perfcntrs,
                   unsigned num_perfcntrs)
{
   struct a6xx_backend *a6xx_backend = to_a6xx_backend(b);

   a6xx_backend->perfcntrs = perfcntrs;
   a6xx_backend->num_perfcntrs = num_perfcntrs;
}

static void
a6xx_read_perfcntrs(struct backend *b, uint64_t *results)
{
   struct a6xx_backend *a6xx_backend = to_a6xx_backend(b);

   fd_bo_cpu_prep(a6xx_backend->query_mem, NULL, FD_BO_PREP_READ);
   struct fd6_query_sample *samples = fd_bo_map(a6xx_backend->query_mem);

   for (unsigned i = 0; i < a6xx_backend->num_perfcntrs; i++) {
      results[i] = samples[i].result;
   }
}

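/* Create the a6xx backend: instantiate the ir3 compiler, look up the device
 * info, and allocate the control buffer used for seqno writebacks:
 */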
struct backend *
a6xx_init(struct fd_device *dev, const struct fd_dev_id *dev_id)
{
   struct a6xx_backend *a6xx_backend = calloc(1, sizeof(*a6xx_backend));

   a6xx_backend->base = (struct backend){
      .assemble = a6xx_assemble,
      .disassemble = a6xx_disassemble,
      .emit_grid = a6xx_emit_grid,
      .set_perfcntrs = a6xx_set_perfcntrs,
      .read_perfcntrs = a6xx_read_perfcntrs,
   };

   a6xx_backend->compiler = ir3_compiler_create(dev, dev_id,
                                                &(struct ir3_compiler_options){});
   a6xx_backend->dev = dev;

   a6xx_backend->info = fd_dev_info(dev_id);

   a6xx_backend->control_mem =
      fd_bo_new(dev, 0x1000, 0, "control");

   return &a6xx_backend->base;
}