/*
 * Copyright © 2020 Google, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include "ir3/ir3_compiler.h"

#include "util/u_math.h"

#include "adreno_pm4.xml.h"
#include "adreno_common.xml.h"
#include "a6xx.xml.h"

#include "common/freedreno_dev_info.h"

#include "ir3_asm.h"
#include "main.h"

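/*
 * Per-device backend state for running compute kernels on a6xx: the ir3
 * compiler and fd_device handles, device info, a small "control" buffer
 * shared with the GPU (seqnos/scratch), and an optional query buffer plus
 * the currently configured performance counters.
 */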
struct a6xx_backend {
   struct backend base;

   struct ir3_compiler *compiler;
   struct fd_device *dev;

   const struct fd_dev_info *info;

   unsigned seqno;
   struct fd_bo *control_mem;

   struct fd_bo *query_mem;
   const struct perfcntr *perfcntrs;
   unsigned num_perfcntrs;
};
define_cast(backend, a6xx_backend);

/*
 * Data structures shared with GPU:
 */

/* This struct defines the layout of the fd6_context::control buffer: */
struct fd6_control {
   uint32_t seqno; /* seqno for async CP_EVENT_WRITE, etc */
   uint32_t _pad0;
   volatile uint32_t vsc_overflow;
   uint32_t _pad1;
   /* flag set from cmdstream when VSC overflow detected: */
   uint32_t vsc_scratch;
   uint32_t _pad2;
   uint32_t _pad3;
   uint32_t _pad4;

   /* scratch space for VPC_SO[i].FLUSH_BASE_LO/HI, start on 32 byte boundary. */
   struct {
      uint32_t offset;
      uint32_t pad[7];
   } flush_base[4];
};

#define control_ptr(a6xx_backend, member)                                      \
   (a6xx_backend)->control_mem, offsetof(struct fd6_control, member), 0, 0

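/* One performance-counter sample in the query buffer: counter values
 * captured before (start) and after (stop) the grid, plus the accumulated
 * result.
 */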
struct PACKED fd6_query_sample {
   uint64_t start;
   uint64_t result;
   uint64_t stop;
};

/* offset of a single field of an array of fd6_query_sample: */
#define query_sample_idx(a6xx_backend, idx, field)                             \
   (a6xx_backend)->query_mem,                                                  \
      (idx * sizeof(struct fd6_query_sample)) +                                \
         offsetof(struct fd6_query_sample, field),                             \
      0, 0

/*
 * Backend implementation:
 */

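/* Assemble ir3 assembly read from 'in' into a kernel that can later be
 * launched with a6xx_emit_grid().
 */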
static struct kernel *
a6xx_assemble(struct backend *b, FILE *in)
{
   struct a6xx_backend *a6xx_backend = to_a6xx_backend(b);
   struct ir3_kernel *ir3_kernel = ir3_asm_assemble(a6xx_backend->compiler, in);
   ir3_kernel->backend = b;
   return &ir3_kernel->base;
}

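/* Print the ir3 disassembly of an assembled kernel to 'out'. */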
static void
a6xx_disassemble(struct kernel *kernel, FILE *out)
{
   ir3_asm_disassemble(to_ir3_kernel(kernel), out);
}

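/* Emit the compute shader program state: SP/HLSQ config and control
 * registers, the shader iova (SP_CS_OBJ_START) and shader preload via
 * CP_LOAD_STATE6, plus private memory setup if the shader uses any.
 */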
static void
cs_program_emit(struct fd_ringbuffer *ring, struct kernel *kernel)
{
   struct ir3_kernel *ir3_kernel = to_ir3_kernel(kernel);
   struct a6xx_backend *a6xx_backend = to_a6xx_backend(ir3_kernel->backend);
   struct ir3_shader_variant *v = ir3_kernel->v;
   const struct ir3_info *i = &v->info;
   enum a6xx_threadsize thrsz = i->double_threadsize ? THREAD128 : THREAD64;

   OUT_PKT4(ring, REG_A6XX_SP_MODE_CONTROL, 1);
   OUT_RING(ring, A6XX_SP_MODE_CONTROL_CONSTANT_DEMOTION_ENABLE | 4);

   OUT_PKT4(ring, REG_A6XX_SP_PERFCTR_ENABLE, 1);
   OUT_RING(ring, A6XX_SP_PERFCTR_ENABLE_CS);

   OUT_PKT4(ring, REG_A6XX_SP_FLOAT_CNTL, 1);
   OUT_RING(ring, 0);

   OUT_PKT4(ring, REG_A6XX_HLSQ_INVALIDATE_CMD, 1);
   OUT_RING(
      ring,
      A6XX_HLSQ_INVALIDATE_CMD_VS_STATE | A6XX_HLSQ_INVALIDATE_CMD_HS_STATE |
         A6XX_HLSQ_INVALIDATE_CMD_DS_STATE | A6XX_HLSQ_INVALIDATE_CMD_GS_STATE |
         A6XX_HLSQ_INVALIDATE_CMD_FS_STATE | A6XX_HLSQ_INVALIDATE_CMD_CS_STATE |
         A6XX_HLSQ_INVALIDATE_CMD_CS_IBO | A6XX_HLSQ_INVALIDATE_CMD_GFX_IBO);

   unsigned constlen = align(v->constlen, 4);
   OUT_PKT4(ring, REG_A6XX_HLSQ_CS_CNTL, 1);
   OUT_RING(ring,
            A6XX_HLSQ_CS_CNTL_CONSTLEN(constlen) | A6XX_HLSQ_CS_CNTL_ENABLED);

   OUT_PKT4(ring, REG_A6XX_SP_CS_CONFIG, 2);
   OUT_RING(ring, A6XX_SP_CS_CONFIG_ENABLED |
                     A6XX_SP_CS_CONFIG_NIBO(kernel->num_bufs) |
                     A6XX_SP_CS_CONFIG_NTEX(v->num_samp) |
                     A6XX_SP_CS_CONFIG_NSAMP(v->num_samp)); /* SP_CS_CONFIG */
   OUT_RING(ring, v->instrlen);                             /* SP_CS_INSTRLEN */

   OUT_PKT4(ring, REG_A6XX_SP_CS_CTRL_REG0, 1);
   OUT_RING(ring,
            A6XX_SP_CS_CTRL_REG0_THREADSIZE(thrsz) |
               A6XX_SP_CS_CTRL_REG0_FULLREGFOOTPRINT(i->max_reg + 1) |
               A6XX_SP_CS_CTRL_REG0_HALFREGFOOTPRINT(i->max_half_reg + 1) |
               COND(v->mergedregs, A6XX_SP_CS_CTRL_REG0_MERGEDREGS) |
               A6XX_SP_CS_CTRL_REG0_BRANCHSTACK(ir3_shader_branchstack_hw(v)));

   OUT_PKT4(ring, REG_A6XX_SP_CS_UNKNOWN_A9B1, 1);
   OUT_RING(ring, 0x41);

   uint32_t local_invocation_id, work_group_id;
   local_invocation_id =
      ir3_find_sysval_regid(v, SYSTEM_VALUE_LOCAL_INVOCATION_ID);
   work_group_id = ir3_find_sysval_regid(v, SYSTEM_VALUE_WORKGROUP_ID);

   OUT_PKT4(ring, REG_A6XX_HLSQ_CS_CNTL_0, 2);
   OUT_RING(ring, A6XX_HLSQ_CS_CNTL_0_WGIDCONSTID(work_group_id) |
                     A6XX_HLSQ_CS_CNTL_0_WGSIZECONSTID(regid(63, 0)) |
                     A6XX_HLSQ_CS_CNTL_0_WGOFFSETCONSTID(regid(63, 0)) |
                     A6XX_HLSQ_CS_CNTL_0_LOCALIDREGID(local_invocation_id));
   OUT_RING(ring, A6XX_HLSQ_CS_CNTL_1_LINEARLOCALIDREGID(regid(63, 0)) |
                     A6XX_HLSQ_CS_CNTL_1_THREADSIZE(thrsz));

   OUT_PKT4(ring, REG_A6XX_SP_CS_OBJ_START, 2);
   OUT_RELOC(ring, v->bo, 0, 0, 0); /* SP_CS_OBJ_START_LO/HI */

   OUT_PKT4(ring, REG_A6XX_SP_CS_INSTRLEN, 1);
   OUT_RING(ring, v->instrlen);

   OUT_PKT4(ring, REG_A6XX_SP_CS_OBJ_START, 2);
   OUT_RELOC(ring, v->bo, 0, 0, 0);

   OUT_PKT7(ring, CP_LOAD_STATE6_FRAG, 3);
   OUT_RING(ring, CP_LOAD_STATE6_0_DST_OFF(0) |
                     CP_LOAD_STATE6_0_STATE_TYPE(ST6_SHADER) |
                     CP_LOAD_STATE6_0_STATE_SRC(SS6_INDIRECT) |
                     CP_LOAD_STATE6_0_STATE_BLOCK(SB6_CS_SHADER) |
                     CP_LOAD_STATE6_0_NUM_UNIT(v->instrlen));
   OUT_RELOC(ring, v->bo, 0, 0, 0);

   if (v->pvtmem_size > 0) {
      uint32_t per_fiber_size = ALIGN(v->pvtmem_size, 512);
      uint32_t per_sp_size =
         ALIGN(per_fiber_size * a6xx_backend->info->a6xx.fibers_per_sp, 1 << 12);
      uint32_t total_size = per_sp_size * a6xx_backend->info->num_sp_cores;

      struct fd_bo *pvtmem = fd_bo_new(a6xx_backend->dev, total_size, 0, "pvtmem");
      OUT_PKT4(ring, REG_A6XX_SP_CS_PVT_MEM_PARAM, 4);
      OUT_RING(ring, A6XX_SP_CS_PVT_MEM_PARAM_MEMSIZEPERITEM(per_fiber_size));
      OUT_RELOC(ring, pvtmem, 0, 0, 0);
      OUT_RING(ring, A6XX_SP_CS_PVT_MEM_SIZE_TOTALPVTMEMSIZE(per_sp_size) |
                     COND(v->pvtmem_per_wave,
                          A6XX_SP_CS_PVT_MEM_SIZE_PERWAVEMEMLAYOUT));

      OUT_PKT4(ring, REG_A6XX_SP_CS_PVT_MEM_HW_STACK_OFFSET, 1);
      OUT_RING(ring, A6XX_SP_CS_PVT_MEM_HW_STACK_OFFSET_OFFSET(per_sp_size));
   }
}

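/* Upload 'sizedwords' of inline constant data starting at (vec4-aligned)
 * register 'regid'.  NUM_UNIT is counted in vec4s, so the payload is
 * zero-padded up to a multiple of 4 dwords.
 */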
static void
emit_const(struct fd_ringbuffer *ring, uint32_t regid, uint32_t sizedwords,
           const uint32_t *dwords)
{
   uint32_t align_sz;

   debug_assert((regid % 4) == 0);

   align_sz = align(sizedwords, 4);

   OUT_PKT7(ring, CP_LOAD_STATE6_FRAG, 3 + align_sz);
   OUT_RING(ring, CP_LOAD_STATE6_0_DST_OFF(regid / 4) |
                     CP_LOAD_STATE6_0_STATE_TYPE(ST6_CONSTANTS) |
                     CP_LOAD_STATE6_0_STATE_SRC(SS6_DIRECT) |
                     CP_LOAD_STATE6_0_STATE_BLOCK(SB6_CS_SHADER) |
                     CP_LOAD_STATE6_0_NUM_UNIT(DIV_ROUND_UP(sizedwords, 4)));
   OUT_RING(ring, CP_LOAD_STATE6_1_EXT_SRC_ADDR(0));
   OUT_RING(ring, CP_LOAD_STATE6_2_EXT_SRC_ADDR_HI(0));

   for (uint32_t i = 0; i < sizedwords; i++) {
      OUT_RING(ring, dwords[i]);
   }

   /* Zero-pad to multiple of 4 dwords */
   for (uint32_t i = sizedwords; i < align_sz; i++) {
      OUT_RING(ring, 0);
   }
}

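/* Patch the grid size and buffer iovas into the shader's immediate
 * constants, then upload the portion of the immediates that the shader
 * actually uses (clamped against v->constlen).
 */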
static void
cs_const_emit(struct fd_ringbuffer *ring, struct kernel *kernel,
              uint32_t grid[3])
{
   struct ir3_kernel *ir3_kernel = to_ir3_kernel(kernel);
   struct ir3_shader_variant *v = ir3_kernel->v;

   const struct ir3_const_state *const_state = ir3_const_state(v);
   uint32_t base = const_state->offsets.immediate;
   int size = DIV_ROUND_UP(const_state->immediates_count, 4);

   if (ir3_kernel->info.numwg != INVALID_REG) {
      assert((ir3_kernel->info.numwg & 0x3) == 0);
      int idx = ir3_kernel->info.numwg >> 2;
      const_state->immediates[idx * 4 + 0] = grid[0];
      const_state->immediates[idx * 4 + 1] = grid[1];
      const_state->immediates[idx * 4 + 2] = grid[2];
   }

   for (int i = 0; i < MAX_BUFS; i++) {
      if (kernel->buf_addr_regs[i] != INVALID_REG) {
         assert((kernel->buf_addr_regs[i] & 0x3) == 0);
         int idx = kernel->buf_addr_regs[i] >> 2;

         uint64_t iova = fd_bo_get_iova(kernel->bufs[i]);

         const_state->immediates[idx * 4 + 1] = iova >> 32;
         const_state->immediates[idx * 4 + 0] = (iova << 32) >> 32;
      }
   }

   /* truncate size to avoid writing constants that shader
    * does not use:
    */
   size = MIN2(size + base, v->constlen) - base;

   /* convert out of vec4: */
   base *= 4;
   size *= 4;

   if (size > 0) {
      emit_const(ring, base, size, const_state->immediates);
   }
}

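/* Build IBO descriptors for each of the kernel's buffers (descriptors use
 * FMT6_32_UINT / A6XX_TEX_1D, with the element count split across WIDTH and
 * HEIGHT) in a streaming state buffer, then point both the CS shader state
 * and SP_CS_IBO at it.
 */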
static void
cs_ibo_emit(struct fd_ringbuffer *ring, struct fd_submit *submit,
            struct kernel *kernel)
{
   struct fd_ringbuffer *state = fd_submit_new_ringbuffer(
      submit, kernel->num_bufs * 16 * 4, FD_RINGBUFFER_STREAMING);

   for (unsigned i = 0; i < kernel->num_bufs; i++) {
      /* size is encoded with low 15b in WIDTH and high bits in HEIGHT,
       * in units of elements:
       */
      unsigned sz = kernel->buf_sizes[i];
      unsigned width = sz & MASK(15);
      unsigned height = sz >> 15;

      OUT_RING(state, A6XX_IBO_0_FMT(FMT6_32_UINT) | A6XX_IBO_0_TILE_MODE(0));
      OUT_RING(state, A6XX_IBO_1_WIDTH(width) | A6XX_IBO_1_HEIGHT(height));
      OUT_RING(state, A6XX_IBO_2_PITCH(0) | A6XX_IBO_2_UNK4 | A6XX_IBO_2_UNK31 |
                         A6XX_IBO_2_TYPE(A6XX_TEX_1D));
      OUT_RING(state, A6XX_IBO_3_ARRAY_PITCH(0));
      OUT_RELOC(state, kernel->bufs[i], 0, 0, 0);
      OUT_RING(state, 0x00000000);
      OUT_RING(state, 0x00000000);
      OUT_RING(state, 0x00000000);
      OUT_RING(state, 0x00000000);
      OUT_RING(state, 0x00000000);
      OUT_RING(state, 0x00000000);
      OUT_RING(state, 0x00000000);
      OUT_RING(state, 0x00000000);
      OUT_RING(state, 0x00000000);
      OUT_RING(state, 0x00000000);
   }

   OUT_PKT7(ring, CP_LOAD_STATE6_FRAG, 3);
   OUT_RING(ring, CP_LOAD_STATE6_0_DST_OFF(0) |
                     CP_LOAD_STATE6_0_STATE_TYPE(ST6_IBO) |
                     CP_LOAD_STATE6_0_STATE_SRC(SS6_INDIRECT) |
                     CP_LOAD_STATE6_0_STATE_BLOCK(SB6_CS_SHADER) |
                     CP_LOAD_STATE6_0_NUM_UNIT(kernel->num_bufs));
   OUT_RB(ring, state);

   OUT_PKT4(ring, REG_A6XX_SP_CS_IBO, 2);
   OUT_RB(ring, state);

   OUT_PKT4(ring, REG_A6XX_SP_CS_IBO_COUNT, 1);
   OUT_RING(ring, kernel->num_bufs);

   fd_ringbuffer_del(state);
}

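/* Emit a CP_EVENT_WRITE for 'evt'.  If 'timestamp' is requested, the packet
 * also writes a new seqno to the control buffer; the seqno is returned so
 * the caller can wait on it.
 */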
static inline unsigned
event_write(struct fd_ringbuffer *ring, struct kernel *kernel,
            enum vgt_event_type evt, bool timestamp)
{
   unsigned seqno = 0;

   OUT_PKT7(ring, CP_EVENT_WRITE, timestamp ? 4 : 1);
   OUT_RING(ring, CP_EVENT_WRITE_0_EVENT(evt));
   if (timestamp) {
      struct ir3_kernel *ir3_kernel = to_ir3_kernel(kernel);
      struct a6xx_backend *a6xx_backend = to_a6xx_backend(ir3_kernel->backend);
      seqno = ++a6xx_backend->seqno;
      OUT_RELOC(ring, control_ptr(a6xx_backend, seqno)); /* ADDR_LO/HI */
      OUT_RING(ring, seqno);
   }

   return seqno;
}

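/* Wait for prior work to complete (RB_DONE_TS), then flush caches
 * (CACHE_FLUSH_TS), in each case waiting on the seqno that the event writes
 * back to the control buffer.
 */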
static inline void
cache_flush(struct fd_ringbuffer *ring, struct kernel *kernel)
{
   struct ir3_kernel *ir3_kernel = to_ir3_kernel(kernel);
   struct a6xx_backend *a6xx_backend = to_a6xx_backend(ir3_kernel->backend);
   unsigned seqno;

   seqno = event_write(ring, kernel, RB_DONE_TS, true);

   OUT_PKT7(ring, CP_WAIT_REG_MEM, 6);
   OUT_RING(ring, CP_WAIT_REG_MEM_0_FUNCTION(WRITE_EQ) |
                     CP_WAIT_REG_MEM_0_POLL_MEMORY);
   OUT_RELOC(ring, control_ptr(a6xx_backend, seqno));
   OUT_RING(ring, CP_WAIT_REG_MEM_3_REF(seqno));
   OUT_RING(ring, CP_WAIT_REG_MEM_4_MASK(~0));
   OUT_RING(ring, CP_WAIT_REG_MEM_5_DELAY_LOOP_CYCLES(16));

   seqno = event_write(ring, kernel, CACHE_FLUSH_TS, true);

   OUT_PKT7(ring, CP_WAIT_MEM_GTE, 4);
   OUT_RING(ring, CP_WAIT_MEM_GTE_0_RESERVED(0));
   OUT_RELOC(ring, control_ptr(a6xx_backend, seqno));
   OUT_RING(ring, CP_WAIT_MEM_GTE_3_REF(seqno));
}

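/* Emit the full command stream for one grid launch: program and constant
 * state, IBOs, NDRANGE/workgroup setup, optional performance counter
 * snapshots around CP_EXEC_CS, and a final wait plus cache flush.
 */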
static void
a6xx_emit_grid(struct kernel *kernel, uint32_t grid[3],
               struct fd_submit *submit)
{
   struct ir3_kernel *ir3_kernel = to_ir3_kernel(kernel);
   struct a6xx_backend *a6xx_backend = to_a6xx_backend(ir3_kernel->backend);
   struct fd_ringbuffer *ring = fd_submit_new_ringbuffer(
      submit, 0, FD_RINGBUFFER_PRIMARY | FD_RINGBUFFER_GROWABLE);

   cs_program_emit(ring, kernel);
   cs_const_emit(ring, kernel, grid);
   cs_ibo_emit(ring, submit, kernel);

   OUT_PKT7(ring, CP_SET_MARKER, 1);
   OUT_RING(ring, A6XX_CP_SET_MARKER_0_MODE(RM6_COMPUTE));

   const unsigned *local_size = kernel->local_size;
   const unsigned *num_groups = grid;

   unsigned work_dim = 0;
   for (int i = 0; i < 3; i++) {
      if (!grid[i])
         break;
      work_dim++;
   }

   OUT_PKT4(ring, REG_A6XX_HLSQ_CS_NDRANGE_0, 7);
   OUT_RING(ring, A6XX_HLSQ_CS_NDRANGE_0_KERNELDIM(work_dim) |
                     A6XX_HLSQ_CS_NDRANGE_0_LOCALSIZEX(local_size[0] - 1) |
                     A6XX_HLSQ_CS_NDRANGE_0_LOCALSIZEY(local_size[1] - 1) |
                     A6XX_HLSQ_CS_NDRANGE_0_LOCALSIZEZ(local_size[2] - 1));
   OUT_RING(ring,
            A6XX_HLSQ_CS_NDRANGE_1_GLOBALSIZE_X(local_size[0] * num_groups[0]));
   OUT_RING(ring, 0); /* HLSQ_CS_NDRANGE_2_GLOBALOFF_X */
   OUT_RING(ring,
            A6XX_HLSQ_CS_NDRANGE_3_GLOBALSIZE_Y(local_size[1] * num_groups[1]));
   OUT_RING(ring, 0); /* HLSQ_CS_NDRANGE_4_GLOBALOFF_Y */
   OUT_RING(ring,
            A6XX_HLSQ_CS_NDRANGE_5_GLOBALSIZE_Z(local_size[2] * num_groups[2]));
   OUT_RING(ring, 0); /* HLSQ_CS_NDRANGE_6_GLOBALOFF_Z */

   OUT_PKT4(ring, REG_A6XX_HLSQ_CS_KERNEL_GROUP_X, 3);
   OUT_RING(ring, 1); /* HLSQ_CS_KERNEL_GROUP_X */
   OUT_RING(ring, 1); /* HLSQ_CS_KERNEL_GROUP_Y */
   OUT_RING(ring, 1); /* HLSQ_CS_KERNEL_GROUP_Z */

   if (a6xx_backend->num_perfcntrs > 0) {
      a6xx_backend->query_mem = fd_bo_new(
         a6xx_backend->dev,
         a6xx_backend->num_perfcntrs * sizeof(struct fd6_query_sample), 0, "query");

      /* configure the performance counters to count the requested
       * countables:
       */
      for (unsigned i = 0; i < a6xx_backend->num_perfcntrs; i++) {
         const struct perfcntr *counter = &a6xx_backend->perfcntrs[i];

         OUT_PKT4(ring, counter->select_reg, 1);
         OUT_RING(ring, counter->selector);
      }

      OUT_PKT7(ring, CP_WAIT_FOR_IDLE, 0);

      /* and snapshot the start values: */
      for (unsigned i = 0; i < a6xx_backend->num_perfcntrs; i++) {
         const struct perfcntr *counter = &a6xx_backend->perfcntrs[i];

         OUT_PKT7(ring, CP_REG_TO_MEM, 3);
         OUT_RING(ring, CP_REG_TO_MEM_0_64B |
                           CP_REG_TO_MEM_0_REG(counter->counter_reg_lo));
         OUT_RELOC(ring, query_sample_idx(a6xx_backend, i, start));
      }
   }

   OUT_PKT7(ring, CP_EXEC_CS, 4);
   OUT_RING(ring, 0x00000000);
   OUT_RING(ring, CP_EXEC_CS_1_NGROUPS_X(grid[0]));
   OUT_RING(ring, CP_EXEC_CS_2_NGROUPS_Y(grid[1]));
   OUT_RING(ring, CP_EXEC_CS_3_NGROUPS_Z(grid[2]));

   OUT_PKT7(ring, CP_WAIT_FOR_IDLE, 0);

   if (a6xx_backend->num_perfcntrs > 0) {
      /* snapshot the end values: */
      for (unsigned i = 0; i < a6xx_backend->num_perfcntrs; i++) {
         const struct perfcntr *counter = &a6xx_backend->perfcntrs[i];

         OUT_PKT7(ring, CP_REG_TO_MEM, 3);
         OUT_RING(ring, CP_REG_TO_MEM_0_64B |
                           CP_REG_TO_MEM_0_REG(counter->counter_reg_lo));
         OUT_RELOC(ring, query_sample_idx(a6xx_backend, i, stop));
      }

      /* and compute the result: */
      for (unsigned i = 0; i < a6xx_backend->num_perfcntrs; i++) {
         /* result += stop - start: */
         OUT_PKT7(ring, CP_MEM_TO_MEM, 9);
         OUT_RING(ring, CP_MEM_TO_MEM_0_DOUBLE | CP_MEM_TO_MEM_0_NEG_C);
         OUT_RELOC(ring, query_sample_idx(a6xx_backend, i, result)); /* dst */
         OUT_RELOC(ring, query_sample_idx(a6xx_backend, i, result)); /* srcA */
         OUT_RELOC(ring, query_sample_idx(a6xx_backend, i, stop));   /* srcB */
         OUT_RELOC(ring, query_sample_idx(a6xx_backend, i, start));  /* srcC */
      }
   }

   cache_flush(ring, kernel);
}

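/* Remember which performance counters the caller wants collected around
 * subsequent grid launches.
 */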
static void
a6xx_set_perfcntrs(struct backend *b, const struct perfcntr *perfcntrs,
                   unsigned num_perfcntrs)
{
   struct a6xx_backend *a6xx_backend = to_a6xx_backend(b);

   a6xx_backend->perfcntrs = perfcntrs;
   a6xx_backend->num_perfcntrs = num_perfcntrs;
}

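/* Read back the accumulated performance counter results that the GPU wrote
 * into the query buffer.
 */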
static void
a6xx_read_perfcntrs(struct backend *b, uint64_t *results)
{
   struct a6xx_backend *a6xx_backend = to_a6xx_backend(b);

   fd_bo_cpu_prep(a6xx_backend->query_mem, NULL, FD_BO_PREP_READ);
   struct fd6_query_sample *samples = fd_bo_map(a6xx_backend->query_mem);

   for (unsigned i = 0; i < a6xx_backend->num_perfcntrs; i++) {
      results[i] = samples[i].result;
   }
}

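/* Create and initialize the a6xx backend: fill in the backend vtable,
 * create the ir3 compiler, look up device info, and allocate the control
 * buffer shared with the GPU.
 */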
struct backend *
a6xx_init(struct fd_device *dev, const struct fd_dev_id *dev_id)
{
   struct a6xx_backend *a6xx_backend = calloc(1, sizeof(*a6xx_backend));

   a6xx_backend->base = (struct backend){
      .assemble = a6xx_assemble,
      .disassemble = a6xx_disassemble,
      .emit_grid = a6xx_emit_grid,
      .set_perfcntrs = a6xx_set_perfcntrs,
      .read_perfcntrs = a6xx_read_perfcntrs,
   };

   a6xx_backend->compiler = ir3_compiler_create(dev, dev_id, false);
   a6xx_backend->dev = dev;

   a6xx_backend->info = fd_dev_info(dev_id);

   a6xx_backend->control_mem =
      fd_bo_new(dev, 0x1000, 0, "control");

   return &a6xx_backend->base;
}