/*
 * Copyright © 2020 Google, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include "ir3/ir3_compiler.h"

#include "util/u_math.h"

#include "adreno_pm4.xml.h"
#include "adreno_common.xml.h"
#include "a6xx.xml.h"

#include "common/freedreno_dev_info.h"

#include "ir3_asm.h"
#include "main.h"

#define FD_BO_NO_HARDPIN 1
#include "common/fd6_pack.h"

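/*
 * Per-device backend state: the ir3 compiler instance, the drm device,
 * device info, and the GPU-shared control/query buffers used for fences
 * and performance-counter readback.
 */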
struct a6xx_backend {
   struct backend base;

   struct ir3_compiler *compiler;
   struct fd_device *dev;

   const struct fd_dev_info *info;

   unsigned seqno;
   struct fd_bo *control_mem;

   struct fd_bo *query_mem;
   const struct perfcntr *perfcntrs;
   unsigned num_perfcntrs;
};
define_cast(backend, a6xx_backend);

/*
 * Data structures shared with GPU:
 */

/* This struct defines the layout of the fd6_context::control buffer: */
struct fd6_control {
   uint32_t seqno; /* seqno for async CP_EVENT_WRITE, etc */
   uint32_t _pad0;
   volatile uint32_t vsc_overflow;
   uint32_t _pad1;
   /* flag set from cmdstream when VSC overflow detected: */
   uint32_t vsc_scratch;
   uint32_t _pad2;
   uint32_t _pad3;
   uint32_t _pad4;

   /* scratch space for VPC_SO[i].FLUSH_BASE_LO/HI, start on 32 byte boundary. */
   struct {
      uint32_t offset;
      uint32_t pad[7];
   } flush_base[4];
};

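/* Expands to the (bo, offset, ...) argument tuple expected by OUT_RELOC()
 * for a field of the fd6_control buffer:
 */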
#define control_ptr(a6xx_backend, member)                                      \
   (a6xx_backend)->control_mem, offsetof(struct fd6_control, member), 0, 0

struct PACKED fd6_query_sample {
   uint64_t start;
   uint64_t result;
   uint64_t stop;
};

/* offset of a single field of an array of fd6_query_sample: */
#define query_sample_idx(a6xx_backend, idx, field)                             \
   (a6xx_backend)->query_mem,                                                  \
      (idx * sizeof(struct fd6_query_sample)) +                                \
         offsetof(struct fd6_query_sample, field),                             \
      0, 0

/*
 * Backend implementation:
 */

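/* Assemble compute-shader source from 'in' into an ir3 kernel, using the
 * backend's ir3 compiler instance:
 */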
static struct kernel *
a6xx_assemble(struct backend *b, FILE *in)
{
   struct a6xx_backend *a6xx_backend = to_a6xx_backend(b);
   struct ir3_kernel *ir3_kernel = ir3_asm_assemble(a6xx_backend->compiler, in);
   ir3_kernel->backend = b;
   return &ir3_kernel->base;
}

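/* Print the disassembly of an assembled kernel to 'out': */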
static void
a6xx_disassemble(struct kernel *kernel, FILE *out)
{
   ir3_asm_disassemble(to_ir3_kernel(kernel), out);
}

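/* Emit the compute-shader program state: SP/HLSQ config registers, shader
 * preload via CP_LOAD_STATE6, and (if needed) private memory setup:
 */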
template<chip CHIP>
static void
cs_program_emit(struct fd_ringbuffer *ring, struct kernel *kernel)
{
   struct ir3_kernel *ir3_kernel = to_ir3_kernel(kernel);
   struct a6xx_backend *a6xx_backend = to_a6xx_backend(ir3_kernel->backend);
   struct ir3_shader_variant *v = ir3_kernel->v;
   const unsigned *local_size = kernel->local_size;
   const struct ir3_info *i = &v->info;
   enum a6xx_threadsize thrsz = i->double_threadsize ? THREAD128 : THREAD64;

   OUT_REG(ring, A6XX_SP_MODE_CONTROL(.constant_demotion_enable = true,
                                      .isammode = ISAMMODE_GL,
                                      .shared_consts_enable = false));

   OUT_PKT4(ring, REG_A6XX_SP_PERFCTR_ENABLE, 1);
   OUT_RING(ring, A6XX_SP_PERFCTR_ENABLE_CS);

   OUT_PKT4(ring, REG_A6XX_SP_FLOAT_CNTL, 1);
   OUT_RING(ring, 0);

   for (size_t i = 0; i < ARRAY_SIZE(a6xx_backend->info->a6xx.magic_raw); i++) {
      auto magic_reg = a6xx_backend->info->a6xx.magic_raw[i];
      if (!magic_reg.reg)
         break;

      OUT_PKT4(ring, magic_reg.reg, 1);
      OUT_RING(ring, magic_reg.value);
   }

   OUT_REG(ring, HLSQ_INVALIDATE_CMD(CHIP,
      .vs_state = true,
      .hs_state = true,
      .ds_state = true,
      .gs_state = true,
      .fs_state = true,
      .cs_state = true,
      .gfx_ibo = true,
   ));

   unsigned constlen = align(v->constlen, 4);
   OUT_REG(ring, HLSQ_CS_CNTL(CHIP, .constlen = constlen, .enabled = true, ));

   OUT_PKT4(ring, REG_A6XX_SP_CS_CONFIG, 2);
   OUT_RING(ring, A6XX_SP_CS_CONFIG_ENABLED |
                     A6XX_SP_CS_CONFIG_NIBO(kernel->num_bufs) |
                     A6XX_SP_CS_CONFIG_NTEX(v->num_samp) |
                     A6XX_SP_CS_CONFIG_NSAMP(v->num_samp)); /* SP_CS_CONFIG */
   OUT_RING(ring, v->instrlen);                             /* SP_CS_INSTRLEN */

   OUT_PKT4(ring, REG_A6XX_SP_CS_CTRL_REG0, 1);
   OUT_RING(ring,
            A6XX_SP_CS_CTRL_REG0_THREADSIZE(thrsz) |
               A6XX_SP_CS_CTRL_REG0_FULLREGFOOTPRINT(i->max_reg + 1) |
               A6XX_SP_CS_CTRL_REG0_HALFREGFOOTPRINT(i->max_half_reg + 1) |
               COND(v->mergedregs, A6XX_SP_CS_CTRL_REG0_MERGEDREGS) |
               COND(v->early_preamble, A6XX_SP_CS_CTRL_REG0_EARLYPREAMBLE) |
               A6XX_SP_CS_CTRL_REG0_BRANCHSTACK(ir3_shader_branchstack_hw(v)));
   if (CHIP == A7XX) {
      OUT_REG(ring, HLSQ_FS_CNTL_0(CHIP, .threadsize = THREAD64));

      OUT_REG(ring, HLSQ_CONTROL_2_REG(CHIP, .dword = 0xfcfcfcfc),
              HLSQ_CONTROL_3_REG(CHIP, .dword = 0xfcfcfcfc),
              HLSQ_CONTROL_4_REG(CHIP, .dword = 0xfcfcfcfc),
              HLSQ_CONTROL_5_REG(CHIP, .dword = 0x0000fc00), );
   }

   uint32_t shared_size = MAX2(((int)v->shared_size - 1) / 1024, 1);
   OUT_PKT4(ring, REG_A6XX_SP_CS_UNKNOWN_A9B1, 1);
   OUT_RING(ring, A6XX_SP_CS_UNKNOWN_A9B1_SHARED_SIZE(shared_size) |
                  A6XX_SP_CS_UNKNOWN_A9B1_UNK6);

   if (CHIP == A6XX && a6xx_backend->info->a6xx.has_lpac) {
      OUT_PKT4(ring, REG_A6XX_HLSQ_CS_UNKNOWN_B9D0, 1);
      OUT_RING(ring, A6XX_HLSQ_CS_UNKNOWN_B9D0_SHARED_SIZE(1) |
                        A6XX_HLSQ_CS_UNKNOWN_B9D0_UNK6);
   }

   uint32_t local_invocation_id, work_group_id;
   local_invocation_id =
      ir3_find_sysval_regid(v, SYSTEM_VALUE_LOCAL_INVOCATION_ID);
   work_group_id = ir3_find_sysval_regid(v, SYSTEM_VALUE_WORKGROUP_ID);

   if (CHIP == A6XX) {
      OUT_PKT4(ring, REG_A6XX_HLSQ_CS_CNTL_0, 2);
      OUT_RING(ring, A6XX_HLSQ_CS_CNTL_0_WGIDCONSTID(work_group_id) |
                        A6XX_HLSQ_CS_CNTL_0_WGSIZECONSTID(regid(63, 0)) |
                        A6XX_HLSQ_CS_CNTL_0_WGOFFSETCONSTID(regid(63, 0)) |
                        A6XX_HLSQ_CS_CNTL_0_LOCALIDREGID(local_invocation_id));
      OUT_RING(ring, A6XX_HLSQ_CS_CNTL_1_LINEARLOCALIDREGID(regid(63, 0)) |
                        A6XX_HLSQ_CS_CNTL_1_THREADSIZE(thrsz));
   } else {
      unsigned tile_height = (local_size[1] % 8 == 0)   ? 3
                             : (local_size[1] % 4 == 0) ? 5
                             : (local_size[1] % 2 == 0) ? 9
                                                        : 17;

      OUT_REG(ring,
         HLSQ_CS_CNTL_1(CHIP,
            .linearlocalidregid = regid(63, 0),
            .threadsize = thrsz,
            .workgrouprastorderzfirsten = true,
            .wgtilewidth = 4,
            .wgtileheight = tile_height,
         )
      );
   }

   if (CHIP == A7XX || a6xx_backend->info->a6xx.has_lpac) {
      OUT_PKT4(ring, REG_A6XX_SP_CS_CNTL_0, 1);
      OUT_RING(ring, A6XX_SP_CS_CNTL_0_WGIDCONSTID(work_group_id) |
                        A6XX_SP_CS_CNTL_0_WGSIZECONSTID(regid(63, 0)) |
                        A6XX_SP_CS_CNTL_0_WGOFFSETCONSTID(regid(63, 0)) |
                        A6XX_SP_CS_CNTL_0_LOCALIDREGID(local_invocation_id));
      if (CHIP == A7XX) {
         /* TODO allow the shader to control the tiling */
         OUT_REG(ring,
            SP_CS_CNTL_1(A7XX, .linearlocalidregid = regid(63, 0),
                               .threadsize = thrsz,
                               .workitemrastorder = WORKITEMRASTORDER_LINEAR));
      } else {
         OUT_REG(ring,
            SP_CS_CNTL_1(CHIP, .linearlocalidregid = regid(63, 0),
                               .threadsize = thrsz));
      }
   }

   OUT_PKT4(ring, REG_A6XX_SP_CS_OBJ_START, 2);
   OUT_RELOC(ring, v->bo, 0, 0, 0); /* SP_CS_OBJ_START_LO/HI */

   OUT_PKT4(ring, REG_A6XX_SP_CS_INSTRLEN, 1);
   OUT_RING(ring, v->instrlen);

   OUT_PKT4(ring, REG_A6XX_SP_CS_OBJ_START, 2);
   OUT_RELOC(ring, v->bo, 0, 0, 0);

   uint32_t shader_preload_size =
      MIN2(v->instrlen, a6xx_backend->info->a6xx.instr_cache_size);
   OUT_PKT7(ring, CP_LOAD_STATE6_FRAG, 3);
   OUT_RING(ring, CP_LOAD_STATE6_0_DST_OFF(0) |
                     CP_LOAD_STATE6_0_STATE_TYPE(ST6_SHADER) |
                     CP_LOAD_STATE6_0_STATE_SRC(SS6_INDIRECT) |
                     CP_LOAD_STATE6_0_STATE_BLOCK(SB6_CS_SHADER) |
                     CP_LOAD_STATE6_0_NUM_UNIT(shader_preload_size));
   OUT_RELOC(ring, v->bo, 0, 0, 0);

   if (v->pvtmem_size > 0) {
      uint32_t per_fiber_size = v->pvtmem_size;
      uint32_t per_sp_size =
         ALIGN(per_fiber_size * a6xx_backend->info->fibers_per_sp, 1 << 12);
      uint32_t total_size = per_sp_size * a6xx_backend->info->num_sp_cores;

      struct fd_bo *pvtmem = fd_bo_new(a6xx_backend->dev, total_size, 0, "pvtmem");
      OUT_PKT4(ring, REG_A6XX_SP_CS_PVT_MEM_PARAM, 4);
      OUT_RING(ring, A6XX_SP_CS_PVT_MEM_PARAM_MEMSIZEPERITEM(per_fiber_size));
      OUT_RELOC(ring, pvtmem, 0, 0, 0);
      OUT_RING(ring, A6XX_SP_CS_PVT_MEM_SIZE_TOTALPVTMEMSIZE(per_sp_size) |
                     COND(v->pvtmem_per_wave,
                          A6XX_SP_CS_PVT_MEM_SIZE_PERWAVEMEMLAYOUT));

      OUT_PKT4(ring, REG_A6XX_SP_CS_PVT_MEM_HW_STACK_OFFSET, 1);
      OUT_RING(ring, A6XX_SP_CS_PVT_MEM_HW_STACK_OFFSET_OFFSET(per_sp_size));
   }
}

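/* Upload 'sizedwords' dwords of constants starting at component register
 * 'regid' (vec4-aligned), padding the payload to a multiple of 4 dwords:
 */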
template<chip CHIP>
static void
emit_const(struct fd_ringbuffer *ring, uint32_t regid, uint32_t sizedwords,
           const uint32_t *dwords)
{
   uint32_t align_sz;

   assert((regid % 4) == 0);

   align_sz = align(sizedwords, 4);

   OUT_PKT7(ring, CP_LOAD_STATE6_FRAG, 3 + align_sz);
   OUT_RING(ring, CP_LOAD_STATE6_0_DST_OFF(regid / 4) |
                     CP_LOAD_STATE6_0_STATE_TYPE(ST6_CONSTANTS) |
                     CP_LOAD_STATE6_0_STATE_SRC(SS6_DIRECT) |
                     CP_LOAD_STATE6_0_STATE_BLOCK(SB6_CS_SHADER) |
                     CP_LOAD_STATE6_0_NUM_UNIT(DIV_ROUND_UP(sizedwords, 4)));
   OUT_RING(ring, CP_LOAD_STATE6_1_EXT_SRC_ADDR(0));
   OUT_RING(ring, CP_LOAD_STATE6_2_EXT_SRC_ADDR_HI(0));

   for (uint32_t i = 0; i < sizedwords; i++) {
      OUT_RING(ring, dwords[i]);
   }

   /* Zero-pad to multiple of 4 dwords */
   for (uint32_t i = sizedwords; i < align_sz; i++) {
      OUT_RING(ring, 0);
   }
}

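/* Patch the grid size and buffer iovas into the shader's immediate
 * constants and upload them:
 */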
template<chip CHIP>
static void
cs_const_emit(struct fd_ringbuffer *ring, struct kernel *kernel,
              uint32_t grid[3])
{
   struct ir3_kernel *ir3_kernel = to_ir3_kernel(kernel);
   struct ir3_shader_variant *v = ir3_kernel->v;

   const struct ir3_const_state *const_state = ir3_const_state(v);
   uint32_t base = const_state->allocs.max_const_offset_vec4;
   int size = DIV_ROUND_UP(const_state->immediates_count, 4);

   if (ir3_kernel->info.numwg != INVALID_REG) {
      assert((ir3_kernel->info.numwg & 0x3) == 0);
      int idx = ir3_kernel->info.numwg >> 2;
      const_state->immediates[idx * 4 + 0] = grid[0];
      const_state->immediates[idx * 4 + 1] = grid[1];
      const_state->immediates[idx * 4 + 2] = grid[2];
   }

   for (int i = 0; i < MAX_BUFS; i++) {
      if (kernel->buf_addr_regs[i] != INVALID_REG) {
         assert((kernel->buf_addr_regs[i] & 0x3) == 0);
         int idx = kernel->buf_addr_regs[i] >> 2;

         uint64_t iova = fd_bo_get_iova(kernel->bufs[i]);

         const_state->immediates[idx * 4 + 1] = iova >> 32;
         const_state->immediates[idx * 4 + 0] = (iova << 32) >> 32;
      }
   }

   /* truncate size to avoid writing constants that shader
    * does not use:
    */
   size = MIN2(size + base, v->constlen) - base;

   /* convert out of vec4: */
   base *= 4;
   size *= 4;

   if (size > 0) {
      emit_const<CHIP>(ring, base, size, const_state->immediates);
   }
}

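/* Build the texture/IBO descriptors for the kernel's buffers (32-bit UINT
 * buffer images) and point SP_CS_IBO at the resulting state object:
 */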
template<chip CHIP>
static void
cs_ibo_emit(struct fd_ringbuffer *ring, struct fd_submit *submit,
            struct kernel *kernel)
{
   struct fd_ringbuffer *state = fd_submit_new_ringbuffer(
      submit, kernel->num_bufs * 16 * 4, FD_RINGBUFFER_STREAMING);

   for (unsigned i = 0; i < kernel->num_bufs; i++) {
      /* size is encoded with low 15b in WIDTH and high bits in HEIGHT,
       * in units of elements:
       */
      unsigned sz = kernel->buf_sizes[i];
      unsigned width = sz & MASK(15);
      unsigned height = sz >> 15;

      OUT_RING(state, A6XX_TEX_CONST_0_FMT(FMT6_32_UINT) | A6XX_TEX_CONST_0_TILE_MODE(TILE6_LINEAR));
      OUT_RING(state, A6XX_TEX_CONST_1_WIDTH(width) | A6XX_TEX_CONST_1_HEIGHT(height));
      OUT_RING(state, A6XX_TEX_CONST_2_PITCH(0) |
                      A6XX_TEX_CONST_2_STRUCTSIZETEXELS(1) |
                      A6XX_TEX_CONST_2_TYPE(A6XX_TEX_BUFFER));
      OUT_RING(state, A6XX_TEX_CONST_3_ARRAY_PITCH(0));
      OUT_RELOC(state, kernel->bufs[i], 0, 0, 0);
      OUT_RING(state, 0x00000000);
      OUT_RING(state, 0x00000000);
      OUT_RING(state, 0x00000000);
      OUT_RING(state, 0x00000000);
      OUT_RING(state, 0x00000000);
      OUT_RING(state, 0x00000000);
      OUT_RING(state, 0x00000000);
      OUT_RING(state, 0x00000000);
      OUT_RING(state, 0x00000000);
      OUT_RING(state, 0x00000000);
   }

   OUT_PKT7(ring, CP_LOAD_STATE6_FRAG, 3);
   OUT_RING(ring, CP_LOAD_STATE6_0_DST_OFF(0) |
                     CP_LOAD_STATE6_0_STATE_TYPE(ST6_IBO) |
                     CP_LOAD_STATE6_0_STATE_SRC(SS6_INDIRECT) |
                     CP_LOAD_STATE6_0_STATE_BLOCK(SB6_CS_SHADER) |
                     CP_LOAD_STATE6_0_NUM_UNIT(kernel->num_bufs));
   OUT_RB(ring, state);

   OUT_PKT4(ring, REG_A6XX_SP_CS_IBO, 2);
   OUT_RB(ring, state);

   OUT_PKT4(ring, REG_A6XX_SP_CS_IBO_COUNT, 1);
   OUT_RING(ring, kernel->num_bufs);

   fd_ringbuffer_del(state);
}

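/* Emit a CP_EVENT_WRITE (CP_EVENT_WRITE7 on a7xx); if 'timestamp' is
 * requested, a new seqno is written to the control buffer and returned:
 */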
template<chip CHIP>
static inline unsigned
event_write(struct fd_ringbuffer *ring, struct kernel *kernel,
            enum vgt_event_type evt, bool timestamp)
{
   unsigned seqno = 0;

   if (CHIP == A6XX) {
      OUT_PKT7(ring, CP_EVENT_WRITE, timestamp ? 4 : 1);
      OUT_RING(ring, CP_EVENT_WRITE_0_EVENT(evt));
   } else {
      OUT_PKT7(ring, CP_EVENT_WRITE7, timestamp ? 4 : 1);
      OUT_RING(ring,
         CP_EVENT_WRITE7_0_EVENT(evt) |
            COND(timestamp, CP_EVENT_WRITE7_0_WRITE_ENABLED |
                               CP_EVENT_WRITE7_0_WRITE_SRC(EV_WRITE_USER_32B)));
   }

   if (timestamp) {
      struct ir3_kernel *ir3_kernel = to_ir3_kernel(kernel);
      struct a6xx_backend *a6xx_backend = to_a6xx_backend(ir3_kernel->backend);
      seqno = ++a6xx_backend->seqno;
      OUT_RELOC(ring, control_ptr(a6xx_backend, seqno)); /* ADDR_LO/HI */
      OUT_RING(ring, seqno);
   }

   return seqno;
}

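/* Wait for the kernel to finish and flush caches: write a fenced
 * timestamp, wait for it to land, then either flush-and-wait again (a6xx)
 * or emit a cache-flush event (a7xx):
 */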
template<chip CHIP>
static inline void
cache_flush(struct fd_ringbuffer *ring, struct kernel *kernel)
{
   struct ir3_kernel *ir3_kernel = to_ir3_kernel(kernel);
   struct a6xx_backend *a6xx_backend = to_a6xx_backend(ir3_kernel->backend);
   unsigned seqno;

   seqno = event_write<CHIP>(ring, kernel, RB_DONE_TS, true);

   OUT_PKT7(ring, CP_WAIT_REG_MEM, 6);
   OUT_RING(ring, CP_WAIT_REG_MEM_0_FUNCTION(WRITE_EQ) |
                     CP_WAIT_REG_MEM_0_POLL(POLL_MEMORY));
   OUT_RELOC(ring, control_ptr(a6xx_backend, seqno));
   OUT_RING(ring, CP_WAIT_REG_MEM_3_REF(seqno));
   OUT_RING(ring, CP_WAIT_REG_MEM_4_MASK(~0));
   OUT_RING(ring, CP_WAIT_REG_MEM_5_DELAY_LOOP_CYCLES(16));

   if (CHIP == A6XX) {
      seqno = event_write<CHIP>(ring, kernel, CACHE_FLUSH_TS, true);

      OUT_PKT7(ring, CP_WAIT_MEM_GTE, 4);
      OUT_RING(ring, CP_WAIT_MEM_GTE_0_RESERVED(0));
      OUT_RELOC(ring, control_ptr(a6xx_backend, seqno));
      OUT_RING(ring, CP_WAIT_MEM_GTE_3_REF(seqno));
   } else {
      event_write<CHIP>(ring, kernel, CACHE_FLUSH7, false);
   }
}

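/* Emit the full cmdstream for one grid launch: program state, constants,
 * IBOs, the CP_EXEC_CS dispatch, optional perf-counter sampling around it,
 * and a final cache flush:
 */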
template<chip CHIP>
static void
a6xx_emit_grid(struct kernel *kernel, uint32_t grid[3],
               struct fd_submit *submit)
{
   struct ir3_kernel *ir3_kernel = to_ir3_kernel(kernel);
   struct a6xx_backend *a6xx_backend = to_a6xx_backend(ir3_kernel->backend);
   struct fd_ringbuffer *ring = fd_submit_new_ringbuffer(
      submit, 0,
      (enum fd_ringbuffer_flags)(FD_RINGBUFFER_PRIMARY | FD_RINGBUFFER_GROWABLE));

   cs_program_emit<CHIP>(ring, kernel);
   cs_const_emit<CHIP>(ring, kernel, grid);
   cs_ibo_emit<CHIP>(ring, submit, kernel);

   OUT_PKT7(ring, CP_SET_MARKER, 1);
   OUT_RING(ring, A6XX_CP_SET_MARKER_0_MODE(RM6_COMPUTE));

   const unsigned *local_size = kernel->local_size;
   const unsigned *num_groups = grid;

   unsigned work_dim = 0;
   for (int i = 0; i < 3; i++) {
      if (!grid[i])
         break;
      work_dim++;
   }

   OUT_REG(ring, HLSQ_CS_NDRANGE_0(CHIP,
                    .kerneldim = work_dim,
                    .localsizex = local_size[0] - 1,
                    .localsizey = local_size[1] - 1,
                    .localsizez = local_size[2] - 1,
                 ));
   if (CHIP == A7XX) {
      OUT_REG(ring, A7XX_HLSQ_CS_LOCAL_SIZE(.localsizex = local_size[0] - 1,
                                            .localsizey = local_size[1] - 1,
                                            .localsizez = local_size[2] - 1, ));
   }

   OUT_REG(ring, HLSQ_CS_NDRANGE_1(CHIP,
                    .globalsize_x = local_size[0] * num_groups[0],
                 ));
   OUT_REG(ring, HLSQ_CS_NDRANGE_2(CHIP, 0));
   OUT_REG(ring, HLSQ_CS_NDRANGE_3(CHIP,
                    .globalsize_y = local_size[1] * num_groups[1],
                 ));
   OUT_REG(ring, HLSQ_CS_NDRANGE_4(CHIP, 0));
   OUT_REG(ring, HLSQ_CS_NDRANGE_5(CHIP,
                    .globalsize_z = local_size[2] * num_groups[2],
                 ));
   OUT_REG(ring, HLSQ_CS_NDRANGE_6(CHIP, 0));

   OUT_REG(ring, HLSQ_CS_KERNEL_GROUP_X(CHIP, 1));
   OUT_REG(ring, HLSQ_CS_KERNEL_GROUP_Y(CHIP, 1));
   OUT_REG(ring, HLSQ_CS_KERNEL_GROUP_Z(CHIP, 1));

   if (a6xx_backend->num_perfcntrs > 0) {
      a6xx_backend->query_mem = fd_bo_new(
         a6xx_backend->dev,
         a6xx_backend->num_perfcntrs * sizeof(struct fd6_query_sample), 0, "query");

      /* configure the performance counters to count the requested
       * countables:
       */
      for (unsigned i = 0; i < a6xx_backend->num_perfcntrs; i++) {
         const struct perfcntr *counter = &a6xx_backend->perfcntrs[i];

         OUT_PKT4(ring, counter->select_reg, 1);
         OUT_RING(ring, counter->selector);
      }

      OUT_PKT7(ring, CP_WAIT_FOR_IDLE, 0);

      /* and snapshot the start values: */
      for (unsigned i = 0; i < a6xx_backend->num_perfcntrs; i++) {
         const struct perfcntr *counter = &a6xx_backend->perfcntrs[i];

         OUT_PKT7(ring, CP_REG_TO_MEM, 3);
         OUT_RING(ring, CP_REG_TO_MEM_0_64B |
                           CP_REG_TO_MEM_0_REG(counter->counter_reg_lo));
         OUT_RELOC(ring, query_sample_idx(a6xx_backend, i, start));
      }
   }

   OUT_PKT7(ring, CP_EXEC_CS, 4);
   OUT_RING(ring, 0x00000000);
   OUT_RING(ring, CP_EXEC_CS_1_NGROUPS_X(grid[0]));
   OUT_RING(ring, CP_EXEC_CS_2_NGROUPS_Y(grid[1]));
   OUT_RING(ring, CP_EXEC_CS_3_NGROUPS_Z(grid[2]));

   OUT_PKT7(ring, CP_WAIT_FOR_IDLE, 0);

   if (a6xx_backend->num_perfcntrs > 0) {
      /* snapshot the end values: */
      for (unsigned i = 0; i < a6xx_backend->num_perfcntrs; i++) {
         const struct perfcntr *counter = &a6xx_backend->perfcntrs[i];

         OUT_PKT7(ring, CP_REG_TO_MEM, 3);
         OUT_RING(ring, CP_REG_TO_MEM_0_64B |
                           CP_REG_TO_MEM_0_REG(counter->counter_reg_lo));
         OUT_RELOC(ring, query_sample_idx(a6xx_backend, i, stop));
      }

      /* and compute the result: */
      for (unsigned i = 0; i < a6xx_backend->num_perfcntrs; i++) {
         /* result += stop - start: */
         OUT_PKT7(ring, CP_MEM_TO_MEM, 9);
         OUT_RING(ring, CP_MEM_TO_MEM_0_DOUBLE | CP_MEM_TO_MEM_0_NEG_C);
         OUT_RELOC(ring, query_sample_idx(a6xx_backend, i, result)); /* dst */
         OUT_RELOC(ring, query_sample_idx(a6xx_backend, i, result)); /* srcA */
         OUT_RELOC(ring, query_sample_idx(a6xx_backend, i, stop));   /* srcB */
         OUT_RELOC(ring, query_sample_idx(a6xx_backend, i, start));  /* srcC */
      }
   }

   cache_flush<CHIP>(ring, kernel);
}

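/* Remember which performance counters to sample around the next launch: */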
static void
a6xx_set_perfcntrs(struct backend *b, const struct perfcntr *perfcntrs,
                   unsigned num_perfcntrs)
{
   struct a6xx_backend *a6xx_backend = to_a6xx_backend(b);

   a6xx_backend->perfcntrs = perfcntrs;
   a6xx_backend->num_perfcntrs = num_perfcntrs;
}

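/* Read back the accumulated counter results from the query buffer: */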
static void
a6xx_read_perfcntrs(struct backend *b, uint64_t *results)
{
   struct a6xx_backend *a6xx_backend = to_a6xx_backend(b);

   fd_bo_cpu_prep(a6xx_backend->query_mem, NULL, FD_BO_PREP_READ);
   struct fd6_query_sample *samples =
      (struct fd6_query_sample *)fd_bo_map(a6xx_backend->query_mem);

   for (unsigned i = 0; i < a6xx_backend->num_perfcntrs; i++) {
      results[i] = samples[i].result;
   }
}

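/* Create the a6xx/a7xx backend: instantiate the ir3 compiler for the
 * target device and allocate the GPU-shared control buffer:
 */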
template<chip CHIP>
struct backend *
a6xx_init(struct fd_device *dev, const struct fd_dev_id *dev_id)
{
   struct a6xx_backend *a6xx_backend =
      (struct a6xx_backend *)calloc(1, sizeof(*a6xx_backend));

   a6xx_backend->base = (struct backend){
      .assemble = a6xx_assemble,
      .disassemble = a6xx_disassemble,
      .emit_grid = a6xx_emit_grid<CHIP>,
      .set_perfcntrs = a6xx_set_perfcntrs,
      .read_perfcntrs = a6xx_read_perfcntrs,
   };

   struct ir3_compiler_options compiler_options = {};
   a6xx_backend->compiler =
      ir3_compiler_create(dev, dev_id, fd_dev_info_raw(dev_id), &compiler_options);
   a6xx_backend->dev = dev;

   a6xx_backend->info = fd_dev_info_raw(dev_id);

   a6xx_backend->control_mem =
      fd_bo_new(dev, 0x1000, 0, "control");

   return &a6xx_backend->base;
}

template
struct backend *a6xx_init<A6XX>(struct fd_device *dev, const struct fd_dev_id *dev_id);

template
struct backend *a6xx_init<A7XX>(struct fd_device *dev, const struct fd_dev_id *dev_id);