/*
 * Copyright © 2020 Valve Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include <inttypes.h>

#include "radv_cs.h"
#include "radv_private.h"
#include "sid.h"

#define SQTT_BUFFER_ALIGN_SHIFT 12

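/* Whether SQTT should record instruction timing tokens; controlled by the
 * RADV_THREAD_TRACE_INSTRUCTION_TIMING debug option (enabled by default).
 */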
bool
radv_is_instruction_timing_enabled(void)
{
   return debug_get_bool_option("RADV_THREAD_TRACE_INSTRUCTION_TIMING", true);
}

static bool
radv_se_is_disabled(struct radv_device *device, unsigned se)
{
   /* No active CU on the SE means it is disabled. */
   return device->physical_device->rad_info.cu_mask[se][0] == 0;
}

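/* Build the GFX10+ SQ_THREAD_TRACE_CTRL value used to enable or disable SQTT,
 * including the stall-based backpressure bits and generation-specific
 * workarounds.
 */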
static uint32_t
gfx10_get_thread_trace_ctrl(struct radv_device *device, bool enable)
{
   uint32_t thread_trace_ctrl = S_008D1C_MODE(enable) | S_008D1C_HIWATER(5) |
                                S_008D1C_UTIL_TIMER(1) | S_008D1C_RT_FREQ(2) | /* 4096 clk */
                                S_008D1C_DRAW_EVENT_EN(1) | S_008D1C_REG_STALL_EN(1) |
                                S_008D1C_SPI_STALL_EN(1) | S_008D1C_SQ_STALL_EN(1) |
                                S_008D1C_REG_DROP_ON_STALL(0);

   if (device->physical_device->rad_info.gfx_level == GFX10_3)
      thread_trace_ctrl |= S_008D1C_LOWATER_OFFSET(4);

   if (device->physical_device->rad_info.has_sqtt_auto_flush_mode_bug)
      thread_trace_ctrl |= S_008D1C_AUTO_FLUSH_MODE(1);

   return thread_trace_ctrl;
}

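/* Emit a full cache flush and wait-for-idle so no work is in flight when the
 * SQTT state changes.
 */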
static void
radv_emit_wait_for_idle(struct radv_device *device, struct radeon_cmdbuf *cs, int family)
{
   enum rgp_flush_bits sqtt_flush_bits = 0;
   si_cs_emit_cache_flush(
      cs, device->physical_device->rad_info.gfx_level, NULL, 0,
      family == AMD_IP_COMPUTE && device->physical_device->rad_info.gfx_level >= GFX7,
      (family == RADV_QUEUE_COMPUTE
          ? RADV_CMD_FLAG_CS_PARTIAL_FLUSH
          : (RADV_CMD_FLAG_CS_PARTIAL_FLUSH | RADV_CMD_FLAG_PS_PARTIAL_FLUSH)) |
         RADV_CMD_FLAG_INV_ICACHE | RADV_CMD_FLAG_INV_SCACHE | RADV_CMD_FLAG_INV_VCACHE |
         RADV_CMD_FLAG_INV_L2,
      &sqtt_flush_bits, 0);
}

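/* Program the per-SE SQTT buffer, mask and token registers, then emit the
 * event (or register write, on compute queues) that starts the trace.
 */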
static void
radv_emit_thread_trace_start(struct radv_device *device, struct radeon_cmdbuf *cs,
                             enum radv_queue_family qf)
{
   uint32_t shifted_size = device->thread_trace.buffer_size >> SQTT_BUFFER_ALIGN_SHIFT;
   struct radeon_info *rad_info = &device->physical_device->rad_info;
   unsigned max_se = rad_info->max_se;

   for (unsigned se = 0; se < max_se; se++) {
      uint64_t va = radv_buffer_get_va(device->thread_trace.bo);
      uint64_t data_va = ac_thread_trace_get_data_va(rad_info, &device->thread_trace, va, se);
      uint64_t shifted_va = data_va >> SQTT_BUFFER_ALIGN_SHIFT;
      int first_active_cu = ffs(device->physical_device->rad_info.cu_mask[se][0]);

      if (radv_se_is_disabled(device, se))
         continue;

      /* Target SEx and SH0. */
      radeon_set_uconfig_reg(
         cs, R_030800_GRBM_GFX_INDEX,
         S_030800_SE_INDEX(se) | S_030800_SH_INDEX(0) | S_030800_INSTANCE_BROADCAST_WRITES(1));

      if (device->physical_device->rad_info.gfx_level >= GFX10) {
         /* Order seems important for the following 2 registers. */
         radeon_set_privileged_config_reg(
            cs, R_008D04_SQ_THREAD_TRACE_BUF0_SIZE,
            S_008D04_SIZE(shifted_size) | S_008D04_BASE_HI(shifted_va >> 32));

         radeon_set_privileged_config_reg(cs, R_008D00_SQ_THREAD_TRACE_BUF0_BASE, shifted_va);

         radeon_set_privileged_config_reg(
            cs, R_008D14_SQ_THREAD_TRACE_MASK,
            S_008D14_WTYPE_INCLUDE(0x7f) | /* all shader stages */
               S_008D14_SA_SEL(0) | S_008D14_WGP_SEL(first_active_cu / 2) | S_008D14_SIMD_SEL(0));

         uint32_t thread_trace_token_mask = S_008D18_REG_INCLUDE(
            V_008D18_REG_INCLUDE_SQDEC | V_008D18_REG_INCLUDE_SHDEC | V_008D18_REG_INCLUDE_GFXUDEC |
            V_008D18_REG_INCLUDE_COMP | V_008D18_REG_INCLUDE_CONTEXT | V_008D18_REG_INCLUDE_CONFIG);

         /* Performance counters with SQTT are considered deprecated. */
         uint32_t token_exclude = V_008D18_TOKEN_EXCLUDE_PERF;

         if (!radv_is_instruction_timing_enabled()) {
            /* Reduce SQTT traffic when instruction timing isn't enabled. */
            token_exclude |= V_008D18_TOKEN_EXCLUDE_VMEMEXEC |
                             V_008D18_TOKEN_EXCLUDE_ALUEXEC |
                             V_008D18_TOKEN_EXCLUDE_VALUINST |
                             V_008D18_TOKEN_EXCLUDE_IMMEDIATE |
                             V_008D18_TOKEN_EXCLUDE_INST;
         }
         thread_trace_token_mask |= S_008D18_TOKEN_EXCLUDE(token_exclude);

         radeon_set_privileged_config_reg(cs, R_008D18_SQ_THREAD_TRACE_TOKEN_MASK,
                                          thread_trace_token_mask);

         /* Should be emitted last (it enables thread traces). */
         radeon_set_privileged_config_reg(cs, R_008D1C_SQ_THREAD_TRACE_CTRL,
                                          gfx10_get_thread_trace_ctrl(device, true));
      } else {
         /* Order seems important for the following 4 registers. */
         radeon_set_uconfig_reg(cs, R_030CDC_SQ_THREAD_TRACE_BASE2,
                                S_030CDC_ADDR_HI(shifted_va >> 32));

         radeon_set_uconfig_reg(cs, R_030CC0_SQ_THREAD_TRACE_BASE, shifted_va);

         radeon_set_uconfig_reg(cs, R_030CC4_SQ_THREAD_TRACE_SIZE, S_030CC4_SIZE(shifted_size));

         radeon_set_uconfig_reg(cs, R_030CD4_SQ_THREAD_TRACE_CTRL, S_030CD4_RESET_BUFFER(1));

         uint32_t thread_trace_mask = S_030CC8_CU_SEL(first_active_cu) | S_030CC8_SH_SEL(0) |
                                      S_030CC8_SIMD_EN(0xf) | S_030CC8_VM_ID_MASK(0) |
                                      S_030CC8_REG_STALL_EN(1) | S_030CC8_SPI_STALL_EN(1) |
                                      S_030CC8_SQ_STALL_EN(1);

         if (device->physical_device->rad_info.gfx_level < GFX9) {
            thread_trace_mask |= S_030CC8_RANDOM_SEED(0xffff);
         }

         radeon_set_uconfig_reg(cs, R_030CC8_SQ_THREAD_TRACE_MASK, thread_trace_mask);

         /* Trace all tokens and registers. */
         radeon_set_uconfig_reg(
            cs, R_030CCC_SQ_THREAD_TRACE_TOKEN_MASK,
            S_030CCC_TOKEN_MASK(0xbfff) | S_030CCC_REG_MASK(0xff) | S_030CCC_REG_DROP_ON_STALL(0));

         /* Enable SQTT perf counters for all CUs. */
         radeon_set_uconfig_reg(cs, R_030CD0_SQ_THREAD_TRACE_PERF_MASK,
                                S_030CD0_SH0_MASK(0xffff) | S_030CD0_SH1_MASK(0xffff));

         radeon_set_uconfig_reg(cs, R_030CE0_SQ_THREAD_TRACE_TOKEN_MASK2, 0xffffffff);

         radeon_set_uconfig_reg(cs, R_030CEC_SQ_THREAD_TRACE_HIWATER, S_030CEC_HIWATER(4));

         if (device->physical_device->rad_info.gfx_level == GFX9) {
            /* Reset thread trace status errors. */
            radeon_set_uconfig_reg(cs, R_030CE8_SQ_THREAD_TRACE_STATUS, S_030CE8_UTC_ERROR(0));
         }

         /* Enable the thread trace mode. */
         uint32_t thread_trace_mode =
            S_030CD8_MASK_PS(1) | S_030CD8_MASK_VS(1) | S_030CD8_MASK_GS(1) | S_030CD8_MASK_ES(1) |
            S_030CD8_MASK_HS(1) | S_030CD8_MASK_LS(1) | S_030CD8_MASK_CS(1) |
            S_030CD8_AUTOFLUSH_EN(1) | /* periodically flush SQTT data to memory */
            S_030CD8_MODE(1);

         if (device->physical_device->rad_info.gfx_level == GFX9) {
            /* Count SQTT traffic in TCC perf counters. */
            thread_trace_mode |= S_030CD8_TC_PERF_EN(1);
         }

         radeon_set_uconfig_reg(cs, R_030CD8_SQ_THREAD_TRACE_MODE, thread_trace_mode);
      }
   }

   /* Restore global broadcasting. */
   radeon_set_uconfig_reg(cs, R_030800_GRBM_GFX_INDEX,
                          S_030800_SE_BROADCAST_WRITES(1) | S_030800_SH_BROADCAST_WRITES(1) |
                             S_030800_INSTANCE_BROADCAST_WRITES(1));

   /* Start the thread trace with a different event based on the queue. */
   if (qf == RADV_QUEUE_COMPUTE) {
      radeon_set_sh_reg(cs, R_00B878_COMPUTE_THREAD_TRACE_ENABLE, S_00B878_THREAD_TRACE_ENABLE(1));
   } else {
      radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 0, 0));
      radeon_emit(cs, EVENT_TYPE(V_028A90_THREAD_TRACE_START) | EVENT_INDEX(0));
   }
}

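/* Per-generation registers holding the SQTT write pointer, status and
 * (dropped-)counter; their values are copied back to memory when the trace
 * stops.
 */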
static const uint32_t gfx8_thread_trace_info_regs[] = {
   R_030CE4_SQ_THREAD_TRACE_WPTR,
   R_030CE8_SQ_THREAD_TRACE_STATUS,
   R_008E40_SQ_THREAD_TRACE_CNTR,
};

static const uint32_t gfx9_thread_trace_info_regs[] = {
   R_030CE4_SQ_THREAD_TRACE_WPTR,
   R_030CE8_SQ_THREAD_TRACE_STATUS,
   R_030CF0_SQ_THREAD_TRACE_CNTR,
};

static const uint32_t gfx10_thread_trace_info_regs[] = {
   R_008D10_SQ_THREAD_TRACE_WPTR,
   R_008D20_SQ_THREAD_TRACE_STATUS,
   R_008D24_SQ_THREAD_TRACE_DROPPED_CNTR,
};

static void
radv_copy_thread_trace_info_regs(struct radv_device *device, struct radeon_cmdbuf *cs,
                                 unsigned se_index)
{
   const uint32_t *thread_trace_info_regs = NULL;

   if (device->physical_device->rad_info.gfx_level >= GFX10) {
      thread_trace_info_regs = gfx10_thread_trace_info_regs;
   } else if (device->physical_device->rad_info.gfx_level == GFX9) {
      thread_trace_info_regs = gfx9_thread_trace_info_regs;
   } else {
      assert(device->physical_device->rad_info.gfx_level == GFX8);
      thread_trace_info_regs = gfx8_thread_trace_info_regs;
   }

   /* Get the VA where the info struct is stored for this SE. */
   uint64_t va = radv_buffer_get_va(device->thread_trace.bo);
   uint64_t info_va = ac_thread_trace_get_info_va(va, se_index);

   /* Copy back the info struct one DWORD at a time. */
   for (unsigned i = 0; i < 3; i++) {
      radeon_emit(cs, PKT3(PKT3_COPY_DATA, 4, 0));
      radeon_emit(cs, COPY_DATA_SRC_SEL(COPY_DATA_PERF) | COPY_DATA_DST_SEL(COPY_DATA_TC_L2) |
                         COPY_DATA_WR_CONFIRM);
      radeon_emit(cs, thread_trace_info_regs[i] >> 2);
      radeon_emit(cs, 0); /* unused */
      radeon_emit(cs, (info_va + i * 4));
      radeon_emit(cs, (info_va + i * 4) >> 32);
   }
}

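/* Emit the stop/finish events, wait for the trace to drain, disable tracing
 * and copy back the per-SE info registers.
 */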
static void
radv_emit_thread_trace_stop(struct radv_device *device, struct radeon_cmdbuf *cs,
                            enum radv_queue_family qf)
{
   unsigned max_se = device->physical_device->rad_info.max_se;

   /* Stop the thread trace with a different event based on the queue. */
   if (qf == RADV_QUEUE_COMPUTE) {
      radeon_set_sh_reg(cs, R_00B878_COMPUTE_THREAD_TRACE_ENABLE, S_00B878_THREAD_TRACE_ENABLE(0));
   } else {
      radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 0, 0));
      radeon_emit(cs, EVENT_TYPE(V_028A90_THREAD_TRACE_STOP) | EVENT_INDEX(0));
   }

   radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 0, 0));
   radeon_emit(cs, EVENT_TYPE(V_028A90_THREAD_TRACE_FINISH) | EVENT_INDEX(0));

   if (device->physical_device->rad_info.has_sqtt_rb_harvest_bug) {
      /* Some chips with disabled RBs should wait for idle because FINISH_DONE doesn't work. */
      radv_emit_wait_for_idle(device, cs, qf);
   }

   for (unsigned se = 0; se < max_se; se++) {
      if (radv_se_is_disabled(device, se))
         continue;

      /* Target SEi and SH0. */
      radeon_set_uconfig_reg(
         cs, R_030800_GRBM_GFX_INDEX,
         S_030800_SE_INDEX(se) | S_030800_SH_INDEX(0) | S_030800_INSTANCE_BROADCAST_WRITES(1));

      if (device->physical_device->rad_info.gfx_level >= GFX10) {
         if (!device->physical_device->rad_info.has_sqtt_rb_harvest_bug) {
            /* Make sure to wait for the trace buffer. */
            radeon_emit(cs, PKT3(PKT3_WAIT_REG_MEM, 5, 0));
            radeon_emit(cs, WAIT_REG_MEM_NOT_EQUAL); /* wait until FINISH_DONE != 0 */
            radeon_emit(cs, R_008D20_SQ_THREAD_TRACE_STATUS >> 2); /* register */
            radeon_emit(cs, 0);
            radeon_emit(cs, 0);                     /* reference value */
            radeon_emit(cs, ~C_008D20_FINISH_DONE); /* mask */
            radeon_emit(cs, 4);                     /* poll interval */
         }

         /* Disable the thread trace mode. */
         radeon_set_privileged_config_reg(cs, R_008D1C_SQ_THREAD_TRACE_CTRL,
                                          gfx10_get_thread_trace_ctrl(device, false));

         /* Wait for thread trace completion. */
         radeon_emit(cs, PKT3(PKT3_WAIT_REG_MEM, 5, 0));
         radeon_emit(
            cs, WAIT_REG_MEM_EQUAL); /* wait until the register is equal to the reference value */
         radeon_emit(cs, R_008D20_SQ_THREAD_TRACE_STATUS >> 2); /* register */
         radeon_emit(cs, 0);
         radeon_emit(cs, 0);              /* reference value */
         radeon_emit(cs, ~C_008D20_BUSY); /* mask */
         radeon_emit(cs, 4);              /* poll interval */
      } else {
         /* Disable the thread trace mode. */
         radeon_set_uconfig_reg(cs, R_030CD8_SQ_THREAD_TRACE_MODE, S_030CD8_MODE(0));

         /* Wait for thread trace completion. */
         radeon_emit(cs, PKT3(PKT3_WAIT_REG_MEM, 5, 0));
         radeon_emit(
            cs, WAIT_REG_MEM_EQUAL); /* wait until the register is equal to the reference value */
         radeon_emit(cs, R_030CE8_SQ_THREAD_TRACE_STATUS >> 2); /* register */
         radeon_emit(cs, 0);
         radeon_emit(cs, 0);              /* reference value */
         radeon_emit(cs, ~C_030CE8_BUSY); /* mask */
         radeon_emit(cs, 4);              /* poll interval */
      }

      radv_copy_thread_trace_info_regs(device, cs, se);
   }

   /* Restore global broadcasting. */
   radeon_set_uconfig_reg(cs, R_030800_GRBM_GFX_INDEX,
                          S_030800_SE_BROADCAST_WRITES(1) | S_030800_SH_BROADCAST_WRITES(1) |
                             S_030800_INSTANCE_BROADCAST_WRITES(1));
}

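/* Write user data (e.g. RGP markers) into the SQTT stream via the
 * SQ_THREAD_TRACE_USERDATA registers, two DWORDs per packet.
 */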
void
radv_emit_thread_trace_userdata(struct radv_cmd_buffer *cmd_buffer, const void *data,
                                uint32_t num_dwords)
{
   struct radv_device *device = cmd_buffer->device;
   struct radeon_cmdbuf *cs = cmd_buffer->cs;
   const uint32_t *dwords = (uint32_t *)data;

   /* SQTT user data packets aren't supported on SDMA queues. */
   if (cmd_buffer->qf == RADV_QUEUE_TRANSFER)
      return;

   while (num_dwords > 0) {
      uint32_t count = MIN2(num_dwords, 2);

      radeon_check_space(device->ws, cs, 2 + count);

      /* Without the perfctr bit the CP might not always pass the
       * write on correctly. */
      if (device->physical_device->rad_info.gfx_level >= GFX10)
         radeon_set_uconfig_reg_seq_perfctr(cs, R_030D08_SQ_THREAD_TRACE_USERDATA_2, count);
      else
         radeon_set_uconfig_reg_seq(cs, R_030D08_SQ_THREAD_TRACE_USERDATA_2, count);
      radeon_emit_array(cs, dwords, count);

      dwords += count;
      num_dwords -= count;
   }
}

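/* Enable or disable the SQG events that emit thread trace data at the top
 * and bottom of the pipe.
 */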
void
radv_emit_spi_config_cntl(struct radv_device *device, struct radeon_cmdbuf *cs, bool enable)
{
   if (device->physical_device->rad_info.gfx_level >= GFX9) {
      uint32_t spi_config_cntl =
         S_031100_GPR_WRITE_PRIORITY(0x2c688) | S_031100_EXP_PRIORITY_ORDER(3) |
         S_031100_ENABLE_SQG_TOP_EVENTS(enable) | S_031100_ENABLE_SQG_BOP_EVENTS(enable);

      if (device->physical_device->rad_info.gfx_level >= GFX10)
         spi_config_cntl |= S_031100_PS_PKR_PRIORITY_CNTL(3);

      radeon_set_uconfig_reg(cs, R_031100_SPI_CONFIG_CNTL, spi_config_cntl);
   } else {
      /* SPI_CONFIG_CNTL is a protected register on GFX6-GFX8. */
      radeon_set_privileged_config_reg(
         cs, R_009100_SPI_CONFIG_CNTL,
         S_009100_ENABLE_SQG_TOP_EVENTS(enable) | S_009100_ENABLE_SQG_BOP_EVENTS(enable));
   }
}

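/* Inhibit (or restore) clock gating via RLC_PERFMON_CLK_CNTL while profiling,
 * since gated clocks can presumably disturb the captured data. Not needed on
 * GFX11.
 */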
void
radv_emit_inhibit_clockgating(struct radv_device *device, struct radeon_cmdbuf *cs, bool inhibit)
{
   if (device->physical_device->rad_info.gfx_level >= GFX11)
      return; /* not needed */

   if (device->physical_device->rad_info.gfx_level >= GFX10) {
      radeon_set_uconfig_reg(cs, R_037390_RLC_PERFMON_CLK_CNTL,
                             S_037390_PERFMON_CLOCK_STATE(inhibit));
   } else if (device->physical_device->rad_info.gfx_level >= GFX8) {
      radeon_set_uconfig_reg(cs, R_0372FC_RLC_PERFMON_CLK_CNTL,
                             S_0372FC_PERFMON_CLOCK_STATE(inhibit));
   }
}

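/* Allocate, make resident and map the SQTT buffer: one info struct per SE
 * followed by one data buffer per SE, aligned to 1 << SQTT_BUFFER_ALIGN_SHIFT
 * bytes.
 */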
static bool
radv_thread_trace_init_bo(struct radv_device *device)
{
   unsigned max_se = device->physical_device->rad_info.max_se;
   struct radeon_winsys *ws = device->ws;
   VkResult result;
   uint64_t size;

   /* The buffer size and address need to be aligned in HW regs. Align the
    * size as early as possible so that we do all the allocation & addressing
    * correctly. */
   device->thread_trace.buffer_size =
      align64(device->thread_trace.buffer_size, 1u << SQTT_BUFFER_ALIGN_SHIFT);

   /* Compute total size of the thread trace BO for all SEs. */
   size = align64(sizeof(struct ac_thread_trace_info) * max_se, 1 << SQTT_BUFFER_ALIGN_SHIFT);
   size += device->thread_trace.buffer_size * (uint64_t)max_se;

   struct radeon_winsys_bo *bo = NULL;
   result = ws->buffer_create(
      ws, size, 4096, RADEON_DOMAIN_VRAM,
      RADEON_FLAG_CPU_ACCESS | RADEON_FLAG_NO_INTERPROCESS_SHARING | RADEON_FLAG_ZERO_VRAM,
      RADV_BO_PRIORITY_SCRATCH, 0, &bo);
   device->thread_trace.bo = bo;
   if (result != VK_SUCCESS)
      return false;

   result = ws->buffer_make_resident(ws, device->thread_trace.bo, true);
   if (result != VK_SUCCESS)
      return false;

   device->thread_trace.ptr = ws->buffer_map(device->thread_trace.bo);
   if (!device->thread_trace.ptr)
      return false;

   return true;
}

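/* Release residency and destroy the SQTT buffer, if it was created. */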
static void
radv_thread_trace_finish_bo(struct radv_device *device)
{
   struct radeon_winsys *ws = device->ws;

   if (unlikely(device->thread_trace.bo)) {
      ws->buffer_make_resident(ws, device->thread_trace.bo, false);
      ws->buffer_destroy(ws, device->thread_trace.bo);
   }
}

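/* One-time SQTT setup: parse the debug options, create the trace BO and
 * initialize the RGP record lists and their locks.
 */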
bool
radv_thread_trace_init(struct radv_device *device)
{
   struct ac_thread_trace_data *thread_trace_data = &device->thread_trace;

   /* Default buffer size set to 32MB per SE. */
   device->thread_trace.buffer_size =
      radv_get_int_debug_option("RADV_THREAD_TRACE_BUFFER_SIZE", 32 * 1024 * 1024);
   device->thread_trace.start_frame = radv_get_int_debug_option("RADV_THREAD_TRACE", -1);

   const char *trigger_file = getenv("RADV_THREAD_TRACE_TRIGGER");
   if (trigger_file)
      device->thread_trace.trigger_file = strdup(trigger_file);

   if (!radv_thread_trace_init_bo(device))
      return false;

   if (!radv_device_acquire_performance_counters(device))
      return false;

   list_inithead(&thread_trace_data->rgp_pso_correlation.record);
   simple_mtx_init(&thread_trace_data->rgp_pso_correlation.lock, mtx_plain);

   list_inithead(&thread_trace_data->rgp_loader_events.record);
   simple_mtx_init(&thread_trace_data->rgp_loader_events.lock, mtx_plain);

   list_inithead(&thread_trace_data->rgp_code_object.record);
   simple_mtx_init(&thread_trace_data->rgp_code_object.lock, mtx_plain);

   return true;
}

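/* Tear down everything created by radv_thread_trace_init(), including the
 * cached start/stop command streams.
 */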
void
radv_thread_trace_finish(struct radv_device *device)
{
   struct ac_thread_trace_data *thread_trace_data = &device->thread_trace;
   struct radeon_winsys *ws = device->ws;

   free(device->thread_trace.trigger_file);

   radv_thread_trace_finish_bo(device);

   for (unsigned i = 0; i < 2; i++) {
      if (device->thread_trace.start_cs[i])
         ws->cs_destroy(device->thread_trace.start_cs[i]);
      if (device->thread_trace.stop_cs[i])
         ws->cs_destroy(device->thread_trace.stop_cs[i]);
   }

   assert(thread_trace_data->rgp_pso_correlation.record_count == 0);
   simple_mtx_destroy(&thread_trace_data->rgp_pso_correlation.lock);

   assert(thread_trace_data->rgp_loader_events.record_count == 0);
   simple_mtx_destroy(&thread_trace_data->rgp_loader_events.lock);

   assert(thread_trace_data->rgp_code_object.record_count == 0);
   simple_mtx_destroy(&thread_trace_data->rgp_code_object.lock);
}

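/* Recreate the trace BO with twice the per-SE buffer size, after a capture
 * overflowed the previous buffer.
 */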
static bool
radv_thread_trace_resize_bo(struct radv_device *device)
{
   /* Destroy the previous thread trace BO. */
   radv_thread_trace_finish_bo(device);

   /* Double the size of the thread trace buffer per SE. */
   device->thread_trace.buffer_size *= 2;

   fprintf(stderr,
           "Failed to get the thread trace because the buffer "
           "was too small, resizing to %d KB\n",
           device->thread_trace.buffer_size / 1024);

   /* Re-create the thread trace BO. */
   return radv_thread_trace_init_bo(device);
}

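/* Build and submit the command stream that starts a capture on this queue.
 *
 * Typical capture flow (sketch; error handling omitted):
 *
 *    radv_thread_trace_init(device);   // at device creation
 *    radv_begin_thread_trace(queue);   // before the frame of interest
 *    // ... submit the workload ...
 *    radv_end_thread_trace(queue);     // after the frame
 *    if (radv_get_thread_trace(queue, &thread_trace))
 *       // ... dump the RGP capture ...
 */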
bool
radv_begin_thread_trace(struct radv_queue *queue)
{
   struct radv_device *device = queue->device;
   enum radv_queue_family family = queue->state.qf;
   struct radeon_winsys *ws = device->ws;
   struct radeon_cmdbuf *cs;
   VkResult result;

   /* Destroy the previous start CS and create a new one. */
   if (device->thread_trace.start_cs[family]) {
      ws->cs_destroy(device->thread_trace.start_cs[family]);
      device->thread_trace.start_cs[family] = NULL;
   }

   cs = ws->cs_create(ws, radv_queue_ring(queue));
   if (!cs)
      return false;

   switch (family) {
   case RADV_QUEUE_GENERAL:
      radeon_emit(cs, PKT3(PKT3_CONTEXT_CONTROL, 1, 0));
      radeon_emit(cs, CC0_UPDATE_LOAD_ENABLES(1));
      radeon_emit(cs, CC1_UPDATE_SHADOW_ENABLES(1));
      break;
   case RADV_QUEUE_COMPUTE:
      radeon_emit(cs, PKT3(PKT3_NOP, 0, 0));
      radeon_emit(cs, 0);
      break;
   default:
      unreachable("Incorrect queue family");
      break;
   }

   /* Make sure to wait-for-idle before starting SQTT. */
   radv_emit_wait_for_idle(device, cs, family);

   /* Disable clock gating before starting SQTT. */
   radv_emit_inhibit_clockgating(device, cs, true);

   /* Enable SQG events that collect thread trace data. */
   radv_emit_spi_config_cntl(device, cs, true);

   radv_perfcounter_emit_spm_reset(cs);

   if (device->spm_trace.bo) {
      /* Enable all shader stages by default. */
      radv_perfcounter_emit_shaders(cs, 0x7f);

      radv_emit_spm_setup(device, cs);
   }

   /* Start SQTT. */
   radv_emit_thread_trace_start(device, cs, family);

   if (device->spm_trace.bo)
      radv_perfcounter_emit_spm_start(device, cs, family);

   result = ws->cs_finalize(cs);
   if (result != VK_SUCCESS) {
      ws->cs_destroy(cs);
      return false;
   }

   device->thread_trace.start_cs[family] = cs;

   return radv_queue_internal_submit(queue, cs);
}

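/* Build and submit the command stream that stops the capture and restores
 * the clock gating and SQG state.
 */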
bool
radv_end_thread_trace(struct radv_queue *queue)
{
   struct radv_device *device = queue->device;
   enum radv_queue_family family = queue->state.qf;
   struct radeon_winsys *ws = device->ws;
   struct radeon_cmdbuf *cs;
   VkResult result;

   /* Destroy the previous stop CS and create a new one. */
   if (queue->device->thread_trace.stop_cs[family]) {
      ws->cs_destroy(device->thread_trace.stop_cs[family]);
      device->thread_trace.stop_cs[family] = NULL;
   }

   cs = ws->cs_create(ws, radv_queue_ring(queue));
   if (!cs)
      return false;

   switch (family) {
   case RADV_QUEUE_GENERAL:
      radeon_emit(cs, PKT3(PKT3_CONTEXT_CONTROL, 1, 0));
      radeon_emit(cs, CC0_UPDATE_LOAD_ENABLES(1));
      radeon_emit(cs, CC1_UPDATE_SHADOW_ENABLES(1));
      break;
   case RADV_QUEUE_COMPUTE:
      radeon_emit(cs, PKT3(PKT3_NOP, 0, 0));
      radeon_emit(cs, 0);
      break;
   default:
      unreachable("Incorrect queue family");
      break;
   }

   /* Make sure to wait-for-idle before stopping SQTT. */
   radv_emit_wait_for_idle(device, cs, family);

   if (device->spm_trace.bo)
      radv_perfcounter_emit_spm_stop(device, cs, family);

   /* Stop SQTT. */
   radv_emit_thread_trace_stop(device, cs, family);

   radv_perfcounter_emit_spm_reset(cs);

   /* Restore previous state by disabling SQG events. */
   radv_emit_spi_config_cntl(device, cs, false);

   /* Restore previous state by re-enabling clock gating. */
   radv_emit_inhibit_clockgating(device, cs, false);

   result = ws->cs_finalize(cs);
   if (result != VK_SUCCESS) {
      ws->cs_destroy(cs);
      return false;
   }

   device->thread_trace.stop_cs[family] = cs;

   return radv_queue_internal_submit(queue, cs);
}

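/* Read back the per-SE trace data from the mapped buffer. Returns false (and
 * grows the buffer) if a trace didn't fit, so the caller can retry the
 * capture with the resized buffer.
 */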
bool
radv_get_thread_trace(struct radv_queue *queue, struct ac_thread_trace *thread_trace)
{
   struct radv_device *device = queue->device;
   struct radeon_info *rad_info = &device->physical_device->rad_info;
   unsigned max_se = rad_info->max_se;
   void *thread_trace_ptr = device->thread_trace.ptr;

   memset(thread_trace, 0, sizeof(*thread_trace));

   for (unsigned se = 0; se < max_se; se++) {
      uint64_t info_offset = ac_thread_trace_get_info_offset(se);
      uint64_t data_offset = ac_thread_trace_get_data_offset(rad_info, &device->thread_trace, se);
      void *info_ptr = (uint8_t *)thread_trace_ptr + info_offset;
      void *data_ptr = (uint8_t *)thread_trace_ptr + data_offset;
      struct ac_thread_trace_info *info = (struct ac_thread_trace_info *)info_ptr;
      struct ac_thread_trace_se thread_trace_se = {0};
      int first_active_cu = ffs(device->physical_device->rad_info.cu_mask[se][0]);

      if (radv_se_is_disabled(device, se))
         continue;

      if (!ac_is_thread_trace_complete(&device->physical_device->rad_info, &device->thread_trace,
                                       info)) {
         if (!radv_thread_trace_resize_bo(device)) {
            fprintf(stderr, "Failed to resize the thread trace buffer.\n");
            abort();
         }
         return false;
      }

      thread_trace_se.data_ptr = data_ptr;
      thread_trace_se.info = *info;
      thread_trace_se.shader_engine = se;

      /* RGP seems to expect units of WGP on GFX10+. */
      thread_trace_se.compute_unit = device->physical_device->rad_info.gfx_level >= GFX10
                                        ? (first_active_cu / 2)
                                        : first_active_cu;

      thread_trace->traces[thread_trace->num_traces] = thread_trace_se;
      thread_trace->num_traces++;
   }

   thread_trace->data = &device->thread_trace;
   return true;
}