/*
 * Copyright 2020 Advanced Micro Devices, Inc.
 * Copyright 2020 Valve Corporation
 *
 * SPDX-License-Identifier: MIT
 */

#include "ac_pm4.h"
#include "ac_sqtt.h"

#include "sid.h"
#include "ac_gpu_info.h"
#include "util/u_math.h"
#include "util/os_time.h"

#include "sid.h"
18 uint32_t
ac_sqtt_get_buffer_align_shift(const struct radeon_info * info)19 ac_sqtt_get_buffer_align_shift(const struct radeon_info *info)
20 {
21    /* SQTT buffer VA is 36-bits on GFX8-11.5. */
22    return info->gfx_level >= GFX12 ? 0 : 12;
23 }
24 
25 uint64_t
ac_sqtt_get_info_offset(unsigned se)26 ac_sqtt_get_info_offset(unsigned se)
27 {
28    return sizeof(struct ac_sqtt_data_info) * se;
29 }
30 
31 uint64_t
ac_sqtt_get_data_offset(const struct radeon_info * rad_info,const struct ac_sqtt * data,unsigned se)32 ac_sqtt_get_data_offset(const struct radeon_info *rad_info, const struct ac_sqtt *data, unsigned se)
33 {
34    const uint32_t align_shift = ac_sqtt_get_buffer_align_shift(rad_info);
35    unsigned max_se = rad_info->max_se;
36    uint64_t data_offset;
37 
38    data_offset = align64(sizeof(struct ac_sqtt_data_info) * max_se, 1ull << align_shift);
39    data_offset += data->buffer_size * se;
40 
41    return data_offset;
42 }
43 
/* GPU VA of the info struct for shader engine "se" within the SQTT buffer. */
static uint64_t
ac_sqtt_get_info_va(uint64_t va, unsigned se)
{
   const uint64_t offset = ac_sqtt_get_info_offset(se);
   return va + offset;
}
49 
50 static uint64_t
ac_sqtt_get_data_va(const struct radeon_info * rad_info,const struct ac_sqtt * data,unsigned se)51 ac_sqtt_get_data_va(const struct radeon_info *rad_info, const struct ac_sqtt *data,
52                     unsigned se)
53 {
54    return data->buffer_va + ac_sqtt_get_data_offset(rad_info, data, se);
55 }
56 
57 void
ac_sqtt_init(struct ac_sqtt * data)58 ac_sqtt_init(struct ac_sqtt *data)
59 {
60    list_inithead(&data->rgp_pso_correlation.record);
61    simple_mtx_init(&data->rgp_pso_correlation.lock, mtx_plain);
62 
63    list_inithead(&data->rgp_loader_events.record);
64    simple_mtx_init(&data->rgp_loader_events.lock, mtx_plain);
65 
66    list_inithead(&data->rgp_code_object.record);
67    simple_mtx_init(&data->rgp_code_object.lock, mtx_plain);
68 
69    list_inithead(&data->rgp_clock_calibration.record);
70    simple_mtx_init(&data->rgp_clock_calibration.lock, mtx_plain);
71 
72    list_inithead(&data->rgp_queue_info.record);
73    simple_mtx_init(&data->rgp_queue_info.lock, mtx_plain);
74 
75    list_inithead(&data->rgp_queue_event.record);
76    simple_mtx_init(&data->rgp_queue_event.lock, mtx_plain);
77 }
78 
79 void
ac_sqtt_finish(struct ac_sqtt * data)80 ac_sqtt_finish(struct ac_sqtt *data)
81 {
82    assert(data->rgp_pso_correlation.record_count == 0);
83    simple_mtx_destroy(&data->rgp_pso_correlation.lock);
84 
85    assert(data->rgp_loader_events.record_count == 0);
86    simple_mtx_destroy(&data->rgp_loader_events.lock);
87 
88    assert(data->rgp_code_object.record_count == 0);
89    simple_mtx_destroy(&data->rgp_code_object.lock);
90 
91    assert(data->rgp_clock_calibration.record_count == 0);
92    simple_mtx_destroy(&data->rgp_clock_calibration.lock);
93 
94    assert(data->rgp_queue_info.record_count == 0);
95    simple_mtx_destroy(&data->rgp_queue_info.lock);
96 
97    assert(data->rgp_queue_event.record_count == 0);
98    simple_mtx_destroy(&data->rgp_queue_event.lock);
99 }
100 
101 bool
ac_is_sqtt_complete(const struct radeon_info * rad_info,const struct ac_sqtt * data,const struct ac_sqtt_data_info * info)102 ac_is_sqtt_complete(const struct radeon_info *rad_info, const struct ac_sqtt *data,
103                     const struct ac_sqtt_data_info *info)
104 {
105    if (rad_info->gfx_level >= GFX10) {
106       /* GFX10 doesn't have THREAD_TRACE_CNTR but it reports the number of
107        * dropped bytes per SE via THREAD_TRACE_DROPPED_CNTR. Though, this
108        * doesn't seem reliable because it might still report non-zero even if
109        * the SQTT buffer isn't full.
110        *
111        * The solution here is to compare the number of bytes written by the hw
112        * (in units of 32 bytes) to the SQTT buffer size. If it's equal, that
113        * means that the buffer is full and should be resized.
114        */
115       return !(info->cur_offset * 32 == data->buffer_size - 32);
116    }
117 
118    /* Otherwise, compare the current thread trace offset with the number
119     * of written bytes.
120     */
121    return info->cur_offset == info->gfx9_write_counter;
122 }
123 
124 bool
ac_sqtt_add_pso_correlation(struct ac_sqtt * sqtt,uint64_t pipeline_hash,uint64_t api_hash)125 ac_sqtt_add_pso_correlation(struct ac_sqtt *sqtt, uint64_t pipeline_hash, uint64_t api_hash)
126 {
127    struct rgp_pso_correlation *pso_correlation = &sqtt->rgp_pso_correlation;
128    struct rgp_pso_correlation_record *record;
129 
130    record = malloc(sizeof(struct rgp_pso_correlation_record));
131    if (!record)
132       return false;
133 
134    record->api_pso_hash = api_hash;
135    record->pipeline_hash[0] = pipeline_hash;
136    record->pipeline_hash[1] = pipeline_hash;
137    memset(record->api_level_obj_name, 0, sizeof(record->api_level_obj_name));
138 
139    simple_mtx_lock(&pso_correlation->lock);
140    list_addtail(&record->list, &pso_correlation->record);
141    pso_correlation->record_count++;
142    simple_mtx_unlock(&pso_correlation->lock);
143 
144    return true;
145 }
146 
147 bool
ac_sqtt_add_code_object_loader_event(struct ac_sqtt * sqtt,uint64_t pipeline_hash,uint64_t base_address)148 ac_sqtt_add_code_object_loader_event(struct ac_sqtt *sqtt, uint64_t pipeline_hash,
149                                      uint64_t base_address)
150 {
151    struct rgp_loader_events *loader_events = &sqtt->rgp_loader_events;
152    struct rgp_loader_events_record *record;
153 
154    record = malloc(sizeof(struct rgp_loader_events_record));
155    if (!record)
156       return false;
157 
158    record->loader_event_type = RGP_LOAD_TO_GPU_MEMORY;
159    record->reserved = 0;
160    record->base_address = base_address & 0xffffffffffff;
161    record->code_object_hash[0] = pipeline_hash;
162    record->code_object_hash[1] = pipeline_hash;
163    record->time_stamp = os_time_get_nano();
164 
165    simple_mtx_lock(&loader_events->lock);
166    list_addtail(&record->list, &loader_events->record);
167    loader_events->record_count++;
168    simple_mtx_unlock(&loader_events->lock);
169 
170    return true;
171 }
172 
173 bool
ac_sqtt_add_clock_calibration(struct ac_sqtt * sqtt,uint64_t cpu_timestamp,uint64_t gpu_timestamp)174 ac_sqtt_add_clock_calibration(struct ac_sqtt *sqtt, uint64_t cpu_timestamp, uint64_t gpu_timestamp)
175 {
176    struct rgp_clock_calibration *clock_calibration = &sqtt->rgp_clock_calibration;
177    struct rgp_clock_calibration_record *record;
178 
179    record = malloc(sizeof(struct rgp_clock_calibration_record));
180    if (!record)
181       return false;
182 
183    record->cpu_timestamp = cpu_timestamp;
184    record->gpu_timestamp = gpu_timestamp;
185 
186    simple_mtx_lock(&clock_calibration->lock);
187    list_addtail(&record->list, &clock_calibration->record);
188    clock_calibration->record_count++;
189    simple_mtx_unlock(&clock_calibration->lock);
190 
191    return true;
192 }
193 
194 /* See https://gitlab.freedesktop.org/mesa/mesa/-/issues/5260
195  * On some HW SQTT can hang if we're not in one of the profiling pstates. */
196 bool
ac_check_profile_state(const struct radeon_info * info)197 ac_check_profile_state(const struct radeon_info *info)
198 {
199    char path[128];
200    char data[128];
201    int n;
202 
203    if (!info->pci.valid)
204       return false; /* Unknown but optimistic. */
205 
206    snprintf(path, sizeof(path),
207             "/sys/bus/pci/devices/%04x:%02x:%02x.%x/power_dpm_force_performance_level",
208             info->pci.domain, info->pci.bus, info->pci.dev, info->pci.func);
209 
210    FILE *f = fopen(path, "r");
211    if (!f)
212       return false; /* Unknown but optimistic. */
213    n = fread(data, 1, sizeof(data) - 1, f);
214    fclose(f);
215    data[n] = 0;
216    return strstr(data, "profile") == NULL;
217 }
218 
219 union rgp_sqtt_marker_cb_id
ac_sqtt_get_next_cmdbuf_id(struct ac_sqtt * data,enum amd_ip_type ip_type)220 ac_sqtt_get_next_cmdbuf_id(struct ac_sqtt *data, enum amd_ip_type ip_type)
221 {
222    union rgp_sqtt_marker_cb_id cb_id = {0};
223 
224    cb_id.global_cb_id.cb_index =
225       p_atomic_inc_return(&data->cmdbuf_ids_per_queue[ip_type]);
226 
227    return cb_id;
228 }
229 
230 static bool
ac_sqtt_se_is_disabled(const struct radeon_info * info,unsigned se)231 ac_sqtt_se_is_disabled(const struct radeon_info *info, unsigned se)
232 {
233    /* No active CU on the SE means it is disabled. */
234    return info->cu_mask[se][0] == 0;
235 }
236 
237 static uint32_t
ac_sqtt_get_active_cu(const struct radeon_info * info,unsigned se)238 ac_sqtt_get_active_cu(const struct radeon_info *info, unsigned se)
239 {
240    uint32_t cu_index;
241 
242    if (info->gfx_level >= GFX11) {
243       /* GFX11 seems to operate on the last active CU. */
244       cu_index = util_last_bit(info->cu_mask[se][0]) - 1;
245    } else {
246       /* Default to the first active CU. */
247       cu_index = ffs(info->cu_mask[se][0]);
248    }
249 
250    return cu_index;
251 }
252 
253 bool
ac_sqtt_get_trace(struct ac_sqtt * data,const struct radeon_info * info,struct ac_sqtt_trace * sqtt_trace)254 ac_sqtt_get_trace(struct ac_sqtt *data, const struct radeon_info *info,
255                   struct ac_sqtt_trace *sqtt_trace)
256 {
257    unsigned max_se = info->max_se;
258    void *ptr = data->ptr;
259 
260    memset(sqtt_trace, 0, sizeof(*sqtt_trace));
261 
262    for (unsigned se = 0; se < max_se; se++) {
263       uint64_t info_offset = ac_sqtt_get_info_offset(se);
264       uint64_t data_offset = ac_sqtt_get_data_offset(info, data, se);
265       void *info_ptr = (uint8_t *)ptr + info_offset;
266       void *data_ptr = (uint8_t *)ptr + data_offset;
267       struct ac_sqtt_data_info *trace_info = (struct ac_sqtt_data_info *)info_ptr;
268       struct ac_sqtt_data_se data_se = {0};
269       int active_cu = ac_sqtt_get_active_cu(info, se);
270 
271       if (ac_sqtt_se_is_disabled(info, se))
272          continue;
273 
274       if (!ac_is_sqtt_complete(info, data, trace_info))
275          return false;
276 
277       data_se.data_ptr = data_ptr;
278       data_se.info = *trace_info;
279       data_se.shader_engine = se;
280 
281       /* RGP seems to expect units of WGP on GFX10+. */
282       data_se.compute_unit = info->gfx_level >= GFX10 ? (active_cu / 2) : active_cu;
283 
284       sqtt_trace->traces[sqtt_trace->num_traces] = data_se;
285       sqtt_trace->num_traces++;
286    }
287 
288    sqtt_trace->rgp_code_object = &data->rgp_code_object;
289    sqtt_trace->rgp_loader_events = &data->rgp_loader_events;
290    sqtt_trace->rgp_pso_correlation = &data->rgp_pso_correlation;
291    sqtt_trace->rgp_queue_info = &data->rgp_queue_info;
292    sqtt_trace->rgp_queue_event = &data->rgp_queue_event;
293    sqtt_trace->rgp_clock_calibration = &data->rgp_clock_calibration;
294 
295    return true;
296 }
297 
298 uint32_t
ac_sqtt_get_ctrl(const struct radeon_info * info,bool enable)299 ac_sqtt_get_ctrl(const struct radeon_info *info, bool enable)
300 {
301 
302    uint32_t ctrl;
303 
304    if (info->gfx_level >= GFX11) {
305       if (info->gfx_level >= GFX12) {
306          ctrl = S_0367B0_UTIL_TIMER_GFX12(1);
307       } else {
308          ctrl = S_0367B0_UTIL_TIMER_GFX11(1) | S_0367B0_RT_FREQ(2); /* 4096 clk */
309       }
310 
311       ctrl |= S_0367B0_MODE(enable) | S_0367B0_HIWATER(5) |
312               S_0367B0_DRAW_EVENT_EN(1) | S_0367B0_SPI_STALL_EN(1) |
313               S_0367B0_SQ_STALL_EN(1) | S_0367B0_REG_AT_HWM(2);
314    } else {
315       assert(info->gfx_level >= GFX10);
316 
317       ctrl = S_008D1C_MODE(enable) | S_008D1C_HIWATER(5) | S_008D1C_UTIL_TIMER(1) |
318              S_008D1C_RT_FREQ(2) | /* 4096 clk */ S_008D1C_DRAW_EVENT_EN(1) |
319              S_008D1C_REG_STALL_EN(1) | S_008D1C_SPI_STALL_EN(1) |
320              S_008D1C_SQ_STALL_EN(1) | S_008D1C_REG_DROP_ON_STALL(0);
321 
322       if (info->gfx_level == GFX10_3)
323          ctrl |= S_008D1C_LOWATER_OFFSET(4);
324 
325       if (info->has_sqtt_auto_flush_mode_bug)
326          ctrl |= S_008D1C_AUTO_FLUSH_MODE(1);
327    }
328 
329    return ctrl;
330 }
331 
332 uint32_t
ac_sqtt_get_shader_mask(const struct radeon_info * info)333 ac_sqtt_get_shader_mask(const struct radeon_info *info)
334 {
335    unsigned shader_mask = 0x7f; /* all shader stages */
336 
337    if (info->gfx_level >= GFX11) {
338       /* Disable unsupported hw shader stages */
339       shader_mask &= ~(0x02 /* VS */ | 0x08 /* ES */ | 0x20 /* LS */);
340    }
341 
342    return shader_mask;
343 }
344 
345 void
ac_sqtt_emit_start(const struct radeon_info * info,struct ac_pm4_state * pm4,const struct ac_sqtt * sqtt,bool is_compute_queue)346 ac_sqtt_emit_start(const struct radeon_info *info, struct ac_pm4_state *pm4,
347                    const struct ac_sqtt *sqtt, bool is_compute_queue)
348 {
349    const uint32_t align_shift = ac_sqtt_get_buffer_align_shift(info);
350    const uint32_t shifted_size = sqtt->buffer_size >> align_shift;
351    const unsigned shader_mask = ac_sqtt_get_shader_mask(info);
352    const unsigned max_se = info->max_se;
353 
354    for (unsigned se = 0; se < max_se; se++) {
355       uint64_t data_va = ac_sqtt_get_data_va(info, sqtt, se);
356       uint64_t shifted_va = data_va >> align_shift;
357       int active_cu = ac_sqtt_get_active_cu(info, se);
358 
359       if (ac_sqtt_se_is_disabled(info, se))
360          continue;
361 
362       /* Target SEx and SH0. */
363       ac_pm4_set_reg(pm4, R_030800_GRBM_GFX_INDEX, S_030800_SE_INDEX(se) |
364                      S_030800_SH_INDEX(0) | S_030800_INSTANCE_BROADCAST_WRITES(1));
365 
366       if (info->gfx_level >= GFX11) {
367          /* Order seems important for the following 2 registers. */
368          if (info->gfx_level >= GFX12) {
369             ac_pm4_set_reg(pm4, R_036798_SQ_THREAD_TRACE_BUF0_SIZE,
370                            S_036798_SIZE(shifted_size));
371 
372             ac_pm4_set_reg(pm4, R_03679C_SQ_THREAD_TRACE_BUF0_BASE_LO, shifted_va);
373             ac_pm4_set_reg(pm4, R_0367A0_SQ_THREAD_TRACE_BUF0_BASE_HI, S_0367A0_BASE_HI(shifted_va >> 32));
374          } else {
375             ac_pm4_set_reg(pm4, R_0367A4_SQ_THREAD_TRACE_BUF0_SIZE,
376                            S_0367A4_SIZE(shifted_size) | S_0367A4_BASE_HI(shifted_va >> 32));
377 
378             ac_pm4_set_reg(pm4, R_0367A0_SQ_THREAD_TRACE_BUF0_BASE, shifted_va);
379          }
380 
381          ac_pm4_set_reg(pm4, R_0367B4_SQ_THREAD_TRACE_MASK,
382                         S_0367B4_WTYPE_INCLUDE(shader_mask) | S_0367B4_SA_SEL(0) |
383                         S_0367B4_WGP_SEL(active_cu / 2) | S_0367B4_SIMD_SEL(0));
384 
385          uint32_t sqtt_token_mask = S_0367B8_REG_INCLUDE(V_0367B8_REG_INCLUDE_SQDEC | V_0367B8_REG_INCLUDE_SHDEC |
386                                                          V_0367B8_REG_INCLUDE_GFXUDEC | V_0367B8_REG_INCLUDE_COMP |
387                                                          V_0367B8_REG_INCLUDE_CONTEXT | V_0367B8_REG_INCLUDE_CONFIG);
388 
389          /* Performance counters with SQTT are considered deprecated. */
390          uint32_t token_exclude = V_0367B8_TOKEN_EXCLUDE_PERF;
391 
392          if (!sqtt->instruction_timing_enabled) {
393             /* Reduce SQTT traffic when instruction timing isn't enabled. */
394             token_exclude |= V_0367B8_TOKEN_EXCLUDE_VMEMEXEC | V_0367B8_TOKEN_EXCLUDE_ALUEXEC |
395                              V_0367B8_TOKEN_EXCLUDE_VALUINST | V_0367B8_TOKEN_EXCLUDE_IMMEDIATE |
396                              V_0367B8_TOKEN_EXCLUDE_INST;
397          }
398 
399          if (info->gfx_level >= GFX12) {
400             sqtt_token_mask |= S_0367B8_TOKEN_EXCLUDE_GFX12(token_exclude) | S_0367B8_BOP_EVENTS_TOKEN_INCLUDE_GFX12(1);
401          } else {
402             sqtt_token_mask |= S_0367B8_TOKEN_EXCLUDE_GFX11(token_exclude) | S_0367B8_BOP_EVENTS_TOKEN_INCLUDE_GFX11(1);
403          }
404 
405          ac_pm4_set_reg(pm4, R_0367B8_SQ_THREAD_TRACE_TOKEN_MASK, sqtt_token_mask);
406 
407          /* Should be emitted last (it enables thread traces). */
408          ac_pm4_set_reg(pm4, R_0367B0_SQ_THREAD_TRACE_CTRL, ac_sqtt_get_ctrl(info, true));
409       } else if (info->gfx_level >= GFX10) {
410          /* Order seems important for the following 2 registers. */
411          ac_pm4_set_reg(pm4, R_008D04_SQ_THREAD_TRACE_BUF0_SIZE,
412                         S_008D04_SIZE(shifted_size) | S_008D04_BASE_HI(shifted_va >> 32));
413 
414          ac_pm4_set_reg(pm4, R_008D00_SQ_THREAD_TRACE_BUF0_BASE, shifted_va);
415 
416          ac_pm4_set_reg(pm4, R_008D14_SQ_THREAD_TRACE_MASK,
417                         S_008D14_WTYPE_INCLUDE(shader_mask) | S_008D14_SA_SEL(0) |
418                         S_008D14_WGP_SEL(active_cu / 2) | S_008D14_SIMD_SEL(0));
419 
420          uint32_t sqtt_token_mask = S_008D18_REG_INCLUDE(V_008D18_REG_INCLUDE_SQDEC | V_008D18_REG_INCLUDE_SHDEC |
421                                                          V_008D18_REG_INCLUDE_GFXUDEC | V_008D18_REG_INCLUDE_COMP |
422                                                          V_008D18_REG_INCLUDE_CONTEXT | V_008D18_REG_INCLUDE_CONFIG);
423 
424          /* Performance counters with SQTT are considered deprecated. */
425          uint32_t token_exclude = V_008D18_TOKEN_EXCLUDE_PERF;
426 
427          if (!sqtt->instruction_timing_enabled) {
428             /* Reduce SQTT traffic when instruction timing isn't enabled. */
429             token_exclude |= V_008D18_TOKEN_EXCLUDE_VMEMEXEC | V_008D18_TOKEN_EXCLUDE_ALUEXEC |
430                              V_008D18_TOKEN_EXCLUDE_VALUINST | V_008D18_TOKEN_EXCLUDE_IMMEDIATE |
431                              V_008D18_TOKEN_EXCLUDE_INST;
432          }
433          sqtt_token_mask |=
434             S_008D18_TOKEN_EXCLUDE(token_exclude) | S_008D18_BOP_EVENTS_TOKEN_INCLUDE(info->gfx_level == GFX10_3);
435 
436          ac_pm4_set_reg(pm4, R_008D18_SQ_THREAD_TRACE_TOKEN_MASK, sqtt_token_mask);
437 
438          /* Should be emitted last (it enables thread traces). */
439          ac_pm4_set_reg(pm4, R_008D1C_SQ_THREAD_TRACE_CTRL, ac_sqtt_get_ctrl(info, true));
440       } else {
441          /* Order seems important for the following 4 registers. */
442          ac_pm4_set_reg(pm4, R_030CDC_SQ_THREAD_TRACE_BASE2, S_030CDC_ADDR_HI(shifted_va >> 32));
443 
444          ac_pm4_set_reg(pm4, R_030CC0_SQ_THREAD_TRACE_BASE, shifted_va);
445 
446          ac_pm4_set_reg(pm4, R_030CC4_SQ_THREAD_TRACE_SIZE, S_030CC4_SIZE(shifted_size));
447 
448          ac_pm4_set_reg(pm4, R_030CD4_SQ_THREAD_TRACE_CTRL, S_030CD4_RESET_BUFFER(1));
449 
450          uint32_t sqtt_mask = S_030CC8_CU_SEL(active_cu) | S_030CC8_SH_SEL(0) | S_030CC8_SIMD_EN(0xf) |
451                               S_030CC8_VM_ID_MASK(0) | S_030CC8_REG_STALL_EN(1) | S_030CC8_SPI_STALL_EN(1) |
452                               S_030CC8_SQ_STALL_EN(1);
453 
454          if (info->gfx_level < GFX9) {
455             sqtt_mask |= S_030CC8_RANDOM_SEED(0xffff);
456          }
457 
458          ac_pm4_set_reg(pm4, R_030CC8_SQ_THREAD_TRACE_MASK, sqtt_mask);
459 
460          /* Trace all tokens and registers. */
461          ac_pm4_set_reg(pm4, R_030CCC_SQ_THREAD_TRACE_TOKEN_MASK,
462                         S_030CCC_TOKEN_MASK(0xbfff) | S_030CCC_REG_MASK(0xff) | S_030CCC_REG_DROP_ON_STALL(0));
463 
464          /* Enable SQTT perf counters for all CUs. */
465          ac_pm4_set_reg(pm4, R_030CD0_SQ_THREAD_TRACE_PERF_MASK,
466                         S_030CD0_SH0_MASK(0xffff) | S_030CD0_SH1_MASK(0xffff));
467 
468          ac_pm4_set_reg(pm4, R_030CE0_SQ_THREAD_TRACE_TOKEN_MASK2, 0xffffffff);
469 
470          ac_pm4_set_reg(pm4, R_030CEC_SQ_THREAD_TRACE_HIWATER, S_030CEC_HIWATER(4));
471 
472          if (info->gfx_level == GFX9) {
473             /* Reset thread trace status errors. */
474             ac_pm4_set_reg(pm4, R_030CE8_SQ_THREAD_TRACE_STATUS, S_030CE8_UTC_ERROR(0));
475          }
476 
477          /* Enable the thread trace mode. */
478          uint32_t sqtt_mode = S_030CD8_MASK_PS(1) | S_030CD8_MASK_VS(1) | S_030CD8_MASK_GS(1) | S_030CD8_MASK_ES(1) |
479                               S_030CD8_MASK_HS(1) | S_030CD8_MASK_LS(1) | S_030CD8_MASK_CS(1) |
480                               S_030CD8_AUTOFLUSH_EN(1) | /* periodically flush SQTT data to memory */
481                               S_030CD8_MODE(1);
482 
483          if (info->gfx_level == GFX9) {
484             /* Count SQTT traffic in TCC perf counters. */
485             sqtt_mode |= S_030CD8_TC_PERF_EN(1);
486          }
487 
488          ac_pm4_set_reg(pm4, R_030CD8_SQ_THREAD_TRACE_MODE, sqtt_mode);
489       }
490    }
491 
492    /* Restore global broadcasting. */
493    ac_pm4_set_reg(pm4, R_030800_GRBM_GFX_INDEX,  S_030800_SE_BROADCAST_WRITES(1) |
494                   S_030800_SH_BROADCAST_WRITES(1) | S_030800_INSTANCE_BROADCAST_WRITES(1));
495 
496    /* Start the thread trace with a different event based on the queue. */
497    if (is_compute_queue) {
498       ac_pm4_set_reg(pm4, R_00B878_COMPUTE_THREAD_TRACE_ENABLE, S_00B878_THREAD_TRACE_ENABLE(1));
499    } else {
500       ac_pm4_cmd_add(pm4, PKT3(PKT3_EVENT_WRITE, 0, 0));
501       ac_pm4_cmd_add(pm4, EVENT_TYPE(V_028A90_THREAD_TRACE_START) | EVENT_INDEX(0));
502    }
503 
504 }
505 
506 static const uint32_t gfx8_sqtt_info_regs[] = {
507    R_030CE4_SQ_THREAD_TRACE_WPTR,
508    R_030CE8_SQ_THREAD_TRACE_STATUS,
509    R_008E40_SQ_THREAD_TRACE_CNTR,
510 };
511 
512 static const uint32_t gfx9_sqtt_info_regs[] = {
513    R_030CE4_SQ_THREAD_TRACE_WPTR,
514    R_030CE8_SQ_THREAD_TRACE_STATUS,
515    R_030CF0_SQ_THREAD_TRACE_CNTR,
516 };
517 
518 static const uint32_t gfx10_sqtt_info_regs[] = {
519    R_008D10_SQ_THREAD_TRACE_WPTR,
520    R_008D20_SQ_THREAD_TRACE_STATUS,
521    R_008D24_SQ_THREAD_TRACE_DROPPED_CNTR,
522 };
523 
524 static const uint32_t gfx11_sqtt_info_regs[] = {
525    R_0367BC_SQ_THREAD_TRACE_WPTR,
526    R_0367D0_SQ_THREAD_TRACE_STATUS,
527    R_0367E8_SQ_THREAD_TRACE_DROPPED_CNTR,
528 };
529 
530 static void
ac_sqtt_copy_info_regs(const struct radeon_info * info,struct ac_pm4_state * pm4,const struct ac_sqtt * sqtt,uint32_t se_index)531 ac_sqtt_copy_info_regs(const struct radeon_info *info, struct ac_pm4_state *pm4,
532                        const struct ac_sqtt *sqtt, uint32_t se_index)
533 {
534    const uint32_t *sqtt_info_regs = NULL;
535 
536    if (info->gfx_level >= GFX11) {
537       sqtt_info_regs = gfx11_sqtt_info_regs;
538    } else if (info->gfx_level >= GFX10) {
539       sqtt_info_regs = gfx10_sqtt_info_regs;
540    } else if (info->gfx_level == GFX9) {
541       sqtt_info_regs = gfx9_sqtt_info_regs;
542    } else {
543       assert(info->gfx_level == GFX8);
544       sqtt_info_regs = gfx8_sqtt_info_regs;
545    }
546 
547    /* Get the VA where the info struct is stored for this SE. */
548    uint64_t info_va = ac_sqtt_get_info_va(sqtt->buffer_va, se_index);
549 
550    /* Copy back the info struct one DWORD at a time. */
551    for (unsigned i = 0; i < 3; i++) {
552       ac_pm4_cmd_add(pm4, PKT3(PKT3_COPY_DATA, 4, 0));
553       ac_pm4_cmd_add(pm4, COPY_DATA_SRC_SEL(COPY_DATA_PERF) | COPY_DATA_DST_SEL(COPY_DATA_TC_L2) | COPY_DATA_WR_CONFIRM);
554       ac_pm4_cmd_add(pm4, sqtt_info_regs[i] >> 2);
555       ac_pm4_cmd_add(pm4, 0); /* unused */
556       ac_pm4_cmd_add(pm4, (info_va + i * 4));
557       ac_pm4_cmd_add(pm4, (info_va + i * 4) >> 32);
558    }
559 
560    if (info->gfx_level == GFX11) {
561       /* On GFX11, SQ_THREAD_TRACE_WPTR is incremented from the "initial WPTR address" instead of 0.
562        * To get the number of bytes (in units of 32 bytes) written by SQTT, the workaround is to
563        * subtract SQ_THREAD_TRACE_WPTR from the "initial WPTR address" as follow:
564        *
565        * 1) get the current buffer base address for this SE
566        * 2) shift right by 5 bits because SQ_THREAD_TRACE_WPTR is 32-byte aligned
567        * 3) mask off the higher 3 bits because WPTR.OFFSET is 29 bits
568        */
569       uint64_t data_va = ac_sqtt_get_data_va(info, sqtt, se_index);
570       uint64_t shifted_data_va = (data_va >> 5);
571       uint32_t init_wptr_value = shifted_data_va & 0x1fffffff;
572 
573       ac_pm4_cmd_add(pm4, PKT3(PKT3_ATOMIC_MEM, 7, 0));
574       ac_pm4_cmd_add(pm4, ATOMIC_OP(TC_OP_ATOMIC_SUB_RTN_32));
575       ac_pm4_cmd_add(pm4, info_va);         /* addr lo */
576       ac_pm4_cmd_add(pm4, info_va >> 32);   /* addr hi */
577       ac_pm4_cmd_add(pm4, init_wptr_value); /* data lo */
578       ac_pm4_cmd_add(pm4, 0);               /* data hi */
579       ac_pm4_cmd_add(pm4, 0);               /* compare data lo */
580       ac_pm4_cmd_add(pm4, 0);               /* compare data hi */
581       ac_pm4_cmd_add(pm4, 0);               /* loop interval */
582    }
583 }
584 
585 void
ac_sqtt_emit_stop(const struct radeon_info * info,struct ac_pm4_state * pm4,bool is_compute_queue)586 ac_sqtt_emit_stop(const struct radeon_info *info, struct ac_pm4_state *pm4,
587                   bool is_compute_queue)
588 {
589    /* Stop the thread trace with a different event based on the queue. */
590    if (is_compute_queue) {
591       ac_pm4_set_reg(pm4, R_00B878_COMPUTE_THREAD_TRACE_ENABLE, S_00B878_THREAD_TRACE_ENABLE(0));
592    } else {
593       ac_pm4_cmd_add(pm4, PKT3(PKT3_EVENT_WRITE, 0, 0));
594       ac_pm4_cmd_add(pm4, EVENT_TYPE(V_028A90_THREAD_TRACE_STOP) | EVENT_INDEX(0));
595    }
596 
597    ac_pm4_cmd_add(pm4, PKT3(PKT3_EVENT_WRITE, 0, 0));
598    ac_pm4_cmd_add(pm4, EVENT_TYPE(V_028A90_THREAD_TRACE_FINISH) | EVENT_INDEX(0));
599 }
600 
601 void
ac_sqtt_emit_wait(const struct radeon_info * info,struct ac_pm4_state * pm4,const struct ac_sqtt * sqtt,bool is_compute_queue)602 ac_sqtt_emit_wait(const struct radeon_info *info, struct ac_pm4_state *pm4,
603                   const struct ac_sqtt *sqtt, bool is_compute_queue)
604 {
605    const unsigned max_se = info->max_se;
606 
607    for (unsigned se = 0; se < max_se; se++) {
608       if (ac_sqtt_se_is_disabled(info, se))
609          continue;
610 
611       /* Target SEi and SH0. */
612       ac_pm4_set_reg(pm4, R_030800_GRBM_GFX_INDEX, S_030800_SE_INDEX(se) |
613                      S_030800_SH_INDEX(0) | S_030800_INSTANCE_BROADCAST_WRITES(1));
614 
615       if (info->gfx_level >= GFX11) {
616          /* Make sure to wait for the trace buffer. */
617          ac_pm4_cmd_add(pm4, PKT3(PKT3_WAIT_REG_MEM, 5, 0));
618          ac_pm4_cmd_add(pm4, WAIT_REG_MEM_NOT_EQUAL); /* wait until the register is equal to the reference value */
619          ac_pm4_cmd_add(pm4, R_0367D0_SQ_THREAD_TRACE_STATUS >> 2); /* register */
620          ac_pm4_cmd_add(pm4, 0);
621          ac_pm4_cmd_add(pm4, 0); /* reference value */
622          ac_pm4_cmd_add(pm4, ~C_0367D0_FINISH_DONE);
623          ac_pm4_cmd_add(pm4, 4); /* poll interval */
624 
625          /* Disable the thread trace mode. */
626          ac_pm4_set_reg(pm4, R_0367B0_SQ_THREAD_TRACE_CTRL, ac_sqtt_get_ctrl(info, false));
627 
628          /* Wait for thread trace completion. */
629          ac_pm4_cmd_add(pm4, PKT3(PKT3_WAIT_REG_MEM, 5, 0));
630          ac_pm4_cmd_add(pm4, WAIT_REG_MEM_EQUAL); /* wait until the register is equal to the reference value */
631          ac_pm4_cmd_add(pm4, R_0367D0_SQ_THREAD_TRACE_STATUS >> 2); /* register */
632          ac_pm4_cmd_add(pm4, 0);
633          ac_pm4_cmd_add(pm4, 0);              /* reference value */
634          ac_pm4_cmd_add(pm4, ~C_0367D0_BUSY); /* mask */
635          ac_pm4_cmd_add(pm4, 4);              /* poll interval */
636       } else if (info->gfx_level >= GFX10) {
637          if (!info->has_sqtt_rb_harvest_bug) {
638             /* Make sure to wait for the trace buffer. */
639             ac_pm4_cmd_add(pm4, PKT3(PKT3_WAIT_REG_MEM, 5, 0));
640             ac_pm4_cmd_add(pm4, WAIT_REG_MEM_NOT_EQUAL); /* wait until the register is equal to the reference value */
641             ac_pm4_cmd_add(pm4, R_008D20_SQ_THREAD_TRACE_STATUS >> 2); /* register */
642             ac_pm4_cmd_add(pm4, 0);
643             ac_pm4_cmd_add(pm4, 0); /* reference value */
644             ac_pm4_cmd_add(pm4, ~C_008D20_FINISH_DONE);
645             ac_pm4_cmd_add(pm4, 4); /* poll interval */
646          }
647 
648          /* Disable the thread trace mode. */
649          ac_pm4_set_reg(pm4, R_008D1C_SQ_THREAD_TRACE_CTRL, ac_sqtt_get_ctrl(info, false));
650 
651          /* Wait for thread trace completion. */
652          ac_pm4_cmd_add(pm4, PKT3(PKT3_WAIT_REG_MEM, 5, 0));
653          ac_pm4_cmd_add(pm4, WAIT_REG_MEM_EQUAL); /* wait until the register is equal to the reference value */
654          ac_pm4_cmd_add(pm4, R_008D20_SQ_THREAD_TRACE_STATUS >> 2); /* register */
655          ac_pm4_cmd_add(pm4, 0);
656          ac_pm4_cmd_add(pm4, 0);              /* reference value */
657          ac_pm4_cmd_add(pm4, ~C_008D20_BUSY); /* mask */
658          ac_pm4_cmd_add(pm4, 4);              /* poll interval */
659       } else {
660          /* Disable the thread trace mode. */
661          ac_pm4_set_reg(pm4, R_030CD8_SQ_THREAD_TRACE_MODE, S_030CD8_MODE(0));
662 
663          /* Wait for thread trace completion. */
664          ac_pm4_cmd_add(pm4, PKT3(PKT3_WAIT_REG_MEM, 5, 0));
665          ac_pm4_cmd_add(pm4, WAIT_REG_MEM_EQUAL); /* wait until the register is equal to the reference value */
666          ac_pm4_cmd_add(pm4, R_030CE8_SQ_THREAD_TRACE_STATUS >> 2); /* register */
667          ac_pm4_cmd_add(pm4, 0);
668          ac_pm4_cmd_add(pm4, 0);              /* reference value */
669          ac_pm4_cmd_add(pm4, ~C_030CE8_BUSY); /* mask */
670          ac_pm4_cmd_add(pm4, 4);              /* poll interval */
671       }
672 
673       ac_sqtt_copy_info_regs(info, pm4, sqtt, se);
674    }
675 
676    /* Restore global broadcasting. */
677    ac_pm4_set_reg(pm4, R_030800_GRBM_GFX_INDEX, S_030800_SE_BROADCAST_WRITES(1) |
678                   S_030800_SH_BROADCAST_WRITES(1) | S_030800_INSTANCE_BROADCAST_WRITES(1));
679 }
680