/*
 * Copyright © 2018 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include <dirent.h>

#include <sys/types.h>
#include <sys/stat.h>

#if defined(MAJOR_IN_SYSMACROS)
#include <sys/sysmacros.h>
#elif defined(MAJOR_IN_MKDEV)
#include <sys/mkdev.h>
#endif

#include <fcntl.h>
#include <unistd.h>
#include <errno.h>

#ifndef HAVE_DIRENT_D_TYPE
#include <limits.h> // PATH_MAX
#endif

#include "common/intel_gem.h"
#include "common/i915/intel_gem.h"

#include "dev/intel_debug.h"
#include "dev/intel_device_info.h"

#include "perf/i915/intel_perf.h"
#include "perf/xe/intel_perf.h"
#include "perf/intel_perf.h"
#include "perf/intel_perf_common.h"
#include "perf/intel_perf_regs.h"
#include "perf/intel_perf_mdapi.h"
#include "perf/intel_perf_metrics.h"
#include "perf/intel_perf_private.h"

58 #include "perf/i915/intel_perf.h"
59 #include "perf/xe/intel_perf.h"
60 
61 #include "util/bitscan.h"
62 #include "util/macros.h"
63 #include "util/mesa-sha1.h"
64 #include "util/u_debug.h"
65 #include "util/u_math.h"
66 
67 #define FILE_DEBUG_FLAG DEBUG_PERFMON
68 
static bool
is_dir_or_link(const struct dirent *entry, const char *parent_dir)
{
#ifdef HAVE_DIRENT_D_TYPE
   return entry->d_type == DT_DIR || entry->d_type == DT_LNK;
#else
   struct stat st;
   char path[PATH_MAX + 1];
   snprintf(path, sizeof(path), "%s/%s", parent_dir, entry->d_name);
   lstat(path, &st);
   return S_ISDIR(st.st_mode) || S_ISLNK(st.st_mode);
#endif
}

static bool
get_sysfs_dev_dir(struct intel_perf_config *perf, int fd)
{
   struct stat sb;
   int min, maj;
   DIR *drmdir;
   struct dirent *drm_entry;
   int len;

   perf->sysfs_dev_dir[0] = '\0';

   if (INTEL_DEBUG(DEBUG_NO_OACONFIG))
      return true;

   if (fstat(fd, &sb)) {
      DBG("Failed to stat DRM fd\n");
      return false;
   }

   maj = major(sb.st_rdev);
   min = minor(sb.st_rdev);

   if (!S_ISCHR(sb.st_mode)) {
      DBG("DRM fd is not a character device as expected\n");
      return false;
   }

   len = snprintf(perf->sysfs_dev_dir,
                  sizeof(perf->sysfs_dev_dir),
                  "/sys/dev/char/%d:%d/device/drm", maj, min);
   if (len < 0 || len >= sizeof(perf->sysfs_dev_dir)) {
      DBG("Failed to concatenate sysfs path to drm device\n");
      return false;
   }

   drmdir = opendir(perf->sysfs_dev_dir);
   if (!drmdir) {
      DBG("Failed to open %s: %m\n", perf->sysfs_dev_dir);
      return false;
   }

   while ((drm_entry = readdir(drmdir))) {
      if (is_dir_or_link(drm_entry, perf->sysfs_dev_dir) &&
          strncmp(drm_entry->d_name, "card", 4) == 0)
      {
         len = snprintf(perf->sysfs_dev_dir,
                        sizeof(perf->sysfs_dev_dir),
                        "/sys/dev/char/%d:%d/device/drm/%s",
                        maj, min, drm_entry->d_name);
         closedir(drmdir);
         if (len < 0 || len >= sizeof(perf->sysfs_dev_dir))
            return false;
         else
            return true;
      }
   }

   closedir(drmdir);

   DBG("Failed to find cardX directory under /sys/dev/char/%d:%d/device/drm\n",
       maj, min);

   return false;
}

static bool
read_sysfs_drm_device_file_uint64(struct intel_perf_config *perf,
                                  const char *file,
                                  uint64_t *value)
{
   char buf[512];
   int len;

   len = snprintf(buf, sizeof(buf), "%s/%s", perf->sysfs_dev_dir, file);
   if (len < 0 || len >= sizeof(buf)) {
      DBG("Failed to concatenate sys filename to read u64 from\n");
      return false;
   }

   return read_file_uint64(buf, value);
}
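
/* Illustrative example (paths depend on the system): for a DRM render node
 * with dev_t 226:128, get_sysfs_dev_dir() resolves something like
 * "/sys/dev/char/226:128/device/drm/card0", and
 * read_sysfs_drm_device_file_uint64(perf, "gt_min_freq_mhz", &v) then reads
 * "/sys/dev/char/226:128/device/drm/card0/gt_min_freq_mhz".
 */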

static bool
oa_config_enabled(struct intel_perf_config *perf,
                  const struct intel_perf_query_info *query) {
   // Hide extended metrics unless enabled with env param
   bool is_extended_metric = strncmp(query->name, "Ext", 3) == 0;

   return perf->enable_all_metrics || !is_extended_metric;
}
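
/* Note: perf->enable_all_metrics is initialized from the
 * INTEL_EXTENDED_METRICS environment variable in oa_metrics_available()
 * below, so "Ext*" metric sets are only exposed when that variable is set.
 */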

static void
register_oa_config(struct intel_perf_config *perf,
                   const struct intel_device_info *devinfo,
                   const struct intel_perf_query_info *query,
                   uint64_t config_id)
{
   if (!oa_config_enabled(perf, query))
      return;

   struct intel_perf_query_info *registered_query =
      intel_perf_append_query_info(perf, 0);

   *registered_query = *query;
   registered_query->oa_metrics_set_id = config_id;
   DBG("metric set registered: id = %" PRIu64 ", guid = %s\n",
       registered_query->oa_metrics_set_id, query->guid);
}

static void
enumerate_sysfs_metrics(struct intel_perf_config *perf,
                        const struct intel_device_info *devinfo)
{
   DIR *metricsdir = NULL;
   struct dirent *metric_entry;
   char buf[256];
   int len;

   len = snprintf(buf, sizeof(buf), "%s/metrics", perf->sysfs_dev_dir);
   if (len < 0 || len >= sizeof(buf)) {
      DBG("Failed to concatenate path to sysfs metrics/ directory\n");
      return;
   }

   metricsdir = opendir(buf);
   if (!metricsdir) {
      DBG("Failed to open %s: %m\n", buf);
      return;
   }

   while ((metric_entry = readdir(metricsdir))) {
      struct hash_entry *entry;
      if (!is_dir_or_link(metric_entry, buf) ||
          metric_entry->d_name[0] == '.')
         continue;

      DBG("metric set: %s\n", metric_entry->d_name);
      entry = _mesa_hash_table_search(perf->oa_metrics_table,
                                      metric_entry->d_name);
      if (entry) {
         uint64_t id;
         if (!intel_perf_load_metric_id(perf, metric_entry->d_name, &id)) {
            DBG("Failed to read metric set id from %s: %m", buf);
            continue;
         }

         register_oa_config(perf, devinfo,
                            (const struct intel_perf_query_info *)entry->data, id);
      } else
         DBG("metric set not known by mesa (skipping)\n");
   }

   closedir(metricsdir);
}

static void
add_all_metrics(struct intel_perf_config *perf,
                const struct intel_device_info *devinfo)
{
   hash_table_foreach(perf->oa_metrics_table, entry) {
      const struct intel_perf_query_info *query = entry->data;
      register_oa_config(perf, devinfo, query, 0);
   }
}

static bool
kernel_has_dynamic_config_support(struct intel_perf_config *perf, int fd)
{
   switch (perf->devinfo->kmd_type) {
   case INTEL_KMD_TYPE_I915:
      return i915_has_dynamic_config_support(perf, fd);
   case INTEL_KMD_TYPE_XE:
      return true;
   default:
      unreachable("missing");
      return false;
   }
}

bool
intel_perf_load_metric_id(struct intel_perf_config *perf_cfg,
                          const char *guid,
                          uint64_t *metric_id)
{
   char config_path[280];

   snprintf(config_path, sizeof(config_path), "%s/metrics/%s/id",
            perf_cfg->sysfs_dev_dir, guid);

   /* Don't recreate already loaded configs. */
   return read_file_uint64(config_path, metric_id);
}

static uint64_t
kmd_add_config(struct intel_perf_config *perf, int fd,
               const struct intel_perf_registers *config,
               const char *guid)
{
   switch (perf->devinfo->kmd_type) {
   case INTEL_KMD_TYPE_I915:
      return i915_add_config(perf, fd, config, guid);
   case INTEL_KMD_TYPE_XE:
      return xe_add_config(perf, fd, config, guid);
   default:
      unreachable("missing");
      return 0;
   }
}

static void
init_oa_configs(struct intel_perf_config *perf, int fd,
                const struct intel_device_info *devinfo)
{
   hash_table_foreach(perf->oa_metrics_table, entry) {
      const struct intel_perf_query_info *query = entry->data;
      uint64_t config_id;

      if (intel_perf_load_metric_id(perf, query->guid, &config_id)) {
         DBG("metric set: %s (already loaded)\n", query->guid);
         register_oa_config(perf, devinfo, query, config_id);
         continue;
      }

      uint64_t ret = kmd_add_config(perf, fd, &query->config, query->guid);
      if (ret == 0) {
         DBG("Failed to load \"%s\" (%s) metrics set in kernel: %s\n",
             query->name, query->guid, strerror(errno));
         continue;
      }

      register_oa_config(perf, devinfo, query, ret);
      DBG("metric set: %s (added)\n", query->guid);
   }
}

static void
compute_topology_builtins(struct intel_perf_config *perf)
{
   const struct intel_device_info *devinfo = perf->devinfo;

   perf->sys_vars.slice_mask = devinfo->slice_masks;
   perf->sys_vars.n_eu_slices = devinfo->num_slices;
   perf->sys_vars.n_l3_banks = devinfo->l3_banks;
   perf->sys_vars.n_l3_nodes = devinfo->l3_banks / 4;
   perf->sys_vars.n_sq_idis = devinfo->num_slices;

   perf->sys_vars.n_eu_slice0123 = 0;
   for (int s = 0; s < MIN2(4, devinfo->max_slices); s++) {
      if (!intel_device_info_slice_available(devinfo, s))
         continue;

      for (int ss = 0; ss < devinfo->max_subslices_per_slice; ss++) {
         if (!intel_device_info_subslice_available(devinfo, s, ss))
            continue;

         for (int eu = 0; eu < devinfo->max_eus_per_subslice; eu++) {
            if (intel_device_info_eu_available(devinfo, s, ss, eu))
               perf->sys_vars.n_eu_slice0123++;
         }
      }
   }

   perf->sys_vars.n_eu_sub_slices = intel_device_info_subslice_total(devinfo);
   perf->sys_vars.n_eus = intel_device_info_eu_total(devinfo);

   /* The subslice mask builtin contains bits for all slices. Prior to Gfx11
    * it had groups of 3 bits for each slice; on Gfx11 and above it's 8 bits
    * for each slice.
    *
    * Ideally equations would be updated to have a slice/subslice query
    * function/operator.
    */
   perf->sys_vars.subslice_mask = 0;

   int bits_per_subslice = devinfo->ver >= 11 ? 8 : 3;

   for (int s = 0; s < util_last_bit(devinfo->slice_masks); s++) {
      for (int ss = 0; ss < (devinfo->subslice_slice_stride * 8); ss++) {
         if (intel_device_info_subslice_available(devinfo, s, ss))
            perf->sys_vars.subslice_mask |= 1ULL << (s * bits_per_subslice + ss);
      }
   }
}
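
/* Worked example of the subslice mask layout (illustrative values, not from
 * the source): on a Gfx11+ part (8 bits per slice) with subslices 0 and 1
 * present on slice 0 and subslice 0 present on slice 1, the bits set are
 * (0 * 8 + 0), (0 * 8 + 1) and (1 * 8 + 0), giving subslice_mask = 0x103.
 */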

static bool
init_oa_sys_vars(struct intel_perf_config *perf,
                 bool use_register_snapshots)
{
   uint64_t min_freq_mhz = 0, max_freq_mhz = 0;

   if (!INTEL_DEBUG(DEBUG_NO_OACONFIG)) {
      const char *min_file, *max_file;

      switch (perf->devinfo->kmd_type) {
      case INTEL_KMD_TYPE_I915:
         min_file = "gt_min_freq_mhz";
         max_file = "gt_max_freq_mhz";
         break;
      case INTEL_KMD_TYPE_XE:
         min_file = "device/tile0/gt0/freq0/min_freq";
         max_file = "device/tile0/gt0/freq0/max_freq";
         break;
      default:
         unreachable("missing");
         return false;
      }

      if (!read_sysfs_drm_device_file_uint64(perf, min_file, &min_freq_mhz))
         return false;

      if (!read_sysfs_drm_device_file_uint64(perf, max_file, &max_freq_mhz))
         return false;
   } else {
      min_freq_mhz = 300;
      max_freq_mhz = 1000;
   }

   memset(&perf->sys_vars, 0, sizeof(perf->sys_vars));
   perf->sys_vars.gt_min_freq = min_freq_mhz * 1000000;
   perf->sys_vars.gt_max_freq = max_freq_mhz * 1000000;
   perf->sys_vars.query_mode = use_register_snapshots;
   compute_topology_builtins(perf);

   return true;
}

typedef void (*perf_register_oa_queries_t)(struct intel_perf_config *);

static perf_register_oa_queries_t
get_register_queries_function(const struct intel_device_info *devinfo)
{
   switch (devinfo->platform) {
   case INTEL_PLATFORM_HSW:
      return intel_oa_register_queries_hsw;
   case INTEL_PLATFORM_CHV:
      return intel_oa_register_queries_chv;
   case INTEL_PLATFORM_BDW:
      return intel_oa_register_queries_bdw;
   case INTEL_PLATFORM_BXT:
      return intel_oa_register_queries_bxt;
   case INTEL_PLATFORM_SKL:
      if (devinfo->gt == 2)
         return intel_oa_register_queries_sklgt2;
      if (devinfo->gt == 3)
         return intel_oa_register_queries_sklgt3;
      if (devinfo->gt == 4)
         return intel_oa_register_queries_sklgt4;
      return NULL;
   case INTEL_PLATFORM_KBL:
      if (devinfo->gt == 2)
         return intel_oa_register_queries_kblgt2;
      if (devinfo->gt == 3)
         return intel_oa_register_queries_kblgt3;
      return NULL;
   case INTEL_PLATFORM_GLK:
      return intel_oa_register_queries_glk;
   case INTEL_PLATFORM_CFL:
      if (devinfo->gt == 2)
         return intel_oa_register_queries_cflgt2;
      if (devinfo->gt == 3)
         return intel_oa_register_queries_cflgt3;
      return NULL;
   case INTEL_PLATFORM_ICL:
      return intel_oa_register_queries_icl;
   case INTEL_PLATFORM_EHL:
      return intel_oa_register_queries_ehl;
   case INTEL_PLATFORM_TGL:
      if (devinfo->gt == 1)
         return intel_oa_register_queries_tglgt1;
      if (devinfo->gt == 2)
         return intel_oa_register_queries_tglgt2;
      return NULL;
   case INTEL_PLATFORM_RKL:
      return intel_oa_register_queries_rkl;
   case INTEL_PLATFORM_DG1:
      return intel_oa_register_queries_dg1;
   case INTEL_PLATFORM_ADL:
   case INTEL_PLATFORM_RPL:
      return intel_oa_register_queries_adl;
   case INTEL_PLATFORM_DG2_G10:
      return intel_oa_register_queries_acmgt3;
   case INTEL_PLATFORM_DG2_G11:
      return intel_oa_register_queries_acmgt1;
   case INTEL_PLATFORM_DG2_G12:
      return intel_oa_register_queries_acmgt2;
   case INTEL_PLATFORM_MTL_U:
   case INTEL_PLATFORM_MTL_H:
      if (intel_device_info_eu_total(devinfo) <= 64)
         return intel_oa_register_queries_mtlgt2;
      if (intel_device_info_eu_total(devinfo) <= 128)
         return intel_oa_register_queries_mtlgt3;
      return NULL;
   case INTEL_PLATFORM_ARL_U:
   case INTEL_PLATFORM_ARL_H:
      if (intel_device_info_eu_total(devinfo) <= 64)
         return intel_oa_register_queries_arlgt1;
      if (intel_device_info_eu_total(devinfo) <= 128)
         return intel_oa_register_queries_arlgt2;
      return NULL;
   case INTEL_PLATFORM_LNL:
      return intel_oa_register_queries_lnl;
   case INTEL_PLATFORM_BMG:
      return intel_oa_register_queries_bmg;
   default:
      return NULL;
   }
}

static int
intel_perf_compare_counter_names(const void *v1, const void *v2)
{
   const struct intel_perf_query_counter *c1 = v1;
   const struct intel_perf_query_counter *c2 = v2;

   return strcmp(c1->name, c2->name);
}

static void
sort_query(struct intel_perf_query_info *q)
{
   qsort(q->counters, q->n_counters, sizeof(q->counters[0]),
         intel_perf_compare_counter_names);
}

static void
load_pipeline_statistic_metrics(struct intel_perf_config *perf_cfg,
                                const struct intel_device_info *devinfo)
{
   struct intel_perf_query_info *query =
      intel_perf_append_query_info(perf_cfg, MAX_STAT_COUNTERS);

   query->kind = INTEL_PERF_QUERY_TYPE_PIPELINE;
   query->name = "Pipeline Statistics Registers";

   intel_perf_query_add_basic_stat_reg(query, IA_VERTICES_COUNT,
                                       "N vertices submitted");
   intel_perf_query_add_basic_stat_reg(query, IA_PRIMITIVES_COUNT,
                                       "N primitives submitted");
   intel_perf_query_add_basic_stat_reg(query, VS_INVOCATION_COUNT,
                                       "N vertex shader invocations");

   if (devinfo->ver == 6) {
      intel_perf_query_add_stat_reg(query, GFX6_SO_PRIM_STORAGE_NEEDED, 1, 1,
                                    "SO_PRIM_STORAGE_NEEDED",
                                    "N geometry shader stream-out primitives (total)");
      intel_perf_query_add_stat_reg(query, GFX6_SO_NUM_PRIMS_WRITTEN, 1, 1,
                                    "SO_NUM_PRIMS_WRITTEN",
                                    "N geometry shader stream-out primitives (written)");
   } else {
      intel_perf_query_add_stat_reg(query, GFX7_SO_PRIM_STORAGE_NEEDED(0), 1, 1,
                                    "SO_PRIM_STORAGE_NEEDED (Stream 0)",
                                    "N stream-out (stream 0) primitives (total)");
      intel_perf_query_add_stat_reg(query, GFX7_SO_PRIM_STORAGE_NEEDED(1), 1, 1,
                                    "SO_PRIM_STORAGE_NEEDED (Stream 1)",
                                    "N stream-out (stream 1) primitives (total)");
      intel_perf_query_add_stat_reg(query, GFX7_SO_PRIM_STORAGE_NEEDED(2), 1, 1,
                                    "SO_PRIM_STORAGE_NEEDED (Stream 2)",
                                    "N stream-out (stream 2) primitives (total)");
      intel_perf_query_add_stat_reg(query, GFX7_SO_PRIM_STORAGE_NEEDED(3), 1, 1,
                                    "SO_PRIM_STORAGE_NEEDED (Stream 3)",
                                    "N stream-out (stream 3) primitives (total)");
      intel_perf_query_add_stat_reg(query, GFX7_SO_NUM_PRIMS_WRITTEN(0), 1, 1,
                                    "SO_NUM_PRIMS_WRITTEN (Stream 0)",
                                    "N stream-out (stream 0) primitives (written)");
      intel_perf_query_add_stat_reg(query, GFX7_SO_NUM_PRIMS_WRITTEN(1), 1, 1,
                                    "SO_NUM_PRIMS_WRITTEN (Stream 1)",
                                    "N stream-out (stream 1) primitives (written)");
      intel_perf_query_add_stat_reg(query, GFX7_SO_NUM_PRIMS_WRITTEN(2), 1, 1,
                                    "SO_NUM_PRIMS_WRITTEN (Stream 2)",
                                    "N stream-out (stream 2) primitives (written)");
      intel_perf_query_add_stat_reg(query, GFX7_SO_NUM_PRIMS_WRITTEN(3), 1, 1,
                                    "SO_NUM_PRIMS_WRITTEN (Stream 3)",
                                    "N stream-out (stream 3) primitives (written)");
   }

   intel_perf_query_add_basic_stat_reg(query, HS_INVOCATION_COUNT,
                                       "N TCS shader invocations");
   intel_perf_query_add_basic_stat_reg(query, DS_INVOCATION_COUNT,
                                       "N TES shader invocations");

   intel_perf_query_add_basic_stat_reg(query, GS_INVOCATION_COUNT,
                                       "N geometry shader invocations");
   intel_perf_query_add_basic_stat_reg(query, GS_PRIMITIVES_COUNT,
                                       "N geometry shader primitives emitted");

   intel_perf_query_add_basic_stat_reg(query, CL_INVOCATION_COUNT,
                                       "N primitives entering clipping");
   intel_perf_query_add_basic_stat_reg(query, CL_PRIMITIVES_COUNT,
                                       "N primitives leaving clipping");

   if (devinfo->verx10 == 75 || devinfo->ver == 8) {
      intel_perf_query_add_stat_reg(query, PS_INVOCATION_COUNT, 1, 4,
                                    "N fragment shader invocations",
                                    "N fragment shader invocations");
   } else {
      intel_perf_query_add_basic_stat_reg(query, PS_INVOCATION_COUNT,
                                          "N fragment shader invocations");
   }

   intel_perf_query_add_basic_stat_reg(query, PS_DEPTH_COUNT,
                                       "N z-pass fragments");

   if (devinfo->ver >= 7) {
      intel_perf_query_add_basic_stat_reg(query, CS_INVOCATION_COUNT,
                                          "N compute shader invocations");
   }

   query->data_size = sizeof(uint64_t) * query->n_counters;

   sort_query(query);
}
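
/* Note on the stat-reg helpers used above (an assumption from the call
 * sites, not documented here): intel_perf_query_add_stat_reg() takes a
 * numerator/denominator pair that scales the accumulated register delta, so
 * the (1, 4) used for PS_INVOCATION_COUNT on HSW/Gfx8 divides the raw count
 * by 4, while intel_perf_query_add_basic_stat_reg() is the unscaled (1, 1)
 * shorthand.
 */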

static inline int
compare_str_or_null(const char *s1, const char *s2)
{
   if (s1 == NULL && s2 == NULL)
      return 0;
   if (s1 == NULL)
      return -1;
   if (s2 == NULL)
      return 1;

   return strcmp(s1, s2);
}

static int
compare_counter_categories_and_names(const void *_c1, const void *_c2)
{
   const struct intel_perf_query_counter_info *c1 = (const struct intel_perf_query_counter_info *)_c1;
   const struct intel_perf_query_counter_info *c2 = (const struct intel_perf_query_counter_info *)_c2;

   /* Pipeline counters don't have an assigned category. */
   int r = compare_str_or_null(c1->counter->category, c2->counter->category);
   if (r)
      return r;

   return strcmp(c1->counter->name, c2->counter->name);
}

static void
build_unique_counter_list(struct intel_perf_config *perf)
{
   size_t max_counters = 0;

   for (int q = 0; q < perf->n_queries; q++)
      max_counters += perf->queries[q].n_counters;

   /*
    * Allocate a big enough array to hold the maximum possible number of
    * counters. We can't alloc it small and realloc when needed because the
    * hash table below contains pointers to this array.
    */
   struct intel_perf_query_counter_info *counter_infos =
         rzalloc_array_size(perf, sizeof(counter_infos[0]), max_counters);

   perf->n_counters = 0;

   struct hash_table *counters_table =
      _mesa_hash_table_create(NULL,
                              _mesa_hash_string,
                              _mesa_key_string_equal);
   struct hash_entry *entry;
   for (int q = 0; q < perf->n_queries ; q++) {
      struct intel_perf_query_info *query = &perf->queries[q];

      for (int c = 0; c < query->n_counters; c++) {
         struct intel_perf_query_counter *counter;
         struct intel_perf_query_counter_info *counter_info;

         counter = &query->counters[c];
         entry = _mesa_hash_table_search(counters_table, counter->symbol_name);

         if (entry) {
            counter_info = entry->data;
            BITSET_SET(counter_info->query_mask, q);
            continue;
         }
         assert(perf->n_counters < max_counters);

         counter_info = &counter_infos[perf->n_counters++];
         counter_info->counter = counter;
         BITSET_SET(counter_info->query_mask, q);

         counter_info->location.group_idx = q;
         counter_info->location.counter_idx = c;

         _mesa_hash_table_insert(counters_table, counter->symbol_name, counter_info);
      }
   }

   _mesa_hash_table_destroy(counters_table, NULL);

   perf->counter_infos = counter_infos;

   qsort(perf->counter_infos, perf->n_counters, sizeof(perf->counter_infos[0]),
         compare_counter_categories_and_names);
}
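
/* Summary of the dedup scheme above: a counter with the same symbol_name can
 * appear in several metric sets. The temporary hash table keeps a single
 * intel_perf_query_counter_info per symbol while each counter's query_mask
 * bitset records every metric set that can produce it; get_passes_mask()
 * below consumes those masks to schedule counters into passes.
 */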

static bool
oa_metrics_available(struct intel_perf_config *perf, int fd,
                     const struct intel_device_info *devinfo,
                     bool use_register_snapshots)
{
   perf_register_oa_queries_t oa_register = get_register_queries_function(devinfo);
   bool oa_metrics_available = false;

   perf->devinfo = devinfo;

   /* Consider an invalid fd as supported. */
   if (fd == -1) {
      perf->features_supported = INTEL_PERF_FEATURE_QUERY_PERF;
      return true;
   }

   perf->enable_all_metrics = debug_get_bool_option("INTEL_EXTENDED_METRICS", false);

   /* TODO: We should query this from i915?
    * Looks like Xe2 platforms don't need it, but we don't have a spec quote
    * to back that.
    */
   if (devinfo->verx10 == 125)
      perf->oa_timestamp_shift = 1;

   perf->oa_timestamp_mask =
      0xffffffffffffffffull >> (32 + perf->oa_timestamp_shift);

   switch (devinfo->kmd_type) {
   case INTEL_KMD_TYPE_I915:
      oa_metrics_available = i915_oa_metrics_available(perf, fd, use_register_snapshots);
      break;
   case INTEL_KMD_TYPE_XE:
      oa_metrics_available = xe_oa_metrics_available(perf, fd, use_register_snapshots);
      break;
   default:
      unreachable("missing");
      break;
   }

   return oa_metrics_available &&
          oa_register &&
          get_sysfs_dev_dir(perf, fd) &&
          init_oa_sys_vars(perf, use_register_snapshots);
}

static void
load_oa_metrics(struct intel_perf_config *perf, int fd,
                const struct intel_device_info *devinfo)
{
   int existing_queries = perf->n_queries;

   perf_register_oa_queries_t oa_register = get_register_queries_function(devinfo);

   perf->oa_metrics_table =
      _mesa_hash_table_create(perf, _mesa_hash_string,
                              _mesa_key_string_equal);

   /* Index all the metric sets mesa knows about before looking to see what
    * the kernel is advertising.
    */
   oa_register(perf);

   if (!INTEL_DEBUG(DEBUG_NO_OACONFIG)) {
      if (kernel_has_dynamic_config_support(perf, fd))
         init_oa_configs(perf, fd, devinfo);
      else
         enumerate_sysfs_metrics(perf, devinfo);
   } else {
      add_all_metrics(perf, devinfo);
   }

   /* Sort counters by name in each individual group created by this function. */
   for (int i = existing_queries; i < perf->n_queries; ++i)
      sort_query(&perf->queries[i]);

   /* Select a fallback OA metric. Look for the TestOa metric, or use the
    * last one if it is not present (on HSW).
    */
   for (int i = existing_queries; i < perf->n_queries; i++) {
      if (perf->queries[i].symbol_name &&
          strcmp(perf->queries[i].symbol_name, "TestOa") == 0) {
         perf->fallback_raw_oa_metric = perf->queries[i].oa_metrics_set_id;
         break;
      }
   }
   if (perf->fallback_raw_oa_metric == 0 && perf->n_queries > 0)
      perf->fallback_raw_oa_metric = perf->queries[perf->n_queries - 1].oa_metrics_set_id;
}

struct intel_perf_registers *
intel_perf_load_configuration(struct intel_perf_config *perf_cfg, int fd, const char *guid)
{
   if (!(perf_cfg->features_supported & INTEL_PERF_FEATURE_QUERY_PERF))
      return NULL;

   switch (perf_cfg->devinfo->kmd_type) {
   case INTEL_KMD_TYPE_I915:
      return i915_perf_load_configurations(perf_cfg, fd, guid);
   default:
      unreachable("missing");
      return NULL;
   }
}

uint64_t
intel_perf_store_configuration(struct intel_perf_config *perf_cfg, int fd,
                               const struct intel_perf_registers *config,
                               const char *guid)
{
   if (guid)
      return kmd_add_config(perf_cfg, fd, config, guid);

   struct mesa_sha1 sha1_ctx;
   _mesa_sha1_init(&sha1_ctx);

   if (config->flex_regs) {
      _mesa_sha1_update(&sha1_ctx, config->flex_regs,
                        sizeof(config->flex_regs[0]) *
                        config->n_flex_regs);
   }
   if (config->mux_regs) {
      _mesa_sha1_update(&sha1_ctx, config->mux_regs,
                        sizeof(config->mux_regs[0]) *
                        config->n_mux_regs);
   }
   if (config->b_counter_regs) {
      _mesa_sha1_update(&sha1_ctx, config->b_counter_regs,
                        sizeof(config->b_counter_regs[0]) *
                        config->n_b_counter_regs);
   }

   uint8_t hash[20];
   _mesa_sha1_final(&sha1_ctx, hash);

   char formatted_hash[41];
   _mesa_sha1_format(formatted_hash, hash);

   char generated_guid[37];
   snprintf(generated_guid, sizeof(generated_guid),
            "%.8s-%.4s-%.4s-%.4s-%.12s",
            &formatted_hash[0], &formatted_hash[8],
            &formatted_hash[8 + 4], &formatted_hash[8 + 4 + 4],
            &formatted_hash[8 + 4 + 4 + 4]);

   /* Check if already present. */
   uint64_t id;
   if (intel_perf_load_metric_id(perf_cfg, generated_guid, &id))
      return id;

   return kmd_add_config(perf_cfg, fd, config, generated_guid);
}
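
/* Illustrative example of the GUID derivation above: if the SHA1 of the
 * register lists formats to "da39a3ee5e6b4b0d3255bfef95601890afd80709", the
 * generated GUID is the first 32 hex chars grouped 8-4-4-4-12:
 * "da39a3ee-5e6b-4b0d-3255-bfef95601890".
 */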

void
intel_perf_remove_configuration(struct intel_perf_config *perf_cfg, int fd,
                                uint64_t config_id)
{
   switch (perf_cfg->devinfo->kmd_type) {
   case INTEL_KMD_TYPE_I915:
      i915_remove_config(perf_cfg, fd, config_id);
      break;
   case INTEL_KMD_TYPE_XE:
      xe_remove_config(perf_cfg, fd, config_id);
      break;
   default:
      unreachable("missing");
   }
}

static void
get_passes_mask(struct intel_perf_config *perf,
                const uint32_t *counter_indices,
                uint32_t counter_indices_count,
                BITSET_WORD *queries_mask)
{
   /* For each counter, check whether it's already computed by a selected
    * metric set, or find one that can compute it.
    */
   for (uint32_t c = 0; c < counter_indices_count; c++) {
      uint32_t counter_idx = counter_indices[c];
      assert(counter_idx < perf->n_counters);

      const struct intel_perf_query_counter_info *counter_info =
         &perf->counter_infos[counter_idx];

      /* Check if the counter is already computed by one of the selected
       * metric sets. If it is, there is nothing more to do with this counter.
       */
      uint32_t match = UINT32_MAX;
      for (uint32_t w = 0; w < BITSET_WORDS(INTEL_PERF_MAX_METRIC_SETS); w++) {
         if (queries_mask[w] & counter_info->query_mask[w]) {
            match = w * BITSET_WORDBITS + ffsll(queries_mask[w] & counter_info->query_mask[w]) - 1;
            break;
         }
      }
      if (match != UINT32_MAX)
         continue;

      /* Now go through each metric set and find one that contains this
       * counter.
       */
      bool found = false;
      for (uint32_t w = 0; w < BITSET_WORDS(INTEL_PERF_MAX_METRIC_SETS); w++) {
         if (!counter_info->query_mask[w])
            continue;

         uint32_t query_idx = w * BITSET_WORDBITS + ffsll(counter_info->query_mask[w]) - 1;

         /* Since we already looked for this in the queries_mask, it should
          * not be set.
          */
         assert(!BITSET_TEST(queries_mask, query_idx));

         BITSET_SET(queries_mask, query_idx);
         found = true;
         break;
      }
      assert(found);
   }
}

uint32_t
intel_perf_get_n_passes(struct intel_perf_config *perf,
                        const uint32_t *counter_indices,
                        uint32_t counter_indices_count,
                        struct intel_perf_query_info **pass_queries)
{
   BITSET_DECLARE(queries_mask, INTEL_PERF_MAX_METRIC_SETS);
   BITSET_ZERO(queries_mask);

   get_passes_mask(perf, counter_indices, counter_indices_count, queries_mask);

   if (pass_queries) {
      uint32_t pass = 0;
      for (uint32_t q = 0; q < perf->n_queries; q++) {
         if (BITSET_TEST(queries_mask, q))
            pass_queries[pass++] = &perf->queries[q];
      }
   }

   return BITSET_COUNT(queries_mask);
}
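
/* Typical caller pattern (a sketch, not from this file): call once with
 * pass_queries = NULL to size the array, then again to fill it.
 *
 *    uint32_t n = intel_perf_get_n_passes(perf, indices, count, NULL);
 *    struct intel_perf_query_info **passes = malloc(n * sizeof(*passes));
 *    intel_perf_get_n_passes(perf, indices, count, passes);
 */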

void
intel_perf_get_counters_passes(struct intel_perf_config *perf,
                               const uint32_t *counter_indices,
                               uint32_t counter_indices_count,
                               struct intel_perf_counter_pass *counter_pass)
{
   BITSET_DECLARE(queries_mask, INTEL_PERF_MAX_METRIC_SETS);
   BITSET_ZERO(queries_mask);

   get_passes_mask(perf, counter_indices, counter_indices_count, queries_mask);

   for (uint32_t i = 0; i < counter_indices_count; i++) {
      assert(counter_indices[i] < perf->n_counters);

      uint32_t counter_idx = counter_indices[i];
      counter_pass[i].counter = perf->counter_infos[counter_idx].counter;

      const struct intel_perf_query_counter_info *counter_info =
         &perf->counter_infos[counter_idx];

      uint32_t query_idx = UINT32_MAX;
      for (uint32_t w = 0; w < BITSET_WORDS(INTEL_PERF_MAX_METRIC_SETS); w++) {
         if (counter_info->query_mask[w] & queries_mask[w]) {
            query_idx = w * BITSET_WORDBITS +
               ffsll(counter_info->query_mask[w] & queries_mask[w]) - 1;
            break;
         }
      }
      assert(query_idx != UINT32_MAX);

      counter_pass[i].query = &perf->queries[query_idx];
   }
}

/* Accumulate 32bit OA counters */
static inline void
accumulate_uint32(const uint32_t *report0,
                  const uint32_t *report1,
                  uint64_t *accumulator)
{
   *accumulator += (uint32_t)(*report1 - *report0);
}

/* Accumulate 40bit OA counters */
static inline void
accumulate_uint40(int a_index,
                  const uint32_t *report0,
                  const uint32_t *report1,
                  uint64_t *accumulator)
{
   const uint8_t *high_bytes0 = (uint8_t *)(report0 + 40);
   const uint8_t *high_bytes1 = (uint8_t *)(report1 + 40);
   uint64_t high0 = (uint64_t)(high_bytes0[a_index]) << 32;
   uint64_t high1 = (uint64_t)(high_bytes1[a_index]) << 32;
   uint64_t value0 = report0[a_index + 4] | high0;
   uint64_t value1 = report1[a_index + 4] | high1;
   uint64_t delta;

   if (value0 > value1)
      delta = (1ULL << 40) + value1 - value0;
   else
      delta = value1 - value0;

   *accumulator += delta;
}
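
/* Worked example of the 40-bit wraparound handling above (illustrative
 * values): if value0 = 0xFFFFFFFFF0 and the counter wraps so that
 * value1 = 0x0000000010, then value0 > value1 and
 * delta = (1ULL << 40) + 0x10 - 0xFFFFFFFFF0 = 0x20, i.e. 32 ticks elapsed.
 */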

/* Accumulate 64bit OA counters */
static inline void
accumulate_uint64(const uint32_t *report0,
                  const uint32_t *report1,
                  uint64_t *accumulator)
{
   *accumulator += *((const uint64_t *)report1) - *((const uint64_t *)report0);
}

static void
gfx8_read_report_clock_ratios(const uint32_t *report,
                              uint64_t *slice_freq_hz,
                              uint64_t *unslice_freq_hz)
{
   /* The lower 16bits of the RPT_ID field of the OA reports contains a
    * snapshot of the bits coming from the RP_FREQ_NORMAL register and is
    * divided this way:
    *
    * RPT_ID[31:25]: RP_FREQ_NORMAL[20:14] (low squashed_slice_clock_frequency)
    * RPT_ID[10:9]:  RP_FREQ_NORMAL[22:21] (high squashed_slice_clock_frequency)
    * RPT_ID[8:0]:   RP_FREQ_NORMAL[31:23] (squashed_unslice_clock_frequency)
    *
    * RP_FREQ_NORMAL[31:23]: Software Unslice Ratio Request
    *                        Multiple of 33.33MHz 2xclk (16 MHz 1xclk)
    *
    * RP_FREQ_NORMAL[22:14]: Software Slice Ratio Request
    *                        Multiple of 33.33MHz 2xclk (16 MHz 1xclk)
    */

   uint32_t unslice_freq = report[0] & 0x1ff;
   uint32_t slice_freq_low = (report[0] >> 25) & 0x7f;
   uint32_t slice_freq_high = (report[0] >> 9) & 0x3;
   uint32_t slice_freq = slice_freq_low | (slice_freq_high << 7);

   *slice_freq_hz = slice_freq * 16666667ULL;
   *unslice_freq_hz = unslice_freq * 16666667ULL;
}
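
/* Illustrative decode (made-up report value): RPT_ID = 0x3C000020 gives
 * unslice_freq = 0x20 (32) and slice_freq = 0x1E (30), so
 * *unslice_freq_hz = 32 * 16666667 ≈ 533 MHz and
 * *slice_freq_hz = 30 * 16666667 ≈ 500 MHz.
 */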

void
intel_perf_query_result_read_frequencies(struct intel_perf_query_result *result,
                                         const struct intel_device_info *devinfo,
                                         const uint32_t *start,
                                         const uint32_t *end)
{
   /* Slice/Unslice frequency is only available in the OA reports when the
    * "Disable OA reports due to clock ratio change" field in
    * OA_DEBUG_REGISTER is set to 1. This is how the kernel programs this
    * global register (see drivers/gpu/drm/i915/i915_perf.c).
    *
    * Documentation says this should be available on Gfx9+ but experimentation
    * shows that Gfx8 reports similar values, so we enable it there too.
    */
   if (devinfo->ver < 8)
      return;

   gfx8_read_report_clock_ratios(start,
                                 &result->slice_frequency[0],
                                 &result->unslice_frequency[0]);
   gfx8_read_report_clock_ratios(end,
                                 &result->slice_frequency[1],
                                 &result->unslice_frequency[1]);
}

static inline bool
can_use_mi_rpc_bc_counters(const struct intel_device_info *devinfo)
{
   return devinfo->ver <= 11;
}

uint64_t
intel_perf_report_timestamp(const struct intel_perf_query_info *query,
                            const struct intel_device_info *devinfo,
                            const uint32_t *report)
{
   if (query->perf->devinfo->verx10 >= 200) {
      uint64_t data_u64 = *((const uint64_t *)&report[2]);
      return data_u64 >> query->perf->oa_timestamp_shift;
   }

   return report[1] >> query->perf->oa_timestamp_shift;
}
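
/* Example of the shift (values set in oa_metrics_available() above): on
 * verx10 == 125 parts oa_timestamp_shift is 1, so a raw 32-bit report
 * timestamp of 0x80000002 yields 0x40000001; on other platforms the shift is
 * 0 and the raw value is returned unchanged. Xe2+ (verx10 >= 200) reads a
 * 64-bit timestamp from report[2] instead.
 */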

void
intel_perf_query_result_accumulate(struct intel_perf_query_result *result,
                                   const struct intel_perf_query_info *query,
                                   const uint32_t *start,
                                   const uint32_t *end)
{
   const struct intel_device_info *devinfo = query->perf->devinfo;
   int i;

   if (query->perf->devinfo->verx10 >= 200) {
      if (result->hw_id == INTEL_PERF_INVALID_CTX_ID &&
          start[4] != INTEL_PERF_INVALID_CTX_ID)
         result->hw_id = start[4];
   } else {
      if (result->hw_id == INTEL_PERF_INVALID_CTX_ID &&
          start[2] != INTEL_PERF_INVALID_CTX_ID)
         result->hw_id = start[2];
   }

   if (result->reports_accumulated == 0)
      result->begin_timestamp = intel_perf_report_timestamp(query, devinfo, start);
   result->end_timestamp = intel_perf_report_timestamp(query, devinfo, end);
   result->reports_accumulated++;

   /* OA format handling needs to match the platform version returned by
    * intel_perf_get_oa_format().
    */
   assert(intel_perf_get_oa_format(query->perf) == query->oa_format);
   if (query->perf->devinfo->verx10 >= 200) {
      /* PEC64u64 */
      result->accumulator[query->gpu_time_offset] =
         intel_perf_report_timestamp(query, devinfo, end) -
         intel_perf_report_timestamp(query, devinfo, start);
      accumulate_uint64(start + 6, end + 6, &result->accumulator[query->gpu_clock_offset]);

      for (i = 0; i < 64; i++)
         accumulate_uint64(start + 8 + (2 * i), end + 8 + (2 * i),
                           &result->accumulator[query->pec_offset + i]);
   } else if (query->perf->devinfo->verx10 >= 125) {
      /* I915_OA_FORMAT_A24u40_A14u32_B8_C8 */
      result->accumulator[query->gpu_time_offset] =
         intel_perf_report_timestamp(query, devinfo, end) -
         intel_perf_report_timestamp(query, devinfo, start);

      accumulate_uint32(start + 3, end + 3,
                        result->accumulator + query->gpu_clock_offset); /* clock */

      /* A0-A3 counters are 32bits */
      for (i = 0; i < 4; i++) {
         accumulate_uint32(start + 4 + i, end + 4 + i,
                           result->accumulator + query->a_offset + i);
      }

      /* A4-A23 counters are 40bits */
      for (i = 4; i < 24; i++) {
         accumulate_uint40(i, start, end,
                           result->accumulator + query->a_offset + i);
      }

      /* A24-A27 counters are 32bits */
      for (i = 0; i < 4; i++) {
         accumulate_uint32(start + 28 + i, end + 28 + i,
                           result->accumulator + query->a_offset + 24 + i);
      }

      /* A28-A31 counters are 40bits */
      for (i = 28; i < 32; i++) {
         accumulate_uint40(i, start, end,
                           result->accumulator + query->a_offset + i);
      }

      /* A32-A35 counters are 32bits */
      for (i = 0; i < 4; i++) {
         accumulate_uint32(start + 36 + i, end + 36 + i,
                           result->accumulator + query->a_offset + 32 + i);
      }

      if (can_use_mi_rpc_bc_counters(query->perf->devinfo) ||
          !query->perf->sys_vars.query_mode) {
         /* A36-A37 counters are 32bits */
         accumulate_uint32(start + 40, end + 40,
                           result->accumulator + query->a_offset + 36);
         accumulate_uint32(start + 46, end + 46,
                           result->accumulator + query->a_offset + 37);

         /* 8x 32bit B counters */
         for (i = 0; i < 8; i++) {
            accumulate_uint32(start + 48 + i, end + 48 + i,
                              result->accumulator + query->b_offset + i);
         }

         /* 8x 32bit C counters */
         for (i = 0; i < 8; i++) {
            accumulate_uint32(start + 56 + i, end + 56 + i,
                              result->accumulator + query->c_offset + i);
         }
      }
   } else if (query->perf->devinfo->verx10 >= 120) {
      /* I915_OA_FORMAT_A32u40_A4u32_B8_C8 */
      result->accumulator[query->gpu_time_offset] =
         intel_perf_report_timestamp(query, devinfo, end) -
         intel_perf_report_timestamp(query, devinfo, start);

      accumulate_uint32(start + 3, end + 3,
                        result->accumulator + query->gpu_clock_offset); /* clock */

      /* 32x 40bit A counters */
      for (i = 0; i < 32; i++) {
         accumulate_uint40(i, start, end,
                           result->accumulator + query->a_offset + i);
      }

      /* 4x 32bit A counters */
      for (i = 0; i < 4; i++) {
         accumulate_uint32(start + 36 + i, end + 36 + i,
                           result->accumulator + query->a_offset + 32 + i);
      }

      if (can_use_mi_rpc_bc_counters(query->perf->devinfo) ||
          !query->perf->sys_vars.query_mode) {
         /* 8x 32bit B counters */
         for (i = 0; i < 8; i++) {
            accumulate_uint32(start + 48 + i, end + 48 + i,
                              result->accumulator + query->b_offset + i);
         }

         /* 8x 32bit C counters */
         for (i = 0; i < 8; i++) {
            accumulate_uint32(start + 56 + i, end + 56 + i,
                              result->accumulator + query->c_offset + i);
         }
      }
   } else {
      /* I915_OA_FORMAT_A45_B8_C8: 61 32bit counters (45 A + 8 B + 8 C) */
      result->accumulator[query->gpu_time_offset] =
         intel_perf_report_timestamp(query, devinfo, end) -
         intel_perf_report_timestamp(query, devinfo, start);

      for (i = 0; i < 61; i++) {
         accumulate_uint32(start + 3 + i, end + 3 + i,
                           result->accumulator + query->a_offset + i);
      }
   }
}

#define GET_FIELD(word, field) (((word) & field ## _MASK) >> field ## _SHIFT)

void
intel_perf_query_result_read_gt_frequency(struct intel_perf_query_result *result,
                                          const struct intel_device_info *devinfo,
                                          const uint32_t start,
                                          const uint32_t end)
{
   switch (devinfo->ver) {
   case 7:
   case 8:
      result->gt_frequency[0] = GET_FIELD(start, GFX7_RPSTAT1_CURR_GT_FREQ) * 50ULL;
      result->gt_frequency[1] = GET_FIELD(end, GFX7_RPSTAT1_CURR_GT_FREQ) * 50ULL;
      break;
   case 9:
   case 11:
   case 12:
   case 20:
      result->gt_frequency[0] = GET_FIELD(start, GFX9_RPSTAT0_CURR_GT_FREQ) * 50ULL / 3ULL;
      result->gt_frequency[1] = GET_FIELD(end, GFX9_RPSTAT0_CURR_GT_FREQ) * 50ULL / 3ULL;
      break;
   default:
      unreachable("unexpected gen");
   }

   /* Put the numbers into Hz. */
   result->gt_frequency[0] *= 1000000ULL;
   result->gt_frequency[1] *= 1000000ULL;
}
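
/* Unit example for the conversion above: on Gfx9+ a raw RPSTAT field value
 * of 30 becomes 30 * 50 / 3 = 500 (MHz), then * 1000000 = 500 MHz in Hz; on
 * Gfx7/8 a field value of 10 becomes 10 * 50 = 500 MHz.
 */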
1246 
1247 void
intel_perf_query_result_read_perfcnts(struct intel_perf_query_result * result,const struct intel_perf_query_info * query,const uint64_t * start,const uint64_t * end)1248 intel_perf_query_result_read_perfcnts(struct intel_perf_query_result *result,
1249                                       const struct intel_perf_query_info *query,
1250                                       const uint64_t *start,
1251                                       const uint64_t *end)
1252 {
1253    for (uint32_t i = 0; i < 2; i++) {
1254       uint64_t v0 = start[i] & PERF_CNT_VALUE_MASK;
1255       uint64_t v1 = end[i] & PERF_CNT_VALUE_MASK;
1256 
1257       result->accumulator[query->perfcnt_offset + i] = v0 > v1 ?
1258          (PERF_CNT_VALUE_MASK + 1 + v1 - v0) :
1259          (v1 - v0);
1260    }
1261 }
1262 
1263 static uint32_t
query_accumulator_offset(const struct intel_perf_query_info * query,enum intel_perf_query_field_type type,uint8_t index)1264 query_accumulator_offset(const struct intel_perf_query_info *query,
1265                          enum intel_perf_query_field_type type,
1266                          uint8_t index)
1267 {
1268    switch (type) {
1269    case INTEL_PERF_QUERY_FIELD_TYPE_SRM_PERFCNT:
1270       return query->perfcnt_offset + index;
1271    case INTEL_PERF_QUERY_FIELD_TYPE_SRM_OA_A:
1272       return query->a_offset + index;
1273    case INTEL_PERF_QUERY_FIELD_TYPE_SRM_OA_B:
1274       return query->b_offset + index;
1275    case INTEL_PERF_QUERY_FIELD_TYPE_SRM_OA_C:
1276       return query->c_offset + index;
1277    case INTEL_PERF_QUERY_FIELD_TYPE_SRM_OA_PEC:
1278       return query->pec_offset + index;
1279    default:
1280       unreachable("Invalid register type");
1281       return 0;
1282    }
1283 }
1284 
1285 void
intel_perf_query_result_accumulate_fields(struct intel_perf_query_result * result,const struct intel_perf_query_info * query,const void * start,const void * end,bool no_oa_accumulate)1286 intel_perf_query_result_accumulate_fields(struct intel_perf_query_result *result,
1287                                           const struct intel_perf_query_info *query,
1288                                           const void *start,
1289                                           const void *end,
1290                                           bool no_oa_accumulate)
1291 {
1292    const struct intel_perf_query_field_layout *layout = &query->perf->query_layout;
1293    const struct intel_device_info *devinfo = query->perf->devinfo;
1294 
1295    for (uint32_t r = 0; r < layout->n_fields; r++) {
1296       const struct intel_perf_query_field *field = &layout->fields[r];
1297 
1298       if (field->type == INTEL_PERF_QUERY_FIELD_TYPE_MI_RPC) {
1299          intel_perf_query_result_read_frequencies(result, devinfo,
1300                                                 start + field->location,
1301                                                 end + field->location);
1302          /* no_oa_accumulate=true is used when doing GL perf queries, we
1303           * manually parse the OA reports from the OA buffer and subtract
1304           * unrelated deltas, so don't accumulate the begin/end reports here.
1305           */
1306          if (!no_oa_accumulate) {
1307             intel_perf_query_result_accumulate(result, query,
1308                                                start + field->location,
1309                                                end + field->location);
1310          }
1311       } else {
1312          uint64_t v0, v1;
1313 
1314          if (field->size == 4) {
1315             v0 = *(const uint32_t *)(start + field->location);
1316             v1 = *(const uint32_t *)(end + field->location);
1317          } else {
1318             assert(field->size == 8);
1319             v0 = *(const uint64_t *)(start + field->location);
1320             v1 = *(const uint64_t *)(end + field->location);
1321          }
1322 
1323          if (field->mask) {
1324             v0 = field->mask & v0;
1325             v1 = field->mask & v1;
1326          }
1327 
1328          /* RPSTAT is a bit of a special case because its begin/end values
1329           * represent frequencies. We store it in a separate location.
1330           */
1331          if (field->type == INTEL_PERF_QUERY_FIELD_TYPE_SRM_RPSTAT)
1332             intel_perf_query_result_read_gt_frequency(result, devinfo, v0, v1);
1333          else
1334             result->accumulator[query_accumulator_offset(query, field->type, field->index)] = v1 - v0;
1335       }
1336    }
1337 }
1338 
1339 void
intel_perf_query_result_clear(struct intel_perf_query_result * result)1340 intel_perf_query_result_clear(struct intel_perf_query_result *result)
1341 {
1342    memset(result, 0, sizeof(*result));
1343    result->hw_id = INTEL_PERF_INVALID_CTX_ID;
1344 }

void
intel_perf_query_result_print_fields(const struct intel_perf_query_info *query,
                                     const void *data)
{
   const struct intel_perf_query_field_layout *layout = &query->perf->query_layout;

   for (uint32_t r = 0; r < layout->n_fields; r++) {
      const struct intel_perf_query_field *field = &layout->fields[r];
      const uint32_t *value32 = data + field->location;

      switch (field->type) {
      case INTEL_PERF_QUERY_FIELD_TYPE_MI_RPC:
         fprintf(stderr, "MI_RPC:\n");
         fprintf(stderr, "  TS: 0x%08x\n", *(value32 + 1));
         fprintf(stderr, "  CLK: 0x%08x\n", *(value32 + 3));
         break;
      case INTEL_PERF_QUERY_FIELD_TYPE_SRM_OA_A:
         fprintf(stderr, "A%u: 0x%08x\n", field->index, *value32);
         break;
      case INTEL_PERF_QUERY_FIELD_TYPE_SRM_OA_B:
         fprintf(stderr, "B%u: 0x%08x\n", field->index, *value32);
         break;
      case INTEL_PERF_QUERY_FIELD_TYPE_SRM_OA_C:
         fprintf(stderr, "C%u: 0x%08x\n", field->index, *value32);
         break;
      case INTEL_PERF_QUERY_FIELD_TYPE_SRM_OA_PEC: {
         const uint64_t *value64 = data + field->location;
         fprintf(stderr, "PEC%u: 0x%" PRIx64 "\n", field->index, *value64);
         break;
      }
      default:
         break;
      }
   }
}

static int
intel_perf_compare_query_names(const void *v1, const void *v2)
{
   const struct intel_perf_query_info *q1 = v1;
   const struct intel_perf_query_info *q2 = v2;

   return strcmp(q1->name, q2->name);
}

/* Xe2: (64 x PEC) + SRM_RPSTAT + MI_RPC */
#define MAX_QUERY_FIELDS(devinfo) (devinfo->verx10 >= 200 ? (64 + 2) : (5 + 16))

static inline struct intel_perf_query_field *
add_query_register(struct intel_perf_config *perf_cfg,
                   enum intel_perf_query_field_type type,
                   uint32_t offset,
                   uint16_t size,
                   uint8_t index)
{
   struct intel_perf_query_field_layout *layout = &perf_cfg->query_layout;

   /* Align MI_RPC to 64bytes (HW requirement) & 64bit registers to 8bytes
    * (shows up nicely in the debugger).
    */
   if (type == INTEL_PERF_QUERY_FIELD_TYPE_MI_RPC)
      layout->size = align(layout->size, 64);
   else if (size % 8 == 0)
      layout->size = align(layout->size, 8);

   assert(layout->n_fields < MAX_QUERY_FIELDS(perf_cfg->devinfo));
   layout->fields[layout->n_fields++] = (struct intel_perf_query_field) {
      .mmio_offset = offset,
      .location = layout->size,
      .type = type,
      .index = index,
      .size = size,
   };
   layout->size += size;

   return &layout->fields[layout->n_fields - 1];
}
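
/*
 * Worked example of the layout arithmetic above (the numbers are
 * illustrative): with layout->size == 260 after a 4 byte RPSTAT field,
 * adding an 8 byte PERFCNT register first rounds the running size up to
 * 264, records location = 264 for the new field, then bumps layout->size
 * to 272. Adding an MI_RPC field at that point would first round up to
 * 320, the next multiple of 64.
 */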

static void
intel_perf_init_query_fields(struct intel_perf_config *perf_cfg,
                             const struct intel_device_info *devinfo,
                             bool use_register_snapshots)
{
   struct intel_perf_query_field_layout *layout = &perf_cfg->query_layout;

   layout->n_fields = 0;

   /* MI_RPC requires a 64byte alignment. */
   layout->alignment = 64;

   layout->fields = rzalloc_array(perf_cfg, struct intel_perf_query_field,
                                  MAX_QUERY_FIELDS(devinfo));

   add_query_register(perf_cfg, INTEL_PERF_QUERY_FIELD_TYPE_MI_RPC,
                      0, perf_cfg->oa_sample_size, 0);

   if (use_register_snapshots) {
      if (devinfo->ver <= 11) {
         struct intel_perf_query_field *field =
            add_query_register(perf_cfg,
                               INTEL_PERF_QUERY_FIELD_TYPE_SRM_PERFCNT,
                               PERF_CNT_1_DW0, 8, 0);
         field->mask = PERF_CNT_VALUE_MASK;

         field = add_query_register(perf_cfg,
                                    INTEL_PERF_QUERY_FIELD_TYPE_SRM_PERFCNT,
                                    PERF_CNT_2_DW0, 8, 1);
         field->mask = PERF_CNT_VALUE_MASK;
      }

      if (devinfo->ver == 8 && devinfo->platform != INTEL_PLATFORM_CHV) {
         add_query_register(perf_cfg,
                            INTEL_PERF_QUERY_FIELD_TYPE_SRM_RPSTAT,
                            GFX7_RPSTAT1, 4, 0);
      }

      if (devinfo->ver >= 9) {
         add_query_register(perf_cfg,
                            INTEL_PERF_QUERY_FIELD_TYPE_SRM_RPSTAT,
                            GFX9_RPSTAT0, 4, 0);
      }

      if (!can_use_mi_rpc_bc_counters(devinfo)) {
         if (devinfo->ver >= 8 && devinfo->ver <= 11) {
            for (uint32_t i = 0; i < GFX8_N_OA_PERF_B32; i++) {
               add_query_register(perf_cfg, INTEL_PERF_QUERY_FIELD_TYPE_SRM_OA_B,
                                  GFX8_OA_PERF_B32(i), 4, i);
            }
            for (uint32_t i = 0; i < GFX8_N_OA_PERF_C32; i++) {
               add_query_register(perf_cfg, INTEL_PERF_QUERY_FIELD_TYPE_SRM_OA_C,
                                  GFX8_OA_PERF_C32(i), 4, i);
            }
         } else if (devinfo->verx10 == 120) {
            for (uint32_t i = 0; i < GFX12_N_OAG_PERF_B32; i++) {
               add_query_register(perf_cfg, INTEL_PERF_QUERY_FIELD_TYPE_SRM_OA_B,
                                  GFX12_OAG_PERF_B32(i), 4, i);
            }
            for (uint32_t i = 0; i < GFX12_N_OAG_PERF_C32; i++) {
               add_query_register(perf_cfg, INTEL_PERF_QUERY_FIELD_TYPE_SRM_OA_C,
                                  GFX12_OAG_PERF_C32(i), 4, i);
            }
         } else if (devinfo->verx10 == 125) {
            add_query_register(perf_cfg, INTEL_PERF_QUERY_FIELD_TYPE_SRM_OA_A,
                               GFX125_OAG_PERF_A36, 4, 36);
            add_query_register(perf_cfg, INTEL_PERF_QUERY_FIELD_TYPE_SRM_OA_A,
                               GFX125_OAG_PERF_A37, 4, 37);
            for (uint32_t i = 0; i < GFX12_N_OAG_PERF_B32; i++) {
               add_query_register(perf_cfg, INTEL_PERF_QUERY_FIELD_TYPE_SRM_OA_B,
                                  GFX12_OAG_PERF_B32(i), 4, i);
            }
            for (uint32_t i = 0; i < GFX12_N_OAG_PERF_C32; i++) {
               add_query_register(perf_cfg, INTEL_PERF_QUERY_FIELD_TYPE_SRM_OA_C,
                                  GFX12_OAG_PERF_C32(i), 4, i);
            }
         }
      }
   }

   /* Align the whole package to 64bytes so that 2 snapshots can be put
    * together without extra alignment for the user.
    */
   layout->size = align(layout->size, 64);
}
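
/*
 * Caller-side sketch (not part of this file): because layout->size ends up
 * 64 byte aligned, two snapshots can be packed back to back in a single
 * buffer while keeping each one aligned to layout->alignment. The
 * `snapshot_bo_map` pointer is a hypothetical mapping of such a buffer of
 * at least 2 * layout->size bytes:
 *
 *    const struct intel_perf_query_field_layout *layout =
 *       &perf_cfg->query_layout;
 *    void *begin = snapshot_bo_map;
 *    void *end = snapshot_bo_map + layout->size;
 */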

static size_t
intel_perf_get_oa_format_size(const struct intel_device_info *devinfo)
{
   /* Xe2+ OA reports use the larger PEC-based format. */
   if (devinfo->verx10 >= 200)
      return 576;

   return 256;
}

void
intel_perf_init_metrics(struct intel_perf_config *perf_cfg,
                        const struct intel_device_info *devinfo,
                        int drm_fd,
                        bool include_pipeline_statistics,
                        bool use_register_snapshots)
{
   perf_cfg->devinfo = devinfo;
   perf_cfg->oa_sample_size = intel_perf_get_oa_format_size(devinfo);

   intel_perf_init_query_fields(perf_cfg, devinfo, use_register_snapshots);

   if (include_pipeline_statistics) {
      load_pipeline_statistic_metrics(perf_cfg, devinfo);
      intel_perf_register_mdapi_statistic_query(perf_cfg, devinfo);
   }

   bool oa_metrics = oa_metrics_available(perf_cfg, drm_fd, devinfo,
                                          use_register_snapshots);
   if (oa_metrics)
      load_oa_metrics(perf_cfg, drm_fd, devinfo);

   /* sort query groups by name */
   if (perf_cfg->queries != NULL) {
      qsort(perf_cfg->queries, perf_cfg->n_queries,
            sizeof(perf_cfg->queries[0]), intel_perf_compare_query_names);
   }

   build_unique_counter_list(perf_cfg);

   if (oa_metrics)
      intel_perf_register_mdapi_oa_query(perf_cfg, devinfo);
}
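
/*
 * Typical bring-up sequence on the driver side (a sketch, assuming the
 * intel_perf_new() allocator declared in intel_perf.h and an existing
 * devinfo/drm_fd pair):
 *
 *    bool pipeline_stats = true;
 *    bool register_snapshots = true;
 *    struct intel_perf_config *perf = intel_perf_new(mem_ctx);
 *
 *    intel_perf_init_metrics(perf, devinfo, drm_fd, pipeline_stats,
 *                            register_snapshots);
 */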

void
intel_perf_free(struct intel_perf_config *perf_cfg)
{
   ralloc_free(perf_cfg);
}

uint64_t
intel_perf_get_oa_format(struct intel_perf_config *perf_cfg)
{
   switch (perf_cfg->devinfo->kmd_type) {
   case INTEL_KMD_TYPE_I915:
      return i915_perf_get_oa_format(perf_cfg);
   case INTEL_KMD_TYPE_XE:
      return xe_perf_get_oa_format(perf_cfg);
   default:
      unreachable("missing");
      return 0;
   }
}

int
intel_perf_stream_open(struct intel_perf_config *perf_config, int drm_fd,
                       uint32_t ctx_id, uint64_t metrics_set_id,
                       uint64_t period_exponent, bool hold_preemption,
                       bool enable, struct intel_bind_timeline *timeline)
{
   uint64_t report_format = intel_perf_get_oa_format(perf_config);

   switch (perf_config->devinfo->kmd_type) {
   case INTEL_KMD_TYPE_I915:
      return i915_perf_stream_open(perf_config, drm_fd, ctx_id, metrics_set_id,
                                   report_format, period_exponent,
                                   hold_preemption, enable);
   case INTEL_KMD_TYPE_XE:
      return xe_perf_stream_open(perf_config, drm_fd, ctx_id, metrics_set_id,
                                 report_format, period_exponent,
                                 hold_preemption, enable, timeline);
   default:
      unreachable("missing");
      return 0;
   }
}
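
/*
 * Sketch: opening a periodic sampling stream on a previously loaded metric
 * set (ctx_id and period_exponent are illustrative values; passing NULL for
 * the bind timeline is assumed to be acceptable when targeting i915, where
 * the argument is unused):
 *
 *    int stream_fd =
 *       intel_perf_stream_open(perf, drm_fd, ctx_id,
 *                              query->oa_metrics_set_id,
 *                              period_exponent, false, true, NULL);
 *    if (stream_fd < 0)
 *       return false;
 */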

/*
 * Read perf stream samples.
 *
 * The buffer will be filled with multiple struct intel_perf_record_header +
 * data entries.
 *
 * Returns 0 if no sample is available, a -errno value if an error happened,
 * or the number of bytes read on success.
 */
int
intel_perf_stream_read_samples(struct intel_perf_config *perf_config,
                               int perf_stream_fd, uint8_t *buffer,
                               size_t buffer_len)
{
   switch (perf_config->devinfo->kmd_type) {
   case INTEL_KMD_TYPE_I915:
      return i915_perf_stream_read_samples(perf_config, perf_stream_fd, buffer, buffer_len);
   case INTEL_KMD_TYPE_XE:
      return xe_perf_stream_read_samples(perf_config, perf_stream_fd, buffer, buffer_len);
   default:
      unreachable("missing");
      return -1;
   }
}
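
/*
 * Caller-side read loop sketch. This assumes the record header's size field
 * covers the header plus its payload, as in the i915 uAPI layout;
 * process_oa_report() is a hypothetical consumer of the sample payload:
 *
 *    uint8_t buf[4096];
 *    int len = intel_perf_stream_read_samples(perf, stream_fd,
 *                                             buf, sizeof(buf));
 *
 *    for (int offset = 0; offset < len;) {
 *       const struct intel_perf_record_header *header =
 *          (const struct intel_perf_record_header *) &buf[offset];
 *
 *       if (header->type == INTEL_PERF_RECORD_TYPE_SAMPLE)
 *          process_oa_report(header + 1);
 *       offset += header->size;
 *    }
 */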

int
intel_perf_stream_set_state(struct intel_perf_config *perf_config,
                            int perf_stream_fd, bool enable)
{
   switch (perf_config->devinfo->kmd_type) {
   case INTEL_KMD_TYPE_I915:
      return i915_perf_stream_set_state(perf_stream_fd, enable);
   case INTEL_KMD_TYPE_XE:
      return xe_perf_stream_set_state(perf_stream_fd, enable);
   default:
      unreachable("missing");
      return -1;
   }
}
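
/*
 * Illustrative use: a stream opened with enable=false can be toggled around
 * just the workload of interest to limit how much periodic data is sampled:
 *
 *    intel_perf_stream_set_state(perf, stream_fd, true);
 *    ... submit and wait on the workload ...
 *    intel_perf_stream_set_state(perf, stream_fd, false);
 */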

int
intel_perf_stream_set_metrics_id(struct intel_perf_config *perf_config,
                                 int drm_fd, int perf_stream_fd,
                                 uint32_t exec_queue,
                                 uint64_t metrics_set_id,
                                 struct intel_bind_timeline *timeline)
{
   switch (perf_config->devinfo->kmd_type) {
   case INTEL_KMD_TYPE_I915:
      return i915_perf_stream_set_metrics_id(perf_stream_fd, metrics_set_id);
   case INTEL_KMD_TYPE_XE:
      return xe_perf_stream_set_metrics_id(perf_stream_fd, drm_fd,
                                           exec_queue, metrics_set_id,
                                           timeline);
   default:
      unreachable("missing");
      return -1;
   }
}