/*
 * Copyright © 2018 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include <dirent.h>

#include <sys/types.h>
#include <sys/stat.h>
#include <fcntl.h>
#include <unistd.h>
#include <errno.h>

#ifndef HAVE_DIRENT_D_TYPE
#include <limits.h> // PATH_MAX
#endif

#include <drm-uapi/i915_drm.h>

#include "common/intel_gem.h"
#include "common/i915/intel_gem.h"

#include "dev/intel_debug.h"
#include "dev/intel_device_info.h"

#include "perf/intel_perf.h"
#include "perf/intel_perf_regs.h"
#include "perf/intel_perf_mdapi.h"
#include "perf/intel_perf_metrics.h"
#include "perf/intel_perf_private.h"

#include "util/bitscan.h"
#include "util/macros.h"
#include "util/mesa-sha1.h"
#include "util/u_debug.h"
#include "util/u_math.h"

#define FILE_DEBUG_FLAG DEBUG_PERFMON

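/* Check whether a directory entry is a directory or a symlink. On systems
 * where dirent has no d_type field, fall back to lstat().
 */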
static bool
is_dir_or_link(const struct dirent *entry, const char *parent_dir)
{
#ifdef HAVE_DIRENT_D_TYPE
   return entry->d_type == DT_DIR || entry->d_type == DT_LNK;
#else
   struct stat st;
   char path[PATH_MAX + 1];
   snprintf(path, sizeof(path), "%s/%s", parent_dir, entry->d_name);
   if (lstat(path, &st) != 0)
      return false;
   return S_ISDIR(st.st_mode) || S_ISLNK(st.st_mode);
#endif
}

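/* Locate the /sys/dev/char/<maj>:<min>/device/drm/cardX directory for the
 * given DRM fd, from which OA metric set ids can be read.
 */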
static bool
get_sysfs_dev_dir(struct intel_perf_config *perf, int fd)
{
   struct stat sb;
   int min, maj;
   DIR *drmdir;
   struct dirent *drm_entry;
   int len;

   perf->sysfs_dev_dir[0] = '\0';

   if (INTEL_DEBUG(DEBUG_NO_OACONFIG))
      return true;

   if (fstat(fd, &sb)) {
      DBG("Failed to stat DRM fd\n");
      return false;
   }

   maj = major(sb.st_rdev);
   min = minor(sb.st_rdev);

   if (!S_ISCHR(sb.st_mode)) {
      DBG("DRM fd is not a character device as expected\n");
      return false;
   }

   len = snprintf(perf->sysfs_dev_dir,
                  sizeof(perf->sysfs_dev_dir),
                  "/sys/dev/char/%d:%d/device/drm", maj, min);
   if (len < 0 || len >= sizeof(perf->sysfs_dev_dir)) {
      DBG("Failed to concatenate sysfs path to drm device\n");
      return false;
   }

   drmdir = opendir(perf->sysfs_dev_dir);
   if (!drmdir) {
      DBG("Failed to open %s: %m\n", perf->sysfs_dev_dir);
      return false;
   }

   while ((drm_entry = readdir(drmdir))) {
      if (is_dir_or_link(drm_entry, perf->sysfs_dev_dir) &&
          strncmp(drm_entry->d_name, "card", 4) == 0)
      {
         len = snprintf(perf->sysfs_dev_dir,
                        sizeof(perf->sysfs_dev_dir),
                        "/sys/dev/char/%d:%d/device/drm/%s",
                        maj, min, drm_entry->d_name);
         closedir(drmdir);
         if (len < 0 || len >= sizeof(perf->sysfs_dev_dir))
            return false;
         else
            return true;
      }
   }

   closedir(drmdir);

   DBG("Failed to find cardX directory under /sys/dev/char/%d:%d/device/drm\n",
       maj, min);

   return false;
}

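/* Read a uint64_t value from a sysfs file, retrying reads interrupted by a
 * signal (EINTR).
 */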
static bool
read_file_uint64(const char *file, uint64_t *val)
{
    char buf[32];
    int fd, n;

    fd = open(file, O_RDONLY);
    if (fd < 0)
       return false;
    while ((n = read(fd, buf, sizeof(buf) - 1)) < 0 &&
           errno == EINTR);
    close(fd);
    if (n < 0)
       return false;

    buf[n] = '\0';
    *val = strtoull(buf, NULL, 0);

    return true;
}

static bool
read_sysfs_drm_device_file_uint64(struct intel_perf_config *perf,
                                  const char *file,
                                  uint64_t *value)
{
   char buf[512];
   int len;

   len = snprintf(buf, sizeof(buf), "%s/%s", perf->sysfs_dev_dir, file);
   if (len < 0 || len >= sizeof(buf)) {
      DBG("Failed to concatenate sys filename to read u64 from\n");
      return false;
   }

   return read_file_uint64(buf, value);
}

static bool
oa_config_enabled(struct intel_perf_config *perf,
                  const struct intel_perf_query_info *query)
{
   /* Hide extended metrics unless enabled with env param */
   bool is_extended_metric = strncmp(query->name, "Ext", 3) == 0;

   return perf->enable_all_metrics || !is_extended_metric;
}

static void
register_oa_config(struct intel_perf_config *perf,
                   const struct intel_device_info *devinfo,
                   const struct intel_perf_query_info *query,
                   uint64_t config_id)
{
   if (!oa_config_enabled(perf, query))
      return;

   struct intel_perf_query_info *registered_query =
      intel_perf_append_query_info(perf, 0);

   *registered_query = *query;
   registered_query->oa_metrics_set_id = config_id;
   DBG("metric set registered: id = %" PRIu64 ", guid = %s\n",
       registered_query->oa_metrics_set_id, query->guid);
}

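/* Walk the sysfs metrics/ directory and register every metric set advertised
 * by the kernel that mesa also knows about.
 */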
static void
enumerate_sysfs_metrics(struct intel_perf_config *perf,
                        const struct intel_device_info *devinfo)
{
   DIR *metricsdir = NULL;
   struct dirent *metric_entry;
   char buf[256];
   int len;

   len = snprintf(buf, sizeof(buf), "%s/metrics", perf->sysfs_dev_dir);
   if (len < 0 || len >= sizeof(buf)) {
      DBG("Failed to concatenate path to sysfs metrics/ directory\n");
      return;
   }

   metricsdir = opendir(buf);
   if (!metricsdir) {
      DBG("Failed to open %s: %m\n", buf);
      return;
   }

   while ((metric_entry = readdir(metricsdir))) {
      struct hash_entry *entry;
      if (!is_dir_or_link(metric_entry, buf) ||
          metric_entry->d_name[0] == '.')
         continue;

      DBG("metric set: %s\n", metric_entry->d_name);
      entry = _mesa_hash_table_search(perf->oa_metrics_table,
                                      metric_entry->d_name);
      if (entry) {
         uint64_t id;
         if (!intel_perf_load_metric_id(perf, metric_entry->d_name, &id)) {
            DBG("Failed to read metric set id from %s: %m\n", buf);
            continue;
         }

         register_oa_config(perf, devinfo,
                            (const struct intel_perf_query_info *)entry->data, id);
      } else
         DBG("metric set not known by mesa (skipping)\n");
   }

   closedir(metricsdir);
}

static void
add_all_metrics(struct intel_perf_config *perf,
                const struct intel_device_info *devinfo)
{
   hash_table_foreach(perf->oa_metrics_table, entry) {
      const struct intel_perf_query_info *query = entry->data;
      register_oa_config(perf, devinfo, query, 0);
   }
}

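/* Probe for dynamic OA config support: asking the kernel to remove an
 * invalid config id fails with ENOENT only when
 * DRM_IOCTL_I915_PERF_REMOVE_CONFIG is actually implemented.
 */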
static bool
kernel_has_dynamic_config_support(struct intel_perf_config *perf, int fd)
{
   uint64_t invalid_config_id = UINT64_MAX;

   return intel_ioctl(fd, DRM_IOCTL_I915_PERF_REMOVE_CONFIG,
                      &invalid_config_id) < 0 && errno == ENOENT;
}

static bool
i915_query_perf_config_supported(struct intel_perf_config *perf, int fd)
{
   int32_t length = 0;
   return !intel_i915_query_flags(fd, DRM_I915_QUERY_PERF_CONFIG,
                                  DRM_I915_QUERY_PERF_CONFIG_LIST,
                                  NULL, &length);
}

static bool
i915_query_perf_config_data(struct intel_perf_config *perf,
                            int fd, const char *guid,
                            struct drm_i915_perf_oa_config *config)
{
   char data[sizeof(struct drm_i915_query_perf_config) +
             sizeof(struct drm_i915_perf_oa_config)] = {};
   struct drm_i915_query_perf_config *i915_query = (void *)data;
   struct drm_i915_perf_oa_config *i915_config = (void *)data + sizeof(*i915_query);

   memcpy(i915_query->uuid, guid, sizeof(i915_query->uuid));
   memcpy(i915_config, config, sizeof(*config));

   int32_t item_length = sizeof(data);
   if (intel_i915_query_flags(fd, DRM_I915_QUERY_PERF_CONFIG,
                              DRM_I915_QUERY_PERF_CONFIG_DATA_FOR_UUID,
                              i915_query, &item_length))
      return false;

   memcpy(config, i915_config, sizeof(*config));

   return true;
}

bool
intel_perf_load_metric_id(struct intel_perf_config *perf_cfg,
                          const char *guid,
                          uint64_t *metric_id)
{
   char config_path[280];

   snprintf(config_path, sizeof(config_path), "%s/metrics/%s/id",
            perf_cfg->sysfs_dev_dir, guid);

   /* Don't recreate already loaded configs. */
   return read_file_uint64(config_path, metric_id);
}

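/* Register an OA config with the kernel and return its metric set id, or 0
 * on failure.
 */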
static uint64_t
i915_add_config(struct intel_perf_config *perf, int fd,
                const struct intel_perf_registers *config,
                const char *guid)
{
   struct drm_i915_perf_oa_config i915_config = { 0, };

   memcpy(i915_config.uuid, guid, sizeof(i915_config.uuid));

   i915_config.n_mux_regs = config->n_mux_regs;
   i915_config.mux_regs_ptr = to_const_user_pointer(config->mux_regs);

   i915_config.n_boolean_regs = config->n_b_counter_regs;
   i915_config.boolean_regs_ptr = to_const_user_pointer(config->b_counter_regs);

   i915_config.n_flex_regs = config->n_flex_regs;
   i915_config.flex_regs_ptr = to_const_user_pointer(config->flex_regs);

   int ret = intel_ioctl(fd, DRM_IOCTL_I915_PERF_ADD_CONFIG, &i915_config);
   return ret > 0 ? ret : 0;
}

static void
init_oa_configs(struct intel_perf_config *perf, int fd,
                const struct intel_device_info *devinfo)
{
   hash_table_foreach(perf->oa_metrics_table, entry) {
      const struct intel_perf_query_info *query = entry->data;
      uint64_t config_id;

      if (intel_perf_load_metric_id(perf, query->guid, &config_id)) {
         DBG("metric set: %s (already loaded)\n", query->guid);
         register_oa_config(perf, devinfo, query, config_id);
         continue;
      }

      /* i915_add_config() returns 0 on failure. */
      uint64_t ret = i915_add_config(perf, fd, &query->config, query->guid);
      if (ret == 0) {
         DBG("Failed to load \"%s\" (%s) metrics set in kernel: %s\n",
             query->name, query->guid, strerror(errno));
         continue;
      }

      register_oa_config(perf, devinfo, query, ret);
      DBG("metric set: %s (added)\n", query->guid);
   }
}

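/* Fill the topology builtins (slice/subslice/EU counts and masks) consumed
 * by the metric set equations.
 */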
static void
compute_topology_builtins(struct intel_perf_config *perf)
{
   const struct intel_device_info *devinfo = &perf->devinfo;

   perf->sys_vars.slice_mask = devinfo->slice_masks;
   perf->sys_vars.n_eu_slices = devinfo->num_slices;

   perf->sys_vars.n_eu_slice0123 = 0;
   for (int s = 0; s < MIN2(4, devinfo->max_slices); s++) {
      if (!intel_device_info_slice_available(devinfo, s))
         continue;

      for (int ss = 0; ss < devinfo->max_subslices_per_slice; ss++) {
         if (!intel_device_info_subslice_available(devinfo, s, ss))
            continue;

         for (int eu = 0; eu < devinfo->max_eus_per_subslice; eu++) {
            if (intel_device_info_eu_available(devinfo, s, ss, eu))
               perf->sys_vars.n_eu_slice0123++;
         }
      }
   }

   perf->sys_vars.n_eu_sub_slices = intel_device_info_subslice_total(devinfo);
   perf->sys_vars.n_eus = intel_device_info_eu_total(devinfo);

   /* The subslice mask builtin contains bits for all slices. Prior to Gfx11
    * it had groups of 3 bits for each slice, on Gfx11 and above it's 8 bits
    * for each slice.
    *
    * Ideally equations would be updated to have a slice/subslice query
    * function/operator.
    */
   perf->sys_vars.subslice_mask = 0;

   int bits_per_subslice = devinfo->ver >= 11 ? 8 : 3;

   for (int s = 0; s < util_last_bit(devinfo->slice_masks); s++) {
      for (int ss = 0; ss < (devinfo->subslice_slice_stride * 8); ss++) {
         if (intel_device_info_subslice_available(devinfo, s, ss))
            perf->sys_vars.subslice_mask |= 1ULL << (s * bits_per_subslice + ss);
      }
   }
}

static bool
init_oa_sys_vars(struct intel_perf_config *perf,
                 bool use_register_snapshots)
{
   uint64_t min_freq_mhz = 0, max_freq_mhz = 0;

   if (!INTEL_DEBUG(DEBUG_NO_OACONFIG)) {
      if (!read_sysfs_drm_device_file_uint64(perf, "gt_min_freq_mhz", &min_freq_mhz))
         return false;

      if (!read_sysfs_drm_device_file_uint64(perf, "gt_max_freq_mhz", &max_freq_mhz))
         return false;
   } else {
      min_freq_mhz = 300;
      max_freq_mhz = 1000;
   }

   memset(&perf->sys_vars, 0, sizeof(perf->sys_vars));
   perf->sys_vars.gt_min_freq = min_freq_mhz * 1000000;
   perf->sys_vars.gt_max_freq = max_freq_mhz * 1000000;
   perf->sys_vars.query_mode = use_register_snapshots;
   compute_topology_builtins(perf);

   return true;
}

typedef void (*perf_register_oa_queries_t)(struct intel_perf_config *);

static perf_register_oa_queries_t
get_register_queries_function(const struct intel_device_info *devinfo)
{
   switch (devinfo->platform) {
   case INTEL_PLATFORM_HSW:
      return intel_oa_register_queries_hsw;
   case INTEL_PLATFORM_CHV:
      return intel_oa_register_queries_chv;
   case INTEL_PLATFORM_BDW:
      return intel_oa_register_queries_bdw;
   case INTEL_PLATFORM_BXT:
      return intel_oa_register_queries_bxt;
   case INTEL_PLATFORM_SKL:
      if (devinfo->gt == 2)
         return intel_oa_register_queries_sklgt2;
      if (devinfo->gt == 3)
         return intel_oa_register_queries_sklgt3;
      if (devinfo->gt == 4)
         return intel_oa_register_queries_sklgt4;
      return NULL;
   case INTEL_PLATFORM_KBL:
      if (devinfo->gt == 2)
         return intel_oa_register_queries_kblgt2;
      if (devinfo->gt == 3)
         return intel_oa_register_queries_kblgt3;
      return NULL;
   case INTEL_PLATFORM_GLK:
      return intel_oa_register_queries_glk;
   case INTEL_PLATFORM_CFL:
      if (devinfo->gt == 2)
         return intel_oa_register_queries_cflgt2;
      if (devinfo->gt == 3)
         return intel_oa_register_queries_cflgt3;
      return NULL;
   case INTEL_PLATFORM_ICL:
      return intel_oa_register_queries_icl;
   case INTEL_PLATFORM_EHL:
      return intel_oa_register_queries_ehl;
   case INTEL_PLATFORM_TGL:
      if (devinfo->gt == 1)
         return intel_oa_register_queries_tglgt1;
      if (devinfo->gt == 2)
         return intel_oa_register_queries_tglgt2;
      return NULL;
   case INTEL_PLATFORM_RKL:
      return intel_oa_register_queries_rkl;
   case INTEL_PLATFORM_DG1:
      return intel_oa_register_queries_dg1;
   case INTEL_PLATFORM_ADL:
   case INTEL_PLATFORM_RPL:
      return intel_oa_register_queries_adl;
   case INTEL_PLATFORM_DG2_G10:
      return intel_oa_register_queries_acmgt3;
   case INTEL_PLATFORM_DG2_G11:
      return intel_oa_register_queries_acmgt1;
   case INTEL_PLATFORM_DG2_G12:
      return intel_oa_register_queries_acmgt2;
   case INTEL_PLATFORM_MTL_U:
   case INTEL_PLATFORM_MTL_H:
      if (intel_device_info_eu_total(devinfo) <= 64)
         return intel_oa_register_queries_mtlgt2;
      if (intel_device_info_eu_total(devinfo) <= 128)
         return intel_oa_register_queries_mtlgt3;
      return NULL;
   default:
      return NULL;
   }
}

static int
intel_perf_compare_counter_names(const void *v1, const void *v2)
{
   const struct intel_perf_query_counter *c1 = v1;
   const struct intel_perf_query_counter *c2 = v2;

   return strcmp(c1->name, c2->name);
}

static void
sort_query(struct intel_perf_query_info *q)
{
   qsort(q->counters, q->n_counters, sizeof(q->counters[0]),
         intel_perf_compare_counter_names);
}

static void
load_pipeline_statistic_metrics(struct intel_perf_config *perf_cfg,
                                const struct intel_device_info *devinfo)
{
   struct intel_perf_query_info *query =
      intel_perf_append_query_info(perf_cfg, MAX_STAT_COUNTERS);

   query->kind = INTEL_PERF_QUERY_TYPE_PIPELINE;
   query->name = "Pipeline Statistics Registers";

   intel_perf_query_add_basic_stat_reg(query, IA_VERTICES_COUNT,
                                       "N vertices submitted");
   intel_perf_query_add_basic_stat_reg(query, IA_PRIMITIVES_COUNT,
                                       "N primitives submitted");
   intel_perf_query_add_basic_stat_reg(query, VS_INVOCATION_COUNT,
                                       "N vertex shader invocations");

   if (devinfo->ver == 6) {
      intel_perf_query_add_stat_reg(query, GFX6_SO_PRIM_STORAGE_NEEDED, 1, 1,
                                    "SO_PRIM_STORAGE_NEEDED",
                                    "N geometry shader stream-out primitives (total)");
      intel_perf_query_add_stat_reg(query, GFX6_SO_NUM_PRIMS_WRITTEN, 1, 1,
                                    "SO_NUM_PRIMS_WRITTEN",
                                    "N geometry shader stream-out primitives (written)");
   } else {
      intel_perf_query_add_stat_reg(query, GFX7_SO_PRIM_STORAGE_NEEDED(0), 1, 1,
                                    "SO_PRIM_STORAGE_NEEDED (Stream 0)",
                                    "N stream-out (stream 0) primitives (total)");
      intel_perf_query_add_stat_reg(query, GFX7_SO_PRIM_STORAGE_NEEDED(1), 1, 1,
                                    "SO_PRIM_STORAGE_NEEDED (Stream 1)",
                                    "N stream-out (stream 1) primitives (total)");
      intel_perf_query_add_stat_reg(query, GFX7_SO_PRIM_STORAGE_NEEDED(2), 1, 1,
                                    "SO_PRIM_STORAGE_NEEDED (Stream 2)",
                                    "N stream-out (stream 2) primitives (total)");
      intel_perf_query_add_stat_reg(query, GFX7_SO_PRIM_STORAGE_NEEDED(3), 1, 1,
                                    "SO_PRIM_STORAGE_NEEDED (Stream 3)",
                                    "N stream-out (stream 3) primitives (total)");
      intel_perf_query_add_stat_reg(query, GFX7_SO_NUM_PRIMS_WRITTEN(0), 1, 1,
                                    "SO_NUM_PRIMS_WRITTEN (Stream 0)",
                                    "N stream-out (stream 0) primitives (written)");
      intel_perf_query_add_stat_reg(query, GFX7_SO_NUM_PRIMS_WRITTEN(1), 1, 1,
                                    "SO_NUM_PRIMS_WRITTEN (Stream 1)",
                                    "N stream-out (stream 1) primitives (written)");
      intel_perf_query_add_stat_reg(query, GFX7_SO_NUM_PRIMS_WRITTEN(2), 1, 1,
                                    "SO_NUM_PRIMS_WRITTEN (Stream 2)",
                                    "N stream-out (stream 2) primitives (written)");
      intel_perf_query_add_stat_reg(query, GFX7_SO_NUM_PRIMS_WRITTEN(3), 1, 1,
                                    "SO_NUM_PRIMS_WRITTEN (Stream 3)",
                                    "N stream-out (stream 3) primitives (written)");
   }

   intel_perf_query_add_basic_stat_reg(query, HS_INVOCATION_COUNT,
                                       "N TCS shader invocations");
   intel_perf_query_add_basic_stat_reg(query, DS_INVOCATION_COUNT,
                                       "N TES shader invocations");

   intel_perf_query_add_basic_stat_reg(query, GS_INVOCATION_COUNT,
                                       "N geometry shader invocations");
   intel_perf_query_add_basic_stat_reg(query, GS_PRIMITIVES_COUNT,
                                       "N geometry shader primitives emitted");

   intel_perf_query_add_basic_stat_reg(query, CL_INVOCATION_COUNT,
                                       "N primitives entering clipping");
   intel_perf_query_add_basic_stat_reg(query, CL_PRIMITIVES_COUNT,
                                       "N primitives leaving clipping");

   if (devinfo->verx10 == 75 || devinfo->ver == 8) {
      intel_perf_query_add_stat_reg(query, PS_INVOCATION_COUNT, 1, 4,
                                    "N fragment shader invocations",
                                    "N fragment shader invocations");
   } else {
      intel_perf_query_add_basic_stat_reg(query, PS_INVOCATION_COUNT,
                                          "N fragment shader invocations");
   }

   intel_perf_query_add_basic_stat_reg(query, PS_DEPTH_COUNT,
                                       "N z-pass fragments");

   if (devinfo->ver >= 7) {
      intel_perf_query_add_basic_stat_reg(query, CS_INVOCATION_COUNT,
                                          "N compute shader invocations");
   }

   query->data_size = sizeof(uint64_t) * query->n_counters;

   sort_query(query);
}

static int
i915_perf_version(int drm_fd)
{
   int tmp = 0;
   intel_gem_get_param(drm_fd, I915_PARAM_PERF_REVISION, &tmp);
   return tmp;
}

static void
i915_get_sseu(int drm_fd, struct drm_i915_gem_context_param_sseu *sseu)
{
   struct drm_i915_gem_context_param arg = {
      .param = I915_CONTEXT_PARAM_SSEU,
      .size = sizeof(*sseu),
      .value = to_user_pointer(sseu)
   };

   intel_ioctl(drm_fd, DRM_IOCTL_I915_GEM_CONTEXT_GETPARAM, &arg);
}

static inline int
compare_str_or_null(const char *s1, const char *s2)
{
   if (s1 == NULL && s2 == NULL)
      return 0;
   if (s1 == NULL)
      return -1;
   if (s2 == NULL)
      return 1;

   return strcmp(s1, s2);
}

static int
compare_counter_categories_and_names(const void *_c1, const void *_c2)
{
   const struct intel_perf_query_counter_info *c1 = (const struct intel_perf_query_counter_info *)_c1;
   const struct intel_perf_query_counter_info *c2 = (const struct intel_perf_query_counter_info *)_c2;

   /* pipeline counters don't have an assigned category */
   int r = compare_str_or_null(c1->counter->category, c2->counter->category);
   if (r)
      return r;

   return strcmp(c1->counter->name, c2->counter->name);
}

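/* Build a deduplicated array of all counters across queries, sorted by
 * category and name, recording for each counter a bitset of the queries that
 * can compute it.
 */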
static void
build_unique_counter_list(struct intel_perf_config *perf)
{
   size_t max_counters = 0;

   for (int q = 0; q < perf->n_queries; q++)
      max_counters += perf->queries[q].n_counters;

   /*
    * Allocate a big enough array to hold the maximum possible number of
    * counters. We can't alloc it small and realloc when needed because the
    * hash table below contains pointers to this array.
    */
   struct intel_perf_query_counter_info *counter_infos =
         rzalloc_array_size(perf, sizeof(counter_infos[0]), max_counters);

   perf->n_counters = 0;

   struct hash_table *counters_table =
      _mesa_hash_table_create(NULL,
                              _mesa_hash_string,
                              _mesa_key_string_equal);
   struct hash_entry *entry;
   for (int q = 0; q < perf->n_queries; q++) {
      struct intel_perf_query_info *query = &perf->queries[q];

      for (int c = 0; c < query->n_counters; c++) {
         struct intel_perf_query_counter *counter;
         struct intel_perf_query_counter_info *counter_info;

         counter = &query->counters[c];
         entry = _mesa_hash_table_search(counters_table, counter->symbol_name);

         if (entry) {
            counter_info = entry->data;
            BITSET_SET(counter_info->query_mask, q);
            continue;
         }
         assert(perf->n_counters < max_counters);

         counter_info = &counter_infos[perf->n_counters++];
         counter_info->counter = counter;
         BITSET_SET(counter_info->query_mask, q);

         counter_info->location.group_idx = q;
         counter_info->location.counter_idx = c;

         _mesa_hash_table_insert(counters_table, counter->symbol_name, counter_info);
      }
   }

   _mesa_hash_table_destroy(counters_table, NULL);

   perf->counter_infos = counter_infos;

   qsort(perf->counter_infos, perf->n_counters, sizeof(perf->counter_infos[0]),
         compare_counter_categories_and_names);
}

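/* Check whether OA metrics are usable: the KMD must be i915, mesa must have
 * metric definitions for the platform, and the i915 perf interface must be
 * accessible (subject to perf_stream_paranoid).
 */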
static bool
oa_metrics_available(struct intel_perf_config *perf, int fd,
                     const struct intel_device_info *devinfo,
                     bool use_register_snapshots)
{
   perf_register_oa_queries_t oa_register = get_register_queries_function(devinfo);
   bool i915_perf_oa_available = false;
   struct stat sb;

   /* TODO: Xe still doesn't have support for performance metrics */
   if (devinfo->kmd_type != INTEL_KMD_TYPE_I915)
      return false;

   perf->devinfo = *devinfo;

   /* Consider an invalid fd as supported. */
   if (fd == -1) {
      perf->i915_query_supported = true;
      return true;
   }

   perf->i915_query_supported = i915_query_perf_config_supported(perf, fd);
   perf->enable_all_metrics = debug_get_bool_option("INTEL_EXTENDED_METRICS", false);
   perf->i915_perf_version = i915_perf_version(fd);

   /* TODO: We should query this from i915 */
   if (devinfo->verx10 >= 125)
      perf->oa_timestamp_shift = 1;

   perf->oa_timestamp_mask =
      0xffffffffffffffffull >> (32 + perf->oa_timestamp_shift);

   /* Record the default SSEU configuration. */
   i915_get_sseu(fd, &perf->sseu);

   /* The existence of this sysctl parameter implies the kernel supports
    * the i915 perf interface.
    */
   if (stat("/proc/sys/dev/i915/perf_stream_paranoid", &sb) == 0) {

      /* If _paranoid == 1 then on Gfx8+ we won't be able to access OA
       * metrics unless running as root.
       */
      if (devinfo->platform == INTEL_PLATFORM_HSW)
         i915_perf_oa_available = true;
      else {
         uint64_t paranoid = 1;

         read_file_uint64("/proc/sys/dev/i915/perf_stream_paranoid", &paranoid);

         if (paranoid == 0 || geteuid() == 0)
            i915_perf_oa_available = true;
      }

      perf->platform_supported = oa_register != NULL;
   }

   return i915_perf_oa_available &&
          oa_register &&
          get_sysfs_dev_dir(perf, fd) &&
          init_oa_sys_vars(perf, use_register_snapshots);
}

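/* Register all the metric sets mesa knows for this device, either by adding
 * them to the kernel or by matching the ones already advertised in sysfs.
 */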
static void
load_oa_metrics(struct intel_perf_config *perf, int fd,
                const struct intel_device_info *devinfo)
{
   int existing_queries = perf->n_queries;

   perf_register_oa_queries_t oa_register = get_register_queries_function(devinfo);

   perf->oa_metrics_table =
      _mesa_hash_table_create(perf, _mesa_hash_string,
                              _mesa_key_string_equal);

   /* Index all the metric sets mesa knows about before looking to see what
    * the kernel is advertising.
    */
   oa_register(perf);

   if (!INTEL_DEBUG(DEBUG_NO_OACONFIG)) {
      if (kernel_has_dynamic_config_support(perf, fd))
         init_oa_configs(perf, fd, devinfo);
      else
         enumerate_sysfs_metrics(perf, devinfo);
   } else {
      add_all_metrics(perf, devinfo);
   }

   /* Sort counters in each individual group created by this function by name. */
   for (int i = existing_queries; i < perf->n_queries; ++i)
      sort_query(&perf->queries[i]);

   /* Select a fallback OA metric. Look for the TestOa metric or use the last
    * one if not present (on HSW).
    */
   for (int i = existing_queries; i < perf->n_queries; i++) {
      if (perf->queries[i].symbol_name &&
          strcmp(perf->queries[i].symbol_name, "TestOa") == 0) {
         perf->fallback_raw_oa_metric = perf->queries[i].oa_metrics_set_id;
         break;
      }
   }
   if (perf->fallback_raw_oa_metric == 0 && perf->n_queries > 0)
      perf->fallback_raw_oa_metric = perf->queries[perf->n_queries - 1].oa_metrics_set_id;
}

struct intel_perf_registers *
intel_perf_load_configuration(struct intel_perf_config *perf_cfg, int fd, const char *guid)
{
   if (!perf_cfg->i915_query_supported)
      return NULL;

   struct drm_i915_perf_oa_config i915_config = { 0, };
   if (!i915_query_perf_config_data(perf_cfg, fd, guid, &i915_config))
      return NULL;

   struct intel_perf_registers *config = rzalloc(NULL, struct intel_perf_registers);
   config->n_flex_regs = i915_config.n_flex_regs;
   config->flex_regs = rzalloc_array(config, struct intel_perf_query_register_prog, config->n_flex_regs);
   config->n_mux_regs = i915_config.n_mux_regs;
   config->mux_regs = rzalloc_array(config, struct intel_perf_query_register_prog, config->n_mux_regs);
   config->n_b_counter_regs = i915_config.n_boolean_regs;
   config->b_counter_regs = rzalloc_array(config, struct intel_perf_query_register_prog, config->n_b_counter_regs);

   /*
    * struct intel_perf_query_register_prog maps exactly to the tuple of
    * (register offset, register value) returned by the i915.
    */
   i915_config.flex_regs_ptr = to_const_user_pointer(config->flex_regs);
   i915_config.mux_regs_ptr = to_const_user_pointer(config->mux_regs);
   i915_config.boolean_regs_ptr = to_const_user_pointer(config->b_counter_regs);
   if (!i915_query_perf_config_data(perf_cfg, fd, guid, &i915_config)) {
      ralloc_free(config);
      return NULL;
   }

   return config;
}

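/* Store an OA configuration in the kernel. When no GUID is given, generate a
 * deterministic one by formatting a SHA1 of the register programming as a
 * UUID, so that identical configs reuse the same metric set id.
 */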
uint64_t
intel_perf_store_configuration(struct intel_perf_config *perf_cfg, int fd,
                               const struct intel_perf_registers *config,
                               const char *guid)
{
   if (guid)
      return i915_add_config(perf_cfg, fd, config, guid);

   struct mesa_sha1 sha1_ctx;
   _mesa_sha1_init(&sha1_ctx);

   if (config->flex_regs) {
      _mesa_sha1_update(&sha1_ctx, config->flex_regs,
                        sizeof(config->flex_regs[0]) *
                        config->n_flex_regs);
   }
   if (config->mux_regs) {
      _mesa_sha1_update(&sha1_ctx, config->mux_regs,
                        sizeof(config->mux_regs[0]) *
                        config->n_mux_regs);
   }
   if (config->b_counter_regs) {
      _mesa_sha1_update(&sha1_ctx, config->b_counter_regs,
                        sizeof(config->b_counter_regs[0]) *
                        config->n_b_counter_regs);
   }

   uint8_t hash[20];
   _mesa_sha1_final(&sha1_ctx, hash);

   char formatted_hash[41];
   _mesa_sha1_format(formatted_hash, hash);

   char generated_guid[37];
   snprintf(generated_guid, sizeof(generated_guid),
            "%.8s-%.4s-%.4s-%.4s-%.12s",
            &formatted_hash[0], &formatted_hash[8],
            &formatted_hash[8 + 4], &formatted_hash[8 + 4 + 4],
            &formatted_hash[8 + 4 + 4 + 4]);

   /* Check if already present. */
   uint64_t id;
   if (intel_perf_load_metric_id(perf_cfg, generated_guid, &id))
      return id;

   return i915_add_config(perf_cfg, fd, config, generated_guid);
}

static void
get_passes_mask(struct intel_perf_config *perf,
                const uint32_t *counter_indices,
                uint32_t counter_indices_count,
                BITSET_WORD *queries_mask)
{
   /* For each counter, look if it's already computed by a selected metric
    * set or find one that can compute it.
    */
   for (uint32_t c = 0; c < counter_indices_count; c++) {
      uint32_t counter_idx = counter_indices[c];
      assert(counter_idx < perf->n_counters);

      const struct intel_perf_query_counter_info *counter_info =
         &perf->counter_infos[counter_idx];

      /* Check if the counter is already computed by one of the selected
       * metric sets. If it is, there is nothing more to do with this
       * counter.
       */
      uint32_t match = UINT32_MAX;
      for (uint32_t w = 0; w < BITSET_WORDS(INTEL_PERF_MAX_METRIC_SETS); w++) {
         if (queries_mask[w] & counter_info->query_mask[w]) {
            match = w * BITSET_WORDBITS + ffsll(queries_mask[w] & counter_info->query_mask[w]) - 1;
            break;
         }
      }
      if (match != UINT32_MAX)
         continue;

      /* Now go through each metric set and find one that contains this
       * counter.
       */
      bool found = false;
      for (uint32_t w = 0; w < BITSET_WORDS(INTEL_PERF_MAX_METRIC_SETS); w++) {
         if (!counter_info->query_mask[w])
            continue;

         uint32_t query_idx = w * BITSET_WORDBITS + ffsll(counter_info->query_mask[w]) - 1;

         /* Since we already looked for this in the query_mask, it should
          * not be set.
          */
         assert(!BITSET_TEST(queries_mask, query_idx));

         BITSET_SET(queries_mask, query_idx);
         found = true;
         break;
      }
      assert(found);
   }
}

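/* Compute the number of passes (one per metric set) required to read all the
 * requested counters, optionally filling pass_queries with the query used
 * for each pass.
 */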
uint32_t
intel_perf_get_n_passes(struct intel_perf_config *perf,
                        const uint32_t *counter_indices,
                        uint32_t counter_indices_count,
                        struct intel_perf_query_info **pass_queries)
{
   BITSET_DECLARE(queries_mask, INTEL_PERF_MAX_METRIC_SETS);
   BITSET_ZERO(queries_mask);

   get_passes_mask(perf, counter_indices, counter_indices_count, queries_mask);

   if (pass_queries) {
      uint32_t pass = 0;
      for (uint32_t q = 0; q < perf->n_queries; q++) {
         if (BITSET_TEST(queries_mask, q))
            pass_queries[pass++] = &perf->queries[q];
      }
   }

   return BITSET_COUNT(queries_mask);
}

void
intel_perf_get_counters_passes(struct intel_perf_config *perf,
                               const uint32_t *counter_indices,
                               uint32_t counter_indices_count,
                               struct intel_perf_counter_pass *counter_pass)
{
   BITSET_DECLARE(queries_mask, INTEL_PERF_MAX_METRIC_SETS);
   BITSET_ZERO(queries_mask);

   get_passes_mask(perf, counter_indices, counter_indices_count, queries_mask);
   ASSERTED uint32_t n_passes = BITSET_COUNT(queries_mask);

   struct intel_perf_query_info **pass_array = calloc(perf->n_queries,
                                                      sizeof(*pass_array));
   uint32_t n_written_passes = 0;

   for (uint32_t i = 0; i < counter_indices_count; i++) {
      assert(counter_indices[i] < perf->n_counters);

      uint32_t counter_idx = counter_indices[i];
      counter_pass[i].counter = perf->counter_infos[counter_idx].counter;

      const struct intel_perf_query_counter_info *counter_info =
         &perf->counter_infos[counter_idx];

      uint32_t query_idx = UINT32_MAX;
      for (uint32_t w = 0; w < BITSET_WORDS(INTEL_PERF_MAX_METRIC_SETS); w++) {
         if (counter_info->query_mask[w] & queries_mask[w]) {
            query_idx = w * BITSET_WORDBITS +
               ffsll(counter_info->query_mask[w] & queries_mask[w]) - 1;
            break;
         }
      }
      assert(query_idx != UINT32_MAX);

      counter_pass[i].query = &perf->queries[query_idx];

      uint32_t pass_idx = UINT32_MAX;
      for (uint32_t p = 0; p < n_written_passes; p++) {
         if (pass_array[p] == counter_pass[i].query) {
            pass_idx = p;
            break;
         }
      }

      /* Record queries not seen yet, so that the number of distinct queries
       * written stays bounded by n_passes.
       */
      if (pass_idx == UINT32_MAX)
         pass_array[n_written_passes++] = counter_pass[i].query;

      assert(n_written_passes <= n_passes);
   }

   free(pass_array);
}

/* Accumulate 32bits OA counters */
static inline void
accumulate_uint32(const uint32_t *report0,
                  const uint32_t *report1,
                  uint64_t *accumulator)
{
   *accumulator += (uint32_t)(*report1 - *report0);
}

/* Accumulate 40bits OA counters */
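/* The low 32 bits of a 40-bit counter are stored in the regular counter
 * dwords of the report, while the high 8 bits live in a byte array at dword
 * offset 40. The delta computation handles a single 40-bit wraparound.
 */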
static inline void
accumulate_uint40(int a_index,
                  const uint32_t *report0,
                  const uint32_t *report1,
                  uint64_t *accumulator)
{
   const uint8_t *high_bytes0 = (uint8_t *)(report0 + 40);
   const uint8_t *high_bytes1 = (uint8_t *)(report1 + 40);
   uint64_t high0 = (uint64_t)(high_bytes0[a_index]) << 32;
   uint64_t high1 = (uint64_t)(high_bytes1[a_index]) << 32;
   uint64_t value0 = report0[a_index + 4] | high0;
   uint64_t value1 = report1[a_index + 4] | high1;
   uint64_t delta;

   if (value0 > value1)
      delta = (1ULL << 40) + value1 - value0;
   else
      delta = value1 - value0;

   *accumulator += delta;
}

static void
gfx8_read_report_clock_ratios(const uint32_t *report,
                              uint64_t *slice_freq_hz,
                              uint64_t *unslice_freq_hz)
{
   /* The lower 16bits of the RPT_ID field of the OA reports contains a
    * snapshot of the bits coming from the RP_FREQ_NORMAL register and is
    * divided this way:
    *
    * RPT_ID[31:25]: RP_FREQ_NORMAL[20:14] (low squashed_slice_clock_frequency)
    * RPT_ID[10:9]:  RP_FREQ_NORMAL[22:21] (high squashed_slice_clock_frequency)
    * RPT_ID[8:0]:   RP_FREQ_NORMAL[31:23] (squashed_unslice_clock_frequency)
    *
    * RP_FREQ_NORMAL[31:23]: Software Unslice Ratio Request
    *                        Multiple of 33.33MHz 2xclk (16 MHz 1xclk)
    *
    * RP_FREQ_NORMAL[22:14]: Software Slice Ratio Request
    *                        Multiple of 33.33MHz 2xclk (16 MHz 1xclk)
    */

   uint32_t unslice_freq = report[0] & 0x1ff;
   uint32_t slice_freq_low = (report[0] >> 25) & 0x7f;
   uint32_t slice_freq_high = (report[0] >> 9) & 0x3;
   uint32_t slice_freq = slice_freq_low | (slice_freq_high << 7);

   *slice_freq_hz = slice_freq * 16666667ULL;
   *unslice_freq_hz = unslice_freq * 16666667ULL;
}

void
intel_perf_query_result_read_frequencies(struct intel_perf_query_result *result,
                                         const struct intel_device_info *devinfo,
                                         const uint32_t *start,
                                         const uint32_t *end)
{
   /* Slice/Unslice frequency is only available in the OA reports when the
    * "Disable OA reports due to clock ratio change" field in
    * OA_DEBUG_REGISTER is set to 1. This is how the kernel programs this
    * global register (see drivers/gpu/drm/i915/i915_perf.c)
    *
    * Documentation says this should be available on Gfx9+ but experimentation
    * shows that Gfx8 reports similar values, so we enable it there too.
    */
   if (devinfo->ver < 8)
      return;

   gfx8_read_report_clock_ratios(start,
                                 &result->slice_frequency[0],
                                 &result->unslice_frequency[0]);
   gfx8_read_report_clock_ratios(end,
                                 &result->slice_frequency[1],
                                 &result->unslice_frequency[1]);
}

static inline bool
can_use_mi_rpc_bc_counters(const struct intel_device_info *devinfo)
{
   return devinfo->ver <= 11;
}

uint64_t
intel_perf_report_timestamp(const struct intel_perf_query_info *query,
                            const uint32_t *report)
{
   return report[1] >> query->perf->oa_timestamp_shift;
}

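/* Accumulate the counter deltas between two OA reports into result,
 * according to the OA report format used by the query.
 */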
void
intel_perf_query_result_accumulate(struct intel_perf_query_result *result,
                                   const struct intel_perf_query_info *query,
                                   const uint32_t *start,
                                   const uint32_t *end)
{
   int i;

   if (result->hw_id == INTEL_PERF_INVALID_CTX_ID &&
       start[2] != INTEL_PERF_INVALID_CTX_ID)
      result->hw_id = start[2];
   if (result->reports_accumulated == 0)
      result->begin_timestamp = intel_perf_report_timestamp(query, start);
   result->end_timestamp = intel_perf_report_timestamp(query, end);
   result->reports_accumulated++;

   switch (query->oa_format) {
   case I915_OA_FORMAT_A24u40_A14u32_B8_C8:
      result->accumulator[query->gpu_time_offset] =
         intel_perf_report_timestamp(query, end) -
         intel_perf_report_timestamp(query, start);

      accumulate_uint32(start + 3, end + 3,
                        result->accumulator + query->gpu_clock_offset); /* clock */

      /* A0-A3 counters are 32bits */
      for (i = 0; i < 4; i++) {
         accumulate_uint32(start + 4 + i, end + 4 + i,
                           result->accumulator + query->a_offset + i);
      }

      /* A4-A23 counters are 40bits */
      for (i = 4; i < 24; i++) {
         accumulate_uint40(i, start, end,
                           result->accumulator + query->a_offset + i);
      }

      /* A24-A27 counters are 32bits */
      for (i = 0; i < 4; i++) {
         accumulate_uint32(start + 28 + i, end + 28 + i,
                           result->accumulator + query->a_offset + 24 + i);
      }

      /* A28-A31 counters are 40bits */
      for (i = 28; i < 32; i++) {
         accumulate_uint40(i, start, end,
                           result->accumulator + query->a_offset + i);
      }

      /* A32-A35 counters are 32bits */
      for (i = 0; i < 4; i++) {
         accumulate_uint32(start + 36 + i, end + 36 + i,
                           result->accumulator + query->a_offset + 32 + i);
      }

      if (can_use_mi_rpc_bc_counters(&query->perf->devinfo) ||
          !query->perf->sys_vars.query_mode) {
         /* A36-A37 counters are 32bits */
         accumulate_uint32(start + 40, end + 40,
                           result->accumulator + query->a_offset + 36);
         accumulate_uint32(start + 46, end + 46,
                           result->accumulator + query->a_offset + 37);

         /* 8x 32bit B counters */
         for (i = 0; i < 8; i++) {
            accumulate_uint32(start + 48 + i, end + 48 + i,
                              result->accumulator + query->b_offset + i);
         }

         /* 8x 32bit C counters... */
         for (i = 0; i < 8; i++) {
            accumulate_uint32(start + 56 + i, end + 56 + i,
                              result->accumulator + query->c_offset + i);
         }
      }
      break;

   case I915_OA_FORMAT_A32u40_A4u32_B8_C8:
      result->accumulator[query->gpu_time_offset] =
         intel_perf_report_timestamp(query, end) -
         intel_perf_report_timestamp(query, start);

      accumulate_uint32(start + 3, end + 3,
                        result->accumulator + query->gpu_clock_offset); /* clock */

      /* 32x 40bit A counters... */
      for (i = 0; i < 32; i++) {
         accumulate_uint40(i, start, end,
                           result->accumulator + query->a_offset + i);
      }

      /* 4x 32bit A counters... */
      for (i = 0; i < 4; i++) {
         accumulate_uint32(start + 36 + i, end + 36 + i,
                           result->accumulator + query->a_offset + 32 + i);
      }

      if (can_use_mi_rpc_bc_counters(&query->perf->devinfo) ||
          !query->perf->sys_vars.query_mode) {
         /* 8x 32bit B counters */
         for (i = 0; i < 8; i++) {
            accumulate_uint32(start + 48 + i, end + 48 + i,
                              result->accumulator + query->b_offset + i);
         }

         /* 8x 32bit C counters... */
         for (i = 0; i < 8; i++) {
            accumulate_uint32(start + 56 + i, end + 56 + i,
                              result->accumulator + query->c_offset + i);
         }
      }
      break;

   case I915_OA_FORMAT_A45_B8_C8:
      result->accumulator[query->gpu_time_offset] =
         intel_perf_report_timestamp(query, end) -
         intel_perf_report_timestamp(query, start);

      for (i = 0; i < 61; i++) {
         accumulate_uint32(start + 3 + i, end + 3 + i,
                           result->accumulator + query->a_offset + i);
      }
      break;

   default:
      unreachable("Can't accumulate OA counters in unknown format");
   }
}

#define GET_FIELD(word, field) (((word) & field ## _MASK) >> field ## _SHIFT)

void
intel_perf_query_result_read_gt_frequency(struct intel_perf_query_result *result,
                                          const struct intel_device_info *devinfo,
                                          const uint32_t start,
                                          const uint32_t end)
{
   switch (devinfo->ver) {
   case 7:
   case 8:
      result->gt_frequency[0] = GET_FIELD(start, GFX7_RPSTAT1_CURR_GT_FREQ) * 50ULL;
      result->gt_frequency[1] = GET_FIELD(end, GFX7_RPSTAT1_CURR_GT_FREQ) * 50ULL;
      break;
   case 9:
   case 11:
   case 12:
      result->gt_frequency[0] = GET_FIELD(start, GFX9_RPSTAT0_CURR_GT_FREQ) * 50ULL / 3ULL;
      result->gt_frequency[1] = GET_FIELD(end, GFX9_RPSTAT0_CURR_GT_FREQ) * 50ULL / 3ULL;
      break;
   default:
      unreachable("unexpected gen");
   }

   /* Put the numbers into Hz. */
   result->gt_frequency[0] *= 1000000ULL;
   result->gt_frequency[1] *= 1000000ULL;
}

void
intel_perf_query_result_read_perfcnts(struct intel_perf_query_result *result,
                                      const struct intel_perf_query_info *query,
                                      const uint64_t *start,
                                      const uint64_t *end)
{
   for (uint32_t i = 0; i < 2; i++) {
      uint64_t v0 = start[i] & PERF_CNT_VALUE_MASK;
      uint64_t v1 = end[i] & PERF_CNT_VALUE_MASK;

      result->accumulator[query->perfcnt_offset + i] = v0 > v1 ?
         (PERF_CNT_VALUE_MASK + 1 + v1 - v0) :
         (v1 - v0);
   }
}

static uint32_t
query_accumulator_offset(const struct intel_perf_query_info *query,
                         enum intel_perf_query_field_type type,
                         uint8_t index)
{
   switch (type) {
   case INTEL_PERF_QUERY_FIELD_TYPE_SRM_PERFCNT:
      return query->perfcnt_offset + index;
   case INTEL_PERF_QUERY_FIELD_TYPE_SRM_OA_A:
      return query->a_offset + index;
   case INTEL_PERF_QUERY_FIELD_TYPE_SRM_OA_B:
      return query->b_offset + index;
   case INTEL_PERF_QUERY_FIELD_TYPE_SRM_OA_C:
      return query->c_offset + index;
   default:
      unreachable("Invalid register type");
      return 0;
   }
}

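/* Accumulate the deltas of all the fields of the query layout between two
 * snapshots, dispatching OA reports, RPSTAT and SRM'ed counters to their
 * respective accumulation paths.
 */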
1325 void
intel_perf_query_result_accumulate_fields(struct intel_perf_query_result * result,const struct intel_perf_query_info * query,const void * start,const void * end,bool no_oa_accumulate)1326 intel_perf_query_result_accumulate_fields(struct intel_perf_query_result *result,
1327                                           const struct intel_perf_query_info *query,
1328                                           const void *start,
1329                                           const void *end,
1330                                           bool no_oa_accumulate)
1331 {
1332    const struct intel_perf_query_field_layout *layout = &query->perf->query_layout;
1333    const struct intel_device_info *devinfo = &query->perf->devinfo;
1334 
1335    for (uint32_t r = 0; r < layout->n_fields; r++) {
1336       const struct intel_perf_query_field *field = &layout->fields[r];
1337 
1338       if (field->type == INTEL_PERF_QUERY_FIELD_TYPE_MI_RPC) {
1339          intel_perf_query_result_read_frequencies(result, devinfo,
1340                                                 start + field->location,
1341                                                 end + field->location);
1342          /* no_oa_accumulate=true is used when doing GL perf queries, we
1343           * manually parse the OA reports from the OA buffer and subtract
1344           * unrelated deltas, so don't accumulate the begin/end reports here.
1345           */
         if (!no_oa_accumulate) {
            intel_perf_query_result_accumulate(result, query,
                                               start + field->location,
                                               end + field->location);
         }
      } else {
         uint64_t v0, v1;

         if (field->size == 4) {
            v0 = *(const uint32_t *)(start + field->location);
            v1 = *(const uint32_t *)(end + field->location);
         } else {
            assert(field->size == 8);
            v0 = *(const uint64_t *)(start + field->location);
            v1 = *(const uint64_t *)(end + field->location);
         }

         if (field->mask) {
            v0 = field->mask & v0;
            v1 = field->mask & v1;
         }

         /* RPSTAT is a bit of a special case because its begin/end values
          * represent frequencies. We store it in a separate location.
          */
         if (field->type == INTEL_PERF_QUERY_FIELD_TYPE_SRM_RPSTAT)
            intel_perf_query_result_read_gt_frequency(result, devinfo, v0, v1);
         else
            result->accumulator[query_accumulator_offset(query, field->type, field->index)] = v1 - v0;
      }
   }
}

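/* Reset a result to zero, marking the context id as invalid until a report
 * provides one.
 */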
void
intel_perf_query_result_clear(struct intel_perf_query_result *result)
{
   memset(result, 0, sizeof(*result));
   result->hw_id = INTEL_PERF_INVALID_CTX_ID;
}

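/* Debug helper: dump the raw field values of a single snapshot to stderr. */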
void
intel_perf_query_result_print_fields(const struct intel_perf_query_info *query,
                                     const void *data)
{
   const struct intel_perf_query_field_layout *layout = &query->perf->query_layout;

   for (uint32_t r = 0; r < layout->n_fields; r++) {
      const struct intel_perf_query_field *field = &layout->fields[r];
      const uint32_t *value32 = data + field->location;

      switch (field->type) {
      case INTEL_PERF_QUERY_FIELD_TYPE_MI_RPC:
         fprintf(stderr, "MI_RPC:\n");
         fprintf(stderr, "  TS: 0x%08x\n", *(value32 + 1));
         fprintf(stderr, "  CLK: 0x%08x\n", *(value32 + 3));
         break;
      case INTEL_PERF_QUERY_FIELD_TYPE_SRM_OA_A:
         fprintf(stderr, "A%u: 0x%08x\n", field->index, *value32);
         break;
      case INTEL_PERF_QUERY_FIELD_TYPE_SRM_OA_B:
         fprintf(stderr, "B%u: 0x%08x\n", field->index, *value32);
         break;
      case INTEL_PERF_QUERY_FIELD_TYPE_SRM_OA_C:
         fprintf(stderr, "C%u: 0x%08x\n", field->index, *value32);
         break;
      default:
         break;
      }
   }
}

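/* qsort() comparator sorting queries alphabetically by name. */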
static int
intel_perf_compare_query_names(const void *v1, const void *v2)
{
   const struct intel_perf_query_info *q1 = v1;
   const struct intel_perf_query_info *q2 = v2;

   return strcmp(q1->name, q2->name);
}

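/* Append a register field to the snapshot layout, growing the layout size
 * and returning the new field so that callers can set an optional value
 * mask.
 */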
static inline struct intel_perf_query_field *
add_query_register(struct intel_perf_query_field_layout *layout,
                   enum intel_perf_query_field_type type,
                   uint16_t offset,
                   uint16_t size,
                   uint8_t index)
{
   /* Align MI_RPC to 64 bytes (HW requirement) & 64-bit registers to 8 bytes
    * (shows up nicely in the debugger).
    */
   if (type == INTEL_PERF_QUERY_FIELD_TYPE_MI_RPC)
      layout->size = align(layout->size, 64);
   else if (size % 8 == 0)
      layout->size = align(layout->size, 8);

   layout->fields[layout->n_fields++] = (struct intel_perf_query_field) {
      .mmio_offset = offset,
      .location = layout->size,
      .type = type,
      .index = index,
      .size = size,
   };
   layout->size += size;

   return &layout->fields[layout->n_fields - 1];
}

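/* Build the layout of a query snapshot: always an MI_RPC OA report first,
 * then, if register snapshots are in use, per-generation SRM captures of the
 * PERFCNT and RPSTAT registers, plus whichever OA A/B/C counter registers
 * the MI_RPC report cannot provide on that platform (cf.
 * can_use_mi_rpc_bc_counters()).
 */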
static void
intel_perf_init_query_fields(struct intel_perf_config *perf_cfg,
                             const struct intel_device_info *devinfo,
                             bool use_register_snapshots)
{
   struct intel_perf_query_field_layout *layout = &perf_cfg->query_layout;

   layout->n_fields = 0;

   /* MI_RPC requires a 64-byte alignment. */
   layout->alignment = 64;

   layout->fields = rzalloc_array(perf_cfg, struct intel_perf_query_field, 5 + 16);

   add_query_register(layout, INTEL_PERF_QUERY_FIELD_TYPE_MI_RPC,
                      0, 256, 0);

   if (use_register_snapshots) {
      if (devinfo->ver <= 11) {
         struct intel_perf_query_field *field =
            add_query_register(layout,
                               INTEL_PERF_QUERY_FIELD_TYPE_SRM_PERFCNT,
                               PERF_CNT_1_DW0, 8, 0);
         field->mask = PERF_CNT_VALUE_MASK;

         field = add_query_register(layout,
                                    INTEL_PERF_QUERY_FIELD_TYPE_SRM_PERFCNT,
                                    PERF_CNT_2_DW0, 8, 1);
         field->mask = PERF_CNT_VALUE_MASK;
      }

      if (devinfo->ver == 8 && devinfo->platform != INTEL_PLATFORM_CHV) {
         add_query_register(layout,
                            INTEL_PERF_QUERY_FIELD_TYPE_SRM_RPSTAT,
                            GFX7_RPSTAT1, 4, 0);
      }

      if (devinfo->ver >= 9) {
         add_query_register(layout,
                            INTEL_PERF_QUERY_FIELD_TYPE_SRM_RPSTAT,
                            GFX9_RPSTAT0, 4, 0);
      }

      if (!can_use_mi_rpc_bc_counters(devinfo)) {
         if (devinfo->ver >= 8 && devinfo->ver <= 11) {
            for (uint32_t i = 0; i < GFX8_N_OA_PERF_B32; i++) {
               add_query_register(layout, INTEL_PERF_QUERY_FIELD_TYPE_SRM_OA_B,
                                  GFX8_OA_PERF_B32(i), 4, i);
            }
            for (uint32_t i = 0; i < GFX8_N_OA_PERF_C32; i++) {
               add_query_register(layout, INTEL_PERF_QUERY_FIELD_TYPE_SRM_OA_C,
                                  GFX8_OA_PERF_C32(i), 4, i);
            }
         } else if (devinfo->verx10 == 120) {
            for (uint32_t i = 0; i < GFX12_N_OAG_PERF_B32; i++) {
               add_query_register(layout, INTEL_PERF_QUERY_FIELD_TYPE_SRM_OA_B,
                                  GFX12_OAG_PERF_B32(i), 4, i);
            }
            for (uint32_t i = 0; i < GFX12_N_OAG_PERF_C32; i++) {
               add_query_register(layout, INTEL_PERF_QUERY_FIELD_TYPE_SRM_OA_C,
                                  GFX12_OAG_PERF_C32(i), 4, i);
            }
         } else if (devinfo->verx10 == 125) {
            add_query_register(layout, INTEL_PERF_QUERY_FIELD_TYPE_SRM_OA_A,
                               GFX125_OAG_PERF_A36, 4, 36);
            add_query_register(layout, INTEL_PERF_QUERY_FIELD_TYPE_SRM_OA_A,
                               GFX125_OAG_PERF_A37, 4, 37);
            for (uint32_t i = 0; i < GFX12_N_OAG_PERF_B32; i++) {
               add_query_register(layout, INTEL_PERF_QUERY_FIELD_TYPE_SRM_OA_B,
                                  GFX12_OAG_PERF_B32(i), 4, i);
            }
            for (uint32_t i = 0; i < GFX12_N_OAG_PERF_C32; i++) {
               add_query_register(layout, INTEL_PERF_QUERY_FIELD_TYPE_SRM_OA_C,
                                  GFX12_OAG_PERF_C32(i), 4, i);
            }
         }
      }
   }

   /* Align the whole package to 64 bytes so that 2 snapshots can be put
    * together without extra alignment for the user.
    */
   layout->size = align(layout->size, 64);
}

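/* Top-level metrics setup: build the snapshot field layout, optionally
 * register the pipeline statistics queries, load the OA metric sets when
 * they are available, then sort the query list by name and build the list
 * of unique counters.
 */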
void
intel_perf_init_metrics(struct intel_perf_config *perf_cfg,
                        const struct intel_device_info *devinfo,
                        int drm_fd,
                        bool include_pipeline_statistics,
                        bool use_register_snapshots)
{
   intel_perf_init_query_fields(perf_cfg, devinfo, use_register_snapshots);

   if (include_pipeline_statistics) {
      load_pipeline_statistic_metrics(perf_cfg, devinfo);
      intel_perf_register_mdapi_statistic_query(perf_cfg, devinfo);
   }

   bool oa_metrics = oa_metrics_available(perf_cfg, drm_fd, devinfo,
                                          use_register_snapshots);
   if (oa_metrics)
      load_oa_metrics(perf_cfg, drm_fd, devinfo);

   /* sort query groups by name */
   qsort(perf_cfg->queries, perf_cfg->n_queries,
         sizeof(perf_cfg->queries[0]), intel_perf_compare_query_names);

   build_unique_counter_list(perf_cfg);

   if (oa_metrics)
      intel_perf_register_mdapi_oa_query(perf_cfg, devinfo);
}