• Home
  • Line#
  • Scopes#
  • Navigate#
  • Raw
  • Download
1 /*
2  * Copyright © 2018 Intel Corporation
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice (including the next
12  * paragraph) shall be included in all copies or substantial portions of the
13  * Software.
14  *
15  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
18  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21  * IN THE SOFTWARE.
22  */
23 
24 #ifndef INTEL_PERF_H
25 #define INTEL_PERF_H
26 
27 #include <stdio.h>
28 #include <stdbool.h>
29 #include <stdint.h>
30 #include <string.h>
31 
32 #if defined(MAJOR_IN_SYSMACROS)
33 #include <sys/sysmacros.h>
34 #elif defined(MAJOR_IN_MKDEV)
35 #include <sys/mkdev.h>
36 #endif
37 
38 #include "compiler/glsl/list.h"
39 #include "dev/intel_device_info.h"
40 #include "util/bitscan.h"
41 #include "util/bitset.h"
42 #include "util/hash_table.h"
43 #include "util/ralloc.h"
44 
45 #include "drm-uapi/i915_drm.h"
46 
47 #define INTEL_PERF_MAX_METRIC_SETS (1500)
48 
49 #ifdef __cplusplus
50 extern "C" {
51 #endif
52 
53 struct intel_perf_config;
54 struct intel_perf_query_info;
55 
56 #define INTEL_PERF_INVALID_CTX_ID (0xffffffff)
57 
/* Semantic type of a performance counter (event count, duration,
 * throughput, raw value or timestamp).
 */
enum ENUM_PACKED intel_perf_counter_type {
   INTEL_PERF_COUNTER_TYPE_EVENT,
   INTEL_PERF_COUNTER_TYPE_DURATION_NORM,
   INTEL_PERF_COUNTER_TYPE_DURATION_RAW,
   INTEL_PERF_COUNTER_TYPE_THROUGHPUT,
   INTEL_PERF_COUNTER_TYPE_RAW,
   INTEL_PERF_COUNTER_TYPE_TIMESTAMP,
};
66 
/* Storage type of a counter's value (see
 * intel_perf_query_counter_get_size() for the corresponding sizes).
 */
enum ENUM_PACKED intel_perf_counter_data_type {
   INTEL_PERF_COUNTER_DATA_TYPE_BOOL32,
   INTEL_PERF_COUNTER_DATA_TYPE_UINT32,
   INTEL_PERF_COUNTER_DATA_TYPE_UINT64,
   INTEL_PERF_COUNTER_DATA_TYPE_FLOAT,
   INTEL_PERF_COUNTER_DATA_TYPE_DOUBLE,
};
74 
/* Unit in which a counter's value is expressed. */
enum ENUM_PACKED intel_perf_counter_units {
   /* size */
   INTEL_PERF_COUNTER_UNITS_BYTES,
   INTEL_PERF_COUNTER_UNITS_GBPS,

   /* frequency */
   INTEL_PERF_COUNTER_UNITS_HZ,

   /* time */
   INTEL_PERF_COUNTER_UNITS_NS,
   INTEL_PERF_COUNTER_UNITS_US,

   /* quantities */
   INTEL_PERF_COUNTER_UNITS_PIXELS,
   INTEL_PERF_COUNTER_UNITS_TEXELS,
   INTEL_PERF_COUNTER_UNITS_THREADS,
   INTEL_PERF_COUNTER_UNITS_PERCENT,

   /* events */
   INTEL_PERF_COUNTER_UNITS_MESSAGES,
   INTEL_PERF_COUNTER_UNITS_NUMBER,
   INTEL_PERF_COUNTER_UNITS_CYCLES,
   INTEL_PERF_COUNTER_UNITS_EVENTS,
   INTEL_PERF_COUNTER_UNITS_UTILIZATION,

   /* EU <-> L3 cache traffic */
   INTEL_PERF_COUNTER_UNITS_EU_SENDS_TO_L3_CACHE_LINES,
   INTEL_PERF_COUNTER_UNITS_EU_ATOMIC_REQUESTS_TO_L3_CACHE_LINES,
   INTEL_PERF_COUNTER_UNITS_EU_REQUESTS_TO_L3_CACHE_LINES,
   INTEL_PERF_COUNTER_UNITS_EU_BYTES_PER_L3_CACHE_LINE,

   INTEL_PERF_COUNTER_UNITS_MAX
};
108 
/* Source of a pipeline statistics counter: a register to snapshot plus a
 * numerator/denominator used to scale the raw value.
 */
struct intel_pipeline_stat {
   uint32_t reg;         /* register offset to snapshot (presumably MMIO — confirm) */
   uint32_t numerator;   /* scaling applied to the raw value */
   uint32_t denominator;
};
114 
115 /*
116  * The largest OA formats we can use include:
117  * For Haswell:
118  *   1 timestamp, 45 A counters, 8 B counters and 8 C counters.
119  * For Gfx8+
120  *   1 timestamp, 1 clock, 36 A counters, 8 B counters and 8 C counters
121  *
122  * Plus 2 PERF_CNT registers and 1 RPSTAT register.
123  */
124 #define MAX_OA_REPORT_COUNTERS (62 + 2 + 1)
125 
/*
 * We currently allocate only one page for pipeline statistics queries. Here
 * we derive the maximum number of counters for that amount.
 */
130 #define STATS_BO_SIZE               4096
131 #define STATS_BO_END_OFFSET_BYTES   (STATS_BO_SIZE / 2)
132 #define MAX_STAT_COUNTERS           (STATS_BO_END_OFFSET_BYTES / 8)
133 
134 #define I915_PERF_OA_SAMPLE_SIZE (8 +   /* drm_i915_perf_record_header */ \
135                                   256)  /* OA counter report */
136 
/* Accumulated results of a performance query (OA counter deltas plus
 * frequency/timestamp metadata captured at the begin & end of the query).
 */
struct intel_perf_query_result {
   /**
    * Storage for the final accumulated OA counters.
    */
   uint64_t accumulator[MAX_OA_REPORT_COUNTERS];

   /**
    * Hw ID used by the context on which the query was running.
    */
   uint32_t hw_id;

   /**
    * Number of reports accumulated to produce the results.
    */
   uint32_t reports_accumulated;

   /**
    * Frequency in the slices of the GT at the begin and end of the
    * query.
    */
   uint64_t slice_frequency[2];

   /**
    * Frequency in the unslice of the GT at the begin and end of the
    * query.
    */
   uint64_t unslice_frequency[2];

   /**
    * Frequency of the whole GT at the begin and end of the query.
    */
   uint64_t gt_frequency[2];

   /**
    * Timestamp at the begin of the query.
    */
   uint64_t begin_timestamp;

   /**
    * Timestamp at the end of the query.
    */
   uint64_t end_timestamp;

   /**
    * Whether the query was interrupted by another workload (aka preemption).
    */
   bool query_disjoint;
};
185 
/* Callbacks reading a counter value (uint64 / float variants) out of the
 * accumulated results of a query (stored in intel_perf_query_counter).
 */
typedef uint64_t (*intel_counter_read_uint64_t)(struct intel_perf_config *perf,
                                                const struct intel_perf_query_info *query,
                                                const struct intel_perf_query_result *results);

typedef float (*intel_counter_read_float_t)(struct intel_perf_config *perf,
                                            const struct intel_perf_query_info *query,
                                            const struct intel_perf_query_result *results);
193 
/* Description of a single counter within a query (names/description shown
 * to the API user, value type/units, and accessors to read its value).
 */
struct intel_perf_query_counter {
   const char *name;
   const char *desc;
   const char *symbol_name;
   const char *category;
   enum intel_perf_counter_type type;
   enum intel_perf_counter_data_type data_type;
   enum intel_perf_counter_units units;
   /* Offset of this counter's value in the query's data (value size given
    * by intel_perf_query_counter_get_size()).
    */
   size_t offset;

   /* Accessor for the counter's maximum value (which member is valid
    * presumably follows data_type — confirm against generated code).
    */
   union {
      intel_counter_read_uint64_t oa_counter_max_uint64;
      intel_counter_read_float_t  oa_counter_max_float;
   };

   /* Accessor for the counter's value (OA counters), or the register
    * description for pipeline statistics counters.
    */
   union {
      intel_counter_read_uint64_t oa_counter_read_uint64;
      intel_counter_read_float_t  oa_counter_read_float;
      struct intel_pipeline_stat pipeline_stat;
   };
};
215 
/* A single register write (offset/value pair) of a metric set
 * configuration.
 */
struct intel_perf_query_register_prog {
   uint32_t reg;
   uint32_t val;
};
220 
/* Register programming for a given query.
 *
 * The three lists mirror the flex/mux/boolean-counter register classes of
 * the i915 perf OA config uAPI (see i915_drm.h).
 */
struct intel_perf_registers {
   const struct intel_perf_query_register_prog *flex_regs;
   uint32_t n_flex_regs;

   const struct intel_perf_query_register_prog *mux_regs;
   uint32_t n_mux_regs;

   const struct intel_perf_query_register_prog *b_counter_regs;
   uint32_t n_b_counter_regs;
};
232 
/* Description of a query (metric set): its counters, and for OA queries
 * the metric set id/format and register configuration.
 */
struct intel_perf_query_info {
   /* Back pointer to the configuration this query belongs to. */
   struct intel_perf_config *perf;

   enum intel_perf_query_type {
      INTEL_PERF_QUERY_TYPE_OA,
      INTEL_PERF_QUERY_TYPE_RAW,
      INTEL_PERF_QUERY_TYPE_PIPELINE,
   } kind;
   const char *name;
   const char *symbol_name;
   const char *guid;
   /* Counters exposed by this query: n_counters in use out of max_counters
    * allocated.
    */
   struct intel_perf_query_counter *counters;
   int n_counters;
   int max_counters;
   /* Size of the data returned for this query. */
   size_t data_size;

   /* OA specific */
   uint64_t oa_metrics_set_id;
   int oa_format;

   /* For indexing into the accumulator[] ... */
   int gpu_time_offset;
   int gpu_clock_offset;
   int a_offset;
   int b_offset;
   int c_offset;
   int perfcnt_offset;
   int rpstat_offset;

   /* Register programming for this query. */
   struct intel_perf_registers config;
};
264 
/* When not using the MI_RPC command, this structure describes the list of
 * register offsets as well as their storage location so that they can be
 * stored through a series of MI_SRM commands and accumulated with
 * intel_perf_query_result_accumulate_fields().
 */
struct intel_perf_query_field_layout {
   /* Alignment for the layout */
   uint32_t alignment;

   /* Size of the whole layout */
   uint32_t size;

   /* Number of entries in the fields array below. */
   uint32_t n_fields;

   struct intel_perf_query_field {
      /* MMIO location of this register */
      uint16_t mmio_offset;

      /* Location of this register in the storage */
      uint16_t location;

      /* Type of register, for accumulation (see intel_perf_query_info:*_offset
       * fields)
       */
      enum intel_perf_query_field_type {
         INTEL_PERF_QUERY_FIELD_TYPE_MI_RPC,
         INTEL_PERF_QUERY_FIELD_TYPE_SRM_PERFCNT,
         INTEL_PERF_QUERY_FIELD_TYPE_SRM_RPSTAT,
         INTEL_PERF_QUERY_FIELD_TYPE_SRM_OA_A,
         INTEL_PERF_QUERY_FIELD_TYPE_SRM_OA_B,
         INTEL_PERF_QUERY_FIELD_TYPE_SRM_OA_C,
      } type;

      /* Index of register in the given type (for instance A31 or B2,
       * etc...)
       */
      uint8_t index;

      /* 4, 8 or 256 */
      uint16_t size;

      /* If not 0, mask to apply to the register value. */
      uint64_t mask;
   } *fields;
};
310 
/* A counter together with the set of queries (metric sets) it appears in. */
struct intel_perf_query_counter_info {
   struct intel_perf_query_counter *counter;

   /* Bitset of queries (presumably indices into intel_perf_config::queries
    * — confirm) that include this counter.
    */
   BITSET_DECLARE(query_mask, INTEL_PERF_MAX_METRIC_SETS);

   /**
    * Each counter can be a part of many groups, each time at different index.
    * This struct stores one of those locations.
    */
   struct {
      int group_idx; /* query/group number */
      int counter_idx; /* index inside of query/group */
   } location;
};
325 
/* Global perf state for a device: available queries/metric sets, system
 * variables used by the metric equations, kernel capabilities and driver
 * callbacks.
 */
struct intel_perf_config {
   /* Whether i915 has DRM_I915_QUERY_PERF_CONFIG support. */
   bool i915_query_supported;

   /* Have extended metrics been enabled */
   bool enable_all_metrics;

   /* Version of the i915-perf subsystem, refer to i915_drm.h. */
   int i915_perf_version;

   /* Number of bits to shift the OA timestamp values by to match the ring
    * timestamp.
    */
   int oa_timestamp_shift;

   /* Mask of bits valid from the OA report (for instance you might have the
    * lower 31 bits [30:0] of timestamp value). This is useful if you want to
    * recombine a full timestamp value captured from the CPU with OA
    * timestamps captured on the device but that only include 31bits of data.
    */
   uint64_t oa_timestamp_mask;

   /* Powergating configuration for running the query. */
   struct drm_i915_gem_context_param_sseu sseu;

   /* Available queries (metric sets). */
   struct intel_perf_query_info *queries;
   int n_queries;

   /* Flat list of counters across all queries. */
   struct intel_perf_query_counter_info *counter_infos;
   int n_counters;

   struct intel_perf_query_field_layout query_layout;

   /* Variables referenced in the XML meta data for OA performance
    * counters, e.g in the normalization equations.
    *
    * All uint64_t for consistent operand types in generated code
    */
   struct {
      uint64_t n_eus;               /** $EuCoresTotalCount */
      uint64_t n_eu_slices;         /** $EuSlicesTotalCount */
      uint64_t n_eu_sub_slices;     /** $EuSubslicesTotalCount */
      uint64_t n_eu_slice0123;      /** $EuDualSubslicesSlice0123Count */
      uint64_t slice_mask;          /** $SliceMask */
      uint64_t subslice_mask;       /** $SubsliceMask */
      uint64_t gt_min_freq;         /** $GpuMinFrequency */
      uint64_t gt_max_freq;         /** $GpuMaxFrequency */
      bool     query_mode;          /** $QueryMode */
   } sys_vars;

   struct intel_device_info devinfo;

   /* OA metric sets, indexed by GUID, as known by Mesa at build time, to
    * cross-reference with the GUIDs of configs advertised by the kernel at
    * runtime
    */
   struct hash_table *oa_metrics_table;

   /* When MDAPI hasn't configured the metric we need to use by the time the
    * query begins, this OA metric is used as a fallback.
    */
   uint64_t fallback_raw_oa_metric;

   /* Whether we have support for this platform. If true && n_queries == 0,
    * this means we will not be able to use i915-perf because it is in
    * paranoid mode.
    */
   bool platform_supported;

   /* Location of the device's sysfs entry. */
   char sysfs_dev_dir[256];

   /* Driver-provided callbacks for buffer-object management and command
    * emission; kept as void * so this code stays driver agnostic.
    */
   struct {
      void *(*bo_alloc)(void *bufmgr, const char *name, uint64_t size);
      void (*bo_unreference)(void *bo);
      void *(*bo_map)(void *ctx, void *bo, unsigned flags);
      void (*bo_unmap)(void *bo);
      bool (*batch_references)(void *batch, void *bo);
      void (*bo_wait_rendering)(void *bo);
      int (*bo_busy)(void *bo);
      void (*emit_stall_at_pixel_scoreboard)(void *ctx);
      void (*emit_mi_report_perf_count)(void *ctx,
                                        void *bo,
                                        uint32_t offset_in_bytes,
                                        uint32_t report_id);
      void (*batchbuffer_flush)(void *ctx,
                                const char *file, int line);
      void (*store_register_mem)(void *ctx, void *bo, uint32_t reg, uint32_t reg_size, uint32_t offset);

   } vtbl;
};
417 
/* Pairing of a counter with the query (pass) in which it will be collected
 * (see intel_perf_get_counters_passes()).
 */
struct intel_perf_counter_pass {
   struct intel_perf_query_info *query;
   struct intel_perf_query_counter *counter;
};
422 
423 /** Initialize the intel_perf_config object for a given device.
424  *
425  *    include_pipeline_statistics : Whether to add a pipeline statistic query
426  *                                  intel_perf_query_info object
427  *
428  *    use_register_snapshots : Whether the queries should include counters
429  *                             that rely on register snapshots using command
430  *                             streamer instructions (not possible when using
431  *                             only the OA buffer data).
432  */
433 void intel_perf_init_metrics(struct intel_perf_config *perf_cfg,
434                              const struct intel_device_info *devinfo,
435                              int drm_fd,
436                              bool include_pipeline_statistics,
437                              bool use_register_snapshots);
438 
439 /** Query i915 for a metric id using guid.
440  */
441 bool intel_perf_load_metric_id(struct intel_perf_config *perf_cfg,
442                                const char *guid,
443                                uint64_t *metric_id);
444 
/** Load a configuration's content from i915 using a guid.
 */
447 struct intel_perf_registers *intel_perf_load_configuration(struct intel_perf_config *perf_cfg,
448                                                            int fd, const char *guid);
449 
450 /** Store a configuration into i915 using guid and return a new metric id.
451  *
452  * If guid is NULL, then a generated one will be provided by hashing the
453  * content of the configuration.
454  */
455 uint64_t intel_perf_store_configuration(struct intel_perf_config *perf_cfg, int fd,
456                                         const struct intel_perf_registers *config,
457                                         const char *guid);
458 
459 static inline unsigned
intel_perf_query_counter_info_first_query(const struct intel_perf_query_counter_info * counter_info)460 intel_perf_query_counter_info_first_query(const struct intel_perf_query_counter_info *counter_info)
461 {
462    return BITSET_FFS(counter_info->query_mask);
463 }
464 
/** Read the slice/unslice frequency from 2 OA reports and store them into
 *  result.
 */
468 void intel_perf_query_result_read_frequencies(struct intel_perf_query_result *result,
469                                               const struct intel_device_info *devinfo,
470                                               const uint32_t *start,
471                                               const uint32_t *end);
472 
473 /** Store the GT frequency as reported by the RPSTAT register.
474  */
475 void intel_perf_query_result_read_gt_frequency(struct intel_perf_query_result *result,
476                                                const struct intel_device_info *devinfo,
477                                                const uint32_t start,
478                                                const uint32_t end);
479 
480 /** Store PERFCNT registers values.
481  */
482 void intel_perf_query_result_read_perfcnts(struct intel_perf_query_result *result,
483                                            const struct intel_perf_query_info *query,
484                                            const uint64_t *start,
485                                            const uint64_t *end);
486 
487 /** Accumulate the delta between 2 OA reports into result for a given query.
488  */
489 void intel_perf_query_result_accumulate(struct intel_perf_query_result *result,
490                                         const struct intel_perf_query_info *query,
491                                         const uint32_t *start,
492                                         const uint32_t *end);
493 
494 /** Read the timestamp value in a report.
495  */
496 uint64_t intel_perf_report_timestamp(const struct intel_perf_query_info *query,
497                                      const uint32_t *report);
498 
/** Accumulate the delta between 2 snapshots of OA perf registers (layout
 * should match description specified through intel_perf_query_field_layout).
 */
502 void intel_perf_query_result_accumulate_fields(struct intel_perf_query_result *result,
503                                                const struct intel_perf_query_info *query,
504                                                const void *start,
505                                                const void *end,
506                                                bool no_oa_accumulate);
507 
508 void intel_perf_query_result_clear(struct intel_perf_query_result *result);
509 
510 /** Debug helper printing out query data.
511  */
512 void intel_perf_query_result_print_fields(const struct intel_perf_query_info *query,
513                                           const void *data);
514 
515 static inline size_t
intel_perf_query_counter_get_size(const struct intel_perf_query_counter * counter)516 intel_perf_query_counter_get_size(const struct intel_perf_query_counter *counter)
517 {
518    switch (counter->data_type) {
519    case INTEL_PERF_COUNTER_DATA_TYPE_BOOL32:
520       return sizeof(uint32_t);
521    case INTEL_PERF_COUNTER_DATA_TYPE_UINT32:
522       return sizeof(uint32_t);
523    case INTEL_PERF_COUNTER_DATA_TYPE_UINT64:
524       return sizeof(uint64_t);
525    case INTEL_PERF_COUNTER_DATA_TYPE_FLOAT:
526       return sizeof(float);
527    case INTEL_PERF_COUNTER_DATA_TYPE_DOUBLE:
528       return sizeof(double);
529    default:
530       unreachable("invalid counter data type");
531    }
532 }
533 
534 static inline struct intel_perf_config *
intel_perf_new(void * ctx)535 intel_perf_new(void *ctx)
536 {
537    struct intel_perf_config *perf = rzalloc(ctx, struct intel_perf_config);
538    return perf;
539 }
540 
541 /** Whether we have the ability to hold off preemption on a batch so we don't
542  * have to look at the OA buffer to subtract unrelated workloads off the
543  * values captured through MI_* commands.
544  */
545 static inline bool
intel_perf_has_hold_preemption(const struct intel_perf_config * perf)546 intel_perf_has_hold_preemption(const struct intel_perf_config *perf)
547 {
548    return perf->i915_perf_version >= 3;
549 }
550 
551 /** Whether we have the ability to lock EU array power configuration for the
552  * duration of the performance recording. This is useful on Gfx11 where the HW
553  * architecture requires half the EU for particular workloads.
554  */
555 static inline bool
intel_perf_has_global_sseu(const struct intel_perf_config * perf)556 intel_perf_has_global_sseu(const struct intel_perf_config *perf)
557 {
558    return perf->i915_perf_version >= 4;
559 }
560 
561 uint32_t intel_perf_get_n_passes(struct intel_perf_config *perf,
562                                  const uint32_t *counter_indices,
563                                  uint32_t counter_indices_count,
564                                  struct intel_perf_query_info **pass_queries);
565 void intel_perf_get_counters_passes(struct intel_perf_config *perf,
566                                     const uint32_t *counter_indices,
567                                     uint32_t counter_indices_count,
568                                     struct intel_perf_counter_pass *counter_pass);
569 
570 #ifdef __cplusplus
571 } // extern "C"
572 #endif
573 
574 #endif /* INTEL_PERF_H */
575