• Home
  • Line#
  • Scopes#
  • Navigate#
  • Raw
  • Download
1 /*
2  * Copyright © 2018 Intel Corporation
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice (including the next
12  * paragraph) shall be included in all copies or substantial portions of the
13  * Software.
14  *
15  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
18  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21  * IN THE SOFTWARE.
22  */
23 
24 #ifndef INTEL_PERF_H
25 #define INTEL_PERF_H
26 
27 #include <stdio.h>
28 #include <stdbool.h>
29 #include <stdint.h>
30 #include <string.h>
31 
32 #include "compiler/glsl/list.h"
33 #include "common/intel_bind_timeline.h"
34 #include "dev/intel_device_info.h"
35 #include "util/bitscan.h"
36 #include "util/bitset.h"
37 #include "util/hash_table.h"
38 #include "util/ralloc.h"
39 
40 #define INTEL_PERF_MAX_METRIC_SETS (1500)
41 
42 #ifdef __cplusplus
43 extern "C" {
44 #endif
45 
46 struct intel_perf_config;
47 struct intel_perf_query_info;
48 
49 #define INTEL_PERF_INVALID_CTX_ID (0xffffffff)
50 
/* Kind of measurement a performance counter reports (event count, duration,
 * throughput, raw value or timestamp).
 */
enum ENUM_PACKED intel_perf_counter_type {
   INTEL_PERF_COUNTER_TYPE_EVENT,
   INTEL_PERF_COUNTER_TYPE_DURATION_NORM,
   INTEL_PERF_COUNTER_TYPE_DURATION_RAW,
   INTEL_PERF_COUNTER_TYPE_THROUGHPUT,
   INTEL_PERF_COUNTER_TYPE_RAW,
   INTEL_PERF_COUNTER_TYPE_TIMESTAMP,
};
59 
/* Storage type of a counter's value (see intel_perf_query_counter_get_size()
 * for the corresponding sizes).
 */
enum ENUM_PACKED intel_perf_counter_data_type {
   INTEL_PERF_COUNTER_DATA_TYPE_BOOL32,
   INTEL_PERF_COUNTER_DATA_TYPE_UINT32,
   INTEL_PERF_COUNTER_DATA_TYPE_UINT64,
   INTEL_PERF_COUNTER_DATA_TYPE_FLOAT,
   INTEL_PERF_COUNTER_DATA_TYPE_DOUBLE,
};
67 
/* Unit in which a counter's value is expressed. */
enum ENUM_PACKED intel_perf_counter_units {
   /* size */
   INTEL_PERF_COUNTER_UNITS_BYTES,
   INTEL_PERF_COUNTER_UNITS_GBPS,

   /* frequency */
   INTEL_PERF_COUNTER_UNITS_HZ,

   /* time */
   INTEL_PERF_COUNTER_UNITS_NS,
   INTEL_PERF_COUNTER_UNITS_US,

   /* generic quantities */
   INTEL_PERF_COUNTER_UNITS_PIXELS,
   INTEL_PERF_COUNTER_UNITS_TEXELS,
   INTEL_PERF_COUNTER_UNITS_THREADS,
   INTEL_PERF_COUNTER_UNITS_PERCENT,

   /* events */
   INTEL_PERF_COUNTER_UNITS_MESSAGES,
   INTEL_PERF_COUNTER_UNITS_NUMBER,
   INTEL_PERF_COUNTER_UNITS_CYCLES,
   INTEL_PERF_COUNTER_UNITS_EVENTS,
   INTEL_PERF_COUNTER_UNITS_UTILIZATION,

   /* EU <-> L3 traffic */
   INTEL_PERF_COUNTER_UNITS_EU_SENDS_TO_L3_CACHE_LINES,
   INTEL_PERF_COUNTER_UNITS_EU_ATOMIC_REQUESTS_TO_L3_CACHE_LINES,
   INTEL_PERF_COUNTER_UNITS_EU_REQUESTS_TO_L3_CACHE_LINES,
   INTEL_PERF_COUNTER_UNITS_EU_BYTES_PER_L3_CACHE_LINE,

   INTEL_PERF_COUNTER_UNITS_MAX
};
101 
/* A pipeline statistics register together with the numerator/denominator
 * applied to the raw register value when reporting the counter.
 */
struct intel_pipeline_stat {
   uint32_t reg;
   uint32_t numerator;
   uint32_t denominator;
};
107 
108 /*
109  * The largest OA formats we can use include:
110  * For Haswell:
111  *   1 timestamp, 45 A counters, 8 B counters and 8 C counters.
112  * For Gfx8+
113  *   1 timestamp, 1 clock, 36 A counters, 8 B counters and 8 C counters
114  * For Xe2:
115  *   1 timestamp, 1 clock, 64 PEC counters
116  *
117  * Plus 2 PERF_CNT registers and 1 RPSTAT register.
118  */
119 #define MAX_OA_REPORT_COUNTERS (2 + 64 + 3)
120 
/*
 * We currently allocate only one page for pipeline statistics queries. Here
 * we derive the maximum number of counters for that amount.
 */
125 #define STATS_BO_SIZE               4096
126 #define STATS_BO_END_OFFSET_BYTES   (STATS_BO_SIZE / 2)
127 #define MAX_STAT_COUNTERS           (STATS_BO_END_OFFSET_BYTES / 8)
128 
/* Accumulated results of a performance query (OA counter deltas plus
 * frequency/timestamp metadata captured at query boundaries).
 */
struct intel_perf_query_result {
   /**
    * Storage for the final accumulated OA counters.
    */
   uint64_t accumulator[MAX_OA_REPORT_COUNTERS];

   /**
    * Hw ID used by the context on which the query was running.
    */
   uint32_t hw_id;

   /**
    * Number of reports accumulated to produce the results.
    */
   uint32_t reports_accumulated;

   /**
    * Frequency in the slices of the GT at the begin and end of the
    * query.
    */
   uint64_t slice_frequency[2];

   /**
    * Frequency in the unslice of the GT at the begin and end of the
    * query.
    */
   uint64_t unslice_frequency[2];

   /**
    * Frequency of the whole GT at the begin and end of the query.
    */
   uint64_t gt_frequency[2];

   /**
    * Timestamp of the query start.
    */
   uint64_t begin_timestamp;

   /**
    * Timestamp of the query end.
    */
   uint64_t end_timestamp;

   /**
    * Whether the query was interrupted by another workload (aka preemption).
    */
   bool query_disjoint;
};
177 
/* Callback computing a counter's uint64 value from a query's accumulated
 * results.
 */
typedef uint64_t (*intel_counter_read_uint64_t)(struct intel_perf_config *perf,
                                                const struct intel_perf_query_info *query,
                                                const struct intel_perf_query_result *results);

/* Same as intel_counter_read_uint64_t, for float-valued counters. */
typedef float (*intel_counter_read_float_t)(struct intel_perf_config *perf,
                                            const struct intel_perf_query_info *query,
                                            const struct intel_perf_query_result *results);
185 
/* Description of a single counter within a query/metric set. */
struct intel_perf_query_counter {
   const char *name;
   const char *desc;
   const char *symbol_name;
   const char *category;
   enum intel_perf_counter_type type;
   enum intel_perf_counter_data_type data_type;
   enum intel_perf_counter_units units;
   /* Byte offset of this counter's value in the query's data buffer. */
   size_t offset;

   /* Callback returning the maximum possible value of the counter, typed
    * according to data_type.
    */
   union {
      intel_counter_read_uint64_t oa_counter_max_uint64;
      intel_counter_read_float_t  oa_counter_max_float;
   };

   /* How to read the counter: a callback for OA counters (typed per
    * data_type), or a register description for pipeline statistics.
    */
   union {
      intel_counter_read_uint64_t oa_counter_read_uint64;
      intel_counter_read_float_t  oa_counter_read_float;
      struct intel_pipeline_stat pipeline_stat;
   };
};
207 
/* A single register/value programming pair. */
struct intel_perf_query_register_prog {
   uint32_t reg;
   uint32_t val;
};
212 
/* Register programming for a given query: flex, mux and boolean-counter
 * register/value pairs.
 */
struct intel_perf_registers {
   const struct intel_perf_query_register_prog *flex_regs;
   uint32_t n_flex_regs;

   const struct intel_perf_query_register_prog *mux_regs;
   uint32_t n_mux_regs;

   const struct intel_perf_query_register_prog *b_counter_regs;
   uint32_t n_b_counter_regs;
};
224 
/* A query (metric set): its counters, the OA configuration identifying it to
 * the kernel, and offsets used when accumulating OA reports.
 */
struct intel_perf_query_info {
   struct intel_perf_config *perf;

   enum intel_perf_query_type {
      INTEL_PERF_QUERY_TYPE_OA,
      INTEL_PERF_QUERY_TYPE_RAW,
      INTEL_PERF_QUERY_TYPE_PIPELINE,
   } kind;
   const char *name;
   const char *symbol_name;
   const char *guid;
   /* Array of n_counters entries (allocated for max_counters). */
   struct intel_perf_query_counter *counters;
   int n_counters;
   int max_counters;
   /* Size of the data buffer the counters' offset fields index into. */
   size_t data_size;

   /* OA specific */
   uint64_t oa_metrics_set_id;
   uint64_t oa_format;/* KMD value */

   /* For indexing into the accumulator[] ... */
   int gpu_time_offset;
   int gpu_clock_offset;
   int a_offset;
   int b_offset;
   int c_offset;
   int perfcnt_offset;
   int rpstat_offset;
   int pec_offset;

   /* Register programming for this query. */
   struct intel_perf_registers config;
};
257 
/* When not using the MI_RPC command, this structure describes the list of
 * register offsets as well as their storage location so that they can be
 * stored through a series of MI_SRM commands and accumulated with
 * intel_perf_query_result_accumulate_snapshots().
 */
struct intel_perf_query_field_layout {
   /* Alignment for the layout */
   uint32_t alignment;

   /* Size of the whole layout */
   uint32_t size;

   /* Number of entries in the fields array below. */
   uint32_t n_fields;

   struct intel_perf_query_field {
      /* MMIO location of this register */
      uint32_t mmio_offset;

      /* Location of this register in the storage */
      uint16_t location;

      /* Type of register, for accumulation (see intel_perf_query_info:*_offset
       * fields)
       */
      enum intel_perf_query_field_type {
         INTEL_PERF_QUERY_FIELD_TYPE_MI_RPC,
         INTEL_PERF_QUERY_FIELD_TYPE_SRM_PERFCNT,
         INTEL_PERF_QUERY_FIELD_TYPE_SRM_RPSTAT,
         INTEL_PERF_QUERY_FIELD_TYPE_SRM_OA_A,
         INTEL_PERF_QUERY_FIELD_TYPE_SRM_OA_B,
         INTEL_PERF_QUERY_FIELD_TYPE_SRM_OA_C,
         INTEL_PERF_QUERY_FIELD_TYPE_SRM_OA_PEC,
      } type;

      /* Index of register in the given type (for instance A31 or B2,
       * etc...)
       */
      uint8_t index;

      /* 4, 8 or 256 */
      uint16_t size;

      /* If not 0, mask to apply to the register value. */
      uint64_t mask;
   } *fields;
};
304 
/* A counter along with the set of queries (metric sets) it appears in. */
struct intel_perf_query_counter_info {
   struct intel_perf_query_counter *counter;

   /* Bit i set means the counter is part of query/group i. */
   BITSET_DECLARE(query_mask, INTEL_PERF_MAX_METRIC_SETS);

   /**
    * Each counter can be a part of many groups, each time at different index.
    * This struct stores one of those locations.
    */
   struct {
      int group_idx; /* query/group number */
      int counter_idx; /* index inside of query/group */
   } location;
};
319 
/* Bitmask of optional kernel perf interface capabilities (stored in
 * intel_perf_config::features_supported).
 */
enum intel_perf_features {
   INTEL_PERF_FEATURE_HOLD_PREEMPTION = (1 << 0),
   INTEL_PERF_FEATURE_GLOBAL_SSEU = (1 << 1),
   /* Whether i915 has DRM_I915_QUERY_PERF_CONFIG support. */
   INTEL_PERF_FEATURE_QUERY_PERF = (1 << 2),
   INTEL_PERF_FEATURE_METRIC_SYNC = (1 << 3),
};
327 
/* Top-level performance state for a device: the available queries and
 * counters, device variables used by the generated counter equations, and
 * driver callbacks for buffer/batch operations.
 */
struct intel_perf_config {
   /* Have extended metrics been enabled */
   bool enable_all_metrics;

   /* Bitmask of enum intel_perf_features supported by the kernel. */
   enum intel_perf_features features_supported;

   /* Number of bits to shift the OA timestamp values by to match the ring
    * timestamp.
    */
   int oa_timestamp_shift;

   /* Mask of bits valid from the OA report (for instance you might have the
    * lower 31 bits [30:0] of timestamp value). This is useful if you want to
    * recombine a full timestamp value captured from the CPU with OA
    * timestamps captured on the device but that only include 31bits of data.
    */
   uint64_t oa_timestamp_mask;

   /* Powergating configuration for running the query.
    * Only used in i915, struct drm_i915_gem_context_param_sseu.
    */
   void *sseu;

   /* Array of n_queries metric sets available on this device. */
   struct intel_perf_query_info *queries;
   int n_queries;

   /* Array of n_counters counter descriptions across all queries. */
   struct intel_perf_query_counter_info *counter_infos;
   int n_counters;

   struct intel_perf_query_field_layout query_layout;
   size_t oa_sample_size;

   /* Variables referenced in the XML meta data for OA performance
    * counters, e.g in the normalization equations.
    *
    * All uint64_t for consistent operand types in generated code
    */
   struct {
      uint64_t n_eus;               /** $EuCoresTotalCount */
      uint64_t n_eu_slices;         /** $EuSlicesTotalCount */
      uint64_t n_eu_sub_slices;     /** $EuSubslicesTotalCount */
      uint64_t n_eu_slice0123;      /** $EuDualSubslicesSlice0123Count */
      uint64_t n_l3_banks;          /** $L3BankTotalCount */
      uint64_t n_l3_nodes;          /** $L3NodeTotalCount */
      uint64_t n_sq_idis;           /** $SqidiTotalCount */
      uint64_t slice_mask;          /** $SliceMask */
      uint64_t subslice_mask;       /** $SubsliceMask */
      uint64_t gt_min_freq;         /** $GpuMinFrequency */
      uint64_t gt_max_freq;         /** $GpuMaxFrequency */
      bool     query_mode;          /** $QueryMode */
   } sys_vars;

   const struct intel_device_info *devinfo;

   /* OA metric sets, indexed by GUID, as known by Mesa at build time, to
    * cross-reference with the GUIDs of configs advertised by the kernel at
    * runtime
    */
   struct hash_table *oa_metrics_table;

   /* When MDAPI hasn't configured the metric we need to use by the time the
    * query begins, this OA metric is used as a fallback.
    */
   uint64_t fallback_raw_oa_metric;

   /* Location of the device's sysfs entry. */
   char sysfs_dev_dir[256];

   /* Driver-supplied callbacks for buffer-object and batch operations,
    * keeping this library independent of a specific driver's BO/batch
    * implementation (all handles are opaque void pointers).
    */
   struct {
      void *(*bo_alloc)(void *bufmgr, const char *name, uint64_t size);
      void (*bo_unreference)(void *bo);
      void *(*bo_map)(void *ctx, void *bo, unsigned flags);
      void (*bo_unmap)(void *bo);
      bool (*batch_references)(void *batch, void *bo);
      void (*bo_wait_rendering)(void *bo);
      int (*bo_busy)(void *bo);
      void (*emit_stall_at_pixel_scoreboard)(void *ctx);
      void (*emit_mi_report_perf_count)(void *ctx,
                                        void *bo,
                                        uint32_t offset_in_bytes,
                                        uint32_t report_id);
      void (*batchbuffer_flush)(void *ctx,
                                const char *file, int line);
      void (*store_register_mem)(void *ctx, void *bo, uint32_t reg, uint32_t reg_size, uint32_t offset);

   } vtbl;
};
415 
/* Associates a counter with the query (pass) used to read it (see
 * intel_perf_get_counters_passes()).
 */
struct intel_perf_counter_pass {
   struct intel_perf_query_info *query;
   struct intel_perf_query_counter *counter;
};
420 
/* Type of a record read from the perf stream (see
 * intel_perf_record_header::type).
 */
enum intel_perf_record_type {
   INTEL_PERF_RECORD_TYPE_SAMPLE = 1,
   INTEL_PERF_RECORD_TYPE_OA_REPORT_LOST = 2,
   INTEL_PERF_RECORD_TYPE_OA_BUFFER_LOST = 3,
   INTEL_PERF_RECORD_TYPE_COUNTER_OVERFLOW = 4,
   INTEL_PERF_RECORD_TYPE_MMIO_TRG_Q_FULL = 5,
   INTEL_PERF_RECORD_TYPE_MAX,
};
429 
/* Header preceding each record in the perf stream sample buffer. */
struct intel_perf_record_header {
   uint32_t type; /* enum intel_perf_record_type */
   uint16_t pad;
   uint16_t size;
};
435 
/** Initialize the intel_perf_config object for a given device.
 *
 *    include_pipeline_statistics : Whether to add a pipeline statistic query
 *                                  intel_perf_query_info object
 *
 *    use_register_snapshots : Whether the queries should include counters
 *                             that rely on register snapshots using command
 *                             streamer instructions (not possible when using
 *                             only the OA buffer data).
 */
void intel_perf_init_metrics(struct intel_perf_config *perf_cfg,
                             const struct intel_device_info *devinfo,
                             int drm_fd,
                             bool include_pipeline_statistics,
                             bool use_register_snapshots);

/** Query i915 for a metric id using guid.
 */
bool intel_perf_load_metric_id(struct intel_perf_config *perf_cfg,
                               const char *guid,
                               uint64_t *metric_id);

/** Load a configuration's content from i915 using a guid.
 */
struct intel_perf_registers *intel_perf_load_configuration(struct intel_perf_config *perf_cfg,
                                                           int fd, const char *guid);

/** Store a configuration into i915 using guid and return a new metric id.
 *
 * If guid is NULL, then a generated one will be provided by hashing the
 * content of the configuration.
 */
uint64_t intel_perf_store_configuration(struct intel_perf_config *perf_cfg, int fd,
                                        const struct intel_perf_registers *config,
                                        const char *guid);

/** Remove a previously stored configuration from the kernel. */
void intel_perf_remove_configuration(struct intel_perf_config *perf_cfg, int fd,
                                     uint64_t config_id);
473 
/** First query/group using this counter, i.e. the first bit set in
 * query_mask (BITSET_FFS semantics: ffs-style 1-based index).
 */
static inline unsigned
intel_perf_query_counter_info_first_query(const struct intel_perf_query_counter_info *counter_info)
{
   return BITSET_FFS(counter_info->query_mask);
}
479 
/** Read the slice/unslice frequency from 2 OA reports and store them into
 *  result.
 */
void intel_perf_query_result_read_frequencies(struct intel_perf_query_result *result,
                                              const struct intel_device_info *devinfo,
                                              const uint32_t *start,
                                              const uint32_t *end);

/** Store the GT frequency as reported by the RPSTAT register.
 */
void intel_perf_query_result_read_gt_frequency(struct intel_perf_query_result *result,
                                               const struct intel_device_info *devinfo,
                                               const uint32_t start,
                                               const uint32_t end);

/** Store PERFCNT registers values.
 */
void intel_perf_query_result_read_perfcnts(struct intel_perf_query_result *result,
                                           const struct intel_perf_query_info *query,
                                           const uint64_t *start,
                                           const uint64_t *end);

/** Accumulate the delta between 2 OA reports into result for a given query.
 */
void intel_perf_query_result_accumulate(struct intel_perf_query_result *result,
                                        const struct intel_perf_query_info *query,
                                        const uint32_t *start,
                                        const uint32_t *end);

/** Read the timestamp value in a report.
 */
uint64_t intel_perf_report_timestamp(const struct intel_perf_query_info *query,
                                     const struct intel_device_info *devinfo,
                                     const uint32_t *report);

/** Accumulate the delta between 2 snapshots of OA perf registers (layout
 * should match description specified through intel_perf_query_register_layout).
 */
void intel_perf_query_result_accumulate_fields(struct intel_perf_query_result *result,
                                               const struct intel_perf_query_info *query,
                                               const void *start,
                                               const void *end,
                                               bool no_oa_accumulate);

/** Reset a result so it can accumulate a new query. */
void intel_perf_query_result_clear(struct intel_perf_query_result *result);

/** Debug helper printing out query data.
 */
void intel_perf_query_result_print_fields(const struct intel_perf_query_info *query,
                                          const void *data);
530 
531 static inline size_t
intel_perf_query_counter_get_size(const struct intel_perf_query_counter * counter)532 intel_perf_query_counter_get_size(const struct intel_perf_query_counter *counter)
533 {
534    switch (counter->data_type) {
535    case INTEL_PERF_COUNTER_DATA_TYPE_BOOL32:
536       return sizeof(uint32_t);
537    case INTEL_PERF_COUNTER_DATA_TYPE_UINT32:
538       return sizeof(uint32_t);
539    case INTEL_PERF_COUNTER_DATA_TYPE_UINT64:
540       return sizeof(uint64_t);
541    case INTEL_PERF_COUNTER_DATA_TYPE_FLOAT:
542       return sizeof(float);
543    case INTEL_PERF_COUNTER_DATA_TYPE_DOUBLE:
544       return sizeof(double);
545    default:
546       unreachable("invalid counter data type");
547    }
548 }
549 
550 static inline struct intel_perf_config *
intel_perf_new(void * ctx)551 intel_perf_new(void *ctx)
552 {
553    struct intel_perf_config *perf = rzalloc(ctx, struct intel_perf_config);
554    return perf;
555 }
556 
/** Release the resources held by \p perf_cfg. */
void intel_perf_free(struct intel_perf_config *perf_cfg);

/** Return the KMD OA format value used for this configuration (see
 * intel_perf_query_info::oa_format).
 */
uint64_t intel_perf_get_oa_format(struct intel_perf_config *perf_cfg);
560 
561 /** Whether we have the ability to hold off preemption on a batch so we don't
562  * have to look at the OA buffer to subtract unrelated workloads off the
563  * values captured through MI_* commands.
564  */
565 static inline bool
intel_perf_has_hold_preemption(const struct intel_perf_config * perf)566 intel_perf_has_hold_preemption(const struct intel_perf_config *perf)
567 {
568    return perf->features_supported & INTEL_PERF_FEATURE_HOLD_PREEMPTION;
569 }
570 
571 /** Whether we have the ability to lock EU array power configuration for the
572  * duration of the performance recording. This is useful on Gfx11 where the HW
573  * architecture requires half the EU for particular workloads.
574  */
575 static inline bool
intel_perf_has_global_sseu(const struct intel_perf_config * perf)576 intel_perf_has_global_sseu(const struct intel_perf_config *perf)
577 {
578    return perf->features_supported & INTEL_PERF_FEATURE_GLOBAL_SSEU;
579 }
580 
581 static inline bool
intel_perf_has_metric_sync(const struct intel_perf_config * perf)582 intel_perf_has_metric_sync(const struct intel_perf_config *perf)
583 {
584    return perf->features_supported & INTEL_PERF_FEATURE_METRIC_SYNC;
585 }
586 
/** Return the number of passes (queries) required to read all the counters
 * given by index; if \p pass_queries is non-NULL, it is filled with the query
 * used for each pass.
 */
uint32_t intel_perf_get_n_passes(struct intel_perf_config *perf,
                                 const uint32_t *counter_indices,
                                 uint32_t counter_indices_count,
                                 struct intel_perf_query_info **pass_queries);

/** Fill \p counter_pass with the query/counter pair used to read each of the
 * given counters.
 */
void intel_perf_get_counters_passes(struct intel_perf_config *perf,
                                    const uint32_t *counter_indices,
                                    uint32_t counter_indices_count,
                                    struct intel_perf_counter_pass *counter_pass);

/** Open a kernel perf stream for the given metric set; returns the stream fd
 * (negative on error — NOTE(review): error convention to confirm in the
 * implementation).
 */
int intel_perf_stream_open(struct intel_perf_config *perf_config, int drm_fd,
                           uint32_t ctx_id, uint64_t metrics_set_id,
                           uint64_t period_exponent, bool hold_preemption,
                           bool enable, struct intel_bind_timeline *timeline);

/** Read raw sample records from the perf stream into \p buffer. */
int intel_perf_stream_read_samples(struct intel_perf_config *perf_config,
                                   int perf_stream_fd, uint8_t *buffer,
                                   size_t buffer_len);

/** Enable or disable an open perf stream. */
int intel_perf_stream_set_state(struct intel_perf_config *perf_config,
                                int perf_stream_fd, bool enable);

/** Switch an open perf stream to a different metric set. */
int intel_perf_stream_set_metrics_id(struct intel_perf_config *perf_config,
                                     int drm_fd, int perf_stream_fd,
                                     uint32_t exec_queue,
                                     uint64_t metrics_set_id,
                                     struct intel_bind_timeline *timeline);
610 
611 #ifdef __cplusplus
612 } // extern "C"
613 #endif
614 
615 #endif /* INTEL_PERF_H */
616