1 /*
2  * Copyright © 2019 Intel Corporation
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice (including the next
12  * paragraph) shall be included in all copies or substantial portions of the
13  * Software.
14  *
15  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
18  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21  * IN THE SOFTWARE.
22  */
23 
24 #include <unistd.h>
25 #include <poll.h>
26 
27 #include "common/intel_gem.h"
28 
29 #include "dev/intel_debug.h"
30 #include "dev/intel_device_info.h"
31 
32 #include "perf/intel_perf.h"
33 #include "perf/intel_perf_mdapi.h"
34 #include "perf/intel_perf_private.h"
35 #include "perf/intel_perf_query.h"
36 #include "perf/intel_perf_regs.h"
37 
38 #include "drm-uapi/i915_drm.h"
39 
40 #include "util/compiler.h"
41 #include "util/u_math.h"
42 
43 #define FILE_DEBUG_FLAG DEBUG_PERFMON
44 
45 #define MI_RPC_BO_SIZE                (4096)
46 #define MI_FREQ_OFFSET_BYTES          (256)
47 #define MI_PERF_COUNTERS_OFFSET_BYTES (260)
48 
49 #define ALIGN(x, y) (((x) + (y)-1) & ~((y)-1))
50 
51 /* Align to 64 bytes, a requirement for the OA report write address. */
52 #define TOTAL_QUERY_DATA_SIZE            \
53    ALIGN(256 /* OA report */ +           \
54          4  /* freq register */ +        \
55          8 + 8 /* perf counter 1 & 2 */, \
56          64)
57 
58 
59 static uint32_t field_offset(bool end, uint32_t offset)
60 {
61    return (end ? TOTAL_QUERY_DATA_SIZE : 0) + offset;
62 }
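/* For reference (illustrative note, derived from the constants above):
 * TOTAL_QUERY_DATA_SIZE evaluates to ALIGN(256 + 4 + 8 + 8, 64) =
 * ALIGN(276, 64) = 320 bytes, so field_offset(false, x) addresses a field
 * in the begin snapshot while field_offset(true, x) addresses the same
 * field in the end snapshot, 320 bytes further into the mapped query BO
 * (see read_oa_samples_for_query() and accumulate_oa_reports() below).
 */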
63 
64 #define MAP_READ  (1 << 0)
65 #define MAP_WRITE (1 << 1)
66 
67 /**
68  * Periodic OA samples are read() into these buffer structures via the
69  * i915 perf kernel interface and appended to the
70  * perf_ctx->sample_buffers linked list. When we process the
71  * results of an OA metrics query we need to consider all the periodic
72  * samples between the Begin and End MI_REPORT_PERF_COUNT command
73  * markers.
74  *
75  * 'Periodic' is a simplification as there are other automatic reports
76  * written by the hardware also buffered here.
77  *
78  * Considering three queries, A, B and C:
79  *
80  *  Time ---->
81  *                ________________A_________________
82  *                |                                |
83  *                | ________B_________ _____C___________
84  *                | |                | |           |   |
85  *
86  * And an illustration of sample buffers read over this time frame:
87  * [HEAD ][     ][     ][     ][     ][     ][     ][     ][TAIL ]
88  *
89  * These nodes may hold samples for query A:
90  * [     ][     ][  A  ][  A  ][  A  ][  A  ][  A  ][     ][     ]
91  *
92  * These nodes may hold samples for query B:
93  * [     ][     ][  B  ][  B  ][  B  ][     ][     ][     ][     ]
94  *
95  * These nodes may hold samples for query C:
96  * [     ][     ][     ][     ][     ][  C  ][  C  ][  C  ][     ]
97  *
98  * The illustration assumes we have an even distribution of periodic
99  * samples so all nodes have the same size plotted against time:
100  *
101  * Note, to simplify code, the list is never empty.
102  *
103  * With overlapping queries we can see that periodic OA reports may
104  * relate to multiple queries and care needs to be taken to keep
105  * track of sample buffers until there are no queries that might
106  * depend on their contents.
107  *
108  * We use a node ref counting system where a reference ensures that a
109  * node and all following nodes can't be freed/recycled until the
110  * reference drops to zero.
111  *
112  * E.g. with a ref of one here:
113  * [  0  ][  0  ][  1  ][  0  ][  0  ][  0  ][  0  ][  0  ][  0  ]
114  *
115  * These nodes could be freed or recycled ("reaped"):
116  * [  0  ][  0  ]
117  *
118  * These must be preserved until the leading ref drops to zero:
119  *               [  1  ][  0  ][  0  ][  0  ][  0  ][  0  ][  0  ]
120  *
121  * When a query starts we take a reference on the current tail of
122  * the list, knowing that no already-buffered samples can possibly
123  * relate to the newly-started query. A pointer to this node is
124  * also saved in the query object's ->oa.samples_head.
125  *
126  * E.g. starting query A while there are two nodes in .sample_buffers:
127  *                ________________A________
128  *                |
129  *
130  * [  0  ][  1  ]
131  *           ^_______ Add a reference and store pointer to node in
132  *                    A->oa.samples_head
133  *
134  * Moving forward to when the B query starts with no new buffer nodes:
135  * (for reference, i915 perf reads() are only done when queries finish)
136  *                ________________A_______
137  *                | ________B___
138  *                | |
139  *
140  * [  0  ][  2  ]
141  *           ^_______ Add a reference and store pointer to
142  *                    node in B->oa.samples_head
143  *
144  * Once a query is finished, after an OA query has become 'Ready',
145  * once the End OA report has landed and after we have processed
146  * all the intermediate periodic samples then we drop the
147  * ->oa.samples_head reference we took at the start.
148  *
149  * So when the B query has finished we have:
150  *                ________________A________
151  *                | ______B___________
152  *                | |                |
153  * [  0  ][  1  ][  0  ][  0  ][  0  ]
154  *           ^_______ Drop B->oa.samples_head reference
155  *
156  * We still can't free these due to the A->oa.samples_head ref:
157  *        [  1  ][  0  ][  0  ][  0  ]
158  *
159  * When the A query finishes: (note there's a new ref for C's samples_head)
160  *                ________________A_________________
161  *                |                                |
162  *                |                    _____C_________
163  *                |                    |           |
164  * [  0  ][  0  ][  0  ][  0  ][  1  ][  0  ][  0  ]
165  *           ^_______ Drop A->oa.samples_head reference
166  *
167  * And we can now reap these nodes up to the C->oa.samples_head:
168  * [  X  ][  X  ][  X  ][  X  ]
169  *                  keeping -> [  1  ][  0  ][  0  ]
170  *
171  * We reap old sample buffers each time we finish processing an OA
172  * query by iterating the sample_buffers list from the head until we
173  * find a referenced node and stop.
174  *
175  * Reaped buffers move to a perfquery.free_sample_buffers list and
176  * when we come to read() we first look to recycle a buffer from the
177  * free_sample_buffers list before allocating a new buffer.
178  */
179 struct oa_sample_buf {
180    struct exec_node link;
181    int refcount;
182    int len;
183    uint8_t buf[I915_PERF_OA_SAMPLE_SIZE * 10];
184    uint32_t last_timestamp;
185 };
186 
187 /**
188  * gen representation of a performance query object.
189  *
190  * NB: We want to keep this structure relatively lean considering that
191  * applications may expect to allocate enough objects to be able to
192  * query around all draw calls in a frame.
193  */
194 struct intel_perf_query_object
195 {
196    const struct intel_perf_query_info *queryinfo;
197 
198    /* See query->kind to know which state below is in use... */
199    union {
200       struct {
201 
202          /**
203           * BO containing OA counter snapshots at query Begin/End time.
204           */
205          void *bo;
206 
207          /**
208           * Address of the mapping of @bo
209           */
210          void *map;
211 
212          /**
213           * The MI_REPORT_PERF_COUNT command lets us specify a unique
214           * ID that will be reflected in the resulting OA report
215           * that's written by the GPU. This is the ID we're expecting
216           * in the begin report, and the end report should be
217           * @begin_report_id + 1.
218           */
219          int begin_report_id;
220 
221          /**
222           * Reference the head of the brw->perfquery.sample_buffers
223           * list at the time that the query started (so we only need
224           * to look at nodes after this point when looking for samples
225           * related to this query)
226           *
227           * (See struct brw_oa_sample_buf description for more details)
228           */
229          struct exec_node *samples_head;
230 
231          /**
232           * false while in the unaccumulated_elements list, and set to
233           * true when the final, end MI_RPC snapshot has been
234           * accumulated.
235           */
236          bool results_accumulated;
237 
238          /**
239           * Accumulated OA results between begin and end of the query.
240           */
241          struct intel_perf_query_result result;
242       } oa;
243 
244       struct {
245          /**
246           * BO containing starting and ending snapshots for the
247           * statistics counters.
248           */
249          void *bo;
250       } pipeline_stats;
251    };
252 };
253 
254 struct intel_perf_context {
255    struct intel_perf_config *perf;
256 
257    void * mem_ctx; /* ralloc context */
258    void * ctx;  /* driver context (eg, brw_context) */
259    void * bufmgr;
260    const struct intel_device_info *devinfo;
261 
262    uint32_t hw_ctx;
263    int drm_fd;
264 
265    /* The i915 perf stream we open to setup + enable the OA counters */
266    int oa_stream_fd;
267 
268    /* An i915 perf stream fd gives exclusive access to the OA unit that will
269     * report counter snapshots for a specific counter set/profile in a
270     * specific layout/format so we can only start OA queries that are
271     * compatible with the currently open fd...
272     */
273    int current_oa_metrics_set_id;
274    int current_oa_format;
275 
276    /* List of buffers containing OA reports */
277    struct exec_list sample_buffers;
278 
279    /* Cached list of empty sample buffers */
280    struct exec_list free_sample_buffers;
281 
282    int n_active_oa_queries;
283    int n_active_pipeline_stats_queries;
284 
285    /* The number of queries depending on running OA counters which
286     * extends beyond brw_end_perf_query() since we need to wait until
287     * the last MI_RPC command has been parsed by the GPU.
288     *
289     * Accurate accounting is important here as emitting an
290     * MI_REPORT_PERF_COUNT command while the OA unit is disabled will
291     * effectively hang the gpu.
292     */
293    int n_oa_users;
294 
295    /* To help catch a spurious problem with the hardware or perf
296     * forwarding samples, we emit each MI_REPORT_PERF_COUNT command
297     * with a unique ID that we can explicitly check for...
298     */
299    int next_query_start_report_id;
300 
301    /**
302     * An array of queries whose results haven't yet been assembled
303     * based on the data in buffer objects.
304     *
305     * These may be active, or have already ended.  However, the
306     * results have not been requested.
307     */
308    struct intel_perf_query_object **unaccumulated;
309    int unaccumulated_elements;
310    int unaccumulated_array_size;
311 
312    /* The total number of query objects so we can relinquish
313     * our exclusive access to perf if the application deletes
314     * all of its objects. (NB: We only disable perf while
315     * there are no active queries)
316     */
317    int n_query_instances;
318 
319    int period_exponent;
320 };
321 
322 static bool
323 inc_n_users(struct intel_perf_context *perf_ctx)
324 {
325    if (perf_ctx->n_oa_users == 0 &&
326        intel_ioctl(perf_ctx->oa_stream_fd, I915_PERF_IOCTL_ENABLE, 0) < 0)
327    {
328       return false;
329    }
330    ++perf_ctx->n_oa_users;
331 
332    return true;
333 }
334 
335 static void
336 dec_n_users(struct intel_perf_context *perf_ctx)
337 {
338    /* Disabling the i915 perf stream will effectively disable the OA
339     * counters.  Note it's important to be sure there are no outstanding
340     * MI_RPC commands at this point since they could stall the CS
341     * indefinitely once OACONTROL is disabled.
342     */
343    --perf_ctx->n_oa_users;
344    if (perf_ctx->n_oa_users == 0 &&
345        intel_ioctl(perf_ctx->oa_stream_fd, I915_PERF_IOCTL_DISABLE, 0) < 0)
346    {
347       DBG("WARNING: Error disabling gen perf stream: %m\n");
348    }
349 }
350 
351 void
352 intel_perf_close(struct intel_perf_context *perfquery,
353                  const struct intel_perf_query_info *query)
354 {
355    if (perfquery->oa_stream_fd != -1) {
356       close(perfquery->oa_stream_fd);
357       perfquery->oa_stream_fd = -1;
358    }
359    if (query && query->kind == INTEL_PERF_QUERY_TYPE_RAW) {
360       struct intel_perf_query_info *raw_query =
361          (struct intel_perf_query_info *) query;
362       raw_query->oa_metrics_set_id = 0;
363    }
364 }
365 
366 bool
367 intel_perf_open(struct intel_perf_context *perf_ctx,
368                 int metrics_set_id,
369                 int report_format,
370                 int period_exponent,
371                 int drm_fd,
372                 uint32_t ctx_id,
373                 bool enable)
374 {
375    uint64_t properties[DRM_I915_PERF_PROP_MAX * 2];
376    uint32_t p = 0;
377 
378    /* Single context sampling if valid context id. */
379    if (ctx_id != INTEL_PERF_INVALID_CTX_ID) {
380       properties[p++] = DRM_I915_PERF_PROP_CTX_HANDLE;
381       properties[p++] = ctx_id;
382    }
383 
384    /* Include OA reports in samples */
385    properties[p++] = DRM_I915_PERF_PROP_SAMPLE_OA;
386    properties[p++] = true;
387 
388    /* OA unit configuration */
389    properties[p++] = DRM_I915_PERF_PROP_OA_METRICS_SET;
390    properties[p++] = metrics_set_id;
391 
392    properties[p++] = DRM_I915_PERF_PROP_OA_FORMAT;
393    properties[p++] = report_format;
394 
395    properties[p++] = DRM_I915_PERF_PROP_OA_EXPONENT;
396    properties[p++] = period_exponent;
397 
398    /* SSEU configuration */
399    if (intel_perf_has_global_sseu(perf_ctx->perf)) {
400       properties[p++] = DRM_I915_PERF_PROP_GLOBAL_SSEU;
401       properties[p++] = to_user_pointer(&perf_ctx->perf->sseu);
402    }
403 
404    assert(p <= ARRAY_SIZE(properties));
405 
406    struct drm_i915_perf_open_param param = {
407       .flags = I915_PERF_FLAG_FD_CLOEXEC |
408                I915_PERF_FLAG_FD_NONBLOCK |
409                (enable ? 0 : I915_PERF_FLAG_DISABLED),
410       .num_properties = p / 2,
411       .properties_ptr = (uintptr_t) properties,
412    };
413    int fd = intel_ioctl(drm_fd, DRM_IOCTL_I915_PERF_OPEN, &param);
414    if (fd == -1) {
415       DBG("Error opening gen perf OA stream: %m\n");
416       return false;
417    }
418 
419    perf_ctx->oa_stream_fd = fd;
420 
421    perf_ctx->current_oa_metrics_set_id = metrics_set_id;
422    perf_ctx->current_oa_format = report_format;
423 
424    if (enable)
425       ++perf_ctx->n_oa_users;
426 
427    return true;
428 }
429 
430 static uint64_t
431 get_metric_id(struct intel_perf_config *perf,
432               const struct intel_perf_query_info *query)
433 {
434    /* These queries are known never to change; their config ID was loaded
435     * upon the first query creation. No need to look them up again.
436     */
437    if (query->kind == INTEL_PERF_QUERY_TYPE_OA)
438       return query->oa_metrics_set_id;
439 
440    assert(query->kind == INTEL_PERF_QUERY_TYPE_RAW);
441 
442    /* Raw queries can be reprogrammed by an external application/library.
443     * When a raw query is used for the first time its ID is set to a value !=
444     * 0. When it stops being used the ID returns to 0. No need to reload the
445     * ID when it's already loaded.
446     */
447    if (query->oa_metrics_set_id != 0) {
448       DBG("Raw query '%s' guid=%s using cached ID: %"PRIu64"\n",
449           query->name, query->guid, query->oa_metrics_set_id);
450       return query->oa_metrics_set_id;
451    }
452 
453    struct intel_perf_query_info *raw_query = (struct intel_perf_query_info *)query;
454    if (!intel_perf_load_metric_id(perf, query->guid,
455                                 &raw_query->oa_metrics_set_id)) {
456       DBG("Unable to read query guid=%s ID, falling back to test config\n", query->guid);
457       raw_query->oa_metrics_set_id = perf->fallback_raw_oa_metric;
458    } else {
459       DBG("Raw query '%s' guid=%s loaded ID: %"PRIu64"\n",
460           query->name, query->guid, query->oa_metrics_set_id);
461    }
462    return query->oa_metrics_set_id;
463 }
464 
465 static struct oa_sample_buf *
466 get_free_sample_buf(struct intel_perf_context *perf_ctx)
467 {
468    struct exec_node *node = exec_list_pop_head(&perf_ctx->free_sample_buffers);
469    struct oa_sample_buf *buf;
470 
471    if (node)
472       buf = exec_node_data(struct oa_sample_buf, node, link);
473    else {
474       buf = ralloc_size(perf_ctx->perf, sizeof(*buf));
475 
476       exec_node_init(&buf->link);
477       buf->refcount = 0;
478    }
479    buf->len = 0;
480 
481    return buf;
482 }
483 
484 static void
485 reap_old_sample_buffers(struct intel_perf_context *perf_ctx)
486 {
487    struct exec_node *tail_node =
488       exec_list_get_tail(&perf_ctx->sample_buffers);
489    struct oa_sample_buf *tail_buf =
490       exec_node_data(struct oa_sample_buf, tail_node, link);
491 
492    /* Remove all old, unreferenced sample buffers walking forward from
493     * the head of the list, except always leave at least one node in
494     * the list so we always have a node to reference when we Begin
495     * a new query.
496     */
497    foreach_list_typed_safe(struct oa_sample_buf, buf, link,
498                            &perf_ctx->sample_buffers)
499    {
500       if (buf->refcount == 0 && buf != tail_buf) {
501          exec_node_remove(&buf->link);
502          exec_list_push_head(&perf_ctx->free_sample_buffers, &buf->link);
503       } else
504          return;
505    }
506 }
507 
508 static void
509 free_sample_bufs(struct intel_perf_context *perf_ctx)
510 {
511    foreach_list_typed_safe(struct oa_sample_buf, buf, link,
512                            &perf_ctx->free_sample_buffers)
513       ralloc_free(buf);
514 
515    exec_list_make_empty(&perf_ctx->free_sample_buffers);
516 }
517 
518 
519 struct intel_perf_query_object *
520 intel_perf_new_query(struct intel_perf_context *perf_ctx, unsigned query_index)
521 {
522    const struct intel_perf_query_info *query =
523       &perf_ctx->perf->queries[query_index];
524 
525    switch (query->kind) {
526    case INTEL_PERF_QUERY_TYPE_OA:
527    case INTEL_PERF_QUERY_TYPE_RAW:
528       if (perf_ctx->period_exponent == 0)
529          return NULL;
530       break;
531    case INTEL_PERF_QUERY_TYPE_PIPELINE:
532       break;
533    }
534 
535    struct intel_perf_query_object *obj =
536       calloc(1, sizeof(struct intel_perf_query_object));
537 
538    if (!obj)
539       return NULL;
540 
541    obj->queryinfo = query;
542 
543    perf_ctx->n_query_instances++;
544    return obj;
545 }
546 
547 int
548 intel_perf_active_queries(struct intel_perf_context *perf_ctx,
549                           const struct intel_perf_query_info *query)
550 {
551    assert(perf_ctx->n_active_oa_queries == 0 || perf_ctx->n_active_pipeline_stats_queries == 0);
552 
553    switch (query->kind) {
554    case INTEL_PERF_QUERY_TYPE_OA:
555    case INTEL_PERF_QUERY_TYPE_RAW:
556       return perf_ctx->n_active_oa_queries;
557       break;
558 
559    case INTEL_PERF_QUERY_TYPE_PIPELINE:
560       return perf_ctx->n_active_pipeline_stats_queries;
561       break;
562 
563    default:
564       unreachable("Unknown query type");
565       break;
566    }
567 }
568 
569 const struct intel_perf_query_info*
570 intel_perf_query_info(const struct intel_perf_query_object *query)
571 {
572    return query->queryinfo;
573 }
574 
575 struct intel_perf_context *
576 intel_perf_new_context(void *parent)
577 {
578    struct intel_perf_context *ctx = rzalloc(parent, struct intel_perf_context);
579    if (! ctx)
580       fprintf(stderr, "%s: failed to alloc context\n", __func__);
581    return ctx;
582 }
583 
584 struct intel_perf_config *
585 intel_perf_config(struct intel_perf_context *ctx)
586 {
587    return ctx->perf;
588 }
589 
590 void
591 intel_perf_init_context(struct intel_perf_context *perf_ctx,
592                         struct intel_perf_config *perf_cfg,
593                         void * mem_ctx, /* ralloc context */
594                         void * ctx,  /* driver context (eg, brw_context) */
595                         void * bufmgr,  /* eg brw_bufmgr */
596                         const struct intel_device_info *devinfo,
597                         uint32_t hw_ctx,
598                         int drm_fd)
599 {
600    perf_ctx->perf = perf_cfg;
601    perf_ctx->mem_ctx = mem_ctx;
602    perf_ctx->ctx = ctx;
603    perf_ctx->bufmgr = bufmgr;
604    perf_ctx->drm_fd = drm_fd;
605    perf_ctx->hw_ctx = hw_ctx;
606    perf_ctx->devinfo = devinfo;
607 
608    perf_ctx->unaccumulated =
609       ralloc_array(mem_ctx, struct intel_perf_query_object *, 2);
610    perf_ctx->unaccumulated_elements = 0;
611    perf_ctx->unaccumulated_array_size = 2;
612 
613    exec_list_make_empty(&perf_ctx->sample_buffers);
614    exec_list_make_empty(&perf_ctx->free_sample_buffers);
615 
616    /* It's convenient to guarantee that this linked list of sample
617     * buffers is never empty so we add an empty head so when we
618     * Begin an OA query we can always take a reference on a buffer
619     * in this list.
620     */
621    struct oa_sample_buf *buf = get_free_sample_buf(perf_ctx);
622    exec_list_push_head(&perf_ctx->sample_buffers, &buf->link);
623 
624    perf_ctx->oa_stream_fd = -1;
625    perf_ctx->next_query_start_report_id = 1000;
626 
627    /* The period_exponent gives a sampling period as follows:
628     *   sample_period = timestamp_period * 2^(period_exponent + 1)
629     *
630     * The timestamp increments every 80ns (HSW), ~52ns (GFX9LP) or
631     * ~83ns (GFX8/9).
632     *
633     * The counter overflow period is derived from the EuActive counter
634     * which reads a counter that increments by the number of clock
635     * cycles multiplied by the number of EUs. It can be calculated as:
636     *
637     * 2^(number of bits in A counter) / (n_eus * max_intel_freq * 2)
638     *
639     * (E.g. 40 EUs @ 1GHz = ~53ms)
640     *
641     * We select a sampling period lower than that overflow period to
642     * ensure we cannot see more than 1 counter overflow, otherwise we
643     * could lose information.
644     */
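/* Illustration only, assuming HSW-like values (32-bit A counters, 40 EUs,
 * ~80ns timestamp period): the overflow period computed below comes out to
 * roughly 2^32 / (40 * 2) ns ~= 54ms, and the loop then picks
 * period_exponent = 19, whose corresponding sampling period of
 * 80ns * 2^19 ~= 42ms stays just below that overflow period.
 */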
645 
646    int a_counter_in_bits = 32;
647    if (devinfo->ver >= 8)
648       a_counter_in_bits = 40;
649 
650    uint64_t overflow_period = pow(2, a_counter_in_bits) / (perf_cfg->sys_vars.n_eus *
651        /* drop 1GHz freq to have units in nanoseconds */
652        2);
653 
654    DBG("A counter overflow period: %"PRIu64"ns, %"PRIu64"ms (n_eus=%"PRIu64")\n",
655        overflow_period, overflow_period / 1000000ul, perf_cfg->sys_vars.n_eus);
656 
657    int period_exponent = 0;
658    uint64_t prev_sample_period, next_sample_period;
659    for (int e = 0; e < 30; e++) {
660       prev_sample_period = 1000000000ull * pow(2, e + 1) / devinfo->timestamp_frequency;
661       next_sample_period = 1000000000ull * pow(2, e + 2) / devinfo->timestamp_frequency;
662 
663       /* Take the previous sampling period, lower than the overflow
664        * period.
665        */
666       if (prev_sample_period < overflow_period &&
667           next_sample_period > overflow_period)
668          period_exponent = e + 1;
669    }
670 
671    perf_ctx->period_exponent = period_exponent;
672 
673    if (period_exponent == 0) {
674       DBG("WARNING: unable to find a sampling exponent\n");
675    } else {
676       DBG("OA sampling exponent: %i ~= %"PRIu64"ms\n", period_exponent,
677             prev_sample_period / 1000000ul);
678    }
679 }
680 
681 /**
682  * Add a query to the global list of "unaccumulated queries."
683  *
684  * Queries are tracked here until all the associated OA reports have
685  * been accumulated via accumulate_oa_reports() after the end
686  * MI_REPORT_PERF_COUNT has landed in query->oa.bo.
687  */
688 static void
689 add_to_unaccumulated_query_list(struct intel_perf_context *perf_ctx,
690                                 struct intel_perf_query_object *obj)
691 {
692    if (perf_ctx->unaccumulated_elements >=
693        perf_ctx->unaccumulated_array_size)
694    {
695       perf_ctx->unaccumulated_array_size *= 1.5;
696       perf_ctx->unaccumulated =
697          reralloc(perf_ctx->mem_ctx, perf_ctx->unaccumulated,
698                   struct intel_perf_query_object *,
699                   perf_ctx->unaccumulated_array_size);
700    }
701 
702    perf_ctx->unaccumulated[perf_ctx->unaccumulated_elements++] = obj;
703 }
704 
705 /**
706  * Emit MI_STORE_REGISTER_MEM commands to capture all of the
707  * pipeline statistics for the performance query object.
708  */
709 static void
710 snapshot_statistics_registers(struct intel_perf_context *ctx,
711                               struct intel_perf_query_object *obj,
712                               uint32_t offset_in_bytes)
713 {
714    struct intel_perf_config *perf = ctx->perf;
715    const struct intel_perf_query_info *query = obj->queryinfo;
716    const int n_counters = query->n_counters;
717 
718    for (int i = 0; i < n_counters; i++) {
719       const struct intel_perf_query_counter *counter = &query->counters[i];
720 
721       assert(counter->data_type == INTEL_PERF_COUNTER_DATA_TYPE_UINT64);
722 
723       perf->vtbl.store_register_mem(ctx->ctx, obj->pipeline_stats.bo,
724                                     counter->pipeline_stat.reg, 8,
725                                     offset_in_bytes + counter->offset);
726    }
727 }
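/* For reference: intel_perf_begin_query() calls this with offset 0 and
 * intel_perf_end_query() with STATS_BO_END_OFFSET_BYTES, so each counter's
 * begin value lands at counter->offset and its end value at
 * STATS_BO_END_OFFSET_BYTES + counter->offset within the stats BO.
 */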
728 
729 static void
730 snapshot_query_layout(struct intel_perf_context *perf_ctx,
731                       struct intel_perf_query_object *query,
732                       bool end_snapshot)
733 {
734    struct intel_perf_config *perf_cfg = perf_ctx->perf;
735    const struct intel_perf_query_field_layout *layout = &perf_cfg->query_layout;
736    uint32_t offset = end_snapshot ? align(layout->size, layout->alignment) : 0;
737 
738    for (uint32_t f = 0; f < layout->n_fields; f++) {
739       const struct intel_perf_query_field *field =
740          &layout->fields[end_snapshot ? f : (layout->n_fields - 1 - f)];
741 
742       switch (field->type) {
743       case INTEL_PERF_QUERY_FIELD_TYPE_MI_RPC:
744          perf_cfg->vtbl.emit_mi_report_perf_count(perf_ctx->ctx, query->oa.bo,
745                                                   offset + field->location,
746                                                   query->oa.begin_report_id +
747                                                   (end_snapshot ? 1 : 0));
748          break;
749       case INTEL_PERF_QUERY_FIELD_TYPE_SRM_PERFCNT:
750       case INTEL_PERF_QUERY_FIELD_TYPE_SRM_RPSTAT:
751       case INTEL_PERF_QUERY_FIELD_TYPE_SRM_OA_B:
752       case INTEL_PERF_QUERY_FIELD_TYPE_SRM_OA_C:
753          perf_cfg->vtbl.store_register_mem(perf_ctx->ctx, query->oa.bo,
754                                            field->mmio_offset, field->size,
755                                            offset + field->location);
756          break;
757       default:
758          unreachable("Invalid field type");
759       }
760    }
761 }
762 
763 bool
764 intel_perf_begin_query(struct intel_perf_context *perf_ctx,
765                        struct intel_perf_query_object *query)
766 {
767    struct intel_perf_config *perf_cfg = perf_ctx->perf;
768    const struct intel_perf_query_info *queryinfo = query->queryinfo;
769 
770    /* XXX: We have to consider that the command parser unit that parses batch
771     * buffer commands and is used to capture begin/end counter snapshots isn't
772     * implicitly synchronized with what's currently running across other GPU
773     * units (such as the EUs running shaders) that the performance counters are
774     * associated with.
775     *
776     * The intention of performance queries is to measure the work associated
777     * with commands between the begin/end delimiters and so for that to be the
778     * case we need to explicitly synchronize the parsing of commands to capture
779     * Begin/End counter snapshots with what's running across other parts of the
780     * GPU.
781     *
782     * When the command parser reaches a Begin marker it effectively needs to
783     * drain everything currently running on the GPU until the hardware is idle
784     * before capturing the first snapshot of counters - otherwise the results
785     * would also be measuring the effects of earlier commands.
786     *
787     * When the command parser reaches an End marker it needs to stall until
788     * everything currently running on the GPU has finished before capturing the
789     * end snapshot - otherwise the results won't be a complete representation
790     * of the work.
791     *
792     * To achieve this, we stall the pipeline at pixel scoreboard (preventing any
793     * additional work from being processed by the pipeline until all pixels of
794     * the previous draw have completed).
795     *
796     * N.B. The final results are based on deltas of counters between (inside)
797     * Begin/End markers so even though the total wall clock time of the
798     * workload is stretched by larger pipeline bubbles the bubbles themselves
799     * are generally invisible to the query results. Whether that's a good or a
800     * bad thing depends on the use case. For a lower real-time impact while
801     * capturing metrics, periodic sampling may be a better choice than
802     * INTEL_performance_query.
803     *
804     *
805     * This is our Begin synchronization point to drain current work on the
806     * GPU before we capture our first counter snapshot...
807     */
808    perf_cfg->vtbl.emit_stall_at_pixel_scoreboard(perf_ctx->ctx);
809 
810    switch (queryinfo->kind) {
811    case INTEL_PERF_QUERY_TYPE_OA:
812    case INTEL_PERF_QUERY_TYPE_RAW: {
813 
814       /* Opening an i915 perf stream implies exclusive access to the OA unit
815        * which will generate counter reports for a specific counter set with a
816        * specific layout/format so we can't begin any OA based queries that
817        * require a different counter set or format unless we get an opportunity
818        * to close the stream and open a new one...
819        */
820       uint64_t metric_id = get_metric_id(perf_ctx->perf, queryinfo);
821 
822       if (perf_ctx->oa_stream_fd != -1 &&
823           perf_ctx->current_oa_metrics_set_id != metric_id) {
824 
825          if (perf_ctx->n_oa_users != 0) {
826             DBG("WARNING: Begin failed, already using perf config=%i/%"PRIu64"\n",
827                 perf_ctx->current_oa_metrics_set_id, metric_id);
828             return false;
829          } else
830             intel_perf_close(perf_ctx, queryinfo);
831       }
832 
833       /* If the OA counters aren't already on, enable them. */
834       if (perf_ctx->oa_stream_fd == -1) {
835          assert(perf_ctx->period_exponent != 0);
836 
837          if (!intel_perf_open(perf_ctx, metric_id, queryinfo->oa_format,
838                             perf_ctx->period_exponent, perf_ctx->drm_fd,
839                             perf_ctx->hw_ctx, false))
840             return false;
841       } else {
842          assert(perf_ctx->current_oa_metrics_set_id == metric_id &&
843                 perf_ctx->current_oa_format == queryinfo->oa_format);
844       }
845 
846       if (!inc_n_users(perf_ctx)) {
847          DBG("WARNING: Error enabling i915 perf stream: %m\n");
848          return false;
849       }
850 
851       if (query->oa.bo) {
852          perf_cfg->vtbl.bo_unreference(query->oa.bo);
853          query->oa.bo = NULL;
854       }
855 
856       query->oa.bo = perf_cfg->vtbl.bo_alloc(perf_ctx->bufmgr,
857                                              "perf. query OA MI_RPC bo",
858                                              MI_RPC_BO_SIZE);
859 #ifdef DEBUG
860       /* Pre-filling the BO helps debug whether writes landed. */
861       void *map = perf_cfg->vtbl.bo_map(perf_ctx->ctx, query->oa.bo, MAP_WRITE);
862       memset(map, 0x80, MI_RPC_BO_SIZE);
863       perf_cfg->vtbl.bo_unmap(query->oa.bo);
864 #endif
865 
866       query->oa.begin_report_id = perf_ctx->next_query_start_report_id;
867       perf_ctx->next_query_start_report_id += 2;
868 
869       snapshot_query_layout(perf_ctx, query, false /* end_snapshot */);
870 
871       ++perf_ctx->n_active_oa_queries;
872 
873       /* No already-buffered samples can possibly be associated with this query
874        * so create a marker within the list of sample buffers enabling us to
875        * easily ignore earlier samples when processing this query after
876        * completion.
877        */
878       assert(!exec_list_is_empty(&perf_ctx->sample_buffers));
879       query->oa.samples_head = exec_list_get_tail(&perf_ctx->sample_buffers);
880 
881       struct oa_sample_buf *buf =
882          exec_node_data(struct oa_sample_buf, query->oa.samples_head, link);
883 
884       /* This reference will ensure that future/following sample
885        * buffers (that may relate to this query) can't be freed until
886        * this drops to zero.
887        */
888       buf->refcount++;
889 
890       intel_perf_query_result_clear(&query->oa.result);
891       query->oa.results_accumulated = false;
892 
893       add_to_unaccumulated_query_list(perf_ctx, query);
894       break;
895    }
896 
897    case INTEL_PERF_QUERY_TYPE_PIPELINE:
898       if (query->pipeline_stats.bo) {
899          perf_cfg->vtbl.bo_unreference(query->pipeline_stats.bo);
900          query->pipeline_stats.bo = NULL;
901       }
902 
903       query->pipeline_stats.bo =
904          perf_cfg->vtbl.bo_alloc(perf_ctx->bufmgr,
905                                  "perf. query pipeline stats bo",
906                                  STATS_BO_SIZE);
907 
908       /* Take starting snapshots. */
909       snapshot_statistics_registers(perf_ctx, query, 0);
910 
911       ++perf_ctx->n_active_pipeline_stats_queries;
912       break;
913 
914    default:
915       unreachable("Unknown query type");
916       break;
917    }
918 
919    return true;
920 }
921 
922 void
923 intel_perf_end_query(struct intel_perf_context *perf_ctx,
924                      struct intel_perf_query_object *query)
925 {
926    struct intel_perf_config *perf_cfg = perf_ctx->perf;
927 
928    /* Ensure that the work associated with the queried commands will have
929     * finished before taking our query end counter readings.
930     *
931     * For more details see comment in brw_begin_perf_query for
932     * corresponding flush.
933     */
934    perf_cfg->vtbl.emit_stall_at_pixel_scoreboard(perf_ctx->ctx);
935 
936    switch (query->queryinfo->kind) {
937    case INTEL_PERF_QUERY_TYPE_OA:
938    case INTEL_PERF_QUERY_TYPE_RAW:
939 
940       /* NB: It's possible that the query will have already been marked
941        * as 'accumulated' if an error was seen while reading samples
942        * from perf. In this case we mustn't try and emit a closing
943        * MI_RPC command in case the OA unit has already been disabled
944        */
945       if (!query->oa.results_accumulated)
946          snapshot_query_layout(perf_ctx, query, true /* end_snapshot */);
947 
948       --perf_ctx->n_active_oa_queries;
949 
950       /* NB: even though the query has now ended, it can't be accumulated
951        * until the end MI_REPORT_PERF_COUNT snapshot has been written
952        * to query->oa.bo
953        */
954       break;
955 
956    case INTEL_PERF_QUERY_TYPE_PIPELINE:
957       snapshot_statistics_registers(perf_ctx, query,
958                                     STATS_BO_END_OFFSET_BYTES);
959       --perf_ctx->n_active_pipeline_stats_queries;
960       break;
961 
962    default:
963       unreachable("Unknown query type");
964       break;
965    }
966 }
967 
968 bool intel_perf_oa_stream_ready(struct intel_perf_context *perf_ctx)
969 {
970    struct pollfd pfd;
971 
972    pfd.fd = perf_ctx->oa_stream_fd;
973    pfd.events = POLLIN;
974    pfd.revents = 0;
975 
976    if (poll(&pfd, 1, 0) < 0) {
977       DBG("Error polling OA stream\n");
978       return false;
979    }
980 
981    if (!(pfd.revents & POLLIN))
982       return false;
983 
984    return true;
985 }
986 
987 ssize_t
988 intel_perf_read_oa_stream(struct intel_perf_context *perf_ctx,
989                           void* buf,
990                           size_t nbytes)
991 {
992    return read(perf_ctx->oa_stream_fd, buf, nbytes);
993 }
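/* A rough usage sketch (not part of the driver): a caller could drain any
 * pending samples without blocking by pairing the two helpers above, e.g.
 * with a scratch buffer of some suitable size:
 *
 *    uint8_t scratch[4096];
 *    while (intel_perf_oa_stream_ready(perf_ctx)) {
 *       ssize_t n = intel_perf_read_oa_stream(perf_ctx, scratch,
 *                                             sizeof(scratch));
 *       if (n <= 0)
 *          break;
 *       // walk the drm_i915_perf_record_header records in scratch[0..n)
 *    }
 *
 * read_oa_samples_until() below instead reads the fd directly, retrying
 * on EINTR.
 */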
994 
995 enum OaReadStatus {
996    OA_READ_STATUS_ERROR,
997    OA_READ_STATUS_UNFINISHED,
998    OA_READ_STATUS_FINISHED,
999 };
1000 
1001 static enum OaReadStatus
1002 read_oa_samples_until(struct intel_perf_context *perf_ctx,
1003                       uint32_t start_timestamp,
1004                       uint32_t end_timestamp)
1005 {
1006    struct exec_node *tail_node =
1007       exec_list_get_tail(&perf_ctx->sample_buffers);
1008    struct oa_sample_buf *tail_buf =
1009       exec_node_data(struct oa_sample_buf, tail_node, link);
1010    uint32_t last_timestamp =
1011       tail_buf->len == 0 ? start_timestamp : tail_buf->last_timestamp;
1012 
1013    while (1) {
1014       struct oa_sample_buf *buf = get_free_sample_buf(perf_ctx);
1015       uint32_t offset;
1016       int len;
1017 
1018       while ((len = read(perf_ctx->oa_stream_fd, buf->buf,
1019                          sizeof(buf->buf))) < 0 && errno == EINTR)
1020          ;
1021 
1022       if (len <= 0) {
1023          exec_list_push_tail(&perf_ctx->free_sample_buffers, &buf->link);
1024 
1025          if (len == 0) {
1026             DBG("Spurious EOF reading i915 perf samples\n");
1027             return OA_READ_STATUS_ERROR;
1028          }
1029 
1030          if (errno != EAGAIN) {
1031             DBG("Error reading i915 perf samples: %m\n");
1032             return OA_READ_STATUS_ERROR;
1033          }
1034 
1035          if ((last_timestamp - start_timestamp) >= INT32_MAX)
1036             return OA_READ_STATUS_UNFINISHED;
1037 
1038          if ((last_timestamp - start_timestamp) <
1039               (end_timestamp - start_timestamp))
1040             return OA_READ_STATUS_UNFINISHED;
1041 
1042          return OA_READ_STATUS_FINISHED;
1043       }
1044 
1045       buf->len = len;
1046       exec_list_push_tail(&perf_ctx->sample_buffers, &buf->link);
1047 
1048       /* Go through the reports and update the last timestamp. */
1049       offset = 0;
1050       while (offset < buf->len) {
1051          const struct drm_i915_perf_record_header *header =
1052             (const struct drm_i915_perf_record_header *) &buf->buf[offset];
1053          uint32_t *report = (uint32_t *) (header + 1);
1054 
1055          if (header->type == DRM_I915_PERF_RECORD_SAMPLE)
1056             last_timestamp = report[1];
1057 
1058          offset += header->size;
1059       }
1060 
1061       buf->last_timestamp = last_timestamp;
1062    }
1063 
1064    unreachable("not reached");
1065    return OA_READ_STATUS_ERROR;
1066 }
1067 
1068 /**
1069  * Try to read all the reports until either the delimiting timestamp
1070  * or an error arises.
1071  */
1072 static bool
1073 read_oa_samples_for_query(struct intel_perf_context *perf_ctx,
1074                           struct intel_perf_query_object *query,
1075                           void *current_batch)
1076 {
1077    uint32_t *start;
1078    uint32_t *last;
1079    uint32_t *end;
1080    struct intel_perf_config *perf_cfg = perf_ctx->perf;
1081 
1082    /* We need the MI_REPORT_PERF_COUNT to land before we can start
1083     * accumulating. */
1084    assert(!perf_cfg->vtbl.batch_references(current_batch, query->oa.bo) &&
1085           !perf_cfg->vtbl.bo_busy(query->oa.bo));
1086 
1087    /* Map the BO once here and let accumulate_oa_reports() unmap
1088     * it. */
1089    if (query->oa.map == NULL)
1090       query->oa.map = perf_cfg->vtbl.bo_map(perf_ctx->ctx, query->oa.bo, MAP_READ);
1091 
1092    start = last = query->oa.map + field_offset(false, 0);
1093    end = query->oa.map + field_offset(true, 0);
1094 
1095    if (start[0] != query->oa.begin_report_id) {
1096       DBG("Spurious start report id=%"PRIu32"\n", start[0]);
1097       return true;
1098    }
1099    if (end[0] != (query->oa.begin_report_id + 1)) {
1100       DBG("Spurious end report id=%"PRIu32"\n", end[0]);
1101       return true;
1102    }
1103 
1104    /* Read the reports until the end timestamp. */
1105    switch (read_oa_samples_until(perf_ctx, start[1], end[1])) {
1106    case OA_READ_STATUS_ERROR:
1107       FALLTHROUGH; /* Let accumulate_oa_reports() deal with the error. */
1108    case OA_READ_STATUS_FINISHED:
1109       return true;
1110    case OA_READ_STATUS_UNFINISHED:
1111       return false;
1112    }
1113 
1114    unreachable("invalid read status");
1115    return false;
1116 }
1117 
1118 void
1119 intel_perf_wait_query(struct intel_perf_context *perf_ctx,
1120                       struct intel_perf_query_object *query,
1121                       void *current_batch)
1122 {
1123    struct intel_perf_config *perf_cfg = perf_ctx->perf;
1124    struct brw_bo *bo = NULL;
1125 
1126    switch (query->queryinfo->kind) {
1127    case INTEL_PERF_QUERY_TYPE_OA:
1128    case INTEL_PERF_QUERY_TYPE_RAW:
1129       bo = query->oa.bo;
1130       break;
1131 
1132    case INTEL_PERF_QUERY_TYPE_PIPELINE:
1133       bo = query->pipeline_stats.bo;
1134       break;
1135 
1136    default:
1137       unreachable("Unknown query type");
1138       break;
1139    }
1140 
1141    if (bo == NULL)
1142       return;
1143 
1144    /* If the current batch references our results bo then we need to
1145     * flush first...
1146     */
1147    if (perf_cfg->vtbl.batch_references(current_batch, bo))
1148       perf_cfg->vtbl.batchbuffer_flush(perf_ctx->ctx, __FILE__, __LINE__);
1149 
1150    perf_cfg->vtbl.bo_wait_rendering(bo);
1151 }
1152 
1153 bool
1154 intel_perf_is_query_ready(struct intel_perf_context *perf_ctx,
1155                           struct intel_perf_query_object *query,
1156                           void *current_batch)
1157 {
1158    struct intel_perf_config *perf_cfg = perf_ctx->perf;
1159 
1160    switch (query->queryinfo->kind) {
1161    case INTEL_PERF_QUERY_TYPE_OA:
1162    case INTEL_PERF_QUERY_TYPE_RAW:
1163       return (query->oa.results_accumulated ||
1164               (query->oa.bo &&
1165                !perf_cfg->vtbl.batch_references(current_batch, query->oa.bo) &&
1166                !perf_cfg->vtbl.bo_busy(query->oa.bo)));
1167 
1168    case INTEL_PERF_QUERY_TYPE_PIPELINE:
1169       return (query->pipeline_stats.bo &&
1170               !perf_cfg->vtbl.batch_references(current_batch, query->pipeline_stats.bo) &&
1171               !perf_cfg->vtbl.bo_busy(query->pipeline_stats.bo));
1172 
1173    default:
1174       unreachable("Unknown query type");
1175       break;
1176    }
1177 
1178    return false;
1179 }
1180 
1181 /**
1182  * Remove a query from the global list of unaccumulated queries
1183  * after successfully accumulating the OA reports associated with the
1184  * query in accumulate_oa_reports() or when discarding unwanted query
1185  * results.
1186  */
1187 static void
1188 drop_from_unaccumulated_query_list(struct intel_perf_context *perf_ctx,
1189                                    struct intel_perf_query_object *query)
1190 {
1191    for (int i = 0; i < perf_ctx->unaccumulated_elements; i++) {
1192       if (perf_ctx->unaccumulated[i] == query) {
1193          int last_elt = --perf_ctx->unaccumulated_elements;
1194 
1195          if (i == last_elt)
1196             perf_ctx->unaccumulated[i] = NULL;
1197          else {
1198             perf_ctx->unaccumulated[i] =
1199                perf_ctx->unaccumulated[last_elt];
1200          }
1201 
1202          break;
1203       }
1204    }
1205 
1206    /* Drop our samples_head reference so that associated periodic
1207     * sample data buffers can potentially be reaped if they aren't
1208     * referenced by any other queries...
1209     */
1210 
1211    struct oa_sample_buf *buf =
1212       exec_node_data(struct oa_sample_buf, query->oa.samples_head, link);
1213 
1214    assert(buf->refcount > 0);
1215    buf->refcount--;
1216 
1217    query->oa.samples_head = NULL;
1218 
1219    reap_old_sample_buffers(perf_ctx);
1220 }
1221 
1222 /* In general, if we see anything spurious while accumulating results,
1223  * we don't try to continue accumulating the current query hoping
1224  * for the best; we scrap anything outstanding and then hope for the
1225  * best with new queries.
1226  */
1227 static void
1228 discard_all_queries(struct intel_perf_context *perf_ctx)
1229 {
1230    while (perf_ctx->unaccumulated_elements) {
1231       struct intel_perf_query_object *query = perf_ctx->unaccumulated[0];
1232 
1233       query->oa.results_accumulated = true;
1234       drop_from_unaccumulated_query_list(perf_ctx, query);
1235 
1236       dec_n_users(perf_ctx);
1237    }
1238 }
1239 
1240 /* Looks for the validity bit of context ID (dword 2) of an OA report. */
1241 static bool
1242 oa_report_ctx_id_valid(const struct intel_device_info *devinfo,
1243                        const uint32_t *report)
1244 {
1245    assert(devinfo->ver >= 8);
1246    if (devinfo->ver == 8)
1247       return (report[0] & (1 << 25)) != 0;
1248    return (report[0] & (1 << 16)) != 0;
1249 }
1250 
1251 /**
1252  * Accumulate raw OA counter values based on deltas between pairs of
1253  * OA reports.
1254  *
1255  * Accumulation starts from the first report captured via
1256  * MI_REPORT_PERF_COUNT (MI_RPC) by brw_begin_perf_query() until the
1257  * last MI_RPC report requested by brw_end_perf_query(). Between these
1258  * two reports there may also some number of periodically sampled OA
1259  * two reports there may also be some number of periodically sampled OA
1260  * duration of the query.
1261  *
1262  * These periodic snapshots help to ensure we handle counter overflow
1263  * correctly by being frequent enough to ensure we don't miss multiple
1264  * overflows of a counter between snapshots. For Gfx8+ the i915 perf
1265  * snapshots provide the extra context-switch reports that let us
1266  * subtract out the progress of counters associated with other
1267  * contexts running on the system.
1268  */
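/* A small worked illustration: with a begin report R0, one periodic sample
 * P1 and an end report R2, accumulation adds (P1 - R0) and then (R2 - P1)
 * counter by counter, which telescopes to R2 - R0 when nothing is filtered
 * out. The intermediate samples still matter: they keep each delta small
 * enough that 32-bit counter wrap-around can be handled, and on Gfx8+ they
 * provide the context-switch reference points used to drop deltas that
 * belong to other contexts.
 */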
1269 static void
1270 accumulate_oa_reports(struct intel_perf_context *perf_ctx,
1271                       struct intel_perf_query_object *query)
1272 {
1273    const struct intel_device_info *devinfo = perf_ctx->devinfo;
1274    uint32_t *start;
1275    uint32_t *last;
1276    uint32_t *end;
1277    struct exec_node *first_samples_node;
1278    bool last_report_ctx_match = true;
1279    int out_duration = 0;
1280 
1281    assert(query->oa.map != NULL);
1282 
1283    start = last = query->oa.map + field_offset(false, 0);
1284    end = query->oa.map + field_offset(true, 0);
1285 
1286    if (start[0] != query->oa.begin_report_id) {
1287       DBG("Spurious start report id=%"PRIu32"\n", start[0]);
1288       goto error;
1289    }
1290    if (end[0] != (query->oa.begin_report_id + 1)) {
1291       DBG("Spurious end report id=%"PRIu32"\n", end[0]);
1292       goto error;
1293    }
1294 
1295    /* On Gfx12+ OA reports are sourced from per context counters, so we don't
1296     * ever have to look at the global OA buffer. Yay \o/
1297     */
1298    if (perf_ctx->devinfo->ver >= 12) {
1299       last = start;
1300       goto end;
1301    }
1302 
1303    /* See if we have any periodic reports to accumulate too... */
1304 
1305    /* N.B. The oa.samples_head was set when the query began and
1306     * pointed to the tail of the perf_ctx->sample_buffers list at
1307     * the time the query started. Since the buffer existed before the
1308     * first MI_REPORT_PERF_COUNT command was emitted we therefore know
1309     * that no data in this particular node's buffer can possibly be
1310     * associated with the query - so skip ahead one...
1311     */
1312    first_samples_node = query->oa.samples_head->next;
1313 
1314    foreach_list_typed_from(struct oa_sample_buf, buf, link,
1315                            &perf_ctx->sample_buffers,
1316                            first_samples_node)
1317    {
1318       int offset = 0;
1319 
1320       while (offset < buf->len) {
1321          const struct drm_i915_perf_record_header *header =
1322             (const struct drm_i915_perf_record_header *)(buf->buf + offset);
1323 
1324          assert(header->size != 0);
1325          assert(header->size <= buf->len);
1326 
1327          offset += header->size;
1328 
1329          switch (header->type) {
1330          case DRM_I915_PERF_RECORD_SAMPLE: {
1331             uint32_t *report = (uint32_t *)(header + 1);
1332             bool report_ctx_match = true;
1333             bool add = true;
1334 
1335             /* Ignore reports that come before the start marker.
1336              * (Note: takes care to allow overflow of 32bit timestamps)
1337              */
1338             if (intel_device_info_timebase_scale(devinfo,
1339                                                report[1] - start[1]) > 5000000000) {
1340                continue;
1341             }
1342 
1343             /* Ignore reports that come after the end marker.
1344              * (Note: takes care to allow overflow of 32bit timestamps)
1345              */
1346             if (intel_device_info_timebase_scale(devinfo,
1347                                                report[1] - end[1]) <= 5000000000) {
1348                goto end;
1349             }
1350 
1351             /* For Gfx8+ since the counters continue while other
1352              * contexts are running we need to discount any unrelated
1353              * deltas. The hardware automatically generates a report
1354              * on context switch which gives us a new reference point
1355              * to continuing adding deltas from.
1356              *
1357              * For Haswell we can rely on the HW to stop the progress
1358              * of OA counters while any other context is active.
1359              */
1360             if (devinfo->ver >= 8) {
1361                /* Consider that the current report matches our context only if
1362                 * the report says the report ID is valid.
1363                 */
1364                report_ctx_match = oa_report_ctx_id_valid(devinfo, report) &&
1365                   report[2] == start[2];
1366                if (report_ctx_match)
1367                   out_duration = 0;
1368                else
1369                   out_duration++;
1370 
1371                /* Only add the delta between <last, report> if the last report
1372                 * was clearly identified as our context, or if we have at most
1373                 * 1 report without a matching ID.
1374                 *
1375                 * The OA unit will sometimes label reports with an invalid
1376                 * context ID when i915 rewrites the execlist submit register
1377                 * with the same context as the one currently running. This
1378                 * happens when i915 wants to notify the HW of ringbuffer tail
1379                 * register update. We have to consider this report as part of
1380                 * our context as the 3d pipeline behind the OACS unit is still
1381                 * processing the operations started at the previous execlist
1382                 * submission.
1383                 */
1384                add = last_report_ctx_match && out_duration < 2;
1385             }
1386 
1387             if (add) {
1388                intel_perf_query_result_accumulate(&query->oa.result,
1389                                                 query->queryinfo,
1390                                                 devinfo,
1391                                                 last, report);
1392             } else {
1393                /* We're not adding the delta because we've identified it's not
1394                 * for the context we filter for. We can consider that the
1395                 * query was split.
1396                 */
1397                query->oa.result.query_disjoint = true;
1398             }
1399 
1400             last = report;
1401             last_report_ctx_match = report_ctx_match;
1402 
1403             break;
1404          }
1405 
1406          case DRM_I915_PERF_RECORD_OA_BUFFER_LOST:
1407              DBG("i915 perf: OA error: all reports lost\n");
1408              goto error;
1409          case DRM_I915_PERF_RECORD_OA_REPORT_LOST:
1410              DBG("i915 perf: OA report lost\n");
1411              break;
1412          }
1413       }
1414    }
1415 
1416 end:
1417 
1418    intel_perf_query_result_accumulate(&query->oa.result, query->queryinfo,
1419                                     devinfo, last, end);
1420 
1421    query->oa.results_accumulated = true;
1422    drop_from_unaccumulated_query_list(perf_ctx, query);
1423    dec_n_users(perf_ctx);
1424 
1425    return;
1426 
1427 error:
1428 
1429    discard_all_queries(perf_ctx);
1430 }
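
/* A minimal sketch (not compiled) of the filtering rule used above when
 * walking periodic OA reports on Gfx8+.  The helper name is hypothetical;
 * it only restates the "add = last_report_ctx_match && out_duration < 2"
 * decision in isolation.
 */
#if 0
static bool
example_should_accumulate_delta(bool last_report_ctx_match,
                                uint32_t out_duration)
{
   /* Accumulate the <last, report> delta only when the previous report was
    * clearly tagged with our context ID, or when at most one consecutive
    * report lacked a matching ID (the execlist-resubmission case described
    * in the comment above).
    */
   return last_report_ctx_match && out_duration < 2;
}
#endif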
1431 
1432 void
1433 intel_perf_delete_query(struct intel_perf_context *perf_ctx,
1434                         struct intel_perf_query_object *query)
1435 {
1436    struct intel_perf_config *perf_cfg = perf_ctx->perf;
1437 
1438    /* We can assume that the frontend waits for a query to complete
1439     * before ever calling into here, so we don't have to worry about
1440     * deleting an in-flight query object.
1441     */
1442    switch (query->queryinfo->kind) {
1443    case INTEL_PERF_QUERY_TYPE_OA:
1444    case INTEL_PERF_QUERY_TYPE_RAW:
1445       if (query->oa.bo) {
1446          if (!query->oa.results_accumulated) {
1447             drop_from_unaccumulated_query_list(perf_ctx, query);
1448             dec_n_users(perf_ctx);
1449          }
1450 
1451          perf_cfg->vtbl.bo_unreference(query->oa.bo);
1452          query->oa.bo = NULL;
1453       }
1454 
1455       query->oa.results_accumulated = false;
1456       break;
1457 
1458    case INTEL_PERF_QUERY_TYPE_PIPELINE:
1459       if (query->pipeline_stats.bo) {
1460          perf_cfg->vtbl.bo_unreference(query->pipeline_stats.bo);
1461          query->pipeline_stats.bo = NULL;
1462       }
1463       break;
1464 
1465    default:
1466       unreachable("Unknown query type");
1467       break;
1468    }
1469 
1470    /* The INTEL_performance_query extension is no longer in use once the
1471     * last query instance is deleted, so it's a good time to free our cache
1472     * of sample buffers and close any current i915-perf stream.
1473     */
1474    if (--perf_ctx->n_query_instances == 0) {
1475       free_sample_bufs(perf_ctx);
1476       intel_perf_close(perf_ctx, query->queryinfo);
1477    }
1478 
1479    free(query);
1480 }
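
/* Hypothetical caller-side sketch (not compiled): deleting the last live
 * query object is what triggers free_sample_bufs() and intel_perf_close()
 * above.  The loop below only illustrates that ordering; "queries" and
 * "n_queries" are made-up names.
 */
#if 0
static void
example_teardown(struct intel_perf_context *perf_ctx,
                 struct intel_perf_query_object **queries,
                 unsigned n_queries)
{
   /* Each delete drops n_query_instances; the i915-perf stream and the
    * sample buffer cache are released only when the count reaches zero.
    */
   for (unsigned i = 0; i < n_queries; i++)
      intel_perf_delete_query(perf_ctx, queries[i]);
}
#endif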
1481 
1482 static int
1483 get_oa_counter_data(struct intel_perf_context *perf_ctx,
1484                     struct intel_perf_query_object *query,
1485                     size_t data_size,
1486                     uint8_t *data)
1487 {
1488    struct intel_perf_config *perf_cfg = perf_ctx->perf;
1489    const struct intel_perf_query_info *queryinfo = query->queryinfo;
1490    int n_counters = queryinfo->n_counters;
1491    int written = 0;
1492 
1493    for (int i = 0; i < n_counters; i++) {
1494       const struct intel_perf_query_counter *counter = &queryinfo->counters[i];
1495       uint64_t *out_uint64;
1496       float *out_float;
1497       size_t counter_size = intel_perf_query_counter_get_size(counter);
1498 
1499       if (counter_size) {
1500          switch (counter->data_type) {
1501          case INTEL_PERF_COUNTER_DATA_TYPE_UINT64:
1502             out_uint64 = (uint64_t *)(data + counter->offset);
1503             *out_uint64 =
1504                counter->oa_counter_read_uint64(perf_cfg, queryinfo,
1505                                                &query->oa.result);
1506             break;
1507          case INTEL_PERF_COUNTER_DATA_TYPE_FLOAT:
1508             out_float = (float *)(data + counter->offset);
1509             *out_float =
1510                counter->oa_counter_read_float(perf_cfg, queryinfo,
1511                                               &query->oa.result);
1512             break;
1513          default:
1514             /* So far we aren't using uint32, double or bool32... */
1515             unreachable("unexpected counter data type");
1516          }
1517 
1518          if (counter->offset + counter_size > written)
1519             written = counter->offset + counter_size;
1520       }
1521    }
1522 
1523    return written;
1524 }
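
/* A minimal caller-side sketch (not compiled): counters are packed into
 * "data" at their counter->offset, so a caller can read them back the same
 * way.  The helper name, counter index and data type below are assumptions
 * for illustration only; memcpy() assumes <string.h>.
 */
#if 0
static uint64_t
example_read_counter(const struct intel_perf_query_info *queryinfo,
                     const uint8_t *data, int counter_index)
{
   const struct intel_perf_query_counter *counter =
      &queryinfo->counters[counter_index];

   /* Assumes this counter was declared as INTEL_PERF_COUNTER_DATA_TYPE_UINT64. */
   uint64_t value;
   memcpy(&value, data + counter->offset, sizeof(value));
   return value;
}
#endif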
1525 
1526 static int
1527 get_pipeline_stats_data(struct intel_perf_context *perf_ctx,
1528                         struct intel_perf_query_object *query,
1529                         size_t data_size,
1530                         uint8_t *data)
1531 
1532 {
1533    struct intel_perf_config *perf_cfg = perf_ctx->perf;
1534    const struct intel_perf_query_info *queryinfo = query->queryinfo;
1535    int n_counters = queryinfo->n_counters;
1536    uint8_t *p = data;
1537 
1538    uint64_t *start = perf_cfg->vtbl.bo_map(perf_ctx->ctx, query->pipeline_stats.bo, MAP_READ);
1539    uint64_t *end = start + (STATS_BO_END_OFFSET_BYTES / sizeof(uint64_t));
1540 
1541    for (int i = 0; i < n_counters; i++) {
1542       const struct intel_perf_query_counter *counter = &queryinfo->counters[i];
1543       uint64_t value = end[i] - start[i];
1544 
1545       if (counter->pipeline_stat.numerator !=
1546           counter->pipeline_stat.denominator) {
1547          value *= counter->pipeline_stat.numerator;
1548          value /= counter->pipeline_stat.denominator;
1549       }
1550 
1551       *((uint64_t *)p) = value;
1552       p += 8;
1553    }
1554 
1555    perf_cfg->vtbl.bo_unmap(query->pipeline_stats.bo);
1556 
1557    return p - data;
1558 }
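
/* A minimal sketch (not compiled) of the numerator/denominator scaling
 * applied above, using made-up values: numerator 1 and denominator 4 turn
 * a raw delta of 1000 into a reported value of 250.  Counters where the
 * two are equal are passed through unscaled.
 */
#if 0
static uint64_t
example_scale_pipeline_stat(uint64_t raw_delta,
                            uint64_t numerator, uint64_t denominator)
{
   if (numerator != denominator) {
      raw_delta *= numerator;
      raw_delta /= denominator;   /* e.g. 1000 * 1 / 4 = 250 */
   }
   return raw_delta;
}
#endif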
1559 
1560 void
1561 intel_perf_get_query_data(struct intel_perf_context *perf_ctx,
1562                           struct intel_perf_query_object *query,
1563                           void *current_batch,
1564                           int data_size,
1565                           unsigned *data,
1566                           unsigned *bytes_written)
1567 {
1568    struct intel_perf_config *perf_cfg = perf_ctx->perf;
1569    int written = 0;
1570 
1571    switch (query->queryinfo->kind) {
1572    case INTEL_PERF_QUERY_TYPE_OA:
1573    case INTEL_PERF_QUERY_TYPE_RAW:
1574       if (!query->oa.results_accumulated) {
1575          /* Due to the sampling frequency of the OA buffer used by the
1576           * i915-perf driver, there can be a 5ms delay between Mesa seeing the
1577           * query complete and i915 making all the OA buffer reports available
1578           * to us. We need to wait for all the reports to come in before we
1579           * can do the post processing that removes unrelated deltas.
1580           * There is an i915-perf series to address this issue, but it has
1581           * not been merged upstream yet.
1582           */
1583          while (!read_oa_samples_for_query(perf_ctx, query, current_batch))
1584             ;
1585 
1586          uint32_t *begin_report = query->oa.map;
1587          uint32_t *end_report = query->oa.map + perf_cfg->query_layout.size;
1588          intel_perf_query_result_accumulate_fields(&query->oa.result,
1589                                                  query->queryinfo,
1590                                                  perf_ctx->devinfo,
1591                                                  begin_report,
1592                                                  end_report,
1593                                                  true /* no_oa_accumulate */);
1594          accumulate_oa_reports(perf_ctx, query);
1595          assert(query->oa.results_accumulated);
1596 
1597          perf_cfg->vtbl.bo_unmap(query->oa.bo);
1598          query->oa.map = NULL;
1599       }
1600       if (query->queryinfo->kind == INTEL_PERF_QUERY_TYPE_OA) {
1601          written = get_oa_counter_data(perf_ctx, query, data_size, (uint8_t *)data);
1602       } else {
1603          const struct intel_device_info *devinfo = perf_ctx->devinfo;
1604 
1605          written = intel_perf_query_result_write_mdapi((uint8_t *)data, data_size,
1606                                                      devinfo, query->queryinfo,
1607                                                      &query->oa.result);
1608       }
1609       break;
1610 
1611    case INTEL_PERF_QUERY_TYPE_PIPELINE:
1612       written = get_pipeline_stats_data(perf_ctx, query, data_size, (uint8_t *)data);
1613       break;
1614 
1615    default:
1616       unreachable("Unknown query type");
1617       break;
1618    }
1619 
1620    if (bytes_written)
1621       *bytes_written = written;
1622 }
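
/* Hypothetical caller-side sketch (not compiled): a typical driver polls
 * readiness, then fetches the packed counter data and deletes the query.
 * The helper name, the buffer size and the busy-wait are illustrative only.
 */
#if 0
static void
example_collect_results(struct intel_perf_context *perf_ctx,
                        struct intel_perf_query_object *query,
                        void *current_batch)
{
   unsigned data[4096 / sizeof(unsigned)];
   unsigned bytes_written = 0;

   while (!intel_perf_is_query_ready(perf_ctx, query, current_batch))
      ;  /* A real driver would yield or flush the batch here. */

   intel_perf_get_query_data(perf_ctx, query, current_batch,
                             sizeof(data), data, &bytes_written);
   intel_perf_delete_query(perf_ctx, query);
}
#endif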
1623 
1624 void
1625 intel_perf_dump_query_count(struct intel_perf_context *perf_ctx)
1626 {
1627    DBG("Queries: (Open queries = %d, OA users = %d)\n",
1628        perf_ctx->n_active_oa_queries, perf_ctx->n_oa_users);
1629 }
1630 
1631 void
1632 intel_perf_dump_query(struct intel_perf_context *ctx,
1633                       struct intel_perf_query_object *obj,
1634                       void *current_batch)
1635 {
1636    switch (obj->queryinfo->kind) {
1637    case INTEL_PERF_QUERY_TYPE_OA:
1638    case INTEL_PERF_QUERY_TYPE_RAW:
1639       DBG("BO: %-4s OA data: %-10s %-15s\n",
1640           obj->oa.bo ? "yes," : "no,",
1641           intel_perf_is_query_ready(ctx, obj, current_batch) ? "ready," : "not ready,",
1642           obj->oa.results_accumulated ? "accumulated" : "not accumulated");
1643       break;
1644    case INTEL_PERF_QUERY_TYPE_PIPELINE:
1645       DBG("BO: %-4s\n",
1646           obj->pipeline_stats.bo ? "yes" : "no");
1647       break;
1648    default:
1649       unreachable("Unknown query type");
1650       break;
1651    }
1652 }
1653