/*
 * Copyright © 2019 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include <unistd.h>

#include "common/gen_gem.h"

#include "dev/gen_debug.h"
#include "dev/gen_device_info.h"

#include "perf/gen_perf.h"
#include "perf/gen_perf_mdapi.h"
#include "perf/gen_perf_private.h"
#include "perf/gen_perf_query.h"
#include "perf/gen_perf_regs.h"

#include "drm-uapi/i915_drm.h"

#include "util/u_math.h"

#define FILE_DEBUG_FLAG DEBUG_PERFMON
#define MI_RPC_BO_SIZE 4096
#define MI_FREQ_START_OFFSET_BYTES (3072)
#define MI_RPC_BO_END_OFFSET_BYTES (MI_RPC_BO_SIZE / 2)
#define MI_FREQ_END_OFFSET_BYTES (3076)
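
/* A sketch (derived from how this file uses the offsets above, not from a
 * HW spec) of the MI_RPC BO layout:
 *
 *   0x0000: begin OA report, written by MI_REPORT_PERF_COUNT
 *   0x0800: end OA report (MI_RPC_BO_END_OFFSET_BYTES = MI_RPC_BO_SIZE / 2)
 *   0x0c00: begin RPSTAT (GT frequency) snapshot (MI_FREQ_START_OFFSET_BYTES)
 *   0x0c04: end RPSTAT (GT frequency) snapshot (MI_FREQ_END_OFFSET_BYTES)
 */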

#define MAP_READ  (1 << 0)
#define MAP_WRITE (1 << 1)

/**
 * Periodic OA samples are read() into these buffer structures via the
 * i915 perf kernel interface and appended to the
 * perf_ctx->sample_buffers linked list. When we process the
 * results of an OA metrics query we need to consider all the periodic
 * samples between the Begin and End MI_REPORT_PERF_COUNT command
 * markers.
 *
 * 'Periodic' is a simplification as there are other automatic reports
 * written by the hardware also buffered here.
 *
 * Considering three queries, A, B and C:
 *
 *  Time ---->
 *                ________________A_________________
 *                |                                 |
 *                | ________B_________ _____C___________
 *                | |                | |           |    |
 *
 * And an illustration of sample buffers read over this time frame:
 * [HEAD ][     ][     ][     ][     ][     ][     ][     ][TAIL ]
 *
 * These nodes may hold samples for query A:
 * [     ][     ][  A  ][  A  ][  A  ][  A  ][  A  ][     ][     ]
 *
 * These nodes may hold samples for query B:
 * [     ][     ][  B  ][  B  ][  B  ][     ][     ][     ][     ]
 *
 * These nodes may hold samples for query C:
 * [     ][     ][     ][     ][     ][  C  ][  C  ][  C  ][     ]
 *
 * The illustration assumes we have an even distribution of periodic
 * samples so all nodes have the same size plotted against time.
 *
 * Note, to simplify code, the list is never empty.
 *
 * With overlapping queries we can see that periodic OA reports may
 * relate to multiple queries and care needs to be taken to keep
 * track of sample buffers until there are no queries that might
 * depend on their contents.
 *
 * We use a node ref counting system where a reference ensures that a
 * node and all following nodes can't be freed/recycled until the
 * reference drops to zero.
 *
 * E.g. with a ref of one here:
 * [  0  ][  0  ][  1  ][  0  ][  0  ][  0  ][  0  ][  0  ][  0  ]
 *
 * These nodes could be freed or recycled ("reaped"):
 * [  0  ][  0  ]
 *
 * These must be preserved until the leading ref drops to zero:
 *               [  1  ][  0  ][  0  ][  0  ][  0  ][  0  ][  0  ]
 *
 * When a query starts we take a reference on the current tail of
 * the list, knowing that no already-buffered samples can possibly
 * relate to the newly-started query. A pointer to this node is
 * also saved in the query object's ->oa.samples_head.
 *
 * E.g. starting query A while there are two nodes in .sample_buffers:
 *                ________________A________
 *                |
 *
 * [  0  ][  1  ]
 *            ^_______ Add a reference and store pointer to node in
 *                     A->oa.samples_head
 *
 * Moving forward to when the B query starts with no new buffer nodes:
 * (for reference, i915 perf reads() are only done when queries finish)
 *                ________________A_______
 *                | ________B___
 *                | |
 *
 * [  0  ][  2  ]
 *            ^_______ Add a reference and store pointer to
 *                     node in B->oa.samples_head
 *
 * Once a query is finished, i.e. once an OA query has become 'Ready',
 * once the End OA report has landed and once we have processed all
 * the intermediate periodic samples, we drop the
 * ->oa.samples_head reference we took at the start.
 *
 * So when the B query has finished we have:
 *                ________________A________
 *                | ______B___________
 *                | |                |
 * [  0  ][  1  ][  0  ][  0  ][  0  ]
 *            ^_______ Drop B->oa.samples_head reference
 *
 * We still can't free these due to the A->oa.samples_head ref:
 *        [  1  ][  0  ][  0  ][  0  ]
 *
 * When the A query finishes: (note there's a new ref for C's samples_head)
 *                ________________A_________________
 *                |                                 |
 *                |                    _____C_________
 *                |                    |             |
 * [  0  ][  0  ][  0  ][  0  ][  1  ][  0  ][  0  ]
 *                ^_______ Drop A->oa.samples_head reference
 *
 * And we can now reap these nodes up to the C->oa.samples_head:
 * [  X  ][  X  ][  X  ][  X  ]
 *                  keeping -> [  1  ][  0  ][  0  ]
 *
 * We reap old sample buffers each time we finish processing an OA
 * query by iterating the sample_buffers list from the head until we
 * find a referenced node and stop.
 *
 * Reaped buffers move to the perf_ctx->free_sample_buffers list and
 * when we come to read() we first look to recycle a buffer from the
 * free_sample_buffers list before allocating a new buffer.
 */
struct oa_sample_buf {
   struct exec_node link;
   int refcount;
   int len;
   uint8_t buf[I915_PERF_OA_SAMPLE_SIZE * 10];
   uint32_t last_timestamp;
};

/**
 * gen representation of a performance query object.
 *
 * NB: We want to keep this structure relatively lean considering that
 * applications may expect to allocate enough objects to be able to
 * query around all draw calls in a frame.
 */
struct gen_perf_query_object
{
   const struct gen_perf_query_info *queryinfo;

   /* See query->kind to know which state below is in use... */
   union {
      struct {

         /**
          * BO containing OA counter snapshots at query Begin/End time.
          */
         void *bo;

         /**
          * Address of the mapped @bo
          */
         void *map;

         /**
          * The MI_REPORT_PERF_COUNT command lets us specify a unique
          * ID that will be reflected in the resulting OA report
          * that's written by the GPU. This is the ID we're expecting
          * in the begin report and the end report should be
          * @begin_report_id + 1.
          */
         int begin_report_id;

         /**
          * Reference the head of the perf_ctx->sample_buffers
          * list at the time that the query started (so we only need
          * to look at nodes after this point when looking for samples
          * related to this query)
          *
          * (See struct oa_sample_buf description for more details)
          */
         struct exec_node *samples_head;

         /**
          * false while in the unaccumulated_elements list, and set to
          * true when the final, end MI_RPC snapshot has been
          * accumulated.
          */
         bool results_accumulated;

         /**
          * Frequency of the GT at begin and end of the query.
          */
         uint64_t gt_frequency[2];

         /**
          * Accumulated OA results between begin and end of the query.
          */
         struct gen_perf_query_result result;
      } oa;

      struct {
         /**
          * BO containing starting and ending snapshots for the
          * statistics counters.
          */
         void *bo;
      } pipeline_stats;
   };
};

struct gen_perf_context {
   struct gen_perf_config *perf;

   void * ctx;  /* driver context (eg, brw_context) */
   void * bufmgr;
   const struct gen_device_info *devinfo;

   uint32_t hw_ctx;
   int drm_fd;

   /* The i915 perf stream we open to setup + enable the OA counters */
   int oa_stream_fd;

   /* An i915 perf stream fd gives exclusive access to the OA unit that will
    * report counter snapshots for a specific counter set/profile in a
    * specific layout/format so we can only start OA queries that are
    * compatible with the currently open fd...
    */
   int current_oa_metrics_set_id;
   int current_oa_format;

   /* List of buffers containing OA reports */
   struct exec_list sample_buffers;

   /* Cached list of empty sample buffers */
   struct exec_list free_sample_buffers;

   int n_active_oa_queries;
   int n_active_pipeline_stats_queries;

   /* The number of queries depending on running OA counters, which
    * extends beyond gen_perf_end_query() since we need to wait until
    * the last MI_RPC command has been parsed by the GPU.
    *
    * Accurate accounting is important here as emitting an
    * MI_REPORT_PERF_COUNT command while the OA unit is disabled will
    * effectively hang the GPU.
    */
   int n_oa_users;

   /* To help catch a spurious problem with the hardware or perf
    * forwarding samples, we emit each MI_REPORT_PERF_COUNT command
    * with a unique ID that we can explicitly check for...
    */
   int next_query_start_report_id;

   /**
    * An array of queries whose results haven't yet been assembled
    * based on the data in buffer objects.
    *
    * These may be active, or have already ended. However, the
    * results have not been requested.
    */
   struct gen_perf_query_object **unaccumulated;
   int unaccumulated_elements;
   int unaccumulated_array_size;

   /* The total number of query objects so we can relinquish
    * our exclusive access to perf if the application deletes
    * all of its objects. (NB: We only disable perf while
    * there are no active queries)
    */
   int n_query_instances;
};

static bool
inc_n_users(struct gen_perf_context *perf_ctx)
{
   if (perf_ctx->n_oa_users == 0 &&
       gen_ioctl(perf_ctx->oa_stream_fd, I915_PERF_IOCTL_ENABLE, 0) < 0)
   {
      return false;
   }
   ++perf_ctx->n_oa_users;

   return true;
}

static void
dec_n_users(struct gen_perf_context *perf_ctx)
{
   /* Disabling the i915 perf stream will effectively disable the OA
    * counters. Note it's important to be sure there are no outstanding
    * MI_RPC commands at this point since they could stall the CS
    * indefinitely once OACONTROL is disabled.
    */
   --perf_ctx->n_oa_users;
   if (perf_ctx->n_oa_users == 0 &&
       gen_ioctl(perf_ctx->oa_stream_fd, I915_PERF_IOCTL_DISABLE, 0) < 0)
   {
      DBG("WARNING: Error disabling gen perf stream: %m\n");
   }
}

static void
gen_perf_close(struct gen_perf_context *perfquery,
               const struct gen_perf_query_info *query)
{
   if (perfquery->oa_stream_fd != -1) {
      close(perfquery->oa_stream_fd);
      perfquery->oa_stream_fd = -1;
   }
   if (query->kind == GEN_PERF_QUERY_TYPE_RAW) {
      struct gen_perf_query_info *raw_query =
         (struct gen_perf_query_info *) query;
      raw_query->oa_metrics_set_id = 0;
   }
}

#define NUM_PERF_PROPERTIES(array) (ARRAY_SIZE(array) / 2)

static bool
gen_perf_open(struct gen_perf_context *perf_ctx,
              int metrics_set_id,
              int report_format,
              int period_exponent,
              int drm_fd,
              uint32_t ctx_id)
{
   uint64_t properties[] = {
      /* Single context sampling */
      DRM_I915_PERF_PROP_CTX_HANDLE, ctx_id,

      /* Include OA reports in samples */
      DRM_I915_PERF_PROP_SAMPLE_OA, true,

      /* OA unit configuration */
      DRM_I915_PERF_PROP_OA_METRICS_SET, metrics_set_id,
      DRM_I915_PERF_PROP_OA_FORMAT, report_format,
      DRM_I915_PERF_PROP_OA_EXPONENT, period_exponent,

      /* SSEU configuration */
      DRM_I915_PERF_PROP_GLOBAL_SSEU, to_user_pointer(&perf_ctx->perf->sseu),
   };
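   /* Note: the GLOBAL_SSEU property pair above must stay last; on kernels
    * with i915 perf version < 4 we drop it by passing
    * NUM_PERF_PROPERTIES(properties) - 1 below.
    */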
   struct drm_i915_perf_open_param param = {
      .flags = I915_PERF_FLAG_FD_CLOEXEC |
               I915_PERF_FLAG_FD_NONBLOCK |
               I915_PERF_FLAG_DISABLED,
      .num_properties = perf_ctx->perf->i915_perf_version >= 4 ?
                        NUM_PERF_PROPERTIES(properties) :
                        NUM_PERF_PROPERTIES(properties) - 1,
      .properties_ptr = (uintptr_t) properties,
   };
   int fd = gen_ioctl(drm_fd, DRM_IOCTL_I915_PERF_OPEN, &param);
   if (fd == -1) {
      DBG("Error opening gen perf OA stream: %m\n");
      return false;
   }

   perf_ctx->oa_stream_fd = fd;

   perf_ctx->current_oa_metrics_set_id = metrics_set_id;
   perf_ctx->current_oa_format = report_format;

   return true;
}

static uint64_t
get_metric_id(struct gen_perf_config *perf,
              const struct gen_perf_query_info *query)
{
   /* These queries are known never to change, their config ID has been
    * loaded upon the first query creation. No need to look them up again.
    */
   if (query->kind == GEN_PERF_QUERY_TYPE_OA)
      return query->oa_metrics_set_id;

   assert(query->kind == GEN_PERF_QUERY_TYPE_RAW);

   /* Raw queries can be reprogrammed by an external application/library.
    * When a raw query is used for the first time its ID is set to a value !=
    * 0. When it stops being used the ID returns to 0. No need to reload the
    * ID when it's already loaded.
    */
   if (query->oa_metrics_set_id != 0) {
      DBG("Raw query '%s' guid=%s using cached ID: %"PRIu64"\n",
          query->name, query->guid, query->oa_metrics_set_id);
      return query->oa_metrics_set_id;
   }

   struct gen_perf_query_info *raw_query = (struct gen_perf_query_info *)query;
   if (!gen_perf_load_metric_id(perf, query->guid,
                                &raw_query->oa_metrics_set_id)) {
      DBG("Unable to read query guid=%s ID, falling back to test config\n",
          query->guid);
      raw_query->oa_metrics_set_id = perf->fallback_raw_oa_metric;
   } else {
      DBG("Raw query '%s' guid=%s loaded ID: %"PRIu64"\n",
          query->name, query->guid, query->oa_metrics_set_id);
   }
   return query->oa_metrics_set_id;
}

static struct oa_sample_buf *
get_free_sample_buf(struct gen_perf_context *perf_ctx)
{
   struct exec_node *node = exec_list_pop_head(&perf_ctx->free_sample_buffers);
   struct oa_sample_buf *buf;

   if (node)
      buf = exec_node_data(struct oa_sample_buf, node, link);
   else {
      buf = ralloc_size(perf_ctx->perf, sizeof(*buf));

      exec_node_init(&buf->link);
      buf->refcount = 0;
   }
   buf->len = 0;

   return buf;
}

static void
reap_old_sample_buffers(struct gen_perf_context *perf_ctx)
{
   struct exec_node *tail_node =
      exec_list_get_tail(&perf_ctx->sample_buffers);
   struct oa_sample_buf *tail_buf =
      exec_node_data(struct oa_sample_buf, tail_node, link);

   /* Remove all old, unreferenced sample buffers walking forward from
    * the head of the list, except always leave at least one node in
    * the list so we always have a node to reference when we Begin
    * a new query.
    */
   foreach_list_typed_safe(struct oa_sample_buf, buf, link,
                           &perf_ctx->sample_buffers)
   {
      if (buf->refcount == 0 && buf != tail_buf) {
         exec_node_remove(&buf->link);
         exec_list_push_head(&perf_ctx->free_sample_buffers, &buf->link);
      } else
         return;
   }
}

static void
free_sample_bufs(struct gen_perf_context *perf_ctx)
{
   foreach_list_typed_safe(struct oa_sample_buf, buf, link,
                           &perf_ctx->free_sample_buffers)
      ralloc_free(buf);

   exec_list_make_empty(&perf_ctx->free_sample_buffers);
}


struct gen_perf_query_object *
gen_perf_new_query(struct gen_perf_context *perf_ctx, unsigned query_index)
{
   const struct gen_perf_query_info *query =
      &perf_ctx->perf->queries[query_index];
   struct gen_perf_query_object *obj =
      calloc(1, sizeof(struct gen_perf_query_object));

   if (!obj)
      return NULL;

   obj->queryinfo = query;

   perf_ctx->n_query_instances++;
   return obj;
}
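
/* A minimal sketch of the call sequence a driver is expected to follow
 * with the query API in this file (illustrative only; perf_ctx, batch,
 * query_index, data_size and data are assumed to come from the driver):
 *
 *    struct gen_perf_query_object *q =
 *       gen_perf_new_query(perf_ctx, query_index);
 *    if (gen_perf_begin_query(perf_ctx, q)) {
 *       ... emit the GPU work to measure ...
 *       gen_perf_end_query(perf_ctx, q);
 *       gen_perf_wait_query(perf_ctx, q, batch);
 *       if (gen_perf_is_query_ready(perf_ctx, q, batch))
 *          gen_perf_get_query_data(perf_ctx, q, batch, data_size,
 *                                  data, &bytes_written);
 *    }
 *    gen_perf_delete_query(perf_ctx, q);
 */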

int
gen_perf_active_queries(struct gen_perf_context *perf_ctx,
                        const struct gen_perf_query_info *query)
{
   assert(perf_ctx->n_active_oa_queries == 0 ||
          perf_ctx->n_active_pipeline_stats_queries == 0);

   switch (query->kind) {
   case GEN_PERF_QUERY_TYPE_OA:
   case GEN_PERF_QUERY_TYPE_RAW:
      return perf_ctx->n_active_oa_queries;
      break;

   case GEN_PERF_QUERY_TYPE_PIPELINE:
      return perf_ctx->n_active_pipeline_stats_queries;
      break;

   default:
      unreachable("Unknown query type");
      break;
   }
}

const struct gen_perf_query_info*
gen_perf_query_info(const struct gen_perf_query_object *query)
{
   return query->queryinfo;
}

struct gen_perf_context *
gen_perf_new_context(void *parent)
{
   struct gen_perf_context *ctx = rzalloc(parent, struct gen_perf_context);
   if (!ctx)
      fprintf(stderr, "%s: failed to alloc context\n", __func__);
   return ctx;
}

struct gen_perf_config *
gen_perf_config(struct gen_perf_context *ctx)
{
   return ctx->perf;
}

void
gen_perf_init_context(struct gen_perf_context *perf_ctx,
                      struct gen_perf_config *perf_cfg,
                      void * ctx,  /* driver context (eg, brw_context) */
                      void * bufmgr,  /* eg brw_bufmgr */
                      const struct gen_device_info *devinfo,
                      uint32_t hw_ctx,
                      int drm_fd)
{
   perf_ctx->perf = perf_cfg;
   perf_ctx->ctx = ctx;
   perf_ctx->bufmgr = bufmgr;
   perf_ctx->drm_fd = drm_fd;
   perf_ctx->hw_ctx = hw_ctx;
   perf_ctx->devinfo = devinfo;

   perf_ctx->unaccumulated =
      ralloc_array(ctx, struct gen_perf_query_object *, 2);
   perf_ctx->unaccumulated_elements = 0;
   perf_ctx->unaccumulated_array_size = 2;

   exec_list_make_empty(&perf_ctx->sample_buffers);
   exec_list_make_empty(&perf_ctx->free_sample_buffers);

   /* It's convenient to guarantee that this linked list of sample
    * buffers is never empty so we add an empty head so when we
    * Begin an OA query we can always take a reference on a buffer
    * in this list.
    */
   struct oa_sample_buf *buf = get_free_sample_buf(perf_ctx);
   exec_list_push_head(&perf_ctx->sample_buffers, &buf->link);

   perf_ctx->oa_stream_fd = -1;
   perf_ctx->next_query_start_report_id = 1000;
}

/**
 * Add a query to the global list of "unaccumulated queries."
 *
 * Queries are tracked here until all the associated OA reports have
 * been accumulated via accumulate_oa_reports() after the end
 * MI_REPORT_PERF_COUNT has landed in query->oa.bo.
 */
static void
add_to_unaccumulated_query_list(struct gen_perf_context *perf_ctx,
                                struct gen_perf_query_object *obj)
{
   if (perf_ctx->unaccumulated_elements >=
       perf_ctx->unaccumulated_array_size)
   {
      perf_ctx->unaccumulated_array_size *= 1.5;
      perf_ctx->unaccumulated =
         reralloc(perf_ctx->ctx, perf_ctx->unaccumulated,
                  struct gen_perf_query_object *,
                  perf_ctx->unaccumulated_array_size);
   }

   perf_ctx->unaccumulated[perf_ctx->unaccumulated_elements++] = obj;
}

/**
 * Emit MI_STORE_REGISTER_MEM commands to capture all of the
 * pipeline statistics for the performance query object.
 */
static void
snapshot_statistics_registers(struct gen_perf_context *ctx,
                              struct gen_perf_query_object *obj,
                              uint32_t offset_in_bytes)
{
   struct gen_perf_config *perf = ctx->perf;
   const struct gen_perf_query_info *query = obj->queryinfo;
   const int n_counters = query->n_counters;

   for (int i = 0; i < n_counters; i++) {
      const struct gen_perf_query_counter *counter = &query->counters[i];

      assert(counter->data_type == GEN_PERF_COUNTER_DATA_TYPE_UINT64);

      perf->vtbl.store_register_mem(ctx->ctx, obj->pipeline_stats.bo,
                                    counter->pipeline_stat.reg, 8,
                                    offset_in_bytes + counter->offset);
   }
}

static void
snapshot_freq_register(struct gen_perf_context *ctx,
                       struct gen_perf_query_object *query,
                       uint32_t bo_offset)
{
   struct gen_perf_config *perf = ctx->perf;
   const struct gen_device_info *devinfo = ctx->devinfo;

   if (devinfo->gen == 8 && !devinfo->is_cherryview)
      perf->vtbl.store_register_mem(ctx->ctx, query->oa.bo,
                                    GEN7_RPSTAT1, 4, bo_offset);
   else if (devinfo->gen >= 9)
      perf->vtbl.store_register_mem(ctx->ctx, query->oa.bo,
                                    GEN9_RPSTAT0, 4, bo_offset);
}

bool
gen_perf_begin_query(struct gen_perf_context *perf_ctx,
                     struct gen_perf_query_object *query)
{
   struct gen_perf_config *perf_cfg = perf_ctx->perf;
   const struct gen_perf_query_info *queryinfo = query->queryinfo;

   /* XXX: We have to consider that the command parser unit that parses batch
    * buffer commands and is used to capture begin/end counter snapshots isn't
    * implicitly synchronized with what's currently running across other GPU
    * units (such as the EUs running shaders) that the performance counters are
    * associated with.
    *
    * The intention of performance queries is to measure the work associated
    * with commands between the begin/end delimiters and so for that to be the
    * case we need to explicitly synchronize the parsing of commands to capture
    * Begin/End counter snapshots with what's running across other parts of the
    * GPU.
    *
    * When the command parser reaches a Begin marker it effectively needs to
    * drain everything currently running on the GPU until the hardware is idle
    * before capturing the first snapshot of counters - otherwise the results
    * would also be measuring the effects of earlier commands.
    *
    * When the command parser reaches an End marker it needs to stall until
    * everything currently running on the GPU has finished before capturing the
    * end snapshot - otherwise the results won't be a complete representation
    * of the work.
    *
    * To achieve this, we stall the pipeline at pixel scoreboard (prevent any
    * additional work from being processed by the pipeline until all pixels of
    * the previous draw have been completed).
    *
    * N.B. The final results are based on deltas of counters between (inside)
    * Begin/End markers so even though the total wall clock time of the
    * workload is stretched by larger pipeline bubbles the bubbles themselves
    * are generally invisible to the query results. Whether that's a good or a
    * bad thing depends on the use case. For a lower real-time impact while
    * capturing metrics, periodic sampling may be a better choice than
    * INTEL_performance_query.
    *
    *
    * This is our Begin synchronization point to drain current work on the
    * GPU before we capture our first counter snapshot...
    */
   perf_cfg->vtbl.emit_stall_at_pixel_scoreboard(perf_ctx->ctx);

   switch (queryinfo->kind) {
   case GEN_PERF_QUERY_TYPE_OA:
   case GEN_PERF_QUERY_TYPE_RAW: {

      /* Opening an i915 perf stream implies exclusive access to the OA unit
       * which will generate counter reports for a specific counter set with a
       * specific layout/format so we can't begin any OA based queries that
       * require a different counter set or format unless we get an opportunity
       * to close the stream and open a new one...
       */
      uint64_t metric_id = get_metric_id(perf_ctx->perf, queryinfo);

      if (perf_ctx->oa_stream_fd != -1 &&
          perf_ctx->current_oa_metrics_set_id != metric_id) {

         if (perf_ctx->n_oa_users != 0) {
            DBG("WARNING: Begin failed already using perf config=%i/%"PRIu64"\n",
                perf_ctx->current_oa_metrics_set_id, metric_id);
            return false;
         } else
            gen_perf_close(perf_ctx, queryinfo);
      }

      /* If the OA counters aren't already on, enable them. */
      if (perf_ctx->oa_stream_fd == -1) {
         const struct gen_device_info *devinfo = perf_ctx->devinfo;

         /* The period_exponent gives a sampling period as follows:
          *   sample_period = timestamp_period * 2^(period_exponent + 1)
          *
          * The timestamp increments every 80ns (HSW), ~52ns (GEN9LP) or
          * ~83ns (GEN8/9).
          *
          * The counter overflow period is derived from the EuActive counter
          * which reads a counter that increments by the number of clock
          * cycles multiplied by the number of EUs. It can be calculated as:
          *
          * 2^(number of bits in A counter) / (n_eus * max_gen_freq * 2)
          *
          * (E.g. 40 EUs @ 1GHz = ~53ms)
          *
          * We select a sampling period below that overflow period to ensure
          * we cannot see more than 1 counter overflow, otherwise we could
          * lose information.
          */

         int a_counter_in_bits = 32;
         if (devinfo->gen >= 8)
            a_counter_in_bits = 40;

         uint64_t overflow_period =
            pow(2, a_counter_in_bits) / (perf_cfg->sys_vars.n_eus *
                                         /* drop 1GHz freq to have units in nanoseconds */
                                         2);
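
         /* A worked instance of the formula above (matching the example in
          * the comment): with a 32bit A counter and 40 EUs @ 1GHz,
          * 2^32 / (40 * 2) = ~53,687,091ns = ~53ms.
          */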

         DBG("A counter overflow period: %"PRIu64"ns, %"PRIu64"ms (n_eus=%"PRIu64")\n",
             overflow_period, overflow_period / 1000000ul, perf_cfg->sys_vars.n_eus);

         int period_exponent = 0;
         uint64_t prev_sample_period, next_sample_period;
         for (int e = 0; e < 30; e++) {
            prev_sample_period = 1000000000ull * pow(2, e + 1) / devinfo->timestamp_frequency;
            next_sample_period = 1000000000ull * pow(2, e + 2) / devinfo->timestamp_frequency;

            /* Take the previous sampling period, lower than the overflow
             * period.
             */
            if (prev_sample_period < overflow_period &&
                next_sample_period > overflow_period)
               period_exponent = e + 1;
         }
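
         /* For example (a sketch, assuming the 80ns HSW timestamp period
          * and the ~53ms overflow period above): 80ns * 2^19 = ~41.9ms is
          * the largest sampling period still below the overflow period,
          * while 80ns * 2^20 = ~83.9ms is above it, so the loop settles on
          * period_exponent = 19.
          */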

         if (period_exponent == 0) {
            DBG("WARNING: unable to find a sampling exponent\n");
            return false;
         }

         DBG("OA sampling exponent: %i ~= %"PRIu64"ms\n", period_exponent,
             prev_sample_period / 1000000ul);

         if (!gen_perf_open(perf_ctx, metric_id, queryinfo->oa_format,
                            period_exponent, perf_ctx->drm_fd,
                            perf_ctx->hw_ctx))
            return false;
      } else {
         assert(perf_ctx->current_oa_metrics_set_id == metric_id &&
                perf_ctx->current_oa_format == queryinfo->oa_format);
      }

      if (!inc_n_users(perf_ctx)) {
         DBG("WARNING: Error enabling i915 perf stream: %m\n");
         return false;
      }

      if (query->oa.bo) {
         perf_cfg->vtbl.bo_unreference(query->oa.bo);
         query->oa.bo = NULL;
      }

      query->oa.bo = perf_cfg->vtbl.bo_alloc(perf_ctx->bufmgr,
                                             "perf. query OA MI_RPC bo",
                                             MI_RPC_BO_SIZE);
#ifdef DEBUG
      /* Pre-filling the BO helps debug whether writes landed. */
      void *map = perf_cfg->vtbl.bo_map(perf_ctx->ctx, query->oa.bo, MAP_WRITE);
      memset(map, 0x80, MI_RPC_BO_SIZE);
      perf_cfg->vtbl.bo_unmap(query->oa.bo);
#endif

      query->oa.begin_report_id = perf_ctx->next_query_start_report_id;
      perf_ctx->next_query_start_report_id += 2;

      /* Take a starting OA counter snapshot. */
      perf_cfg->vtbl.emit_mi_report_perf_count(perf_ctx->ctx, query->oa.bo, 0,
                                               query->oa.begin_report_id);
      snapshot_freq_register(perf_ctx, query, MI_FREQ_START_OFFSET_BYTES);

      ++perf_ctx->n_active_oa_queries;

      /* No already-buffered samples can possibly be associated with this query
       * so create a marker within the list of sample buffers enabling us to
       * easily ignore earlier samples when processing this query after
       * completion.
       */
      assert(!exec_list_is_empty(&perf_ctx->sample_buffers));
      query->oa.samples_head = exec_list_get_tail(&perf_ctx->sample_buffers);

      struct oa_sample_buf *buf =
         exec_node_data(struct oa_sample_buf, query->oa.samples_head, link);

      /* This reference will ensure that future/following sample
       * buffers (that may relate to this query) can't be freed until
       * this drops to zero.
       */
      buf->refcount++;

      gen_perf_query_result_clear(&query->oa.result);
      query->oa.results_accumulated = false;

      add_to_unaccumulated_query_list(perf_ctx, query);
      break;
   }

   case GEN_PERF_QUERY_TYPE_PIPELINE:
      if (query->pipeline_stats.bo) {
         perf_cfg->vtbl.bo_unreference(query->pipeline_stats.bo);
         query->pipeline_stats.bo = NULL;
      }

      query->pipeline_stats.bo =
         perf_cfg->vtbl.bo_alloc(perf_ctx->bufmgr,
                                 "perf. query pipeline stats bo",
                                 STATS_BO_SIZE);

      /* Take starting snapshots. */
      snapshot_statistics_registers(perf_ctx, query, 0);

      ++perf_ctx->n_active_pipeline_stats_queries;
      break;

   default:
      unreachable("Unknown query type");
      break;
   }

   return true;
}

void
gen_perf_end_query(struct gen_perf_context *perf_ctx,
                   struct gen_perf_query_object *query)
{
   struct gen_perf_config *perf_cfg = perf_ctx->perf;

   /* Ensure that the work associated with the queried commands will have
    * finished before taking our query end counter readings.
    *
    * For more details see the comment in gen_perf_begin_query for the
    * corresponding flush.
    */
   perf_cfg->vtbl.emit_stall_at_pixel_scoreboard(perf_ctx->ctx);

   switch (query->queryinfo->kind) {
   case GEN_PERF_QUERY_TYPE_OA:
   case GEN_PERF_QUERY_TYPE_RAW:

      /* NB: It's possible that the query will have already been marked
       * as 'accumulated' if an error was seen while reading samples
       * from perf. In this case we mustn't try to emit a closing
       * MI_RPC command in case the OA unit has already been disabled
       */
      if (!query->oa.results_accumulated) {
         /* Take an ending OA counter snapshot. */
         snapshot_freq_register(perf_ctx, query, MI_FREQ_END_OFFSET_BYTES);
         perf_cfg->vtbl.emit_mi_report_perf_count(perf_ctx->ctx, query->oa.bo,
                                                  MI_RPC_BO_END_OFFSET_BYTES,
                                                  query->oa.begin_report_id + 1);
      }

      --perf_ctx->n_active_oa_queries;

      /* NB: even though the query has now ended, it can't be accumulated
       * until the end MI_REPORT_PERF_COUNT snapshot has been written
       * to query->oa.bo
       */
      break;

   case GEN_PERF_QUERY_TYPE_PIPELINE:
      snapshot_statistics_registers(perf_ctx, query,
                                    STATS_BO_END_OFFSET_BYTES);
      --perf_ctx->n_active_pipeline_stats_queries;
      break;

   default:
      unreachable("Unknown query type");
      break;
   }
}

enum OaReadStatus {
   OA_READ_STATUS_ERROR,
   OA_READ_STATUS_UNFINISHED,
   OA_READ_STATUS_FINISHED,
};

static enum OaReadStatus
read_oa_samples_until(struct gen_perf_context *perf_ctx,
                      uint32_t start_timestamp,
                      uint32_t end_timestamp)
{
   struct exec_node *tail_node =
      exec_list_get_tail(&perf_ctx->sample_buffers);
   struct oa_sample_buf *tail_buf =
      exec_node_data(struct oa_sample_buf, tail_node, link);
   uint32_t last_timestamp =
      tail_buf->len == 0 ? start_timestamp : tail_buf->last_timestamp;

   while (1) {
      struct oa_sample_buf *buf = get_free_sample_buf(perf_ctx);
      uint32_t offset;
      int len;

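      /* Read a chunk of OA reports into the fresh buffer, retrying as long
       * as the read is interrupted by a signal (EINTR).
       */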
      while ((len = read(perf_ctx->oa_stream_fd, buf->buf,
                         sizeof(buf->buf))) < 0 && errno == EINTR)
         ;

      if (len <= 0) {
         exec_list_push_tail(&perf_ctx->free_sample_buffers, &buf->link);

         if (len == 0) {
            DBG("Spurious EOF reading i915 perf samples\n");
            return OA_READ_STATUS_ERROR;
         }

         if (errno != EAGAIN) {
            DBG("Error reading i915 perf samples: %m\n");
            return OA_READ_STATUS_ERROR;
         }

         if ((last_timestamp - start_timestamp) >= INT32_MAX)
            return OA_READ_STATUS_UNFINISHED;

         if ((last_timestamp - start_timestamp) <
             (end_timestamp - start_timestamp))
            return OA_READ_STATUS_UNFINISHED;

         return OA_READ_STATUS_FINISHED;
      }

      buf->len = len;
      exec_list_push_tail(&perf_ctx->sample_buffers, &buf->link);

      /* Go through the reports and update the last timestamp. */
      offset = 0;
      while (offset < buf->len) {
         const struct drm_i915_perf_record_header *header =
            (const struct drm_i915_perf_record_header *) &buf->buf[offset];
         uint32_t *report = (uint32_t *) (header + 1);

         if (header->type == DRM_I915_PERF_RECORD_SAMPLE)
            last_timestamp = report[1];

         offset += header->size;
      }

      buf->last_timestamp = last_timestamp;
   }

   unreachable("not reached");
   return OA_READ_STATUS_ERROR;
}

/**
 * Try to read all the reports until either the delimiting timestamp
 * or an error arises.
 */
static bool
read_oa_samples_for_query(struct gen_perf_context *perf_ctx,
                          struct gen_perf_query_object *query,
                          void *current_batch)
{
   uint32_t *start;
   uint32_t *last;
   uint32_t *end;
   struct gen_perf_config *perf_cfg = perf_ctx->perf;

   /* We need the MI_REPORT_PERF_COUNT to land before we can start
    * accumulating. */
   assert(!perf_cfg->vtbl.batch_references(current_batch, query->oa.bo) &&
          !perf_cfg->vtbl.bo_busy(query->oa.bo));

   /* Map the BO once here and let accumulate_oa_reports() unmap
    * it. */
   if (query->oa.map == NULL)
      query->oa.map = perf_cfg->vtbl.bo_map(perf_ctx->ctx, query->oa.bo, MAP_READ);

   start = last = query->oa.map;
   end = query->oa.map + MI_RPC_BO_END_OFFSET_BYTES;

   if (start[0] != query->oa.begin_report_id) {
      DBG("Spurious start report id=%"PRIu32"\n", start[0]);
      return true;
   }
   if (end[0] != (query->oa.begin_report_id + 1)) {
      DBG("Spurious end report id=%"PRIu32"\n", end[0]);
      return true;
   }

   /* Read the reports until the end timestamp. */
   switch (read_oa_samples_until(perf_ctx, start[1], end[1])) {
   case OA_READ_STATUS_ERROR:
      /* Fallthrough and let accumulate_oa_reports() deal with the
       * error. */
   case OA_READ_STATUS_FINISHED:
      return true;
   case OA_READ_STATUS_UNFINISHED:
      return false;
   }

   unreachable("invalid read status");
   return false;
}

void
gen_perf_wait_query(struct gen_perf_context *perf_ctx,
                    struct gen_perf_query_object *query,
                    void *current_batch)
{
   struct gen_perf_config *perf_cfg = perf_ctx->perf;
   void *bo = NULL;

   switch (query->queryinfo->kind) {
   case GEN_PERF_QUERY_TYPE_OA:
   case GEN_PERF_QUERY_TYPE_RAW:
      bo = query->oa.bo;
      break;

   case GEN_PERF_QUERY_TYPE_PIPELINE:
      bo = query->pipeline_stats.bo;
      break;

   default:
      unreachable("Unknown query type");
      break;
   }

   if (bo == NULL)
      return;

   /* If the current batch references our results bo then we need to
    * flush first...
    */
   if (perf_cfg->vtbl.batch_references(current_batch, bo))
      perf_cfg->vtbl.batchbuffer_flush(perf_ctx->ctx, __FILE__, __LINE__);

   perf_cfg->vtbl.bo_wait_rendering(bo);
}

bool
gen_perf_is_query_ready(struct gen_perf_context *perf_ctx,
                        struct gen_perf_query_object *query,
                        void *current_batch)
{
   struct gen_perf_config *perf_cfg = perf_ctx->perf;

   switch (query->queryinfo->kind) {
   case GEN_PERF_QUERY_TYPE_OA:
   case GEN_PERF_QUERY_TYPE_RAW:
      return (query->oa.results_accumulated ||
              (query->oa.bo &&
               !perf_cfg->vtbl.batch_references(current_batch, query->oa.bo) &&
               !perf_cfg->vtbl.bo_busy(query->oa.bo)));

   case GEN_PERF_QUERY_TYPE_PIPELINE:
      return (query->pipeline_stats.bo &&
              !perf_cfg->vtbl.batch_references(current_batch, query->pipeline_stats.bo) &&
              !perf_cfg->vtbl.bo_busy(query->pipeline_stats.bo));

   default:
      unreachable("Unknown query type");
      break;
   }

   return false;
}

/**
 * Remove a query from the global list of unaccumulated queries
 * after successfully accumulating the OA reports associated with the
 * query in accumulate_oa_reports() or when discarding unwanted query
 * results.
 */
static void
drop_from_unaccumulated_query_list(struct gen_perf_context *perf_ctx,
                                   struct gen_perf_query_object *query)
{
   for (int i = 0; i < perf_ctx->unaccumulated_elements; i++) {
      if (perf_ctx->unaccumulated[i] == query) {
         int last_elt = --perf_ctx->unaccumulated_elements;

         if (i == last_elt)
            perf_ctx->unaccumulated[i] = NULL;
         else {
            perf_ctx->unaccumulated[i] =
               perf_ctx->unaccumulated[last_elt];
         }

         break;
      }
   }

   /* Drop our samples_head reference so that associated periodic
    * sample data buffers can potentially be reaped if they aren't
    * referenced by any other queries...
    */

   struct oa_sample_buf *buf =
      exec_node_data(struct oa_sample_buf, query->oa.samples_head, link);

   assert(buf->refcount > 0);
   buf->refcount--;

   query->oa.samples_head = NULL;

   reap_old_sample_buffers(perf_ctx);
}

/* In general if we see anything spurious while accumulating results,
 * we don't try to continue accumulating the current query, hoping
 * for the best; we scrap anything outstanding, and then hope for the
 * best with new queries.
 */
static void
discard_all_queries(struct gen_perf_context *perf_ctx)
{
   while (perf_ctx->unaccumulated_elements) {
      struct gen_perf_query_object *query = perf_ctx->unaccumulated[0];

      query->oa.results_accumulated = true;
      drop_from_unaccumulated_query_list(perf_ctx, query);

      dec_n_users(perf_ctx);
   }
}

/* Looks for the validity bit of context ID (dword 2) of an OA report. */
static bool
oa_report_ctx_id_valid(const struct gen_device_info *devinfo,
                       const uint32_t *report)
{
   assert(devinfo->gen >= 8);
   if (devinfo->gen == 8)
      return (report[0] & (1 << 25)) != 0;
   return (report[0] & (1 << 16)) != 0;
}

/**
 * Accumulate raw OA counter values based on deltas between pairs of
 * OA reports.
 *
 * Accumulation starts from the first report captured via
 * MI_REPORT_PERF_COUNT (MI_RPC) by gen_perf_begin_query() until the
 * last MI_RPC report requested by gen_perf_end_query(). Between these
 * two reports there may also be some number of periodically sampled OA
 * reports collected via the i915 perf interface - depending on the
 * duration of the query.
 *
 * These periodic snapshots help to ensure we handle counter overflow
 * correctly by being frequent enough to ensure we don't miss multiple
 * overflows of a counter between snapshots. For Gen8+ the i915 perf
 * snapshots provide the extra context-switch reports that let us
 * subtract out the progress of counters associated with other
 * contexts running on the system.
 */
static void
accumulate_oa_reports(struct gen_perf_context *perf_ctx,
                      struct gen_perf_query_object *query)
{
   const struct gen_device_info *devinfo = perf_ctx->devinfo;
   uint32_t *start;
   uint32_t *last;
   uint32_t *end;
   struct exec_node *first_samples_node;
   bool last_report_ctx_match = true;
   int out_duration = 0;

   assert(query->oa.map != NULL);

   start = last = query->oa.map;
   end = query->oa.map + MI_RPC_BO_END_OFFSET_BYTES;

   if (start[0] != query->oa.begin_report_id) {
      DBG("Spurious start report id=%"PRIu32"\n", start[0]);
      goto error;
   }
   if (end[0] != (query->oa.begin_report_id + 1)) {
      DBG("Spurious end report id=%"PRIu32"\n", end[0]);
      goto error;
   }

   /* On Gen12+ OA reports are sourced from per-context counters, so we don't
    * ever have to look at the global OA buffer. Yey \o/
    */
   if (perf_ctx->devinfo->gen >= 12) {
      last = start;
      goto end;
   }

   /* See if we have any periodic reports to accumulate too... */

   /* N.B. The oa.samples_head was set when the query began and
    * pointed to the tail of the perf_ctx->sample_buffers list at
    * the time the query started. Since the buffer existed before the
    * first MI_REPORT_PERF_COUNT command was emitted we therefore know
    * that no data in this particular node's buffer can possibly be
    * associated with the query - so skip ahead one...
    */
   first_samples_node = query->oa.samples_head->next;

   foreach_list_typed_from(struct oa_sample_buf, buf, link,
                           &perf_ctx->sample_buffers,
                           first_samples_node)
   {
      int offset = 0;

      while (offset < buf->len) {
         const struct drm_i915_perf_record_header *header =
            (const struct drm_i915_perf_record_header *)(buf->buf + offset);

         assert(header->size != 0);
         assert(header->size <= buf->len);

         offset += header->size;

         switch (header->type) {
         case DRM_I915_PERF_RECORD_SAMPLE: {
            uint32_t *report = (uint32_t *)(header + 1);
            bool report_ctx_match = true;
            bool add = true;

            /* Ignore reports that come before the start marker.
             * (Note: takes care to allow overflow of 32bit timestamps)
             */
            if (gen_device_info_timebase_scale(devinfo,
                                               report[1] - start[1]) > 5000000000) {
               continue;
            }

            /* Ignore reports that come after the end marker.
             * (Note: takes care to allow overflow of 32bit timestamps)
             */
            if (gen_device_info_timebase_scale(devinfo,
                                               report[1] - end[1]) <= 5000000000) {
               goto end;
            }
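
            /* An illustrative restating of the two 32bit-wrap checks
             * above: report[1] - start[1] and report[1] - end[1] are
             * computed modulo 2^32, so a report older than the snapshot
             * it is compared against wraps to a huge delta that scales
             * to well over 5 seconds. A scaled delta from start above 5s
             * therefore means "before the start marker" (skip it), while
             * a scaled delta from end of at most 5s means "at or after
             * the end marker" (stop accumulating).
             */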

            /* For Gen8+ since the counters continue while other
             * contexts are running we need to discount any unrelated
             * deltas. The hardware automatically generates a report
             * on context switch which gives us a new reference point
             * to continue adding deltas from.
             *
             * For Haswell we can rely on the HW to stop the progress
             * of OA counters while any other context is active.
             */
            if (devinfo->gen >= 8) {
               /* Consider that the current report matches our context only if
                * the report says the report ID is valid.
                */
               report_ctx_match = oa_report_ctx_id_valid(devinfo, report) &&
                                  report[2] == start[2];
               if (report_ctx_match)
                  out_duration = 0;
               else
                  out_duration++;

               /* Only add the delta between <last, report> if the last report
                * was clearly identified as our context, or if we have at most
                * 1 report without a matching ID.
                *
                * The OA unit will sometimes label reports with an invalid
                * context ID when i915 rewrites the execlist submit register
                * with the same context as the one currently running. This
                * happens when i915 wants to notify the HW of a ringbuffer
                * tail register update. We have to consider this report as
                * part of our context as the 3d pipeline behind the OACS unit
                * is still processing the operations started at the previous
                * execlist submission.
                */
               add = last_report_ctx_match && out_duration < 2;
            }

            if (add) {
               gen_perf_query_result_accumulate(&query->oa.result,
                                                query->queryinfo,
                                                last, report);
            } else {
               /* We're not adding the delta because we've identified it's not
                * for the context we filter for. We can consider that the
                * query was split.
                */
               query->oa.result.query_disjoint = true;
            }

            last = report;
            last_report_ctx_match = report_ctx_match;

            break;
         }

         case DRM_I915_PERF_RECORD_OA_BUFFER_LOST:
            DBG("i915 perf: OA error: all reports lost\n");
            goto error;
         case DRM_I915_PERF_RECORD_OA_REPORT_LOST:
            DBG("i915 perf: OA report lost\n");
            break;
         }
      }
   }

end:

   gen_perf_query_result_accumulate(&query->oa.result, query->queryinfo,
                                    last, end);

   query->oa.results_accumulated = true;
   drop_from_unaccumulated_query_list(perf_ctx, query);
   dec_n_users(perf_ctx);

   return;

error:

   discard_all_queries(perf_ctx);
}

void
gen_perf_delete_query(struct gen_perf_context *perf_ctx,
                      struct gen_perf_query_object *query)
{
   struct gen_perf_config *perf_cfg = perf_ctx->perf;

   /* We can assume that the frontend waits for a query to complete
    * before ever calling into here, so we don't have to worry about
    * deleting an in-flight query object.
    */
   switch (query->queryinfo->kind) {
   case GEN_PERF_QUERY_TYPE_OA:
   case GEN_PERF_QUERY_TYPE_RAW:
      if (query->oa.bo) {
         if (!query->oa.results_accumulated) {
            drop_from_unaccumulated_query_list(perf_ctx, query);
            dec_n_users(perf_ctx);
         }

         perf_cfg->vtbl.bo_unreference(query->oa.bo);
         query->oa.bo = NULL;
      }

      query->oa.results_accumulated = false;
      break;

   case GEN_PERF_QUERY_TYPE_PIPELINE:
      if (query->pipeline_stats.bo) {
         perf_cfg->vtbl.bo_unreference(query->pipeline_stats.bo);
         query->pipeline_stats.bo = NULL;
      }
      break;

   default:
      unreachable("Unknown query type");
      break;
   }

   /* As an indication that the INTEL_performance_query extension is no
    * longer in use, it's a good time to free our cache of sample
    * buffers and close any current i915-perf stream.
    */
   if (--perf_ctx->n_query_instances == 0) {
      free_sample_bufs(perf_ctx);
      gen_perf_close(perf_ctx, query->queryinfo);
   }

   free(query);
}

#define GET_FIELD(word, field) (((word) & field ## _MASK) >> field ## _SHIFT)

static void
read_gt_frequency(struct gen_perf_context *perf_ctx,
                  struct gen_perf_query_object *obj)
{
   const struct gen_device_info *devinfo = perf_ctx->devinfo;
   uint32_t start = *((uint32_t *)(obj->oa.map + MI_FREQ_START_OFFSET_BYTES)),
      end = *((uint32_t *)(obj->oa.map + MI_FREQ_END_OFFSET_BYTES));

   switch (devinfo->gen) {
   case 7:
   case 8:
      obj->oa.gt_frequency[0] = GET_FIELD(start, GEN7_RPSTAT1_CURR_GT_FREQ) * 50ULL;
      obj->oa.gt_frequency[1] = GET_FIELD(end, GEN7_RPSTAT1_CURR_GT_FREQ) * 50ULL;
      break;
   case 9:
   case 11:
   case 12:
      obj->oa.gt_frequency[0] = GET_FIELD(start, GEN9_RPSTAT0_CURR_GT_FREQ) * 50ULL / 3ULL;
      obj->oa.gt_frequency[1] = GET_FIELD(end, GEN9_RPSTAT0_CURR_GT_FREQ) * 50ULL / 3ULL;
      break;
   default:
      unreachable("unexpected gen");
   }
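
   /* For instance (a sketch of the decoding above): on Gen9+ the RPSTAT0
    * frequency field counts in 50MHz / 3 units, so a raw field value of 66
    * decodes to 66 * 50 / 3 = 1100MHz before the conversion to Hz below.
    */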

   /* Put the numbers into Hz. */
   obj->oa.gt_frequency[0] *= 1000000ULL;
   obj->oa.gt_frequency[1] *= 1000000ULL;
}

static int
get_oa_counter_data(struct gen_perf_context *perf_ctx,
                    struct gen_perf_query_object *query,
                    size_t data_size,
                    uint8_t *data)
{
   struct gen_perf_config *perf_cfg = perf_ctx->perf;
   const struct gen_perf_query_info *queryinfo = query->queryinfo;
   int n_counters = queryinfo->n_counters;
   int written = 0;

   for (int i = 0; i < n_counters; i++) {
      const struct gen_perf_query_counter *counter = &queryinfo->counters[i];
      uint64_t *out_uint64;
      float *out_float;
      size_t counter_size = gen_perf_query_counter_get_size(counter);

      if (counter_size) {
         switch (counter->data_type) {
         case GEN_PERF_COUNTER_DATA_TYPE_UINT64:
            out_uint64 = (uint64_t *)(data + counter->offset);
            *out_uint64 =
               counter->oa_counter_read_uint64(perf_cfg, queryinfo,
                                               query->oa.result.accumulator);
            break;
         case GEN_PERF_COUNTER_DATA_TYPE_FLOAT:
            out_float = (float *)(data + counter->offset);
            *out_float =
               counter->oa_counter_read_float(perf_cfg, queryinfo,
                                              query->oa.result.accumulator);
            break;
         default:
            /* So far we aren't using uint32, double or bool32... */
            unreachable("unexpected counter data type");
         }

         if (counter->offset + counter_size > written)
            written = counter->offset + counter_size;
      }
   }

   return written;
}

static int
get_pipeline_stats_data(struct gen_perf_context *perf_ctx,
                        struct gen_perf_query_object *query,
                        size_t data_size,
                        uint8_t *data)
{
   struct gen_perf_config *perf_cfg = perf_ctx->perf;
   const struct gen_perf_query_info *queryinfo = query->queryinfo;
   int n_counters = queryinfo->n_counters;
   uint8_t *p = data;

   uint64_t *start = perf_cfg->vtbl.bo_map(perf_ctx->ctx, query->pipeline_stats.bo, MAP_READ);
   uint64_t *end = start + (STATS_BO_END_OFFSET_BYTES / sizeof(uint64_t));

   for (int i = 0; i < n_counters; i++) {
      const struct gen_perf_query_counter *counter = &queryinfo->counters[i];
      uint64_t value = end[i] - start[i];

      if (counter->pipeline_stat.numerator !=
          counter->pipeline_stat.denominator) {
         value *= counter->pipeline_stat.numerator;
         value /= counter->pipeline_stat.denominator;
      }

      *((uint64_t *)p) = value;
      p += 8;
   }

   perf_cfg->vtbl.bo_unmap(query->pipeline_stats.bo);

   return p - data;
}

void
gen_perf_get_query_data(struct gen_perf_context *perf_ctx,
                        struct gen_perf_query_object *query,
                        void *current_batch,
                        int data_size,
                        unsigned *data,
                        unsigned *bytes_written)
{
   struct gen_perf_config *perf_cfg = perf_ctx->perf;
   int written = 0;

   switch (query->queryinfo->kind) {
   case GEN_PERF_QUERY_TYPE_OA:
   case GEN_PERF_QUERY_TYPE_RAW:
      if (!query->oa.results_accumulated) {
         /* Due to the sampling frequency of the OA buffer by the i915-perf
          * driver, there can be a 5ms delay between Mesa seeing the query
          * complete and i915 making all the OA buffer reports available to
          * us. We need to wait for all the reports to come in before we can
          * do the post processing removing unrelated deltas.
          * There is an i915-perf series to address this issue, but it has
          * not been merged upstream yet.
          */
         while (!read_oa_samples_for_query(perf_ctx, query, current_batch))
            ;

         read_gt_frequency(perf_ctx, query);
         uint32_t *begin_report = query->oa.map;
         uint32_t *end_report = query->oa.map + MI_RPC_BO_END_OFFSET_BYTES;
         gen_perf_query_result_read_frequencies(&query->oa.result,
                                                perf_ctx->devinfo,
                                                begin_report,
                                                end_report);
         accumulate_oa_reports(perf_ctx, query);
         assert(query->oa.results_accumulated);

         perf_cfg->vtbl.bo_unmap(query->oa.bo);
         query->oa.map = NULL;
      }
      if (query->queryinfo->kind == GEN_PERF_QUERY_TYPE_OA) {
         written = get_oa_counter_data(perf_ctx, query, data_size, (uint8_t *)data);
      } else {
         const struct gen_device_info *devinfo = perf_ctx->devinfo;

         written = gen_perf_query_result_write_mdapi((uint8_t *)data, data_size,
                                                     devinfo, &query->oa.result,
                                                     query->oa.gt_frequency[0],
                                                     query->oa.gt_frequency[1]);
      }
      break;

   case GEN_PERF_QUERY_TYPE_PIPELINE:
      written = get_pipeline_stats_data(perf_ctx, query, data_size, (uint8_t *)data);
      break;

   default:
      unreachable("Unknown query type");
      break;
   }

   if (bytes_written)
      *bytes_written = written;
}

void
gen_perf_dump_query_count(struct gen_perf_context *perf_ctx)
{
   DBG("Queries: (Open queries = %d, OA users = %d)\n",
       perf_ctx->n_active_oa_queries, perf_ctx->n_oa_users);
}

void
gen_perf_dump_query(struct gen_perf_context *ctx,
                    struct gen_perf_query_object *obj,
                    void *current_batch)
{
   switch (obj->queryinfo->kind) {
   case GEN_PERF_QUERY_TYPE_OA:
   case GEN_PERF_QUERY_TYPE_RAW:
      DBG("BO: %-4s OA data: %-10s %-15s\n",
          obj->oa.bo ? "yes," : "no,",
          gen_perf_is_query_ready(ctx, obj, current_batch) ? "ready," : "not ready,",
          obj->oa.results_accumulated ? "accumulated" : "not accumulated");
      break;
   case GEN_PERF_QUERY_TYPE_PIPELINE:
      DBG("BO: %-4s\n",
          obj->pipeline_stats.bo ? "yes" : "no");
      break;
   default:
      unreachable("Unknown query type");
      break;
   }
}
1600