/*
 * Copyright © 2013 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */

/**
 * \file brw_performance_query.c
 *
 * Implementation of the GL_INTEL_performance_query extension.
 *
 * Currently there are two possible counter sources exposed here:
 *
 * On Gen6+ hardware we have numerous 64-bit Pipeline Statistics Registers
 * that we can snapshot at the beginning and end of a query.
 *
 * On Gen7.5+ we have Observability Architecture counters, which are covered
 * in a separate document from the rest of the PRMs. It is available at:
 * https://01.org/linuxgraphics/documentation/driver-documentation-prms
 * => 2013 Intel Core Processor Family => Observability Performance Counters
 * (This one volume covers Sandybridge, Ivybridge, Baytrail, and Haswell,
 * though notably we currently only support OA counters for Haswell+.)
 */
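
/*
 * For orientation, an application typically drives the hooks below through
 * the GL_INTEL_performance_query entry points roughly as follows (an
 * illustrative sketch only; error handling and the glGetPerfQueryInfoINTEL()
 * call that supplies data_size are omitted):
 *
 *    GLuint query_id, handle, bytes_written;
 *
 *    glGetFirstPerfQueryIdINTEL(&query_id);
 *    glCreatePerfQueryINTEL(query_id, &handle);
 *    glBeginPerfQueryINTEL(handle);
 *    ... issue GL work ...
 *    glEndPerfQueryINTEL(handle);
 *    glGetPerfQueryDataINTEL(handle, GL_PERFQUERY_WAIT_INTEL,
 *                            data_size, data, &bytes_written);
 *    glDeletePerfQueryINTEL(handle);
 */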

#include <limits.h>

/* put before sys/types.h to silence glibc warnings */
#ifdef MAJOR_IN_MKDEV
#include <sys/mkdev.h>
#endif
#ifdef MAJOR_IN_SYSMACROS
#include <sys/sysmacros.h>
#endif
#include <sys/types.h>
#include <sys/stat.h>
#include <fcntl.h>
#include <sys/mman.h>
#include <sys/ioctl.h>

#include <xf86drm.h>
#include "drm-uapi/i915_drm.h"

#include "main/hash.h"
#include "main/macros.h"
#include "main/mtypes.h"
#include "main/performance_query.h"

#include "util/bitset.h"
#include "util/ralloc.h"
#include "util/hash_table.h"
#include "util/list.h"
#include "util/u_math.h"

#include "brw_context.h"
#include "brw_defines.h"
#include "intel_batchbuffer.h"

#include "perf/gen_perf.h"
#include "perf/gen_perf_regs.h"
#include "perf/gen_perf_mdapi.h"
#include "perf/gen_perf_query.h"

#define FILE_DEBUG_FLAG DEBUG_PERFMON

#define OAREPORT_REASON_MASK           0x3f
#define OAREPORT_REASON_SHIFT          19
#define OAREPORT_REASON_TIMER          (1<<0)
#define OAREPORT_REASON_TRIGGER1       (1<<1)
#define OAREPORT_REASON_TRIGGER2       (1<<2)
#define OAREPORT_REASON_CTX_SWITCH     (1<<3)
#define OAREPORT_REASON_GO_TRANSITION  (1<<4)
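
/* Note: on Gen8+ the trigger reason for each OA report is recorded in the
 * report's first dword; a minimal decode sketch (assuming the report is
 * viewed as an array of dwords) looks like:
 *
 *    uint32_t reason = (report[0] >> OAREPORT_REASON_SHIFT) &
 *                      OAREPORT_REASON_MASK;
 *
 * which can then be tested against the OAREPORT_REASON_* flags above.
 */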

struct brw_perf_query_object {
   struct gl_perf_query_object base;
   struct gen_perf_query_object *query;
};

/** Downcasting convenience helper. */
static inline struct brw_perf_query_object *
brw_perf_query(struct gl_perf_query_object *o)
{
   return (struct brw_perf_query_object *) o;
}

#define MI_RPC_BO_SIZE              4096
#define MI_RPC_BO_END_OFFSET_BYTES  (MI_RPC_BO_SIZE / 2)
#define MI_FREQ_START_OFFSET_BYTES  (3072)
#define MI_FREQ_END_OFFSET_BYTES    (3076)
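
/* Layout of the 4KB MI_RPC scratch buffer implied by the offsets above
 * (descriptive only):
 *
 *    offset 0     begin-of-query OA report (MI_REPORT_PERF_COUNT)
 *    offset 2048  end-of-query OA report (MI_RPC_BO_END_OFFSET_BYTES)
 *    offset 3072  32-bit frequency register snapshot at query begin
 *    offset 3076  32-bit frequency register snapshot at query end
 */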

/******************************************************************************/

static bool
brw_is_perf_query_ready(struct gl_context *ctx,
                        struct gl_perf_query_object *o);

static void
dump_perf_query_callback(void *query_void, void *brw_void)
{
   struct brw_context *ctx = brw_void;
   struct gen_perf_context *perf_ctx = ctx->perf_ctx;
   struct gl_perf_query_object *o = query_void;
   struct brw_perf_query_object *brw_query = brw_perf_query(o);
   struct gen_perf_query_object *obj = brw_query->query;

   DBG("%4d: %-6s %-8s ",
       o->Id,
       o->Used ? "Dirty," : "New,",
       o->Active ? "Active," : (o->Ready ? "Ready," : "Pending,"));
   gen_perf_dump_query(perf_ctx, obj, &ctx->batch);
}

static void
dump_perf_queries(struct brw_context *brw)
{
   struct gl_context *ctx = &brw->ctx;
   gen_perf_dump_query_count(brw->perf_ctx);
   _mesa_HashWalk(ctx->PerfQuery.Objects, dump_perf_query_callback, brw);
}

/**
 * Driver hook for glGetPerfQueryInfoINTEL().
 */
static void
brw_get_perf_query_info(struct gl_context *ctx,
                        unsigned query_index,
                        const char **name,
                        GLuint *data_size,
                        GLuint *n_counters,
                        GLuint *n_active)
{
   struct brw_context *brw = brw_context(ctx);
   struct gen_perf_context *perf_ctx = brw->perf_ctx;
   struct gen_perf_config *perf_cfg = gen_perf_config(perf_ctx);
   const struct gen_perf_query_info *query = &perf_cfg->queries[query_index];

   *name = query->name;
   *data_size = query->data_size;
   *n_counters = query->n_counters;
   *n_active = gen_perf_active_queries(perf_ctx, query);
}

static GLuint
gen_counter_type_enum_to_gl_type(enum gen_perf_counter_type type)
{
   switch (type) {
   case GEN_PERF_COUNTER_TYPE_EVENT: return GL_PERFQUERY_COUNTER_EVENT_INTEL;
   case GEN_PERF_COUNTER_TYPE_DURATION_NORM: return GL_PERFQUERY_COUNTER_DURATION_NORM_INTEL;
   case GEN_PERF_COUNTER_TYPE_DURATION_RAW: return GL_PERFQUERY_COUNTER_DURATION_RAW_INTEL;
   case GEN_PERF_COUNTER_TYPE_THROUGHPUT: return GL_PERFQUERY_COUNTER_THROUGHPUT_INTEL;
   case GEN_PERF_COUNTER_TYPE_RAW: return GL_PERFQUERY_COUNTER_RAW_INTEL;
   case GEN_PERF_COUNTER_TYPE_TIMESTAMP: return GL_PERFQUERY_COUNTER_TIMESTAMP_INTEL;
   default:
      unreachable("Unknown counter type");
   }
}

static GLuint
gen_counter_data_type_to_gl_type(enum gen_perf_counter_data_type type)
{
   switch (type) {
   case GEN_PERF_COUNTER_DATA_TYPE_BOOL32: return GL_PERFQUERY_COUNTER_DATA_BOOL32_INTEL;
   case GEN_PERF_COUNTER_DATA_TYPE_UINT32: return GL_PERFQUERY_COUNTER_DATA_UINT32_INTEL;
   case GEN_PERF_COUNTER_DATA_TYPE_UINT64: return GL_PERFQUERY_COUNTER_DATA_UINT64_INTEL;
   case GEN_PERF_COUNTER_DATA_TYPE_FLOAT: return GL_PERFQUERY_COUNTER_DATA_FLOAT_INTEL;
   case GEN_PERF_COUNTER_DATA_TYPE_DOUBLE: return GL_PERFQUERY_COUNTER_DATA_DOUBLE_INTEL;
   default:
      unreachable("Unknown counter data type");
   }
}

/**
 * Driver hook for glGetPerfCounterInfoINTEL().
 */
static void
brw_get_perf_counter_info(struct gl_context *ctx,
                          unsigned query_index,
                          unsigned counter_index,
                          const char **name,
                          const char **desc,
                          GLuint *offset,
                          GLuint *data_size,
                          GLuint *type_enum,
                          GLuint *data_type_enum,
                          GLuint64 *raw_max)
{
   struct brw_context *brw = brw_context(ctx);
   struct gen_perf_config *perf_cfg = gen_perf_config(brw->perf_ctx);
   const struct gen_perf_query_info *query =
      &perf_cfg->queries[query_index];
   const struct gen_perf_query_counter *counter =
      &query->counters[counter_index];

   *name = counter->name;
   *desc = counter->desc;
   *offset = counter->offset;
   *data_size = gen_perf_query_counter_get_size(counter);
   *type_enum = gen_counter_type_enum_to_gl_type(counter->type);
   *data_type_enum = gen_counter_data_type_to_gl_type(counter->data_type);
   *raw_max = counter->raw_max;
}

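/* Possible outcomes when reading reports back from the OA stream. */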
enum OaReadStatus {
   OA_READ_STATUS_ERROR,
   OA_READ_STATUS_UNFINISHED,
   OA_READ_STATUS_FINISHED,
};

/******************************************************************************/

/**
 * Driver hook for glBeginPerfQueryINTEL().
 */
static bool
brw_begin_perf_query(struct gl_context *ctx,
                     struct gl_perf_query_object *o)
{
   struct brw_context *brw = brw_context(ctx);
   struct brw_perf_query_object *brw_query = brw_perf_query(o);
   struct gen_perf_query_object *obj = brw_query->query;
   struct gen_perf_context *perf_ctx = brw->perf_ctx;

   /* We can assume the frontend hides mistaken attempts to Begin a
    * query object multiple times before its End. Similarly, if an
    * application reuses a query object before results have arrived,
    * the frontend will wait for prior results, so we don't need to
    * support abandoning in-flight results.
    */
   assert(!o->Active);
   assert(!o->Used || o->Ready); /* no in-flight query to worry about */

   DBG("Begin(%d)\n", o->Id);

   bool ret = gen_perf_begin_query(perf_ctx, obj);

   if (INTEL_DEBUG & DEBUG_PERFMON)
      dump_perf_queries(brw);

   return ret;
}

/**
 * Driver hook for glEndPerfQueryINTEL().
 */
static void
brw_end_perf_query(struct gl_context *ctx,
                   struct gl_perf_query_object *o)
{
   struct brw_context *brw = brw_context(ctx);
   struct brw_perf_query_object *brw_query = brw_perf_query(o);
   struct gen_perf_query_object *obj = brw_query->query;
   struct gen_perf_context *perf_ctx = brw->perf_ctx;

   DBG("End(%d)\n", o->Id);
   gen_perf_end_query(perf_ctx, obj);
}

static void
brw_wait_perf_query(struct gl_context *ctx, struct gl_perf_query_object *o)
{
   struct brw_context *brw = brw_context(ctx);
   struct brw_perf_query_object *brw_query = brw_perf_query(o);
   struct gen_perf_query_object *obj = brw_query->query;

   assert(!o->Ready);

   gen_perf_wait_query(brw->perf_ctx, obj, &brw->batch);
}

static bool
brw_is_perf_query_ready(struct gl_context *ctx,
                        struct gl_perf_query_object *o)
{
   struct brw_context *brw = brw_context(ctx);
   struct brw_perf_query_object *brw_query = brw_perf_query(o);
   struct gen_perf_query_object *obj = brw_query->query;

   if (o->Ready)
      return true;

   return gen_perf_is_query_ready(brw->perf_ctx, obj, &brw->batch);
}

/**
 * Driver hook for glGetPerfQueryDataINTEL().
 */
static void
brw_get_perf_query_data(struct gl_context *ctx,
                        struct gl_perf_query_object *o,
                        GLsizei data_size,
                        GLuint *data,
                        GLuint *bytes_written)
{
   struct brw_context *brw = brw_context(ctx);
   struct brw_perf_query_object *brw_query = brw_perf_query(o);
   struct gen_perf_query_object *obj = brw_query->query;

   assert(brw_is_perf_query_ready(ctx, o));

   DBG("GetData(%d)\n", o->Id);

   if (INTEL_DEBUG & DEBUG_PERFMON)
      dump_perf_queries(brw);

   /* We expect that the frontend only calls this hook when it knows
    * that results are available.
    */
   assert(o->Ready);

   gen_perf_get_query_data(brw->perf_ctx, obj, &brw->batch,
                           data_size, data, bytes_written);
}

static struct gl_perf_query_object *
brw_new_perf_query_object(struct gl_context *ctx, unsigned query_index)
{
   struct brw_context *brw = brw_context(ctx);
   struct gen_perf_context *perf_ctx = brw->perf_ctx;
   struct gen_perf_query_object *obj =
      gen_perf_new_query(perf_ctx, query_index);
   if (unlikely(!obj))
      return NULL;

   struct brw_perf_query_object *brw_query =
      calloc(1, sizeof(struct brw_perf_query_object));
   if (unlikely(!brw_query)) {
      gen_perf_delete_query(perf_ctx, obj);
      return NULL;
   }

   brw_query->query = obj;
   return &brw_query->base;
}

/**
 * Driver hook for glDeletePerfQueryINTEL().
 */
static void
brw_delete_perf_query(struct gl_context *ctx,
                      struct gl_perf_query_object *o)
{
   struct brw_context *brw = brw_context(ctx);
   struct brw_perf_query_object *brw_query = brw_perf_query(o);
   struct gen_perf_query_object *obj = brw_query->query;
   struct gen_perf_context *perf_ctx = brw->perf_ctx;

   /* We can assume that the frontend waits for a query to complete
    * before ever calling into here, so we don't have to worry about
    * deleting an in-flight query object.
    */
   assert(!o->Active);
   assert(!o->Used || o->Ready);

   DBG("Delete(%d)\n", o->Id);

   gen_perf_delete_query(perf_ctx, obj);
   free(brw_query);
}

/******************************************************************************/
/* gen_device_info will have incorrect default topology values for
 * unsupported kernels.  Verify kernel support to ensure OA metrics are
 * accurate.
 */
static bool
oa_metrics_kernel_support(int fd, const struct gen_device_info *devinfo)
{
   if (devinfo->gen >= 10) {
      /* The topology uAPI is required for CNL+ (kernel 4.17+); make a call
       * to the API to verify support.
       */
      struct drm_i915_query_item item = {
         .query_id = DRM_I915_QUERY_TOPOLOGY_INFO,
      };
      struct drm_i915_query query = {
         .num_items = 1,
         .items_ptr = (uintptr_t) &item,
      };

      /* Kernel 4.17+ supports the query. */
      return drmIoctl(fd, DRM_IOCTL_I915_QUERY, &query) == 0;
   }

   if (devinfo->gen >= 8) {
      /* The 4.13+ API is required for Gen8 - Gen9. */
      int mask;
      struct drm_i915_getparam gp = {
         .param = I915_PARAM_SLICE_MASK,
         .value = &mask,
      };
      /* Kernel 4.13+ supports this parameter. */
      return drmIoctl(fd, DRM_IOCTL_I915_GETPARAM, &gp) == 0;
   }

   if (devinfo->gen == 7)
      /* The default topology values are correct for Haswell. */
      return true;

   /* OA is not supported before Gen7. */
   return false;
}

static void *
brw_oa_bo_alloc(void *bufmgr, const char *name, uint64_t size)
{
   return brw_bo_alloc(bufmgr, name, size, BRW_MEMZONE_OTHER);
}

static void
brw_oa_emit_mi_report_perf_count(void *c,
                                 void *bo,
                                 uint32_t offset_in_bytes,
                                 uint32_t report_id)
{
   struct brw_context *ctx = c;
   ctx->vtbl.emit_mi_report_perf_count(ctx,
                                       bo,
                                       offset_in_bytes,
                                       report_id);
}

typedef void (*bo_unreference_t)(void *);
typedef void *(*bo_map_t)(void *, void *, unsigned flags);
typedef void (*bo_unmap_t)(void *);
typedef void (*emit_mi_report_t)(void *, void *, uint32_t, uint32_t);
typedef void (*emit_mi_flush_t)(void *);

static void
brw_oa_batchbuffer_flush(void *c, const char *file, int line)
{
   struct brw_context *ctx = c;
   _intel_batchbuffer_flush_fence(ctx, -1, NULL, file, line);
}

static void
brw_oa_emit_stall_at_pixel_scoreboard(void *c)
{
   struct brw_context *brw = c;
   brw_emit_end_of_pipe_sync(brw, PIPE_CONTROL_STALL_AT_SCOREBOARD);
}

static void
brw_perf_store_register(struct brw_context *brw, struct brw_bo *bo,
                        uint32_t reg, uint32_t reg_size,
                        uint32_t offset)
{
   if (reg_size == 8) {
      brw_store_register_mem64(brw, bo, reg, offset);
   } else {
      assert(reg_size == 4);
      brw_store_register_mem32(brw, bo, reg, offset);
   }
}

typedef void (*store_register_mem_t)(void *ctx, void *bo,
                                     uint32_t reg, uint32_t reg_size,
                                     uint32_t offset);
typedef bool (*batch_references_t)(void *batch, void *bo);
typedef void (*bo_wait_rendering_t)(void *bo);
typedef int (*bo_busy_t)(void *bo);

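/* The function pointer typedefs above mirror the gen_perf vtbl entries, with
 * the driver-specific struct types erased to void pointers.  Assigning the
 * brw_* callbacks below therefore needs the casts; this is assumed safe in
 * practice because each driver function differs from its vtbl slot only in
 * taking the concrete brw types where the slot takes void *.
 */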
static unsigned
brw_init_perf_query_info(struct gl_context *ctx)
{
   struct brw_context *brw = brw_context(ctx);
   const struct gen_device_info *devinfo = &brw->screen->devinfo;

   struct gen_perf_context *perf_ctx = brw->perf_ctx;
   struct gen_perf_config *perf_cfg = gen_perf_config(perf_ctx);

   if (perf_cfg)
      return perf_cfg->n_queries;

   if (!oa_metrics_kernel_support(brw->screen->fd, devinfo))
      return 0;

   perf_cfg = gen_perf_new(ctx);

   perf_cfg->vtbl.bo_alloc = brw_oa_bo_alloc;
   perf_cfg->vtbl.bo_unreference = (bo_unreference_t)brw_bo_unreference;
   perf_cfg->vtbl.bo_map = (bo_map_t)brw_bo_map;
   perf_cfg->vtbl.bo_unmap = (bo_unmap_t)brw_bo_unmap;
   perf_cfg->vtbl.emit_stall_at_pixel_scoreboard =
      (emit_mi_flush_t)brw_oa_emit_stall_at_pixel_scoreboard;
   perf_cfg->vtbl.emit_mi_report_perf_count =
      (emit_mi_report_t)brw_oa_emit_mi_report_perf_count;
   perf_cfg->vtbl.batchbuffer_flush = brw_oa_batchbuffer_flush;
   perf_cfg->vtbl.store_register_mem =
      (store_register_mem_t)brw_perf_store_register;
   perf_cfg->vtbl.batch_references = (batch_references_t)brw_batch_references;
   perf_cfg->vtbl.bo_wait_rendering =
      (bo_wait_rendering_t)brw_bo_wait_rendering;
   perf_cfg->vtbl.bo_busy = (bo_busy_t)brw_bo_busy;

   gen_perf_init_context(perf_ctx, perf_cfg, brw, brw->bufmgr, devinfo,
                         brw->hw_ctx, brw->screen->fd);
   gen_perf_init_metrics(perf_cfg, devinfo, brw->screen->fd,
                         true /* pipeline stats */);

   return perf_cfg->n_queries;
}

void
brw_init_performance_queries(struct brw_context *brw)
{
   struct gl_context *ctx = &brw->ctx;

   ctx->Driver.InitPerfQueryInfo = brw_init_perf_query_info;
   ctx->Driver.GetPerfQueryInfo = brw_get_perf_query_info;
   ctx->Driver.GetPerfCounterInfo = brw_get_perf_counter_info;
   ctx->Driver.NewPerfQueryObject = brw_new_perf_query_object;
   ctx->Driver.DeletePerfQuery = brw_delete_perf_query;
   ctx->Driver.BeginPerfQuery = brw_begin_perf_query;
   ctx->Driver.EndPerfQuery = brw_end_perf_query;
   ctx->Driver.WaitPerfQuery = brw_wait_perf_query;
   ctx->Driver.IsPerfQueryReady = brw_is_perf_query_ready;
   ctx->Driver.GetPerfQueryData = brw_get_perf_query_data;
}