/*
 * Copyright © 2013 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */

/**
 * \file brw_performance_query.c
 *
 * Implementation of the GL_INTEL_performance_query extension.
 *
 * Currently there are two possible counter sources exposed here:
 *
 * On Gfx6+ hardware we have numerous 64-bit Pipeline Statistics Registers
 * that we can snapshot at the beginning and end of a query.
 *
 * On Gfx7.5+ we have Observability Architecture counters which are
 * covered in a separate document from the rest of the PRMs.  It is
 * available at:
 * https://01.org/linuxgraphics/documentation/driver-documentation-prms
 * => 2013 Intel Core Processor Family => Observability Performance Counters
 * (This one volume covers Sandybridge, Ivybridge, Baytrail, and Haswell,
 * though notably we currently only support OA counters for Haswell+.)
 */
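
/*
 * For orientation, a rough sketch of how an application drives this
 * extension (per the GL_INTEL_performance_query spec; error handling
 * omitted):
 *
 *    GLuint query_id, query;
 *    glGetFirstPerfQueryIdINTEL(&query_id);
 *    glCreatePerfQueryINTEL(query_id, &query);
 *    glBeginPerfQueryINTEL(query);
 *    ... issue the GL work to be measured ...
 *    glEndPerfQueryINTEL(query);
 *    glGetPerfQueryDataINTEL(query, GL_PERFQUERY_WAIT_INTEL,
 *                            data_size, data, &bytes_written);
 *    glDeletePerfQueryINTEL(query);
 *
 * Each of these entry points is backed by one of the driver hooks below.
 */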

#include <limits.h>

/* put before sys/types.h to silence glibc warnings */
#ifdef MAJOR_IN_MKDEV
#include <sys/mkdev.h>
#endif
#ifdef MAJOR_IN_SYSMACROS
#include <sys/sysmacros.h>
#endif
#include <sys/types.h>
#include <sys/stat.h>
#include <fcntl.h>
#include <sys/mman.h>
#include <sys/ioctl.h>

#include <xf86drm.h>
#include "drm-uapi/i915_drm.h"

#include "main/hash.h"
#include "main/macros.h"
#include "main/mtypes.h"
#include "main/performance_query.h"

#include "util/bitset.h"
#include "util/ralloc.h"
#include "util/hash_table.h"
#include "util/list.h"
#include "util/u_math.h"

#include "brw_context.h"
#include "brw_defines.h"
#include "brw_batch.h"

#include "perf/intel_perf.h"
#include "perf/intel_perf_regs.h"
#include "perf/intel_perf_mdapi.h"
#include "perf/intel_perf_query.h"

#define FILE_DEBUG_FLAG DEBUG_PERFMON

#define OAREPORT_REASON_MASK           0x3f
#define OAREPORT_REASON_SHIFT          19
#define OAREPORT_REASON_TIMER          (1<<0)
#define OAREPORT_REASON_TRIGGER1       (1<<1)
#define OAREPORT_REASON_TRIGGER2       (1<<2)
#define OAREPORT_REASON_CTX_SWITCH     (1<<3)
#define OAREPORT_REASON_GO_TRANSITION  (1<<4)
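
/* The reason an OA report was written is encoded in bits 19-24 of the
 * report's first dword; a sketch of decoding it with the definitions
 * above:
 *
 *    uint32_t reason = (report[0] >> OAREPORT_REASON_SHIFT) &
 *                      OAREPORT_REASON_MASK;
 *
 * which can then be tested against the OAREPORT_REASON_* flags.
 */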

struct brw_perf_query_object {
   struct gl_perf_query_object base;
   struct intel_perf_query_object *query;
};

/** Downcasting convenience helper. */
static inline struct brw_perf_query_object *
brw_perf_query(struct gl_perf_query_object *o)
{
   return (struct brw_perf_query_object *) o;
}

#define MI_RPC_BO_SIZE              4096
#define MI_RPC_BO_END_OFFSET_BYTES  (MI_RPC_BO_SIZE / 2)
#define MI_FREQ_START_OFFSET_BYTES  (3072)
#define MI_FREQ_END_OFFSET_BYTES    (3076)
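
/* A rough sketch of how these offsets carve up the 4KB MI_RPC scratch
 * buffer (the detailed bookkeeping lives in the shared perf code):
 *
 *    0    - OA report snapshot at query begin
 *    2048 - OA report snapshot at query end
 *    3072 - GT frequency snapshot at query begin (4 bytes)
 *    3076 - GT frequency snapshot at query end   (4 bytes)
 */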

/******************************************************************************/

static bool
brw_is_perf_query_ready(struct gl_context *ctx,
                        struct gl_perf_query_object *o);

static void
dump_perf_query_callback(void *query_void, void *brw_void)
{
   struct brw_context *ctx = brw_void;
   struct intel_perf_context *perf_ctx = ctx->perf_ctx;
   struct gl_perf_query_object *o = query_void;
   struct brw_perf_query_object *brw_query = brw_perf_query(o);
   struct intel_perf_query_object *obj = brw_query->query;

   DBG("%4d: %-6s %-8s ",
       o->Id,
       o->Used ? "Dirty," : "New,",
       o->Active ? "Active," : (o->Ready ? "Ready," : "Pending,"));
   intel_perf_dump_query(perf_ctx, obj, &ctx->batch);
}

static void
dump_perf_queries(struct brw_context *brw)
{
   struct gl_context *ctx = &brw->ctx;
   intel_perf_dump_query_count(brw->perf_ctx);
   _mesa_HashWalk(ctx->PerfQuery.Objects, dump_perf_query_callback, brw);
}

/**
 * Driver hook for glGetPerfQueryInfoINTEL().
 */
static void
brw_get_perf_query_info(struct gl_context *ctx,
                        unsigned query_index,
                        const char **name,
                        GLuint *data_size,
                        GLuint *n_counters,
                        GLuint *n_active)
{
   struct brw_context *brw = brw_context(ctx);
   struct intel_perf_context *perf_ctx = brw->perf_ctx;
   struct intel_perf_config *perf_cfg = intel_perf_config(perf_ctx);
   const struct intel_perf_query_info *query = &perf_cfg->queries[query_index];

   *name = query->name;
   *data_size = query->data_size;
   *n_counters = query->n_counters;
   *n_active = intel_perf_active_queries(perf_ctx, query);
}

static GLuint
intel_counter_type_enum_to_gl_type(enum intel_perf_counter_type type)
{
   switch (type) {
   case INTEL_PERF_COUNTER_TYPE_EVENT: return GL_PERFQUERY_COUNTER_EVENT_INTEL;
   case INTEL_PERF_COUNTER_TYPE_DURATION_NORM: return GL_PERFQUERY_COUNTER_DURATION_NORM_INTEL;
   case INTEL_PERF_COUNTER_TYPE_DURATION_RAW: return GL_PERFQUERY_COUNTER_DURATION_RAW_INTEL;
   case INTEL_PERF_COUNTER_TYPE_THROUGHPUT: return GL_PERFQUERY_COUNTER_THROUGHPUT_INTEL;
   case INTEL_PERF_COUNTER_TYPE_RAW: return GL_PERFQUERY_COUNTER_RAW_INTEL;
   case INTEL_PERF_COUNTER_TYPE_TIMESTAMP: return GL_PERFQUERY_COUNTER_TIMESTAMP_INTEL;
   default:
      unreachable("Unknown counter type");
   }
}

static GLuint
intel_counter_data_type_to_gl_type(enum intel_perf_counter_data_type type)
{
   switch (type) {
   case INTEL_PERF_COUNTER_DATA_TYPE_BOOL32: return GL_PERFQUERY_COUNTER_DATA_BOOL32_INTEL;
   case INTEL_PERF_COUNTER_DATA_TYPE_UINT32: return GL_PERFQUERY_COUNTER_DATA_UINT32_INTEL;
   case INTEL_PERF_COUNTER_DATA_TYPE_UINT64: return GL_PERFQUERY_COUNTER_DATA_UINT64_INTEL;
   case INTEL_PERF_COUNTER_DATA_TYPE_FLOAT: return GL_PERFQUERY_COUNTER_DATA_FLOAT_INTEL;
   case INTEL_PERF_COUNTER_DATA_TYPE_DOUBLE: return GL_PERFQUERY_COUNTER_DATA_DOUBLE_INTEL;
   default:
      unreachable("Unknown counter data type");
   }
}

/**
 * Driver hook for glGetPerfCounterInfoINTEL().
 */
static void
brw_get_perf_counter_info(struct gl_context *ctx,
                          unsigned query_index,
                          unsigned counter_index,
                          const char **name,
                          const char **desc,
                          GLuint *offset,
                          GLuint *data_size,
                          GLuint *type_enum,
                          GLuint *data_type_enum,
                          GLuint64 *raw_max)
{
   struct brw_context *brw = brw_context(ctx);
   struct intel_perf_config *perf_cfg = intel_perf_config(brw->perf_ctx);
   const struct intel_perf_query_info *query =
      &perf_cfg->queries[query_index];
   const struct intel_perf_query_counter *counter =
      &query->counters[counter_index];

   *name = counter->name;
   *desc = counter->desc;
   *offset = counter->offset;
   *data_size = intel_perf_query_counter_get_size(counter);
   *type_enum = intel_counter_type_enum_to_gl_type(counter->type);
   *data_type_enum = intel_counter_data_type_to_gl_type(counter->data_type);
   *raw_max = counter->raw_max;
}

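/* Possible outcomes when draining OA reports for a query; the read
 * loop itself lives in the shared perf code.
 */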
enum OaReadStatus {
   OA_READ_STATUS_ERROR,
   OA_READ_STATUS_UNFINISHED,
   OA_READ_STATUS_FINISHED,
};

/******************************************************************************/

/**
 * Driver hook for glBeginPerfQueryINTEL().
 */
static bool
brw_begin_perf_query(struct gl_context *ctx,
                     struct gl_perf_query_object *o)
{
   struct brw_context *brw = brw_context(ctx);
   struct brw_perf_query_object *brw_query = brw_perf_query(o);
   struct intel_perf_query_object *obj = brw_query->query;
   struct intel_perf_context *perf_ctx = brw->perf_ctx;

   /* We can assume the frontend hides mistaken attempts to Begin a
    * query object multiple times before its End. Similarly if an
    * application reuses a query object before results have arrived
    * the frontend will wait for prior results so we don't need
    * to support abandoning in-flight results.
    */
   assert(!o->Active);
   assert(!o->Used || o->Ready); /* no in-flight query to worry about */

   DBG("Begin(%d)\n", o->Id);

   bool ret = intel_perf_begin_query(perf_ctx, obj);

   if (INTEL_DEBUG(DEBUG_PERFMON))
      dump_perf_queries(brw);

   return ret;
}

/**
 * Driver hook for glEndPerfQueryINTEL().
 */
static void
brw_end_perf_query(struct gl_context *ctx,
                   struct gl_perf_query_object *o)
{
   struct brw_context *brw = brw_context(ctx);
   struct brw_perf_query_object *brw_query = brw_perf_query(o);
   struct intel_perf_query_object *obj = brw_query->query;
   struct intel_perf_context *perf_ctx = brw->perf_ctx;

   DBG("End(%d)\n", o->Id);
   intel_perf_end_query(perf_ctx, obj);
}

static void
brw_wait_perf_query(struct gl_context *ctx, struct gl_perf_query_object *o)
{
   struct brw_context *brw = brw_context(ctx);
   struct brw_perf_query_object *brw_query = brw_perf_query(o);
   struct intel_perf_query_object *obj = brw_query->query;

   assert(!o->Ready);

   intel_perf_wait_query(brw->perf_ctx, obj, &brw->batch);
}

static bool
brw_is_perf_query_ready(struct gl_context *ctx,
                        struct gl_perf_query_object *o)
{
   struct brw_context *brw = brw_context(ctx);
   struct brw_perf_query_object *brw_query = brw_perf_query(o);
   struct intel_perf_query_object *obj = brw_query->query;

   if (o->Ready)
      return true;

   return intel_perf_is_query_ready(brw->perf_ctx, obj, &brw->batch);
}

/**
 * Driver hook for glGetPerfQueryDataINTEL().
 */
static bool
brw_get_perf_query_data(struct gl_context *ctx,
                        struct gl_perf_query_object *o,
                        GLsizei data_size,
                        GLuint *data,
                        GLuint *bytes_written)
{
   struct brw_context *brw = brw_context(ctx);
   struct brw_perf_query_object *brw_query = brw_perf_query(o);
   struct intel_perf_query_object *obj = brw_query->query;

   assert(brw_is_perf_query_ready(ctx, o));

   DBG("GetData(%d)\n", o->Id);

   if (INTEL_DEBUG(DEBUG_PERFMON))
      dump_perf_queries(brw);

   /* We expect that the frontend only calls this hook when it knows
    * that results are available.
    */
   assert(o->Ready);

   intel_perf_get_query_data(brw->perf_ctx, obj, &brw->batch,
                             data_size, data, bytes_written);

   return true;
}

static struct gl_perf_query_object *
brw_new_perf_query_object(struct gl_context *ctx, unsigned query_index)
{
   struct brw_context *brw = brw_context(ctx);
   struct intel_perf_context *perf_ctx = brw->perf_ctx;
   struct intel_perf_query_object *obj = intel_perf_new_query(perf_ctx, query_index);
   if (unlikely(!obj))
      return NULL;

   struct brw_perf_query_object *brw_query = calloc(1, sizeof(struct brw_perf_query_object));
   if (unlikely(!brw_query)) {
      intel_perf_delete_query(perf_ctx, obj);
      return NULL;
   }

   brw_query->query = obj;
   return &brw_query->base;
}

/**
 * Driver hook for glDeletePerfQueryINTEL().
 */
static void
brw_delete_perf_query(struct gl_context *ctx,
                      struct gl_perf_query_object *o)
{
   struct brw_context *brw = brw_context(ctx);
   struct brw_perf_query_object *brw_query = brw_perf_query(o);
   struct intel_perf_query_object *obj = brw_query->query;
   struct intel_perf_context *perf_ctx = brw->perf_ctx;

   /* We can assume that the frontend waits for a query to complete
    * before ever calling into here, so we don't have to worry about
    * deleting an in-flight query object.
    */
   assert(!o->Active);
   assert(!o->Used || o->Ready);

   DBG("Delete(%d)\n", o->Id);

   intel_perf_delete_query(perf_ctx, obj);
   free(brw_query);
}

/******************************************************************************/
/* intel_device_info will have incorrect default topology values for
 * unsupported kernels. Verify kernel support to ensure OA metrics are
 * accurate.
 */
static bool
oa_metrics_kernel_support(int fd, const struct intel_device_info *devinfo)
{
   if (devinfo->ver >= 10) {
      /* The topology uAPI required for CNL+ landed in kernel 4.17; make a
       * query call to verify support.
       */
      struct drm_i915_query_item item = {
         .query_id = DRM_I915_QUERY_TOPOLOGY_INFO,
      };
      struct drm_i915_query query = {
         .num_items = 1,
         .items_ptr = (uintptr_t) &item,
      };

      /* Kernel 4.17+ supports the query. */
      return drmIoctl(fd, DRM_IOCTL_I915_QUERY, &query) == 0;
   }

   if (devinfo->ver >= 8) {
      /* The 4.13+ API is required for Gfx8 - Gfx9. */
      int mask;
      struct drm_i915_getparam gp = {
         .param = I915_PARAM_SLICE_MASK,
         .value = &mask,
      };
      /* Kernel 4.13+ supports this parameter. */
      return drmIoctl(fd, DRM_IOCTL_I915_GETPARAM, &gp) == 0;
   }

   if (devinfo->ver == 7)
      /* The default topology values are correct for HSW. */
      return true;

   /* OA is not supported before Gfx7. */
   return false;
}

static void *
brw_oa_bo_alloc(void *bufmgr, const char *name, uint64_t size)
{
   return brw_bo_alloc(bufmgr, name, size, BRW_MEMZONE_OTHER);
}

static void
brw_oa_emit_mi_report_perf_count(void *c,
                                 void *bo,
                                 uint32_t offset_in_bytes,
                                 uint32_t report_id)
{
   struct brw_context *ctx = c;
   ctx->vtbl.emit_mi_report_perf_count(ctx,
                                       bo,
                                       offset_in_bytes,
                                       report_id);
}

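/* These typedefs mirror the untyped (void *) slots of the intel_perf
 * vtable, letting the strongly typed brw_* helpers above and below be
 * cast into it in brw_init_perf_query_info().
 */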
typedef void (*bo_unreference_t)(void *);
typedef void *(*bo_map_t)(void *, void *, unsigned flags);
typedef void (*bo_unmap_t)(void *);
typedef void (*emit_mi_report_t)(void *, void *, uint32_t, uint32_t);
typedef void (*emit_mi_flush_t)(void *);

static void
brw_oa_batchbuffer_flush(void *c, const char *file, int line)
{
   struct brw_context *ctx = c;
   _brw_batch_flush_fence(ctx, -1, NULL, file, line);
}

static void
brw_oa_emit_stall_at_pixel_scoreboard(void *c)
{
   struct brw_context *brw = c;
   brw_emit_end_of_pipe_sync(brw, PIPE_CONTROL_STALL_AT_SCOREBOARD);
}

static void
brw_perf_store_register(struct brw_context *brw, struct brw_bo *bo,
                        uint32_t reg, uint32_t reg_size,
                        uint32_t offset)
{
   if (reg_size == 8) {
      brw_store_register_mem64(brw, bo, reg, offset);
   } else {
      assert(reg_size == 4);
      brw_store_register_mem32(brw, bo, reg, offset);
   }
}

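/* Remaining vtable adapter typedefs (see brw_init_perf_query_info() below). */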
typedef void (*store_register_mem_t)(void *ctx, void *bo,
                                     uint32_t reg, uint32_t reg_size,
                                     uint32_t offset);
typedef bool (*batch_references_t)(void *batch, void *bo);
typedef void (*bo_wait_rendering_t)(void *bo);
typedef int (*bo_busy_t)(void *bo);

static unsigned
brw_init_perf_query_info(struct gl_context *ctx)
{
   struct brw_context *brw = brw_context(ctx);
   const struct intel_device_info *devinfo = &brw->screen->devinfo;

   struct intel_perf_context *perf_ctx = brw->perf_ctx;
   struct intel_perf_config *perf_cfg = intel_perf_config(perf_ctx);

   if (perf_cfg)
      return perf_cfg->n_queries;

   if (!oa_metrics_kernel_support(brw->screen->fd, devinfo))
      return 0;

   perf_cfg = intel_perf_new(brw->mem_ctx);

   perf_cfg->vtbl.bo_alloc = brw_oa_bo_alloc;
   perf_cfg->vtbl.bo_unreference = (bo_unreference_t)brw_bo_unreference;
   perf_cfg->vtbl.bo_map = (bo_map_t)brw_bo_map;
   perf_cfg->vtbl.bo_unmap = (bo_unmap_t)brw_bo_unmap;
   perf_cfg->vtbl.emit_stall_at_pixel_scoreboard =
      (emit_mi_flush_t)brw_oa_emit_stall_at_pixel_scoreboard;
   perf_cfg->vtbl.emit_mi_report_perf_count =
      (emit_mi_report_t)brw_oa_emit_mi_report_perf_count;
   perf_cfg->vtbl.batchbuffer_flush = brw_oa_batchbuffer_flush;
   perf_cfg->vtbl.store_register_mem =
      (store_register_mem_t)brw_perf_store_register;
   perf_cfg->vtbl.batch_references = (batch_references_t)brw_batch_references;
   perf_cfg->vtbl.bo_wait_rendering = (bo_wait_rendering_t)brw_bo_wait_rendering;
   perf_cfg->vtbl.bo_busy = (bo_busy_t)brw_bo_busy;

   intel_perf_init_metrics(perf_cfg, devinfo, brw->screen->fd,
                           true /* pipeline stats */,
                           true /* register snapshots */);
   intel_perf_init_context(perf_ctx, perf_cfg, brw->mem_ctx, brw, brw->bufmgr,
                           devinfo, brw->hw_ctx, brw->screen->fd);

   return perf_cfg->n_queries;
}

void
brw_init_performance_queries(struct brw_context *brw)
{
   struct gl_context *ctx = &brw->ctx;

   ctx->Driver.InitPerfQueryInfo = brw_init_perf_query_info;
   ctx->Driver.GetPerfQueryInfo = brw_get_perf_query_info;
   ctx->Driver.GetPerfCounterInfo = brw_get_perf_counter_info;
   ctx->Driver.NewPerfQueryObject = brw_new_perf_query_object;
   ctx->Driver.DeletePerfQuery = brw_delete_perf_query;
   ctx->Driver.BeginPerfQuery = brw_begin_perf_query;
   ctx->Driver.EndPerfQuery = brw_end_perf_query;
   ctx->Driver.WaitPerfQuery = brw_wait_perf_query;
   ctx->Driver.IsPerfQueryReady = brw_is_perf_query_ready;
   ctx->Driver.GetPerfQueryData = brw_get_perf_query_data;
}
534