/*
 * Copyright © 2013 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 */

#include <stdint.h>
#include <stdbool.h>
#include <sys/types.h>
#include <sys/mman.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <fcntl.h>
#include <errno.h>
#include <assert.h>

#include "igt_perf.h"

#include "gpu-perf.h"
#include "debugfs.h"

#if defined(__i386__)
#define rmb() asm volatile("lock; addl $0,0(%%esp)" ::: "memory")
#define wmb() asm volatile("lock; addl $0,0(%%esp)" ::: "memory")
#endif

#if defined(__x86_64__)
#define rmb() asm volatile("lfence" ::: "memory")
#define wmb() asm volatile("sfence" ::: "memory")
#endif
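
#if !defined(rmb)
/*
 * Not in the original source: a hedged fallback sketch for architectures
 * other than x86, assuming GCC/Clang __atomic builtins are available.
 */
#define rmb() __atomic_thread_fence(__ATOMIC_ACQUIRE)
#define wmb() __atomic_thread_fence(__ATOMIC_RELEASE)
#endif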

#define N_PAGES 32

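/*
 * Layout of a PERF_RECORD_SAMPLE as selected by attr.sample_type below
 * (PERF_SAMPLE_TID | PERF_SAMPLE_TIME | PERF_SAMPLE_STREAM_ID |
 * PERF_SAMPLE_RAW): the raw tracepoint payload follows the fixed fields.
 */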
struct sample_event {
	struct perf_event_header header;
	uint32_t pid, tid;
	uint64_t time;
	uint64_t id;
	uint32_t raw_size;
	uint8_t tracepoint_data[0];
};

enum {
	TP_GEM_REQUEST_ADD,
	TP_GEM_REQUEST_WAIT_BEGIN,
	TP_GEM_REQUEST_WAIT_END,
	TP_FLIP_COMPLETE,
	TP_GEM_RING_SYNC_TO,
	TP_GEM_RING_SWITCH_CONTEXT,

	TP_NB
};

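/*
 * Per-tracepoint state: the perf event id parsed from the debugfs format
 * file, the raw field descriptors, and the indices of the fields this tool
 * cares about (filled in by tracepoint_id()).
 */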
struct tracepoint {
	const char *name;
	int event_id;

	struct {
		char name[128];
		int offset;
		int size;
		int is_signed;
	} fields[20];
	int n_fields;

	int device_field;
	int ctx_field;
	int class_field;
	int instance_field;
	int seqno_field;
	int global_seqno_field;
	int plane_field;
} tracepoints[TP_NB] = {
	[TP_GEM_REQUEST_ADD] = { .name = "i915/i915_request_add", },
	[TP_GEM_REQUEST_WAIT_BEGIN] = { .name = "i915/i915_request_wait_begin", },
	[TP_GEM_REQUEST_WAIT_END] = { .name = "i915/i915_request_wait_end", },
	[TP_FLIP_COMPLETE] = { .name = "i915/flip_complete", },
	[TP_GEM_RING_SYNC_TO] = { .name = "i915/gem_ring_sync_to", },
	[TP_GEM_RING_SWITCH_CONTEXT] = { .name = "i915/gem_ring_switch_context", },
};

union parser_value {
	char *string;
	int integer;
};

struct parser_ctx {
	struct tracepoint *tp;
	FILE *fp;
};

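/*
 * Configuration for the peg/leg-generated parser included from
 * tracepoint_format.h: a local parser context carrying our parser_ctx,
 * with input read character by character from the open format file.
 */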
#define YY_CTX_LOCAL
#define YY_CTX_MEMBERS struct parser_ctx ctx;
#define YYSTYPE union parser_value
#define YY_LOCAL(T) static __attribute__((unused)) T
#define YY_PARSE(T) static T
#define YY_INPUT(yy, buf, result, max) \
	{ \
		int c = getc(yy->ctx.fp); \
		result = (EOF == c) ? 0 : (*(buf)= c, 1); \
		if (EOF != c) { \
			yyprintf((stderr, "<%c>", c)); \
		} \
	}

#include "tracepoint_format.h"

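/*
 * Parse $debugfs/tracing/events/<name>/format to discover the tracepoint's
 * event id and the offset of each of its fields. The result is cached in
 * tracepoints[]; returns 0 if the tracepoint is unavailable.
 */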
static int
tracepoint_id(int tp_id)
{
	struct tracepoint *tp = &tracepoints[tp_id];
	yycontext ctx;
	char buf[1024];

	/* Already parsed? */
	if (tp->event_id != 0)
		return tp->event_id;

	snprintf(buf, sizeof(buf), "%s/tracing/events/%s/format",
		 debugfs_path, tp->name);

	memset(&ctx, 0, sizeof(ctx));
	ctx.ctx.tp = tp;
	ctx.ctx.fp = fopen(buf, "r");

	if (ctx.ctx.fp == NULL)
		return 0;

	if (yyparse(&ctx)) {
		for (int f = 0; f < tp->n_fields; f++) {
			if (!strcmp(tp->fields[f].name, "device")) {
				tp->device_field = f;
			} else if (!strcmp(tp->fields[f].name, "ctx")) {
				tp->ctx_field = f;
			} else if (!strcmp(tp->fields[f].name, "class")) {
				tp->class_field = f;
			} else if (!strcmp(tp->fields[f].name, "instance")) {
				tp->instance_field = f;
			} else if (!strcmp(tp->fields[f].name, "seqno")) {
				tp->seqno_field = f;
			} else if (!strcmp(tp->fields[f].name, "global_seqno")) {
				tp->global_seqno_field = f;
			} else if (!strcmp(tp->fields[f].name, "plane")) {
				tp->plane_field = f;
			}
		}
	} else
		tp->event_id = tp->n_fields = 0;

	yyrelease(&ctx);
	fclose(ctx.ctx.fp);

	return tp->event_id;
}

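/*
 * Accessors for raw tracepoint fields, using the offsets discovered from the
 * format file. GET_RING_ID flattens (engine class, instance) into a single
 * ring index, class * 4 + instance.
 */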
#define READ_TP_FIELD_U32(sample, tp_id, field_name) \
	(*(const uint32_t *)((sample)->tracepoint_data + \
			     tracepoints[tp_id].fields[ \
				     tracepoints[tp_id].field_name##_field].offset))

#define READ_TP_FIELD_U16(sample, tp_id, field_name) \
	(*(const uint16_t *)((sample)->tracepoint_data + \
			     tracepoints[tp_id].fields[ \
				     tracepoints[tp_id].field_name##_field].offset))

#define GET_RING_ID(sample, tp_id) \
	({ \
		unsigned char class, instance, ring; \
		\
		class = READ_TP_FIELD_U16(sample, tp_id, class); \
		instance = READ_TP_FIELD_U16(sample, tp_id, instance); \
		\
		assert(class <= I915_ENGINE_CLASS_VIDEO_ENHANCE); \
		assert(instance <= 4); \
		\
		ring = class * 4 + instance; \
		\
		ring; \
	})

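/*
 * Open one perf fd per CPU for the given tracepoint, sampling every event,
 * and remember which handler to run for samples carrying this event's
 * stream id. Returns 0 on success or a positive errno-style value.
 */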
static int perf_tracepoint_open(struct gpu_perf *gp, int tp_id,
				int (*func)(struct gpu_perf *, const void *))
{
	struct perf_event_attr attr;
	struct gpu_perf_sample *sample;
	int n, *fd;

	memset(&attr, 0, sizeof (attr));

	attr.type = PERF_TYPE_TRACEPOINT;
	attr.config = tracepoint_id(tp_id);
	if (attr.config == 0)
		return ENOENT;

	attr.sample_period = 1;
	attr.sample_type = (PERF_SAMPLE_TIME | PERF_SAMPLE_STREAM_ID | PERF_SAMPLE_TID | PERF_SAMPLE_RAW);
	attr.read_format = PERF_FORMAT_ID;

	attr.exclude_guest = 1;

	n = gp->nr_cpus * (gp->nr_events+1);
	fd = realloc(gp->fd, n*sizeof(int));
	sample = realloc(gp->sample, n*sizeof(*gp->sample));
	if (fd == NULL || sample == NULL)
		return ENOMEM;
	gp->fd = fd;
	gp->sample = sample;

	fd += gp->nr_events * gp->nr_cpus;
	sample += gp->nr_events * gp->nr_cpus;
	for (n = 0; n < gp->nr_cpus; n++) {
		uint64_t track[2];

		fd[n] = perf_event_open(&attr, -1, n, -1, 0);
		if (fd[n] == -1)
			return errno;

		/* read back the event to establish id->tracepoint */
		if (read(fd[n], track, sizeof(track)) < 0)
			return errno;
		sample[n].id = track[1];
		sample[n].func = func;
	}

	gp->nr_events++;
	return 0;
}

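/*
 * mmap one ring buffer (N_PAGES of data plus the header page) per CPU on the
 * first event's fd, then redirect every other event's output into the buffer
 * for the same CPU with PERF_EVENT_IOC_SET_OUTPUT.
 */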
static int perf_mmap(struct gpu_perf *gp)
{
	int size = (1 + N_PAGES) * gp->page_size;
	int *fd, i, j;

	gp->map = malloc(sizeof(void *)*gp->nr_cpus);
	if (gp->map == NULL)
		return ENOMEM;

	fd = gp->fd;
	for (j = 0; j < gp->nr_cpus; j++) {
		gp->map[j] = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED, *fd++, 0);
		if (gp->map[j] == (void *)-1)
			goto err;
	}

	for (i = 1; i < gp->nr_events; i++) {
		for (j = 0; j < gp->nr_cpus; j++)
			ioctl(*fd++, PERF_EVENT_IOC_SET_OUTPUT, gp->fd[j]);
	}

	return 0;

err:
	while (--j >= 0)
		munmap(gp->map[j], size);
	free(gp->map);
	gp->map = NULL;
	return EINVAL;
}

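/* Fetch the short process name from /proc/<pid>/comm, trimming the newline. */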
static int get_comm(pid_t pid, char *comm, int len)
{
	char filename[1024];
	int fd;

	*comm = '\0';
	snprintf(filename, sizeof(filename), "/proc/%d/comm", pid);

	fd = open(filename, O_RDONLY);
	if (fd >= 0) {
		len = read(fd, comm, len-1);
		if (len > 0)
			comm[len-1] = '\0';
		close(fd);
	} else
		len = -1;

	return len;
}

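/*
 * Find the gpu_perf_comm entry for a pid, creating one (and resolving its
 * name) on first sight. Returns NULL for pid 0 or on failure.
 */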
static struct gpu_perf_comm *
lookup_comm(struct gpu_perf *gp, pid_t pid)
{
	struct gpu_perf_comm *comm;

	if (pid == 0)
		return NULL;

	for (comm = gp->comm; comm != NULL; comm = comm->next) {
		if (comm->pid == pid)
			break;
	}
	if (comm == NULL) {
		comm = calloc(1, sizeof(*comm));
		if (comm == NULL)
			return NULL;

		if (get_comm(pid, comm->name, sizeof(comm->name)) < 0) {
			free(comm);
			return NULL;
		}

		comm->pid = pid;
		comm->next = gp->comm;
		gp->comm = comm;
	}

	return comm;
}

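/*
 * Tracepoint handlers, dispatched from process_sample(). Each returns nonzero
 * if it updated any statistics. request_add counts requests submitted per
 * engine on behalf of the originating process.
 */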
static int request_add(struct gpu_perf *gp, const void *event)
{
	const struct sample_event *sample = event;
	struct gpu_perf_comm *comm;

	comm = lookup_comm(gp, sample->pid);
	if (comm == NULL)
		return 0;

	comm->nr_requests[GET_RING_ID(sample, TP_GEM_REQUEST_ADD)]++;
	return 1;
}

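/* Count completed pageflips per display plane. */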
static int flip_complete(struct gpu_perf *gp, const void *event)
{
	const struct sample_event *sample = event;

	gp->flip_complete[READ_TP_FIELD_U32(sample, TP_FLIP_COMPLETE, plane)]++;
	return 1;
}

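/* Count context switches per engine. */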
static int ctx_switch(struct gpu_perf *gp, const void *event)
{
	const struct sample_event *sample = event;

	gp->ctx_switch[GET_RING_ID(sample, TP_GEM_RING_SWITCH_CONTEXT)]++;
	return 1;
}

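/* Count inter-ring semaphore synchronisations attributed to the process. */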
static int ring_sync(struct gpu_perf *gp, const void *event)
{
	const struct sample_event *sample = event;
	struct gpu_perf_comm *comm;

	comm = lookup_comm(gp, sample->pid);
	if (comm == NULL)
		return 0;

	comm->nr_sema++;
	return 1;
}

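/*
 * Record the start of a wait on (context, seqno) for this process so that
 * wait_end can later account the elapsed time.
 */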
static int wait_begin(struct gpu_perf *gp, const void *event)
{
	const struct sample_event *sample = event;
	struct gpu_perf_comm *comm;
	struct gpu_perf_time *wait;

	comm = lookup_comm(gp, sample->pid);
	if (comm == NULL)
		return 0;

	wait = malloc(sizeof(*wait));
	if (wait == NULL)
		return 0;

	/* XXX argument order CTX == ENGINE! */

	wait->comm = comm;
	wait->comm->active = true;
	wait->context = READ_TP_FIELD_U32(sample, TP_GEM_REQUEST_WAIT_BEGIN, ctx);
	wait->seqno = READ_TP_FIELD_U32(sample, TP_GEM_REQUEST_WAIT_BEGIN, seqno);
	wait->time = sample->time;
	wait->next = gp->wait[GET_RING_ID(sample, TP_GEM_REQUEST_WAIT_BEGIN)];
	gp->wait[GET_RING_ID(sample, TP_GEM_REQUEST_WAIT_BEGIN)] = wait;

	return 0;
}

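/*
 * Match a wait-end sample against the outstanding wait_begin record for the
 * same engine, context and seqno, and accumulate the wait time.
 */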
static int wait_end(struct gpu_perf *gp, const void *event)
{
	const struct sample_event *sample = event;
	struct gpu_perf_time *wait, **prev;
	uint32_t engine = GET_RING_ID(sample, TP_GEM_REQUEST_WAIT_END);
	uint32_t context = READ_TP_FIELD_U32(sample, TP_GEM_REQUEST_WAIT_END, ctx);
	uint32_t seqno = READ_TP_FIELD_U32(sample, TP_GEM_REQUEST_WAIT_END, seqno);

	for (prev = &gp->wait[engine]; (wait = *prev) != NULL; prev = &wait->next) {
		if (wait->context != context || wait->seqno != seqno)
			continue;

		wait->comm->wait_time += sample->time - wait->time;
		wait->comm->active = false;

		*prev = wait->next;
		free(wait);
		return 1;
	}

	return 0;
}

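/*
 * Open all supported i915 tracepoints and map their per-CPU ring buffers.
 * If none of the tracepoints are available, gp->error is set.
 */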
void gpu_perf_init(struct gpu_perf *gp, unsigned flags)
{
	memset(gp, 0, sizeof(*gp));
	gp->nr_cpus = sysconf(_SC_NPROCESSORS_ONLN);
	gp->page_size = getpagesize();

	perf_tracepoint_open(gp, TP_GEM_REQUEST_ADD, request_add);
	if (perf_tracepoint_open(gp, TP_GEM_REQUEST_WAIT_BEGIN, wait_begin) == 0)
		perf_tracepoint_open(gp, TP_GEM_REQUEST_WAIT_END, wait_end);
	perf_tracepoint_open(gp, TP_FLIP_COMPLETE, flip_complete);
	perf_tracepoint_open(gp, TP_GEM_RING_SYNC_TO, ring_sync);
	perf_tracepoint_open(gp, TP_GEM_RING_SWITCH_CONTEXT, ctx_switch);

	if (gp->nr_events == 0) {
		gp->error = "i915.ko tracepoints not available";
		return;
	}

	if (perf_mmap(gp))
		return;
}

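/*
 * Dispatch a PERF_RECORD_SAMPLE to the handler registered for its stream id
 * on this CPU. Returns nonzero if any statistic was updated.
 */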
static int process_sample(struct gpu_perf *gp, int cpu,
			  const struct perf_event_header *header)
{
	const struct sample_event *sample = (const struct sample_event *)header;
	int n, update = 0;

	/* hash me! */
	for (n = 0; n < gp->nr_events; n++) {
		int m = n * gp->nr_cpus + cpu;
		if (gp->sample[m].id != sample->id)
			continue;

		update = gp->sample[m].func(gp, sample);
		break;
	}

	return update;
}

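/*
 * Drain every CPU's ring buffer: records that wrap around the end of the
 * buffer are copied into a bounce buffer before being dispatched. Returns
 * the number of samples that updated statistics.
 */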
int gpu_perf_update(struct gpu_perf *gp)
{
	const int size = N_PAGES * gp->page_size;
	const int mask = size - 1;
	uint8_t *buffer = NULL;
	int buffer_size = 0;
	int n, update = 0;

	if (gp->map == NULL)
		return 0;

	for (n = 0; n < gp->nr_cpus; n++) {
		struct perf_event_mmap_page *mmap = gp->map[n];
		const uint8_t *data;
		uint64_t head, tail;
		int wrap = 0;

		tail = mmap->data_tail;
		head = mmap->data_head;
		rmb();

		if (head < tail) {
			wrap = 1;
			tail &= mask;
			head &= mask;
			head += size;
		}

		data = (uint8_t *)mmap + gp->page_size;
		while (head - tail >= sizeof (struct perf_event_header)) {
			const struct perf_event_header *header;

			header = (const struct perf_event_header *)(data + (tail & mask));
			assert(header->size > 0);
			if (header->size > head - tail)
				break;

			if ((const uint8_t *)header + header->size > data + size) {
				int before;

				if (header->size > buffer_size) {
					uint8_t *b = realloc(buffer, header->size);
					if (b == NULL)
						break;

					buffer = b;
					buffer_size = header->size;
				}

				before = data + size - (const uint8_t *)header;

				memcpy(buffer, header, before);
				memcpy(buffer + before, data, header->size - before);

				header = (struct perf_event_header *)buffer;
			}

			if (header->type == PERF_RECORD_SAMPLE)
				update += process_sample(gp, n, header);
			tail += header->size;
		}

		if (wrap)
			tail &= mask;
		mmap->data_tail = tail;
		wmb();
	}

	free(buffer);
	return update;
}