/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2017-2018 Intel Corporation
 */

#include <linux/pm_runtime.h>

#include "gt/intel_engine.h"
#include "gt/intel_engine_pm.h"
#include "gt/intel_engine_user.h"
#include "gt/intel_gt_pm.h"
#include "gt/intel_rc6.h"
#include "gt/intel_rps.h"

#include "i915_drv.h"
#include "i915_pmu.h"
#include "intel_pm.h"

/* Frequency for the sampling timer for events which need it. */
#define FREQUENCY 200
#define PERIOD max_t(u64, 10000, NSEC_PER_SEC / FREQUENCY)
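
/*
 * At 200 Hz the sampling period is NSEC_PER_SEC / 200 = 5 ms; the max_t()
 * merely clamps the period to no less than 10 us should FREQUENCY ever be
 * raised that far. The counters exposed here are consumed from userspace
 * through the perf interface, for example (PMU name "i915" on integrated
 * parts; hypothetical invocation):
 *
 *	perf stat -e i915/rc6-residency/ -e i915/actual-frequency/ -a sleep 1
 */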

#define ENGINE_SAMPLE_MASK \
	(BIT(I915_SAMPLE_BUSY) | \
	 BIT(I915_SAMPLE_WAIT) | \
	 BIT(I915_SAMPLE_SEMA))

static cpumask_t i915_pmu_cpumask;
static unsigned int i915_pmu_target_cpu = -1;

static u8 engine_config_sample(u64 config)
{
	return config & I915_PMU_SAMPLE_MASK;
}

static u8 engine_event_sample(struct perf_event *event)
{
	return engine_config_sample(event->attr.config);
}

static u8 engine_event_class(struct perf_event *event)
{
	return (event->attr.config >> I915_PMU_CLASS_SHIFT) & 0xff;
}

static u8 engine_event_instance(struct perf_event *event)
{
	return (event->attr.config >> I915_PMU_SAMPLE_BITS) & 0xff;
}

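/*
 * Engine events pack their identity into event->attr.config. Per the uapi
 * definitions (see __I915_PMU_ENGINE() in include/uapi/drm/i915_drm.h), the
 * low I915_PMU_SAMPLE_BITS hold the sample type, the next byte the engine
 * instance and the byte above I915_PMU_CLASS_SHIFT the engine class, which
 * the helpers above unpack again.
 */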
static bool is_engine_config(u64 config)
{
	return config < __I915_PMU_OTHER(0);
}

static unsigned int other_bit(const u64 config)
{
	unsigned int val;

	switch (config) {
	case I915_PMU_ACTUAL_FREQUENCY:
		val = __I915_PMU_ACTUAL_FREQUENCY_ENABLED;
		break;
	case I915_PMU_REQUESTED_FREQUENCY:
		val = __I915_PMU_REQUESTED_FREQUENCY_ENABLED;
		break;
	case I915_PMU_RC6_RESIDENCY:
		val = __I915_PMU_RC6_RESIDENCY_ENABLED;
		break;
	default:
		/*
		 * Events that do not require sampling, or tracking state
		 * transitions between enabled and disabled can be ignored.
		 */
		return -1;
	}

	return I915_ENGINE_SAMPLE_COUNT + val;
}

static unsigned int config_bit(const u64 config)
{
	if (is_engine_config(config))
		return engine_config_sample(config);
	else
		return other_bit(config);
}

static u64 config_mask(u64 config)
{
	return BIT_ULL(config_bit(config));
}

static bool is_engine_event(struct perf_event *event)
{
	return is_engine_config(event->attr.config);
}

static unsigned int event_bit(struct perf_event *event)
{
	return config_bit(event->attr.config);
}

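/*
 * In the internal pmu->enable bitmask the low I915_ENGINE_SAMPLE_COUNT bits
 * mirror the engine sample types, while the "other" (frequency, RC6) events
 * occupy the bits above them, as assigned by other_bit().
 */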
static bool pmu_needs_timer(struct i915_pmu *pmu, bool gpu_active)
{
	struct drm_i915_private *i915 = container_of(pmu, typeof(*i915), pmu);
	u32 enable;

	/*
	 * Only some counters need the sampling timer.
	 *
	 * We start with a bitmask of all currently enabled events.
	 */
	enable = pmu->enable;

	/*
	 * Mask out all the ones which do not need the timer, or in
	 * other words keep all the ones that could need the timer.
	 */
	enable &= config_mask(I915_PMU_ACTUAL_FREQUENCY) |
		  config_mask(I915_PMU_REQUESTED_FREQUENCY) |
		  ENGINE_SAMPLE_MASK;

	/*
	 * When the GPU is idle per-engine counters do not need to be
	 * running so clear those bits out.
	 */
	if (!gpu_active)
		enable &= ~ENGINE_SAMPLE_MASK;
	/*
	 * Also, when software busyness tracking is available we do not
	 * need the timer for the I915_SAMPLE_BUSY counter.
	 */
	else if (i915->caps.scheduler & I915_SCHEDULER_CAP_ENGINE_BUSY_STATS)
		enable &= ~BIT(I915_SAMPLE_BUSY);

	/*
	 * If some bits remain it means we need the sampling timer running.
	 */
	return enable;
}

static u64 __get_rc6(struct intel_gt *gt)
{
	struct drm_i915_private *i915 = gt->i915;
	u64 val;

	val = intel_rc6_residency_ns(&gt->rc6,
				     IS_VALLEYVIEW(i915) ?
				     VLV_GT_RENDER_RC6 :
				     GEN6_GT_GFX_RC6);

	if (HAS_RC6p(i915))
		val += intel_rc6_residency_ns(&gt->rc6, GEN6_GT_GFX_RC6p);

	if (HAS_RC6pp(i915))
		val += intel_rc6_residency_ns(&gt->rc6, GEN6_GT_GFX_RC6pp);

	return val;
}

static inline s64 ktime_since_raw(const ktime_t kt)
{
	return ktime_to_ns(ktime_sub(ktime_get_raw(), kt));
}

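/*
 * Reading the RC6 registers requires the GT to be awake, but waking the GT
 * just to sample RC6 would itself kick it out of RC6. So while the GT is
 * runtime suspended we estimate instead: the device is parked in RC6, so
 * every nanosecond since pmu->sleep_last is counted as RC6 time on top of
 * the last real reading, and the result is clamped to stay monotonic via
 * __I915_SAMPLE_RC6_LAST_REPORTED.
 */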
static u64 get_rc6(struct intel_gt *gt)
{
	struct drm_i915_private *i915 = gt->i915;
	struct i915_pmu *pmu = &i915->pmu;
	unsigned long flags;
	bool awake = false;
	u64 val;

	if (intel_gt_pm_get_if_awake(gt)) {
		val = __get_rc6(gt);
		intel_gt_pm_put_async(gt);
		awake = true;
	}

	spin_lock_irqsave(&pmu->lock, flags);

	if (awake) {
		pmu->sample[__I915_SAMPLE_RC6].cur = val;
	} else {
		/*
		 * We think we are runtime suspended.
		 *
		 * Report the delta from when the device was suspended to now,
		 * on top of the last known real value, as the approximated RC6
		 * counter value.
		 */
		val = ktime_since_raw(pmu->sleep_last);
		val += pmu->sample[__I915_SAMPLE_RC6].cur;
	}

	if (val < pmu->sample[__I915_SAMPLE_RC6_LAST_REPORTED].cur)
		val = pmu->sample[__I915_SAMPLE_RC6_LAST_REPORTED].cur;
	else
		pmu->sample[__I915_SAMPLE_RC6_LAST_REPORTED].cur = val;

	spin_unlock_irqrestore(&pmu->lock, flags);

	return val;
}

static void init_rc6(struct i915_pmu *pmu)
{
	struct drm_i915_private *i915 = container_of(pmu, typeof(*i915), pmu);
	intel_wakeref_t wakeref;

	with_intel_runtime_pm(i915->gt.uncore->rpm, wakeref) {
		pmu->sample[__I915_SAMPLE_RC6].cur = __get_rc6(&i915->gt);
		pmu->sample[__I915_SAMPLE_RC6_LAST_REPORTED].cur =
					pmu->sample[__I915_SAMPLE_RC6].cur;
		pmu->sleep_last = ktime_get_raw();
	}
}

static void park_rc6(struct drm_i915_private *i915)
{
	struct i915_pmu *pmu = &i915->pmu;

	pmu->sample[__I915_SAMPLE_RC6].cur = __get_rc6(&i915->gt);
	pmu->sleep_last = ktime_get_raw();
}

static void __i915_pmu_maybe_start_timer(struct i915_pmu *pmu)
{
	if (!pmu->timer_enabled && pmu_needs_timer(pmu, true)) {
		pmu->timer_enabled = true;
		pmu->timer_last = ktime_get();
		hrtimer_start_range_ns(&pmu->timer,
				       ns_to_ktime(PERIOD), 0,
				       HRTIMER_MODE_REL_PINNED);
	}
}

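/*
 * i915_pmu_gt_parked() and i915_pmu_gt_unparked() are called from the GT
 * power management code on the idle/active transitions, letting the PMU
 * snapshot RC6 before the device suspends and restart the sampling timer
 * once there is work for the engine counters to observe again.
 */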
void i915_pmu_gt_parked(struct drm_i915_private *i915)
{
	struct i915_pmu *pmu = &i915->pmu;

	if (!pmu->base.event_init)
		return;

	spin_lock_irq(&pmu->lock);

	park_rc6(i915);

	/*
	 * Signal sampling timer to stop if only engine events are enabled and
	 * GPU went idle.
	 */
	pmu->timer_enabled = pmu_needs_timer(pmu, false);

	spin_unlock_irq(&pmu->lock);
}

void i915_pmu_gt_unparked(struct drm_i915_private *i915)
{
	struct i915_pmu *pmu = &i915->pmu;

	if (!pmu->base.event_init)
		return;

	spin_lock_irq(&pmu->lock);

	/*
	 * Re-enable sampling timer when GPU goes active.
	 */
	__i915_pmu_maybe_start_timer(pmu);

	spin_unlock_irq(&pmu->lock);
}

static void
add_sample(struct i915_pmu_sample *sample, u32 val)
{
	sample->cur += val;
}

static bool exclusive_mmio_access(const struct drm_i915_private *i915)
{
	/*
	 * We have to avoid concurrent mmio cache line access on gen7 or
	 * risk a machine hang. For a fun history lesson dig out the old
	 * userspace intel_gpu_top and run it on Ivybridge or Haswell!
	 */
	return GRAPHICS_VER(i915) == 7;
}

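/*
 * Engine sampling attributes the whole elapsed period to whichever states a
 * single RING_CTL read shows at the sampling instant, i.e. it is a
 * statistical estimate that converges on the true busyness over many
 * periods rather than an exact measurement.
 */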
static void engine_sample(struct intel_engine_cs *engine, unsigned int period_ns)
{
	struct intel_engine_pmu *pmu = &engine->pmu;
	bool busy;
	u32 val;

	val = ENGINE_READ_FW(engine, RING_CTL);
	if (val == 0) /* powerwell off => engine idle */
		return;

	if (val & RING_WAIT)
		add_sample(&pmu->sample[I915_SAMPLE_WAIT], period_ns);
	if (val & RING_WAIT_SEMAPHORE)
		add_sample(&pmu->sample[I915_SAMPLE_SEMA], period_ns);

	/* No need to sample when busy stats are supported. */
	if (intel_engine_supports_stats(engine))
		return;

	/*
	 * While waiting on a semaphore or event, MI_MODE reports the
	 * ring as idle. However, previously using the seqno, and with
	 * execlists sampling, we account for the ring waiting as the
	 * engine being busy. Therefore, we record the sample as being
	 * busy if either waiting or !idle.
	 */
	busy = val & (RING_WAIT_SEMAPHORE | RING_WAIT);
	if (!busy) {
		val = ENGINE_READ_FW(engine, RING_MI_MODE);
		busy = !(val & MODE_IDLE);
	}
	if (busy)
		add_sample(&pmu->sample[I915_SAMPLE_BUSY], period_ns);
}

static void
engines_sample(struct intel_gt *gt, unsigned int period_ns)
{
	struct drm_i915_private *i915 = gt->i915;
	struct intel_engine_cs *engine;
	enum intel_engine_id id;
	unsigned long flags;

	if ((i915->pmu.enable & ENGINE_SAMPLE_MASK) == 0)
		return;

	if (!intel_gt_pm_is_awake(gt))
		return;

	for_each_engine(engine, gt, id) {
		if (!intel_engine_pm_get_if_awake(engine))
			continue;

		if (exclusive_mmio_access(i915)) {
			spin_lock_irqsave(&engine->uncore->lock, flags);
			engine_sample(engine, period_ns);
			spin_unlock_irqrestore(&engine->uncore->lock, flags);
		} else {
			engine_sample(engine, period_ns);
		}

		intel_engine_pm_put_async(engine);
	}
}

static void
add_sample_mult(struct i915_pmu_sample *sample, u32 val, u32 mul)
{
	sample->cur += mul_u32_u32(val, mul);
}

static bool frequency_sampling_enabled(struct i915_pmu *pmu)
{
	return pmu->enable &
	       (config_mask(I915_PMU_ACTUAL_FREQUENCY) |
		config_mask(I915_PMU_REQUESTED_FREQUENCY));
}

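/*
 * The frequency counters accumulate frequency (in MHz) multiplied by the
 * sampling period (in microseconds). The read side below divides by
 * USEC_PER_SEC, so what perf sees is effectively MHz-weighted seconds, and
 * the delta between two reads divided by the elapsed time yields the
 * average frequency over that interval.
 */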
static void
frequency_sample(struct intel_gt *gt, unsigned int period_ns)
{
	struct drm_i915_private *i915 = gt->i915;
	struct intel_uncore *uncore = gt->uncore;
	struct i915_pmu *pmu = &i915->pmu;
	struct intel_rps *rps = &gt->rps;

	if (!frequency_sampling_enabled(pmu))
		return;

	/* Report 0/0 (actual/requested) frequency while parked. */
	if (!intel_gt_pm_get_if_awake(gt))
		return;

	if (pmu->enable & config_mask(I915_PMU_ACTUAL_FREQUENCY)) {
		u32 val;

		/*
		 * We take a quick peek here without using forcewake
		 * so that we don't perturb the system under observation
		 * (forcewake => !rc6 => increased power use). We expect
		 * that if the read fails because it is outside of the
		 * mmio power well, then it will return 0 -- in which
		 * case we assume the system is running at the intended
		 * frequency. Fortunately, the read should rarely fail!
		 */
		val = intel_uncore_read_fw(uncore, GEN6_RPSTAT1);
		if (val)
			val = intel_rps_get_cagf(rps, val);
		else
			val = rps->cur_freq;

		add_sample_mult(&pmu->sample[__I915_SAMPLE_FREQ_ACT],
				intel_gpu_freq(rps, val), period_ns / 1000);
	}

	if (pmu->enable & config_mask(I915_PMU_REQUESTED_FREQUENCY)) {
		add_sample_mult(&pmu->sample[__I915_SAMPLE_FREQ_REQ],
				intel_rps_get_requested_frequency(rps),
				period_ns / 1000);
	}

	intel_gt_pm_put_async(gt);
}

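/*
 * The hrtimer callback measures the period that actually elapsed since the
 * previous invocation instead of assuming PERIOD, so that late timer expiry
 * does not skew the accumulated samples.
 */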
static enum hrtimer_restart i915_sample(struct hrtimer *hrtimer)
{
	struct drm_i915_private *i915 =
		container_of(hrtimer, struct drm_i915_private, pmu.timer);
	struct i915_pmu *pmu = &i915->pmu;
	struct intel_gt *gt = &i915->gt;
	unsigned int period_ns;
	ktime_t now;

	if (!READ_ONCE(pmu->timer_enabled))
		return HRTIMER_NORESTART;

	now = ktime_get();
	period_ns = ktime_to_ns(ktime_sub(now, pmu->timer_last));
	pmu->timer_last = now;

	/*
	 * Strictly speaking the passed in period may not be 100% accurate for
	 * all internal calculation, since some amount of time can be spent on
	 * grabbing the forcewake. However the potential error from timer call-
	 * back delay greatly dominates this so we keep it simple.
	 */
	engines_sample(gt, period_ns);
	frequency_sample(gt, period_ns);

	hrtimer_forward(hrtimer, now, ns_to_ktime(PERIOD));

	return HRTIMER_RESTART;
}

static void i915_pmu_event_destroy(struct perf_event *event)
{
	struct drm_i915_private *i915 =
		container_of(event->pmu, typeof(*i915), pmu.base);

	drm_WARN_ON(&i915->drm, event->parent);

	drm_dev_put(&i915->drm);
}

static int
engine_event_status(struct intel_engine_cs *engine,
		    enum drm_i915_pmu_engine_sample sample)
{
	switch (sample) {
	case I915_SAMPLE_BUSY:
	case I915_SAMPLE_WAIT:
		break;
	case I915_SAMPLE_SEMA:
		if (GRAPHICS_VER(engine->i915) < 6)
			return -ENODEV;
		break;
	default:
		return -ENOENT;
	}

	return 0;
}

static int
config_status(struct drm_i915_private *i915, u64 config)
{
	struct intel_gt *gt = &i915->gt;

	switch (config) {
	case I915_PMU_ACTUAL_FREQUENCY:
		if (IS_VALLEYVIEW(i915) || IS_CHERRYVIEW(i915))
			/* Requires a mutex for sampling! */
			return -ENODEV;
		fallthrough;
	case I915_PMU_REQUESTED_FREQUENCY:
		if (GRAPHICS_VER(i915) < 6)
			return -ENODEV;
		break;
	case I915_PMU_INTERRUPTS:
		break;
	case I915_PMU_RC6_RESIDENCY:
		if (!gt->rc6.supported)
			return -ENODEV;
		break;
	case I915_PMU_SOFTWARE_GT_AWAKE_TIME:
		break;
	default:
		return -ENOENT;
	}

	return 0;
}

static int engine_event_init(struct perf_event *event)
{
	struct drm_i915_private *i915 =
		container_of(event->pmu, typeof(*i915), pmu.base);
	struct intel_engine_cs *engine;

	engine = intel_engine_lookup_user(i915, engine_event_class(event),
					  engine_event_instance(event));
	if (!engine)
		return -ENODEV;

	return engine_event_status(engine, engine_event_sample(event));
}

static int i915_pmu_event_init(struct perf_event *event)
{
	struct drm_i915_private *i915 =
		container_of(event->pmu, typeof(*i915), pmu.base);
	struct i915_pmu *pmu = &i915->pmu;
	int ret;

	if (pmu->closed)
		return -ENODEV;

	if (event->attr.type != event->pmu->type)
		return -ENOENT;

	/* unsupported modes and filters */
	if (event->attr.sample_period) /* no sampling */
		return -EINVAL;

	if (has_branch_stack(event))
		return -EOPNOTSUPP;

	if (event->cpu < 0)
		return -EINVAL;

	/* only allow running on one cpu at a time */
	if (!cpumask_test_cpu(event->cpu, &i915_pmu_cpumask))
		return -EINVAL;

	if (is_engine_event(event))
		ret = engine_event_init(event);
	else
		ret = config_status(i915, event->attr.config);
	if (ret)
		return ret;

	if (!event->parent) {
		drm_dev_get(&i915->drm);
		event->destroy = i915_pmu_event_destroy;
	}

	return 0;
}

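/*
 * All i915 events are free-running 64-bit counters; the perf core turns
 * consecutive reads of the current value into the deltas it reports, so
 * __i915_pmu_event_read() only ever has to return the latest total.
 */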
static u64 __i915_pmu_event_read(struct perf_event *event)
{
	struct drm_i915_private *i915 =
		container_of(event->pmu, typeof(*i915), pmu.base);
	struct i915_pmu *pmu = &i915->pmu;
	u64 val = 0;

	if (is_engine_event(event)) {
		u8 sample = engine_event_sample(event);
		struct intel_engine_cs *engine;

		engine = intel_engine_lookup_user(i915,
						  engine_event_class(event),
						  engine_event_instance(event));

		if (drm_WARN_ON_ONCE(&i915->drm, !engine)) {
			/* Do nothing */
		} else if (sample == I915_SAMPLE_BUSY &&
			   intel_engine_supports_stats(engine)) {
			ktime_t unused;

			val = ktime_to_ns(intel_engine_get_busy_time(engine,
								     &unused));
		} else {
			val = engine->pmu.sample[sample].cur;
		}
	} else {
		switch (event->attr.config) {
		case I915_PMU_ACTUAL_FREQUENCY:
			val =
			   div_u64(pmu->sample[__I915_SAMPLE_FREQ_ACT].cur,
				   USEC_PER_SEC /* to MHz */);
			break;
		case I915_PMU_REQUESTED_FREQUENCY:
			val =
			   div_u64(pmu->sample[__I915_SAMPLE_FREQ_REQ].cur,
				   USEC_PER_SEC /* to MHz */);
			break;
		case I915_PMU_INTERRUPTS:
			val = READ_ONCE(pmu->irq_count);
			break;
		case I915_PMU_RC6_RESIDENCY:
			val = get_rc6(&i915->gt);
			break;
		case I915_PMU_SOFTWARE_GT_AWAKE_TIME:
			val = ktime_to_ns(intel_gt_get_awake_time(&i915->gt));
			break;
		}
	}

	return val;
}

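/*
 * The prev_count/cmpxchg loop below is the usual lockless pattern for
 * free-running counters: if another reader updated prev_count between our
 * read and the cmpxchg we simply retry, so each delta is accounted exactly
 * once however many concurrent readers there are.
 */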
static void i915_pmu_event_read(struct perf_event *event)
{
	struct drm_i915_private *i915 =
		container_of(event->pmu, typeof(*i915), pmu.base);
	struct hw_perf_event *hwc = &event->hw;
	struct i915_pmu *pmu = &i915->pmu;
	u64 prev, new;

	if (pmu->closed) {
		event->hw.state = PERF_HES_STOPPED;
		return;
	}
again:
	prev = local64_read(&hwc->prev_count);
	new = __i915_pmu_event_read(event);

	if (local64_cmpxchg(&hwc->prev_count, prev, new) != prev)
		goto again;

	local64_add(new - prev, &event->count);
}

static void i915_pmu_enable(struct perf_event *event)
{
	struct drm_i915_private *i915 =
		container_of(event->pmu, typeof(*i915), pmu.base);
	struct i915_pmu *pmu = &i915->pmu;
	unsigned long flags;
	unsigned int bit;

	bit = event_bit(event);
	if (bit == -1)
		goto update;

	spin_lock_irqsave(&pmu->lock, flags);

	/*
	 * Update the bitmask of enabled events and increment
	 * the event reference counter.
	 */
	BUILD_BUG_ON(ARRAY_SIZE(pmu->enable_count) != I915_PMU_MASK_BITS);
	GEM_BUG_ON(bit >= ARRAY_SIZE(pmu->enable_count));
	GEM_BUG_ON(pmu->enable_count[bit] == ~0);

	pmu->enable |= BIT_ULL(bit);
	pmu->enable_count[bit]++;

	/*
	 * Start the sampling timer if needed and not already enabled.
	 */
	__i915_pmu_maybe_start_timer(pmu);

	/*
	 * For per-engine events the bitmask and reference counting
	 * is stored per engine.
	 */
	if (is_engine_event(event)) {
		u8 sample = engine_event_sample(event);
		struct intel_engine_cs *engine;

		engine = intel_engine_lookup_user(i915,
						  engine_event_class(event),
						  engine_event_instance(event));

		BUILD_BUG_ON(ARRAY_SIZE(engine->pmu.enable_count) !=
			     I915_ENGINE_SAMPLE_COUNT);
		BUILD_BUG_ON(ARRAY_SIZE(engine->pmu.sample) !=
			     I915_ENGINE_SAMPLE_COUNT);
		GEM_BUG_ON(sample >= ARRAY_SIZE(engine->pmu.enable_count));
		GEM_BUG_ON(sample >= ARRAY_SIZE(engine->pmu.sample));
		GEM_BUG_ON(engine->pmu.enable_count[sample] == ~0);

		engine->pmu.enable |= BIT(sample);
		engine->pmu.enable_count[sample]++;
	}

	spin_unlock_irqrestore(&pmu->lock, flags);

update:
	/*
	 * Store the current counter value so we can report the correct delta
	 * for all listeners. Even when the event was already enabled and has
	 * an existing non-zero value.
	 */
	local64_set(&event->hw.prev_count, __i915_pmu_event_read(event));
}

static void i915_pmu_disable(struct perf_event *event)
{
	struct drm_i915_private *i915 =
		container_of(event->pmu, typeof(*i915), pmu.base);
	unsigned int bit = event_bit(event);
	struct i915_pmu *pmu = &i915->pmu;
	unsigned long flags;

	if (bit == -1)
		return;

	spin_lock_irqsave(&pmu->lock, flags);

	if (is_engine_event(event)) {
		u8 sample = engine_event_sample(event);
		struct intel_engine_cs *engine;

		engine = intel_engine_lookup_user(i915,
						  engine_event_class(event),
						  engine_event_instance(event));

		GEM_BUG_ON(sample >= ARRAY_SIZE(engine->pmu.enable_count));
		GEM_BUG_ON(sample >= ARRAY_SIZE(engine->pmu.sample));
		GEM_BUG_ON(engine->pmu.enable_count[sample] == 0);

		/*
		 * Decrement the reference count and clear the enabled
		 * bitmask when the last listener on an event goes away.
		 */
		if (--engine->pmu.enable_count[sample] == 0)
			engine->pmu.enable &= ~BIT(sample);
	}

	GEM_BUG_ON(bit >= ARRAY_SIZE(pmu->enable_count));
	GEM_BUG_ON(pmu->enable_count[bit] == 0);
	/*
	 * Decrement the reference count and clear the enabled
	 * bitmask when the last listener on an event goes away.
	 */
	if (--pmu->enable_count[bit] == 0) {
		pmu->enable &= ~BIT_ULL(bit);
		pmu->timer_enabled &= pmu_needs_timer(pmu, true);
	}

	spin_unlock_irqrestore(&pmu->lock, flags);
}

static void i915_pmu_event_start(struct perf_event *event, int flags)
{
	struct drm_i915_private *i915 =
		container_of(event->pmu, typeof(*i915), pmu.base);
	struct i915_pmu *pmu = &i915->pmu;

	if (pmu->closed)
		return;

	i915_pmu_enable(event);
	event->hw.state = 0;
}

static void i915_pmu_event_stop(struct perf_event *event, int flags)
{
	struct drm_i915_private *i915 =
		container_of(event->pmu, typeof(*i915), pmu.base);
	struct i915_pmu *pmu = &i915->pmu;

	if (pmu->closed)
		goto out;

	if (flags & PERF_EF_UPDATE)
		i915_pmu_event_read(event);
	i915_pmu_disable(event);

out:
	event->hw.state = PERF_HES_STOPPED;
}

static int i915_pmu_event_add(struct perf_event *event, int flags)
{
	struct drm_i915_private *i915 =
		container_of(event->pmu, typeof(*i915), pmu.base);
	struct i915_pmu *pmu = &i915->pmu;

	if (pmu->closed)
		return -ENODEV;

	if (flags & PERF_EF_START)
		i915_pmu_event_start(event, flags);

	return 0;
}

static void i915_pmu_event_del(struct perf_event *event, int flags)
{
	i915_pmu_event_stop(event, PERF_EF_UPDATE);
}

static int i915_pmu_event_event_idx(struct perf_event *event)
{
	return 0;
}

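/*
 * What follows is the sysfs plumbing: the "format" group tells the perf
 * tool how to parse the config field, the "events" group (built at runtime
 * by create_event_attributes()) lists the available counters and their
 * units, and the "cpumask" group advertises the single CPU on which events
 * must run.
 */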
struct i915_str_attribute {
	struct device_attribute attr;
	const char *str;
};

static ssize_t i915_pmu_format_show(struct device *dev,
				    struct device_attribute *attr, char *buf)
{
	struct i915_str_attribute *eattr;

	eattr = container_of(attr, struct i915_str_attribute, attr);
	return sprintf(buf, "%s\n", eattr->str);
}

#define I915_PMU_FORMAT_ATTR(_name, _config) \
	(&((struct i915_str_attribute[]) { \
		{ .attr = __ATTR(_name, 0444, i915_pmu_format_show, NULL), \
		  .str = _config, } \
	})[0].attr.attr)

static struct attribute *i915_pmu_format_attrs[] = {
	I915_PMU_FORMAT_ATTR(i915_eventid, "config:0-20"),
	NULL,
};

static const struct attribute_group i915_pmu_format_attr_group = {
	.name = "format",
	.attrs = i915_pmu_format_attrs,
};

struct i915_ext_attribute {
	struct device_attribute attr;
	unsigned long val;
};

static ssize_t i915_pmu_event_show(struct device *dev,
				   struct device_attribute *attr, char *buf)
{
	struct i915_ext_attribute *eattr;

	eattr = container_of(attr, struct i915_ext_attribute, attr);
	return sprintf(buf, "config=0x%lx\n", eattr->val);
}

static ssize_t cpumask_show(struct device *dev,
			    struct device_attribute *attr, char *buf)
{
	return cpumap_print_to_pagebuf(true, buf, &i915_pmu_cpumask);
}

static DEVICE_ATTR_RO(cpumask);

static struct attribute *i915_cpumask_attrs[] = {
	&dev_attr_cpumask.attr,
	NULL,
};

static const struct attribute_group i915_pmu_cpumask_attr_group = {
	.attrs = i915_cpumask_attrs,
};

#define __event(__config, __name, __unit) \
{ \
	.config = (__config), \
	.name = (__name), \
	.unit = (__unit), \
}

#define __engine_event(__sample, __name) \
{ \
	.sample = (__sample), \
	.name = (__name), \
}

static struct i915_ext_attribute *
add_i915_attr(struct i915_ext_attribute *attr, const char *name, u64 config)
{
	sysfs_attr_init(&attr->attr.attr);
	attr->attr.attr.name = name;
	attr->attr.attr.mode = 0444;
	attr->attr.show = i915_pmu_event_show;
	attr->val = config;

	return ++attr;
}

static struct perf_pmu_events_attr *
add_pmu_attr(struct perf_pmu_events_attr *attr, const char *name,
	     const char *str)
{
	sysfs_attr_init(&attr->attr.attr);
	attr->attr.attr.name = name;
	attr->attr.attr.mode = 0444;
	attr->attr.show = perf_event_sysfs_show;
	attr->event_str = str;

	return ++attr;
}

static struct attribute **
create_event_attributes(struct i915_pmu *pmu)
{
	struct drm_i915_private *i915 = container_of(pmu, typeof(*i915), pmu);
	static const struct {
		u64 config;
		const char *name;
		const char *unit;
	} events[] = {
		__event(I915_PMU_ACTUAL_FREQUENCY, "actual-frequency", "M"),
		__event(I915_PMU_REQUESTED_FREQUENCY, "requested-frequency", "M"),
		__event(I915_PMU_INTERRUPTS, "interrupts", NULL),
		__event(I915_PMU_RC6_RESIDENCY, "rc6-residency", "ns"),
		__event(I915_PMU_SOFTWARE_GT_AWAKE_TIME, "software-gt-awake-time", "ns"),
	};
	static const struct {
		enum drm_i915_pmu_engine_sample sample;
		char *name;
	} engine_events[] = {
		__engine_event(I915_SAMPLE_BUSY, "busy"),
		__engine_event(I915_SAMPLE_SEMA, "sema"),
		__engine_event(I915_SAMPLE_WAIT, "wait"),
	};
	unsigned int count = 0;
	struct perf_pmu_events_attr *pmu_attr = NULL, *pmu_iter;
	struct i915_ext_attribute *i915_attr = NULL, *i915_iter;
	struct attribute **attr = NULL, **attr_iter;
	struct intel_engine_cs *engine;
	unsigned int i;

	/* Count how many counters we will be exposing. */
	for (i = 0; i < ARRAY_SIZE(events); i++) {
		if (!config_status(i915, events[i].config))
			count++;
	}

	for_each_uabi_engine(engine, i915) {
		for (i = 0; i < ARRAY_SIZE(engine_events); i++) {
			if (!engine_event_status(engine,
						 engine_events[i].sample))
				count++;
		}
	}

	/* Allocate attribute objects and table. */
	i915_attr = kcalloc(count, sizeof(*i915_attr), GFP_KERNEL);
	if (!i915_attr)
		goto err_alloc;

	pmu_attr = kcalloc(count, sizeof(*pmu_attr), GFP_KERNEL);
	if (!pmu_attr)
		goto err_alloc;

	/* Max one pointer of each attribute type plus a termination entry. */
	attr = kcalloc(count * 2 + 1, sizeof(*attr), GFP_KERNEL);
	if (!attr)
		goto err_alloc;

	i915_iter = i915_attr;
	pmu_iter = pmu_attr;
	attr_iter = attr;

	/* Initialize supported non-engine counters. */
	for (i = 0; i < ARRAY_SIZE(events); i++) {
		char *str;

		if (config_status(i915, events[i].config))
			continue;

		str = kstrdup(events[i].name, GFP_KERNEL);
		if (!str)
			goto err;

		*attr_iter++ = &i915_iter->attr.attr;
		i915_iter = add_i915_attr(i915_iter, str, events[i].config);

		if (events[i].unit) {
			str = kasprintf(GFP_KERNEL, "%s.unit", events[i].name);
			if (!str)
				goto err;

			*attr_iter++ = &pmu_iter->attr.attr;
			pmu_iter = add_pmu_attr(pmu_iter, str, events[i].unit);
		}
	}

	/* Initialize supported engine counters. */
	for_each_uabi_engine(engine, i915) {
		for (i = 0; i < ARRAY_SIZE(engine_events); i++) {
			char *str;

			if (engine_event_status(engine,
						engine_events[i].sample))
				continue;

			str = kasprintf(GFP_KERNEL, "%s-%s",
					engine->name, engine_events[i].name);
			if (!str)
				goto err;

			*attr_iter++ = &i915_iter->attr.attr;
			i915_iter =
				add_i915_attr(i915_iter, str,
					      __I915_PMU_ENGINE(engine->uabi_class,
								engine->uabi_instance,
								engine_events[i].sample));

			str = kasprintf(GFP_KERNEL, "%s-%s.unit",
					engine->name, engine_events[i].name);
			if (!str)
				goto err;

			*attr_iter++ = &pmu_iter->attr.attr;
			pmu_iter = add_pmu_attr(pmu_iter, str, "ns");
		}
	}

	pmu->i915_attr = i915_attr;
	pmu->pmu_attr = pmu_attr;

	return attr;

err:
	for (attr_iter = attr; *attr_iter; attr_iter++)
		kfree((*attr_iter)->name);

err_alloc:
	kfree(attr);
	kfree(i915_attr);
	kfree(pmu_attr);

	return NULL;
}

static void free_event_attributes(struct i915_pmu *pmu)
{
	struct attribute **attr_iter = pmu->events_attr_group.attrs;

	for (; *attr_iter; attr_iter++)
		kfree((*attr_iter)->name);

	kfree(pmu->events_attr_group.attrs);
	kfree(pmu->i915_attr);
	kfree(pmu->pmu_attr);

	pmu->events_attr_group.attrs = NULL;
	pmu->i915_attr = NULL;
	pmu->pmu_attr = NULL;
}

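/*
 * i915 is an "uncore" PMU: its counters are device-wide, not per-CPU, so
 * all events are pinned to a single designated reader CPU. The hotplug
 * callbacks below keep that designation valid, migrating the perf context
 * to a sibling CPU if the current reader goes offline.
 */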
static int i915_pmu_cpu_online(unsigned int cpu, struct hlist_node *node)
{
	struct i915_pmu *pmu = hlist_entry_safe(node, typeof(*pmu), cpuhp.node);

	GEM_BUG_ON(!pmu->base.event_init);

	/* Select the first online CPU as a designated reader. */
	if (!cpumask_weight(&i915_pmu_cpumask))
		cpumask_set_cpu(cpu, &i915_pmu_cpumask);

	return 0;
}

static int i915_pmu_cpu_offline(unsigned int cpu, struct hlist_node *node)
{
	struct i915_pmu *pmu = hlist_entry_safe(node, typeof(*pmu), cpuhp.node);
	unsigned int target = i915_pmu_target_cpu;

	GEM_BUG_ON(!pmu->base.event_init);

	/*
	 * Unregistering an instance generates a CPU offline event which we must
	 * ignore to avoid incorrectly modifying the shared i915_pmu_cpumask.
	 */
	if (pmu->closed)
		return 0;

	if (cpumask_test_and_clear_cpu(cpu, &i915_pmu_cpumask)) {
		target = cpumask_any_but(topology_sibling_cpumask(cpu), cpu);

		/* Migrate events if there is a valid target */
		if (target < nr_cpu_ids) {
			cpumask_set_cpu(target, &i915_pmu_cpumask);
			i915_pmu_target_cpu = target;
		}
	}

	if (target < nr_cpu_ids && target != pmu->cpuhp.cpu) {
		perf_pmu_migrate_context(&pmu->base, cpu, target);
		pmu->cpuhp.cpu = target;
	}

	return 0;
}

static enum cpuhp_state cpuhp_slot = CPUHP_INVALID;

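/*
 * A single dynamic cpuhp state is registered once at module load and then
 * shared by every i915 instance via cpuhp_state_add_instance(), so the
 * hotplug callbacks above run once per registered PMU.
 */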
int i915_pmu_init(void)
{
	int ret;

	ret = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN,
				      "perf/x86/intel/i915:online",
				      i915_pmu_cpu_online,
				      i915_pmu_cpu_offline);
	if (ret < 0)
		pr_notice("Failed to setup cpuhp state for i915 PMU! (%d)\n",
			  ret);
	else
		cpuhp_slot = ret;

	return 0;
}

void i915_pmu_exit(void)
{
	if (cpuhp_slot != CPUHP_INVALID)
		cpuhp_remove_multi_state(cpuhp_slot);
}

static int i915_pmu_register_cpuhp_state(struct i915_pmu *pmu)
{
	if (cpuhp_slot == CPUHP_INVALID)
		return -EINVAL;

	return cpuhp_state_add_instance(cpuhp_slot, &pmu->cpuhp.node);
}

static void i915_pmu_unregister_cpuhp_state(struct i915_pmu *pmu)
{
	cpuhp_state_remove_instance(cpuhp_slot, &pmu->cpuhp.node);
}

static bool is_igp(struct drm_i915_private *i915)
{
	struct pci_dev *pdev = to_pci_dev(i915->drm.dev);

	/* IGP is 0000:00:02.0 */
	return pci_domain_nr(pdev->bus) == 0 &&
	       pdev->bus->number == 0 &&
	       PCI_SLOT(pdev->devfn) == 2 &&
	       PCI_FUNC(pdev->devfn) == 0;
}

void i915_pmu_register(struct drm_i915_private *i915)
{
	struct i915_pmu *pmu = &i915->pmu;
	const struct attribute_group *attr_groups[] = {
		&i915_pmu_format_attr_group,
		&pmu->events_attr_group,
		&i915_pmu_cpumask_attr_group,
		NULL
	};

	int ret = -ENOMEM;

	if (GRAPHICS_VER(i915) <= 2) {
		drm_info(&i915->drm, "PMU not supported for this GPU.");
		return;
	}

	spin_lock_init(&pmu->lock);
	hrtimer_init(&pmu->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	pmu->timer.function = i915_sample;
	pmu->cpuhp.cpu = -1;
	init_rc6(pmu);

	if (!is_igp(i915)) {
		pmu->name = kasprintf(GFP_KERNEL,
				      "i915_%s",
				      dev_name(i915->drm.dev));
		if (pmu->name) {
			/* tools/perf reserves colons as special. */
			strreplace((char *)pmu->name, ':', '_');
		}
	} else {
		pmu->name = "i915";
	}
	if (!pmu->name)
		goto err;

	pmu->events_attr_group.name = "events";
	pmu->events_attr_group.attrs = create_event_attributes(pmu);
	if (!pmu->events_attr_group.attrs)
		goto err_name;

	pmu->base.attr_groups = kmemdup(attr_groups, sizeof(attr_groups),
					GFP_KERNEL);
	if (!pmu->base.attr_groups)
		goto err_attr;

	pmu->base.module	= THIS_MODULE;
	pmu->base.task_ctx_nr	= perf_invalid_context;
	pmu->base.event_init	= i915_pmu_event_init;
	pmu->base.add		= i915_pmu_event_add;
	pmu->base.del		= i915_pmu_event_del;
	pmu->base.start		= i915_pmu_event_start;
	pmu->base.stop		= i915_pmu_event_stop;
	pmu->base.read		= i915_pmu_event_read;
	pmu->base.event_idx	= i915_pmu_event_event_idx;

	ret = perf_pmu_register(&pmu->base, pmu->name, -1);
	if (ret)
		goto err_groups;

	ret = i915_pmu_register_cpuhp_state(pmu);
	if (ret)
		goto err_unreg;

	return;

err_unreg:
	perf_pmu_unregister(&pmu->base);
err_groups:
	kfree(pmu->base.attr_groups);
err_attr:
	pmu->base.event_init = NULL;
	free_event_attributes(pmu);
err_name:
	if (!is_igp(i915))
		kfree(pmu->name);
err:
	drm_notice(&i915->drm, "Failed to register PMU!\n");
}

void i915_pmu_unregister(struct drm_i915_private *i915)
{
	struct i915_pmu *pmu = &i915->pmu;

	if (!pmu->base.event_init)
		return;

	/*
	 * "Disconnect" the PMU callbacks - since all are atomic synchronize_rcu
	 * ensures all currently executing ones will have exited before we
	 * proceed with unregistration.
	 */
	pmu->closed = true;
	synchronize_rcu();

	hrtimer_cancel(&pmu->timer);

	i915_pmu_unregister_cpuhp_state(pmu);

	perf_pmu_unregister(&pmu->base);
	pmu->base.event_init = NULL;
	kfree(pmu->base.attr_groups);
	if (!is_igp(i915))
		kfree(pmu->name);
	free_event_attributes(pmu);
}