Lines Matching refs:pmu
105 static bool pmu_needs_timer(struct i915_pmu *pmu, bool gpu_active) in pmu_needs_timer() argument
107 struct drm_i915_private *i915 = container_of(pmu, typeof(*i915), pmu); in pmu_needs_timer()
115 enable = pmu->enable; in pmu_needs_timer()
171 struct i915_pmu *pmu = &i915->pmu; in get_rc6() local
182 spin_lock_irqsave(&pmu->lock, flags); in get_rc6()
185 pmu->sample[__I915_SAMPLE_RC6].cur = val; in get_rc6()
194 val = ktime_since_raw(pmu->sleep_last); in get_rc6()
195 val += pmu->sample[__I915_SAMPLE_RC6].cur; in get_rc6()
198 if (val < pmu->sample[__I915_SAMPLE_RC6_LAST_REPORTED].cur) in get_rc6()
199 val = pmu->sample[__I915_SAMPLE_RC6_LAST_REPORTED].cur; in get_rc6()
201 pmu->sample[__I915_SAMPLE_RC6_LAST_REPORTED].cur = val; in get_rc6()
203 spin_unlock_irqrestore(&pmu->lock, flags); in get_rc6()
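
The get_rc6() hits above show a monotonicity clamp: because the RC6 value can come either from hardware or from an estimate taken while the device was parked, the reported residency is never allowed to go backwards, each read being clamped against __I915_SAMPLE_RC6_LAST_REPORTED under pmu->lock. A minimal userspace sketch of the same clamp (single-threaded, so the locking is elided; all names here are illustrative):

    #include <stdint.h>
    #include <stdio.h>

    /*
     * Model of the clamp in get_rc6(): a counter whose raw estimate may
     * jitter backwards is forced to be non-decreasing by remembering the
     * last value handed out (the driver does this under pmu->lock).
     */
    struct mono_counter {
            uint64_t last_reported;
    };

    static uint64_t mono_read(struct mono_counter *c, uint64_t raw)
    {
            if (raw < c->last_reported)
                    raw = c->last_reported;
            c->last_reported = raw;
            return raw;
    }

    int main(void)
    {
            struct mono_counter c = { 0 };

            printf("%llu\n", (unsigned long long)mono_read(&c, 100)); /* 100 */
            printf("%llu\n", (unsigned long long)mono_read(&c, 90));  /* clamped: 100 */
            printf("%llu\n", (unsigned long long)mono_read(&c, 130)); /* 130 */
            return 0;
    }
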
208 static void init_rc6(struct i915_pmu *pmu) in init_rc6() argument
210 struct drm_i915_private *i915 = container_of(pmu, typeof(*i915), pmu); in init_rc6()
214 pmu->sample[__I915_SAMPLE_RC6].cur = __get_rc6(&i915->gt); in init_rc6()
215 pmu->sample[__I915_SAMPLE_RC6_LAST_REPORTED].cur = in init_rc6()
216 pmu->sample[__I915_SAMPLE_RC6].cur; in init_rc6()
217 pmu->sleep_last = ktime_get_raw(); in init_rc6()
223 struct i915_pmu *pmu = &i915->pmu; in park_rc6() local
225 pmu->sample[__I915_SAMPLE_RC6].cur = __get_rc6(&i915->gt); in park_rc6()
226 pmu->sleep_last = ktime_get_raw(); in park_rc6()
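
init_rc6() and park_rc6() pair with the ktime_since_raw() arithmetic in get_rc6(): at park time the driver snapshots the hardware RC6 counter together with a raw monotonic timestamp in pmu->sleep_last, and while parked the residency is estimated as snapshot plus time since park, since a parked device is in RC6 by definition. A sketch of that estimate, using CLOCK_MONOTONIC as a stand-in for ktime_get_raw():

    #include <stdint.h>
    #include <stdio.h>
    #include <time.h>
    #include <unistd.h>

    /* Stand-in for ktime_get_raw(). */
    static uint64_t now_ns(void)
    {
            struct timespec ts;

            clock_gettime(CLOCK_MONOTONIC, &ts);
            return (uint64_t)ts.tv_sec * 1000000000ull + ts.tv_nsec;
    }

    int main(void)
    {
            uint64_t rc6_at_park = 123456789;       /* stand-in for __get_rc6(&i915->gt) */
            uint64_t sleep_last = now_ns();         /* pmu->sleep_last */

            usleep(2000);                           /* device stays parked */

            /* get_rc6() while parked: snapshot + time spent asleep. */
            uint64_t estimate = rc6_at_park + (now_ns() - sleep_last);
            printf("estimated RC6: %llu ns\n", (unsigned long long)estimate);
            return 0;
    }
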
229 static void __i915_pmu_maybe_start_timer(struct i915_pmu *pmu) in __i915_pmu_maybe_start_timer() argument
231 if (!pmu->timer_enabled && pmu_needs_timer(pmu, true)) { in __i915_pmu_maybe_start_timer()
232 pmu->timer_enabled = true; in __i915_pmu_maybe_start_timer()
233 pmu->timer_last = ktime_get(); in __i915_pmu_maybe_start_timer()
234 hrtimer_start_range_ns(&pmu->timer, in __i915_pmu_maybe_start_timer()
242 struct i915_pmu *pmu = &i915->pmu; in i915_pmu_gt_parked() local
244 if (!pmu->base.event_init) in i915_pmu_gt_parked()
247 spin_lock_irq(&pmu->lock); in i915_pmu_gt_parked()
255 pmu->timer_enabled = pmu_needs_timer(pmu, false); in i915_pmu_gt_parked()
257 spin_unlock_irq(&pmu->lock); in i915_pmu_gt_parked()
262 struct i915_pmu *pmu = &i915->pmu; in i915_pmu_gt_unparked() local
264 if (!pmu->base.event_init) in i915_pmu_gt_unparked()
267 spin_lock_irq(&pmu->lock); in i915_pmu_gt_unparked()
272 __i915_pmu_maybe_start_timer(pmu); in i915_pmu_gt_unparked()
274 spin_unlock_irq(&pmu->lock); in i915_pmu_gt_unparked()
295 struct intel_engine_pmu *pmu = &engine->pmu; in engine_sample() local
304 add_sample(&pmu->sample[I915_SAMPLE_WAIT], period_ns); in engine_sample()
306 add_sample(&pmu->sample[I915_SAMPLE_SEMA], period_ns); in engine_sample()
325 add_sample(&pmu->sample[I915_SAMPLE_BUSY], period_ns); in engine_sample()
336 if ((i915->pmu.enable & ENGINE_SAMPLE_MASK) == 0) in engines_sample()
364 static bool frequency_sampling_enabled(struct i915_pmu *pmu) in frequency_sampling_enabled() argument
366 return pmu->enable & in frequency_sampling_enabled()
376 struct i915_pmu *pmu = &i915->pmu; in frequency_sample() local
379 if (!frequency_sampling_enabled(pmu)) in frequency_sample()
386 if (pmu->enable & config_mask(I915_PMU_ACTUAL_FREQUENCY)) { in frequency_sample()
404 add_sample_mult(&pmu->sample[__I915_SAMPLE_FREQ_ACT], in frequency_sample()
408 if (pmu->enable & config_mask(I915_PMU_REQUESTED_FREQUENCY)) { in frequency_sample()
409 add_sample_mult(&pmu->sample[__I915_SAMPLE_FREQ_REQ], in frequency_sample()
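
The frequency_sample() hits accumulate each frequency reading weighted by the elapsed sample period via add_sample_mult(); the matching div_u64() hits in __i915_pmu_event_read() further down divide the running sum back out, yielding a time-weighted average. A simplified model of that scheme (the driver divides by its fixed sampling frequency rather than tracking total time, but the idea is the same; all names here are illustrative):

    #include <stdint.h>
    #include <stdio.h>

    /*
     * Simplified model of add_sample_mult() plus the read-side division:
     * accumulate freq * period per tick, divide by total time at read to
     * recover a time-weighted average frequency.
     */
    struct freq_sample {
            uint64_t weighted_sum;  /* sum of freq_mhz * period_ns */
            uint64_t total_ns;
    };

    static void add_sample_mult(struct freq_sample *s, uint32_t freq_mhz,
                                uint64_t period_ns)
    {
            s->weighted_sum += (uint64_t)freq_mhz * period_ns;
            s->total_ns += period_ns;
    }

    static uint64_t read_avg_mhz(const struct freq_sample *s)
    {
            return s->total_ns ? s->weighted_sum / s->total_ns : 0;
    }

    int main(void)
    {
            struct freq_sample s = { 0, 0 };

            add_sample_mult(&s, 300, 1000000);      /* 300 MHz for 1 ms */
            add_sample_mult(&s, 1100, 3000000);     /* 1100 MHz for 3 ms */
            printf("avg %llu MHz\n", (unsigned long long)read_avg_mhz(&s)); /* 900 */
            return 0;
    }
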
420 container_of(hrtimer, struct drm_i915_private, pmu.timer); in i915_sample()
421 struct i915_pmu *pmu = &i915->pmu; in i915_sample() local
426 if (!READ_ONCE(pmu->timer_enabled)) in i915_sample()
430 period_ns = ktime_to_ns(ktime_sub(now, pmu->timer_last)); in i915_sample()
431 pmu->timer_last = now; in i915_sample()
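
i915_sample() is the hrtimer callback behind all of the sampling above. Note that it charges counters with the measured time since the previous expiry (now minus pmu->timer_last), not the nominal timer period, so accumulated busyness stays accurate even if the timer fires late. A userspace sketch of the same bookkeeping:

    #include <stdint.h>
    #include <stdio.h>
    #include <time.h>
    #include <unistd.h>

    /* Stand-in for ktime_get(). */
    static uint64_t now_ns(void)
    {
            struct timespec ts;

            clock_gettime(CLOCK_MONOTONIC, &ts);
            return (uint64_t)ts.tv_sec * 1000000000ull + ts.tv_nsec;
    }

    int main(void)
    {
            uint64_t timer_last = now_ns();         /* pmu->timer_last */
            uint64_t busy_ns = 0;
            int tick;

            for (tick = 0; tick < 3; tick++) {
                    uint64_t now, period_ns;

                    usleep(5000);                   /* stand-in for the hrtimer period */
                    now = now_ns();
                    period_ns = now - timer_last;   /* measured, not nominal */
                    timer_last = now;
                    busy_ns += period_ns;           /* cf. add_sample(..., period_ns) */
            }
            printf("accumulated %llu ns over 3 ticks\n", (unsigned long long)busy_ns);
            return 0;
    }
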
450 container_of(event->pmu, typeof(*i915), pmu.base); in i915_pmu_event_destroy()
509 container_of(event->pmu, typeof(*i915), pmu.base); in engine_event_init()
523 container_of(event->pmu, typeof(*i915), pmu.base); in i915_pmu_event_init()
524 struct i915_pmu *pmu = &i915->pmu; in i915_pmu_event_init() local
527 if (pmu->closed) in i915_pmu_event_init()
530 if (event->attr.type != event->pmu->type) in i915_pmu_event_init()
565 container_of(event->pmu, typeof(*i915), pmu.base); in __i915_pmu_event_read()
566 struct i915_pmu *pmu = &i915->pmu; in __i915_pmu_event_read() local
586 val = engine->pmu.sample[sample].cur; in __i915_pmu_event_read()
592 div_u64(pmu->sample[__I915_SAMPLE_FREQ_ACT].cur, in __i915_pmu_event_read()
597 div_u64(pmu->sample[__I915_SAMPLE_FREQ_REQ].cur, in __i915_pmu_event_read()
601 val = READ_ONCE(pmu->irq_count); in __i915_pmu_event_read()
618 container_of(event->pmu, typeof(*i915), pmu.base); in i915_pmu_event_read()
620 struct i915_pmu *pmu = &i915->pmu; in i915_pmu_event_read() local
623 if (pmu->closed) { in i915_pmu_event_read()
640 container_of(event->pmu, typeof(*i915), pmu.base); in i915_pmu_enable()
641 struct i915_pmu *pmu = &i915->pmu; in i915_pmu_enable() local
649 spin_lock_irqsave(&pmu->lock, flags); in i915_pmu_enable()
655 BUILD_BUG_ON(ARRAY_SIZE(pmu->enable_count) != I915_PMU_MASK_BITS); in i915_pmu_enable()
656 GEM_BUG_ON(bit >= ARRAY_SIZE(pmu->enable_count)); in i915_pmu_enable()
657 GEM_BUG_ON(pmu->enable_count[bit] == ~0); in i915_pmu_enable()
659 pmu->enable |= BIT_ULL(bit); in i915_pmu_enable()
660 pmu->enable_count[bit]++; in i915_pmu_enable()
665 __i915_pmu_maybe_start_timer(pmu); in i915_pmu_enable()
679 BUILD_BUG_ON(ARRAY_SIZE(engine->pmu.enable_count) != in i915_pmu_enable()
681 BUILD_BUG_ON(ARRAY_SIZE(engine->pmu.sample) != in i915_pmu_enable()
683 GEM_BUG_ON(sample >= ARRAY_SIZE(engine->pmu.enable_count)); in i915_pmu_enable()
684 GEM_BUG_ON(sample >= ARRAY_SIZE(engine->pmu.sample)); in i915_pmu_enable()
685 GEM_BUG_ON(engine->pmu.enable_count[sample] == ~0); in i915_pmu_enable()
687 engine->pmu.enable |= BIT(sample); in i915_pmu_enable()
688 engine->pmu.enable_count[sample]++; in i915_pmu_enable()
691 spin_unlock_irqrestore(&pmu->lock, flags); in i915_pmu_enable()
705 container_of(event->pmu, typeof(*i915), pmu.base); in i915_pmu_disable()
707 struct i915_pmu *pmu = &i915->pmu; in i915_pmu_disable() local
713 spin_lock_irqsave(&pmu->lock, flags); in i915_pmu_disable()
723 GEM_BUG_ON(sample >= ARRAY_SIZE(engine->pmu.enable_count)); in i915_pmu_disable()
724 GEM_BUG_ON(sample >= ARRAY_SIZE(engine->pmu.sample)); in i915_pmu_disable()
725 GEM_BUG_ON(engine->pmu.enable_count[sample] == 0); in i915_pmu_disable()
731 if (--engine->pmu.enable_count[sample] == 0) in i915_pmu_disable()
732 engine->pmu.enable &= ~BIT(sample); in i915_pmu_disable()
735 GEM_BUG_ON(bit >= ARRAY_SIZE(pmu->enable_count)); in i915_pmu_disable()
736 GEM_BUG_ON(pmu->enable_count[bit] == 0); in i915_pmu_disable()
741 if (--pmu->enable_count[bit] == 0) { in i915_pmu_disable()
742 pmu->enable &= ~BIT_ULL(bit); in i915_pmu_disable()
743 pmu->timer_enabled &= pmu_needs_timer(pmu, true); in i915_pmu_disable()
746 spin_unlock_irqrestore(&pmu->lock, flags); in i915_pmu_disable()
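
i915_pmu_enable() and i915_pmu_disable() together implement a reference-counted bitmask: each perf event bumps enable_count[bit] and sets the corresponding bit in pmu->enable (the mask that engines_sample() and frequency_sampling_enabled() test), and the bit is cleared only when the last event using it goes away. The per-engine enable/enable_count pair visible in the same hits repeats the identical pattern at engine scope. A self-contained sketch of the pattern (locking elided, names illustrative):

    #include <assert.h>
    #include <stdint.h>
    #include <stdio.h>

    #define MASK_BITS 64

    /*
     * Model of the pmu->enable / pmu->enable_count pairing: a bitmask of
     * active sample types, each bit backed by a reference count so that
     * several perf events can share one sampler.
     */
    struct enable_mask {
            uint64_t enable;
            unsigned int count[MASK_BITS];
    };

    static void sampler_get(struct enable_mask *m, unsigned int bit)
    {
            assert(bit < MASK_BITS && m->count[bit] != ~0u); /* cf. GEM_BUG_ON() */
            m->enable |= 1ull << bit;
            m->count[bit]++;
    }

    static void sampler_put(struct enable_mask *m, unsigned int bit)
    {
            assert(bit < MASK_BITS && m->count[bit] != 0);
            if (--m->count[bit] == 0)
                    m->enable &= ~(1ull << bit);
    }

    int main(void)
    {
            struct enable_mask m = { 0 };

            sampler_get(&m, 3);
            sampler_get(&m, 3);     /* second event sharing bit 3 */
            sampler_put(&m, 3);
            printf("after one put: %#llx\n", (unsigned long long)m.enable); /* bit 3 set */
            sampler_put(&m, 3);
            printf("after both:    %#llx\n", (unsigned long long)m.enable); /* 0 */
            return 0;
    }
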
752 container_of(event->pmu, typeof(*i915), pmu.base); in i915_pmu_event_start()
753 struct i915_pmu *pmu = &i915->pmu; in i915_pmu_event_start() local
755 if (pmu->closed) in i915_pmu_event_start()
765 container_of(event->pmu, typeof(*i915), pmu.base); in i915_pmu_event_stop()
766 struct i915_pmu *pmu = &i915->pmu; in i915_pmu_event_stop() local
768 if (pmu->closed) in i915_pmu_event_stop()
782 container_of(event->pmu, typeof(*i915), pmu.base); in i915_pmu_event_add()
783 struct i915_pmu *pmu = &i915->pmu; in i915_pmu_event_add() local
785 if (pmu->closed) in i915_pmu_event_add()
904 create_event_attributes(struct i915_pmu *pmu) in create_event_attributes() argument
906 struct drm_i915_private *i915 = container_of(pmu, typeof(*i915), pmu); in create_event_attributes()
1020 pmu->i915_attr = i915_attr; in create_event_attributes()
1021 pmu->pmu_attr = pmu_attr; in create_event_attributes()
1037 static void free_event_attributes(struct i915_pmu *pmu) in free_event_attributes() argument
1039 struct attribute **attr_iter = pmu->events_attr_group.attrs; in free_event_attributes()
1044 kfree(pmu->events_attr_group.attrs); in free_event_attributes()
1045 kfree(pmu->i915_attr); in free_event_attributes()
1046 kfree(pmu->pmu_attr); in free_event_attributes()
1048 pmu->events_attr_group.attrs = NULL; in free_event_attributes()
1049 pmu->i915_attr = NULL; in free_event_attributes()
1050 pmu->pmu_attr = NULL; in free_event_attributes()
1055 struct i915_pmu *pmu = hlist_entry_safe(node, typeof(*pmu), cpuhp.node); in i915_pmu_cpu_online() local
1057 GEM_BUG_ON(!pmu->base.event_init); in i915_pmu_cpu_online()
1068 struct i915_pmu *pmu = hlist_entry_safe(node, typeof(*pmu), cpuhp.node); in i915_pmu_cpu_offline() local
1071 GEM_BUG_ON(!pmu->base.event_init); in i915_pmu_cpu_offline()
1077 if (pmu->closed) in i915_pmu_cpu_offline()
1090 if (target < nr_cpu_ids && target != pmu->cpuhp.cpu) { in i915_pmu_cpu_offline()
1091 perf_pmu_migrate_context(&pmu->base, cpu, target); in i915_pmu_cpu_offline()
1092 pmu->cpuhp.cpu = target; in i915_pmu_cpu_offline()
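
The CPU hotplug hits show how the PMU survives its host CPU going offline: i915_pmu_cpu_offline() picks a surviving CPU and calls perf_pmu_migrate_context() to move the events there, recording the new home in pmu->cpuhp.cpu. A toy model of the target selection (the real code uses cpumask helpers to choose the target; this sketch just takes the first remaining online CPU):

    #include <stdio.h>

    #define NR_CPUS 8

    /*
     * Toy model of the choice in i915_pmu_cpu_offline(): when the CPU
     * hosting the PMU context dies, pick any surviving online CPU and
     * migrate the perf events there.
     */
    static int pick_new_target(const int online[NR_CPUS], int dying)
    {
            int cpu;

            for (cpu = 0; cpu < NR_CPUS; cpu++)
                    if (online[cpu] && cpu != dying)
                            return cpu;
            return -1;      /* the nr_cpu_ids case: nowhere left to go */
    }

    int main(void)
    {
            int online[NR_CPUS] = { 1, 1, 1, 1, 1, 1, 1, 1 };
            int hosting = 2;
            int target;

            online[hosting] = 0;    /* CPU 2 goes offline */
            target = pick_new_target(online, hosting);
            if (target >= 0)
                    /* kernel: perf_pmu_migrate_context(&pmu->base, cpu, target); */
                    printf("migrate PMU context: CPU %d -> CPU %d\n", hosting, target);
            return 0;
    }
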
1123 static int i915_pmu_register_cpuhp_state(struct i915_pmu *pmu) in i915_pmu_register_cpuhp_state() argument
1128 return cpuhp_state_add_instance(cpuhp_slot, &pmu->cpuhp.node); in i915_pmu_register_cpuhp_state()
1131 static void i915_pmu_unregister_cpuhp_state(struct i915_pmu *pmu) in i915_pmu_unregister_cpuhp_state() argument
1133 cpuhp_state_remove_instance(cpuhp_slot, &pmu->cpuhp.node); in i915_pmu_unregister_cpuhp_state()
1149 struct i915_pmu *pmu = &i915->pmu; in i915_pmu_register() local
1152 &pmu->events_attr_group, in i915_pmu_register()
1164 spin_lock_init(&pmu->lock); in i915_pmu_register()
1165 hrtimer_init(&pmu->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); in i915_pmu_register()
1166 pmu->timer.function = i915_sample; in i915_pmu_register()
1167 pmu->cpuhp.cpu = -1; in i915_pmu_register()
1168 init_rc6(pmu); in i915_pmu_register()
1171 pmu->name = kasprintf(GFP_KERNEL, in i915_pmu_register()
1174 if (pmu->name) { in i915_pmu_register()
1176 strreplace((char *)pmu->name, ':', '_'); in i915_pmu_register()
1179 pmu->name = "i915"; in i915_pmu_register()
1181 if (!pmu->name) in i915_pmu_register()
1184 pmu->events_attr_group.name = "events"; in i915_pmu_register()
1185 pmu->events_attr_group.attrs = create_event_attributes(pmu); in i915_pmu_register()
1186 if (!pmu->events_attr_group.attrs) in i915_pmu_register()
1189 pmu->base.attr_groups = kmemdup(attr_groups, sizeof(attr_groups), in i915_pmu_register()
1191 if (!pmu->base.attr_groups) in i915_pmu_register()
1194 pmu->base.module = THIS_MODULE; in i915_pmu_register()
1195 pmu->base.task_ctx_nr = perf_invalid_context; in i915_pmu_register()
1196 pmu->base.event_init = i915_pmu_event_init; in i915_pmu_register()
1197 pmu->base.add = i915_pmu_event_add; in i915_pmu_register()
1198 pmu->base.del = i915_pmu_event_del; in i915_pmu_register()
1199 pmu->base.start = i915_pmu_event_start; in i915_pmu_register()
1200 pmu->base.stop = i915_pmu_event_stop; in i915_pmu_register()
1201 pmu->base.read = i915_pmu_event_read; in i915_pmu_register()
1202 pmu->base.event_idx = i915_pmu_event_event_idx; in i915_pmu_register()
1204 ret = perf_pmu_register(&pmu->base, pmu->name, -1); in i915_pmu_register()
1208 ret = i915_pmu_register_cpuhp_state(pmu); in i915_pmu_register()
1215 perf_pmu_unregister(&pmu->base); in i915_pmu_register()
1217 kfree(pmu->base.attr_groups); in i915_pmu_register()
1219 pmu->base.event_init = NULL; in i915_pmu_register()
1220 free_event_attributes(pmu); in i915_pmu_register()
1223 kfree(pmu->name); in i915_pmu_register()
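
i915_pmu_register() ends with a classic goto-based unwind: each setup step that can fail jumps to a label that releases exactly what the earlier steps acquired, in reverse order (perf_pmu_unregister(), then the attr_groups copy, then the event attributes, then the name). A minimal standalone illustration of the idiom (the resources here are plain heap allocations, purely for demonstration):

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    /*
     * Illustration of the goto-unwind idiom used by i915_pmu_register():
     * every failure label undoes only the steps that already succeeded,
     * in reverse order.
     */
    static int setup(void)
    {
            char *name, *attrs, *groups;

            name = strdup("i915_model");
            if (!name)
                    goto err;

            attrs = malloc(64);
            if (!attrs)
                    goto err_name;

            groups = malloc(64);
            if (!groups)
                    goto err_attrs;

            printf("registered %s\n", name);

            /* Demo only: a real register would keep these alive. */
            free(groups);
            free(attrs);
            free(name);
            return 0;

    err_attrs:
            free(attrs);
    err_name:
            free(name);
    err:
            return -1;
    }

    int main(void)
    {
            return setup() ? EXIT_FAILURE : EXIT_SUCCESS;
    }
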
1230 struct i915_pmu *pmu = &i915->pmu; in i915_pmu_unregister() local
1232 if (!pmu->base.event_init) in i915_pmu_unregister()
1240 pmu->closed = true; in i915_pmu_unregister()
1243 hrtimer_cancel(&pmu->timer); in i915_pmu_unregister()
1245 i915_pmu_unregister_cpuhp_state(pmu); in i915_pmu_unregister()
1247 perf_pmu_unregister(&pmu->base); in i915_pmu_unregister()
1248 pmu->base.event_init = NULL; in i915_pmu_unregister()
1249 kfree(pmu->base.attr_groups); in i915_pmu_unregister()
1251 kfree(pmu->name); in i915_pmu_unregister()
1252 free_event_attributes(pmu); in i915_pmu_unregister()
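
Teardown in i915_pmu_unregister() is ordered to cope with concurrent users: pmu->closed is set first so that every entry point that checks it (event_init, event_read, event_start, event_add, cpu_offline, as seen in the hits above) starts refusing work, then the sampling hrtimer is cancelled, the hotplug instance removed, and only then is the perf PMU unregistered and its memory freed. A toy model of that close-the-front-door-first ordering (names illustrative):

    #include <stdbool.h>
    #include <stdio.h>

    /*
     * Toy model of the i915_pmu_unregister() ordering: flip 'closed'
     * first so concurrent entry points bail out, then tear down in
     * reverse order of registration.
     */
    struct fake_pmu {
            bool closed;
            bool timer_running;
            bool registered;
    };

    static int event_init(struct fake_pmu *pmu)
    {
            if (pmu->closed)
                    return -19;     /* -ENODEV: no new events during teardown */
            return 0;
    }

    static void pmu_unregister(struct fake_pmu *pmu)
    {
            pmu->closed = true;             /* 1. fence off new users */
            pmu->timer_running = false;     /* 2. cf. hrtimer_cancel() */
            pmu->registered = false;        /* 3. cf. perf_pmu_unregister() */
    }

    int main(void)
    {
            struct fake_pmu pmu = { false, true, true };

            pmu_unregister(&pmu);
            printf("event_init after close: %d\n", event_init(&pmu));
            return 0;
    }
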