Lines matching full:pmu. Each entry gives the source line number, the matching code, and the enclosing function; 'argument' and 'local' flag parameter and local-variable declarations.
82 static bool pmu_needs_timer(struct i915_pmu *pmu, bool gpu_active) in pmu_needs_timer() argument
84 struct drm_i915_private *i915 = container_of(pmu, typeof(*i915), pmu); in pmu_needs_timer()
92 enable = pmu->enable; in pmu_needs_timer()
150 struct i915_pmu *pmu = &i915->pmu; in get_rc6() local
161 spin_lock_irqsave(&pmu->lock, flags); in get_rc6()
164 pmu->sample[__I915_SAMPLE_RC6].cur = val; in get_rc6()
173 val = ktime_since(pmu->sleep_last); in get_rc6()
174 val += pmu->sample[__I915_SAMPLE_RC6].cur; in get_rc6()
177 if (val < pmu->sample[__I915_SAMPLE_RC6_LAST_REPORTED].cur) in get_rc6()
178 val = pmu->sample[__I915_SAMPLE_RC6_LAST_REPORTED].cur; in get_rc6()
180 pmu->sample[__I915_SAMPLE_RC6_LAST_REPORTED].cur = val; in get_rc6()
182 spin_unlock_irqrestore(&pmu->lock, flags); in get_rc6()
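The get_rc6() fragment above (lines 173-180) estimates RC6 residency as the counter saved at park time plus the time slept since, then clamps the result against the last value handed out so the counter can never appear to run backwards. A minimal userspace sketch of that monotonic-clamp pattern, with the driver's locking elided and the names chosen for illustration:

    #include <stdint.h>

    static uint64_t rc6_last_reported;  /* high-water mark already reported */

    /* Never report less than we previously did: perf counters must be
     * monotonic even when the underlying estimate jitters downwards. */
    static uint64_t report_rc6(uint64_t estimate)
    {
        if (estimate < rc6_last_reported)
            estimate = rc6_last_reported;
        rc6_last_reported = estimate;
        return estimate;
    }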
187 static void init_rc6(struct i915_pmu *pmu) in init_rc6() argument
189 struct drm_i915_private *i915 = container_of(pmu, typeof(*i915), pmu); in init_rc6()
193 pmu->sample[__I915_SAMPLE_RC6].cur = __get_rc6(&i915->gt); in init_rc6()
194 pmu->sample[__I915_SAMPLE_RC6_LAST_REPORTED].cur = in init_rc6()
195 pmu->sample[__I915_SAMPLE_RC6].cur; in init_rc6()
196 pmu->sleep_last = ktime_get(); in init_rc6()
202 struct i915_pmu *pmu = &i915->pmu; in park_rc6() local
204 pmu->sample[__I915_SAMPLE_RC6].cur = __get_rc6(&i915->gt); in park_rc6()
205 pmu->sleep_last = ktime_get(); in park_rc6()
215 static void init_rc6(struct i915_pmu *pmu) { } in init_rc6() argument
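init_rc6() and park_rc6() (lines 193-205) snapshot the RC6 counter and a monotonic timestamp whenever the GT parks; while the device stays asleep the whole interval counts as RC6, so get_rc6() can extrapolate without waking the hardware (lines 173-174). A self-contained sketch of that snapshot-and-extrapolate idea (variable names are illustrative):

    #include <stdint.h>
    #include <time.h>

    static uint64_t rc6_at_park;   /* counter snapshot taken when parking */
    static uint64_t sleep_last_ns; /* when the snapshot was taken         */

    static uint64_t now_ns(void)
    {
        struct timespec ts;
        clock_gettime(CLOCK_MONOTONIC, &ts);
        return (uint64_t)ts.tv_sec * 1000000000ull + (uint64_t)ts.tv_nsec;
    }

    static void park(uint64_t hw_rc6)
    {
        rc6_at_park = hw_rc6;
        sleep_last_ns = now_ns();
    }

    /* While parked, every nanosecond since the snapshot was spent in RC6. */
    static uint64_t estimate_rc6(void)
    {
        return rc6_at_park + (now_ns() - sleep_last_ns);
    }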
220 static void __i915_pmu_maybe_start_timer(struct i915_pmu *pmu) in __i915_pmu_maybe_start_timer() argument
222 if (!pmu->timer_enabled && pmu_needs_timer(pmu, true)) { in __i915_pmu_maybe_start_timer()
223 pmu->timer_enabled = true; in __i915_pmu_maybe_start_timer()
224 pmu->timer_last = ktime_get(); in __i915_pmu_maybe_start_timer()
225 hrtimer_start_range_ns(&pmu->timer, in __i915_pmu_maybe_start_timer()
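__i915_pmu_maybe_start_timer() (lines 222-225) is the idempotent "arm the sampling timer only when it is not already running and something needs it" helper; it stamps timer_last first so the very first callback sees a valid period. The hrtimer_start_range_ns() argument list is truncated in the listing; a kernel-style sketch of a plausible completion, where SAMPLE_PERIOD_NS is an assumed placeholder for the driver's real period constant:

    #include <linux/hrtimer.h>
    #include <linux/ktime.h>

    #define SAMPLE_PERIOD_NS (5 * NSEC_PER_MSEC) /* assumption, not from the listing */

    static void maybe_start_timer(struct i915_pmu *pmu)
    {
        if (!pmu->timer_enabled && pmu_needs_timer(pmu, true)) {
            pmu->timer_enabled = true;
            pmu->timer_last = ktime_get();
            /* A non-zero slack (range) would let the core coalesce wakeups. */
            hrtimer_start_range_ns(&pmu->timer,
                                   ns_to_ktime(SAMPLE_PERIOD_NS),
                                   0, HRTIMER_MODE_REL);
        }
    }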
233 struct i915_pmu *pmu = &i915->pmu; in i915_pmu_gt_parked() local
235 if (!pmu->base.event_init) in i915_pmu_gt_parked()
238 spin_lock_irq(&pmu->lock); in i915_pmu_gt_parked()
246 pmu->timer_enabled = pmu_needs_timer(pmu, false); in i915_pmu_gt_parked()
248 spin_unlock_irq(&pmu->lock); in i915_pmu_gt_parked()
253 struct i915_pmu *pmu = &i915->pmu; in i915_pmu_gt_unparked() local
255 if (!pmu->base.event_init) in i915_pmu_gt_unparked()
258 spin_lock_irq(&pmu->lock); in i915_pmu_gt_unparked()
263 __i915_pmu_maybe_start_timer(pmu); in i915_pmu_gt_unparked()
265 spin_unlock_irq(&pmu->lock); in i915_pmu_gt_unparked()
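Note the locking asymmetry: the park/unpark hooks (lines 238, 258) run in process context with interrupts enabled, so plain spin_lock_irq() suffices, while the perf-facing enable/disable paths further down (lines 630, 690) use spin_lock_irqsave() because perf may call them with interrupts already disabled.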
286 struct intel_engine_pmu *pmu = &engine->pmu; in engine_sample() local
295 add_sample(&pmu->sample[I915_SAMPLE_WAIT], period_ns); in engine_sample()
297 add_sample(&pmu->sample[I915_SAMPLE_SEMA], period_ns); in engine_sample()
316 add_sample(&pmu->sample[I915_SAMPLE_BUSY], period_ns); in engine_sample()
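engine_sample() (lines 295-316) charges the elapsed timer period to the per-engine WAIT, SEMA and BUSY buckets whenever the corresponding condition is observed at the sampling instant; busyness is therefore a statistical estimate whose accuracy tracks the sampling rate. A minimal sketch, assuming add_sample() is a plain accumulation (consistent with its use here):

    #include <stdint.h>

    struct sample { uint64_t cur; };

    /* Charge one timer period to a bucket whose condition sampled true. */
    static void add_sample(struct sample *s, uint32_t period_ns)
    {
        s->cur += period_ns;
    }

    /* Usage at each tick, e.g.: if (engine_is_busy()) add_sample(&busy, period_ns); */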
327 if ((i915->pmu.enable & ENGINE_SAMPLE_MASK) == 0) in engines_sample()
355 static bool frequency_sampling_enabled(struct i915_pmu *pmu) in frequency_sampling_enabled() argument
357 return pmu->enable & in frequency_sampling_enabled()
367 struct i915_pmu *pmu = &i915->pmu; in frequency_sample() local
370 if (!frequency_sampling_enabled(pmu)) in frequency_sample()
377 if (pmu->enable & config_enabled_mask(I915_PMU_ACTUAL_FREQUENCY)) { in frequency_sample()
395 add_sample_mult(&pmu->sample[__I915_SAMPLE_FREQ_ACT], in frequency_sample()
399 if (pmu->enable & config_enabled_mask(I915_PMU_REQUESTED_FREQUENCY)) { in frequency_sample()
400 add_sample_mult(&pmu->sample[__I915_SAMPLE_FREQ_REQ], in frequency_sample()
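frequency_sample() (lines 377-400) cannot just add the period: it records a frequency, so each reading is weighted by how long it was in effect, and the reader later divides the running sum by total time (lines 587 and 592 below use div_u64() for exactly that). A sketch of the time-weighted average, with unit handling simplified relative to the driver:

    #include <stdint.h>

    struct sample { uint64_t cur; };

    /* Accumulate freq_mhz weighted by the period it was in effect. */
    static void add_sample_mult(struct sample *s, uint32_t freq_mhz,
                                uint32_t period_ns)
    {
        s->cur += (uint64_t)freq_mhz * period_ns;
    }

    /* Average frequency over the whole sampled interval. */
    static uint64_t avg_freq_mhz(const struct sample *s, uint64_t total_ns)
    {
        return total_ns ? s->cur / total_ns : 0;
    }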
411 container_of(hrtimer, struct drm_i915_private, pmu.timer); in i915_sample()
412 struct i915_pmu *pmu = &i915->pmu; in i915_sample() local
417 if (!READ_ONCE(pmu->timer_enabled)) in i915_sample()
421 period_ns = ktime_to_ns(ktime_sub(now, pmu->timer_last)); in i915_sample()
422 pmu->timer_last = now; in i915_sample()
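i915_sample() (lines 411-422) is the timer callback: it recovers its containing device with container_of(), bails out cheaply via READ_ONCE() once sampling is disabled, and measures the period actually elapsed rather than trusting the nominal one, since hrtimers can fire late under load. A condensed kernel-style sketch (the real callback recovers drm_i915_private, per line 411; SAMPLE_PERIOD_NS is the assumed constant from the earlier sketch):

    static enum hrtimer_restart sample_cb(struct hrtimer *hrtimer)
    {
        struct i915_pmu *pmu = container_of(hrtimer, struct i915_pmu, timer);
        ktime_t now = ktime_get();
        unsigned int period_ns;

        if (!READ_ONCE(pmu->timer_enabled))
            return HRTIMER_NORESTART;   /* disabled: stop rearming */

        /* Use the measured period, not the nominal one. */
        period_ns = ktime_to_ns(ktime_sub(now, pmu->timer_last));
        pmu->timer_last = now;

        /* ... engines_sample(period_ns); frequency_sample(period_ns); ... */

        hrtimer_forward(hrtimer, now, ns_to_ktime(SAMPLE_PERIOD_NS));
        return HRTIMER_RESTART;
    }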
457 container_of(event->pmu, typeof(*i915), pmu.base); in i915_pmu_event_destroy()
510 container_of(event->pmu, typeof(*i915), pmu.base); in engine_event_init()
524 container_of(event->pmu, typeof(*i915), pmu.base); in i915_pmu_event_init()
527 if (event->attr.type != event->pmu->type) in i915_pmu_event_init()
560 container_of(event->pmu, typeof(*i915), pmu.base); in __i915_pmu_event_read()
561 struct i915_pmu *pmu = &i915->pmu; in __i915_pmu_event_read() local
581 val = engine->pmu.sample[sample].cur; in __i915_pmu_event_read()
587 div_u64(pmu->sample[__I915_SAMPLE_FREQ_ACT].cur, in __i915_pmu_event_read()
592 div_u64(pmu->sample[__I915_SAMPLE_FREQ_REQ].cur, in __i915_pmu_event_read()
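__i915_pmu_event_read() (lines 560-592) converts the raw buckets into event values: engine samples are returned as accumulated nanoseconds, and the frequency sums are divided by elapsed time via div_u64(). Perf drivers conventionally publish such a value through event->count with a cmpxchg loop against the previously read value; a sketch of that standard pattern, assuming the usual hw.prev_count bookkeeping (read_counter() is a hypothetical stand-in for __i915_pmu_event_read()):

    static void pmu_event_read(struct perf_event *event)
    {
        u64 prev, new;

        /* Retry if another reader updated prev_count under us. */
        do {
            prev = local64_read(&event->hw.prev_count);
            new = read_counter(event);   /* hypothetical helper */
        } while (local64_cmpxchg(&event->hw.prev_count, prev, new) != prev);

        local64_add(new - prev, &event->count);
    }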
625 container_of(event->pmu, typeof(*i915), pmu.base); in i915_pmu_enable()
627 struct i915_pmu *pmu = &i915->pmu; in i915_pmu_enable() local
630 spin_lock_irqsave(&pmu->lock, flags); in i915_pmu_enable()
636 BUILD_BUG_ON(ARRAY_SIZE(pmu->enable_count) != I915_PMU_MASK_BITS); in i915_pmu_enable()
637 GEM_BUG_ON(bit >= ARRAY_SIZE(pmu->enable_count)); in i915_pmu_enable()
638 GEM_BUG_ON(pmu->enable_count[bit] == ~0); in i915_pmu_enable()
640 pmu->enable |= BIT_ULL(bit); in i915_pmu_enable()
641 pmu->enable_count[bit]++; in i915_pmu_enable()
646 __i915_pmu_maybe_start_timer(pmu); in i915_pmu_enable()
660 BUILD_BUG_ON(ARRAY_SIZE(engine->pmu.enable_count) != in i915_pmu_enable()
662 BUILD_BUG_ON(ARRAY_SIZE(engine->pmu.sample) != in i915_pmu_enable()
664 GEM_BUG_ON(sample >= ARRAY_SIZE(engine->pmu.enable_count)); in i915_pmu_enable()
665 GEM_BUG_ON(sample >= ARRAY_SIZE(engine->pmu.sample)); in i915_pmu_enable()
666 GEM_BUG_ON(engine->pmu.enable_count[sample] == ~0); in i915_pmu_enable()
668 engine->pmu.enable |= BIT(sample); in i915_pmu_enable()
669 engine->pmu.enable_count[sample]++; in i915_pmu_enable()
672 spin_unlock_irqrestore(&pmu->lock, flags); in i915_pmu_enable()
685 container_of(event->pmu, typeof(*i915), pmu.base); in i915_pmu_disable()
687 struct i915_pmu *pmu = &i915->pmu; in i915_pmu_disable() local
690 spin_lock_irqsave(&pmu->lock, flags); in i915_pmu_disable()
700 GEM_BUG_ON(sample >= ARRAY_SIZE(engine->pmu.enable_count)); in i915_pmu_disable()
701 GEM_BUG_ON(sample >= ARRAY_SIZE(engine->pmu.sample)); in i915_pmu_disable()
702 GEM_BUG_ON(engine->pmu.enable_count[sample] == 0); in i915_pmu_disable()
708 if (--engine->pmu.enable_count[sample] == 0) in i915_pmu_disable()
709 engine->pmu.enable &= ~BIT(sample); in i915_pmu_disable()
712 GEM_BUG_ON(bit >= ARRAY_SIZE(pmu->enable_count)); in i915_pmu_disable()
713 GEM_BUG_ON(pmu->enable_count[bit] == 0); in i915_pmu_disable()
718 if (--pmu->enable_count[bit] == 0) { in i915_pmu_disable()
719 pmu->enable &= ~BIT_ULL(bit); in i915_pmu_disable()
720 pmu->timer_enabled &= pmu_needs_timer(pmu, true); in i915_pmu_disable()
723 spin_unlock_irqrestore(&pmu->lock, flags); in i915_pmu_disable()
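i915_pmu_enable()/i915_pmu_disable() (lines 625-723) keep a reference count per event bit: several perf events can map to the same sample source, the enable bitmask is set unconditionally on every enable (the bit may already be set), and only the last disable clears it and lets pmu_needs_timer() stop the timer. A self-contained sketch of that refcounted-bitmask pattern, with the driver's spinlock elided:

    #include <assert.h>
    #include <stdint.h>

    #define MAX_EVENTS 64

    static uint64_t enable;                   /* one bit per sample source */
    static uint8_t  enable_count[MAX_EVENTS]; /* users per bit             */

    static void event_enable(unsigned int bit)
    {
        assert(bit < MAX_EVENTS && enable_count[bit] != UINT8_MAX);
        enable |= 1ull << bit;      /* harmless if already set */
        enable_count[bit]++;
    }

    static void event_disable(unsigned int bit)
    {
        assert(bit < MAX_EVENTS && enable_count[bit] != 0);
        if (--enable_count[bit] == 0)
            enable &= ~(1ull << bit);  /* last user gone: stop sampling */
    }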
860 create_event_attributes(struct i915_pmu *pmu) in create_event_attributes() argument
862 struct drm_i915_private *i915 = container_of(pmu, typeof(*i915), pmu); in create_event_attributes()
975 pmu->i915_attr = i915_attr; in create_event_attributes()
976 pmu->pmu_attr = pmu_attr; in create_event_attributes()
992 static void free_event_attributes(struct i915_pmu *pmu) in free_event_attributes() argument
994 struct attribute **attr_iter = pmu->events_attr_group.attrs; in free_event_attributes()
999 kfree(pmu->events_attr_group.attrs); in free_event_attributes()
1000 kfree(pmu->i915_attr); in free_event_attributes()
1001 kfree(pmu->pmu_attr); in free_event_attributes()
1003 pmu->events_attr_group.attrs = NULL; in free_event_attributes()
1004 pmu->i915_attr = NULL; in free_event_attributes()
1005 pmu->pmu_attr = NULL; in free_event_attributes()
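free_event_attributes() (lines 992-1005) walks the events attribute array and then resets the pointers so a later unregister cannot double-free. The loop body is elided from the listing; a kernel-style sketch of the usual teardown for a NULL-terminated attribute array whose names were allocated dynamically (an assumption about the elided lines):

    static void free_attrs(struct attribute **attrs)
    {
        struct attribute **it;

        for (it = attrs; it && *it; it++)
            kfree((*it)->name);   /* names were built at create time */
        kfree(attrs);
    }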
1010 struct i915_pmu *pmu = hlist_entry_safe(node, typeof(*pmu), cpuhp.node); in i915_pmu_cpu_online() local
1012 GEM_BUG_ON(!pmu->base.event_init); in i915_pmu_cpu_online()
1023 struct i915_pmu *pmu = hlist_entry_safe(node, typeof(*pmu), cpuhp.node); in i915_pmu_cpu_offline() local
1026 GEM_BUG_ON(!pmu->base.event_init); in i915_pmu_cpu_offline()
1033 perf_pmu_migrate_context(&pmu->base, cpu, target); in i915_pmu_cpu_offline()
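i915_pmu_cpu_offline() (lines 1023-1033) implements the standard uncore-PMU hotplug dance: events are bound to a single CPU, and when that CPU disappears the perf context is migrated to a surviving one. A kernel-style sketch of the migration step (how the owning CPU is tracked is elided in the listing, so picking any other online CPU here is an assumption):

    static int pmu_cpu_offline(unsigned int cpu, struct hlist_node *node)
    {
        struct i915_pmu *pmu = hlist_entry_safe(node, typeof(*pmu), cpuhp.node);
        unsigned int target;

        /* Move all events off the dying CPU onto any other online one. */
        target = cpumask_any_but(cpu_online_mask, cpu);
        if (target < nr_cpu_ids)
            perf_pmu_migrate_context(&pmu->base, cpu, target);

        return 0;
    }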
1040 static int i915_pmu_register_cpuhp_state(struct i915_pmu *pmu) in i915_pmu_register_cpuhp_state() argument
1053 ret = cpuhp_state_add_instance(slot, &pmu->cpuhp.node); in i915_pmu_register_cpuhp_state()
1059 pmu->cpuhp.slot = slot; in i915_pmu_register_cpuhp_state()
1063 static void i915_pmu_unregister_cpuhp_state(struct i915_pmu *pmu) in i915_pmu_unregister_cpuhp_state() argument
1065 struct drm_i915_private *i915 = container_of(pmu, typeof(*i915), pmu); in i915_pmu_unregister_cpuhp_state()
1067 drm_WARN_ON(&i915->drm, pmu->cpuhp.slot == CPUHP_INVALID); in i915_pmu_unregister_cpuhp_state()
1068 drm_WARN_ON(&i915->drm, cpuhp_state_remove_instance(pmu->cpuhp.slot, &pmu->cpuhp.node)); in i915_pmu_unregister_cpuhp_state()
1069 cpuhp_remove_multi_state(pmu->cpuhp.slot); in i915_pmu_unregister_cpuhp_state()
1070 pmu->cpuhp.slot = CPUHP_INVALID; in i915_pmu_unregister_cpuhp_state()
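i915_pmu_register_cpuhp_state()/i915_pmu_unregister_cpuhp_state() (lines 1040-1070) allocate a dynamic hotplug slot, attach this PMU instance to it, and remember the slot so teardown can detach and release it; CPUHP_INVALID doubles as the "not registered" sentinel. A sketch of the registration half, assuming the slot comes from cpuhp_setup_state_multi() with the two callbacks above (the state name string is illustrative):

    static int register_cpuhp(struct i915_pmu *pmu)
    {
        int slot, ret;

        /* CPUHP_AP_ONLINE_DYN: the core picks a free dynamic slot. */
        slot = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN,
                                       "gpu/i915_pmu:online",  /* illustrative */
                                       i915_pmu_cpu_online,
                                       i915_pmu_cpu_offline);
        if (slot < 0)
            return slot;

        ret = cpuhp_state_add_instance(slot, &pmu->cpuhp.node);
        if (ret) {
            cpuhp_remove_multi_state(slot);
            return ret;
        }

        pmu->cpuhp.slot = slot;
        return 0;
    }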
1086 struct i915_pmu *pmu = &i915->pmu; in i915_pmu_register() local
1089 &pmu->events_attr_group, in i915_pmu_register()
1097 drm_info(&i915->drm, "PMU not supported for this GPU."); in i915_pmu_register()
1101 spin_lock_init(&pmu->lock); in i915_pmu_register()
1102 hrtimer_init(&pmu->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); in i915_pmu_register()
1103 pmu->timer.function = i915_sample; in i915_pmu_register()
1104 pmu->cpuhp.slot = CPUHP_INVALID; in i915_pmu_register()
1105 init_rc6(pmu); in i915_pmu_register()
1108 pmu->name = kasprintf(GFP_KERNEL, in i915_pmu_register()
1111 if (pmu->name) { in i915_pmu_register()
1113 strreplace((char *)pmu->name, ':', '_'); in i915_pmu_register()
1116 pmu->name = "i915"; in i915_pmu_register()
1118 if (!pmu->name) in i915_pmu_register()
1121 pmu->events_attr_group.name = "events"; in i915_pmu_register()
1122 pmu->events_attr_group.attrs = create_event_attributes(pmu); in i915_pmu_register()
1123 if (!pmu->events_attr_group.attrs) in i915_pmu_register()
1126 pmu->base.attr_groups = kmemdup(attr_groups, sizeof(attr_groups), in i915_pmu_register()
1128 if (!pmu->base.attr_groups) in i915_pmu_register()
1131 pmu->base.module = THIS_MODULE; in i915_pmu_register()
1132 pmu->base.task_ctx_nr = perf_invalid_context; in i915_pmu_register()
1133 pmu->base.event_init = i915_pmu_event_init; in i915_pmu_register()
1134 pmu->base.add = i915_pmu_event_add; in i915_pmu_register()
1135 pmu->base.del = i915_pmu_event_del; in i915_pmu_register()
1136 pmu->base.start = i915_pmu_event_start; in i915_pmu_register()
1137 pmu->base.stop = i915_pmu_event_stop; in i915_pmu_register()
1138 pmu->base.read = i915_pmu_event_read; in i915_pmu_register()
1139 pmu->base.event_idx = i915_pmu_event_event_idx; in i915_pmu_register()
1141 ret = perf_pmu_register(&pmu->base, pmu->name, -1); in i915_pmu_register()
1145 ret = i915_pmu_register_cpuhp_state(pmu); in i915_pmu_register()
1152 perf_pmu_unregister(&pmu->base); in i915_pmu_register()
1154 kfree(pmu->base.attr_groups); in i915_pmu_register()
1156 pmu->base.event_init = NULL; in i915_pmu_register()
1157 free_event_attributes(pmu); in i915_pmu_register()
1160 kfree(pmu->name); in i915_pmu_register()
1162 drm_notice(&i915->drm, "Failed to register PMU!\n"); in i915_pmu_register()
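i915_pmu_register() (lines 1086-1162) wires up the struct pmu callbacks, registers under a per-device name with ':' rewritten to '_' (line 1113; perf event-source names cannot contain colons), and unwinds in reverse order on failure. Once registered, the PMU appears under /sys/bus/event_source/devices/<name>. A userspace sketch of opening one of its events; the dynamic type id is read from that sysfs directory, and the config value would come from its events/ subdirectory (0 here is a placeholder):

    #include <linux/perf_event.h>
    #include <sys/syscall.h>
    #include <unistd.h>
    #include <string.h>

    /* Open a counting event on a dynamically typed (uncore) PMU. */
    static int open_pmu_event(unsigned int type, unsigned long long config)
    {
        struct perf_event_attr attr;

        memset(&attr, 0, sizeof(attr));
        attr.type = type;     /* from /sys/bus/event_source/devices/<name>/type */
        attr.size = sizeof(attr);
        attr.config = config; /* from .../events/<event>; 0 is a placeholder   */

        /* pid = -1, cpu = 0: uncore PMUs count system-wide on one CPU. */
        return (int)syscall(SYS_perf_event_open, &attr, -1, 0, -1, 0);
    }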
1167 struct i915_pmu *pmu = &i915->pmu; in i915_pmu_unregister() local
1169 if (!pmu->base.event_init) in i915_pmu_unregister()
1172 drm_WARN_ON(&i915->drm, pmu->enable); in i915_pmu_unregister()
1174 hrtimer_cancel(&pmu->timer); in i915_pmu_unregister()
1176 i915_pmu_unregister_cpuhp_state(pmu); in i915_pmu_unregister()
1178 perf_pmu_unregister(&pmu->base); in i915_pmu_unregister()
1179 pmu->base.event_init = NULL; in i915_pmu_unregister()
1180 kfree(pmu->base.attr_groups); in i915_pmu_unregister()
1182 kfree(pmu->name); in i915_pmu_unregister()
1183 free_event_attributes(pmu); in i915_pmu_unregister()
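i915_pmu_unregister() (lines 1167-1183) tears down in reverse order: cancel the sampling timer before leaving perf, then free what registration allocated. Setting base.event_init back to NULL (line 1179) is deliberate; it is the same field the park/unpark hooks test at lines 235 and 255, so it doubles as the "PMU is registered" flag.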