Lines matching refs: spe_pmu

Each entry below gives the source line number, the matching line of code, and the enclosing function; the trailing "argument"/"local" note records whether spe_pmu is a function parameter or a local variable at that site.

121 static u32 arm_spe_pmu_cap_get(struct arm_spe_pmu *spe_pmu, int cap)  in arm_spe_pmu_cap_get()  argument
124 return !!(spe_pmu->features & arm_spe_pmu_feat_caps[cap]); in arm_spe_pmu_cap_get()
128 return spe_pmu->counter_sz; in arm_spe_pmu_cap_get()
130 return spe_pmu->min_period; in arm_spe_pmu_cap_get()
142 struct arm_spe_pmu *spe_pmu = dev_get_drvdata(dev); in arm_spe_pmu_cap_show() local
148 arm_spe_pmu_cap_get(spe_pmu, cap)); in arm_spe_pmu_cap_show()
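The entries at lines 121-148 come from the sysfs capability code: boolean capabilities are answered from a feature bitmask indexed by a caps table, while numeric capabilities (counter size, minimum sampling interval) are returned straight from fields of the device structure, and the _show() handler just prints the getter's result. A minimal userspace sketch of that lookup pattern, with invented cap indices and a simplified structure standing in for the driver's arm_spe_pmu:

#include <stdint.h>
#include <stdio.h>

/* Simplified stand-ins; names and layout are illustrative only. */
struct spe_caps_model {
    uint64_t features;     /* bitmask of boolean features */
    uint32_t counter_sz;   /* numeric capability */
    uint32_t min_period;   /* numeric capability */
};

enum { CAP_FILT_EVT, CAP_CNT_SZ, CAP_MIN_IVAL, CAP_NR };

/* Boolean caps map to a feature bit; numeric caps have no entry here. */
static const uint64_t cap_feature_bit[CAP_NR] = {
    [CAP_FILT_EVT] = 1ULL << 0,
};

static uint32_t cap_get(const struct spe_caps_model *m, int cap)
{
    switch (cap) {
    case CAP_FILT_EVT:
        return !!(m->features & cap_feature_bit[cap]);
    case CAP_CNT_SZ:
        return m->counter_sz;
    case CAP_MIN_IVAL:
        return m->min_period;
    default:
        return 0;
    }
}

int main(void)
{
    struct spe_caps_model m = { .features = 1, .counter_sz = 12, .min_period = 1024 };

    /* The sysfs _show() handler amounts to printing this value. */
    printf("%u %u %u\n", cap_get(&m, CAP_FILT_EVT),
           cap_get(&m, CAP_CNT_SZ), cap_get(&m, CAP_MIN_IVAL));
    return 0;
}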
257 struct arm_spe_pmu *spe_pmu = dev_get_drvdata(dev); in arm_spe_pmu_get_attr_cpumask() local
259 return cpumap_print_to_pagebuf(true, buf, &spe_pmu->supported_cpus); in arm_spe_pmu_get_attr_cpumask()
303 struct arm_spe_pmu *spe_pmu = to_spe_pmu(event->pmu); in arm_spe_event_sanitise_period() local
308 if (period < spe_pmu->min_period) in arm_spe_event_sanitise_period()
309 period = spe_pmu->min_period; in arm_spe_event_sanitise_period()
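Lines 303-309 show the sampling-period sanitisation: a requested period below the hardware's minimum interval is raised to that minimum (only the lower bound is visible in this listing; the function presumably also limits the upper end). A small sketch of the clamp, assuming a plain 64-bit period value:

#include <stdint.h>

/* Raise a requested sample period to the PMU's minimum interval.
 * Upper-bound handling is omitted because it is not visible in the listing. */
uint64_t sanitise_period(uint64_t period, uint64_t min_period)
{
    if (period < min_period)
        period = min_period;
    return period;
}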
378 struct arm_spe_pmu *spe_pmu = to_spe_pmu(handle->event->pmu); in arm_spe_pmu_next_snapshot_off() local
395 if (limit - head < spe_pmu->max_record_sz) { in arm_spe_pmu_next_snapshot_off()
406 struct arm_spe_pmu *spe_pmu = to_spe_pmu(handle->event->pmu); in __arm_spe_pmu_next_off() local
427 if (!IS_ALIGNED(head, spe_pmu->align)) { in __arm_spe_pmu_next_off()
428 unsigned long delta = roundup(head, spe_pmu->align) - head; in __arm_spe_pmu_next_off()
477 struct arm_spe_pmu *spe_pmu = to_spe_pmu(handle->event->pmu); in arm_spe_pmu_next_off() local
485 if (limit && (limit - head < spe_pmu->max_record_sz)) { in arm_spe_pmu_next_off()
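The three next-offset helpers (lines 378-485) share the same bookkeeping: the current write head is rounded up to the unit's record alignment, and if the space left before the limit is smaller than the largest possible record, the head cannot advance into it. A userspace sketch of that arithmetic only, with the kernel's roundup() expanded by hand; the real helpers also handle wrapping, padding and truncation that this listing does not show:

#include <stdint.h>
#include <stdbool.h>

/* Round x up to the next multiple of align (align is a power of two,
 * as the driver derives it from 1 << fld). */
static uint64_t roundup_pow2(uint64_t x, uint64_t align)
{
    return (x + align - 1) & ~(align - 1);
}

/* Decide whether a record of up to max_record_sz bytes fits between
 * head and limit once head has been aligned. */
bool record_fits(uint64_t *head, uint64_t limit,
                 uint64_t align, uint64_t max_record_sz)
{
    if (*head & (align - 1))            /* !IS_ALIGNED(head, align) */
        *head = roundup_pow2(*head, align);

    if (*head >= limit)
        return false;

    return limit - *head >= max_record_sz;
}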
681 struct arm_spe_pmu *spe_pmu = to_spe_pmu(event->pmu); in arm_spe_pmu_event_init() local
688 !cpumask_test_cpu(event->cpu, &spe_pmu->supported_cpus)) in arm_spe_pmu_event_init()
709 !(spe_pmu->features & SPE_PMU_FEAT_FILT_EVT)) in arm_spe_pmu_event_init()
713 !(spe_pmu->features & SPE_PMU_FEAT_FILT_TYP)) in arm_spe_pmu_event_init()
717 !(spe_pmu->features & SPE_PMU_FEAT_FILT_LAT)) in arm_spe_pmu_event_init()
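arm_spe_pmu_event_init() (lines 681-717) rejects events that name a CPU outside the PMU's supported set, and rejects filter options (event, type and latency filtering) whose corresponding feature bit was never probed. A sketch of the feature-vs-request validation, using invented flag names for the requested filters; the exact error code is not visible in the listing, so a generic -EOPNOTSUPP is assumed:

#include <stdint.h>
#include <errno.h>

/* Hypothetical feature bits; the driver's SPE_PMU_FEAT_* values differ. */
#define FEAT_FILT_EVT   (1u << 0)
#define FEAT_FILT_TYP   (1u << 1)
#define FEAT_FILT_LAT   (1u << 2)

/* Return 0 if every requested filter is backed by a probed feature,
 * otherwise a negative errno as the perf core expects. */
int check_filters(uint32_t requested, uint32_t features)
{
    if ((requested & FEAT_FILT_EVT) && !(features & FEAT_FILT_EVT))
        return -EOPNOTSUPP;
    if ((requested & FEAT_FILT_TYP) && !(features & FEAT_FILT_TYP))
        return -EOPNOTSUPP;
    if ((requested & FEAT_FILT_LAT) && !(features & FEAT_FILT_LAT))
        return -EOPNOTSUPP;
    return 0;
}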
733 struct arm_spe_pmu *spe_pmu = to_spe_pmu(event->pmu); in arm_spe_pmu_start() local
735 struct perf_output_handle *handle = this_cpu_ptr(spe_pmu->handle); in arm_spe_pmu_start()
766 struct arm_spe_pmu *spe_pmu = to_spe_pmu(event->pmu); in arm_spe_pmu_stop() local
768 struct perf_output_handle *handle = this_cpu_ptr(spe_pmu->handle); in arm_spe_pmu_stop()
808 struct arm_spe_pmu *spe_pmu = to_spe_pmu(event->pmu); in arm_spe_pmu_add() local
812 if (!cpumask_test_cpu(cpu, &spe_pmu->supported_cpus)) in arm_spe_pmu_add()
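The start/stop/add callbacks (lines 733-812) follow the usual pattern for a PMU with a per-CPU AUX buffer: add() refuses events scheduled on a CPU outside the supported mask, and start()/stop() operate on this CPU's perf_output_handle taken from a per-CPU area. A schematic kernel-style fragment of that pattern, not a drop-in replacement for the driver's callbacks (error code and struct name are stand-ins):

#include <linux/kernel.h>
#include <linux/perf_event.h>
#include <linux/percpu.h>
#include <linux/cpumask.h>
#include <linux/smp.h>

struct spe_pmu_sketch {
    struct pmu pmu;
    cpumask_t supported_cpus;
    struct perf_output_handle __percpu *handle;
};

static int sketch_add(struct perf_event *event, int flags)
{
    struct spe_pmu_sketch *s =
        container_of(event->pmu, struct spe_pmu_sketch, pmu);
    int cpu = event->cpu < 0 ? smp_processor_id() : event->cpu;

    /* Only CPUs that share this SPE instance may schedule the event. */
    if (!cpumask_test_cpu(cpu, &s->supported_cpus))
        return -ENOENT;

    return 0;
}

static void sketch_start(struct perf_event *event, int flags)
{
    struct spe_pmu_sketch *s =
        container_of(event->pmu, struct spe_pmu_sketch, pmu);
    struct perf_output_handle *handle = this_cpu_ptr(s->handle);

    /* The real driver begins the AUX transaction on this handle here. */
    (void)handle;
}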
895 static int arm_spe_pmu_perf_init(struct arm_spe_pmu *spe_pmu) in arm_spe_pmu_perf_init() argument
901 struct device *dev = &spe_pmu->pdev->dev; in arm_spe_pmu_perf_init()
903 spe_pmu->pmu = (struct pmu) { in arm_spe_pmu_perf_init()
935 return perf_pmu_register(&spe_pmu->pmu, name, -1); in arm_spe_pmu_perf_init()
938 static void arm_spe_pmu_perf_destroy(struct arm_spe_pmu *spe_pmu) in arm_spe_pmu_perf_destroy() argument
940 perf_pmu_unregister(&spe_pmu->pmu); in arm_spe_pmu_perf_destroy()
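arm_spe_pmu_perf_init()/..._perf_destroy() (lines 895-940) wrap registration with the perf core: the struct pmu is filled in with its callbacks, perf_pmu_register() is called with -1 to request a dynamically allocated PMU type, and destroy simply unregisters it. A schematic sketch of that pairing; the capability flags shown are typical of an AUX-trace PMU and are an assumption, not a copy of the driver's initialiser:

#include <linux/perf_event.h>

static struct pmu sketch_pmu;

static int sketch_perf_register(const char *name)
{
    sketch_pmu = (struct pmu) {
        .capabilities = PERF_PMU_CAP_EXCLUSIVE | PERF_PMU_CAP_ITRACE,
        /* .event_init, .add, .del, .start, .stop, .setup_aux, ...
         * would be filled in here. */
    };

    /* -1 asks the core to allocate a dynamic PMU type id. */
    return perf_pmu_register(&sketch_pmu, name, -1);
}

static void sketch_perf_unregister(void)
{
    perf_pmu_unregister(&sketch_pmu);
}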
947 struct arm_spe_pmu *spe_pmu = info; in __arm_spe_pmu_dev_probe() local
948 struct device *dev = &spe_pmu->pdev->dev; in __arm_spe_pmu_dev_probe()
969 spe_pmu->align = 1 << fld; in __arm_spe_pmu_dev_probe()
970 if (spe_pmu->align > SZ_2K) { in __arm_spe_pmu_dev_probe()
979 spe_pmu->features |= SPE_PMU_FEAT_FILT_EVT; in __arm_spe_pmu_dev_probe()
982 spe_pmu->features |= SPE_PMU_FEAT_FILT_TYP; in __arm_spe_pmu_dev_probe()
985 spe_pmu->features |= SPE_PMU_FEAT_FILT_LAT; in __arm_spe_pmu_dev_probe()
988 spe_pmu->features |= SPE_PMU_FEAT_ARCH_INST; in __arm_spe_pmu_dev_probe()
991 spe_pmu->features |= SPE_PMU_FEAT_LDS; in __arm_spe_pmu_dev_probe()
994 spe_pmu->features |= SPE_PMU_FEAT_ERND; in __arm_spe_pmu_dev_probe()
1000 spe_pmu->min_period = 256; in __arm_spe_pmu_dev_probe()
1003 spe_pmu->min_period = 512; in __arm_spe_pmu_dev_probe()
1006 spe_pmu->min_period = 768; in __arm_spe_pmu_dev_probe()
1009 spe_pmu->min_period = 1024; in __arm_spe_pmu_dev_probe()
1012 spe_pmu->min_period = 1536; in __arm_spe_pmu_dev_probe()
1015 spe_pmu->min_period = 2048; in __arm_spe_pmu_dev_probe()
1018 spe_pmu->min_period = 3072; in __arm_spe_pmu_dev_probe()
1025 spe_pmu->min_period = 4096; in __arm_spe_pmu_dev_probe()
1030 spe_pmu->max_record_sz = 1 << fld; in __arm_spe_pmu_dev_probe()
1031 if (spe_pmu->max_record_sz > SZ_2K || spe_pmu->max_record_sz < 16) { in __arm_spe_pmu_dev_probe()
1044 spe_pmu->counter_sz = 12; in __arm_spe_pmu_dev_probe()
1049 cpumask_pr_args(&spe_pmu->supported_cpus), in __arm_spe_pmu_dev_probe()
1050 spe_pmu->max_record_sz, spe_pmu->align, spe_pmu->features); in __arm_spe_pmu_dev_probe()
1052 spe_pmu->features |= SPE_PMU_FEAT_DEV_PROBED; in __arm_spe_pmu_dev_probe()
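__arm_spe_pmu_dev_probe() (lines 947-1052) runs on one of the supported CPUs and fills the software view of the hardware: the record alignment (1 << fld, rejected above 2 KiB), the SPE_PMU_FEAT_* bits for event/type/latency filtering, architectural instruction sampling, load/store sampling and random sampling, a minimum sampling interval picked from a small set of values (256 up to 4096), the maximum record size (1 << fld, rejected outside 16 bytes..2 KiB) and the counter size. The ID-register fields themselves are not visible in this listing, so the sketch below only models the derived checks on the values that are; the interval index is hypothetical:

#include <stdint.h>
#include <stdbool.h>
#include <stddef.h>

struct spe_probe_model {
    uint32_t align;          /* bytes, power of two */
    uint32_t max_record_sz;  /* bytes */
    uint32_t min_period;     /* minimum sampling interval */
};

/* Minimum-interval values seen in the listing; which ID-register encoding
 * selects which value is not shown there, so the index is assumed. */
static const uint32_t min_period_tbl[] = {
    256, 512, 768, 1024, 1536, 2048, 3072, 4096,
};

/* Apply the sanity checks visible in the listing. Returns false if the
 * reported hardware values are out of range. */
bool probe_model(struct spe_probe_model *m, unsigned int align_fld,
                 unsigned int record_fld, unsigned int interval_idx)
{
    m->align = 1u << align_fld;
    if (m->align > 2048)
        return false;

    m->max_record_sz = 1u << record_fld;
    if (m->max_record_sz > 2048 || m->max_record_sz < 16)
        return false;

    if (interval_idx >= sizeof(min_period_tbl) / sizeof(min_period_tbl[0]))
        return false;
    m->min_period = min_period_tbl[interval_idx];

    return true;
}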
1075 struct arm_spe_pmu *spe_pmu = info; in __arm_spe_pmu_setup_one() local
1078 enable_percpu_irq(spe_pmu->irq, IRQ_TYPE_NONE); in __arm_spe_pmu_setup_one()
1083 struct arm_spe_pmu *spe_pmu = info; in __arm_spe_pmu_stop_one() local
1085 disable_percpu_irq(spe_pmu->irq); in __arm_spe_pmu_stop_one()
1091 struct arm_spe_pmu *spe_pmu; in arm_spe_pmu_cpu_startup() local
1093 spe_pmu = hlist_entry_safe(node, struct arm_spe_pmu, hotplug_node); in arm_spe_pmu_cpu_startup()
1094 if (!cpumask_test_cpu(cpu, &spe_pmu->supported_cpus)) in arm_spe_pmu_cpu_startup()
1097 __arm_spe_pmu_setup_one(spe_pmu); in arm_spe_pmu_cpu_startup()
1103 struct arm_spe_pmu *spe_pmu; in arm_spe_pmu_cpu_teardown() local
1105 spe_pmu = hlist_entry_safe(node, struct arm_spe_pmu, hotplug_node); in arm_spe_pmu_cpu_teardown()
1106 if (!cpumask_test_cpu(cpu, &spe_pmu->supported_cpus)) in arm_spe_pmu_cpu_teardown()
1109 __arm_spe_pmu_stop_one(spe_pmu); in arm_spe_pmu_cpu_teardown()
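__arm_spe_pmu_setup_one()/__arm_spe_pmu_stop_one() and the CPU hotplug callbacks (lines 1075-1109) keep the per-CPU SPE interrupt enabled only on CPUs that belong to this instance: each callback recovers the driver data from the hotplug hlist node, bails out if the incoming or outgoing CPU is not in the supported mask, and otherwise enables or disables the per-CPU IRQ. A schematic kernel-style version of those callbacks, with a simplified stand-in structure:

#include <linux/cpumask.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/list.h>

struct spe_hp_sketch {
    struct hlist_node hotplug_node;
    cpumask_t supported_cpus;
    int irq;
};

static int sketch_cpu_startup(unsigned int cpu, struct hlist_node *node)
{
    struct spe_hp_sketch *s =
        hlist_entry_safe(node, struct spe_hp_sketch, hotplug_node);

    if (!cpumask_test_cpu(cpu, &s->supported_cpus))
        return 0;       /* not one of ours; nothing to do */

    /* Runs on the CPU being brought up, so this enables its own copy. */
    enable_percpu_irq(s->irq, IRQ_TYPE_NONE);
    return 0;
}

static int sketch_cpu_teardown(unsigned int cpu, struct hlist_node *node)
{
    struct spe_hp_sketch *s =
        hlist_entry_safe(node, struct spe_hp_sketch, hotplug_node);

    if (!cpumask_test_cpu(cpu, &s->supported_cpus))
        return 0;

    disable_percpu_irq(s->irq);
    return 0;
}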
1113 static int arm_spe_pmu_dev_init(struct arm_spe_pmu *spe_pmu) in arm_spe_pmu_dev_init() argument
1116 cpumask_t *mask = &spe_pmu->supported_cpus; in arm_spe_pmu_dev_init()
1119 ret = smp_call_function_any(mask, __arm_spe_pmu_dev_probe, spe_pmu, 1); in arm_spe_pmu_dev_init()
1120 if (ret || !(spe_pmu->features & SPE_PMU_FEAT_DEV_PROBED)) in arm_spe_pmu_dev_init()
1124 ret = request_percpu_irq(spe_pmu->irq, arm_spe_pmu_irq_handler, DRVNAME, in arm_spe_pmu_dev_init()
1125 spe_pmu->handle); in arm_spe_pmu_dev_init()
1135 &spe_pmu->hotplug_node); in arm_spe_pmu_dev_init()
1137 free_percpu_irq(spe_pmu->irq, spe_pmu->handle); in arm_spe_pmu_dev_init()
1142 static void arm_spe_pmu_dev_teardown(struct arm_spe_pmu *spe_pmu) in arm_spe_pmu_dev_teardown() argument
1144 cpuhp_state_remove_instance(arm_spe_pmu_online, &spe_pmu->hotplug_node); in arm_spe_pmu_dev_teardown()
1145 free_percpu_irq(spe_pmu->irq, spe_pmu->handle); in arm_spe_pmu_dev_teardown()
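arm_spe_pmu_dev_init()/..._dev_teardown() (lines 1113-1145) do the per-device plumbing: probe the hardware on one CPU of the supported mask via smp_call_function_any(), request the per-CPU IRQ against the per-CPU handle area, add a multi-instance CPU-hotplug instance, and unwind the IRQ if that last step fails; teardown removes the hotplug instance and frees the IRQ in the opposite order. A schematic kernel-style sketch, where sketch_online_state, sketch_irq_handler, sketch_probe_on_cpu and the "sketch_spe" name are stand-ins for the driver's own hotplug state, handler, probe callback and DRVNAME:

#include <linux/cpuhotplug.h>
#include <linux/cpumask.h>
#include <linux/interrupt.h>
#include <linux/perf_event.h>
#include <linux/smp.h>

/* Stand-ins; a real driver gets the state from cpuhp_setup_state_multi(). */
static void sketch_probe_on_cpu(void *info) { }
static irqreturn_t sketch_irq_handler(int irq, void *dev) { return IRQ_HANDLED; }
static enum cpuhp_state sketch_online_state;

struct spe_dev_sketch {
    cpumask_t supported_cpus;
    struct hlist_node hotplug_node;
    struct perf_output_handle __percpu *handle;
    int irq;
};

static int sketch_dev_init(struct spe_dev_sketch *s)
{
    int ret;

    /* Read the SPE ID registers on a CPU that actually has this SPE. */
    ret = smp_call_function_any(&s->supported_cpus, sketch_probe_on_cpu, s, 1);
    if (ret)
        return ret;

    ret = request_percpu_irq(s->irq, sketch_irq_handler, "sketch_spe", s->handle);
    if (ret)
        return ret;

    ret = cpuhp_state_add_instance(sketch_online_state, &s->hotplug_node);
    if (ret)
        free_percpu_irq(s->irq, s->handle);     /* unwind on failure */

    return ret;
}

static void sketch_dev_teardown(struct spe_dev_sketch *s)
{
    cpuhp_state_remove_instance(sketch_online_state, &s->hotplug_node);
    free_percpu_irq(s->irq, s->handle);
}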
1149 static int arm_spe_pmu_irq_probe(struct arm_spe_pmu *spe_pmu) in arm_spe_pmu_irq_probe() argument
1151 struct platform_device *pdev = spe_pmu->pdev; in arm_spe_pmu_irq_probe()
1162 if (irq_get_percpu_devid_partition(irq, &spe_pmu->supported_cpus)) { in arm_spe_pmu_irq_probe()
1167 spe_pmu->irq = irq; in arm_spe_pmu_irq_probe()
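arm_spe_pmu_irq_probe() (lines 1149-1167) takes the platform device's interrupt, checks that it is a per-CPU interrupt whose affinity partition can be read back into the supported_cpus mask, and records the IRQ number. A schematic sketch of those checks; the -EINVAL on a non-per-CPU interrupt is an assumption, the listing does not show the driver's error code:

#include <linux/cpumask.h>
#include <linux/irq.h>
#include <linux/platform_device.h>

/* Fetch the per-CPU interrupt and the set of CPUs it targets.
 * Returns the IRQ number on success, a negative errno otherwise. */
static int sketch_irq_probe(struct platform_device *pdev, cpumask_t *supported)
{
    int irq = platform_get_irq(pdev, 0);

    if (irq < 0)
        return irq;

    /* A per-CPU interrupt exposes the CPUs it is wired to as a
     * partition; anything else is unusable for this kind of PMU. */
    if (irq_get_percpu_devid_partition(irq, supported))
        return -EINVAL;

    return irq;
}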
1186 struct arm_spe_pmu *spe_pmu; in arm_spe_pmu_device_probe() local
1198 spe_pmu = devm_kzalloc(dev, sizeof(*spe_pmu), GFP_KERNEL); in arm_spe_pmu_device_probe()
1199 if (!spe_pmu) { in arm_spe_pmu_device_probe()
1204 spe_pmu->handle = alloc_percpu(typeof(*spe_pmu->handle)); in arm_spe_pmu_device_probe()
1205 if (!spe_pmu->handle) in arm_spe_pmu_device_probe()
1208 spe_pmu->pdev = pdev; in arm_spe_pmu_device_probe()
1209 platform_set_drvdata(pdev, spe_pmu); in arm_spe_pmu_device_probe()
1211 ret = arm_spe_pmu_irq_probe(spe_pmu); in arm_spe_pmu_device_probe()
1215 ret = arm_spe_pmu_dev_init(spe_pmu); in arm_spe_pmu_device_probe()
1219 ret = arm_spe_pmu_perf_init(spe_pmu); in arm_spe_pmu_device_probe()
1226 arm_spe_pmu_dev_teardown(spe_pmu); in arm_spe_pmu_device_probe()
1228 free_percpu(spe_pmu->handle); in arm_spe_pmu_device_probe()
1234 struct arm_spe_pmu *spe_pmu = platform_get_drvdata(pdev); in arm_spe_pmu_device_remove() local
1236 arm_spe_pmu_perf_destroy(spe_pmu); in arm_spe_pmu_device_remove()
1237 arm_spe_pmu_dev_teardown(spe_pmu); in arm_spe_pmu_device_remove()
1238 free_percpu(spe_pmu->handle); in arm_spe_pmu_device_remove()
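Finally, arm_spe_pmu_device_probe()/..._device_remove() (lines 1186-1238) tie the stages together: allocate the device structure with devm_kzalloc(), allocate the per-CPU handle area, stash the pointer as platform drvdata, then run IRQ probe, device init and perf registration in order, unwinding device init and the per-CPU allocation on failure; remove() unregisters the PMU, tears down the device and frees the per-CPU area. A schematic sketch of the probe ordering, with stub helpers standing in for the stages sketched above:

#include <linux/device.h>
#include <linux/percpu.h>
#include <linux/perf_event.h>
#include <linux/platform_device.h>
#include <linux/slab.h>

struct spe_drv_sketch {
    struct platform_device *pdev;
    struct perf_output_handle __percpu *handle;
};

/* Stubs standing in for the IRQ probe, device init and perf registration. */
static int sketch_irq_setup(struct spe_drv_sketch *s) { return 0; }
static int sketch_hw_init(struct spe_drv_sketch *s) { return 0; }
static void sketch_hw_teardown(struct spe_drv_sketch *s) { }
static int sketch_register_pmu(struct spe_drv_sketch *s) { return 0; }

static int sketch_probe(struct platform_device *pdev)
{
    struct spe_drv_sketch *s;
    int ret;

    s = devm_kzalloc(&pdev->dev, sizeof(*s), GFP_KERNEL);
    if (!s)
        return -ENOMEM;

    s->handle = alloc_percpu(typeof(*s->handle));
    if (!s->handle)
        return -ENOMEM;

    s->pdev = pdev;
    platform_set_drvdata(pdev, s);

    ret = sketch_irq_setup(s);
    if (ret)
        goto out_free_handle;

    ret = sketch_hw_init(s);
    if (ret)
        goto out_free_handle;

    ret = sketch_register_pmu(s);
    if (ret)
        goto out_teardown;

    return 0;

out_teardown:
    sketch_hw_teardown(s);
out_free_handle:
    free_percpu(s->handle);
    return ret;
}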