Lines Matching refs:l2cache_pmu (Linux kernel, Qualcomm L2 cache PMU driver)

109 struct l2cache_pmu {  struct
138 struct l2cache_pmu *l2cache_pmu; argument
150 #define to_l2cache_pmu(p) (container_of(p, struct l2cache_pmu, pmu))
164 struct l2cache_pmu *l2cache_pmu, int cpu) in get_cluster_pmu() argument
166 return *per_cpu_ptr(l2cache_pmu->pmu_cluster, cpu); in get_cluster_pmu()
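
The hits at 109-166 are the driver's two core lookups: to_l2cache_pmu() recovers the containing l2cache_pmu from an embedded struct pmu via container_of(), and get_cluster_pmu() resolves a CPU's cluster with a single per-CPU dereference. A minimal sketch; fields not visible above are assumptions consistent with how they are used later in this listing:

        #include <linux/cpumask.h>
        #include <linux/kernel.h>
        #include <linux/list.h>
        #include <linux/percpu.h>
        #include <linux/perf_event.h>
        #include <linux/platform_device.h>

        struct cluster_pmu;                     /* one instance per L2 cluster */

        struct l2cache_pmu {
                struct hlist_node node;         /* cpuhp multi-instance linkage */
                u32 num_pmus;                   /* clusters found at probe time */
                struct pmu pmu;                 /* embedded perf PMU */
                int num_counters;
                cpumask_t cpumask;              /* one reader CPU per cluster */
                struct platform_device *pdev;
                struct cluster_pmu * __percpu *pmu_cluster;
                struct list_head clusters;
        };

        #define to_l2cache_pmu(p) (container_of(p, struct l2cache_pmu, pmu))

        static inline struct cluster_pmu *get_cluster_pmu(
                struct l2cache_pmu *l2cache_pmu, int cpu)
        {
                return *per_cpu_ptr(l2cache_pmu->pmu_cluster, cpu);
        }
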
344 int num_ctrs = cluster->l2cache_pmu->num_counters - 1; in l2_cache_get_event_idx()
388 int num_counters = cluster->l2cache_pmu->num_counters; in l2_cache_handle_irq()
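
At 344 and 388 the usable general-purpose counter count is num_counters - 1: the top hardware counter is dedicated to cycles (l2_cycle_ctr_idx at 933 confirms this), so ordinary events are only placed in the lower slots. A sketch of that allocation; event_is_cycles() is a hypothetical predicate and used_counters an assumed per-cluster bitmap:

        static int l2_cache_get_event_idx(struct cluster_pmu *cluster,
                                          struct perf_event *event)
        {
                int num_ctrs = cluster->l2cache_pmu->num_counters - 1;
                int idx;

                if (event_is_cycles(event))     /* hypothetical helper */
                        return test_and_set_bit(l2_cycle_ctr_idx,
                                                cluster->used_counters) ?
                                -EAGAIN : l2_cycle_ctr_idx;

                idx = find_first_zero_bit(cluster->used_counters, num_ctrs);
                if (idx == num_ctrs)
                        return -EAGAIN;         /* all general counters busy */

                set_bit(idx, cluster->used_counters);
                return idx;
        }
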
443 struct l2cache_pmu *l2cache_pmu; in l2_cache_event_init() local
448 l2cache_pmu = to_l2cache_pmu(event->pmu); in l2_cache_event_init()
451 dev_dbg_ratelimited(&l2cache_pmu->pdev->dev, in l2_cache_event_init()
457 dev_dbg_ratelimited(&l2cache_pmu->pdev->dev, in l2_cache_event_init()
465 dev_dbg_ratelimited(&l2cache_pmu->pdev->dev, in l2_cache_event_init()
474 dev_dbg_ratelimited(&l2cache_pmu->pdev->dev, in l2_cache_event_init()
482 dev_dbg_ratelimited(&l2cache_pmu->pdev->dev, in l2_cache_event_init()
488 cluster = get_cluster_pmu(l2cache_pmu, event->cpu); in l2_cache_event_init()
491 dev_dbg_ratelimited(&l2cache_pmu->pdev->dev, in l2_cache_event_init()
499 dev_dbg_ratelimited(&l2cache_pmu->pdev->dev, in l2_cache_event_init()
509 dev_dbg_ratelimited(&l2cache_pmu->pdev->dev, in l2_cache_event_init()
521 dev_dbg_ratelimited(&l2cache_pmu->pdev->dev, in l2_cache_event_init()
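
The run of dev_dbg_ratelimited() hits at 443-521 is the event_init validation path: each unsupported request is refused with a rate-limited debug message rather than a console warning, since event_init runs on every perf_event_open(). A condensed sketch of the pattern; only the logging calls and the get_cluster_pmu() lookup at 488 are shown by the listing, so the individual checks here are assumptions:

        static int l2_cache_event_init(struct perf_event *event)
        {
                struct l2cache_pmu *l2cache_pmu = to_l2cache_pmu(event->pmu);
                struct cluster_pmu *cluster;

                if (event->attr.type != event->pmu->type)
                        return -ENOENT;

                if (is_sampling_event(event)) {
                        dev_dbg_ratelimited(&l2cache_pmu->pdev->dev,
                                            "Sampling not supported\n");
                        return -EOPNOTSUPP;
                }

                if (event->cpu < 0) {
                        dev_dbg_ratelimited(&l2cache_pmu->pdev->dev,
                                            "Per-task mode not supported\n");
                        return -EOPNOTSUPP;
                }

                /* ... further attribute checks elided ... */

                cluster = get_cluster_pmu(l2cache_pmu, event->cpu);
                if (!cluster) {
                        /* CPU was never associated with a cluster */
                        dev_dbg_ratelimited(&l2cache_pmu->pdev->dev,
                                            "CPU%d not in any cluster\n",
                                            event->cpu);
                        return -EINVAL;
                }

                return 0;
        }
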
639 struct l2cache_pmu *l2cache_pmu = to_l2cache_pmu(dev_get_drvdata(dev)); in l2_cache_pmu_cpumask_show() local
641 return cpumap_print_to_pagebuf(true, buf, &l2cache_pmu->cpumask); in l2_cache_pmu_cpumask_show()
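
639-641 are the conventional uncore-PMU "cpumask" sysfs attribute. Here dev is the PMU's own device, whose drvdata the perf core points at the struct pmu, hence the to_l2cache_pmu() unwrap; cpumap_print_to_pagebuf(true, ...) formats the mask as a CPU list. A sketch:

        static ssize_t l2_cache_pmu_cpumask_show(struct device *dev,
                                                 struct device_attribute *attr,
                                                 char *buf)
        {
                struct l2cache_pmu *l2cache_pmu =
                        to_l2cache_pmu(dev_get_drvdata(dev));

                return cpumap_print_to_pagebuf(true, buf, &l2cache_pmu->cpumask);
        }

        static DEVICE_ATTR(cpumask, 0444, l2_cache_pmu_cpumask_show, NULL);

Reading /sys/bus/event_source/devices/<pmu>/cpumask then tells tooling which CPU to open cluster-wide events on.
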
738 struct l2cache_pmu *l2cache_pmu, int cpu) in l2_cache_associate_cpu_with_cluster() argument
755 list_for_each_entry(cluster, &l2cache_pmu->clusters, next) { in l2_cache_associate_cpu_with_cluster()
759 dev_info(&l2cache_pmu->pdev->dev, in l2_cache_associate_cpu_with_cluster()
763 *per_cpu_ptr(l2cache_pmu->pmu_cluster, cpu) = cluster; in l2_cache_associate_cpu_with_cluster()
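
738-763 map a CPU onto its cluster once and cache the result in the per-CPU pmu_cluster slot, so get_cluster_pmu() stays a single dereference afterwards. The match itself is not visible in these hits; the sketch assumes an arm64 physical-affinity comparison (read_cpuid_mpidr() and MPIDR affinity level 1, from <asm/cputype.h>, valid because the cpuhp online callback runs on the incoming CPU) and cluster_id/cluster_cpus fields on struct cluster_pmu:

        static struct cluster_pmu *l2_cache_associate_cpu_with_cluster(
                struct l2cache_pmu *l2cache_pmu, int cpu)
        {
                struct cluster_pmu *cluster;
                u64 mpidr = read_cpuid_mpidr();         /* assumed match key */
                int cpu_cluster_id = MPIDR_AFFINITY_LEVEL(mpidr, 1);

                list_for_each_entry(cluster, &l2cache_pmu->clusters, next) {
                        if (cluster->cluster_id != cpu_cluster_id)
                                continue;

                        dev_info(&l2cache_pmu->pdev->dev,
                                 "CPU%d associated with cluster %d\n",
                                 cpu, cluster->cluster_id);
                        cpumask_set_cpu(cpu, &cluster->cluster_cpus);
                        *per_cpu_ptr(l2cache_pmu->pmu_cluster, cpu) = cluster;
                        return cluster;
                }

                return NULL;
        }
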
773 struct l2cache_pmu *l2cache_pmu; in l2cache_pmu_online_cpu() local
775 l2cache_pmu = hlist_entry_safe(node, struct l2cache_pmu, node); in l2cache_pmu_online_cpu()
776 cluster = get_cluster_pmu(l2cache_pmu, cpu); in l2cache_pmu_online_cpu()
779 cluster = l2_cache_associate_cpu_with_cluster(l2cache_pmu, cpu); in l2cache_pmu_online_cpu()
796 cpumask_set_cpu(cpu, &l2cache_pmu->cpumask); in l2cache_pmu_online_cpu()
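
773-796 are the cpuhp online callback: hlist_entry_safe() recovers the l2cache_pmu from the multi-instance node, the cluster pointer is resolved (lazily created the first time the CPU appears), and the CPU joins the PMU's cpumask only if its cluster has no reader yet. Sketch; the on_cpu owner field is an assumption:

        static int l2cache_pmu_online_cpu(unsigned int cpu, struct hlist_node *node)
        {
                struct cluster_pmu *cluster;
                struct l2cache_pmu *l2cache_pmu;

                l2cache_pmu = hlist_entry_safe(node, struct l2cache_pmu, node);
                cluster = get_cluster_pmu(l2cache_pmu, cpu);
                if (!cluster) {
                        /* first time this CPU comes up: map it to a cluster */
                        cluster = l2_cache_associate_cpu_with_cluster(l2cache_pmu,
                                                                      cpu);
                        if (!cluster)
                                return 0;       /* not one of our clusters */
                }

                if (cluster->on_cpu != -1)      /* cluster already has a reader */
                        return 0;

                cluster->on_cpu = cpu;
                cpumask_set_cpu(cpu, &l2cache_pmu->cpumask);
                return 0;
        }
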
808 struct l2cache_pmu *l2cache_pmu; in l2cache_pmu_offline_cpu() local
812 l2cache_pmu = hlist_entry_safe(node, struct l2cache_pmu, node); in l2cache_pmu_offline_cpu()
813 cluster = get_cluster_pmu(l2cache_pmu, cpu); in l2cache_pmu_offline_cpu()
822 cpumask_clear_cpu(cpu, &l2cache_pmu->cpumask); in l2cache_pmu_offline_cpu()
834 perf_pmu_migrate_context(&l2cache_pmu->pmu, cpu, target); in l2cache_pmu_offline_cpu()
836 cpumask_set_cpu(target, &l2cache_pmu->cpumask); in l2cache_pmu_offline_cpu()
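
808-836 are the mirror-image offline path: if the departing CPU owned its cluster, it leaves the PMU cpumask, active events migrate to another online CPU of the same cluster via perf_pmu_migrate_context(), and that target inherits ownership. Sketch under the same on_cpu/cluster_cpus assumptions as above:

        static int l2cache_pmu_offline_cpu(unsigned int cpu, struct hlist_node *node)
        {
                struct cluster_pmu *cluster;
                struct l2cache_pmu *l2cache_pmu;
                cpumask_t cluster_online_cpus;
                unsigned int target;

                l2cache_pmu = hlist_entry_safe(node, struct l2cache_pmu, node);
                cluster = get_cluster_pmu(l2cache_pmu, cpu);
                if (!cluster || cluster->on_cpu != cpu)
                        return 0;       /* this CPU was not the cluster's reader */

                cluster->on_cpu = -1;
                cpumask_clear_cpu(cpu, &l2cache_pmu->cpumask);

                /* pick any other online CPU in the same cluster */
                cpumask_and(&cluster_online_cpus, &cluster->cluster_cpus,
                            cpu_online_mask);
                target = cpumask_any_but(&cluster_online_cpus, cpu);
                if (target >= nr_cpu_ids)
                        return 0;       /* whole cluster going down */

                perf_pmu_migrate_context(&l2cache_pmu->pmu, cpu, target);
                cluster->on_cpu = target;
                cpumask_set_cpu(target, &l2cache_pmu->cpumask);
                return 0;
        }
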
846 struct l2cache_pmu *l2cache_pmu = data; in l2_cache_pmu_probe_cluster() local
866 list_add(&cluster->next, &l2cache_pmu->clusters); in l2_cache_pmu_probe_cluster()
875 cluster->l2cache_pmu = l2cache_pmu; in l2_cache_pmu_probe_cluster()
892 l2cache_pmu->num_pmus++; in l2_cache_pmu_probe_cluster()
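
846-892 run once per child device of the platform device: the callback handed to device_for_each_child() at 940 receives the l2cache_pmu through its void *data cookie, allocates a cluster, links it into the clusters list, points it back at its parent, and bumps num_pmus. Sketch; the firmware id lookup and IRQ setup between 866 and 875 are elided:

        static int l2_cache_pmu_probe_cluster(struct device *dev, void *data)
        {
                struct l2cache_pmu *l2cache_pmu = data;
                struct cluster_pmu *cluster;

                cluster = devm_kzalloc(&l2cache_pmu->pdev->dev,
                                       sizeof(*cluster), GFP_KERNEL);
                if (!cluster)
                        return -ENOMEM;

                INIT_LIST_HEAD(&cluster->next);
                list_add(&cluster->next, &l2cache_pmu->clusters);

                /* cluster id lookup and IRQ request elided */

                cluster->l2cache_pmu = l2cache_pmu;
                cluster->on_cpu = -1;           /* no reader CPU yet */
                l2cache_pmu->num_pmus++;
                return 0;
        }
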
900 struct l2cache_pmu *l2cache_pmu; in l2_cache_pmu_probe() local
902 l2cache_pmu = in l2_cache_pmu_probe()
903 devm_kzalloc(&pdev->dev, sizeof(*l2cache_pmu), GFP_KERNEL); in l2_cache_pmu_probe()
904 if (!l2cache_pmu) in l2_cache_pmu_probe()
907 INIT_LIST_HEAD(&l2cache_pmu->clusters); in l2_cache_pmu_probe()
909 platform_set_drvdata(pdev, l2cache_pmu); in l2_cache_pmu_probe()
910 l2cache_pmu->pmu = (struct pmu) { in l2_cache_pmu_probe()
926 l2cache_pmu->num_counters = get_num_counters(); in l2_cache_pmu_probe()
927 l2cache_pmu->pdev = pdev; in l2_cache_pmu_probe()
928 l2cache_pmu->pmu_cluster = devm_alloc_percpu(&pdev->dev, in l2_cache_pmu_probe()
930 if (!l2cache_pmu->pmu_cluster) in l2_cache_pmu_probe()
933 l2_cycle_ctr_idx = l2cache_pmu->num_counters - 1; in l2_cache_pmu_probe()
934 l2_counter_present_mask = GENMASK(l2cache_pmu->num_counters - 2, 0) | in l2_cache_pmu_probe()
937 cpumask_clear(&l2cache_pmu->cpumask); in l2_cache_pmu_probe()
940 err = device_for_each_child(&pdev->dev, l2cache_pmu, in l2_cache_pmu_probe()
945 if (l2cache_pmu->num_pmus == 0) { in l2_cache_pmu_probe()
951 &l2cache_pmu->node); in l2_cache_pmu_probe()
957 err = perf_pmu_register(&l2cache_pmu->pmu, l2cache_pmu->pmu.name, -1); in l2_cache_pmu_probe()
964 l2cache_pmu->num_pmus); in l2_cache_pmu_probe()
970 &l2cache_pmu->node); in l2_cache_pmu_probe()
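
900-970 give the probe sequence visible through these hits: allocate with devm_kzalloc(), initialise the clusters list and the embedded pmu, size the counter masks (GENMASK(num_counters - 2, 0) covers the general counters, with the top index reserved for cycles), enumerate clusters via device_for_each_child(), attach the cpuhp instance, and only then register the PMU, unwinding the cpuhp instance on failure. A condensed sketch; the pmu callbacks, the name, and the CPUHP_AP_PERF_ARM_QCOM_L2_ONLINE state (assumed registered earlier with cpuhp_setup_state_multi()) fill gaps the listing does not show:

        static int l2_cache_pmu_probe(struct platform_device *pdev)
        {
                struct l2cache_pmu *l2cache_pmu;
                int err;

                l2cache_pmu = devm_kzalloc(&pdev->dev, sizeof(*l2cache_pmu),
                                           GFP_KERNEL);
                if (!l2cache_pmu)
                        return -ENOMEM;

                INIT_LIST_HEAD(&l2cache_pmu->clusters);
                platform_set_drvdata(pdev, l2cache_pmu);
                l2cache_pmu->pmu = (struct pmu) {
                        .name           = "l2cache_0",  /* assumed */
                        .task_ctx_nr    = perf_invalid_context,
                        .event_init     = l2_cache_event_init,
                        /* add/del/start/stop/read callbacks elided */
                };

                l2cache_pmu->num_counters = get_num_counters(); /* hw query */
                l2cache_pmu->pdev = pdev;
                l2cache_pmu->pmu_cluster = devm_alloc_percpu(&pdev->dev,
                                                struct cluster_pmu *);
                if (!l2cache_pmu->pmu_cluster)
                        return -ENOMEM;

                l2_cycle_ctr_idx = l2cache_pmu->num_counters - 1;
                l2_counter_present_mask =
                        GENMASK(l2cache_pmu->num_counters - 2, 0) |
                        BIT(l2_cycle_ctr_idx);

                cpumask_clear(&l2cache_pmu->cpumask);

                err = device_for_each_child(&pdev->dev, l2cache_pmu,
                                            l2_cache_pmu_probe_cluster);
                if (err)
                        return err;
                if (l2cache_pmu->num_pmus == 0)
                        return -ENODEV;

                err = cpuhp_state_add_instance(CPUHP_AP_PERF_ARM_QCOM_L2_ONLINE,
                                               &l2cache_pmu->node);
                if (err)
                        return err;

                err = perf_pmu_register(&l2cache_pmu->pmu,
                                        l2cache_pmu->pmu.name, -1);
                if (err) {
                        cpuhp_state_remove_instance(
                                CPUHP_AP_PERF_ARM_QCOM_L2_ONLINE,
                                &l2cache_pmu->node);
                        return err;
                }

                dev_info(&pdev->dev, "registered L2 cache PMU, %d clusters\n",
                         l2cache_pmu->num_pmus);
                return 0;
        }
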
976 struct l2cache_pmu *l2cache_pmu = in l2_cache_pmu_remove() local
979 perf_pmu_unregister(&l2cache_pmu->pmu); in l2_cache_pmu_remove()
981 &l2cache_pmu->node); in l2_cache_pmu_remove()
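
976-981 tear down in reverse: unregister the PMU first so no new events can target it, then drop the cpuhp instance. Given platform_set_drvdata(pdev, l2cache_pmu) at 909, platform_get_drvdata() hands back the l2cache_pmu directly; the cpuhp state is assumed to match the one used at probe:

        static int l2_cache_pmu_remove(struct platform_device *pdev)
        {
                struct l2cache_pmu *l2cache_pmu = platform_get_drvdata(pdev);

                perf_pmu_unregister(&l2cache_pmu->pmu);
                cpuhp_state_remove_instance(CPUHP_AP_PERF_ARM_QCOM_L2_ONLINE,
                                            &l2cache_pmu->node);
                return 0;
        }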