// SPDX-License-Identifier: GPL-2.0
/*
 * platform_device probing code for ARM performance counters.
 *
 * Copyright (C) 2009 picoChip Designs, Ltd., Jamie Iles
 * Copyright (C) 2010 ARM Ltd., Will Deacon <will.deacon@arm.com>
 */
8*4882a593Smuzhiyun #define pr_fmt(fmt) "hw perfevents: " fmt
9*4882a593Smuzhiyun #define dev_fmt pr_fmt
10*4882a593Smuzhiyun
11*4882a593Smuzhiyun #include <linux/bug.h>
12*4882a593Smuzhiyun #include <linux/cpumask.h>
13*4882a593Smuzhiyun #include <linux/device.h>
14*4882a593Smuzhiyun #include <linux/errno.h>
15*4882a593Smuzhiyun #include <linux/irq.h>
16*4882a593Smuzhiyun #include <linux/irqdesc.h>
17*4882a593Smuzhiyun #include <linux/kconfig.h>
18*4882a593Smuzhiyun #include <linux/of.h>
19*4882a593Smuzhiyun #include <linux/of_device.h>
20*4882a593Smuzhiyun #include <linux/percpu.h>
21*4882a593Smuzhiyun #include <linux/perf/arm_pmu.h>
22*4882a593Smuzhiyun #include <linux/platform_device.h>
23*4882a593Smuzhiyun #include <linux/printk.h>
24*4882a593Smuzhiyun #include <linux/smp.h>
25*4882a593Smuzhiyun
probe_current_pmu(struct arm_pmu * pmu,const struct pmu_probe_info * info)26*4882a593Smuzhiyun static int probe_current_pmu(struct arm_pmu *pmu,
27*4882a593Smuzhiyun const struct pmu_probe_info *info)
28*4882a593Smuzhiyun {
29*4882a593Smuzhiyun int cpu = get_cpu();
30*4882a593Smuzhiyun unsigned int cpuid = read_cpuid_id();
31*4882a593Smuzhiyun int ret = -ENODEV;
32*4882a593Smuzhiyun
33*4882a593Smuzhiyun pr_info("probing PMU on CPU %d\n", cpu);
34*4882a593Smuzhiyun
35*4882a593Smuzhiyun for (; info->init != NULL; info++) {
36*4882a593Smuzhiyun if ((cpuid & info->mask) != info->cpuid)
37*4882a593Smuzhiyun continue;
38*4882a593Smuzhiyun ret = info->init(pmu);
39*4882a593Smuzhiyun break;
40*4882a593Smuzhiyun }
41*4882a593Smuzhiyun
42*4882a593Smuzhiyun put_cpu();
43*4882a593Smuzhiyun return ret;
44*4882a593Smuzhiyun }
45*4882a593Smuzhiyun
pmu_parse_percpu_irq(struct arm_pmu * pmu,int irq)46*4882a593Smuzhiyun static int pmu_parse_percpu_irq(struct arm_pmu *pmu, int irq)
47*4882a593Smuzhiyun {
48*4882a593Smuzhiyun int cpu, ret;
49*4882a593Smuzhiyun struct pmu_hw_events __percpu *hw_events = pmu->hw_events;
50*4882a593Smuzhiyun
51*4882a593Smuzhiyun ret = irq_get_percpu_devid_partition(irq, &pmu->supported_cpus);
52*4882a593Smuzhiyun if (ret)
53*4882a593Smuzhiyun return ret;
54*4882a593Smuzhiyun
55*4882a593Smuzhiyun for_each_cpu(cpu, &pmu->supported_cpus)
56*4882a593Smuzhiyun per_cpu(hw_events->irq, cpu) = irq;
57*4882a593Smuzhiyun
58*4882a593Smuzhiyun return 0;
59*4882a593Smuzhiyun }
60*4882a593Smuzhiyun
pmu_has_irq_affinity(struct device_node * node)61*4882a593Smuzhiyun static bool pmu_has_irq_affinity(struct device_node *node)
62*4882a593Smuzhiyun {
63*4882a593Smuzhiyun return !!of_find_property(node, "interrupt-affinity", NULL);
64*4882a593Smuzhiyun }
65*4882a593Smuzhiyun
pmu_parse_irq_affinity(struct device_node * node,int i)66*4882a593Smuzhiyun static int pmu_parse_irq_affinity(struct device_node *node, int i)
67*4882a593Smuzhiyun {
68*4882a593Smuzhiyun struct device_node *dn;
69*4882a593Smuzhiyun int cpu;
70*4882a593Smuzhiyun
71*4882a593Smuzhiyun /*
72*4882a593Smuzhiyun * If we don't have an interrupt-affinity property, we guess irq
73*4882a593Smuzhiyun * affinity matches our logical CPU order, as we used to assume.
74*4882a593Smuzhiyun * This is fragile, so we'll warn in pmu_parse_irqs().
75*4882a593Smuzhiyun */
76*4882a593Smuzhiyun if (!pmu_has_irq_affinity(node))
77*4882a593Smuzhiyun return i;
78*4882a593Smuzhiyun
79*4882a593Smuzhiyun dn = of_parse_phandle(node, "interrupt-affinity", i);
80*4882a593Smuzhiyun if (!dn) {
81*4882a593Smuzhiyun pr_warn("failed to parse interrupt-affinity[%d] for %pOFn\n",
82*4882a593Smuzhiyun i, node);
83*4882a593Smuzhiyun return -EINVAL;
84*4882a593Smuzhiyun }
85*4882a593Smuzhiyun
86*4882a593Smuzhiyun cpu = of_cpu_node_to_id(dn);
87*4882a593Smuzhiyun if (cpu < 0) {
88*4882a593Smuzhiyun pr_warn("failed to find logical CPU for %pOFn\n", dn);
89*4882a593Smuzhiyun cpu = nr_cpu_ids;
90*4882a593Smuzhiyun }
91*4882a593Smuzhiyun
92*4882a593Smuzhiyun of_node_put(dn);
93*4882a593Smuzhiyun
94*4882a593Smuzhiyun return cpu;
95*4882a593Smuzhiyun }
96*4882a593Smuzhiyun
/*
 * Discover the PMU's interrupts from its platform device and populate
 * pmu->supported_cpus plus the per-CPU hw_events->irq slots.
 *
 * Handles three shapes of hardware description:
 *  - no IRQs at all: PMU usable without sampling (counting only);
 *  - one percpu_devid IRQ (PPI): covers the CPUs in its partition;
 *  - one SPI per CPU: affinity taken from the "interrupt-affinity" DT
 *    property, or guessed from IRQ order when the property is absent.
 *
 * Returns 0 on success or a negative errno.
 */
static int pmu_parse_irqs(struct arm_pmu *pmu)
{
	int i = 0, num_irqs;
	struct platform_device *pdev = pmu->plat_device;
	struct pmu_hw_events __percpu *hw_events = pmu->hw_events;

	num_irqs = platform_irq_count(pdev);
	if (num_irqs < 0)
		return dev_err_probe(&pdev->dev, num_irqs, "unable to count PMU IRQs\n");

	/*
	 * In this case we have no idea which CPUs are covered by the PMU.
	 * To match our prior behaviour, we assume all CPUs in this case.
	 */
	if (num_irqs == 0) {
		pr_warn("no irqs for PMU, sampling events not supported\n");
		pmu->pmu.capabilities |= PERF_PMU_CAP_NO_INTERRUPT;
		cpumask_setall(&pmu->supported_cpus);
		return 0;
	}

	/* A single percpu_devid IRQ (PPI) covers multiple CPUs by itself. */
	if (num_irqs == 1) {
		int irq = platform_get_irq(pdev, 0);
		if ((irq > 0) && irq_is_percpu_devid(irq))
			return pmu_parse_percpu_irq(pmu, irq);
	}

	/* On SMP, guessing affinity from IRQ order is fragile — warn. */
	if (nr_cpu_ids != 1 && !pmu_has_irq_affinity(pdev->dev.of_node)) {
		pr_warn("no interrupt-affinity property for %pOF, guessing.\n",
			pdev->dev.of_node);
	}

	for (i = 0; i < num_irqs; i++) {
		int cpu, irq;

		irq = platform_get_irq(pdev, i);
		if (WARN_ON(irq <= 0))
			continue;

		/* Mixing per-CPU (PPI) and shared (SPI) IRQs is invalid. */
		if (irq_is_percpu_devid(irq)) {
			pr_warn("multiple PPIs or mismatched SPI/PPI detected\n");
			return -EINVAL;
		}

		cpu = pmu_parse_irq_affinity(pdev->dev.of_node, i);
		if (cpu < 0)
			return cpu;
		/* cpu == nr_cpu_ids: CPU not online/known — skip this IRQ. */
		if (cpu >= nr_cpu_ids)
			continue;

		if (per_cpu(hw_events->irq, cpu)) {
			pr_warn("multiple PMU IRQs for the same CPU detected\n");
			return -EINVAL;
		}

		per_cpu(hw_events->irq, cpu) = irq;
		cpumask_set_cpu(cpu, &pmu->supported_cpus);
	}

	return 0;
}
158*4882a593Smuzhiyun
armpmu_request_irqs(struct arm_pmu * armpmu)159*4882a593Smuzhiyun static int armpmu_request_irqs(struct arm_pmu *armpmu)
160*4882a593Smuzhiyun {
161*4882a593Smuzhiyun struct pmu_hw_events __percpu *hw_events = armpmu->hw_events;
162*4882a593Smuzhiyun int cpu, err = 0;
163*4882a593Smuzhiyun
164*4882a593Smuzhiyun for_each_cpu(cpu, &armpmu->supported_cpus) {
165*4882a593Smuzhiyun int irq = per_cpu(hw_events->irq, cpu);
166*4882a593Smuzhiyun if (!irq)
167*4882a593Smuzhiyun continue;
168*4882a593Smuzhiyun
169*4882a593Smuzhiyun err = armpmu_request_irq(irq, cpu);
170*4882a593Smuzhiyun if (err)
171*4882a593Smuzhiyun break;
172*4882a593Smuzhiyun }
173*4882a593Smuzhiyun
174*4882a593Smuzhiyun return err;
175*4882a593Smuzhiyun }
176*4882a593Smuzhiyun
armpmu_free_irqs(struct arm_pmu * armpmu)177*4882a593Smuzhiyun static void armpmu_free_irqs(struct arm_pmu *armpmu)
178*4882a593Smuzhiyun {
179*4882a593Smuzhiyun int cpu;
180*4882a593Smuzhiyun struct pmu_hw_events __percpu *hw_events = armpmu->hw_events;
181*4882a593Smuzhiyun
182*4882a593Smuzhiyun for_each_cpu(cpu, &armpmu->supported_cpus) {
183*4882a593Smuzhiyun int irq = per_cpu(hw_events->irq, cpu);
184*4882a593Smuzhiyun
185*4882a593Smuzhiyun armpmu_free_irq(irq, cpu);
186*4882a593Smuzhiyun }
187*4882a593Smuzhiyun }
188*4882a593Smuzhiyun
/*
 * Common probe entry point for ARM PMU platform drivers.
 *
 * Allocates an arm_pmu, parses its IRQs, initialises it via either the DT
 * match table (@of_table) or, failing that, the CPUID-based @probe_table,
 * then requests the IRQs and registers the PMU with perf.
 *
 * Returns 0 on success or a negative errno; on failure all partially
 * acquired resources (IRQs, the arm_pmu allocation) are released.
 */
int arm_pmu_device_probe(struct platform_device *pdev,
			 const struct of_device_id *of_table,
			 const struct pmu_probe_info *probe_table)
{
	const struct of_device_id *of_id;
	armpmu_init_fn init_fn;
	struct device_node *node = pdev->dev.of_node;
	struct arm_pmu *pmu;
	int ret = -ENODEV;

	pmu = armpmu_alloc();
	if (!pmu)
		return -ENOMEM;

	pmu->plat_device = pdev;

	ret = pmu_parse_irqs(pmu);
	if (ret)
		goto out_free;

	/* Prefer the DT match; fall back to CPUID probing below. */
	if (node && (of_id = of_match_node(of_table, pdev->dev.of_node))) {
		init_fn = of_id->data;

		pmu->secure_access = of_property_read_bool(pdev->dev.of_node,
							   "secure-reg-access");

		/* arm64 systems boot only as non-secure */
		if (IS_ENABLED(CONFIG_ARM64) && pmu->secure_access) {
			pr_warn("ignoring \"secure-reg-access\" property for arm64\n");
			pmu->secure_access = false;
		}

		ret = init_fn(pmu);
	} else if (probe_table) {
		/* No DT match: the CPUID path covers all CPUs. */
		cpumask_setall(&pmu->supported_cpus);
		ret = probe_current_pmu(pmu, probe_table);
	}

	/* ret is still -ENODEV if neither init path was taken. */
	if (ret) {
		pr_info("%pOF: failed to probe PMU!\n", node);
		goto out_free;
	}

	ret = armpmu_request_irqs(pmu);
	if (ret)
		goto out_free_irqs;

	ret = armpmu_register(pmu);
	if (ret)
		goto out_free_irqs;

	return 0;

out_free_irqs:
	armpmu_free_irqs(pmu);
out_free:
	pr_info("%pOF: failed to register PMU devices!\n", node);
	armpmu_free(pmu);
	return ret;
}
249