// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2013 Advanced Micro Devices, Inc.
 *
 * Author: Steven Kinney <Steven.Kinney@amd.com>
 * Author: Suravee Suthikulpanit <Suravee.Suthikulpanit@amd.com>
 *
 * Perf: amd_iommu - AMD IOMMU Performance Counter PMU implementation
 */

#define pr_fmt(fmt)	"perf/amd_iommu: " fmt

#include <linux/perf_event.h>
#include <linux/init.h>
#include <linux/cpumask.h>
#include <linux/slab.h>

#include "../perf_event.h"
#include "iommu.h"
/* iommu pmu conf masks */
#define GET_CSOURCE(x)		((x)->conf & 0xFFULL)
#define GET_DEVID(x)		(((x)->conf >> 8)  & 0xFFFFULL)
#define GET_DOMID(x)		(((x)->conf >> 24) & 0xFFFFULL)
#define GET_PASID(x)		(((x)->conf >> 40) & 0xFFFFFULL)

/* iommu pmu conf1 masks */
#define GET_DEVID_MASK(x)	((x)->conf1  & 0xFFFFULL)
#define GET_DOMID_MASK(x)	(((x)->conf1 >> 16) & 0xFFFFULL)
#define GET_PASID_MASK(x)	(((x)->conf1 >> 32) & 0xFFFFFULL)

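/*
 * Worked example (illustrative only): for event->attr.config =
 * (0x1234 << 8) | 0x05, GET_CSOURCE() extracts the counter source
 * 0x05 from bits 0-7 and GET_DEVID() extracts the device ID 0x1234
 * from bits 8-23, matching the sysfs format strings defined below.
 */
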
#define IOMMU_NAME_SIZE 16

struct perf_amd_iommu {
	struct list_head list;
	struct pmu pmu;
	struct amd_iommu *iommu;
	char name[IOMMU_NAME_SIZE];
	u8 max_banks;
	u8 max_counters;
	u64 cntr_assign_mask;
	raw_spinlock_t lock;
};

static LIST_HEAD(perf_amd_iommu_list);

/*---------------------------------------------
 * sysfs format attributes
 *---------------------------------------------*/
PMU_FORMAT_ATTR(csource,	"config:0-7");
PMU_FORMAT_ATTR(devid,		"config:8-23");
PMU_FORMAT_ATTR(domid,		"config:24-39");
PMU_FORMAT_ATTR(pasid,		"config:40-59");
PMU_FORMAT_ATTR(devid_mask,	"config1:0-15");
PMU_FORMAT_ATTR(domid_mask,	"config1:16-31");
PMU_FORMAT_ATTR(pasid_mask,	"config1:32-51");
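
/*
 * These format fields let userspace program the match/mask registers
 * directly, e.g. (hypothetical device ID values; PMU name as registered
 * by init_one_iommu() below):
 *
 *   perf stat -a -e amd_iommu_0/csource=0x05,devid=0x1234,devid_mask=0xffff/
 */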

static struct attribute *iommu_format_attrs[] = {
	&format_attr_csource.attr,
	&format_attr_devid.attr,
	&format_attr_pasid.attr,
	&format_attr_domid.attr,
	&format_attr_devid_mask.attr,
	&format_attr_pasid_mask.attr,
	&format_attr_domid_mask.attr,
	NULL,
};

static struct attribute_group amd_iommu_format_group = {
	.name = "format",
	.attrs = iommu_format_attrs,
};

/*---------------------------------------------
 * sysfs events attributes
 *---------------------------------------------*/
static struct attribute_group amd_iommu_events_group = {
	.name = "events",
};

struct amd_iommu_event_desc {
	struct device_attribute attr;
	const char *event;
};

static ssize_t _iommu_event_show(struct device *dev,
				 struct device_attribute *attr, char *buf)
{
	struct amd_iommu_event_desc *event =
		container_of(attr, struct amd_iommu_event_desc, attr);
	return sprintf(buf, "%s\n", event->event);
}

#define AMD_IOMMU_EVENT_DESC(_name, _event)			\
{								\
	.attr  = __ATTR(_name, 0444, _iommu_event_show, NULL),	\
	.event = _event,					\
}

static struct amd_iommu_event_desc amd_iommu_v2_event_descs[] = {
	AMD_IOMMU_EVENT_DESC(mem_pass_untrans,		"csource=0x01"),
	AMD_IOMMU_EVENT_DESC(mem_pass_pretrans,		"csource=0x02"),
	AMD_IOMMU_EVENT_DESC(mem_pass_excl,		"csource=0x03"),
	AMD_IOMMU_EVENT_DESC(mem_target_abort,		"csource=0x04"),
	AMD_IOMMU_EVENT_DESC(mem_trans_total,		"csource=0x05"),
	AMD_IOMMU_EVENT_DESC(mem_iommu_tlb_pte_hit,	"csource=0x06"),
	AMD_IOMMU_EVENT_DESC(mem_iommu_tlb_pte_mis,	"csource=0x07"),
	AMD_IOMMU_EVENT_DESC(mem_iommu_tlb_pde_hit,	"csource=0x08"),
	AMD_IOMMU_EVENT_DESC(mem_iommu_tlb_pde_mis,	"csource=0x09"),
	AMD_IOMMU_EVENT_DESC(mem_dte_hit,		"csource=0x0a"),
	AMD_IOMMU_EVENT_DESC(mem_dte_mis,		"csource=0x0b"),
	AMD_IOMMU_EVENT_DESC(page_tbl_read_tot,		"csource=0x0c"),
	AMD_IOMMU_EVENT_DESC(page_tbl_read_nst,		"csource=0x0d"),
	AMD_IOMMU_EVENT_DESC(page_tbl_read_gst,		"csource=0x0e"),
	AMD_IOMMU_EVENT_DESC(int_dte_hit,		"csource=0x0f"),
	AMD_IOMMU_EVENT_DESC(int_dte_mis,		"csource=0x10"),
	AMD_IOMMU_EVENT_DESC(cmd_processed,		"csource=0x11"),
	AMD_IOMMU_EVENT_DESC(cmd_processed_inv,		"csource=0x12"),
	AMD_IOMMU_EVENT_DESC(tlb_inv,			"csource=0x13"),
	AMD_IOMMU_EVENT_DESC(ign_rd_wr_mmio_1ff8h,	"csource=0x14"),
	AMD_IOMMU_EVENT_DESC(vapic_int_non_guest,	"csource=0x15"),
	AMD_IOMMU_EVENT_DESC(vapic_int_guest,		"csource=0x16"),
	AMD_IOMMU_EVENT_DESC(smi_recv,			"csource=0x17"),
	AMD_IOMMU_EVENT_DESC(smi_blk,			"csource=0x18"),
	{ /* end: all zeroes */ },
};
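
/*
 * Each named event above is simply an alias for a csource value, so the
 * following two invocations are equivalent (illustrative sketch):
 *
 *   perf stat -a -e amd_iommu_0/mem_trans_total/ -- sleep 1
 *   perf stat -a -e amd_iommu_0/csource=0x05/    -- sleep 1
 */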

/*---------------------------------------------
 * sysfs cpumask attributes
 *---------------------------------------------*/
static cpumask_t iommu_cpumask;

static ssize_t _iommu_cpumask_show(struct device *dev,
				   struct device_attribute *attr,
				   char *buf)
{
	return cpumap_print_to_pagebuf(true, buf, &iommu_cpumask);
}
static DEVICE_ATTR(cpumask, S_IRUGO, _iommu_cpumask_show, NULL);

static struct attribute *iommu_cpumask_attrs[] = {
	&dev_attr_cpumask.attr,
	NULL,
};

static struct attribute_group amd_iommu_cpumask_group = {
	.attrs = iommu_cpumask_attrs,
};

/*---------------------------------------------*/

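/*
 * Counter assignment bookkeeping: cntr_assign_mask reserves one bit per
 * bank/counter pair at bit position (bank * 4 + cntr), which implicitly
 * assumes at most 4 counters per bank so that positions stay unique.
 * E.g. bank 2, counter 1 maps to bit 9.
 */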
static int get_next_avail_iommu_bnk_cntr(struct perf_event *event)
{
	struct perf_amd_iommu *piommu = container_of(event->pmu, struct perf_amd_iommu, pmu);
	int max_cntrs = piommu->max_counters;
	int max_banks = piommu->max_banks;
	u32 shift, bank, cntr;
	unsigned long flags;
	int retval;

	raw_spin_lock_irqsave(&piommu->lock, flags);

	for (bank = 0; bank < max_banks; bank++) {
		for (cntr = 0; cntr < max_cntrs; cntr++) {
			/* One bit per bank/counter pair: bank * 4 + cntr */
			shift = bank + (bank * 3) + cntr;
			if (piommu->cntr_assign_mask & BIT_ULL(shift))
				continue;

			piommu->cntr_assign_mask |= BIT_ULL(shift);
			event->hw.iommu_bank = bank;
			event->hw.iommu_cntr = cntr;
			retval = 0;
			goto out;
		}
	}
	retval = -ENOSPC;
out:
	raw_spin_unlock_irqrestore(&piommu->lock, flags);
	return retval;
}

static int clear_avail_iommu_bnk_cntr(struct perf_amd_iommu *perf_iommu,
				      u8 bank, u8 cntr)
{
	unsigned long flags;
	int max_banks, max_cntrs;
	int shift = 0;

	max_banks = perf_iommu->max_banks;
	max_cntrs = perf_iommu->max_counters;

	/* bank and cntr are zero-based indices */
	if ((bank >= max_banks) || (cntr >= max_cntrs))
		return -EINVAL;

	shift = bank + (bank * 3) + cntr;

	raw_spin_lock_irqsave(&perf_iommu->lock, flags);
	perf_iommu->cntr_assign_mask &= ~BIT_ULL(shift);
	raw_spin_unlock_irqrestore(&perf_iommu->lock, flags);

	return 0;
}

static int perf_iommu_event_init(struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;

	/* Test the event attr type for PMU enumeration */
	if (event->attr.type != event->pmu->type)
		return -ENOENT;

	/*
	 * IOMMU counters are shared across all cores.
	 * Therefore, they do not support per-process mode
	 * or event sampling mode.
	 */
	if (is_sampling_event(event) || event->attach_state & PERF_ATTACH_TASK)
		return -EINVAL;

	if (event->cpu < 0)
		return -EINVAL;

	/* Update the hw_perf_event struct with the iommu config data */
	hwc->conf  = event->attr.config;
	hwc->conf1 = event->attr.config1;

	return 0;
}

static inline struct amd_iommu *perf_event_2_iommu(struct perf_event *ev)
{
	return (container_of(ev->pmu, struct perf_amd_iommu, pmu))->iommu;
}

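/*
 * Program the counter source and the devid/pasid/domid match registers
 * for the event's assigned bank/counter. Setting bit 31 of a match
 * register when a non-zero value/mask was supplied presumably acts as
 * the match-enable bit; the exact register layout is defined by the
 * AMD IOMMU specification.
 */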
static void perf_iommu_enable_event(struct perf_event *ev)
{
	struct amd_iommu *iommu = perf_event_2_iommu(ev);
	struct hw_perf_event *hwc = &ev->hw;
	u8 bank = hwc->iommu_bank;
	u8 cntr = hwc->iommu_cntr;
	u64 reg = 0ULL;

	reg = GET_CSOURCE(hwc);
	amd_iommu_pc_set_reg(iommu, bank, cntr, IOMMU_PC_COUNTER_SRC_REG, &reg);

	reg = GET_DEVID_MASK(hwc);
	reg = GET_DEVID(hwc) | (reg << 32);
	if (reg)
		reg |= BIT(31);
	amd_iommu_pc_set_reg(iommu, bank, cntr, IOMMU_PC_DEVID_MATCH_REG, &reg);

	reg = GET_PASID_MASK(hwc);
	reg = GET_PASID(hwc) | (reg << 32);
	if (reg)
		reg |= BIT(31);
	amd_iommu_pc_set_reg(iommu, bank, cntr, IOMMU_PC_PASID_MATCH_REG, &reg);

	reg = GET_DOMID_MASK(hwc);
	reg = GET_DOMID(hwc) | (reg << 32);
	if (reg)
		reg |= BIT(31);
	amd_iommu_pc_set_reg(iommu, bank, cntr, IOMMU_PC_DOMID_MATCH_REG, &reg);
}

static void perf_iommu_disable_event(struct perf_event *event)
{
	struct amd_iommu *iommu = perf_event_2_iommu(event);
	struct hw_perf_event *hwc = &event->hw;
	u64 reg = 0ULL;

	/* Clearing the counter source register stops the counter */
	amd_iommu_pc_set_reg(iommu, hwc->iommu_bank, hwc->iommu_cntr,
			     IOMMU_PC_COUNTER_SRC_REG, &reg);
}

static void perf_iommu_start(struct perf_event *event, int flags)
{
	struct hw_perf_event *hwc = &event->hw;

	if (WARN_ON_ONCE(!(hwc->state & PERF_HES_STOPPED)))
		return;

	WARN_ON_ONCE(!(hwc->state & PERF_HES_UPTODATE));
	hwc->state = 0;

	/*
	 * To account for power-gating, which prevents writes to
	 * the counter, the counter must be enabled before the
	 * counter register is programmed.
	 */
	perf_iommu_enable_event(event);

	if (flags & PERF_EF_RELOAD) {
		u64 count = 0;
		struct amd_iommu *iommu = perf_event_2_iommu(event);

		/*
		 * Since the IOMMU PMU only supports counting mode,
		 * the counter always starts from zero.
		 */
		amd_iommu_pc_set_reg(iommu, hwc->iommu_bank, hwc->iommu_cntr,
				     IOMMU_PC_COUNTER_REG, &count);
	}

	perf_event_update_userpage(event);
}

static void perf_iommu_read(struct perf_event *event)
{
	u64 count;
	struct hw_perf_event *hwc = &event->hw;
	struct amd_iommu *iommu = perf_event_2_iommu(event);

	if (amd_iommu_pc_get_reg(iommu, hwc->iommu_bank, hwc->iommu_cntr,
				 IOMMU_PC_COUNTER_REG, &count))
		return;

	/* IOMMU pc counter register is only 48 bits */
	count &= GENMASK_ULL(47, 0);

	/*
	 * Since the counter always starts from zero,
	 * simply accumulate the count for the event.
	 */
	local64_add(count, &event->count);
}

static void perf_iommu_stop(struct perf_event *event, int flags)
{
	struct hw_perf_event *hwc = &event->hw;

	if (hwc->state & PERF_HES_UPTODATE)
		return;

	/*
	 * To account for power-gating, in which reading the counter
	 * would return zero, the register must be read before the
	 * counter is disabled.
	 */
	perf_iommu_read(event);
	hwc->state |= PERF_HES_UPTODATE;

	perf_iommu_disable_event(event);
	WARN_ON_ONCE(hwc->state & PERF_HES_STOPPED);
	hwc->state |= PERF_HES_STOPPED;
}

static int perf_iommu_add(struct perf_event *event, int flags)
{
	int retval;

	event->hw.state = PERF_HES_UPTODATE | PERF_HES_STOPPED;

	/* Request an iommu bank/counter */
	retval = get_next_avail_iommu_bnk_cntr(event);
	if (retval)
		return retval;

	if (flags & PERF_EF_START)
		perf_iommu_start(event, PERF_EF_RELOAD);

	return 0;
}

static void perf_iommu_del(struct perf_event *event, int flags)
{
	struct hw_perf_event *hwc = &event->hw;
	struct perf_amd_iommu *perf_iommu =
		container_of(event->pmu, struct perf_amd_iommu, pmu);

	perf_iommu_stop(event, PERF_EF_UPDATE);

	/* Clear the assigned iommu bank/counter */
	clear_avail_iommu_bnk_cntr(perf_iommu,
				   hwc->iommu_bank, hwc->iommu_cntr);

	perf_event_update_userpage(event);
}

static __init int _init_events_attrs(void)
{
	int i = 0, j;
	struct attribute **attrs;

	while (amd_iommu_v2_event_descs[i].attr.attr.name)
		i++;

	attrs = kcalloc(i + 1, sizeof(*attrs), GFP_KERNEL);
	if (!attrs)
		return -ENOMEM;

	for (j = 0; j < i; j++)
		attrs[j] = &amd_iommu_v2_event_descs[j].attr.attr;

	amd_iommu_events_group.attrs = attrs;
	return 0;
}

static const struct attribute_group *amd_iommu_attr_groups[] = {
	&amd_iommu_format_group,
	&amd_iommu_cpumask_group,
	&amd_iommu_events_group,
	NULL,
};

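/*
 * Template ops for each per-IOMMU PMU instance. task_ctx_nr =
 * perf_invalid_context means the PMU has no per-task context, which
 * matches the system-wide-only restriction enforced in
 * perf_iommu_event_init() above.
 */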
static const struct pmu iommu_pmu __initconst = {
	.event_init	= perf_iommu_event_init,
	.add		= perf_iommu_add,
	.del		= perf_iommu_del,
	.start		= perf_iommu_start,
	.stop		= perf_iommu_stop,
	.read		= perf_iommu_read,
	.task_ctx_nr	= perf_invalid_context,
	.attr_groups	= amd_iommu_attr_groups,
	.capabilities	= PERF_PMU_CAP_NO_EXCLUDE,
};

static __init int init_one_iommu(unsigned int idx)
{
	struct perf_amd_iommu *perf_iommu;
	int ret;

	perf_iommu = kzalloc(sizeof(struct perf_amd_iommu), GFP_KERNEL);
	if (!perf_iommu)
		return -ENOMEM;

	raw_spin_lock_init(&perf_iommu->lock);

	perf_iommu->pmu          = iommu_pmu;
	perf_iommu->iommu        = get_amd_iommu(idx);
	perf_iommu->max_banks    = amd_iommu_pc_get_max_banks(idx);
	perf_iommu->max_counters = amd_iommu_pc_get_max_counters(idx);

	if (!perf_iommu->iommu ||
	    !perf_iommu->max_banks ||
	    !perf_iommu->max_counters) {
		kfree(perf_iommu);
		return -EINVAL;
	}

	snprintf(perf_iommu->name, IOMMU_NAME_SIZE, "amd_iommu_%u", idx);

	ret = perf_pmu_register(&perf_iommu->pmu, perf_iommu->name, -1);
	if (!ret) {
		pr_info("Detected AMD IOMMU #%u (%d banks, %d counters/bank).\n",
			idx, perf_iommu->max_banks, perf_iommu->max_counters);
		list_add_tail(&perf_iommu->list, &perf_amd_iommu_list);
	} else {
		pr_warn("Error initializing IOMMU %u.\n", idx);
		kfree(perf_iommu);
	}
	return ret;
}

static __init int amd_iommu_pc_init(void)
{
	unsigned int i, cnt = 0;
	int ret;

	/* Make sure the IOMMU PC resource is available */
	if (!amd_iommu_pc_supported())
		return -ENODEV;

	ret = _init_events_attrs();
	if (ret)
		return ret;

	/*
	 * An IOMMU PMU is specific to an IOMMU, and can function
	 * independently. So we go through all IOMMUs and ignore the
	 * ones that fail init, unless all IOMMUs fail.
	 */
	for (i = 0; i < amd_iommu_get_num_iommus(); i++) {
		ret = init_one_iommu(i);
		if (!ret)
			cnt++;
	}

	if (!cnt) {
		kfree(amd_iommu_events_group.attrs);
		return -ENODEV;
	}

	/* Init cpumask attributes to only core 0 */
	cpumask_set_cpu(0, &iommu_cpumask);
	return 0;
}

device_initcall(amd_iommu_pc_init);
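
/*
 * Once initialized, each successfully registered IOMMU PMU shows up
 * under sysfs (illustrative path, assuming the first instance):
 *
 *   /sys/bus/event_source/devices/amd_iommu_0/{format,events,cpumask}
 */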