xref: /OK3568_Linux_fs/kernel/arch/powerpc/perf/hv-gpci.c (revision 4882a59341e53eb6f0b4789bf948001014eff981)
1*4882a593Smuzhiyun // SPDX-License-Identifier: GPL-2.0-or-later
2*4882a593Smuzhiyun /*
3*4882a593Smuzhiyun  * Hypervisor supplied "gpci" ("get performance counter info") performance
4*4882a593Smuzhiyun  * counter support
5*4882a593Smuzhiyun  *
6*4882a593Smuzhiyun  * Author: Cody P Schafer <cody@linux.vnet.ibm.com>
7*4882a593Smuzhiyun  * Copyright 2014 IBM Corporation.
8*4882a593Smuzhiyun  */
9*4882a593Smuzhiyun 
10*4882a593Smuzhiyun #define pr_fmt(fmt) "hv-gpci: " fmt
11*4882a593Smuzhiyun 
12*4882a593Smuzhiyun #include <linux/init.h>
13*4882a593Smuzhiyun #include <linux/perf_event.h>
14*4882a593Smuzhiyun #include <asm/firmware.h>
15*4882a593Smuzhiyun #include <asm/hvcall.h>
16*4882a593Smuzhiyun #include <asm/io.h>
17*4882a593Smuzhiyun 
18*4882a593Smuzhiyun #include "hv-gpci.h"
19*4882a593Smuzhiyun #include "hv-common.h"
20*4882a593Smuzhiyun 
21*4882a593Smuzhiyun /*
22*4882a593Smuzhiyun  * Example usage:
23*4882a593Smuzhiyun  *  perf stat -e 'hv_gpci/counter_info_version=3,offset=0,length=8,
24*4882a593Smuzhiyun  *		  secondary_index=0,starting_index=0xffffffff,request=0x10/' ...
25*4882a593Smuzhiyun  */
26*4882a593Smuzhiyun 
/* u32, which counter request to issue to the hypervisor */
EVENT_DEFINE_RANGE_FORMAT(request, config, 0, 31);
/* u32 */
/*
 * Note that starting_index, phys_processor_idx, sibling_part_id,
 * hw_chip_id, partition_id all refer to the same bit range. They
 * are basically aliases for the starting_index. The specific alias
 * used depends on the event. See REQUEST_IDX_KIND in hv-gpci-requests.h
 */
EVENT_DEFINE_RANGE_FORMAT(starting_index, config, 32, 63);
EVENT_DEFINE_RANGE_FORMAT_LITE(phys_processor_idx, config, 32, 63);
EVENT_DEFINE_RANGE_FORMAT_LITE(sibling_part_id, config, 32, 63);
EVENT_DEFINE_RANGE_FORMAT_LITE(hw_chip_id, config, 32, 63);
EVENT_DEFINE_RANGE_FORMAT_LITE(partition_id, config, 32, 63);

/* u16 */
EVENT_DEFINE_RANGE_FORMAT(secondary_index, config1, 0, 15);
/* u8 */
EVENT_DEFINE_RANGE_FORMAT(counter_info_version, config1, 16, 23);
/* u8, bytes of data (1-8) */
EVENT_DEFINE_RANGE_FORMAT(length, config1, 24, 31);
/* u32, byte offset into the returned counter data */
EVENT_DEFINE_RANGE_FORMAT(offset, config1, 32, 63);

/* CPU(s) designated to service hv_gpci event reads (kept to one CPU). */
static cpumask_t hv_gpci_cpumask;
52*4882a593Smuzhiyun 
/* sysfs "format" entries describing how event config bits are laid out. */
static struct attribute *format_attrs[] = {
	&format_attr_request.attr,
	&format_attr_starting_index.attr,
	&format_attr_phys_processor_idx.attr,
	&format_attr_sibling_part_id.attr,
	&format_attr_hw_chip_id.attr,
	&format_attr_partition_id.attr,
	&format_attr_secondary_index.attr,
	&format_attr_counter_info_version.attr,

	&format_attr_offset.attr,
	&format_attr_length.attr,
	NULL,
};

static struct attribute_group format_group = {
	.name = "format",
	.attrs = format_attrs,
};

/* Named events; the attribute list is generated in hv-gpci.h machinery. */
static struct attribute_group event_group = {
	.name  = "events",
	.attrs = hv_gpci_event_attrs,
};
77*4882a593Smuzhiyun 
/*
 * HV_CAPS_ATTR - define a read-only device attribute that reports one
 * field of struct hv_perf_caps, printed with @_format.
 * The generated show routine re-queries the hypervisor on every read and
 * returns -EIO if hv_perf_caps_get() fails.
 */
#define HV_CAPS_ATTR(_name, _format)				\
static ssize_t _name##_show(struct device *dev,			\
			    struct device_attribute *attr,	\
			    char *page)				\
{								\
	struct hv_perf_caps caps;				\
	unsigned long hret = hv_perf_caps_get(&caps);		\
	if (hret)						\
		return -EIO;					\
								\
	return sprintf(page, _format, caps._name);		\
}								\
static struct device_attribute hv_caps_attr_##_name = __ATTR_RO(_name)
91*4882a593Smuzhiyun 
/* sysfs: report the counter-info version this kernel was built to use. */
static ssize_t kernel_version_show(struct device *dev,
				   struct device_attribute *attr,
				   char *page)
{
	return sprintf(page, "0x%x\n", COUNTER_INFO_VERSION_CURRENT);
}
98*4882a593Smuzhiyun 
/* sysfs: report which CPU currently services hv_gpci event reads. */
static ssize_t cpumask_show(struct device *dev,
			    struct device_attribute *attr, char *buf)
{
	return cpumap_print_to_pagebuf(true, buf, &hv_gpci_cpumask);
}
104*4882a593Smuzhiyun 
static DEVICE_ATTR_RO(kernel_version);
static DEVICE_ATTR_RO(cpumask);

/* Capability fields reported by the hypervisor via hv_perf_caps_get(). */
HV_CAPS_ATTR(version, "0x%x\n");
HV_CAPS_ATTR(ga, "%d\n");
HV_CAPS_ATTR(expanded, "%d\n");
HV_CAPS_ATTR(lab, "%d\n");
HV_CAPS_ATTR(collect_privileged, "%d\n");

static struct attribute *interface_attrs[] = {
	&dev_attr_kernel_version.attr,
	&hv_caps_attr_version.attr,
	&hv_caps_attr_ga.attr,
	&hv_caps_attr_expanded.attr,
	&hv_caps_attr_lab.attr,
	&hv_caps_attr_collect_privileged.attr,
	NULL,
};

static struct attribute *cpumask_attrs[] = {
	&dev_attr_cpumask.attr,
	NULL,
};

/* Unnamed group: "cpumask" appears directly under the PMU's sysfs dir. */
static struct attribute_group cpumask_attr_group = {
	.attrs = cpumask_attrs,
};

static struct attribute_group interface_group = {
	.name = "interface",
	.attrs = interface_attrs,
};

static const struct attribute_group *attr_groups[] = {
	&format_group,
	&event_group,
	&interface_group,
	&cpumask_attr_group,
	NULL,
};

/*
 * Per-CPU hcall argument/result buffer, used under get_cpu_var() in
 * single_gpci_request(). Aligned for the 64-bit fields the hypervisor
 * writes into it.
 */
static DEFINE_PER_CPU(char, hv_gpci_reqb[HGPCI_REQ_BUFFER_SIZE]) __aligned(sizeof(uint64_t));
147*4882a593Smuzhiyun 
/*
 * single_gpci_request - issue one H_GET_PERF_COUNTER_INFO hcall.
 * @req: counter request number.
 * @starting_index: request-specific index (or an alias such as chip id).
 * @secondary_index: secondary request index.
 * @version_in: counter_info_version to ask the hypervisor for.
 * @offset: byte offset of the wanted field within the returned data.
 * @length: number of bytes to extract (1-8; validated by the caller).
 * @value: out parameter, receives the extracted field as a host u64.
 *
 * Returns 0 on success or the nonzero hcall status on failure, in which
 * case *value is left untouched.
 */
static unsigned long single_gpci_request(u32 req, u32 starting_index,
		u16 secondary_index, u8 version_in, u32 offset, u8 length,
		u64 *value)
{
	unsigned long ret;
	size_t i;
	u64 count;
	struct hv_gpci_request_buffer *arg;

	/*
	 * get_cpu_var() disables preemption, so this CPU's buffer cannot
	 * be reused concurrently until the matching put_cpu_var() below.
	 */
	arg = (void *)get_cpu_var(hv_gpci_reqb);
	memset(arg, 0, HGPCI_REQ_BUFFER_SIZE);

	/* Request parameters are big-endian on the wire. */
	arg->params.counter_request = cpu_to_be32(req);
	arg->params.starting_index = cpu_to_be32(starting_index);
	arg->params.secondary_index = cpu_to_be16(secondary_index);
	arg->params.counter_info_version_in = version_in;

	ret = plpar_hcall_norets(H_GET_PERF_COUNTER_INFO,
			virt_to_phys(arg), HGPCI_REQ_BUFFER_SIZE);
	if (ret) {
		pr_devel("hcall failed: 0x%lx\n", ret);
		goto out;
	}

	/*
	 * we verify offset and length are within the zeroed buffer at event
	 * init.
	 */
	/* Assemble the result most-significant byte first (big-endian data). */
	count = 0;
	for (i = offset; i < offset + length; i++)
		count |= (u64)(arg->bytes[i]) << ((length - 1 - (i - offset)) * 8);

	*value = count;
out:
	put_cpu_var(hv_gpci_reqb);
	return ret;
}
185*4882a593Smuzhiyun 
h_gpci_get_value(struct perf_event * event)186*4882a593Smuzhiyun static u64 h_gpci_get_value(struct perf_event *event)
187*4882a593Smuzhiyun {
188*4882a593Smuzhiyun 	u64 count;
189*4882a593Smuzhiyun 	unsigned long ret = single_gpci_request(event_get_request(event),
190*4882a593Smuzhiyun 					event_get_starting_index(event),
191*4882a593Smuzhiyun 					event_get_secondary_index(event),
192*4882a593Smuzhiyun 					event_get_counter_info_version(event),
193*4882a593Smuzhiyun 					event_get_offset(event),
194*4882a593Smuzhiyun 					event_get_length(event),
195*4882a593Smuzhiyun 					&count);
196*4882a593Smuzhiyun 	if (ret)
197*4882a593Smuzhiyun 		return 0;
198*4882a593Smuzhiyun 	return count;
199*4882a593Smuzhiyun }
200*4882a593Smuzhiyun 
h_gpci_event_update(struct perf_event * event)201*4882a593Smuzhiyun static void h_gpci_event_update(struct perf_event *event)
202*4882a593Smuzhiyun {
203*4882a593Smuzhiyun 	s64 prev;
204*4882a593Smuzhiyun 	u64 now = h_gpci_get_value(event);
205*4882a593Smuzhiyun 	prev = local64_xchg(&event->hw.prev_count, now);
206*4882a593Smuzhiyun 	local64_add(now - prev, &event->count);
207*4882a593Smuzhiyun }
208*4882a593Smuzhiyun 
/* Snapshot the current value so later updates can compute a delta. */
static void h_gpci_event_start(struct perf_event *event, int flags)
{
	local64_set(&event->hw.prev_count, h_gpci_get_value(event));
}
213*4882a593Smuzhiyun 
/* Fold the final delta into event->count. */
static void h_gpci_event_stop(struct perf_event *event, int flags)
{
	h_gpci_event_update(event);
}
218*4882a593Smuzhiyun 
h_gpci_event_add(struct perf_event * event,int flags)219*4882a593Smuzhiyun static int h_gpci_event_add(struct perf_event *event, int flags)
220*4882a593Smuzhiyun {
221*4882a593Smuzhiyun 	if (flags & PERF_EF_START)
222*4882a593Smuzhiyun 		h_gpci_event_start(event, flags);
223*4882a593Smuzhiyun 
224*4882a593Smuzhiyun 	return 0;
225*4882a593Smuzhiyun }
226*4882a593Smuzhiyun 
h_gpci_event_init(struct perf_event * event)227*4882a593Smuzhiyun static int h_gpci_event_init(struct perf_event *event)
228*4882a593Smuzhiyun {
229*4882a593Smuzhiyun 	u64 count;
230*4882a593Smuzhiyun 	u8 length;
231*4882a593Smuzhiyun 
232*4882a593Smuzhiyun 	/* Not our event */
233*4882a593Smuzhiyun 	if (event->attr.type != event->pmu->type)
234*4882a593Smuzhiyun 		return -ENOENT;
235*4882a593Smuzhiyun 
236*4882a593Smuzhiyun 	/* config2 is unused */
237*4882a593Smuzhiyun 	if (event->attr.config2) {
238*4882a593Smuzhiyun 		pr_devel("config2 set when reserved\n");
239*4882a593Smuzhiyun 		return -EINVAL;
240*4882a593Smuzhiyun 	}
241*4882a593Smuzhiyun 
242*4882a593Smuzhiyun 	/* no branch sampling */
243*4882a593Smuzhiyun 	if (has_branch_stack(event))
244*4882a593Smuzhiyun 		return -EOPNOTSUPP;
245*4882a593Smuzhiyun 
246*4882a593Smuzhiyun 	length = event_get_length(event);
247*4882a593Smuzhiyun 	if (length < 1 || length > 8) {
248*4882a593Smuzhiyun 		pr_devel("length invalid\n");
249*4882a593Smuzhiyun 		return -EINVAL;
250*4882a593Smuzhiyun 	}
251*4882a593Smuzhiyun 
252*4882a593Smuzhiyun 	/* last byte within the buffer? */
253*4882a593Smuzhiyun 	if ((event_get_offset(event) + length) > HGPCI_MAX_DATA_BYTES) {
254*4882a593Smuzhiyun 		pr_devel("request outside of buffer: %zu > %zu\n",
255*4882a593Smuzhiyun 				(size_t)event_get_offset(event) + length,
256*4882a593Smuzhiyun 				HGPCI_MAX_DATA_BYTES);
257*4882a593Smuzhiyun 		return -EINVAL;
258*4882a593Smuzhiyun 	}
259*4882a593Smuzhiyun 
260*4882a593Smuzhiyun 	/* check if the request works... */
261*4882a593Smuzhiyun 	if (single_gpci_request(event_get_request(event),
262*4882a593Smuzhiyun 				event_get_starting_index(event),
263*4882a593Smuzhiyun 				event_get_secondary_index(event),
264*4882a593Smuzhiyun 				event_get_counter_info_version(event),
265*4882a593Smuzhiyun 				event_get_offset(event),
266*4882a593Smuzhiyun 				length,
267*4882a593Smuzhiyun 				&count)) {
268*4882a593Smuzhiyun 		pr_devel("gpci hcall failed\n");
269*4882a593Smuzhiyun 		return -EINVAL;
270*4882a593Smuzhiyun 	}
271*4882a593Smuzhiyun 
272*4882a593Smuzhiyun 	return 0;
273*4882a593Smuzhiyun }
274*4882a593Smuzhiyun 
static struct pmu h_gpci_pmu = {
	/* System-wide counters; never attached to a task context. */
	.task_ctx_nr = perf_invalid_context,

	.name = "hv_gpci",
	.attr_groups = attr_groups,
	.event_init  = h_gpci_event_init,
	.add         = h_gpci_event_add,
	/* del needs no teardown beyond a final count update, so reuse stop. */
	.del         = h_gpci_event_stop,
	.start       = h_gpci_event_start,
	.stop        = h_gpci_event_stop,
	.read        = h_gpci_event_update,
	.capabilities = PERF_PMU_CAP_NO_EXCLUDE,
};
288*4882a593Smuzhiyun 
ppc_hv_gpci_cpu_online(unsigned int cpu)289*4882a593Smuzhiyun static int ppc_hv_gpci_cpu_online(unsigned int cpu)
290*4882a593Smuzhiyun {
291*4882a593Smuzhiyun 	if (cpumask_empty(&hv_gpci_cpumask))
292*4882a593Smuzhiyun 		cpumask_set_cpu(cpu, &hv_gpci_cpumask);
293*4882a593Smuzhiyun 
294*4882a593Smuzhiyun 	return 0;
295*4882a593Smuzhiyun }
296*4882a593Smuzhiyun 
ppc_hv_gpci_cpu_offline(unsigned int cpu)297*4882a593Smuzhiyun static int ppc_hv_gpci_cpu_offline(unsigned int cpu)
298*4882a593Smuzhiyun {
299*4882a593Smuzhiyun 	int target;
300*4882a593Smuzhiyun 
301*4882a593Smuzhiyun 	/* Check if exiting cpu is used for collecting gpci events */
302*4882a593Smuzhiyun 	if (!cpumask_test_and_clear_cpu(cpu, &hv_gpci_cpumask))
303*4882a593Smuzhiyun 		return 0;
304*4882a593Smuzhiyun 
305*4882a593Smuzhiyun 	/* Find a new cpu to collect gpci events */
306*4882a593Smuzhiyun 	target = cpumask_last(cpu_active_mask);
307*4882a593Smuzhiyun 
308*4882a593Smuzhiyun 	if (target < 0 || target >= nr_cpu_ids) {
309*4882a593Smuzhiyun 		pr_err("hv_gpci: CPU hotplug init failed\n");
310*4882a593Smuzhiyun 		return -1;
311*4882a593Smuzhiyun 	}
312*4882a593Smuzhiyun 
313*4882a593Smuzhiyun 	/* Migrate gpci events to the new target */
314*4882a593Smuzhiyun 	cpumask_set_cpu(target, &hv_gpci_cpumask);
315*4882a593Smuzhiyun 	perf_pmu_migrate_context(&h_gpci_pmu, cpu, target);
316*4882a593Smuzhiyun 
317*4882a593Smuzhiyun 	return 0;
318*4882a593Smuzhiyun }
319*4882a593Smuzhiyun 
/*
 * Register hotplug callbacks that keep exactly one collector CPU in
 * hv_gpci_cpumask.
 * NOTE(review): "hv_gcpi" in the state name looks like a typo for
 * "hv_gpci", but the string is visible to userspace via the cpuhp
 * states list — confirm before changing it.
 */
static int hv_gpci_cpu_hotplug_init(void)
{
	return cpuhp_setup_state(CPUHP_AP_PERF_POWERPC_HV_GPCI_ONLINE,
			  "perf/powerpc/hv_gcpi:online",
			  ppc_hv_gpci_cpu_online,
			  ppc_hv_gpci_cpu_offline);
}
327*4882a593Smuzhiyun 
hv_gpci_init(void)328*4882a593Smuzhiyun static int hv_gpci_init(void)
329*4882a593Smuzhiyun {
330*4882a593Smuzhiyun 	int r;
331*4882a593Smuzhiyun 	unsigned long hret;
332*4882a593Smuzhiyun 	struct hv_perf_caps caps;
333*4882a593Smuzhiyun 
334*4882a593Smuzhiyun 	hv_gpci_assert_offsets_correct();
335*4882a593Smuzhiyun 
336*4882a593Smuzhiyun 	if (!firmware_has_feature(FW_FEATURE_LPAR)) {
337*4882a593Smuzhiyun 		pr_debug("not a virtualized system, not enabling\n");
338*4882a593Smuzhiyun 		return -ENODEV;
339*4882a593Smuzhiyun 	}
340*4882a593Smuzhiyun 
341*4882a593Smuzhiyun 	hret = hv_perf_caps_get(&caps);
342*4882a593Smuzhiyun 	if (hret) {
343*4882a593Smuzhiyun 		pr_debug("could not obtain capabilities, not enabling, rc=%ld\n",
344*4882a593Smuzhiyun 				hret);
345*4882a593Smuzhiyun 		return -ENODEV;
346*4882a593Smuzhiyun 	}
347*4882a593Smuzhiyun 
348*4882a593Smuzhiyun 	/* init cpuhotplug */
349*4882a593Smuzhiyun 	r = hv_gpci_cpu_hotplug_init();
350*4882a593Smuzhiyun 	if (r)
351*4882a593Smuzhiyun 		return r;
352*4882a593Smuzhiyun 
353*4882a593Smuzhiyun 	/* sampling not supported */
354*4882a593Smuzhiyun 	h_gpci_pmu.capabilities |= PERF_PMU_CAP_NO_INTERRUPT;
355*4882a593Smuzhiyun 
356*4882a593Smuzhiyun 	r = perf_pmu_register(&h_gpci_pmu, h_gpci_pmu.name, -1);
357*4882a593Smuzhiyun 	if (r)
358*4882a593Smuzhiyun 		return r;
359*4882a593Smuzhiyun 
360*4882a593Smuzhiyun 	return 0;
361*4882a593Smuzhiyun }
362*4882a593Smuzhiyun 
/* Register at device_initcall time; harmless no-op on non-LPAR systems. */
device_initcall(hv_gpci_init);
364