xref: /OK3568_Linux_fs/kernel/include/linux/perf_event.h (revision 4882a59341e53eb6f0b4789bf948001014eff981)
/*
 * Performance events:
 *
 *    Copyright (C) 2008-2009, Thomas Gleixner <tglx@linutronix.de>
 *    Copyright (C) 2008-2011, Red Hat, Inc., Ingo Molnar
 *    Copyright (C) 2008-2011, Red Hat, Inc., Peter Zijlstra
 *
 * Data type definitions, declarations, prototypes.
 *
 *    Started by: Thomas Gleixner and Ingo Molnar
 *
 * For licensing details see kernel-base/COPYING
 */
#ifndef _LINUX_PERF_EVENT_H
#define _LINUX_PERF_EVENT_H

#include <uapi/linux/perf_event.h>
#include <uapi/linux/bpf_perf_event.h>

/*
 * Kernel-internal data types and definitions:
 */

#ifdef CONFIG_PERF_EVENTS
# include <asm/perf_event.h>
# include <asm/local64.h>
#endif

struct perf_guest_info_callbacks {
	int				(*is_in_guest)(void);
	int				(*is_user_mode)(void);
	unsigned long			(*get_guest_ip)(void);
	void				(*handle_intel_pt_intr)(void);
};

#ifdef CONFIG_HAVE_HW_BREAKPOINT
#include <asm/hw_breakpoint.h>
#endif

#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/rculist.h>
#include <linux/rcupdate.h>
#include <linux/spinlock.h>
#include <linux/hrtimer.h>
#include <linux/fs.h>
#include <linux/pid_namespace.h>
#include <linux/workqueue.h>
#include <linux/ftrace.h>
#include <linux/cpu.h>
#include <linux/irq_work.h>
#include <linux/static_key.h>
#include <linux/jump_label_ratelimit.h>
#include <linux/atomic.h>
#include <linux/sysfs.h>
#include <linux/perf_regs.h>
#include <linux/cgroup.h>
#include <linux/refcount.h>
#include <linux/security.h>
#include <asm/local.h>

struct perf_callchain_entry {
	__u64				nr;
	__u64				ip[]; /* /proc/sys/kernel/perf_event_max_stack */
};

struct perf_callchain_entry_ctx {
	struct perf_callchain_entry *entry;
	u32			    max_stack;
	u32			    nr;
	short			    contexts;
	bool			    contexts_maxed;
};

typedef unsigned long (*perf_copy_f)(void *dst, const void *src,
				     unsigned long off, unsigned long len);

struct perf_raw_frag {
	union {
		struct perf_raw_frag	*next;
		unsigned long		pad;
	};
	perf_copy_f			copy;
	void				*data;
	u32				size;
} __packed;

struct perf_raw_record {
	struct perf_raw_frag		frag;
	u32				size;
};
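
/*
 * Illustrative sketch (not part of this header): wrapping a flat buffer in a
 * single-fragment perf_raw_record, much like the tracepoint/BPF paths do.
 * The "perf_example_" name is an assumption for illustration only. With
 * ->copy == NULL the fragment data is copied with a plain memcpy(); the
 * outer ->size is computed later by perf_prepare_sample().
 */
static inline void perf_example_init_raw(struct perf_raw_record *raw,
					 void *data, u32 size)
{
	raw->frag.next = NULL;	/* single fragment, no chaining */
	raw->frag.copy = NULL;	/* default memcpy()-style copy */
	raw->frag.data = data;
	raw->frag.size = size;
}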

/*
 * branch stack layout:
 *  nr: number of taken branches stored in entries[]
 *  hw_idx: The low level index of raw branch records
 *          for the most recent branch.
 *          -1ULL means invalid/unknown.
 *
 * Note that nr can vary from sample to sample.
 * Branches (to, from) are stored from most recent
 * to least recent, i.e., entries[0] contains the most
 * recent branch.
 * The entries[] is an abstraction of raw branch records,
 * which may not be stored in age order in HW, e.g. Intel LBR.
 * The hw_idx is to expose the low level index of raw
 * branch record for the most recent branch aka entries[0].
 * The hw_idx index is between -1 (unknown) and max depth,
 * which can be retrieved in /sys/devices/cpu/caps/branches.
 * For the architectures whose raw branch records are
 * already stored in age order, the hw_idx should be 0.
 */
struct perf_branch_stack {
	__u64				nr;
	__u64				hw_idx;
	struct perf_branch_entry	entries[];
};
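
/*
 * Illustrative sketch (not part of this header): walking a branch stack from
 * most to least recent, per the ordering described above. The "perf_example_"
 * name is an assumption for illustration only.
 */
static inline void perf_example_walk_branches(const struct perf_branch_stack *bs)
{
	u64 i;

	for (i = 0; i < bs->nr; i++) {
		const struct perf_branch_entry *br = &bs->entries[i];

		/* br->from and br->to are the branch source/target */
		(void)br;
	}
}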

struct task_struct;

/*
 * extra PMU register associated with an event
 */
struct hw_perf_event_extra {
	u64		config;	/* register value */
	unsigned int	reg;	/* register address or index */
	int		alloc;	/* extra register already allocated */
	int		idx;	/* index in shared_regs->regs[] */
};

/**
 * struct hw_perf_event - performance event hardware details:
 */
struct hw_perf_event {
#ifdef CONFIG_PERF_EVENTS
	union {
		struct { /* hardware */
			u64		config;
			u64		last_tag;
			unsigned long	config_base;
			unsigned long	event_base;
			int		event_base_rdpmc;
			int		idx;
			int		last_cpu;
			int		flags;

			struct hw_perf_event_extra extra_reg;
			struct hw_perf_event_extra branch_reg;
		};
		struct { /* software */
			struct hrtimer	hrtimer;
		};
		struct { /* tracepoint */
			/* for tp_event->class */
			struct list_head	tp_list;
		};
		struct { /* amd_power */
			u64	pwr_acc;
			u64	ptsc;
		};
#ifdef CONFIG_HAVE_HW_BREAKPOINT
		struct { /* breakpoint */
			/*
			 * Crufty hack to avoid the chicken and egg
			 * problem hw_breakpoint has with context
			 * creation and event initialization.
			 */
			struct arch_hw_breakpoint	info;
			struct list_head		bp_list;
		};
#endif
		struct { /* amd_iommu */
			u8	iommu_bank;
			u8	iommu_cntr;
			u16	padding;
			u64	conf;
			u64	conf1;
		};
	};
	/*
	 * If the event is a per task event, this will point to the task in
	 * question. See the comment in perf_event_alloc().
	 */
	struct task_struct		*target;

	/*
	 * PMU would store hardware filter configuration
	 * here.
	 */
	void				*addr_filters;

	/* Last sync'ed generation of filters */
	unsigned long			addr_filters_gen;

/*
 * hw_perf_event::state flags; used to track the PERF_EF_* state.
 */
#define PERF_HES_STOPPED	0x01 /* the counter is stopped */
#define PERF_HES_UPTODATE	0x02 /* event->count up-to-date */
#define PERF_HES_ARCH		0x04

	int				state;

	/*
	 * The last observed hardware counter value, updated with a
	 * local64_cmpxchg() such that pmu::read() can be called nested.
	 */
	local64_t			prev_count;

	/*
	 * The period to start the next sample with.
	 */
	u64				sample_period;

	union {
		struct { /* Sampling */
			/*
			 * The period we started this sample with.
			 */
			u64				last_period;

			/*
			 * However much is left of the current period;
			 * note that this is a full 64bit value and
			 * allows for generation of periods longer
			 * than hardware might allow.
			 */
			local64_t			period_left;
		};
		struct { /* Topdown events counting for context switch */
			u64				saved_metric;
			u64				saved_slots;
		};
	};

	/*
	 * State for throttling the event, see __perf_event_overflow() and
	 * perf_adjust_freq_unthr_context().
	 */
	u64                             interrupts_seq;
	u64				interrupts;

	/*
	 * State for freq target events, see __perf_event_overflow() and
	 * perf_adjust_freq_unthr_context().
	 */
	u64				freq_time_stamp;
	u64				freq_count_stamp;
#endif
};

struct perf_event;

/*
 * Common implementation detail of pmu::{start,commit,cancel}_txn
 */
#define PERF_PMU_TXN_ADD  0x1		/* txn to add/schedule event on PMU */
#define PERF_PMU_TXN_READ 0x2		/* txn to read event group from PMU */

/**
 * pmu::capabilities flags
 */
#define PERF_PMU_CAP_NO_INTERRUPT		0x01
#define PERF_PMU_CAP_NO_NMI			0x02
#define PERF_PMU_CAP_AUX_NO_SG			0x04
#define PERF_PMU_CAP_EXTENDED_REGS		0x08
#define PERF_PMU_CAP_EXCLUSIVE			0x10
#define PERF_PMU_CAP_ITRACE			0x20
#define PERF_PMU_CAP_HETEROGENEOUS_CPUS		0x40
#define PERF_PMU_CAP_NO_EXCLUDE			0x80
#define PERF_PMU_CAP_AUX_OUTPUT			0x100

struct perf_output_handle;

/**
 * struct pmu - generic performance monitoring unit
 */
struct pmu {
	struct list_head		entry;

	struct module			*module;
	struct device			*dev;
	const struct attribute_group	**attr_groups;
	const struct attribute_group	**attr_update;
	const char			*name;
	int				type;

	/*
	 * various common per-pmu feature flags
	 */
	int				capabilities;

	int __percpu			*pmu_disable_count;
	struct perf_cpu_context __percpu *pmu_cpu_context;
	atomic_t			exclusive_cnt; /* < 0: cpu; > 0: tsk */
	int				task_ctx_nr;
	int				hrtimer_interval_ms;

	/* number of address filters this PMU can do */
	unsigned int			nr_addr_filters;

	/*
	 * Fully disable/enable this PMU, can be used to protect from the PMI
	 * as well as for lazy/batch writing of the MSRs.
	 */
	void (*pmu_enable)		(struct pmu *pmu); /* optional */
	void (*pmu_disable)		(struct pmu *pmu); /* optional */

	/*
	 * Try and initialize the event for this PMU.
	 *
	 * Returns:
	 *  -ENOENT	-- @event is not for this PMU
	 *
	 *  -ENODEV	-- @event is for this PMU but PMU not present
	 *  -EBUSY	-- @event is for this PMU but PMU temporarily unavailable
	 *  -EINVAL	-- @event is for this PMU but @event is not valid
	 *  -EOPNOTSUPP -- @event is for this PMU, @event is valid, but not supported
	 *  -EACCES	-- @event is for this PMU, @event is valid, but no privileges
	 *
	 *  0		-- @event is for this PMU and valid
	 *
	 * Other error return values are allowed.
	 */
	int (*event_init)		(struct perf_event *event);

	/*
	 * Notification that the event was mapped or unmapped.  Called
	 * in the context of the mapping task.
	 */
	void (*event_mapped)		(struct perf_event *event, struct mm_struct *mm); /* optional */
	void (*event_unmapped)		(struct perf_event *event, struct mm_struct *mm); /* optional */

	/*
	 * Flags for ->add()/->del()/->start()/->stop(). There are
	 * matching hw_perf_event::state flags.
	 */
#define PERF_EF_START	0x01		/* start the counter when adding    */
#define PERF_EF_RELOAD	0x02		/* reload the counter when starting */
#define PERF_EF_UPDATE	0x04		/* update the counter when stopping */

	/*
	 * Adds/Removes a counter to/from the PMU, can be done inside a
	 * transaction, see the ->*_txn() methods.
	 *
	 * The add/del callbacks will reserve all hardware resources required
	 * to service the event, this includes any counter constraint
	 * scheduling etc.
	 *
	 * Called with IRQs disabled and the PMU disabled on the CPU the event
	 * is on.
	 *
	 * ->add() called without PERF_EF_START should result in the same state
	 *  as ->add() followed by ->stop().
	 *
	 * ->del() must always stop an event with PERF_EF_UPDATE. If it calls
	 *  ->stop() that must deal with already being stopped without
	 *  PERF_EF_UPDATE.
	 */
	int  (*add)			(struct perf_event *event, int flags);
	void (*del)			(struct perf_event *event, int flags);

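	/*
	 * Illustrative sketch (not part of this header): the calling
	 * sequence the core uses around ->add()/->del() when (un)scheduling
	 * an event; any implementation must honour this contract:
	 *
	 *	pmu->add(event, PERF_EF_START);	// schedule and start counting
	 *	...
	 *	pmu->del(event, 0);		// implies a PERF_EF_UPDATE stop
	 */
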
	/*
	 * Starts/Stops a counter present on the PMU.
	 *
	 * The PMI handler should stop the counter when perf_event_overflow()
	 * returns !0. ->start() will be used to continue.
	 *
	 * Also used to change the sample period.
	 *
	 * Called with IRQs disabled and the PMU disabled on the CPU the event
	 * is on -- will be called from NMI context when the PMU generates
	 * NMIs.
	 *
	 * ->stop() with PERF_EF_UPDATE will read the counter and update
	 *  period/count values like ->read() would.
	 *
	 * ->start() with PERF_EF_RELOAD will reprogram the counter
	 *  value, must be preceded by a ->stop() with PERF_EF_UPDATE.
	 */
	void (*start)			(struct perf_event *event, int flags);
	void (*stop)			(struct perf_event *event, int flags);

	/*
	 * Updates the counter value of the event.
	 *
	 * For sampling capable PMUs this will also update the software period
	 * hw_perf_event::period_left field.
	 */
	void (*read)			(struct perf_event *event);

	/*
	 * Group events scheduling is treated as a transaction, add
	 * group events as a whole and perform one schedulability test.
	 * If the test fails, roll back the whole group
	 *
	 * Start the transaction, after this ->add() doesn't need to
	 * do schedulability tests.
	 *
	 * Optional.
	 */
	void (*start_txn)		(struct pmu *pmu, unsigned int txn_flags);
	/*
	 * If ->start_txn() disabled the ->add() schedulability test
	 * then ->commit_txn() is required to perform one. On success
	 * the transaction is closed. On error the transaction is kept
	 * open until ->cancel_txn() is called.
	 *
	 * Optional.
	 */
	int  (*commit_txn)		(struct pmu *pmu);
	/*
	 * Will cancel the transaction, assumes ->del() is called
	 * for each successful ->add() during the transaction.
	 *
	 * Optional.
	 */
	void (*cancel_txn)		(struct pmu *pmu);
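
	/*
	 * Illustrative sketch (not part of this header): how the core drives
	 * a group-scheduling transaction (cf. group_sched_in()); the loop
	 * structure here is pseudocode:
	 *
	 *	pmu->start_txn(pmu, PERF_PMU_TXN_ADD);
	 *	for each event in the group:
	 *		if (pmu->add(event, PERF_EF_START))
	 *			goto error;
	 *	if (!pmu->commit_txn(pmu))
	 *		return 0;		// whole group scheduled
	 * error:
	 *	for each event already added:
	 *		pmu->del(event, 0);
	 *	pmu->cancel_txn(pmu);
	 */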

	/*
	 * Will return the value for perf_event_mmap_page::index for this event,
	 * if no implementation is provided it will default to: event->hw.idx + 1.
	 */
	int (*event_idx)		(struct perf_event *event); /* optional */

	/*
	 * context-switches callback
	 */
	void (*sched_task)		(struct perf_event_context *ctx,
					bool sched_in);

	/*
	 * Kmem cache of PMU specific data
	 */
	struct kmem_cache		*task_ctx_cache;

	/*
	 * PMU specific parts of task perf event context (i.e. ctx->task_ctx_data)
	 * can be synchronized using this function. See Intel LBR callstack support
	 * implementation and Perf core context switch handling callbacks for usage
	 * examples.
	 */
	void (*swap_task_ctx)		(struct perf_event_context *prev,
					 struct perf_event_context *next);
					/* optional */

	/*
	 * Set up pmu-private data structures for an AUX area
	 */
	void *(*setup_aux)		(struct perf_event *event, void **pages,
					 int nr_pages, bool overwrite);
					/* optional */

	/*
	 * Free pmu-private AUX data structures
	 */
	void (*free_aux)		(void *aux); /* optional */

	/*
	 * Take a snapshot of the AUX buffer without touching the event
	 * state, so that preempting ->start()/->stop() callbacks does
	 * not interfere with their logic. Called in PMI context.
	 *
	 * Returns the size of AUX data copied to the output handle.
	 *
	 * Optional.
	 */
	long (*snapshot_aux)		(struct perf_event *event,
					 struct perf_output_handle *handle,
					 unsigned long size);

	/*
	 * Validate address range filters: make sure the HW supports the
	 * requested configuration and number of filters; return 0 if the
	 * supplied filters are valid, -errno otherwise.
	 *
	 * Runs in the context of the ioctl()ing process and is not serialized
	 * with the rest of the PMU callbacks.
	 */
	int (*addr_filters_validate)	(struct list_head *filters);
					/* optional */

	/*
	 * Synchronize address range filter configuration:
	 * translate hw-agnostic filters into hardware configuration in
	 * event::hw::addr_filters.
	 *
	 * Runs as a part of filter sync sequence that is done in ->start()
	 * callback by calling perf_event_addr_filters_sync().
	 *
	 * May (and should) traverse event::addr_filters::list, for which its
	 * caller provides necessary serialization.
	 */
	void (*addr_filters_sync)	(struct perf_event *event);
					/* optional */

	/*
	 * Check if event can be used for aux_output purposes for
	 * events of this PMU.
	 *
	 * Runs from perf_event_open(). Should return 0 for "no match"
	 * or non-zero for "match".
	 */
	int (*aux_output_match)		(struct perf_event *event);
					/* optional */

	/*
	 * Filter events for PMU-specific reasons.
	 */
	int (*filter_match)		(struct perf_event *event); /* optional */

	/*
	 * Check period value for PERF_EVENT_IOC_PERIOD ioctl.
	 */
	int (*check_period)		(struct perf_event *event, u64 value); /* optional */
};

enum perf_addr_filter_action_t {
	PERF_ADDR_FILTER_ACTION_STOP = 0,
	PERF_ADDR_FILTER_ACTION_START,
	PERF_ADDR_FILTER_ACTION_FILTER,
};

/**
 * struct perf_addr_filter - address range filter definition
 * @entry:	event's filter list linkage
 * @path:	object file's path for file-based filters
 * @offset:	filter range offset
 * @size:	filter range size (size==0 means single address trigger)
 * @action:	filter/start/stop
 *
 * This is a hardware-agnostic filter configuration as specified by the user.
 */
struct perf_addr_filter {
	struct list_head	entry;
	struct path		path;
	unsigned long		offset;
	unsigned long		size;
	enum perf_addr_filter_action_t	action;
};

/**
 * struct perf_addr_filters_head - container for address range filters
 * @list:	list of filters for this event
 * @lock:	spinlock that serializes accesses to the @list and event's
 *		(and its children's) filter generations.
 * @nr_file_filters:	number of file-based filters
 *
 * A child event will use parent's @list (and therefore @lock), so they are
 * bundled together; see perf_event_addr_filters().
 */
struct perf_addr_filters_head {
	struct list_head	list;
	raw_spinlock_t		lock;
	unsigned int		nr_file_filters;
};

struct perf_addr_filter_range {
	unsigned long		start;
	unsigned long		size;
};

/**
 * enum perf_event_state - the states of an event:
 */
enum perf_event_state {
	PERF_EVENT_STATE_DEAD		= -4,
	PERF_EVENT_STATE_EXIT		= -3,
	PERF_EVENT_STATE_ERROR		= -2,
	PERF_EVENT_STATE_OFF		= -1,
	PERF_EVENT_STATE_INACTIVE	=  0,
	PERF_EVENT_STATE_ACTIVE		=  1,
};

struct file;
struct perf_sample_data;

typedef void (*perf_overflow_handler_t)(struct perf_event *,
					struct perf_sample_data *,
					struct pt_regs *regs);

/*
 * Event capabilities. For event_caps and groups caps.
 *
 * PERF_EV_CAP_SOFTWARE: Is a software event.
 * PERF_EV_CAP_READ_ACTIVE_PKG: A CPU event (or cgroup event) that can be read
 * from any CPU in the package where it is active.
 * PERF_EV_CAP_SIBLING: An event with this flag must be a group sibling and
 * cannot be a group leader. If an event with this flag is detached from the
 * group it is scheduled out and moved into an unrecoverable ERROR state.
 */
#define PERF_EV_CAP_SOFTWARE		BIT(0)
#define PERF_EV_CAP_READ_ACTIVE_PKG	BIT(1)
#define PERF_EV_CAP_SIBLING		BIT(2)

#define SWEVENT_HLIST_BITS		8
#define SWEVENT_HLIST_SIZE		(1 << SWEVENT_HLIST_BITS)

struct swevent_hlist {
	struct hlist_head		heads[SWEVENT_HLIST_SIZE];
	struct rcu_head			rcu_head;
};

#define PERF_ATTACH_CONTEXT	0x01
#define PERF_ATTACH_GROUP	0x02
#define PERF_ATTACH_TASK	0x04
#define PERF_ATTACH_TASK_DATA	0x08
#define PERF_ATTACH_ITRACE	0x10
#define PERF_ATTACH_SCHED_CB	0x20

struct perf_cgroup;
struct perf_buffer;

struct pmu_event_list {
	raw_spinlock_t		lock;
	struct list_head	list;
};

#define for_each_sibling_event(sibling, event)			\
	if ((event)->group_leader == (event))			\
		list_for_each_entry((sibling), &(event)->sibling_list, sibling_list)

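/*
 * Illustrative sketch (not part of this header): iterating a group from its
 * leader. The guard above makes the loop a no-op when @event is itself a
 * sibling rather than a group leader:
 *
 *	struct perf_event *sibling;
 *
 *	for_each_sibling_event(sibling, group_leader)
 *		...;	// visits every non-leader member of the group
 */
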
/**
 * struct perf_event - performance event kernel representation:
 */
struct perf_event {
#ifdef CONFIG_PERF_EVENTS
	/*
	 * entry onto perf_event_context::event_list;
	 *   modifications require ctx->lock
	 *   RCU safe iterations.
	 */
	struct list_head		event_entry;

	/*
	 * Locked for modification by both ctx->mutex and ctx->lock; holding
	 * either suffices for read.
	 */
	struct list_head		sibling_list;
	struct list_head		active_list;
	/*
	 * Node on the pinned or flexible tree located at the event context;
	 */
	struct rb_node			group_node;
	u64				group_index;
	/*
	 * We need storage to track the entries in perf_pmu_migrate_context; we
	 * cannot use the event_entry because of RCU, and we want to keep the
	 * group intact, which rules out using the other two entries.
	 */
	struct list_head		migrate_entry;

	struct hlist_node		hlist_entry;
	struct list_head		active_entry;
	int				nr_siblings;

	/* Not serialized. Only written during event initialization. */
	int				event_caps;
	/* The cumulative AND of all event_caps for events in this group. */
	int				group_caps;

	struct perf_event		*group_leader;
	struct pmu			*pmu;
	void				*pmu_private;

	enum perf_event_state		state;
	unsigned int			attach_state;
	local64_t			count;
	atomic64_t			child_count;

	/*
	 * These are the total time in nanoseconds that the event
	 * has been enabled (i.e. eligible to run, and the task has
	 * been scheduled in, if this is a per-task event)
	 * and running (scheduled onto the CPU), respectively.
	 */
	u64				total_time_enabled;
	u64				total_time_running;
	u64				tstamp;

	/*
	 * timestamp shadows the actual context timing but it can
	 * be safely used in NMI interrupt context. It reflects the
	 * context time as it was when the event was last scheduled in,
	 * or when ctx_sched_in failed to schedule the event because we
	 * ran out of PMCs.
	 *
	 * ctx_time already accounts for ctx->timestamp. Therefore to
	 * compute ctx_time for a sample, simply add perf_clock().
	 */
	u64				shadow_ctx_time;

	struct perf_event_attr		attr;
	u16				header_size;
	u16				id_header_size;
	u16				read_size;
	struct hw_perf_event		hw;

	struct perf_event_context	*ctx;
	atomic_long_t			refcount;

	/*
	 * These accumulate total time (in nanoseconds) that children
	 * events have been enabled and running, respectively.
	 */
	atomic64_t			child_total_time_enabled;
	atomic64_t			child_total_time_running;

	/*
	 * Protect attach/detach and child_list:
	 */
	struct mutex			child_mutex;
	struct list_head		child_list;
	struct perf_event		*parent;

	int				oncpu;
	int				cpu;

	struct list_head		owner_entry;
	struct task_struct		*owner;

	/* mmap bits */
	struct mutex			mmap_mutex;
	atomic_t			mmap_count;

	struct perf_buffer		*rb;
	struct list_head		rb_entry;
	unsigned long			rcu_batches;
	int				rcu_pending;

	/* poll related */
	wait_queue_head_t		waitq;
	struct fasync_struct		*fasync;

	/* delayed work for NMIs and such */
	int				pending_wakeup;
	int				pending_kill;
	int				pending_disable;
	struct irq_work			pending;

	atomic_t			event_limit;

	/* address range filters */
	struct perf_addr_filters_head	addr_filters;
	/* vma address array for file-based filters */
	struct perf_addr_filter_range	*addr_filter_ranges;
	unsigned long			addr_filters_gen;

	/* for aux_output events */
	struct perf_event		*aux_event;

	void (*destroy)(struct perf_event *);
	struct rcu_head			rcu_head;

	struct pid_namespace		*ns;
	u64				id;

	u64				(*clock)(void);
	perf_overflow_handler_t		overflow_handler;
	void				*overflow_handler_context;
#ifdef CONFIG_BPF_SYSCALL
	perf_overflow_handler_t		orig_overflow_handler;
	struct bpf_prog			*prog;
#endif

#ifdef CONFIG_EVENT_TRACING
	struct trace_event_call		*tp_event;
	struct event_filter		*filter;
#ifdef CONFIG_FUNCTION_TRACER
	struct ftrace_ops               ftrace_ops;
#endif
#endif

#ifdef CONFIG_CGROUP_PERF
	struct perf_cgroup		*cgrp; /* the cgroup this event is attached to */
#endif

#ifdef CONFIG_SECURITY
	void *security;
#endif
	struct list_head		sb_list;
#endif /* CONFIG_PERF_EVENTS */
};


struct perf_event_groups {
	struct rb_root	tree;
	u64		index;
};

/**
 * struct perf_event_context - event context structure
 *
 * Used as a container for task events and CPU events as well:
 */
struct perf_event_context {
	struct pmu			*pmu;
	/*
	 * Protect the states of the events in the list,
	 * nr_active, and the list:
	 */
	raw_spinlock_t			lock;
	/*
	 * Protect the list of events.  Locking either mutex or lock
	 * is sufficient to ensure the list doesn't change; to change
	 * the list you need to lock both the mutex and the spinlock.
	 */
	struct mutex			mutex;

	struct list_head		active_ctx_list;
	struct perf_event_groups	pinned_groups;
	struct perf_event_groups	flexible_groups;
	struct list_head		event_list;

	struct list_head		pinned_active;
	struct list_head		flexible_active;

	int				nr_events;
	int				nr_active;
	int				is_active;
	int				nr_stat;
	int				nr_freq;
	int				rotate_disable;
	/*
	 * Set when nr_events != nr_active, except when the difference is due
	 * to events that need not be active because of scheduling
	 * constraints, such as cgroups.
	 */
	int				rotate_necessary;
	refcount_t			refcount;
	struct task_struct		*task;

	/*
	 * Context clock, runs when context enabled.
	 */
	u64				time;
	u64				timestamp;

	/*
	 * These fields let us detect when two contexts have both
	 * been cloned (inherited) from a common ancestor.
	 */
	struct perf_event_context	*parent_ctx;
	u64				parent_gen;
	u64				generation;
	int				pin_count;
#ifdef CONFIG_CGROUP_PERF
	int				nr_cgroups;	 /* cgroup evts */
#endif
	void				*task_ctx_data; /* pmu specific data */
	struct rcu_head			rcu_head;
};

/*
 * Number of contexts where an event can trigger:
 *	task, softirq, hardirq, nmi.
 */
#define PERF_NR_CONTEXTS	4

/**
 * struct perf_cpu_context - per cpu event context structure
 */
struct perf_cpu_context {
	struct perf_event_context	ctx;
	struct perf_event_context	*task_ctx;
	int				active_oncpu;
	int				exclusive;

	raw_spinlock_t			hrtimer_lock;
	struct hrtimer			hrtimer;
	ktime_t				hrtimer_interval;
	unsigned int			hrtimer_active;

#ifdef CONFIG_CGROUP_PERF
	struct perf_cgroup		*cgrp;
	struct list_head		cgrp_cpuctx_entry;
#endif

	struct list_head		sched_cb_entry;
	int				sched_cb_usage;

	int				online;
	/*
	 * Per-CPU storage for iterators used in visit_groups_merge. The default
	 * storage is of size 2 to hold the CPU and any CPU event iterators.
	 */
	int				heap_size;
	struct perf_event		**heap;
	struct perf_event		*heap_default[2];
};

struct perf_output_handle {
	struct perf_event		*event;
	struct perf_buffer		*rb;
	unsigned long			wakeup;
	unsigned long			size;
	u64				aux_flags;
	union {
		void			*addr;
		unsigned long		head;
	};
	int				page;
};

struct bpf_perf_event_data_kern {
	bpf_user_pt_regs_t *regs;
	struct perf_sample_data *data;
	struct perf_event *event;
};

#ifdef CONFIG_CGROUP_PERF

/*
 * perf_cgroup_info keeps track of time_enabled for a cgroup.
 * This is a per-cpu dynamically allocated data structure.
 */
struct perf_cgroup_info {
	u64				time;
	u64				timestamp;
};

struct perf_cgroup {
	struct cgroup_subsys_state	css;
	struct perf_cgroup_info	__percpu *info;
};

/*
 * Must ensure cgroup is pinned (css_get) before calling
 * this function. In other words, we cannot call this function
 * if there is no cgroup event for the current CPU context.
 */
static inline struct perf_cgroup *
perf_cgroup_from_task(struct task_struct *task, struct perf_event_context *ctx)
{
	return container_of(task_css_check(task, perf_event_cgrp_id,
					   ctx ? lockdep_is_held(&ctx->lock)
					       : true),
			    struct perf_cgroup, css);
}
#endif /* CONFIG_CGROUP_PERF */

#ifdef CONFIG_PERF_EVENTS

extern void *perf_aux_output_begin(struct perf_output_handle *handle,
				   struct perf_event *event);
extern void perf_aux_output_end(struct perf_output_handle *handle,
				unsigned long size);
extern int perf_aux_output_skip(struct perf_output_handle *handle,
				unsigned long size);
extern void *perf_get_aux(struct perf_output_handle *handle);
extern void perf_aux_output_flag(struct perf_output_handle *handle, u64 flags);
extern void perf_event_itrace_started(struct perf_event *event);

extern int perf_pmu_register(struct pmu *pmu, const char *name, int type);
extern void perf_pmu_unregister(struct pmu *pmu);

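/*
 * Illustrative sketch (not part of this header): registering a minimal PMU.
 * "example_pmu" and its callbacks are assumptions for illustration only;
 * type -1 asks the core to allocate a dynamic PMU type id:
 *
 *	static struct pmu example_pmu = {
 *		.task_ctx_nr	= perf_invalid_context,
 *		.event_init	= example_event_init,
 *		.add		= example_add,
 *		.del		= example_del,
 *		.start		= example_start,
 *		.stop		= example_stop,
 *		.read		= example_read,
 *	};
 *
 *	ret = perf_pmu_register(&example_pmu, "example", -1);
 */
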
extern int perf_num_counters(void);
extern const char *perf_pmu_name(void);
extern void __perf_event_task_sched_in(struct task_struct *prev,
				       struct task_struct *task);
extern void __perf_event_task_sched_out(struct task_struct *prev,
					struct task_struct *next);
extern int perf_event_init_task(struct task_struct *child);
extern void perf_event_exit_task(struct task_struct *child);
extern void perf_event_free_task(struct task_struct *task);
extern void perf_event_delayed_put(struct task_struct *task);
extern struct file *perf_event_get(unsigned int fd);
extern const struct perf_event *perf_get_event(struct file *file);
extern const struct perf_event_attr *perf_event_attrs(struct perf_event *event);
extern void perf_event_print_debug(void);
extern void perf_pmu_disable(struct pmu *pmu);
extern void perf_pmu_enable(struct pmu *pmu);
extern void perf_sched_cb_dec(struct pmu *pmu);
extern void perf_sched_cb_inc(struct pmu *pmu);
extern int perf_event_task_disable(void);
extern int perf_event_task_enable(void);

extern void perf_pmu_resched(struct pmu *pmu);

extern int perf_event_refresh(struct perf_event *event, int refresh);
extern void perf_event_update_userpage(struct perf_event *event);
extern int perf_event_release_kernel(struct perf_event *event);
extern struct perf_event *
perf_event_create_kernel_counter(struct perf_event_attr *attr,
				int cpu,
				struct task_struct *task,
				perf_overflow_handler_t callback,
				void *context);
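
/*
 * Illustrative sketch (not part of this header): creating an in-kernel
 * cycle counter on @cpu with no overflow callback. The "perf_example_"
 * name is an assumption for illustration only.
 */
static inline struct perf_event *perf_example_cycles_counter(int cpu)
{
	struct perf_event_attr attr = {
		.type	= PERF_TYPE_HARDWARE,
		.config	= PERF_COUNT_HW_CPU_CYCLES,
		.size	= sizeof(attr),
		.pinned	= 1,
	};

	/* counts on @cpu for all tasks; returns ERR_PTR() on failure */
	return perf_event_create_kernel_counter(&attr, cpu, NULL, NULL, NULL);
}
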
extern void perf_pmu_migrate_context(struct pmu *pmu,
				int src_cpu, int dst_cpu);
int perf_event_read_local(struct perf_event *event, u64 *value,
			  u64 *enabled, u64 *running);
extern u64 perf_event_read_value(struct perf_event *event,
				 u64 *enabled, u64 *running);
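
/*
 * Illustrative sketch (not part of this header): reading a counter from
 * its own context (e.g. from BPF or with IRQs disabled on the event's CPU):
 *
 *	u64 value, enabled, running;
 *
 *	if (!perf_event_read_local(event, &value, &enabled, &running))
 *		...;	// value now holds the up-to-date count
 */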


struct perf_sample_data {
	/*
	 * Fields set by perf_sample_data_init(), group so as to
	 * minimize the cachelines touched.
	 */
	u64				addr;
	struct perf_raw_record		*raw;
	struct perf_branch_stack	*br_stack;
	u64				period;
	u64				weight;
	u64				txn;
	union  perf_mem_data_src	data_src;

	/*
	 * The other fields, optionally {set,used} by
	 * perf_{prepare,output}_sample().
	 */
	u64				type;
	u64				ip;
	struct {
		u32	pid;
		u32	tid;
	}				tid_entry;
	u64				time;
	u64				id;
	u64				stream_id;
	struct {
		u32	cpu;
		u32	reserved;
	}				cpu_entry;
	struct perf_callchain_entry	*callchain;
	u64				aux_size;

	struct perf_regs		regs_user;
	struct perf_regs		regs_intr;
	u64				stack_user_size;

	u64				phys_addr;
	u64				cgroup;
} ____cacheline_aligned;

/* default value for data source */
#define PERF_MEM_NA (PERF_MEM_S(OP, NA)   |\
		    PERF_MEM_S(LVL, NA)   |\
		    PERF_MEM_S(SNOOP, NA) |\
		    PERF_MEM_S(LOCK, NA)  |\
		    PERF_MEM_S(TLB, NA))

static inline void perf_sample_data_init(struct perf_sample_data *data,
					 u64 addr, u64 period)
{
	/* remaining struct members initialized in perf_prepare_sample() */
	data->addr = addr;
	data->raw  = NULL;
	data->br_stack = NULL;
	data->period = period;
	data->weight = 0;
	data->data_src.val = PERF_MEM_NA;
	data->txn = 0;
}

extern void perf_output_sample(struct perf_output_handle *handle,
			       struct perf_event_header *header,
			       struct perf_sample_data *data,
			       struct perf_event *event);
extern void perf_prepare_sample(struct perf_event_header *header,
				struct perf_sample_data *data,
				struct perf_event *event,
				struct pt_regs *regs);

extern int perf_event_overflow(struct perf_event *event,
				 struct perf_sample_data *data,
				 struct pt_regs *regs);

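/*
 * Illustrative sketch (not part of this header): the typical PMI-handler
 * sample path in a PMU driver. The "perf_example_" name is an assumption
 * for illustration only.
 */
static inline void perf_example_handle_pmi(struct perf_event *event,
					   struct pt_regs *regs)
{
	struct perf_sample_data data;
	struct hw_perf_event *hwc = &event->hw;

	perf_sample_data_init(&data, 0, hwc->last_period);

	/* a non-zero return asks the driver to stop the event (throttling) */
	if (perf_event_overflow(event, &data, regs))
		event->pmu->stop(event, 0);
}
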
1070*4882a593Smuzhiyun extern void perf_event_output_forward(struct perf_event *event,
1071*4882a593Smuzhiyun 				     struct perf_sample_data *data,
1072*4882a593Smuzhiyun 				     struct pt_regs *regs);
1073*4882a593Smuzhiyun extern void perf_event_output_backward(struct perf_event *event,
1074*4882a593Smuzhiyun 				       struct perf_sample_data *data,
1075*4882a593Smuzhiyun 				       struct pt_regs *regs);
1076*4882a593Smuzhiyun extern int perf_event_output(struct perf_event *event,
1077*4882a593Smuzhiyun 			     struct perf_sample_data *data,
1078*4882a593Smuzhiyun 			     struct pt_regs *regs);
1079*4882a593Smuzhiyun 
1080*4882a593Smuzhiyun static inline bool
is_default_overflow_handler(struct perf_event * event)1081*4882a593Smuzhiyun is_default_overflow_handler(struct perf_event *event)
1082*4882a593Smuzhiyun {
1083*4882a593Smuzhiyun 	if (likely(event->overflow_handler == perf_event_output_forward))
1084*4882a593Smuzhiyun 		return true;
1085*4882a593Smuzhiyun 	if (unlikely(event->overflow_handler == perf_event_output_backward))
1086*4882a593Smuzhiyun 		return true;
1087*4882a593Smuzhiyun 	return false;
1088*4882a593Smuzhiyun }
1089*4882a593Smuzhiyun 
1090*4882a593Smuzhiyun extern void
1091*4882a593Smuzhiyun perf_event_header__init_id(struct perf_event_header *header,
1092*4882a593Smuzhiyun 			   struct perf_sample_data *data,
1093*4882a593Smuzhiyun 			   struct perf_event *event);
1094*4882a593Smuzhiyun extern void
1095*4882a593Smuzhiyun perf_event__output_id_sample(struct perf_event *event,
1096*4882a593Smuzhiyun 			     struct perf_output_handle *handle,
1097*4882a593Smuzhiyun 			     struct perf_sample_data *sample);
1098*4882a593Smuzhiyun 
1099*4882a593Smuzhiyun extern void
1100*4882a593Smuzhiyun perf_log_lost_samples(struct perf_event *event, u64 lost);
1101*4882a593Smuzhiyun 
event_has_any_exclude_flag(struct perf_event * event)1102*4882a593Smuzhiyun static inline bool event_has_any_exclude_flag(struct perf_event *event)
1103*4882a593Smuzhiyun {
1104*4882a593Smuzhiyun 	struct perf_event_attr *attr = &event->attr;
1105*4882a593Smuzhiyun 
1106*4882a593Smuzhiyun 	return attr->exclude_idle || attr->exclude_user ||
1107*4882a593Smuzhiyun 	       attr->exclude_kernel || attr->exclude_hv ||
1108*4882a593Smuzhiyun 	       attr->exclude_guest || attr->exclude_host;
1109*4882a593Smuzhiyun }
1110*4882a593Smuzhiyun 
is_sampling_event(struct perf_event * event)1111*4882a593Smuzhiyun static inline bool is_sampling_event(struct perf_event *event)
1112*4882a593Smuzhiyun {
1113*4882a593Smuzhiyun 	return event->attr.sample_period != 0;
1114*4882a593Smuzhiyun }

/*
 * Return 1 for a software event, 0 for a hardware event
 */
static inline int is_software_event(struct perf_event *event)
{
	return event->event_caps & PERF_EV_CAP_SOFTWARE;
}

/*
 * Return 1 for event in sw context, 0 for event in hw context
 */
static inline int in_software_context(struct perf_event *event)
{
	return event->ctx->pmu->task_ctx_nr == perf_sw_context;
}

static inline int is_exclusive_pmu(struct pmu *pmu)
{
	return pmu->capabilities & PERF_PMU_CAP_EXCLUSIVE;
}

extern struct static_key perf_swevent_enabled[PERF_COUNT_SW_MAX];

extern void ___perf_sw_event(u32, u64, struct pt_regs *, u64);
extern void __perf_sw_event(u32, u64, struct pt_regs *, u64);

#ifndef perf_arch_fetch_caller_regs
static inline void perf_arch_fetch_caller_regs(struct pt_regs *regs, unsigned long ip) { }
#endif

/*
 * When generating a perf sample in-line, instead of from an interrupt /
 * exception, we lack a pt_regs. This is typically used from software events
 * like: SW_CONTEXT_SWITCHES, SW_MIGRATIONS and the tie-in with tracepoints.
 *
 * We typically don't need a full set, but (for x86) do require:
 * - ip for PERF_SAMPLE_IP
 * - cs for user_mode() tests
 * - sp for PERF_SAMPLE_CALLCHAIN
 * - eflags for MISC bits and CALLCHAIN (see: perf_hw_regs())
 *
 * NOTE: assumes @regs is otherwise already 0 filled; this is important for
 * things like PERF_SAMPLE_REGS_INTR.
 */
static inline void perf_fetch_caller_regs(struct pt_regs *regs)
{
	perf_arch_fetch_caller_regs(regs, CALLER_ADDR0);
}
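/*
 * Illustrative sketch, not part of the original header: per the NOTE
 * above, callers hand in an already zero-filled pt_regs:
 *
 *	struct pt_regs regs;
 *
 *	memset(&regs, 0, sizeof(regs));
 *	perf_fetch_caller_regs(&regs);
 *	___perf_sw_event(PERF_COUNT_SW_DUMMY, 1, &regs, 0);
 *
 * PERF_COUNT_SW_DUMMY is used here only as a stand-in event id.
 */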

static __always_inline void
perf_sw_event(u32 event_id, u64 nr, struct pt_regs *regs, u64 addr)
{
	if (static_key_false(&perf_swevent_enabled[event_id]))
		__perf_sw_event(event_id, nr, regs, addr);
}
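/*
 * Illustrative sketch, not part of the original header: this is the
 * pattern used by e.g. the fault path to count software events:
 *
 *	perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);
 *
 * When no such event exists, the static key reduces the call to a
 * patched-out branch.
 */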

DECLARE_PER_CPU(struct pt_regs, __perf_regs[4]);

/*
 * 'Special' version for the scheduler, it hard assumes no recursion,
 * which is guaranteed by us not actually scheduling inside other swevents
 * because those disable preemption.
 */
static __always_inline void
perf_sw_event_sched(u32 event_id, u64 nr, u64 addr)
{
	if (static_key_false(&perf_swevent_enabled[event_id])) {
		struct pt_regs *regs = this_cpu_ptr(&__perf_regs[0]);

		perf_fetch_caller_regs(regs);
		___perf_sw_event(event_id, nr, regs, addr);
	}
}

extern struct static_key_false perf_sched_events;

static __always_inline bool
perf_sw_migrate_enabled(void)
{
	if (static_key_false(&perf_swevent_enabled[PERF_COUNT_SW_CPU_MIGRATIONS]))
		return true;
	return false;
}

static inline void perf_event_task_migrate(struct task_struct *task)
{
	if (perf_sw_migrate_enabled())
		task->sched_migrated = 1;
}

static inline void perf_event_task_sched_in(struct task_struct *prev,
					    struct task_struct *task)
{
	if (static_branch_unlikely(&perf_sched_events))
		__perf_event_task_sched_in(prev, task);

	if (perf_sw_migrate_enabled() && task->sched_migrated) {
		struct pt_regs *regs = this_cpu_ptr(&__perf_regs[0]);

		perf_fetch_caller_regs(regs);
		___perf_sw_event(PERF_COUNT_SW_CPU_MIGRATIONS, 1, regs, 0);
		task->sched_migrated = 0;
	}
}

static inline void perf_event_task_sched_out(struct task_struct *prev,
					     struct task_struct *next)
{
	perf_sw_event_sched(PERF_COUNT_SW_CONTEXT_SWITCHES, 1, 0);

	if (static_branch_unlikely(&perf_sched_events))
		__perf_event_task_sched_out(prev, next);
}

extern void perf_event_mmap(struct vm_area_struct *vma);

extern void perf_event_ksymbol(u16 ksym_type, u64 addr, u32 len,
			       bool unregister, const char *sym);
extern void perf_event_bpf_event(struct bpf_prog *prog,
				 enum perf_bpf_event_type type,
				 u16 flags);

extern struct perf_guest_info_callbacks __rcu *perf_guest_cbs;
static inline struct perf_guest_info_callbacks *perf_get_guest_cbs(void)
{
	/*
	 * Callbacks are RCU-protected and must be READ_ONCE to avoid reloading
	 * the callbacks between a !NULL check and dereferences, to ensure
	 * pending stores/changes to the callback pointers are visible before a
	 * non-NULL perf_guest_cbs is visible to readers, and to prevent a
	 * module from unloading callbacks while readers are active.
	 */
	return rcu_dereference(perf_guest_cbs);
}
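/*
 * Illustrative sketch, not part of the original header: readers (possibly
 * in NMI context) fetch the callback block once and only dereference the
 * local copy, mirroring the arch perf guest-state checks:
 *
 *	struct perf_guest_info_callbacks *guest_cbs = perf_get_guest_cbs();
 *
 *	if (guest_cbs && guest_cbs->is_in_guest())
 *		ip = guest_cbs->get_guest_ip();
 */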
extern int perf_register_guest_info_callbacks(struct perf_guest_info_callbacks *callbacks);
extern int perf_unregister_guest_info_callbacks(struct perf_guest_info_callbacks *callbacks);

extern void perf_event_exec(void);
extern void perf_event_comm(struct task_struct *tsk, bool exec);
extern void perf_event_namespaces(struct task_struct *tsk);
extern void perf_event_fork(struct task_struct *tsk);
extern void perf_event_text_poke(const void *addr,
				 const void *old_bytes, size_t old_len,
				 const void *new_bytes, size_t new_len);

/* Callchains */
DECLARE_PER_CPU(struct perf_callchain_entry, perf_callchain_entry);

extern void perf_callchain_user(struct perf_callchain_entry_ctx *entry, struct pt_regs *regs);
extern void perf_callchain_kernel(struct perf_callchain_entry_ctx *entry, struct pt_regs *regs);
extern struct perf_callchain_entry *
get_perf_callchain(struct pt_regs *regs, u32 init_nr, bool kernel, bool user,
		   u32 max_stack, bool crosstask, bool add_mark);
extern struct perf_callchain_entry *perf_callchain(struct perf_event *event, struct pt_regs *regs);
extern int get_callchain_buffers(int max_stack);
extern void put_callchain_buffers(void);
extern struct perf_callchain_entry *get_callchain_entry(int *rctx);
extern void put_callchain_entry(int rctx);

extern int sysctl_perf_event_max_stack;
extern int sysctl_perf_event_max_contexts_per_stack;

static inline int perf_callchain_store_context(struct perf_callchain_entry_ctx *ctx, u64 ip)
{
	if (ctx->contexts < sysctl_perf_event_max_contexts_per_stack) {
		struct perf_callchain_entry *entry = ctx->entry;
		entry->ip[entry->nr++] = ip;
		++ctx->contexts;
		return 0;
	} else {
		ctx->contexts_maxed = true;
		return -1; /* no more room, stop walking the stack */
	}
}

static inline int perf_callchain_store(struct perf_callchain_entry_ctx *ctx, u64 ip)
{
	if (ctx->nr < ctx->max_stack && !ctx->contexts_maxed) {
		struct perf_callchain_entry *entry = ctx->entry;
		entry->ip[entry->nr++] = ip;
		++ctx->nr;
		return 0;
	} else {
		return -1; /* no more room, stop walking the stack */
	}
}
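/*
 * Illustrative sketch, not part of the original header: an arch unwinder
 * feeds frames into perf_callchain_store() until it reports the entry is
 * full; "my_stackframe", "my_frame_from_regs" and "my_unwind_frame" are
 * hypothetical helpers.
 *
 *	void perf_callchain_kernel(struct perf_callchain_entry_ctx *entry,
 *				   struct pt_regs *regs)
 *	{
 *		struct my_stackframe frame = my_frame_from_regs(regs);
 *
 *		do {
 *			if (perf_callchain_store(entry, frame.pc))
 *				break;
 *		} while (my_unwind_frame(&frame) == 0);
 *	}
 */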

extern int sysctl_perf_event_paranoid;
extern int sysctl_perf_event_mlock;
extern int sysctl_perf_event_sample_rate;
extern int sysctl_perf_cpu_time_max_percent;

extern void perf_sample_event_took(u64 sample_len_ns);

int perf_proc_update_handler(struct ctl_table *table, int write,
		void *buffer, size_t *lenp, loff_t *ppos);
int perf_cpu_time_max_percent_handler(struct ctl_table *table, int write,
		void *buffer, size_t *lenp, loff_t *ppos);
int perf_event_max_stack_handler(struct ctl_table *table, int write,
		void *buffer, size_t *lenp, loff_t *ppos);

/* Access to perf_event_open(2) syscall. */
#define PERF_SECURITY_OPEN		0

/* Finer grained perf_event_open(2) access control. */
#define PERF_SECURITY_CPU		1
#define PERF_SECURITY_KERNEL		2
#define PERF_SECURITY_TRACEPOINT	3

static inline int perf_is_paranoid(void)
{
	return sysctl_perf_event_paranoid > -1;
}

static inline int perf_allow_kernel(struct perf_event_attr *attr)
{
	if (sysctl_perf_event_paranoid > 1 && !perfmon_capable())
		return -EACCES;

	return security_perf_event_open(attr, PERF_SECURITY_KERNEL);
}

static inline int perf_allow_cpu(struct perf_event_attr *attr)
{
	if (sysctl_perf_event_paranoid > 0 && !perfmon_capable())
		return -EACCES;

	return security_perf_event_open(attr, PERF_SECURITY_CPU);
}

static inline int perf_allow_tracepoint(struct perf_event_attr *attr)
{
	if (sysctl_perf_event_paranoid > -1 && !perfmon_capable())
		return -EPERM;

	return security_perf_event_open(attr, PERF_SECURITY_TRACEPOINT);
}
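/*
 * Illustrative sketch, not part of the original header: PMUs whose samples
 * can expose kernel state gate ->event_init() on these helpers (the Arm
 * SPE driver follows this pattern; "my_pmu_event_init" is hypothetical):
 *
 *	static int my_pmu_event_init(struct perf_event *event)
 *	{
 *		int err = perf_allow_kernel(&event->attr);
 *
 *		if (err)
 *			return err;
 *
 *		return 0;
 *	}
 */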

extern void perf_event_init(void);
extern void perf_tp_event(u16 event_type, u64 count, void *record,
			  int entry_size, struct pt_regs *regs,
			  struct hlist_head *head, int rctx,
			  struct task_struct *task);
extern void perf_bp_event(struct perf_event *event, void *data);

#ifndef perf_misc_flags
# define perf_misc_flags(regs) \
		(user_mode(regs) ? PERF_RECORD_MISC_USER : PERF_RECORD_MISC_KERNEL)
# define perf_instruction_pointer(regs)	instruction_pointer(regs)
#endif
#ifndef perf_arch_bpf_user_pt_regs
# define perf_arch_bpf_user_pt_regs(regs) regs
#endif

static inline bool has_branch_stack(struct perf_event *event)
{
	return event->attr.sample_type & PERF_SAMPLE_BRANCH_STACK;
}

static inline bool needs_branch_stack(struct perf_event *event)
{
	return event->attr.branch_sample_type != 0;
}

static inline bool has_aux(struct perf_event *event)
{
	return event->pmu->setup_aux;
}

static inline bool is_write_backward(struct perf_event *event)
{
	return !!event->attr.write_backward;
}

static inline bool has_addr_filter(struct perf_event *event)
{
	return event->pmu->nr_addr_filters;
}

/*
 * An inherited event uses parent's filters
 */
static inline struct perf_addr_filters_head *
perf_event_addr_filters(struct perf_event *event)
{
	struct perf_addr_filters_head *ifh = &event->addr_filters;

	if (event->parent)
		ifh = &event->parent->addr_filters;

	return ifh;
}
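/*
 * Illustrative sketch, not part of the original header: drivers that
 * program address range filters walk the list under its lock:
 *
 *	struct perf_addr_filters_head *ifh = perf_event_addr_filters(event);
 *	struct perf_addr_filter *filter;
 *	unsigned long flags;
 *	int i = 0;
 *
 *	raw_spin_lock_irqsave(&ifh->lock, flags);
 *	list_for_each_entry(filter, &ifh->list, entry)
 *		my_pmu_program_filter(filter, i++);
 *	raw_spin_unlock_irqrestore(&ifh->lock, flags);
 *
 * "my_pmu_program_filter" is hypothetical.
 */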

extern void perf_event_addr_filters_sync(struct perf_event *event);

extern int perf_output_begin(struct perf_output_handle *handle,
			     struct perf_sample_data *data,
			     struct perf_event *event, unsigned int size);
extern int perf_output_begin_forward(struct perf_output_handle *handle,
				     struct perf_sample_data *data,
				     struct perf_event *event,
				     unsigned int size);
extern int perf_output_begin_backward(struct perf_output_handle *handle,
				      struct perf_sample_data *data,
				      struct perf_event *event,
				      unsigned int size);

extern void perf_output_end(struct perf_output_handle *handle);
extern unsigned int perf_output_copy(struct perf_output_handle *handle,
			     const void *buf, unsigned int len);
extern unsigned int perf_output_skip(struct perf_output_handle *handle,
				     unsigned int len);
extern long perf_output_copy_aux(struct perf_output_handle *aux_handle,
				 struct perf_output_handle *handle,
				 unsigned long from, unsigned long to);
extern int perf_swevent_get_recursion_context(void);
extern void perf_swevent_put_recursion_context(int rctx);
extern u64 perf_swevent_set_period(struct perf_event *event);
extern void perf_event_enable(struct perf_event *event);
extern void perf_event_disable(struct perf_event *event);
extern void perf_event_disable_local(struct perf_event *event);
extern void perf_event_disable_inatomic(struct perf_event *event);
extern void perf_event_task_tick(void);
extern int perf_event_account_interrupt(struct perf_event *event);
extern int perf_event_period(struct perf_event *event, u64 value);
extern u64 perf_event_pause(struct perf_event *event, bool reset);
#else /* !CONFIG_PERF_EVENTS: */
static inline void *
perf_aux_output_begin(struct perf_output_handle *handle,
		      struct perf_event *event)				{ return NULL; }
static inline void
perf_aux_output_end(struct perf_output_handle *handle, unsigned long size)
									{ }
static inline int
perf_aux_output_skip(struct perf_output_handle *handle,
		     unsigned long size)				{ return -EINVAL; }
static inline void *
perf_get_aux(struct perf_output_handle *handle)				{ return NULL; }
static inline void
perf_event_task_migrate(struct task_struct *task)			{ }
static inline void
perf_event_task_sched_in(struct task_struct *prev,
			 struct task_struct *task)			{ }
static inline void
perf_event_task_sched_out(struct task_struct *prev,
			  struct task_struct *next)			{ }
static inline int perf_event_init_task(struct task_struct *child)	{ return 0; }
static inline void perf_event_exit_task(struct task_struct *child)	{ }
static inline void perf_event_free_task(struct task_struct *task)	{ }
static inline void perf_event_delayed_put(struct task_struct *task)	{ }
static inline struct file *perf_event_get(unsigned int fd)	{ return ERR_PTR(-EINVAL); }
static inline const struct perf_event *perf_get_event(struct file *file)
{
	return ERR_PTR(-EINVAL);
}
static inline const struct perf_event_attr *perf_event_attrs(struct perf_event *event)
{
	return ERR_PTR(-EINVAL);
}
static inline int perf_event_read_local(struct perf_event *event, u64 *value,
					u64 *enabled, u64 *running)
{
	return -EINVAL;
}
static inline void perf_event_print_debug(void)				{ }
static inline int perf_event_task_disable(void)				{ return -EINVAL; }
static inline int perf_event_task_enable(void)				{ return -EINVAL; }
static inline int perf_event_refresh(struct perf_event *event, int refresh)
{
	return -EINVAL;
}

static inline void
perf_sw_event(u32 event_id, u64 nr, struct pt_regs *regs, u64 addr)	{ }
static inline void
perf_sw_event_sched(u32 event_id, u64 nr, u64 addr)			{ }
static inline void
perf_bp_event(struct perf_event *event, void *data)			{ }

static inline int perf_register_guest_info_callbacks
(struct perf_guest_info_callbacks *callbacks)				{ return 0; }
static inline int perf_unregister_guest_info_callbacks
(struct perf_guest_info_callbacks *callbacks)				{ return 0; }

static inline void perf_event_mmap(struct vm_area_struct *vma)		{ }

typedef int (perf_ksymbol_get_name_f)(char *name, int name_len, void *data);
static inline void perf_event_ksymbol(u16 ksym_type, u64 addr, u32 len,
				      bool unregister, const char *sym)	{ }
static inline void perf_event_bpf_event(struct bpf_prog *prog,
					enum perf_bpf_event_type type,
					u16 flags)			{ }
static inline void perf_event_exec(void)				{ }
static inline void perf_event_comm(struct task_struct *tsk, bool exec)	{ }
static inline void perf_event_namespaces(struct task_struct *tsk)	{ }
static inline void perf_event_fork(struct task_struct *tsk)		{ }
static inline void perf_event_text_poke(const void *addr,
					const void *old_bytes,
					size_t old_len,
					const void *new_bytes,
					size_t new_len)			{ }
static inline void perf_event_init(void)				{ }
static inline int  perf_swevent_get_recursion_context(void)		{ return -1; }
static inline void perf_swevent_put_recursion_context(int rctx)		{ }
static inline u64 perf_swevent_set_period(struct perf_event *event)	{ return 0; }
static inline void perf_event_enable(struct perf_event *event)		{ }
static inline void perf_event_disable(struct perf_event *event)		{ }
static inline int __perf_event_disable(void *info)			{ return -1; }
static inline void perf_event_task_tick(void)				{ }
static inline int perf_event_release_kernel(struct perf_event *event)	{ return 0; }
static inline int perf_event_period(struct perf_event *event, u64 value)
{
	return -EINVAL;
}
static inline u64 perf_event_pause(struct perf_event *event, bool reset)
{
	return 0;
}
#endif

#if defined(CONFIG_PERF_EVENTS) && defined(CONFIG_CPU_SUP_INTEL)
extern void perf_restore_debug_store(void);
#else
static inline void perf_restore_debug_store(void)			{ }
#endif

static __always_inline bool perf_raw_frag_last(const struct perf_raw_frag *frag)
{
	return frag->pad < sizeof(u64);
}

#define perf_output_put(handle, x) perf_output_copy((handle), &(x), sizeof(x))
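/*
 * Illustrative sketch, not part of the original header: the canonical
 * emission sequence pairs perf_output_begin() with perf_output_put() and
 * perf_output_end(), sizing the record up front (modelled on the in-tree
 * side-band record writers):
 *
 *	struct perf_output_handle handle;
 *	struct perf_sample_data sample;
 *	struct perf_event_header header = {
 *		.type = PERF_RECORD_SAMPLE,
 *		.size = sizeof(header),
 *	};
 *
 *	perf_event_header__init_id(&header, &sample, event);
 *	if (perf_output_begin(&handle, &sample, event, header.size))
 *		return;
 *	perf_output_put(&handle, header);
 *	perf_event__output_id_sample(event, &handle, &sample);
 *	perf_output_end(&handle);
 */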

struct perf_pmu_events_attr {
	struct device_attribute attr;
	u64 id;
	const char *event_str;
};

struct perf_pmu_events_ht_attr {
	struct device_attribute			attr;
	u64					id;
	const char				*event_str_ht;
	const char				*event_str_noht;
};

ssize_t perf_event_sysfs_show(struct device *dev, struct device_attribute *attr,
			      char *page);

#define PMU_EVENT_ATTR(_name, _var, _id, _show)				\
static struct perf_pmu_events_attr _var = {				\
	.attr = __ATTR(_name, 0444, _show, NULL),			\
	.id   =  _id,							\
};

#define PMU_EVENT_ATTR_STRING(_name, _var, _str)			    \
static struct perf_pmu_events_attr _var = {				    \
	.attr		= __ATTR(_name, 0444, perf_event_sysfs_show, NULL), \
	.id		= 0,						    \
	.event_str	= _str,						    \
};

#define PMU_FORMAT_ATTR(_name, _format)					\
static ssize_t								\
_name##_show(struct device *dev,					\
			       struct device_attribute *attr,		\
			       char *page)				\
{									\
	BUILD_BUG_ON(sizeof(_format) >= PAGE_SIZE);			\
	return sprintf(page, _format "\n");				\
}									\
									\
static struct device_attribute format_attr_##_name = __ATTR_RO(_name)
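/*
 * Illustrative sketch, not part of the original header: a driver exposes
 * its config-field layout and named events through these macros, e.g.:
 *
 *	PMU_FORMAT_ATTR(event, "config:0-7");
 *	PMU_EVENT_ATTR_STRING(cycles, my_pmu_attr_cycles, "event=0x11");
 *
 * yielding format_attr_event and my_pmu_attr_cycles, which the driver
 * then collects into its sysfs attribute groups ("my_pmu_attr_cycles"
 * and the 0x11 encoding are hypothetical).
 */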

/* Performance counter hotplug functions */
#ifdef CONFIG_PERF_EVENTS
int perf_event_init_cpu(unsigned int cpu);
int perf_event_exit_cpu(unsigned int cpu);
#else
#define perf_event_init_cpu	NULL
#define perf_event_exit_cpu	NULL
#endif

extern void __weak arch_perf_update_userpage(struct perf_event *event,
					     struct perf_event_mmap_page *userpg,
					     u64 now);

#endif /* _LINUX_PERF_EVENT_H */