xref: /OK3568_Linux_fs/kernel/include/linux/perf_event.h (revision 4882a59341e53eb6f0b4789bf948001014eff981)
1 /*
2  * Performance events:
3  *
4  *    Copyright (C) 2008-2009, Thomas Gleixner <tglx@linutronix.de>
5  *    Copyright (C) 2008-2011, Red Hat, Inc., Ingo Molnar
6  *    Copyright (C) 2008-2011, Red Hat, Inc., Peter Zijlstra
7  *
8  * Data type definitions, declarations, prototypes.
9  *
10  *    Started by: Thomas Gleixner and Ingo Molnar
11  *
12  * For licencing details see kernel-base/COPYING
13  */
14 #ifndef _LINUX_PERF_EVENT_H
15 #define _LINUX_PERF_EVENT_H
16 
17 #include <uapi/linux/perf_event.h>
18 #include <uapi/linux/bpf_perf_event.h>
19 
20 /*
21  * Kernel-internal data types and definitions:
22  */
23 
24 #ifdef CONFIG_PERF_EVENTS
25 # include <asm/perf_event.h>
26 # include <asm/local64.h>
27 #endif
28 
29 struct perf_guest_info_callbacks {
30 	int				(*is_in_guest)(void);
31 	int				(*is_user_mode)(void);
32 	unsigned long			(*get_guest_ip)(void);
33 	void				(*handle_intel_pt_intr)(void);
34 };
35 
36 #ifdef CONFIG_HAVE_HW_BREAKPOINT
37 #include <asm/hw_breakpoint.h>
38 #endif
39 
40 #include <linux/list.h>
41 #include <linux/mutex.h>
42 #include <linux/rculist.h>
43 #include <linux/rcupdate.h>
44 #include <linux/spinlock.h>
45 #include <linux/hrtimer.h>
46 #include <linux/fs.h>
47 #include <linux/pid_namespace.h>
48 #include <linux/workqueue.h>
49 #include <linux/ftrace.h>
50 #include <linux/cpu.h>
51 #include <linux/irq_work.h>
52 #include <linux/static_key.h>
53 #include <linux/jump_label_ratelimit.h>
54 #include <linux/atomic.h>
55 #include <linux/sysfs.h>
56 #include <linux/perf_regs.h>
57 #include <linux/cgroup.h>
58 #include <linux/refcount.h>
59 #include <linux/security.h>
60 #include <asm/local.h>
61 
62 struct perf_callchain_entry {
63 	__u64				nr;
64 	__u64				ip[]; /* /proc/sys/kernel/perf_event_max_stack */
65 };
66 
67 struct perf_callchain_entry_ctx {
68 	struct perf_callchain_entry *entry;
69 	u32			    max_stack;
70 	u32			    nr;
71 	short			    contexts;
72 	bool			    contexts_maxed;
73 };
74 
75 typedef unsigned long (*perf_copy_f)(void *dst, const void *src,
76 				     unsigned long off, unsigned long len);
77 
78 struct perf_raw_frag {
79 	union {
80 		struct perf_raw_frag	*next;
81 		unsigned long		pad;
82 	};
83 	perf_copy_f			copy;
84 	void				*data;
85 	u32				size;
86 } __packed;
87 
88 struct perf_raw_record {
89 	struct perf_raw_frag		frag;
90 	u32				size;
91 };
92 
93 /*
94  * branch stack layout:
95  *  nr: number of taken branches stored in entries[]
96  *  hw_idx: The low level index of raw branch records
97  *          for the most recent branch.
98  *          -1ULL means invalid/unknown.
99  *
100  * Note that nr can vary from sample to sample
101  * branches (to, from) are stored from most recent
102  * to least recent, i.e., entries[0] contains the most
103  * recent branch.
104  * The entries[] is an abstraction of raw branch records,
105  * which may not be stored in age order in HW, e.g. Intel LBR.
106  * The hw_idx is to expose the low level index of raw
107  * branch record for the most recent branch aka entries[0].
108  * The hw_idx index is between -1 (unknown) and max depth,
109  * which can be retrieved in /sys/devices/cpu/caps/branches.
110  * For the architectures whose raw branch records are
111  * already stored in age order, the hw_idx should be 0.
112  */
113 struct perf_branch_stack {
114 	__u64				nr;
115 	__u64				hw_idx;
116 	struct perf_branch_entry	entries[];
117 };
118 
119 struct task_struct;
120 
121 /*
122  * extra PMU register associated with an event
123  */
124 struct hw_perf_event_extra {
125 	u64		config;	/* register value */
126 	unsigned int	reg;	/* register address or index */
127 	int		alloc;	/* extra register already allocated */
128 	int		idx;	/* index in shared_regs->regs[] */
129 };
130 
131 /**
132  * struct hw_perf_event - performance event hardware details:
133  */
134 struct hw_perf_event {
135 #ifdef CONFIG_PERF_EVENTS
136 	union {
137 		struct { /* hardware */
138 			u64		config;
139 			u64		last_tag;
140 			unsigned long	config_base;
141 			unsigned long	event_base;
142 			int		event_base_rdpmc;
143 			int		idx;
144 			int		last_cpu;
145 			int		flags;
146 
147 			struct hw_perf_event_extra extra_reg;
148 			struct hw_perf_event_extra branch_reg;
149 		};
150 		struct { /* software */
151 			struct hrtimer	hrtimer;
152 		};
153 		struct { /* tracepoint */
154 			/* for tp_event->class */
155 			struct list_head	tp_list;
156 		};
157 		struct { /* amd_power */
158 			u64	pwr_acc;
159 			u64	ptsc;
160 		};
161 #ifdef CONFIG_HAVE_HW_BREAKPOINT
162 		struct { /* breakpoint */
163 			/*
164 			 * Crufty hack to avoid the chicken and egg
165 			 * problem hw_breakpoint has with context
166 			 * creation and event initalization.
167 			 */
168 			struct arch_hw_breakpoint	info;
169 			struct list_head		bp_list;
170 		};
171 #endif
172 		struct { /* amd_iommu */
173 			u8	iommu_bank;
174 			u8	iommu_cntr;
175 			u16	padding;
176 			u64	conf;
177 			u64	conf1;
178 		};
179 	};
180 	/*
181 	 * If the event is a per task event, this will point to the task in
182 	 * question. See the comment in perf_event_alloc().
183 	 */
184 	struct task_struct		*target;
185 
186 	/*
187 	 * PMU would store hardware filter configuration
188 	 * here.
189 	 */
190 	void				*addr_filters;
191 
192 	/* Last sync'ed generation of filters */
193 	unsigned long			addr_filters_gen;
194 
195 /*
196  * hw_perf_event::state flags; used to track the PERF_EF_* state.
197  */
198 #define PERF_HES_STOPPED	0x01 /* the counter is stopped */
199 #define PERF_HES_UPTODATE	0x02 /* event->count up-to-date */
200 #define PERF_HES_ARCH		0x04
201 
202 	int				state;
203 
204 	/*
205 	 * The last observed hardware counter value, updated with a
206 	 * local64_cmpxchg() such that pmu::read() can be called nested.
207 	 */
208 	local64_t			prev_count;
209 
210 	/*
211 	 * The period to start the next sample with.
212 	 */
213 	u64				sample_period;
214 
215 	union {
216 		struct { /* Sampling */
217 			/*
218 			 * The period we started this sample with.
219 			 */
220 			u64				last_period;
221 
222 			/*
223 			 * However much is left of the current period;
224 			 * note that this is a full 64bit value and
225 			 * allows for generation of periods longer
226 			 * than hardware might allow.
227 			 */
228 			local64_t			period_left;
229 		};
230 		struct { /* Topdown events counting for context switch */
231 			u64				saved_metric;
232 			u64				saved_slots;
233 		};
234 	};
235 
236 	/*
237 	 * State for throttling the event, see __perf_event_overflow() and
238 	 * perf_adjust_freq_unthr_context().
239 	 */
240 	u64                             interrupts_seq;
241 	u64				interrupts;
242 
243 	/*
244 	 * State for freq target events, see __perf_event_overflow() and
245 	 * perf_adjust_freq_unthr_context().
246 	 */
247 	u64				freq_time_stamp;
248 	u64				freq_count_stamp;
249 #endif
250 };
251 
252 struct perf_event;
253 
254 /*
255  * Common implementation detail of pmu::{start,commit,cancel}_txn
256  */
257 #define PERF_PMU_TXN_ADD  0x1		/* txn to add/schedule event on PMU */
258 #define PERF_PMU_TXN_READ 0x2		/* txn to read event group from PMU */
259 
260 /**
261  * pmu::capabilities flags
262  */
263 #define PERF_PMU_CAP_NO_INTERRUPT		0x01
264 #define PERF_PMU_CAP_NO_NMI			0x02
265 #define PERF_PMU_CAP_AUX_NO_SG			0x04
266 #define PERF_PMU_CAP_EXTENDED_REGS		0x08
267 #define PERF_PMU_CAP_EXCLUSIVE			0x10
268 #define PERF_PMU_CAP_ITRACE			0x20
269 #define PERF_PMU_CAP_HETEROGENEOUS_CPUS		0x40
270 #define PERF_PMU_CAP_NO_EXCLUDE			0x80
271 #define PERF_PMU_CAP_AUX_OUTPUT			0x100
272 
273 struct perf_output_handle;
274 
275 /**
276  * struct pmu - generic performance monitoring unit
277  */
278 struct pmu {
279 	struct list_head		entry;
280 
281 	struct module			*module;
282 	struct device			*dev;
283 	const struct attribute_group	**attr_groups;
284 	const struct attribute_group	**attr_update;
285 	const char			*name;
286 	int				type;
287 
288 	/*
289 	 * various common per-pmu feature flags
290 	 */
291 	int				capabilities;
292 
293 	int __percpu			*pmu_disable_count;
294 	struct perf_cpu_context __percpu *pmu_cpu_context;
295 	atomic_t			exclusive_cnt; /* < 0: cpu; > 0: tsk */
296 	int				task_ctx_nr;
297 	int				hrtimer_interval_ms;
298 
299 	/* number of address filters this PMU can do */
300 	unsigned int			nr_addr_filters;
301 
302 	/*
303 	 * Fully disable/enable this PMU, can be used to protect from the PMI
304 	 * as well as for lazy/batch writing of the MSRs.
305 	 */
306 	void (*pmu_enable)		(struct pmu *pmu); /* optional */
307 	void (*pmu_disable)		(struct pmu *pmu); /* optional */
308 
309 	/*
310 	 * Try and initialize the event for this PMU.
311 	 *
312 	 * Returns:
313 	 *  -ENOENT	-- @event is not for this PMU
314 	 *
315 	 *  -ENODEV	-- @event is for this PMU but PMU not present
316 	 *  -EBUSY	-- @event is for this PMU but PMU temporarily unavailable
317 	 *  -EINVAL	-- @event is for this PMU but @event is not valid
318 	 *  -EOPNOTSUPP -- @event is for this PMU, @event is valid, but not supported
319 	 *  -EACCES	-- @event is for this PMU, @event is valid, but no privileges
320 	 *
321 	 *  0		-- @event is for this PMU and valid
322 	 *
323 	 * Other error return values are allowed.
324 	 */
325 	int (*event_init)		(struct perf_event *event);
326 
327 	/*
328 	 * Notification that the event was mapped or unmapped.  Called
329 	 * in the context of the mapping task.
330 	 */
331 	void (*event_mapped)		(struct perf_event *event, struct mm_struct *mm); /* optional */
332 	void (*event_unmapped)		(struct perf_event *event, struct mm_struct *mm); /* optional */
333 
334 	/*
335 	 * Flags for ->add()/->del()/ ->start()/->stop(). There are
336 	 * matching hw_perf_event::state flags.
337 	 */
338 #define PERF_EF_START	0x01		/* start the counter when adding    */
339 #define PERF_EF_RELOAD	0x02		/* reload the counter when starting */
340 #define PERF_EF_UPDATE	0x04		/* update the counter when stopping */
341 
342 	/*
343 	 * Adds/Removes a counter to/from the PMU, can be done inside a
344 	 * transaction, see the ->*_txn() methods.
345 	 *
346 	 * The add/del callbacks will reserve all hardware resources required
347 	 * to service the event, this includes any counter constraint
348 	 * scheduling etc.
349 	 *
350 	 * Called with IRQs disabled and the PMU disabled on the CPU the event
351 	 * is on.
352 	 *
353 	 * ->add() called without PERF_EF_START should result in the same state
354 	 *  as ->add() followed by ->stop().
355 	 *
356 	 * ->del() must always PERF_EF_UPDATE stop an event. If it calls
357 	 *  ->stop() that must deal with already being stopped without
358 	 *  PERF_EF_UPDATE.
359 	 */
360 	int  (*add)			(struct perf_event *event, int flags);
361 	void (*del)			(struct perf_event *event, int flags);
362 
363 	/*
364 	 * Starts/Stops a counter present on the PMU.
365 	 *
366 	 * The PMI handler should stop the counter when perf_event_overflow()
367 	 * returns !0. ->start() will be used to continue.
368 	 *
369 	 * Also used to change the sample period.
370 	 *
371 	 * Called with IRQs disabled and the PMU disabled on the CPU the event
372 	 * is on -- will be called from NMI context with the PMU generates
373 	 * NMIs.
374 	 *
375 	 * ->stop() with PERF_EF_UPDATE will read the counter and update
376 	 *  period/count values like ->read() would.
377 	 *
378 	 * ->start() with PERF_EF_RELOAD will reprogram the counter
379 	 *  value, must be preceded by a ->stop() with PERF_EF_UPDATE.
380 	 */
381 	void (*start)			(struct perf_event *event, int flags);
382 	void (*stop)			(struct perf_event *event, int flags);
383 
384 	/*
385 	 * Updates the counter value of the event.
386 	 *
387 	 * For sampling capable PMUs this will also update the software period
388 	 * hw_perf_event::period_left field.
389 	 */
390 	void (*read)			(struct perf_event *event);
391 
392 	/*
393 	 * Group events scheduling is treated as a transaction, add
394 	 * group events as a whole and perform one schedulability test.
395 	 * If the test fails, roll back the whole group
396 	 *
397 	 * Start the transaction, after this ->add() doesn't need to
398 	 * do schedulability tests.
399 	 *
400 	 * Optional.
401 	 */
402 	void (*start_txn)		(struct pmu *pmu, unsigned int txn_flags);
403 	/*
404 	 * If ->start_txn() disabled the ->add() schedulability test
405 	 * then ->commit_txn() is required to perform one. On success
406 	 * the transaction is closed. On error the transaction is kept
407 	 * open until ->cancel_txn() is called.
408 	 *
409 	 * Optional.
410 	 */
411 	int  (*commit_txn)		(struct pmu *pmu);
412 	/*
413 	 * Will cancel the transaction, assumes ->del() is called
414 	 * for each successful ->add() during the transaction.
415 	 *
416 	 * Optional.
417 	 */
418 	void (*cancel_txn)		(struct pmu *pmu);
419 
420 	/*
421 	 * Will return the value for perf_event_mmap_page::index for this event,
422 	 * if no implementation is provided it will default to: event->hw.idx + 1.
423 	 */
424 	int (*event_idx)		(struct perf_event *event); /*optional */
425 
426 	/*
427 	 * context-switches callback
428 	 */
429 	void (*sched_task)		(struct perf_event_context *ctx,
430 					bool sched_in);
431 
432 	/*
433 	 * Kmem cache of PMU specific data
434 	 */
435 	struct kmem_cache		*task_ctx_cache;
436 
437 	/*
438 	 * PMU specific parts of task perf event context (i.e. ctx->task_ctx_data)
439 	 * can be synchronized using this function. See Intel LBR callstack support
440 	 * implementation and Perf core context switch handling callbacks for usage
441 	 * examples.
442 	 */
443 	void (*swap_task_ctx)		(struct perf_event_context *prev,
444 					 struct perf_event_context *next);
445 					/* optional */
446 
447 	/*
448 	 * Set up pmu-private data structures for an AUX area
449 	 */
450 	void *(*setup_aux)		(struct perf_event *event, void **pages,
451 					 int nr_pages, bool overwrite);
452 					/* optional */
453 
454 	/*
455 	 * Free pmu-private AUX data structures
456 	 */
457 	void (*free_aux)		(void *aux); /* optional */
458 
459 	/*
460 	 * Take a snapshot of the AUX buffer without touching the event
461 	 * state, so that preempting ->start()/->stop() callbacks does
462 	 * not interfere with their logic. Called in PMI context.
463 	 *
464 	 * Returns the size of AUX data copied to the output handle.
465 	 *
466 	 * Optional.
467 	 */
468 	long (*snapshot_aux)		(struct perf_event *event,
469 					 struct perf_output_handle *handle,
470 					 unsigned long size);
471 
472 	/*
473 	 * Validate address range filters: make sure the HW supports the
474 	 * requested configuration and number of filters; return 0 if the
475 	 * supplied filters are valid, -errno otherwise.
476 	 *
477 	 * Runs in the context of the ioctl()ing process and is not serialized
478 	 * with the rest of the PMU callbacks.
479 	 */
480 	int (*addr_filters_validate)	(struct list_head *filters);
481 					/* optional */
482 
483 	/*
484 	 * Synchronize address range filter configuration:
485 	 * translate hw-agnostic filters into hardware configuration in
486 	 * event::hw::addr_filters.
487 	 *
488 	 * Runs as a part of filter sync sequence that is done in ->start()
489 	 * callback by calling perf_event_addr_filters_sync().
490 	 *
491 	 * May (and should) traverse event::addr_filters::list, for which its
492 	 * caller provides necessary serialization.
493 	 */
494 	void (*addr_filters_sync)	(struct perf_event *event);
495 					/* optional */
496 
497 	/*
498 	 * Check if event can be used for aux_output purposes for
499 	 * events of this PMU.
500 	 *
501 	 * Runs from perf_event_open(). Should return 0 for "no match"
502 	 * or non-zero for "match".
503 	 */
504 	int (*aux_output_match)		(struct perf_event *event);
505 					/* optional */
506 
507 	/*
508 	 * Filter events for PMU-specific reasons.
509 	 */
510 	int (*filter_match)		(struct perf_event *event); /* optional */
511 
512 	/*
513 	 * Check period value for PERF_EVENT_IOC_PERIOD ioctl.
514 	 */
515 	int (*check_period)		(struct perf_event *event, u64 value); /* optional */
516 };
517 
518 enum perf_addr_filter_action_t {
519 	PERF_ADDR_FILTER_ACTION_STOP = 0,
520 	PERF_ADDR_FILTER_ACTION_START,
521 	PERF_ADDR_FILTER_ACTION_FILTER,
522 };
523 
524 /**
525  * struct perf_addr_filter - address range filter definition
526  * @entry:	event's filter list linkage
527  * @path:	object file's path for file-based filters
528  * @offset:	filter range offset
529  * @size:	filter range size (size==0 means single address trigger)
530  * @action:	filter/start/stop
531  *
532  * This is a hardware-agnostic filter configuration as specified by the user.
533  */
534 struct perf_addr_filter {
535 	struct list_head	entry;
536 	struct path		path;
537 	unsigned long		offset;
538 	unsigned long		size;
539 	enum perf_addr_filter_action_t	action;
540 };
541 
542 /**
543  * struct perf_addr_filters_head - container for address range filters
544  * @list:	list of filters for this event
545  * @lock:	spinlock that serializes accesses to the @list and event's
546  *		(and its children's) filter generations.
547  * @nr_file_filters:	number of file-based filters
548  *
549  * A child event will use parent's @list (and therefore @lock), so they are
550  * bundled together; see perf_event_addr_filters().
551  */
552 struct perf_addr_filters_head {
553 	struct list_head	list;
554 	raw_spinlock_t		lock;
555 	unsigned int		nr_file_filters;
556 };
557 
558 struct perf_addr_filter_range {
559 	unsigned long		start;
560 	unsigned long		size;
561 };
562 
563 /**
564  * enum perf_event_state - the states of an event:
565  */
566 enum perf_event_state {
567 	PERF_EVENT_STATE_DEAD		= -4,
568 	PERF_EVENT_STATE_EXIT		= -3,
569 	PERF_EVENT_STATE_ERROR		= -2,
570 	PERF_EVENT_STATE_OFF		= -1,
571 	PERF_EVENT_STATE_INACTIVE	=  0,
572 	PERF_EVENT_STATE_ACTIVE		=  1,
573 };
574 
575 struct file;
576 struct perf_sample_data;
577 
578 typedef void (*perf_overflow_handler_t)(struct perf_event *,
579 					struct perf_sample_data *,
580 					struct pt_regs *regs);
581 
582 /*
583  * Event capabilities. For event_caps and groups caps.
584  *
585  * PERF_EV_CAP_SOFTWARE: Is a software event.
586  * PERF_EV_CAP_READ_ACTIVE_PKG: A CPU event (or cgroup event) that can be read
587  * from any CPU in the package where it is active.
588  * PERF_EV_CAP_SIBLING: An event with this flag must be a group sibling and
589  * cannot be a group leader. If an event with this flag is detached from the
590  * group it is scheduled out and moved into an unrecoverable ERROR state.
591  */
592 #define PERF_EV_CAP_SOFTWARE		BIT(0)
593 #define PERF_EV_CAP_READ_ACTIVE_PKG	BIT(1)
594 #define PERF_EV_CAP_SIBLING		BIT(2)
595 
596 #define SWEVENT_HLIST_BITS		8
597 #define SWEVENT_HLIST_SIZE		(1 << SWEVENT_HLIST_BITS)
598 
599 struct swevent_hlist {
600 	struct hlist_head		heads[SWEVENT_HLIST_SIZE];
601 	struct rcu_head			rcu_head;
602 };
603 
604 #define PERF_ATTACH_CONTEXT	0x01
605 #define PERF_ATTACH_GROUP	0x02
606 #define PERF_ATTACH_TASK	0x04
607 #define PERF_ATTACH_TASK_DATA	0x08
608 #define PERF_ATTACH_ITRACE	0x10
609 #define PERF_ATTACH_SCHED_CB	0x20
610 
611 struct perf_cgroup;
612 struct perf_buffer;
613 
614 struct pmu_event_list {
615 	raw_spinlock_t		lock;
616 	struct list_head	list;
617 };
618 
619 #define for_each_sibling_event(sibling, event)			\
620 	if ((event)->group_leader == (event))			\
621 		list_for_each_entry((sibling), &(event)->sibling_list, sibling_list)
622 
623 /**
624  * struct perf_event - performance event kernel representation:
625  */
626 struct perf_event {
627 #ifdef CONFIG_PERF_EVENTS
628 	/*
629 	 * entry onto perf_event_context::event_list;
630 	 *   modifications require ctx->lock
631 	 *   RCU safe iterations.
632 	 */
633 	struct list_head		event_entry;
634 
635 	/*
636 	 * Locked for modification by both ctx->mutex and ctx->lock; holding
637 	 * either sufficies for read.
638 	 */
639 	struct list_head		sibling_list;
640 	struct list_head		active_list;
641 	/*
642 	 * Node on the pinned or flexible tree located at the event context;
643 	 */
644 	struct rb_node			group_node;
645 	u64				group_index;
646 	/*
647 	 * We need storage to track the entries in perf_pmu_migrate_context; we
648 	 * cannot use the event_entry because of RCU and we want to keep the
649 	 * group in tact which avoids us using the other two entries.
650 	 */
651 	struct list_head		migrate_entry;
652 
653 	struct hlist_node		hlist_entry;
654 	struct list_head		active_entry;
655 	int				nr_siblings;
656 
657 	/* Not serialized. Only written during event initialization. */
658 	int				event_caps;
659 	/* The cumulative AND of all event_caps for events in this group. */
660 	int				group_caps;
661 
662 	struct perf_event		*group_leader;
663 	struct pmu			*pmu;
664 	void				*pmu_private;
665 
666 	enum perf_event_state		state;
667 	unsigned int			attach_state;
668 	local64_t			count;
669 	atomic64_t			child_count;
670 
671 	/*
672 	 * These are the total time in nanoseconds that the event
673 	 * has been enabled (i.e. eligible to run, and the task has
674 	 * been scheduled in, if this is a per-task event)
675 	 * and running (scheduled onto the CPU), respectively.
676 	 */
677 	u64				total_time_enabled;
678 	u64				total_time_running;
679 	u64				tstamp;
680 
681 	/*
682 	 * timestamp shadows the actual context timing but it can
683 	 * be safely used in NMI interrupt context. It reflects the
684 	 * context time as it was when the event was last scheduled in,
685 	 * or when ctx_sched_in failed to schedule the event because we
686 	 * run out of PMC.
687 	 *
688 	 * ctx_time already accounts for ctx->timestamp. Therefore to
689 	 * compute ctx_time for a sample, simply add perf_clock().
690 	 */
691 	u64				shadow_ctx_time;
692 
693 	struct perf_event_attr		attr;
694 	u16				header_size;
695 	u16				id_header_size;
696 	u16				read_size;
697 	struct hw_perf_event		hw;
698 
699 	struct perf_event_context	*ctx;
700 	atomic_long_t			refcount;
701 
702 	/*
703 	 * These accumulate total time (in nanoseconds) that children
704 	 * events have been enabled and running, respectively.
705 	 */
706 	atomic64_t			child_total_time_enabled;
707 	atomic64_t			child_total_time_running;
708 
709 	/*
710 	 * Protect attach/detach and child_list:
711 	 */
712 	struct mutex			child_mutex;
713 	struct list_head		child_list;
714 	struct perf_event		*parent;
715 
716 	int				oncpu;
717 	int				cpu;
718 
719 	struct list_head		owner_entry;
720 	struct task_struct		*owner;
721 
722 	/* mmap bits */
723 	struct mutex			mmap_mutex;
724 	atomic_t			mmap_count;
725 
726 	struct perf_buffer		*rb;
727 	struct list_head		rb_entry;
728 	unsigned long			rcu_batches;
729 	int				rcu_pending;
730 
731 	/* poll related */
732 	wait_queue_head_t		waitq;
733 	struct fasync_struct		*fasync;
734 
735 	/* delayed work for NMIs and such */
736 	int				pending_wakeup;
737 	int				pending_kill;
738 	int				pending_disable;
739 	struct irq_work			pending;
740 
741 	atomic_t			event_limit;
742 
743 	/* address range filters */
744 	struct perf_addr_filters_head	addr_filters;
745 	/* vma address array for file-based filders */
746 	struct perf_addr_filter_range	*addr_filter_ranges;
747 	unsigned long			addr_filters_gen;
748 
749 	/* for aux_output events */
750 	struct perf_event		*aux_event;
751 
752 	void (*destroy)(struct perf_event *);
753 	struct rcu_head			rcu_head;
754 
755 	struct pid_namespace		*ns;
756 	u64				id;
757 
758 	u64				(*clock)(void);
759 	perf_overflow_handler_t		overflow_handler;
760 	void				*overflow_handler_context;
761 #ifdef CONFIG_BPF_SYSCALL
762 	perf_overflow_handler_t		orig_overflow_handler;
763 	struct bpf_prog			*prog;
764 #endif
765 
766 #ifdef CONFIG_EVENT_TRACING
767 	struct trace_event_call		*tp_event;
768 	struct event_filter		*filter;
769 #ifdef CONFIG_FUNCTION_TRACER
770 	struct ftrace_ops               ftrace_ops;
771 #endif
772 #endif
773 
774 #ifdef CONFIG_CGROUP_PERF
775 	struct perf_cgroup		*cgrp; /* cgroup event is attach to */
776 #endif
777 
778 #ifdef CONFIG_SECURITY
779 	void *security;
780 #endif
781 	struct list_head		sb_list;
782 #endif /* CONFIG_PERF_EVENTS */
783 };
784 
785 
786 struct perf_event_groups {
787 	struct rb_root	tree;
788 	u64		index;
789 };
790 
791 /**
792  * struct perf_event_context - event context structure
793  *
794  * Used as a container for task events and CPU events as well:
795  */
796 struct perf_event_context {
797 	struct pmu			*pmu;
798 	/*
799 	 * Protect the states of the events in the list,
800 	 * nr_active, and the list:
801 	 */
802 	raw_spinlock_t			lock;
803 	/*
804 	 * Protect the list of events.  Locking either mutex or lock
805 	 * is sufficient to ensure the list doesn't change; to change
806 	 * the list you need to lock both the mutex and the spinlock.
807 	 */
808 	struct mutex			mutex;
809 
810 	struct list_head		active_ctx_list;
811 	struct perf_event_groups	pinned_groups;
812 	struct perf_event_groups	flexible_groups;
813 	struct list_head		event_list;
814 
815 	struct list_head		pinned_active;
816 	struct list_head		flexible_active;
817 
818 	int				nr_events;
819 	int				nr_active;
820 	int				is_active;
821 	int				nr_stat;
822 	int				nr_freq;
823 	int				rotate_disable;
824 	/*
825 	 * Set when nr_events != nr_active, except tolerant to events not
826 	 * necessary to be active due to scheduling constraints, such as cgroups.
827 	 */
828 	int				rotate_necessary;
829 	refcount_t			refcount;
830 	struct task_struct		*task;
831 
832 	/*
833 	 * Context clock, runs when context enabled.
834 	 */
835 	u64				time;
836 	u64				timestamp;
837 
838 	/*
839 	 * These fields let us detect when two contexts have both
840 	 * been cloned (inherited) from a common ancestor.
841 	 */
842 	struct perf_event_context	*parent_ctx;
843 	u64				parent_gen;
844 	u64				generation;
845 	int				pin_count;
846 #ifdef CONFIG_CGROUP_PERF
847 	int				nr_cgroups;	 /* cgroup evts */
848 #endif
849 	void				*task_ctx_data; /* pmu specific data */
850 	struct rcu_head			rcu_head;
851 };
852 
853 /*
854  * Number of contexts where an event can trigger:
855  *	task, softirq, hardirq, nmi.
856  */
857 #define PERF_NR_CONTEXTS	4
858 
859 /**
860  * struct perf_event_cpu_context - per cpu event context structure
861  */
862 struct perf_cpu_context {
863 	struct perf_event_context	ctx;
864 	struct perf_event_context	*task_ctx;
865 	int				active_oncpu;
866 	int				exclusive;
867 
868 	raw_spinlock_t			hrtimer_lock;
869 	struct hrtimer			hrtimer;
870 	ktime_t				hrtimer_interval;
871 	unsigned int			hrtimer_active;
872 
873 #ifdef CONFIG_CGROUP_PERF
874 	struct perf_cgroup		*cgrp;
875 	struct list_head		cgrp_cpuctx_entry;
876 #endif
877 
878 	struct list_head		sched_cb_entry;
879 	int				sched_cb_usage;
880 
881 	int				online;
882 	/*
883 	 * Per-CPU storage for iterators used in visit_groups_merge. The default
884 	 * storage is of size 2 to hold the CPU and any CPU event iterators.
885 	 */
886 	int				heap_size;
887 	struct perf_event		**heap;
888 	struct perf_event		*heap_default[2];
889 };
890 
891 struct perf_output_handle {
892 	struct perf_event		*event;
893 	struct perf_buffer		*rb;
894 	unsigned long			wakeup;
895 	unsigned long			size;
896 	u64				aux_flags;
897 	union {
898 		void			*addr;
899 		unsigned long		head;
900 	};
901 	int				page;
902 };
903 
904 struct bpf_perf_event_data_kern {
905 	bpf_user_pt_regs_t *regs;
906 	struct perf_sample_data *data;
907 	struct perf_event *event;
908 };
909 
910 #ifdef CONFIG_CGROUP_PERF
911 
912 /*
913  * perf_cgroup_info keeps track of time_enabled for a cgroup.
914  * This is a per-cpu dynamically allocated data structure.
915  */
916 struct perf_cgroup_info {
917 	u64				time;
918 	u64				timestamp;
919 };
920 
921 struct perf_cgroup {
922 	struct cgroup_subsys_state	css;
923 	struct perf_cgroup_info	__percpu *info;
924 };
925 
926 /*
927  * Must ensure cgroup is pinned (css_get) before calling
928  * this function. In other words, we cannot call this function
929  * if there is no cgroup event for the current CPU context.
930  */
931 static inline struct perf_cgroup *
perf_cgroup_from_task(struct task_struct * task,struct perf_event_context * ctx)932 perf_cgroup_from_task(struct task_struct *task, struct perf_event_context *ctx)
933 {
934 	return container_of(task_css_check(task, perf_event_cgrp_id,
935 					   ctx ? lockdep_is_held(&ctx->lock)
936 					       : true),
937 			    struct perf_cgroup, css);
938 }
939 #endif /* CONFIG_CGROUP_PERF */
940 
941 #ifdef CONFIG_PERF_EVENTS
942 
943 extern void *perf_aux_output_begin(struct perf_output_handle *handle,
944 				   struct perf_event *event);
945 extern void perf_aux_output_end(struct perf_output_handle *handle,
946 				unsigned long size);
947 extern int perf_aux_output_skip(struct perf_output_handle *handle,
948 				unsigned long size);
949 extern void *perf_get_aux(struct perf_output_handle *handle);
950 extern void perf_aux_output_flag(struct perf_output_handle *handle, u64 flags);
951 extern void perf_event_itrace_started(struct perf_event *event);
952 
953 extern int perf_pmu_register(struct pmu *pmu, const char *name, int type);
954 extern void perf_pmu_unregister(struct pmu *pmu);
955 
956 extern int perf_num_counters(void);
957 extern const char *perf_pmu_name(void);
958 extern void __perf_event_task_sched_in(struct task_struct *prev,
959 				       struct task_struct *task);
960 extern void __perf_event_task_sched_out(struct task_struct *prev,
961 					struct task_struct *next);
962 extern int perf_event_init_task(struct task_struct *child);
963 extern void perf_event_exit_task(struct task_struct *child);
964 extern void perf_event_free_task(struct task_struct *task);
965 extern void perf_event_delayed_put(struct task_struct *task);
966 extern struct file *perf_event_get(unsigned int fd);
967 extern const struct perf_event *perf_get_event(struct file *file);
968 extern const struct perf_event_attr *perf_event_attrs(struct perf_event *event);
969 extern void perf_event_print_debug(void);
970 extern void perf_pmu_disable(struct pmu *pmu);
971 extern void perf_pmu_enable(struct pmu *pmu);
972 extern void perf_sched_cb_dec(struct pmu *pmu);
973 extern void perf_sched_cb_inc(struct pmu *pmu);
974 extern int perf_event_task_disable(void);
975 extern int perf_event_task_enable(void);
976 
977 extern void perf_pmu_resched(struct pmu *pmu);
978 
979 extern int perf_event_refresh(struct perf_event *event, int refresh);
980 extern void perf_event_update_userpage(struct perf_event *event);
981 extern int perf_event_release_kernel(struct perf_event *event);
982 extern struct perf_event *
983 perf_event_create_kernel_counter(struct perf_event_attr *attr,
984 				int cpu,
985 				struct task_struct *task,
986 				perf_overflow_handler_t callback,
987 				void *context);
988 extern void perf_pmu_migrate_context(struct pmu *pmu,
989 				int src_cpu, int dst_cpu);
990 int perf_event_read_local(struct perf_event *event, u64 *value,
991 			  u64 *enabled, u64 *running);
992 extern u64 perf_event_read_value(struct perf_event *event,
993 				 u64 *enabled, u64 *running);
994 
995 
996 struct perf_sample_data {
997 	/*
998 	 * Fields set by perf_sample_data_init(), group so as to
999 	 * minimize the cachelines touched.
1000 	 */
1001 	u64				addr;
1002 	struct perf_raw_record		*raw;
1003 	struct perf_branch_stack	*br_stack;
1004 	u64				period;
1005 	u64				weight;
1006 	u64				txn;
1007 	union  perf_mem_data_src	data_src;
1008 
1009 	/*
1010 	 * The other fields, optionally {set,used} by
1011 	 * perf_{prepare,output}_sample().
1012 	 */
1013 	u64				type;
1014 	u64				ip;
1015 	struct {
1016 		u32	pid;
1017 		u32	tid;
1018 	}				tid_entry;
1019 	u64				time;
1020 	u64				id;
1021 	u64				stream_id;
1022 	struct {
1023 		u32	cpu;
1024 		u32	reserved;
1025 	}				cpu_entry;
1026 	struct perf_callchain_entry	*callchain;
1027 	u64				aux_size;
1028 
1029 	struct perf_regs		regs_user;
1030 	struct perf_regs		regs_intr;
1031 	u64				stack_user_size;
1032 
1033 	u64				phys_addr;
1034 	u64				cgroup;
1035 } ____cacheline_aligned;
1036 
1037 /* default value for data source */
1038 #define PERF_MEM_NA (PERF_MEM_S(OP, NA)   |\
1039 		    PERF_MEM_S(LVL, NA)   |\
1040 		    PERF_MEM_S(SNOOP, NA) |\
1041 		    PERF_MEM_S(LOCK, NA)  |\
1042 		    PERF_MEM_S(TLB, NA))
1043 
perf_sample_data_init(struct perf_sample_data * data,u64 addr,u64 period)1044 static inline void perf_sample_data_init(struct perf_sample_data *data,
1045 					 u64 addr, u64 period)
1046 {
1047 	/* remaining struct members initialized in perf_prepare_sample() */
1048 	data->addr = addr;
1049 	data->raw  = NULL;
1050 	data->br_stack = NULL;
1051 	data->period = period;
1052 	data->weight = 0;
1053 	data->data_src.val = PERF_MEM_NA;
1054 	data->txn = 0;
1055 }
1056 
1057 extern void perf_output_sample(struct perf_output_handle *handle,
1058 			       struct perf_event_header *header,
1059 			       struct perf_sample_data *data,
1060 			       struct perf_event *event);
1061 extern void perf_prepare_sample(struct perf_event_header *header,
1062 				struct perf_sample_data *data,
1063 				struct perf_event *event,
1064 				struct pt_regs *regs);
1065 
1066 extern int perf_event_overflow(struct perf_event *event,
1067 				 struct perf_sample_data *data,
1068 				 struct pt_regs *regs);
1069 
1070 extern void perf_event_output_forward(struct perf_event *event,
1071 				     struct perf_sample_data *data,
1072 				     struct pt_regs *regs);
1073 extern void perf_event_output_backward(struct perf_event *event,
1074 				       struct perf_sample_data *data,
1075 				       struct pt_regs *regs);
1076 extern int perf_event_output(struct perf_event *event,
1077 			     struct perf_sample_data *data,
1078 			     struct pt_regs *regs);
1079 
1080 static inline bool
is_default_overflow_handler(struct perf_event * event)1081 is_default_overflow_handler(struct perf_event *event)
1082 {
1083 	if (likely(event->overflow_handler == perf_event_output_forward))
1084 		return true;
1085 	if (unlikely(event->overflow_handler == perf_event_output_backward))
1086 		return true;
1087 	return false;
1088 }
1089 
1090 extern void
1091 perf_event_header__init_id(struct perf_event_header *header,
1092 			   struct perf_sample_data *data,
1093 			   struct perf_event *event);
1094 extern void
1095 perf_event__output_id_sample(struct perf_event *event,
1096 			     struct perf_output_handle *handle,
1097 			     struct perf_sample_data *sample);
1098 
1099 extern void
1100 perf_log_lost_samples(struct perf_event *event, u64 lost);
1101 
event_has_any_exclude_flag(struct perf_event * event)1102 static inline bool event_has_any_exclude_flag(struct perf_event *event)
1103 {
1104 	struct perf_event_attr *attr = &event->attr;
1105 
1106 	return attr->exclude_idle || attr->exclude_user ||
1107 	       attr->exclude_kernel || attr->exclude_hv ||
1108 	       attr->exclude_guest || attr->exclude_host;
1109 }
1110 
is_sampling_event(struct perf_event * event)1111 static inline bool is_sampling_event(struct perf_event *event)
1112 {
1113 	return event->attr.sample_period != 0;
1114 }
1115 
1116 /*
1117  * Return 1 for a software event, 0 for a hardware event
1118  */
is_software_event(struct perf_event * event)1119 static inline int is_software_event(struct perf_event *event)
1120 {
1121 	return event->event_caps & PERF_EV_CAP_SOFTWARE;
1122 }
1123 
1124 /*
1125  * Return 1 for event in sw context, 0 for event in hw context
1126  */
in_software_context(struct perf_event * event)1127 static inline int in_software_context(struct perf_event *event)
1128 {
1129 	return event->ctx->pmu->task_ctx_nr == perf_sw_context;
1130 }
1131 
is_exclusive_pmu(struct pmu * pmu)1132 static inline int is_exclusive_pmu(struct pmu *pmu)
1133 {
1134 	return pmu->capabilities & PERF_PMU_CAP_EXCLUSIVE;
1135 }
1136 
1137 extern struct static_key perf_swevent_enabled[PERF_COUNT_SW_MAX];
1138 
1139 extern void ___perf_sw_event(u32, u64, struct pt_regs *, u64);
1140 extern void __perf_sw_event(u32, u64, struct pt_regs *, u64);
1141 
1142 #ifndef perf_arch_fetch_caller_regs
perf_arch_fetch_caller_regs(struct pt_regs * regs,unsigned long ip)1143 static inline void perf_arch_fetch_caller_regs(struct pt_regs *regs, unsigned long ip) { }
1144 #endif
1145 
1146 /*
1147  * When generating a perf sample in-line, instead of from an interrupt /
1148  * exception, we lack a pt_regs. This is typically used from software events
1149  * like: SW_CONTEXT_SWITCHES, SW_MIGRATIONS and the tie-in with tracepoints.
1150  *
1151  * We typically don't need a full set, but (for x86) do require:
1152  * - ip for PERF_SAMPLE_IP
1153  * - cs for user_mode() tests
1154  * - sp for PERF_SAMPLE_CALLCHAIN
1155  * - eflags for MISC bits and CALLCHAIN (see: perf_hw_regs())
1156  *
1157  * NOTE: assumes @regs is otherwise already 0 filled; this is important for
1158  * things like PERF_SAMPLE_REGS_INTR.
1159  */
perf_fetch_caller_regs(struct pt_regs * regs)1160 static inline void perf_fetch_caller_regs(struct pt_regs *regs)
1161 {
1162 	perf_arch_fetch_caller_regs(regs, CALLER_ADDR0);
1163 }
1164 
1165 static __always_inline void
perf_sw_event(u32 event_id,u64 nr,struct pt_regs * regs,u64 addr)1166 perf_sw_event(u32 event_id, u64 nr, struct pt_regs *regs, u64 addr)
1167 {
1168 	if (static_key_false(&perf_swevent_enabled[event_id]))
1169 		__perf_sw_event(event_id, nr, regs, addr);
1170 }
1171 
1172 DECLARE_PER_CPU(struct pt_regs, __perf_regs[4]);
1173 
1174 /*
1175  * 'Special' version for the scheduler, it hard assumes no recursion,
1176  * which is guaranteed by us not actually scheduling inside other swevents
1177  * because those disable preemption.
1178  */
1179 static __always_inline void
perf_sw_event_sched(u32 event_id,u64 nr,u64 addr)1180 perf_sw_event_sched(u32 event_id, u64 nr, u64 addr)
1181 {
1182 	if (static_key_false(&perf_swevent_enabled[event_id])) {
1183 		struct pt_regs *regs = this_cpu_ptr(&__perf_regs[0]);
1184 
1185 		perf_fetch_caller_regs(regs);
1186 		___perf_sw_event(event_id, nr, regs, addr);
1187 	}
1188 }
1189 
1190 extern struct static_key_false perf_sched_events;
1191 
1192 static __always_inline bool
perf_sw_migrate_enabled(void)1193 perf_sw_migrate_enabled(void)
1194 {
1195 	if (static_key_false(&perf_swevent_enabled[PERF_COUNT_SW_CPU_MIGRATIONS]))
1196 		return true;
1197 	return false;
1198 }
1199 
perf_event_task_migrate(struct task_struct * task)1200 static inline void perf_event_task_migrate(struct task_struct *task)
1201 {
1202 	if (perf_sw_migrate_enabled())
1203 		task->sched_migrated = 1;
1204 }
1205 
perf_event_task_sched_in(struct task_struct * prev,struct task_struct * task)1206 static inline void perf_event_task_sched_in(struct task_struct *prev,
1207 					    struct task_struct *task)
1208 {
1209 	if (static_branch_unlikely(&perf_sched_events))
1210 		__perf_event_task_sched_in(prev, task);
1211 
1212 	if (perf_sw_migrate_enabled() && task->sched_migrated) {
1213 		struct pt_regs *regs = this_cpu_ptr(&__perf_regs[0]);
1214 
1215 		perf_fetch_caller_regs(regs);
1216 		___perf_sw_event(PERF_COUNT_SW_CPU_MIGRATIONS, 1, regs, 0);
1217 		task->sched_migrated = 0;
1218 	}
1219 }
1220 
perf_event_task_sched_out(struct task_struct * prev,struct task_struct * next)1221 static inline void perf_event_task_sched_out(struct task_struct *prev,
1222 					     struct task_struct *next)
1223 {
1224 	perf_sw_event_sched(PERF_COUNT_SW_CONTEXT_SWITCHES, 1, 0);
1225 
1226 	if (static_branch_unlikely(&perf_sched_events))
1227 		__perf_event_task_sched_out(prev, next);
1228 }
1229 
1230 extern void perf_event_mmap(struct vm_area_struct *vma);
1231 
1232 extern void perf_event_ksymbol(u16 ksym_type, u64 addr, u32 len,
1233 			       bool unregister, const char *sym);
1234 extern void perf_event_bpf_event(struct bpf_prog *prog,
1235 				 enum perf_bpf_event_type type,
1236 				 u16 flags);
1237 
1238 extern struct perf_guest_info_callbacks __rcu *perf_guest_cbs;
perf_get_guest_cbs(void)1239 static inline struct perf_guest_info_callbacks *perf_get_guest_cbs(void)
1240 {
1241 	/*
1242 	 * Callbacks are RCU-protected and must be READ_ONCE to avoid reloading
1243 	 * the callbacks between a !NULL check and dereferences, to ensure
1244 	 * pending stores/changes to the callback pointers are visible before a
1245 	 * non-NULL perf_guest_cbs is visible to readers, and to prevent a
1246 	 * module from unloading callbacks while readers are active.
1247 	 */
1248 	return rcu_dereference(perf_guest_cbs);
1249 }
1250 extern int perf_register_guest_info_callbacks(struct perf_guest_info_callbacks *callbacks);
1251 extern int perf_unregister_guest_info_callbacks(struct perf_guest_info_callbacks *callbacks);
1252 
1253 extern void perf_event_exec(void);
1254 extern void perf_event_comm(struct task_struct *tsk, bool exec);
1255 extern void perf_event_namespaces(struct task_struct *tsk);
1256 extern void perf_event_fork(struct task_struct *tsk);
1257 extern void perf_event_text_poke(const void *addr,
1258 				 const void *old_bytes, size_t old_len,
1259 				 const void *new_bytes, size_t new_len);
1260 
1261 /* Callchains */
1262 DECLARE_PER_CPU(struct perf_callchain_entry, perf_callchain_entry);
1263 
1264 extern void perf_callchain_user(struct perf_callchain_entry_ctx *entry, struct pt_regs *regs);
1265 extern void perf_callchain_kernel(struct perf_callchain_entry_ctx *entry, struct pt_regs *regs);
1266 extern struct perf_callchain_entry *
1267 get_perf_callchain(struct pt_regs *regs, u32 init_nr, bool kernel, bool user,
1268 		   u32 max_stack, bool crosstask, bool add_mark);
1269 extern struct perf_callchain_entry *perf_callchain(struct perf_event *event, struct pt_regs *regs);
1270 extern int get_callchain_buffers(int max_stack);
1271 extern void put_callchain_buffers(void);
1272 extern struct perf_callchain_entry *get_callchain_entry(int *rctx);
1273 extern void put_callchain_entry(int rctx);
1274 
1275 extern int sysctl_perf_event_max_stack;
1276 extern int sysctl_perf_event_max_contexts_per_stack;
1277 
perf_callchain_store_context(struct perf_callchain_entry_ctx * ctx,u64 ip)1278 static inline int perf_callchain_store_context(struct perf_callchain_entry_ctx *ctx, u64 ip)
1279 {
1280 	if (ctx->contexts < sysctl_perf_event_max_contexts_per_stack) {
1281 		struct perf_callchain_entry *entry = ctx->entry;
1282 		entry->ip[entry->nr++] = ip;
1283 		++ctx->contexts;
1284 		return 0;
1285 	} else {
1286 		ctx->contexts_maxed = true;
1287 		return -1; /* no more room, stop walking the stack */
1288 	}
1289 }
1290 
perf_callchain_store(struct perf_callchain_entry_ctx * ctx,u64 ip)1291 static inline int perf_callchain_store(struct perf_callchain_entry_ctx *ctx, u64 ip)
1292 {
1293 	if (ctx->nr < ctx->max_stack && !ctx->contexts_maxed) {
1294 		struct perf_callchain_entry *entry = ctx->entry;
1295 		entry->ip[entry->nr++] = ip;
1296 		++ctx->nr;
1297 		return 0;
1298 	} else {
1299 		return -1; /* no more room, stop walking the stack */
1300 	}
1301 }
1302 
1303 extern int sysctl_perf_event_paranoid;
1304 extern int sysctl_perf_event_mlock;
1305 extern int sysctl_perf_event_sample_rate;
1306 extern int sysctl_perf_cpu_time_max_percent;
1307 
1308 extern void perf_sample_event_took(u64 sample_len_ns);
1309 
1310 int perf_proc_update_handler(struct ctl_table *table, int write,
1311 		void *buffer, size_t *lenp, loff_t *ppos);
1312 int perf_cpu_time_max_percent_handler(struct ctl_table *table, int write,
1313 		void *buffer, size_t *lenp, loff_t *ppos);
1314 int perf_event_max_stack_handler(struct ctl_table *table, int write,
1315 		void *buffer, size_t *lenp, loff_t *ppos);
1316 
1317 /* Access to perf_event_open(2) syscall. */
1318 #define PERF_SECURITY_OPEN		0
1319 
1320 /* Finer grained perf_event_open(2) access control. */
1321 #define PERF_SECURITY_CPU		1
1322 #define PERF_SECURITY_KERNEL		2
1323 #define PERF_SECURITY_TRACEPOINT	3
1324 
perf_is_paranoid(void)1325 static inline int perf_is_paranoid(void)
1326 {
1327 	return sysctl_perf_event_paranoid > -1;
1328 }
1329 
perf_allow_kernel(struct perf_event_attr * attr)1330 static inline int perf_allow_kernel(struct perf_event_attr *attr)
1331 {
1332 	if (sysctl_perf_event_paranoid > 1 && !perfmon_capable())
1333 		return -EACCES;
1334 
1335 	return security_perf_event_open(attr, PERF_SECURITY_KERNEL);
1336 }
1337 
perf_allow_cpu(struct perf_event_attr * attr)1338 static inline int perf_allow_cpu(struct perf_event_attr *attr)
1339 {
1340 	if (sysctl_perf_event_paranoid > 0 && !perfmon_capable())
1341 		return -EACCES;
1342 
1343 	return security_perf_event_open(attr, PERF_SECURITY_CPU);
1344 }
1345 
perf_allow_tracepoint(struct perf_event_attr * attr)1346 static inline int perf_allow_tracepoint(struct perf_event_attr *attr)
1347 {
1348 	if (sysctl_perf_event_paranoid > -1 && !perfmon_capable())
1349 		return -EPERM;
1350 
1351 	return security_perf_event_open(attr, PERF_SECURITY_TRACEPOINT);
1352 }
1353 
1354 extern void perf_event_init(void);
1355 extern void perf_tp_event(u16 event_type, u64 count, void *record,
1356 			  int entry_size, struct pt_regs *regs,
1357 			  struct hlist_head *head, int rctx,
1358 			  struct task_struct *task);
1359 extern void perf_bp_event(struct perf_event *event, void *data);
1360 
1361 #ifndef perf_misc_flags
1362 # define perf_misc_flags(regs) \
1363 		(user_mode(regs) ? PERF_RECORD_MISC_USER : PERF_RECORD_MISC_KERNEL)
1364 # define perf_instruction_pointer(regs)	instruction_pointer(regs)
1365 #endif
1366 #ifndef perf_arch_bpf_user_pt_regs
1367 # define perf_arch_bpf_user_pt_regs(regs) regs
1368 #endif
1369 
has_branch_stack(struct perf_event * event)1370 static inline bool has_branch_stack(struct perf_event *event)
1371 {
1372 	return event->attr.sample_type & PERF_SAMPLE_BRANCH_STACK;
1373 }
1374 
needs_branch_stack(struct perf_event * event)1375 static inline bool needs_branch_stack(struct perf_event *event)
1376 {
1377 	return event->attr.branch_sample_type != 0;
1378 }
1379 
has_aux(struct perf_event * event)1380 static inline bool has_aux(struct perf_event *event)
1381 {
1382 	return event->pmu->setup_aux;
1383 }
1384 
is_write_backward(struct perf_event * event)1385 static inline bool is_write_backward(struct perf_event *event)
1386 {
1387 	return !!event->attr.write_backward;
1388 }
1389 
has_addr_filter(struct perf_event * event)1390 static inline bool has_addr_filter(struct perf_event *event)
1391 {
1392 	return event->pmu->nr_addr_filters;
1393 }
1394 
1395 /*
1396  * An inherited event uses parent's filters
1397  */
1398 static inline struct perf_addr_filters_head *
perf_event_addr_filters(struct perf_event * event)1399 perf_event_addr_filters(struct perf_event *event)
1400 {
1401 	struct perf_addr_filters_head *ifh = &event->addr_filters;
1402 
1403 	if (event->parent)
1404 		ifh = &event->parent->addr_filters;
1405 
1406 	return ifh;
1407 }
1408 
1409 extern void perf_event_addr_filters_sync(struct perf_event *event);
1410 
1411 extern int perf_output_begin(struct perf_output_handle *handle,
1412 			     struct perf_sample_data *data,
1413 			     struct perf_event *event, unsigned int size);
1414 extern int perf_output_begin_forward(struct perf_output_handle *handle,
1415 				     struct perf_sample_data *data,
1416 				     struct perf_event *event,
1417 				     unsigned int size);
1418 extern int perf_output_begin_backward(struct perf_output_handle *handle,
1419 				      struct perf_sample_data *data,
1420 				      struct perf_event *event,
1421 				      unsigned int size);
1422 
1423 extern void perf_output_end(struct perf_output_handle *handle);
1424 extern unsigned int perf_output_copy(struct perf_output_handle *handle,
1425 			     const void *buf, unsigned int len);
1426 extern unsigned int perf_output_skip(struct perf_output_handle *handle,
1427 				     unsigned int len);
1428 extern long perf_output_copy_aux(struct perf_output_handle *aux_handle,
1429 				 struct perf_output_handle *handle,
1430 				 unsigned long from, unsigned long to);
1431 extern int perf_swevent_get_recursion_context(void);
1432 extern void perf_swevent_put_recursion_context(int rctx);
1433 extern u64 perf_swevent_set_period(struct perf_event *event);
1434 extern void perf_event_enable(struct perf_event *event);
1435 extern void perf_event_disable(struct perf_event *event);
1436 extern void perf_event_disable_local(struct perf_event *event);
1437 extern void perf_event_disable_inatomic(struct perf_event *event);
1438 extern void perf_event_task_tick(void);
1439 extern int perf_event_account_interrupt(struct perf_event *event);
1440 extern int perf_event_period(struct perf_event *event, u64 value);
1441 extern u64 perf_event_pause(struct perf_event *event, bool reset);
1442 #else /* !CONFIG_PERF_EVENTS: */
1443 static inline void *
perf_aux_output_begin(struct perf_output_handle * handle,struct perf_event * event)1444 perf_aux_output_begin(struct perf_output_handle *handle,
1445 		      struct perf_event *event)				{ return NULL; }
1446 static inline void
perf_aux_output_end(struct perf_output_handle * handle,unsigned long size)1447 perf_aux_output_end(struct perf_output_handle *handle, unsigned long size)
1448 									{ }
1449 static inline int
perf_aux_output_skip(struct perf_output_handle * handle,unsigned long size)1450 perf_aux_output_skip(struct perf_output_handle *handle,
1451 		     unsigned long size)				{ return -EINVAL; }
1452 static inline void *
perf_get_aux(struct perf_output_handle * handle)1453 perf_get_aux(struct perf_output_handle *handle)				{ return NULL; }
1454 static inline void
perf_event_task_migrate(struct task_struct * task)1455 perf_event_task_migrate(struct task_struct *task)			{ }
1456 static inline void
perf_event_task_sched_in(struct task_struct * prev,struct task_struct * task)1457 perf_event_task_sched_in(struct task_struct *prev,
1458 			 struct task_struct *task)			{ }
1459 static inline void
perf_event_task_sched_out(struct task_struct * prev,struct task_struct * next)1460 perf_event_task_sched_out(struct task_struct *prev,
1461 			  struct task_struct *next)			{ }
perf_event_init_task(struct task_struct * child)1462 static inline int perf_event_init_task(struct task_struct *child)	{ return 0; }
perf_event_exit_task(struct task_struct * child)1463 static inline void perf_event_exit_task(struct task_struct *child)	{ }
perf_event_free_task(struct task_struct * task)1464 static inline void perf_event_free_task(struct task_struct *task)	{ }
perf_event_delayed_put(struct task_struct * task)1465 static inline void perf_event_delayed_put(struct task_struct *task)	{ }
perf_event_get(unsigned int fd)1466 static inline struct file *perf_event_get(unsigned int fd)	{ return ERR_PTR(-EINVAL); }
perf_get_event(struct file * file)1467 static inline const struct perf_event *perf_get_event(struct file *file)
1468 {
1469 	return ERR_PTR(-EINVAL);
1470 }
perf_event_attrs(struct perf_event * event)1471 static inline const struct perf_event_attr *perf_event_attrs(struct perf_event *event)
1472 {
1473 	return ERR_PTR(-EINVAL);
1474 }
perf_event_read_local(struct perf_event * event,u64 * value,u64 * enabled,u64 * running)1475 static inline int perf_event_read_local(struct perf_event *event, u64 *value,
1476 					u64 *enabled, u64 *running)
1477 {
1478 	return -EINVAL;
1479 }
perf_event_print_debug(void)1480 static inline void perf_event_print_debug(void)				{ }
perf_event_task_disable(void)1481 static inline int perf_event_task_disable(void)				{ return -EINVAL; }
perf_event_task_enable(void)1482 static inline int perf_event_task_enable(void)				{ return -EINVAL; }
perf_event_refresh(struct perf_event * event,int refresh)1483 static inline int perf_event_refresh(struct perf_event *event, int refresh)
1484 {
1485 	return -EINVAL;
1486 }
1487 
1488 static inline void
perf_sw_event(u32 event_id,u64 nr,struct pt_regs * regs,u64 addr)1489 perf_sw_event(u32 event_id, u64 nr, struct pt_regs *regs, u64 addr)	{ }
1490 static inline void
perf_sw_event_sched(u32 event_id,u64 nr,u64 addr)1491 perf_sw_event_sched(u32 event_id, u64 nr, u64 addr)			{ }
1492 static inline void
perf_bp_event(struct perf_event * event,void * data)1493 perf_bp_event(struct perf_event *event, void *data)			{ }
1494 
perf_register_guest_info_callbacks(struct perf_guest_info_callbacks * callbacks)1495 static inline int perf_register_guest_info_callbacks
1496 (struct perf_guest_info_callbacks *callbacks)				{ return 0; }
perf_unregister_guest_info_callbacks(struct perf_guest_info_callbacks * callbacks)1497 static inline int perf_unregister_guest_info_callbacks
1498 (struct perf_guest_info_callbacks *callbacks)				{ return 0; }
1499 
perf_event_mmap(struct vm_area_struct * vma)1500 static inline void perf_event_mmap(struct vm_area_struct *vma)		{ }
1501 
1502 typedef int (perf_ksymbol_get_name_f)(char *name, int name_len, void *data);
perf_event_ksymbol(u16 ksym_type,u64 addr,u32 len,bool unregister,const char * sym)1503 static inline void perf_event_ksymbol(u16 ksym_type, u64 addr, u32 len,
1504 				      bool unregister, const char *sym)	{ }
perf_event_bpf_event(struct bpf_prog * prog,enum perf_bpf_event_type type,u16 flags)1505 static inline void perf_event_bpf_event(struct bpf_prog *prog,
1506 					enum perf_bpf_event_type type,
1507 					u16 flags)			{ }
perf_event_exec(void)1508 static inline void perf_event_exec(void)				{ }
perf_event_comm(struct task_struct * tsk,bool exec)1509 static inline void perf_event_comm(struct task_struct *tsk, bool exec)	{ }
perf_event_namespaces(struct task_struct * tsk)1510 static inline void perf_event_namespaces(struct task_struct *tsk)	{ }
perf_event_fork(struct task_struct * tsk)1511 static inline void perf_event_fork(struct task_struct *tsk)		{ }
perf_event_text_poke(const void * addr,const void * old_bytes,size_t old_len,const void * new_bytes,size_t new_len)1512 static inline void perf_event_text_poke(const void *addr,
1513 					const void *old_bytes,
1514 					size_t old_len,
1515 					const void *new_bytes,
1516 					size_t new_len)			{ }
perf_event_init(void)1517 static inline void perf_event_init(void)				{ }
perf_swevent_get_recursion_context(void)1518 static inline int  perf_swevent_get_recursion_context(void)		{ return -1; }
perf_swevent_put_recursion_context(int rctx)1519 static inline void perf_swevent_put_recursion_context(int rctx)		{ }
perf_swevent_set_period(struct perf_event * event)1520 static inline u64 perf_swevent_set_period(struct perf_event *event)	{ return 0; }
perf_event_enable(struct perf_event * event)1521 static inline void perf_event_enable(struct perf_event *event)		{ }
perf_event_disable(struct perf_event * event)1522 static inline void perf_event_disable(struct perf_event *event)		{ }
__perf_event_disable(void * info)1523 static inline int __perf_event_disable(void *info)			{ return -1; }
perf_event_task_tick(void)1524 static inline void perf_event_task_tick(void)				{ }
perf_event_release_kernel(struct perf_event * event)1525 static inline int perf_event_release_kernel(struct perf_event *event)	{ return 0; }
perf_event_period(struct perf_event * event,u64 value)1526 static inline int perf_event_period(struct perf_event *event, u64 value)
1527 {
1528 	return -EINVAL;
1529 }
perf_event_pause(struct perf_event * event,bool reset)1530 static inline u64 perf_event_pause(struct perf_event *event, bool reset)
1531 {
1532 	return 0;
1533 }
1534 #endif
1535 
1536 #if defined(CONFIG_PERF_EVENTS) && defined(CONFIG_CPU_SUP_INTEL)
1537 extern void perf_restore_debug_store(void);
1538 #else
perf_restore_debug_store(void)1539 static inline void perf_restore_debug_store(void)			{ }
1540 #endif
1541 
perf_raw_frag_last(const struct perf_raw_frag * frag)1542 static __always_inline bool perf_raw_frag_last(const struct perf_raw_frag *frag)
1543 {
1544 	return frag->pad < sizeof(u64);
1545 }
1546 
1547 #define perf_output_put(handle, x) perf_output_copy((handle), &(x), sizeof(x))
1548 
1549 struct perf_pmu_events_attr {
1550 	struct device_attribute attr;
1551 	u64 id;
1552 	const char *event_str;
1553 };
1554 
1555 struct perf_pmu_events_ht_attr {
1556 	struct device_attribute			attr;
1557 	u64					id;
1558 	const char				*event_str_ht;
1559 	const char				*event_str_noht;
1560 };
1561 
1562 ssize_t perf_event_sysfs_show(struct device *dev, struct device_attribute *attr,
1563 			      char *page);
1564 
1565 #define PMU_EVENT_ATTR(_name, _var, _id, _show)				\
1566 static struct perf_pmu_events_attr _var = {				\
1567 	.attr = __ATTR(_name, 0444, _show, NULL),			\
1568 	.id   =  _id,							\
1569 };
1570 
1571 #define PMU_EVENT_ATTR_STRING(_name, _var, _str)			    \
1572 static struct perf_pmu_events_attr _var = {				    \
1573 	.attr		= __ATTR(_name, 0444, perf_event_sysfs_show, NULL), \
1574 	.id		= 0,						    \
1575 	.event_str	= _str,						    \
1576 };
1577 
1578 #define PMU_FORMAT_ATTR(_name, _format)					\
1579 static ssize_t								\
1580 _name##_show(struct device *dev,					\
1581 			       struct device_attribute *attr,		\
1582 			       char *page)				\
1583 {									\
1584 	BUILD_BUG_ON(sizeof(_format) >= PAGE_SIZE);			\
1585 	return sprintf(page, _format "\n");				\
1586 }									\
1587 									\
1588 static struct device_attribute format_attr_##_name = __ATTR_RO(_name)
1589 
1590 /* Performance counter hotplug functions */
1591 #ifdef CONFIG_PERF_EVENTS
1592 int perf_event_init_cpu(unsigned int cpu);
1593 int perf_event_exit_cpu(unsigned int cpu);
1594 #else
1595 #define perf_event_init_cpu	NULL
1596 #define perf_event_exit_cpu	NULL
1597 #endif
1598 
1599 extern void __weak arch_perf_update_userpage(struct perf_event *event,
1600 					     struct perf_event_mmap_page *userpg,
1601 					     u64 now);
1602 
1603 #endif /* _LINUX_PERF_EVENT_H */
1604