xref: /OK3568_Linux_fs/kernel/tools/perf/builtin-sched.c (revision 4882a59341e53eb6f0b4789bf948001014eff981)
// SPDX-License-Identifier: GPL-2.0
#include "builtin.h"
#include "perf.h"
#include "perf-sys.h"

#include "util/cpumap.h"
#include "util/evlist.h"
#include "util/evsel.h"
#include "util/evsel_fprintf.h"
#include "util/symbol.h"
#include "util/thread.h"
#include "util/header.h"
#include "util/session.h"
#include "util/tool.h"
#include "util/cloexec.h"
#include "util/thread_map.h"
#include "util/color.h"
#include "util/stat.h"
#include "util/string2.h"
#include "util/callchain.h"
#include "util/time-utils.h"

#include <subcmd/pager.h>
#include <subcmd/parse-options.h>
#include "util/trace-event.h"

#include "util/debug.h"
#include "util/event.h"

#include <linux/kernel.h>
#include <linux/log2.h>
#include <linux/zalloc.h>
#include <sys/prctl.h>
#include <sys/resource.h>
#include <inttypes.h>

#include <errno.h>
#include <semaphore.h>
#include <pthread.h>
#include <math.h>
#include <api/fs/fs.h>
#include <perf/cpumap.h>
#include <linux/time64.h>
#include <linux/err.h>

#include <linux/ctype.h>

#define PR_SET_NAME		15               /* Set process name */
#define MAX_CPUS		4096
#define COMM_LEN		20
#define SYM_LEN			129
#define MAX_PID			1024000

static const char *cpu_list;
static DECLARE_BITMAP(cpu_bitmap, MAX_NR_CPUS);

struct sched_atom;

struct task_desc {
	unsigned long		nr;
	unsigned long		pid;
	char			comm[COMM_LEN];

	unsigned long		nr_events;
	unsigned long		curr_event;
	struct sched_atom	**atoms;

	pthread_t		thread;
	sem_t			sleep_sem;

	sem_t			ready_for_work;
	sem_t			work_done_sem;

	u64			cpu_usage;
};

enum sched_event_type {
	SCHED_EVENT_RUN,
	SCHED_EVENT_SLEEP,
	SCHED_EVENT_WAKEUP,
	SCHED_EVENT_MIGRATION,
};

struct sched_atom {
	enum sched_event_type	type;
	int			specific_wait;
	u64			timestamp;
	u64			duration;
	unsigned long		nr;
	sem_t			*wait_sem;
	struct task_desc	*wakee;
};
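
/*
 * Illustrative note (added for exposition, not part of the upstream
 * source): during `perf sched replay` each recorded task is rebuilt as
 * an ordered array of these atoms. A task that ran for 2ms, woke a peer
 * and then blocked would roughly become:
 *
 *	atoms[0] = { .type = SCHED_EVENT_RUN,    .duration = 2000000 }
 *	atoms[1] = { .type = SCHED_EVENT_WAKEUP, .wakee    = peer    }
 *	atoms[2] = { .type = SCHED_EVENT_SLEEP,  .wait_sem = sem     }
 *
 * perf_sched__process_event() below consumes atoms in this order:
 * burning CPU for RUN, posting the wakee's semaphore for WAKEUP, and
 * blocking on wait_sem for SLEEP.
 */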

#define TASK_STATE_TO_CHAR_STR "RSDTtZXxKWP"

/* task state bitmask, copied from include/linux/sched.h */
#define TASK_RUNNING		0
#define TASK_INTERRUPTIBLE	1
#define TASK_UNINTERRUPTIBLE	2
#define __TASK_STOPPED		4
#define __TASK_TRACED		8
/* in tsk->exit_state */
#define EXIT_DEAD		16
#define EXIT_ZOMBIE		32
#define EXIT_TRACE		(EXIT_ZOMBIE | EXIT_DEAD)
/* in tsk->state again */
#define TASK_DEAD		64
#define TASK_WAKEKILL		128
#define TASK_WAKING		256
#define TASK_PARKED		512
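
/*
 * Illustrative note (added for exposition, not part of the upstream
 * source): TASK_STATE_TO_CHAR_STR lists one letter per state above:
 * R(unning), S(leeping), D(isk sleep), T (stopped), t (traced),
 * Z(ombie), X (exit dead), x (task dead), K (wakekill), W (waking),
 * P(arked). Caveat: sched_out_state() further down indexes this string
 * directly with prev_state, which is only exact for the low-numbered
 * states (0, 1, 2); multi-bit state values land on neighbouring letters.
 */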

enum thread_state {
	THREAD_SLEEPING = 0,
	THREAD_WAIT_CPU,
	THREAD_SCHED_IN,
	THREAD_IGNORE
};

struct work_atom {
	struct list_head	list;
	enum thread_state	state;
	u64			sched_out_time;
	u64			wake_up_time;
	u64			sched_in_time;
	u64			runtime;
};

struct work_atoms {
	struct list_head	work_list;
	struct thread		*thread;
	struct rb_node		node;
	u64			max_lat;
	u64			max_lat_start;
	u64			max_lat_end;
	u64			total_lat;
	u64			nb_atoms;
	u64			total_runtime;
	int			num_merged;
};

typedef int (*sort_fn_t)(struct work_atoms *, struct work_atoms *);

struct perf_sched;

struct trace_sched_handler {
	int (*switch_event)(struct perf_sched *sched, struct evsel *evsel,
			    struct perf_sample *sample, struct machine *machine);

	int (*runtime_event)(struct perf_sched *sched, struct evsel *evsel,
			     struct perf_sample *sample, struct machine *machine);

	int (*wakeup_event)(struct perf_sched *sched, struct evsel *evsel,
			    struct perf_sample *sample, struct machine *machine);

	/* PERF_RECORD_FORK event, not sched_process_fork tracepoint */
	int (*fork_event)(struct perf_sched *sched, union perf_event *event,
			  struct machine *machine);

	int (*migrate_task_event)(struct perf_sched *sched,
				  struct evsel *evsel,
				  struct perf_sample *sample,
				  struct machine *machine);
};
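
/*
 * Illustrative note (added for exposition, not part of the upstream
 * source): each perf sched sub-command installs its own vtable of these
 * callbacks as sched->tp_handler. Further down in the full file (outside
 * this excerpt) there are tables along the lines of:
 *
 *	static const struct trace_sched_handler replay_ops = {
 *		.wakeup_event	= replay_wakeup_event,
 *		.switch_event	= replay_switch_event,
 *		.fork_event	= replay_fork_event,
 *	};
 *
 * so the tracepoint-dispatch code stays identical across `perf sched
 * replay`, `perf sched latency` and `perf sched map`.
 */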

#define COLOR_PIDS PERF_COLOR_BLUE
#define COLOR_CPUS PERF_COLOR_BG_RED

struct perf_sched_map {
	DECLARE_BITMAP(comp_cpus_mask, MAX_CPUS);
	int			*comp_cpus;
	bool			 comp;
	struct perf_thread_map *color_pids;
	const char		*color_pids_str;
	struct perf_cpu_map	*color_cpus;
	const char		*color_cpus_str;
	struct perf_cpu_map	*cpus;
	const char		*cpus_str;
};

struct perf_sched {
	struct perf_tool tool;
	const char	 *sort_order;
	unsigned long	 nr_tasks;
	struct task_desc **pid_to_task;
	struct task_desc **tasks;
	const struct trace_sched_handler *tp_handler;
	pthread_mutex_t	 start_work_mutex;
	pthread_mutex_t	 work_done_wait_mutex;
	int		 profile_cpu;
/*
 * Track the current task - that way we can know whether there are any
 * weird events, such as a task being switched away that is not current.
 */
	int		 max_cpu;
	u32		 curr_pid[MAX_CPUS];
	struct thread	 *curr_thread[MAX_CPUS];
	char		 next_shortname1;
	char		 next_shortname2;
	unsigned int	 replay_repeat;
	unsigned long	 nr_run_events;
	unsigned long	 nr_sleep_events;
	unsigned long	 nr_wakeup_events;
	unsigned long	 nr_sleep_corrections;
	unsigned long	 nr_run_events_optimized;
	unsigned long	 targetless_wakeups;
	unsigned long	 multitarget_wakeups;
	unsigned long	 nr_runs;
	unsigned long	 nr_timestamps;
	unsigned long	 nr_unordered_timestamps;
	unsigned long	 nr_context_switch_bugs;
	unsigned long	 nr_events;
	unsigned long	 nr_lost_chunks;
	unsigned long	 nr_lost_events;
	u64		 run_measurement_overhead;
	u64		 sleep_measurement_overhead;
	u64		 start_time;
	u64		 cpu_usage;
	u64		 runavg_cpu_usage;
	u64		 parent_cpu_usage;
	u64		 runavg_parent_cpu_usage;
	u64		 sum_runtime;
	u64		 sum_fluct;
	u64		 run_avg;
	u64		 all_runtime;
	u64		 all_count;
	u64		 cpu_last_switched[MAX_CPUS];
	struct rb_root_cached atom_root, sorted_atom_root, merged_atom_root;
	struct list_head sort_list, cmp_pid;
	bool force;
	bool skip_merge;
	struct perf_sched_map map;

	/* options for timehist command */
	bool		summary;
	bool		summary_only;
	bool		idle_hist;
	bool		show_callchain;
	unsigned int	max_stack;
	bool		show_cpu_visual;
	bool		show_wakeups;
	bool		show_next;
	bool		show_migrations;
	bool		show_state;
	u64		skipped_samples;
	const char	*time_str;
	struct perf_time_interval ptime;
	struct perf_time_interval hist_time;
};

/* per thread run time data */
struct thread_runtime {
	u64 last_time;      /* time of previous sched in/out event */
	u64 dt_run;         /* run time */
	u64 dt_sleep;       /* time between CPU access by sleep (off cpu) */
	u64 dt_iowait;      /* time between CPU access by iowait (off cpu) */
	u64 dt_preempt;     /* time between CPU access by preempt (off cpu) */
	u64 dt_delay;       /* time between wakeup and sched-in */
	u64 ready_to_run;   /* time of wakeup */

	struct stats run_stats;
	u64 total_run_time;
	u64 total_sleep_time;
	u64 total_iowait_time;
	u64 total_preempt_time;
	u64 total_delay_time;

	int last_state;

	char shortname[3];
	bool comm_changed;

	u64 migrations;
};

/* per event run time data */
struct evsel_runtime {
	u64 *last_time; /* time this event was last seen per cpu */
	u32 ncpu;       /* highest cpu slot allocated */
};

/* per cpu idle time data */
struct idle_thread_runtime {
	struct thread_runtime	tr;
	struct thread		*last_thread;
	struct rb_root_cached	sorted_root;
	struct callchain_root	callchain;
	struct callchain_cursor	cursor;
};
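
/*
 * Illustrative note (added for exposition, not part of the upstream
 * source): `perf sched timehist` splits each thread's wall-clock time
 * into the dt_* buckets above at every sched_switch, so for one
 * wakeup -> run -> block cycle the accounting is roughly:
 *
 *	wall time = dt_delay (runnable, waiting for a CPU)
 *	          + dt_run   (on CPU)
 *	          + dt_sleep | dt_iowait | dt_preempt (off CPU, by cause)
 */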

/* track idle times per cpu */
static struct thread **idle_threads;
static int idle_max_cpu;
static char idle_comm[] = "<idle>";

static u64 get_nsecs(void)
{
	struct timespec ts;

	clock_gettime(CLOCK_MONOTONIC, &ts);

	return ts.tv_sec * NSEC_PER_SEC + ts.tv_nsec;
}

static void burn_nsecs(struct perf_sched *sched, u64 nsecs)
{
	u64 T0 = get_nsecs(), T1;

	do {
		T1 = get_nsecs();
	} while (T1 + sched->run_measurement_overhead < T0 + nsecs);
}
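
/*
 * Illustrative note (added for exposition, not part of the upstream
 * source): burn_nsecs() deliberately under-spins by
 * run_measurement_overhead, which calibrate_run_measurement_overhead()
 * below estimates as the cheapest of ten empty burn_nsecs(sched, 0)
 * calls. For example, with a measured overhead of 120 ns, a request to
 * burn 1000 ns only spins until T1 - T0 >= 880 ns, so the
 * clock_gettime() bookkeeping is not double-counted as simulated
 * runtime.
 */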

static void sleep_nsecs(u64 nsecs)
{
	struct timespec ts;

	ts.tv_nsec = nsecs % 999999999;
	ts.tv_sec = nsecs / 999999999;

	nanosleep(&ts, NULL);
}

static void calibrate_run_measurement_overhead(struct perf_sched *sched)
{
	u64 T0, T1, delta, min_delta = NSEC_PER_SEC;
	int i;

	for (i = 0; i < 10; i++) {
		T0 = get_nsecs();
		burn_nsecs(sched, 0);
		T1 = get_nsecs();
		delta = T1-T0;
		min_delta = min(min_delta, delta);
	}
	sched->run_measurement_overhead = min_delta;

	printf("run measurement overhead: %" PRIu64 " nsecs\n", min_delta);
}

static void calibrate_sleep_measurement_overhead(struct perf_sched *sched)
{
	u64 T0, T1, delta, min_delta = NSEC_PER_SEC;
	int i;

	for (i = 0; i < 10; i++) {
		T0 = get_nsecs();
		sleep_nsecs(10000);
		T1 = get_nsecs();
		delta = T1-T0;
		min_delta = min(min_delta, delta);
	}
	min_delta -= 10000;
	sched->sleep_measurement_overhead = min_delta;

	printf("sleep measurement overhead: %" PRIu64 " nsecs\n", min_delta);
}

static struct sched_atom *
get_new_event(struct task_desc *task, u64 timestamp)
{
	struct sched_atom *event = zalloc(sizeof(*event));
	unsigned long idx = task->nr_events;
	size_t size;

	event->timestamp = timestamp;
	event->nr = idx;

	task->nr_events++;
	size = sizeof(struct sched_atom *) * task->nr_events;
	task->atoms = realloc(task->atoms, size);
	BUG_ON(!task->atoms);

	task->atoms[idx] = event;

	return event;
}

static struct sched_atom *last_event(struct task_desc *task)
{
	if (!task->nr_events)
		return NULL;

	return task->atoms[task->nr_events - 1];
}

static void add_sched_event_run(struct perf_sched *sched, struct task_desc *task,
				u64 timestamp, u64 duration)
{
	struct sched_atom *event, *curr_event = last_event(task);

	/*
	 * optimize an existing RUN event by merging this one
	 * to it:
	 */
	if (curr_event && curr_event->type == SCHED_EVENT_RUN) {
		sched->nr_run_events_optimized++;
		curr_event->duration += duration;
		return;
	}

	event = get_new_event(task, timestamp);

	event->type = SCHED_EVENT_RUN;
	event->duration = duration;

	sched->nr_run_events++;
}

static void add_sched_event_wakeup(struct perf_sched *sched, struct task_desc *task,
				   u64 timestamp, struct task_desc *wakee)
{
	struct sched_atom *event, *wakee_event;

	event = get_new_event(task, timestamp);
	event->type = SCHED_EVENT_WAKEUP;
	event->wakee = wakee;

	wakee_event = last_event(wakee);
	if (!wakee_event || wakee_event->type != SCHED_EVENT_SLEEP) {
		sched->targetless_wakeups++;
		return;
	}
	if (wakee_event->wait_sem) {
		sched->multitarget_wakeups++;
		return;
	}

	wakee_event->wait_sem = zalloc(sizeof(*wakee_event->wait_sem));
	sem_init(wakee_event->wait_sem, 0, 0);
	wakee_event->specific_wait = 1;
	event->wait_sem = wakee_event->wait_sem;

	sched->nr_wakeup_events++;
}
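
/*
 * Illustrative note (added for exposition, not part of the upstream
 * source): this pairing is what makes replay deterministic. The wakee's
 * pending SLEEP atom and the waker's WAKEUP atom share one semaphore:
 *
 *	waker thread:  SCHED_EVENT_WAKEUP -> sem_post(atom->wait_sem)
 *	wakee thread:  SCHED_EVENT_SLEEP  -> sem_wait(atom->wait_sem)
 *
 * so the wakee cannot run past its recorded sleep until the waker
 * replays the matching wakeup. Wakeups with no sleeping target, or whose
 * target is already claimed by another waker, are only counted
 * (targetless_wakeups / multitarget_wakeups) and left unpaired.
 */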

static void add_sched_event_sleep(struct perf_sched *sched, struct task_desc *task,
				  u64 timestamp, u64 task_state __maybe_unused)
{
	struct sched_atom *event = get_new_event(task, timestamp);

	event->type = SCHED_EVENT_SLEEP;

	sched->nr_sleep_events++;
}

static struct task_desc *register_pid(struct perf_sched *sched,
				      unsigned long pid, const char *comm)
{
	struct task_desc *task;
	static int pid_max;

	if (sched->pid_to_task == NULL) {
		if (sysctl__read_int("kernel/pid_max", &pid_max) < 0)
			pid_max = MAX_PID;
		BUG_ON((sched->pid_to_task = calloc(pid_max, sizeof(struct task_desc *))) == NULL);
	}
	if (pid >= (unsigned long)pid_max) {
		BUG_ON((sched->pid_to_task = realloc(sched->pid_to_task, (pid + 1) *
			sizeof(struct task_desc *))) == NULL);
		while (pid >= (unsigned long)pid_max)
			sched->pid_to_task[pid_max++] = NULL;
	}

	task = sched->pid_to_task[pid];

	if (task)
		return task;

	task = zalloc(sizeof(*task));
	task->pid = pid;
	task->nr = sched->nr_tasks;
	strcpy(task->comm, comm);
	/*
	 * every task starts in sleeping state - this gets ignored
	 * if there's no wakeup pointing to this sleep state:
	 */
	add_sched_event_sleep(sched, task, 0, 0);

	sched->pid_to_task[pid] = task;
	sched->nr_tasks++;
	sched->tasks = realloc(sched->tasks, sched->nr_tasks * sizeof(struct task_desc *));
	BUG_ON(!sched->tasks);
	sched->tasks[task->nr] = task;

	if (verbose > 0)
		printf("registered task #%ld, PID %ld (%s)\n", sched->nr_tasks, pid, comm);

	return task;
}


static void print_task_traces(struct perf_sched *sched)
{
	struct task_desc *task;
	unsigned long i;

	for (i = 0; i < sched->nr_tasks; i++) {
		task = sched->tasks[i];
		printf("task %6ld (%20s:%10ld), nr_events: %ld\n",
			task->nr, task->comm, task->pid, task->nr_events);
	}
}

static void add_cross_task_wakeups(struct perf_sched *sched)
{
	struct task_desc *task1, *task2;
	unsigned long i, j;

	for (i = 0; i < sched->nr_tasks; i++) {
		task1 = sched->tasks[i];
		j = i + 1;
		if (j == sched->nr_tasks)
			j = 0;
		task2 = sched->tasks[j];
		add_sched_event_wakeup(sched, task1, 0, task2);
	}
}

static void perf_sched__process_event(struct perf_sched *sched,
				      struct sched_atom *atom)
{
	int ret = 0;

	switch (atom->type) {
		case SCHED_EVENT_RUN:
			burn_nsecs(sched, atom->duration);
			break;
		case SCHED_EVENT_SLEEP:
			if (atom->wait_sem)
				ret = sem_wait(atom->wait_sem);
			BUG_ON(ret);
			break;
		case SCHED_EVENT_WAKEUP:
			if (atom->wait_sem)
				ret = sem_post(atom->wait_sem);
			BUG_ON(ret);
			break;
		case SCHED_EVENT_MIGRATION:
			break;
		default:
			BUG_ON(1);
	}
}

static u64 get_cpu_usage_nsec_parent(void)
{
	struct rusage ru;
	u64 sum;
	int err;

	err = getrusage(RUSAGE_SELF, &ru);
	BUG_ON(err);

	sum =  ru.ru_utime.tv_sec * NSEC_PER_SEC + ru.ru_utime.tv_usec * NSEC_PER_USEC;
	sum += ru.ru_stime.tv_sec * NSEC_PER_SEC + ru.ru_stime.tv_usec * NSEC_PER_USEC;

	return sum;
}

static int self_open_counters(struct perf_sched *sched, unsigned long cur_task)
{
	struct perf_event_attr attr;
	char sbuf[STRERR_BUFSIZE], info[STRERR_BUFSIZE];
	int fd;
	struct rlimit limit;
	bool need_privilege = false;

	memset(&attr, 0, sizeof(attr));

	attr.type = PERF_TYPE_SOFTWARE;
	attr.config = PERF_COUNT_SW_TASK_CLOCK;

force_again:
	fd = sys_perf_event_open(&attr, 0, -1, -1,
				 perf_event_open_cloexec_flag());

	if (fd < 0) {
		if (errno == EMFILE) {
			if (sched->force) {
				BUG_ON(getrlimit(RLIMIT_NOFILE, &limit) == -1);
				limit.rlim_cur += sched->nr_tasks - cur_task;
				if (limit.rlim_cur > limit.rlim_max) {
					limit.rlim_max = limit.rlim_cur;
					need_privilege = true;
				}
				if (setrlimit(RLIMIT_NOFILE, &limit) == -1) {
					if (need_privilege && errno == EPERM)
						strcpy(info, "Need privilege\n");
				} else
					goto force_again;
			} else
				strcpy(info, "Have a try with -f option\n");
		}
		pr_err("Error: sys_perf_event_open() syscall returned "
		       "with %d (%s)\n%s", fd,
		       str_error_r(errno, sbuf, sizeof(sbuf)), info);
		exit(EXIT_FAILURE);
	}
	return fd;
}
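
/*
 * Illustrative note (added for exposition, not part of the upstream
 * source): every replayed task opens one PERF_COUNT_SW_TASK_CLOCK
 * counter on itself (pid 0 = calling thread, cpu -1 = any CPU), so a
 * read() on the fd returns that thread's accumulated CPU time in
 * nanoseconds. With thousands of recorded tasks this can exhaust
 * RLIMIT_NOFILE; on EMFILE with -f/--force the code grows the soft
 * limit by the number of still-unopened tasks and retries. Caveat: for
 * failures other than EMFILE, the info buffer is printed without having
 * been initialized.
 */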

static u64 get_cpu_usage_nsec_self(int fd)
{
	u64 runtime;
	int ret;

	ret = read(fd, &runtime, sizeof(runtime));
	BUG_ON(ret != sizeof(runtime));

	return runtime;
}

struct sched_thread_parms {
	struct task_desc  *task;
	struct perf_sched *sched;
	int fd;
};

static void *thread_func(void *ctx)
{
	struct sched_thread_parms *parms = ctx;
	struct task_desc *this_task = parms->task;
	struct perf_sched *sched = parms->sched;
	u64 cpu_usage_0, cpu_usage_1;
	unsigned long i, ret;
	char comm2[22];
	int fd = parms->fd;

	zfree(&parms);

	sprintf(comm2, ":%s", this_task->comm);
	prctl(PR_SET_NAME, comm2);
	if (fd < 0)
		return NULL;
again:
	ret = sem_post(&this_task->ready_for_work);
	BUG_ON(ret);
	ret = pthread_mutex_lock(&sched->start_work_mutex);
	BUG_ON(ret);
	ret = pthread_mutex_unlock(&sched->start_work_mutex);
	BUG_ON(ret);

	cpu_usage_0 = get_cpu_usage_nsec_self(fd);

	for (i = 0; i < this_task->nr_events; i++) {
		this_task->curr_event = i;
		perf_sched__process_event(sched, this_task->atoms[i]);
	}

	cpu_usage_1 = get_cpu_usage_nsec_self(fd);
	this_task->cpu_usage = cpu_usage_1 - cpu_usage_0;
	ret = sem_post(&this_task->work_done_sem);
	BUG_ON(ret);

	ret = pthread_mutex_lock(&sched->work_done_wait_mutex);
	BUG_ON(ret);
	ret = pthread_mutex_unlock(&sched->work_done_wait_mutex);
	BUG_ON(ret);

	goto again;
}
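
/*
 * Illustrative note (added for exposition, not part of the upstream
 * source): the lock-then-unlock pairs above use the two mutexes as
 * start and stop gates. The parent holds start_work_mutex while every
 * worker posts ready_for_work and blocks in pthread_mutex_lock();
 * releasing the mutex in wait_for_tasks() lets all workers through at
 * once. One replay iteration per worker is therefore:
 *
 *	post ready_for_work -> pass start gate -> replay all atoms
 *	-> post work_done_sem -> pass work-done gate -> loop again
 */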
662*4882a593Smuzhiyun 
create_tasks(struct perf_sched * sched)663*4882a593Smuzhiyun static void create_tasks(struct perf_sched *sched)
664*4882a593Smuzhiyun {
665*4882a593Smuzhiyun 	struct task_desc *task;
666*4882a593Smuzhiyun 	pthread_attr_t attr;
667*4882a593Smuzhiyun 	unsigned long i;
668*4882a593Smuzhiyun 	int err;
669*4882a593Smuzhiyun 
670*4882a593Smuzhiyun 	err = pthread_attr_init(&attr);
671*4882a593Smuzhiyun 	BUG_ON(err);
672*4882a593Smuzhiyun 	err = pthread_attr_setstacksize(&attr,
673*4882a593Smuzhiyun 			(size_t) max(16 * 1024, PTHREAD_STACK_MIN));
674*4882a593Smuzhiyun 	BUG_ON(err);
675*4882a593Smuzhiyun 	err = pthread_mutex_lock(&sched->start_work_mutex);
676*4882a593Smuzhiyun 	BUG_ON(err);
677*4882a593Smuzhiyun 	err = pthread_mutex_lock(&sched->work_done_wait_mutex);
678*4882a593Smuzhiyun 	BUG_ON(err);
679*4882a593Smuzhiyun 	for (i = 0; i < sched->nr_tasks; i++) {
680*4882a593Smuzhiyun 		struct sched_thread_parms *parms = malloc(sizeof(*parms));
681*4882a593Smuzhiyun 		BUG_ON(parms == NULL);
682*4882a593Smuzhiyun 		parms->task = task = sched->tasks[i];
683*4882a593Smuzhiyun 		parms->sched = sched;
684*4882a593Smuzhiyun 		parms->fd = self_open_counters(sched, i);
685*4882a593Smuzhiyun 		sem_init(&task->sleep_sem, 0, 0);
686*4882a593Smuzhiyun 		sem_init(&task->ready_for_work, 0, 0);
687*4882a593Smuzhiyun 		sem_init(&task->work_done_sem, 0, 0);
688*4882a593Smuzhiyun 		task->curr_event = 0;
689*4882a593Smuzhiyun 		err = pthread_create(&task->thread, &attr, thread_func, parms);
690*4882a593Smuzhiyun 		BUG_ON(err);
691*4882a593Smuzhiyun 	}
692*4882a593Smuzhiyun }
693*4882a593Smuzhiyun 
wait_for_tasks(struct perf_sched * sched)694*4882a593Smuzhiyun static void wait_for_tasks(struct perf_sched *sched)
695*4882a593Smuzhiyun {
696*4882a593Smuzhiyun 	u64 cpu_usage_0, cpu_usage_1;
697*4882a593Smuzhiyun 	struct task_desc *task;
698*4882a593Smuzhiyun 	unsigned long i, ret;
699*4882a593Smuzhiyun 
700*4882a593Smuzhiyun 	sched->start_time = get_nsecs();
701*4882a593Smuzhiyun 	sched->cpu_usage = 0;
702*4882a593Smuzhiyun 	pthread_mutex_unlock(&sched->work_done_wait_mutex);
703*4882a593Smuzhiyun 
704*4882a593Smuzhiyun 	for (i = 0; i < sched->nr_tasks; i++) {
705*4882a593Smuzhiyun 		task = sched->tasks[i];
706*4882a593Smuzhiyun 		ret = sem_wait(&task->ready_for_work);
707*4882a593Smuzhiyun 		BUG_ON(ret);
708*4882a593Smuzhiyun 		sem_init(&task->ready_for_work, 0, 0);
709*4882a593Smuzhiyun 	}
710*4882a593Smuzhiyun 	ret = pthread_mutex_lock(&sched->work_done_wait_mutex);
711*4882a593Smuzhiyun 	BUG_ON(ret);
712*4882a593Smuzhiyun 
713*4882a593Smuzhiyun 	cpu_usage_0 = get_cpu_usage_nsec_parent();
714*4882a593Smuzhiyun 
715*4882a593Smuzhiyun 	pthread_mutex_unlock(&sched->start_work_mutex);
716*4882a593Smuzhiyun 
717*4882a593Smuzhiyun 	for (i = 0; i < sched->nr_tasks; i++) {
718*4882a593Smuzhiyun 		task = sched->tasks[i];
719*4882a593Smuzhiyun 		ret = sem_wait(&task->work_done_sem);
720*4882a593Smuzhiyun 		BUG_ON(ret);
721*4882a593Smuzhiyun 		sem_init(&task->work_done_sem, 0, 0);
722*4882a593Smuzhiyun 		sched->cpu_usage += task->cpu_usage;
723*4882a593Smuzhiyun 		task->cpu_usage = 0;
724*4882a593Smuzhiyun 	}
725*4882a593Smuzhiyun 
726*4882a593Smuzhiyun 	cpu_usage_1 = get_cpu_usage_nsec_parent();
727*4882a593Smuzhiyun 	if (!sched->runavg_cpu_usage)
728*4882a593Smuzhiyun 		sched->runavg_cpu_usage = sched->cpu_usage;
729*4882a593Smuzhiyun 	sched->runavg_cpu_usage = (sched->runavg_cpu_usage * (sched->replay_repeat - 1) + sched->cpu_usage) / sched->replay_repeat;
730*4882a593Smuzhiyun 
731*4882a593Smuzhiyun 	sched->parent_cpu_usage = cpu_usage_1 - cpu_usage_0;
732*4882a593Smuzhiyun 	if (!sched->runavg_parent_cpu_usage)
733*4882a593Smuzhiyun 		sched->runavg_parent_cpu_usage = sched->parent_cpu_usage;
734*4882a593Smuzhiyun 	sched->runavg_parent_cpu_usage = (sched->runavg_parent_cpu_usage * (sched->replay_repeat - 1) +
735*4882a593Smuzhiyun 					 sched->parent_cpu_usage)/sched->replay_repeat;
736*4882a593Smuzhiyun 
737*4882a593Smuzhiyun 	ret = pthread_mutex_lock(&sched->start_work_mutex);
738*4882a593Smuzhiyun 	BUG_ON(ret);
739*4882a593Smuzhiyun 
740*4882a593Smuzhiyun 	for (i = 0; i < sched->nr_tasks; i++) {
741*4882a593Smuzhiyun 		task = sched->tasks[i];
742*4882a593Smuzhiyun 		sem_init(&task->sleep_sem, 0, 0);
743*4882a593Smuzhiyun 		task->curr_event = 0;
744*4882a593Smuzhiyun 	}
745*4882a593Smuzhiyun }
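
/*
 * Illustrative note (added for exposition, not part of the upstream
 * source): the runavg_* updates above are an exponential moving average
 * with weight 1/replay_repeat:
 *
 *	runavg = (runavg * (repeat - 1) + sample) / repeat
 *
 * Assuming the usual replay default of repeat = 10 (set outside this
 * excerpt), each new sample moves the average by a tenth of its
 * deviation, smoothing per-iteration noise in the printed statistics.
 */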

static void run_one_test(struct perf_sched *sched)
{
	u64 T0, T1, delta, avg_delta, fluct;

	T0 = get_nsecs();
	wait_for_tasks(sched);
	T1 = get_nsecs();

	delta = T1 - T0;
	sched->sum_runtime += delta;
	sched->nr_runs++;

	avg_delta = sched->sum_runtime / sched->nr_runs;
	if (delta < avg_delta)
		fluct = avg_delta - delta;
	else
		fluct = delta - avg_delta;
	sched->sum_fluct += fluct;
	if (!sched->run_avg)
		sched->run_avg = delta;
	sched->run_avg = (sched->run_avg * (sched->replay_repeat - 1) + delta) / sched->replay_repeat;

	printf("#%-3ld: %0.3f, ", sched->nr_runs, (double)delta / NSEC_PER_MSEC);

	printf("ravg: %0.2f, ", (double)sched->run_avg / NSEC_PER_MSEC);

	printf("cpu: %0.2f / %0.2f",
		(double)sched->cpu_usage / NSEC_PER_MSEC, (double)sched->runavg_cpu_usage / NSEC_PER_MSEC);

#if 0
	/*
	 * rusage statistics done by the parent, these are less
	 * accurate than the sched->sum_exec_runtime based statistics:
	 */
	printf(" [%0.2f / %0.2f]",
		(double)sched->parent_cpu_usage / NSEC_PER_MSEC,
		(double)sched->runavg_parent_cpu_usage / NSEC_PER_MSEC);
#endif

	printf("\n");

	if (sched->nr_sleep_corrections)
		printf(" (%ld sleep corrections)\n", sched->nr_sleep_corrections);
	sched->nr_sleep_corrections = 0;
}
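
/*
 * Illustrative note (added for exposition, not part of the upstream
 * source; the numbers below are made up, only the format is real): each
 * replay iteration prints a line such as
 *
 *	#1  : 54.004, ravg: 54.00, cpu: 1290.05 / 1290.05
 *
 * i.e. this iteration's wall-clock time in ms, its running average, and
 * the summed task CPU usage for the iteration vs. its running average.
 */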

static void test_calibrations(struct perf_sched *sched)
{
	u64 T0, T1;

	T0 = get_nsecs();
	burn_nsecs(sched, NSEC_PER_MSEC);
	T1 = get_nsecs();

	printf("the run test took %" PRIu64 " nsecs\n", T1 - T0);

	T0 = get_nsecs();
	sleep_nsecs(NSEC_PER_MSEC);
	T1 = get_nsecs();

	printf("the sleep test took %" PRIu64 " nsecs\n", T1 - T0);
}

static int
replay_wakeup_event(struct perf_sched *sched,
		    struct evsel *evsel, struct perf_sample *sample,
		    struct machine *machine __maybe_unused)
{
	const char *comm = evsel__strval(evsel, sample, "comm");
	const u32 pid	 = evsel__intval(evsel, sample, "pid");
	struct task_desc *waker, *wakee;

	if (verbose > 0) {
		printf("sched_wakeup event %p\n", evsel);

		printf(" ... pid %d woke up %s/%d\n", sample->tid, comm, pid);
	}

	waker = register_pid(sched, sample->tid, "<unknown>");
	wakee = register_pid(sched, pid, comm);

	add_sched_event_wakeup(sched, waker, sample->time, wakee);
	return 0;
}

static int replay_switch_event(struct perf_sched *sched,
			       struct evsel *evsel,
			       struct perf_sample *sample,
			       struct machine *machine __maybe_unused)
{
	const char *prev_comm  = evsel__strval(evsel, sample, "prev_comm"),
		   *next_comm  = evsel__strval(evsel, sample, "next_comm");
	const u32 prev_pid = evsel__intval(evsel, sample, "prev_pid"),
		  next_pid = evsel__intval(evsel, sample, "next_pid");
	const u64 prev_state = evsel__intval(evsel, sample, "prev_state");
	struct task_desc *prev, __maybe_unused *next;
	u64 timestamp0, timestamp = sample->time;
	int cpu = sample->cpu;
	s64 delta;

	if (verbose > 0)
		printf("sched_switch event %p\n", evsel);

	if (cpu >= MAX_CPUS || cpu < 0)
		return 0;

	timestamp0 = sched->cpu_last_switched[cpu];
	if (timestamp0)
		delta = timestamp - timestamp0;
	else
		delta = 0;

	if (delta < 0) {
		pr_err("hm, delta: %" PRId64 " < 0 ?\n", delta);
		return -1;
	}

	pr_debug(" ... switch from %s/%d to %s/%d [ran %" PRIu64 " nsecs]\n",
		 prev_comm, prev_pid, next_comm, next_pid, delta);

	prev = register_pid(sched, prev_pid, prev_comm);
	next = register_pid(sched, next_pid, next_comm);

	sched->cpu_last_switched[cpu] = timestamp;

	add_sched_event_run(sched, prev, timestamp, delta);
	add_sched_event_sleep(sched, prev, timestamp, prev_state);

	return 0;
}

static int replay_fork_event(struct perf_sched *sched,
			     union perf_event *event,
			     struct machine *machine)
{
	struct thread *child, *parent;

	child = machine__findnew_thread(machine, event->fork.pid,
					event->fork.tid);
	parent = machine__findnew_thread(machine, event->fork.ppid,
					 event->fork.ptid);

	if (child == NULL || parent == NULL) {
		pr_debug("thread does not exist on fork event: child %p, parent %p\n",
				 child, parent);
		goto out_put;
	}

	if (verbose > 0) {
		printf("fork event\n");
		printf("... parent: %s/%d\n", thread__comm_str(parent), parent->tid);
		printf("...  child: %s/%d\n", thread__comm_str(child), child->tid);
	}

	register_pid(sched, parent->tid, thread__comm_str(parent));
	register_pid(sched, child->tid, thread__comm_str(child));
out_put:
	thread__put(child);
	thread__put(parent);
	return 0;
}

struct sort_dimension {
	const char		*name;
	sort_fn_t		cmp;
	struct list_head	list;
};

/*
 * handle runtime stats saved per thread
 */
static struct thread_runtime *thread__init_runtime(struct thread *thread)
{
	struct thread_runtime *r;

	r = zalloc(sizeof(struct thread_runtime));
	if (!r)
		return NULL;

	init_stats(&r->run_stats);
	thread__set_priv(thread, r);

	return r;
}

static struct thread_runtime *thread__get_runtime(struct thread *thread)
{
	struct thread_runtime *tr;

	tr = thread__priv(thread);
	if (tr == NULL) {
		tr = thread__init_runtime(thread);
		if (tr == NULL)
			pr_debug("Failed to malloc memory for runtime data.\n");
	}

	return tr;
}

static int
thread_lat_cmp(struct list_head *list, struct work_atoms *l, struct work_atoms *r)
{
	struct sort_dimension *sort;
	int ret = 0;

	BUG_ON(list_empty(list));

	list_for_each_entry(sort, list, list) {
		ret = sort->cmp(l, r);
		if (ret)
			return ret;
	}

	return ret;
}

static struct work_atoms *
thread_atoms_search(struct rb_root_cached *root, struct thread *thread,
			 struct list_head *sort_list)
{
	struct rb_node *node = root->rb_root.rb_node;
	struct work_atoms key = { .thread = thread };

	while (node) {
		struct work_atoms *atoms;
		int cmp;

		atoms = container_of(node, struct work_atoms, node);

		cmp = thread_lat_cmp(sort_list, &key, atoms);
		if (cmp > 0)
			node = node->rb_left;
		else if (cmp < 0)
			node = node->rb_right;
		else {
			BUG_ON(thread != atoms->thread);
			return atoms;
		}
	}
	return NULL;
}

static void
__thread_latency_insert(struct rb_root_cached *root, struct work_atoms *data,
			 struct list_head *sort_list)
{
	struct rb_node **new = &(root->rb_root.rb_node), *parent = NULL;
	bool leftmost = true;

	while (*new) {
		struct work_atoms *this;
		int cmp;

		this = container_of(*new, struct work_atoms, node);
		parent = *new;

		cmp = thread_lat_cmp(sort_list, data, this);

		if (cmp > 0)
			new = &((*new)->rb_left);
		else {
			new = &((*new)->rb_right);
			leftmost = false;
		}
	}

	rb_link_node(&data->node, parent, new);
	rb_insert_color_cached(&data->node, root, leftmost);
}
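
/*
 * Illustrative note (added for exposition, not part of the upstream
 * source): the latency view keeps one work_atoms node per thread in a
 * cached rbtree ordered by the active sort keys (cmp_pid while samples
 * are being collected). The leftmost flag maintained above lets
 * rb_insert_color_cached() keep an O(1) pointer to the smallest node,
 * so the output pass can walk the tree in order starting from
 * rb_first_cached().
 */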

static int thread_atoms_insert(struct perf_sched *sched, struct thread *thread)
{
	struct work_atoms *atoms = zalloc(sizeof(*atoms));
	if (!atoms) {
		pr_err("No memory at %s\n", __func__);
		return -1;
	}

	atoms->thread = thread__get(thread);
	INIT_LIST_HEAD(&atoms->work_list);
	__thread_latency_insert(&sched->atom_root, atoms, &sched->cmp_pid);
	return 0;
}

static char sched_out_state(u64 prev_state)
{
	const char *str = TASK_STATE_TO_CHAR_STR;

	return str[prev_state];
}

static int
add_sched_out_event(struct work_atoms *atoms,
		    char run_state,
		    u64 timestamp)
{
	struct work_atom *atom = zalloc(sizeof(*atom));
	if (!atom) {
		pr_err("No memory at %s", __func__);
		return -1;
	}

	atom->sched_out_time = timestamp;

	if (run_state == 'R') {
		atom->state = THREAD_WAIT_CPU;
		atom->wake_up_time = atom->sched_out_time;
	}

	list_add_tail(&atom->list, &atoms->work_list);
	return 0;
}

static void
add_runtime_event(struct work_atoms *atoms, u64 delta,
		  u64 timestamp __maybe_unused)
{
	struct work_atom *atom;

	BUG_ON(list_empty(&atoms->work_list));

	atom = list_entry(atoms->work_list.prev, struct work_atom, list);

	atom->runtime += delta;
	atoms->total_runtime += delta;
}

static void
add_sched_in_event(struct work_atoms *atoms, u64 timestamp)
{
	struct work_atom *atom;
	u64 delta;

	if (list_empty(&atoms->work_list))
		return;

	atom = list_entry(atoms->work_list.prev, struct work_atom, list);

	if (atom->state != THREAD_WAIT_CPU)
		return;

	if (timestamp < atom->wake_up_time) {
		atom->state = THREAD_IGNORE;
		return;
	}

	atom->state = THREAD_SCHED_IN;
	atom->sched_in_time = timestamp;

	delta = atom->sched_in_time - atom->wake_up_time;
	atoms->total_lat += delta;
	if (delta > atoms->max_lat) {
		atoms->max_lat = delta;
		atoms->max_lat_start = atom->wake_up_time;
		atoms->max_lat_end = timestamp;
	}
	atoms->nb_atoms++;
}
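
/*
 * Illustrative note (added for exposition, not part of the upstream
 * source): this is where "scheduling latency" gets its meaning in
 * `perf sched latency`: per atom it is sched_in_time - wake_up_time,
 * the time a runnable task spent waiting for a CPU. total_lat / nb_atoms
 * yields the average printed per thread, and max_lat together with its
 * [max_lat_start, max_lat_end] window pinpoints the worst single delay.
 */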

static int latency_switch_event(struct perf_sched *sched,
				struct evsel *evsel,
				struct perf_sample *sample,
				struct machine *machine)
{
	const u32 prev_pid = evsel__intval(evsel, sample, "prev_pid"),
		  next_pid = evsel__intval(evsel, sample, "next_pid");
	const u64 prev_state = evsel__intval(evsel, sample, "prev_state");
	struct work_atoms *out_events, *in_events;
	struct thread *sched_out, *sched_in;
	u64 timestamp0, timestamp = sample->time;
	int cpu = sample->cpu, err = -1;
	s64 delta;

	BUG_ON(cpu >= MAX_CPUS || cpu < 0);

	timestamp0 = sched->cpu_last_switched[cpu];
	sched->cpu_last_switched[cpu] = timestamp;
	if (timestamp0)
		delta = timestamp - timestamp0;
	else
		delta = 0;

	if (delta < 0) {
		pr_err("hm, delta: %" PRId64 " < 0 ?\n", delta);
		return -1;
	}

	sched_out = machine__findnew_thread(machine, -1, prev_pid);
	sched_in = machine__findnew_thread(machine, -1, next_pid);
	if (sched_out == NULL || sched_in == NULL)
		goto out_put;

	out_events = thread_atoms_search(&sched->atom_root, sched_out, &sched->cmp_pid);
	if (!out_events) {
		if (thread_atoms_insert(sched, sched_out))
			goto out_put;
		out_events = thread_atoms_search(&sched->atom_root, sched_out, &sched->cmp_pid);
		if (!out_events) {
			pr_err("out-event: Internal tree error");
			goto out_put;
		}
	}
	if (add_sched_out_event(out_events, sched_out_state(prev_state), timestamp))
		return -1;

	in_events = thread_atoms_search(&sched->atom_root, sched_in, &sched->cmp_pid);
	if (!in_events) {
		if (thread_atoms_insert(sched, sched_in))
			goto out_put;
		in_events = thread_atoms_search(&sched->atom_root, sched_in, &sched->cmp_pid);
		if (!in_events) {
			pr_err("in-event: Internal tree error");
			goto out_put;
		}
		/*
		 * The task that came in has not been heard about yet,
		 * so add an initial atom in runnable state:
		 */
1165*4882a593Smuzhiyun 		if (add_sched_out_event(in_events, 'R', timestamp))
1166*4882a593Smuzhiyun 			goto out_put;
1167*4882a593Smuzhiyun 	}
1168*4882a593Smuzhiyun 	add_sched_in_event(in_events, timestamp);
1169*4882a593Smuzhiyun 	err = 0;
1170*4882a593Smuzhiyun out_put:
1171*4882a593Smuzhiyun 	thread__put(sched_out);
1172*4882a593Smuzhiyun 	thread__put(sched_in);
1173*4882a593Smuzhiyun 	return err;
1174*4882a593Smuzhiyun }
1175*4882a593Smuzhiyun 
latency_runtime_event(struct perf_sched * sched,struct evsel * evsel,struct perf_sample * sample,struct machine * machine)1176*4882a593Smuzhiyun static int latency_runtime_event(struct perf_sched *sched,
1177*4882a593Smuzhiyun 				 struct evsel *evsel,
1178*4882a593Smuzhiyun 				 struct perf_sample *sample,
1179*4882a593Smuzhiyun 				 struct machine *machine)
1180*4882a593Smuzhiyun {
1181*4882a593Smuzhiyun 	const u32 pid	   = evsel__intval(evsel, sample, "pid");
1182*4882a593Smuzhiyun 	const u64 runtime  = evsel__intval(evsel, sample, "runtime");
1183*4882a593Smuzhiyun 	struct thread *thread = machine__findnew_thread(machine, -1, pid);
1184*4882a593Smuzhiyun 	struct work_atoms *atoms = thread_atoms_search(&sched->atom_root, thread, &sched->cmp_pid);
1185*4882a593Smuzhiyun 	u64 timestamp = sample->time;
1186*4882a593Smuzhiyun 	int cpu = sample->cpu, err = -1;
1187*4882a593Smuzhiyun 
1188*4882a593Smuzhiyun 	if (thread == NULL)
1189*4882a593Smuzhiyun 		return -1;
1190*4882a593Smuzhiyun 
1191*4882a593Smuzhiyun 	BUG_ON(cpu >= MAX_CPUS || cpu < 0);
1192*4882a593Smuzhiyun 	if (!atoms) {
1193*4882a593Smuzhiyun 		if (thread_atoms_insert(sched, thread))
1194*4882a593Smuzhiyun 			goto out_put;
1195*4882a593Smuzhiyun 		atoms = thread_atoms_search(&sched->atom_root, thread, &sched->cmp_pid);
1196*4882a593Smuzhiyun 		if (!atoms) {
1197*4882a593Smuzhiyun 			pr_err("runtime-event: Internal tree error\n");
1198*4882a593Smuzhiyun 			goto out_put;
1199*4882a593Smuzhiyun 		}
1200*4882a593Smuzhiyun 		if (add_sched_out_event(atoms, 'R', timestamp))
1201*4882a593Smuzhiyun 			goto out_put;
1202*4882a593Smuzhiyun 	}
1203*4882a593Smuzhiyun 
1204*4882a593Smuzhiyun 	add_runtime_event(atoms, runtime, timestamp);
1205*4882a593Smuzhiyun 	err = 0;
1206*4882a593Smuzhiyun out_put:
1207*4882a593Smuzhiyun 	thread__put(thread);
1208*4882a593Smuzhiyun 	return err;
1209*4882a593Smuzhiyun }
1210*4882a593Smuzhiyun 
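/*
 * Handler for sched:sched_wakeup and sched:sched_wakeup_new. It stamps
 * the wakee's most recent atom with the wakeup time so the scheduling
 * delay (wakeup to switch-in) can be computed later; see the
 * in-function comment for why wakeups of already-runnable tasks are
 * usually skipped.
 */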
1211*4882a593Smuzhiyun static int latency_wakeup_event(struct perf_sched *sched,
1212*4882a593Smuzhiyun 				struct evsel *evsel,
1213*4882a593Smuzhiyun 				struct perf_sample *sample,
1214*4882a593Smuzhiyun 				struct machine *machine)
1215*4882a593Smuzhiyun {
1216*4882a593Smuzhiyun 	const u32 pid	  = evsel__intval(evsel, sample, "pid");
1217*4882a593Smuzhiyun 	struct work_atoms *atoms;
1218*4882a593Smuzhiyun 	struct work_atom *atom;
1219*4882a593Smuzhiyun 	struct thread *wakee;
1220*4882a593Smuzhiyun 	u64 timestamp = sample->time;
1221*4882a593Smuzhiyun 	int err = -1;
1222*4882a593Smuzhiyun 
1223*4882a593Smuzhiyun 	wakee = machine__findnew_thread(machine, -1, pid);
1224*4882a593Smuzhiyun 	if (wakee == NULL)
1225*4882a593Smuzhiyun 		return -1;
1226*4882a593Smuzhiyun 	atoms = thread_atoms_search(&sched->atom_root, wakee, &sched->cmp_pid);
1227*4882a593Smuzhiyun 	if (!atoms) {
1228*4882a593Smuzhiyun 		if (thread_atoms_insert(sched, wakee))
1229*4882a593Smuzhiyun 			goto out_put;
1230*4882a593Smuzhiyun 		atoms = thread_atoms_search(&sched->atom_root, wakee, &sched->cmp_pid);
1231*4882a593Smuzhiyun 		if (!atoms) {
1232*4882a593Smuzhiyun 			pr_err("wakeup-event: Internal tree error\n");
1233*4882a593Smuzhiyun 			goto out_put;
1234*4882a593Smuzhiyun 		}
1235*4882a593Smuzhiyun 		if (add_sched_out_event(atoms, 'S', timestamp))
1236*4882a593Smuzhiyun 			goto out_put;
1237*4882a593Smuzhiyun 	}
1238*4882a593Smuzhiyun 
1239*4882a593Smuzhiyun 	BUG_ON(list_empty(&atoms->work_list));
1240*4882a593Smuzhiyun 
1241*4882a593Smuzhiyun 	atom = list_entry(atoms->work_list.prev, struct work_atom, list);
1242*4882a593Smuzhiyun 
1243*4882a593Smuzhiyun 	/*
1244*4882a593Smuzhiyun 	 * A wakeup event is not guaranteed to occur while the task
1245*4882a593Smuzhiyun 	 * is off the run queue; it may also fire while the task is
1246*4882a593Smuzhiyun 	 * still on the run queue and merely change ->state to
1247*4882a593Smuzhiyun 	 * TASK_RUNNING, so we must not set ->wake_up_time when
1248*4882a593Smuzhiyun 	 * waking a task that is already on the run queue.
1249*4882a593Smuzhiyun 	 *
1250*4882a593Smuzhiyun 	 * However, you WILL be missing events if you've recorded
1251*4882a593Smuzhiyun 	 * only one CPU, or are looking at only one, so don't skip
1252*4882a593Smuzhiyun 	 * in that case.
1253*4882a593Smuzhiyun 	 */
1254*4882a593Smuzhiyun 	if (sched->profile_cpu == -1 && atom->state != THREAD_SLEEPING)
1255*4882a593Smuzhiyun 		goto out_ok;
1256*4882a593Smuzhiyun 
1257*4882a593Smuzhiyun 	sched->nr_timestamps++;
1258*4882a593Smuzhiyun 	if (atom->sched_out_time > timestamp) {
1259*4882a593Smuzhiyun 		sched->nr_unordered_timestamps++;
1260*4882a593Smuzhiyun 		goto out_ok;
1261*4882a593Smuzhiyun 	}
1262*4882a593Smuzhiyun 
1263*4882a593Smuzhiyun 	atom->state = THREAD_WAIT_CPU;
1264*4882a593Smuzhiyun 	atom->wake_up_time = timestamp;
1265*4882a593Smuzhiyun out_ok:
1266*4882a593Smuzhiyun 	err = 0;
1267*4882a593Smuzhiyun out_put:
1268*4882a593Smuzhiyun 	thread__put(wakee);
1269*4882a593Smuzhiyun 	return err;
1270*4882a593Smuzhiyun }
1271*4882a593Smuzhiyun 
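/*
 * Handler for sched:sched_migrate_task. Migration only matters when a
 * single CPU is profiled: a task migrating onto that CPU may never
 * show a wakeup there, so one is synthesized by setting the atom's
 * sched-in/out and wake-up times to the migration timestamp.
 */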
1272*4882a593Smuzhiyun static int latency_migrate_task_event(struct perf_sched *sched,
1273*4882a593Smuzhiyun 				      struct evsel *evsel,
1274*4882a593Smuzhiyun 				      struct perf_sample *sample,
1275*4882a593Smuzhiyun 				      struct machine *machine)
1276*4882a593Smuzhiyun {
1277*4882a593Smuzhiyun 	const u32 pid = evsel__intval(evsel, sample, "pid");
1278*4882a593Smuzhiyun 	u64 timestamp = sample->time;
1279*4882a593Smuzhiyun 	struct work_atoms *atoms;
1280*4882a593Smuzhiyun 	struct work_atom *atom;
1281*4882a593Smuzhiyun 	struct thread *migrant;
1282*4882a593Smuzhiyun 	int err = -1;
1283*4882a593Smuzhiyun 
1284*4882a593Smuzhiyun 	/*
1285*4882a593Smuzhiyun 	 * Only need to worry about migration when profiling one CPU.
1286*4882a593Smuzhiyun 	 */
1287*4882a593Smuzhiyun 	if (sched->profile_cpu == -1)
1288*4882a593Smuzhiyun 		return 0;
1289*4882a593Smuzhiyun 
1290*4882a593Smuzhiyun 	migrant = machine__findnew_thread(machine, -1, pid);
1291*4882a593Smuzhiyun 	if (migrant == NULL)
1292*4882a593Smuzhiyun 		return -1;
1293*4882a593Smuzhiyun 	atoms = thread_atoms_search(&sched->atom_root, migrant, &sched->cmp_pid);
1294*4882a593Smuzhiyun 	if (!atoms) {
1295*4882a593Smuzhiyun 		if (thread_atoms_insert(sched, migrant))
1296*4882a593Smuzhiyun 			goto out_put;
1297*4882a593Smuzhiyun 		register_pid(sched, migrant->tid, thread__comm_str(migrant));
1298*4882a593Smuzhiyun 		atoms = thread_atoms_search(&sched->atom_root, migrant, &sched->cmp_pid);
1299*4882a593Smuzhiyun 		if (!atoms) {
1300*4882a593Smuzhiyun 			pr_err("migration-event: Internal tree error\n");
1301*4882a593Smuzhiyun 			goto out_put;
1302*4882a593Smuzhiyun 		}
1303*4882a593Smuzhiyun 		if (add_sched_out_event(atoms, 'R', timestamp))
1304*4882a593Smuzhiyun 			goto out_put;
1305*4882a593Smuzhiyun 	}
1306*4882a593Smuzhiyun 
1307*4882a593Smuzhiyun 	BUG_ON(list_empty(&atoms->work_list));
1308*4882a593Smuzhiyun 
1309*4882a593Smuzhiyun 	atom = list_entry(atoms->work_list.prev, struct work_atom, list);
1310*4882a593Smuzhiyun 	atom->sched_in_time = atom->sched_out_time = atom->wake_up_time = timestamp;
1311*4882a593Smuzhiyun 
1312*4882a593Smuzhiyun 	sched->nr_timestamps++;
1313*4882a593Smuzhiyun 
1314*4882a593Smuzhiyun 	if (atom->sched_out_time > timestamp)
1315*4882a593Smuzhiyun 		sched->nr_unordered_timestamps++;
1316*4882a593Smuzhiyun 	err = 0;
1317*4882a593Smuzhiyun out_put:
1318*4882a593Smuzhiyun 	thread__put(migrant);
1319*4882a593Smuzhiyun 	return err;
1320*4882a593Smuzhiyun }
1321*4882a593Smuzhiyun 
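/*
 * Print one row of the 'perf sched latency' table for a thread (or a
 * set of merged threads): total runtime, switch count, average and
 * maximum scheduling delay, and the time window of that maximum.
 * Idle ("swapper") threads are skipped.
 */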
1322*4882a593Smuzhiyun static void output_lat_thread(struct perf_sched *sched, struct work_atoms *work_list)
1323*4882a593Smuzhiyun {
1324*4882a593Smuzhiyun 	int i;
1325*4882a593Smuzhiyun 	int ret;
1326*4882a593Smuzhiyun 	u64 avg;
1327*4882a593Smuzhiyun 	char max_lat_start[32], max_lat_end[32];
1328*4882a593Smuzhiyun 
1329*4882a593Smuzhiyun 	if (!work_list->nb_atoms)
1330*4882a593Smuzhiyun 		return;
1331*4882a593Smuzhiyun 	/*
1332*4882a593Smuzhiyun 	 * Ignore idle threads:
1333*4882a593Smuzhiyun 	 */
1334*4882a593Smuzhiyun 	if (!strcmp(thread__comm_str(work_list->thread), "swapper"))
1335*4882a593Smuzhiyun 		return;
1336*4882a593Smuzhiyun 
1337*4882a593Smuzhiyun 	sched->all_runtime += work_list->total_runtime;
1338*4882a593Smuzhiyun 	sched->all_count   += work_list->nb_atoms;
1339*4882a593Smuzhiyun 
1340*4882a593Smuzhiyun 	if (work_list->num_merged > 1)
1341*4882a593Smuzhiyun 		ret = printf("  %s:(%d) ", thread__comm_str(work_list->thread), work_list->num_merged);
1342*4882a593Smuzhiyun 	else
1343*4882a593Smuzhiyun 		ret = printf("  %s:%d ", thread__comm_str(work_list->thread), work_list->thread->tid);
1344*4882a593Smuzhiyun 
1345*4882a593Smuzhiyun 	for (i = 0; i < 24 - ret; i++)
1346*4882a593Smuzhiyun 		printf(" ");
1347*4882a593Smuzhiyun 
1348*4882a593Smuzhiyun 	avg = work_list->total_lat / work_list->nb_atoms;
1349*4882a593Smuzhiyun 	timestamp__scnprintf_usec(work_list->max_lat_start, max_lat_start, sizeof(max_lat_start));
1350*4882a593Smuzhiyun 	timestamp__scnprintf_usec(work_list->max_lat_end, max_lat_end, sizeof(max_lat_end));
1351*4882a593Smuzhiyun 
1352*4882a593Smuzhiyun 	printf("|%11.3f ms |%9" PRIu64 " | avg:%8.3f ms | max:%8.3f ms | max start: %12s s | max end: %12s s\n",
1353*4882a593Smuzhiyun 	      (double)work_list->total_runtime / NSEC_PER_MSEC,
1354*4882a593Smuzhiyun 		 work_list->nb_atoms, (double)avg / NSEC_PER_MSEC,
1355*4882a593Smuzhiyun 		 (double)work_list->max_lat / NSEC_PER_MSEC,
1356*4882a593Smuzhiyun 		 max_lat_start, max_lat_end);
1357*4882a593Smuzhiyun }
1358*4882a593Smuzhiyun 
1359*4882a593Smuzhiyun static int pid_cmp(struct work_atoms *l, struct work_atoms *r)
1360*4882a593Smuzhiyun {
1361*4882a593Smuzhiyun 	if (l->thread == r->thread)
1362*4882a593Smuzhiyun 		return 0;
1363*4882a593Smuzhiyun 	if (l->thread->tid < r->thread->tid)
1364*4882a593Smuzhiyun 		return -1;
1365*4882a593Smuzhiyun 	if (l->thread->tid > r->thread->tid)
1366*4882a593Smuzhiyun 		return 1;
1367*4882a593Smuzhiyun 	return (int)(l->thread - r->thread);
1368*4882a593Smuzhiyun }
1369*4882a593Smuzhiyun 
1370*4882a593Smuzhiyun static int avg_cmp(struct work_atoms *l, struct work_atoms *r)
1371*4882a593Smuzhiyun {
1372*4882a593Smuzhiyun 	u64 avgl, avgr;
1373*4882a593Smuzhiyun 
1374*4882a593Smuzhiyun 	if (!l->nb_atoms)
1375*4882a593Smuzhiyun 		return -1;
1376*4882a593Smuzhiyun 
1377*4882a593Smuzhiyun 	if (!r->nb_atoms)
1378*4882a593Smuzhiyun 		return 1;
1379*4882a593Smuzhiyun 
1380*4882a593Smuzhiyun 	avgl = l->total_lat / l->nb_atoms;
1381*4882a593Smuzhiyun 	avgr = r->total_lat / r->nb_atoms;
1382*4882a593Smuzhiyun 
1383*4882a593Smuzhiyun 	if (avgl < avgr)
1384*4882a593Smuzhiyun 		return -1;
1385*4882a593Smuzhiyun 	if (avgl > avgr)
1386*4882a593Smuzhiyun 		return 1;
1387*4882a593Smuzhiyun 
1388*4882a593Smuzhiyun 	return 0;
1389*4882a593Smuzhiyun }
1390*4882a593Smuzhiyun 
1391*4882a593Smuzhiyun static int max_cmp(struct work_atoms *l, struct work_atoms *r)
1392*4882a593Smuzhiyun {
1393*4882a593Smuzhiyun 	if (l->max_lat < r->max_lat)
1394*4882a593Smuzhiyun 		return -1;
1395*4882a593Smuzhiyun 	if (l->max_lat > r->max_lat)
1396*4882a593Smuzhiyun 		return 1;
1397*4882a593Smuzhiyun 
1398*4882a593Smuzhiyun 	return 0;
1399*4882a593Smuzhiyun }
1400*4882a593Smuzhiyun 
1401*4882a593Smuzhiyun static int switch_cmp(struct work_atoms *l, struct work_atoms *r)
1402*4882a593Smuzhiyun {
1403*4882a593Smuzhiyun 	if (l->nb_atoms < r->nb_atoms)
1404*4882a593Smuzhiyun 		return -1;
1405*4882a593Smuzhiyun 	if (l->nb_atoms > r->nb_atoms)
1406*4882a593Smuzhiyun 		return 1;
1407*4882a593Smuzhiyun 
1408*4882a593Smuzhiyun 	return 0;
1409*4882a593Smuzhiyun }
1410*4882a593Smuzhiyun 
1411*4882a593Smuzhiyun static int runtime_cmp(struct work_atoms *l, struct work_atoms *r)
1412*4882a593Smuzhiyun {
1413*4882a593Smuzhiyun 	if (l->total_runtime < r->total_runtime)
1414*4882a593Smuzhiyun 		return -1;
1415*4882a593Smuzhiyun 	if (l->total_runtime > r->total_runtime)
1416*4882a593Smuzhiyun 		return 1;
1417*4882a593Smuzhiyun 
1418*4882a593Smuzhiyun 	return 0;
1419*4882a593Smuzhiyun }
1420*4882a593Smuzhiyun 
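/*
 * Resolve one --sort token for 'perf sched latency' into a comparator
 * from the table above and queue it on the sort list; for example,
 * 'perf sched latency --sort max,avg' orders threads by maximum delay
 * first and average delay second.
 */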
1421*4882a593Smuzhiyun static int sort_dimension__add(const char *tok, struct list_head *list)
1422*4882a593Smuzhiyun {
1423*4882a593Smuzhiyun 	size_t i;
1424*4882a593Smuzhiyun 	static struct sort_dimension avg_sort_dimension = {
1425*4882a593Smuzhiyun 		.name = "avg",
1426*4882a593Smuzhiyun 		.cmp  = avg_cmp,
1427*4882a593Smuzhiyun 	};
1428*4882a593Smuzhiyun 	static struct sort_dimension max_sort_dimension = {
1429*4882a593Smuzhiyun 		.name = "max",
1430*4882a593Smuzhiyun 		.cmp  = max_cmp,
1431*4882a593Smuzhiyun 	};
1432*4882a593Smuzhiyun 	static struct sort_dimension pid_sort_dimension = {
1433*4882a593Smuzhiyun 		.name = "pid",
1434*4882a593Smuzhiyun 		.cmp  = pid_cmp,
1435*4882a593Smuzhiyun 	};
1436*4882a593Smuzhiyun 	static struct sort_dimension runtime_sort_dimension = {
1437*4882a593Smuzhiyun 		.name = "runtime",
1438*4882a593Smuzhiyun 		.cmp  = runtime_cmp,
1439*4882a593Smuzhiyun 	};
1440*4882a593Smuzhiyun 	static struct sort_dimension switch_sort_dimension = {
1441*4882a593Smuzhiyun 		.name = "switch",
1442*4882a593Smuzhiyun 		.cmp  = switch_cmp,
1443*4882a593Smuzhiyun 	};
1444*4882a593Smuzhiyun 	struct sort_dimension *available_sorts[] = {
1445*4882a593Smuzhiyun 		&pid_sort_dimension,
1446*4882a593Smuzhiyun 		&avg_sort_dimension,
1447*4882a593Smuzhiyun 		&max_sort_dimension,
1448*4882a593Smuzhiyun 		&switch_sort_dimension,
1449*4882a593Smuzhiyun 		&runtime_sort_dimension,
1450*4882a593Smuzhiyun 	};
1451*4882a593Smuzhiyun 
1452*4882a593Smuzhiyun 	for (i = 0; i < ARRAY_SIZE(available_sorts); i++) {
1453*4882a593Smuzhiyun 		if (!strcmp(available_sorts[i]->name, tok)) {
1454*4882a593Smuzhiyun 			list_add_tail(&available_sorts[i]->list, list);
1455*4882a593Smuzhiyun 
1456*4882a593Smuzhiyun 			return 0;
1457*4882a593Smuzhiyun 		}
1458*4882a593Smuzhiyun 	}
1459*4882a593Smuzhiyun 
1460*4882a593Smuzhiyun 	return -1;
1461*4882a593Smuzhiyun }
1462*4882a593Smuzhiyun 
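/*
 * Drain every work_atoms node out of atom_root (and then out of
 * merged_atom_root) and re-insert it into sorted_atom_root, ordered
 * by the comparator chain assembled from --sort.
 */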
1463*4882a593Smuzhiyun static void perf_sched__sort_lat(struct perf_sched *sched)
1464*4882a593Smuzhiyun {
1465*4882a593Smuzhiyun 	struct rb_node *node;
1466*4882a593Smuzhiyun 	struct rb_root_cached *root = &sched->atom_root;
1467*4882a593Smuzhiyun again:
1468*4882a593Smuzhiyun 	for (;;) {
1469*4882a593Smuzhiyun 		struct work_atoms *data;
1470*4882a593Smuzhiyun 		node = rb_first_cached(root);
1471*4882a593Smuzhiyun 		if (!node)
1472*4882a593Smuzhiyun 			break;
1473*4882a593Smuzhiyun 
1474*4882a593Smuzhiyun 		rb_erase_cached(node, root);
1475*4882a593Smuzhiyun 		data = rb_entry(node, struct work_atoms, node);
1476*4882a593Smuzhiyun 		__thread_latency_insert(&sched->sorted_atom_root, data, &sched->sort_list);
1477*4882a593Smuzhiyun 	}
1478*4882a593Smuzhiyun 	if (root == &sched->atom_root) {
1479*4882a593Smuzhiyun 		root = &sched->merged_atom_root;
1480*4882a593Smuzhiyun 		goto again;
1481*4882a593Smuzhiyun 	}
1482*4882a593Smuzhiyun }
1483*4882a593Smuzhiyun 
1484*4882a593Smuzhiyun static int process_sched_wakeup_event(struct perf_tool *tool,
1485*4882a593Smuzhiyun 				      struct evsel *evsel,
1486*4882a593Smuzhiyun 				      struct perf_sample *sample,
1487*4882a593Smuzhiyun 				      struct machine *machine)
1488*4882a593Smuzhiyun {
1489*4882a593Smuzhiyun 	struct perf_sched *sched = container_of(tool, struct perf_sched, tool);
1490*4882a593Smuzhiyun 
1491*4882a593Smuzhiyun 	if (sched->tp_handler->wakeup_event)
1492*4882a593Smuzhiyun 		return sched->tp_handler->wakeup_event(sched, evsel, sample, machine);
1493*4882a593Smuzhiyun 
1494*4882a593Smuzhiyun 	return 0;
1495*4882a593Smuzhiyun }
1496*4882a593Smuzhiyun 
1497*4882a593Smuzhiyun union map_priv {
1498*4882a593Smuzhiyun 	void	*ptr;
1499*4882a593Smuzhiyun 	bool	 color;
1500*4882a593Smuzhiyun };
1501*4882a593Smuzhiyun 
1502*4882a593Smuzhiyun static bool thread__has_color(struct thread *thread)
1503*4882a593Smuzhiyun {
1504*4882a593Smuzhiyun 	union map_priv priv = {
1505*4882a593Smuzhiyun 		.ptr = thread__priv(thread),
1506*4882a593Smuzhiyun 	};
1507*4882a593Smuzhiyun 
1508*4882a593Smuzhiyun 	return priv.color;
1509*4882a593Smuzhiyun }
1510*4882a593Smuzhiyun 
1511*4882a593Smuzhiyun static struct thread*
1512*4882a593Smuzhiyun map__findnew_thread(struct perf_sched *sched, struct machine *machine, pid_t pid, pid_t tid)
1513*4882a593Smuzhiyun {
1514*4882a593Smuzhiyun 	struct thread *thread = machine__findnew_thread(machine, pid, tid);
1515*4882a593Smuzhiyun 	union map_priv priv = {
1516*4882a593Smuzhiyun 		.color = false,
1517*4882a593Smuzhiyun 	};
1518*4882a593Smuzhiyun 
1519*4882a593Smuzhiyun 	if (!sched->map.color_pids || !thread || thread__priv(thread))
1520*4882a593Smuzhiyun 		return thread;
1521*4882a593Smuzhiyun 
1522*4882a593Smuzhiyun 	if (thread_map__has(sched->map.color_pids, tid))
1523*4882a593Smuzhiyun 		priv.color = true;
1524*4882a593Smuzhiyun 
1525*4882a593Smuzhiyun 	thread__set_priv(thread, priv.ptr);
1526*4882a593Smuzhiyun 	return thread;
1527*4882a593Smuzhiyun }
1528*4882a593Smuzhiyun 
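/*
 * Handler behind 'perf sched map': one output row per sched_switch,
 * one column per CPU. Each running thread is shown by a two-character
 * shortname (e.g. "A0"), '.' marks the idle task and '*' marks the
 * CPU where this switch happened. In compacted mode (sched->map.comp)
 * only CPUs that have actually appeared get a column.
 */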
1529*4882a593Smuzhiyun static int map_switch_event(struct perf_sched *sched, struct evsel *evsel,
1530*4882a593Smuzhiyun 			    struct perf_sample *sample, struct machine *machine)
1531*4882a593Smuzhiyun {
1532*4882a593Smuzhiyun 	const u32 next_pid = evsel__intval(evsel, sample, "next_pid");
1533*4882a593Smuzhiyun 	struct thread *sched_in;
1534*4882a593Smuzhiyun 	struct thread_runtime *tr;
1535*4882a593Smuzhiyun 	int new_shortname;
1536*4882a593Smuzhiyun 	u64 timestamp0, timestamp = sample->time;
1537*4882a593Smuzhiyun 	s64 delta;
1538*4882a593Smuzhiyun 	int i, this_cpu = sample->cpu;
1539*4882a593Smuzhiyun 	int cpus_nr;
1540*4882a593Smuzhiyun 	bool new_cpu = false;
1541*4882a593Smuzhiyun 	const char *color = PERF_COLOR_NORMAL;
1542*4882a593Smuzhiyun 	char stimestamp[32];
1543*4882a593Smuzhiyun 
1544*4882a593Smuzhiyun 	BUG_ON(this_cpu >= MAX_CPUS || this_cpu < 0);
1545*4882a593Smuzhiyun 
1546*4882a593Smuzhiyun 	if (this_cpu > sched->max_cpu)
1547*4882a593Smuzhiyun 		sched->max_cpu = this_cpu;
1548*4882a593Smuzhiyun 
1549*4882a593Smuzhiyun 	if (sched->map.comp) {
1550*4882a593Smuzhiyun 		cpus_nr = bitmap_weight(sched->map.comp_cpus_mask, MAX_CPUS);
1551*4882a593Smuzhiyun 		if (!test_and_set_bit(this_cpu, sched->map.comp_cpus_mask)) {
1552*4882a593Smuzhiyun 			sched->map.comp_cpus[cpus_nr++] = this_cpu;
1553*4882a593Smuzhiyun 			new_cpu = true;
1554*4882a593Smuzhiyun 		}
1555*4882a593Smuzhiyun 	} else
1556*4882a593Smuzhiyun 		cpus_nr = sched->max_cpu;
1557*4882a593Smuzhiyun 
1558*4882a593Smuzhiyun 	timestamp0 = sched->cpu_last_switched[this_cpu];
1559*4882a593Smuzhiyun 	sched->cpu_last_switched[this_cpu] = timestamp;
1560*4882a593Smuzhiyun 	if (timestamp0)
1561*4882a593Smuzhiyun 		delta = timestamp - timestamp0;
1562*4882a593Smuzhiyun 	else
1563*4882a593Smuzhiyun 		delta = 0;
1564*4882a593Smuzhiyun 
1565*4882a593Smuzhiyun 	if (delta < 0) {
1566*4882a593Smuzhiyun 		pr_err("hm, delta: %" PRId64 " < 0 ?\n", delta);
1567*4882a593Smuzhiyun 		return -1;
1568*4882a593Smuzhiyun 	}
1569*4882a593Smuzhiyun 
1570*4882a593Smuzhiyun 	sched_in = map__findnew_thread(sched, machine, -1, next_pid);
1571*4882a593Smuzhiyun 	if (sched_in == NULL)
1572*4882a593Smuzhiyun 		return -1;
1573*4882a593Smuzhiyun 
1574*4882a593Smuzhiyun 	tr = thread__get_runtime(sched_in);
1575*4882a593Smuzhiyun 	if (tr == NULL) {
1576*4882a593Smuzhiyun 		thread__put(sched_in);
1577*4882a593Smuzhiyun 		return -1;
1578*4882a593Smuzhiyun 	}
1579*4882a593Smuzhiyun 
1580*4882a593Smuzhiyun 	sched->curr_thread[this_cpu] = thread__get(sched_in);
1581*4882a593Smuzhiyun 
1582*4882a593Smuzhiyun 	printf("  ");
1583*4882a593Smuzhiyun 
1584*4882a593Smuzhiyun 	new_shortname = 0;
1585*4882a593Smuzhiyun 	if (!tr->shortname[0]) {
1586*4882a593Smuzhiyun 		if (!strcmp(thread__comm_str(sched_in), "swapper")) {
1587*4882a593Smuzhiyun 			/*
1588*4882a593Smuzhiyun 			 * Don't allocate a letter-number for swapper:0
1589*4882a593Smuzhiyun 			 * as a shortname. Instead, we use '.' for it.
1590*4882a593Smuzhiyun 			 */
1591*4882a593Smuzhiyun 			tr->shortname[0] = '.';
1592*4882a593Smuzhiyun 			tr->shortname[1] = ' ';
1593*4882a593Smuzhiyun 		} else {
1594*4882a593Smuzhiyun 			tr->shortname[0] = sched->next_shortname1;
1595*4882a593Smuzhiyun 			tr->shortname[1] = sched->next_shortname2;
1596*4882a593Smuzhiyun 
1597*4882a593Smuzhiyun 			if (sched->next_shortname1 < 'Z') {
1598*4882a593Smuzhiyun 				sched->next_shortname1++;
1599*4882a593Smuzhiyun 			} else {
1600*4882a593Smuzhiyun 				sched->next_shortname1 = 'A';
1601*4882a593Smuzhiyun 				if (sched->next_shortname2 < '9')
1602*4882a593Smuzhiyun 					sched->next_shortname2++;
1603*4882a593Smuzhiyun 				else
1604*4882a593Smuzhiyun 					sched->next_shortname2 = '0';
1605*4882a593Smuzhiyun 			}
1606*4882a593Smuzhiyun 		}
1607*4882a593Smuzhiyun 		new_shortname = 1;
1608*4882a593Smuzhiyun 	}
1609*4882a593Smuzhiyun 
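	/*
	 * Emit one column per CPU: '*' on the switching CPU, the
	 * shortname of whatever runs elsewhere, or blanks for an
	 * idle/unknown slot. Shortnames are handed out in sequence
	 * (A0..Z0, A1..Z9, then wrapping around).
	 */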
1610*4882a593Smuzhiyun 	for (i = 0; i < cpus_nr; i++) {
1611*4882a593Smuzhiyun 		int cpu = sched->map.comp ? sched->map.comp_cpus[i] : i;
1612*4882a593Smuzhiyun 		struct thread *curr_thread = sched->curr_thread[cpu];
1613*4882a593Smuzhiyun 		struct thread_runtime *curr_tr;
1614*4882a593Smuzhiyun 		const char *pid_color = color;
1615*4882a593Smuzhiyun 		const char *cpu_color = color;
1616*4882a593Smuzhiyun 
1617*4882a593Smuzhiyun 		if (curr_thread && thread__has_color(curr_thread))
1618*4882a593Smuzhiyun 			pid_color = COLOR_PIDS;
1619*4882a593Smuzhiyun 
1620*4882a593Smuzhiyun 		if (sched->map.cpus && !cpu_map__has(sched->map.cpus, cpu))
1621*4882a593Smuzhiyun 			continue;
1622*4882a593Smuzhiyun 
1623*4882a593Smuzhiyun 		if (sched->map.color_cpus && cpu_map__has(sched->map.color_cpus, cpu))
1624*4882a593Smuzhiyun 			cpu_color = COLOR_CPUS;
1625*4882a593Smuzhiyun 
1626*4882a593Smuzhiyun 		if (cpu != this_cpu)
1627*4882a593Smuzhiyun 			color_fprintf(stdout, color, " ");
1628*4882a593Smuzhiyun 		else
1629*4882a593Smuzhiyun 			color_fprintf(stdout, cpu_color, "*");
1630*4882a593Smuzhiyun 
1631*4882a593Smuzhiyun 		if (sched->curr_thread[cpu]) {
1632*4882a593Smuzhiyun 			curr_tr = thread__get_runtime(sched->curr_thread[cpu]);
1633*4882a593Smuzhiyun 			if (curr_tr == NULL) {
1634*4882a593Smuzhiyun 				thread__put(sched_in);
1635*4882a593Smuzhiyun 				return -1;
1636*4882a593Smuzhiyun 			}
1637*4882a593Smuzhiyun 			color_fprintf(stdout, pid_color, "%2s ", curr_tr->shortname);
1638*4882a593Smuzhiyun 		} else
1639*4882a593Smuzhiyun 			color_fprintf(stdout, color, "   ");
1640*4882a593Smuzhiyun 	}
1641*4882a593Smuzhiyun 
1642*4882a593Smuzhiyun 	if (sched->map.cpus && !cpu_map__has(sched->map.cpus, this_cpu))
1643*4882a593Smuzhiyun 		goto out;
1644*4882a593Smuzhiyun 
1645*4882a593Smuzhiyun 	timestamp__scnprintf_usec(timestamp, stimestamp, sizeof(stimestamp));
1646*4882a593Smuzhiyun 	color_fprintf(stdout, color, "  %12s secs ", stimestamp);
1647*4882a593Smuzhiyun 	if (new_shortname || tr->comm_changed || (verbose > 0 && sched_in->tid)) {
1648*4882a593Smuzhiyun 		const char *pid_color = color;
1649*4882a593Smuzhiyun 
1650*4882a593Smuzhiyun 		if (thread__has_color(sched_in))
1651*4882a593Smuzhiyun 			pid_color = COLOR_PIDS;
1652*4882a593Smuzhiyun 
1653*4882a593Smuzhiyun 		color_fprintf(stdout, pid_color, "%s => %s:%d",
1654*4882a593Smuzhiyun 		       tr->shortname, thread__comm_str(sched_in), sched_in->tid);
1655*4882a593Smuzhiyun 		tr->comm_changed = false;
1656*4882a593Smuzhiyun 	}
1657*4882a593Smuzhiyun 
1658*4882a593Smuzhiyun 	if (sched->map.comp && new_cpu)
1659*4882a593Smuzhiyun 		color_fprintf(stdout, color, " (CPU %d)", this_cpu);
1660*4882a593Smuzhiyun 
1661*4882a593Smuzhiyun out:
1662*4882a593Smuzhiyun 	color_fprintf(stdout, color, "\n");
1663*4882a593Smuzhiyun 
1664*4882a593Smuzhiyun 	thread__put(sched_in);
1665*4882a593Smuzhiyun 
1666*4882a593Smuzhiyun 	return 0;
1667*4882a593Smuzhiyun }
1668*4882a593Smuzhiyun 
1669*4882a593Smuzhiyun static int process_sched_switch_event(struct perf_tool *tool,
1670*4882a593Smuzhiyun 				      struct evsel *evsel,
1671*4882a593Smuzhiyun 				      struct perf_sample *sample,
1672*4882a593Smuzhiyun 				      struct machine *machine)
1673*4882a593Smuzhiyun {
1674*4882a593Smuzhiyun 	struct perf_sched *sched = container_of(tool, struct perf_sched, tool);
1675*4882a593Smuzhiyun 	int this_cpu = sample->cpu, err = 0;
1676*4882a593Smuzhiyun 	u32 prev_pid = evsel__intval(evsel, sample, "prev_pid"),
1677*4882a593Smuzhiyun 	    next_pid = evsel__intval(evsel, sample, "next_pid");
1678*4882a593Smuzhiyun 
1679*4882a593Smuzhiyun 	if (sched->curr_pid[this_cpu] != (u32)-1) {
1680*4882a593Smuzhiyun 		/*
1681*4882a593Smuzhiyun 		 * Are we trying to switch away a PID that is
1682*4882a593Smuzhiyun 		 * not current?
1683*4882a593Smuzhiyun 		 */
1684*4882a593Smuzhiyun 		if (sched->curr_pid[this_cpu] != prev_pid)
1685*4882a593Smuzhiyun 			sched->nr_context_switch_bugs++;
1686*4882a593Smuzhiyun 	}
1687*4882a593Smuzhiyun 
1688*4882a593Smuzhiyun 	if (sched->tp_handler->switch_event)
1689*4882a593Smuzhiyun 		err = sched->tp_handler->switch_event(sched, evsel, sample, machine);
1690*4882a593Smuzhiyun 
1691*4882a593Smuzhiyun 	sched->curr_pid[this_cpu] = next_pid;
1692*4882a593Smuzhiyun 	return err;
1693*4882a593Smuzhiyun }
1694*4882a593Smuzhiyun 
1695*4882a593Smuzhiyun static int process_sched_runtime_event(struct perf_tool *tool,
1696*4882a593Smuzhiyun 				       struct evsel *evsel,
1697*4882a593Smuzhiyun 				       struct perf_sample *sample,
1698*4882a593Smuzhiyun 				       struct machine *machine)
1699*4882a593Smuzhiyun {
1700*4882a593Smuzhiyun 	struct perf_sched *sched = container_of(tool, struct perf_sched, tool);
1701*4882a593Smuzhiyun 
1702*4882a593Smuzhiyun 	if (sched->tp_handler->runtime_event)
1703*4882a593Smuzhiyun 		return sched->tp_handler->runtime_event(sched, evsel, sample, machine);
1704*4882a593Smuzhiyun 
1705*4882a593Smuzhiyun 	return 0;
1706*4882a593Smuzhiyun }
1707*4882a593Smuzhiyun 
1708*4882a593Smuzhiyun static int perf_sched__process_fork_event(struct perf_tool *tool,
1709*4882a593Smuzhiyun 					  union perf_event *event,
1710*4882a593Smuzhiyun 					  struct perf_sample *sample,
1711*4882a593Smuzhiyun 					  struct machine *machine)
1712*4882a593Smuzhiyun {
1713*4882a593Smuzhiyun 	struct perf_sched *sched = container_of(tool, struct perf_sched, tool);
1714*4882a593Smuzhiyun 
1715*4882a593Smuzhiyun 	/* run the fork event through the perf machinery */
1716*4882a593Smuzhiyun 	perf_event__process_fork(tool, event, sample, machine);
1717*4882a593Smuzhiyun 
1718*4882a593Smuzhiyun 	/* and then run additional processing needed for this command */
1719*4882a593Smuzhiyun 	if (sched->tp_handler->fork_event)
1720*4882a593Smuzhiyun 		return sched->tp_handler->fork_event(sched, event, machine);
1721*4882a593Smuzhiyun 
1722*4882a593Smuzhiyun 	return 0;
1723*4882a593Smuzhiyun }
1724*4882a593Smuzhiyun 
1725*4882a593Smuzhiyun static int process_sched_migrate_task_event(struct perf_tool *tool,
1726*4882a593Smuzhiyun 					    struct evsel *evsel,
1727*4882a593Smuzhiyun 					    struct perf_sample *sample,
1728*4882a593Smuzhiyun 					    struct machine *machine)
1729*4882a593Smuzhiyun {
1730*4882a593Smuzhiyun 	struct perf_sched *sched = container_of(tool, struct perf_sched, tool);
1731*4882a593Smuzhiyun 
1732*4882a593Smuzhiyun 	if (sched->tp_handler->migrate_task_event)
1733*4882a593Smuzhiyun 		return sched->tp_handler->migrate_task_event(sched, evsel, sample, machine);
1734*4882a593Smuzhiyun 
1735*4882a593Smuzhiyun 	return 0;
1736*4882a593Smuzhiyun }
1737*4882a593Smuzhiyun 
1738*4882a593Smuzhiyun typedef int (*tracepoint_handler)(struct perf_tool *tool,
1739*4882a593Smuzhiyun 				  struct evsel *evsel,
1740*4882a593Smuzhiyun 				  struct perf_sample *sample,
1741*4882a593Smuzhiyun 				  struct machine *machine);
1742*4882a593Smuzhiyun 
1743*4882a593Smuzhiyun static int perf_sched__process_tracepoint_sample(struct perf_tool *tool __maybe_unused,
1744*4882a593Smuzhiyun 						 union perf_event *event __maybe_unused,
1745*4882a593Smuzhiyun 						 struct perf_sample *sample,
1746*4882a593Smuzhiyun 						 struct evsel *evsel,
1747*4882a593Smuzhiyun 						 struct machine *machine)
1748*4882a593Smuzhiyun {
1749*4882a593Smuzhiyun 	int err = 0;
1750*4882a593Smuzhiyun 
1751*4882a593Smuzhiyun 	if (evsel->handler != NULL) {
1752*4882a593Smuzhiyun 		tracepoint_handler f = evsel->handler;
1753*4882a593Smuzhiyun 		err = f(tool, evsel, sample, machine);
1754*4882a593Smuzhiyun 	}
1755*4882a593Smuzhiyun 
1756*4882a593Smuzhiyun 	return err;
1757*4882a593Smuzhiyun }
1758*4882a593Smuzhiyun 
1759*4882a593Smuzhiyun static int perf_sched__process_comm(struct perf_tool *tool __maybe_unused,
1760*4882a593Smuzhiyun 				    union perf_event *event,
1761*4882a593Smuzhiyun 				    struct perf_sample *sample,
1762*4882a593Smuzhiyun 				    struct machine *machine)
1763*4882a593Smuzhiyun {
1764*4882a593Smuzhiyun 	struct thread *thread;
1765*4882a593Smuzhiyun 	struct thread_runtime *tr;
1766*4882a593Smuzhiyun 	int err;
1767*4882a593Smuzhiyun 
1768*4882a593Smuzhiyun 	err = perf_event__process_comm(tool, event, sample, machine);
1769*4882a593Smuzhiyun 	if (err)
1770*4882a593Smuzhiyun 		return err;
1771*4882a593Smuzhiyun 
1772*4882a593Smuzhiyun 	thread = machine__find_thread(machine, sample->pid, sample->tid);
1773*4882a593Smuzhiyun 	if (!thread) {
1774*4882a593Smuzhiyun 		pr_err("Internal error: can't find thread\n");
1775*4882a593Smuzhiyun 		return -1;
1776*4882a593Smuzhiyun 	}
1777*4882a593Smuzhiyun 
1778*4882a593Smuzhiyun 	tr = thread__get_runtime(thread);
1779*4882a593Smuzhiyun 	if (tr == NULL) {
1780*4882a593Smuzhiyun 		thread__put(thread);
1781*4882a593Smuzhiyun 		return -1;
1782*4882a593Smuzhiyun 	}
1783*4882a593Smuzhiyun 
1784*4882a593Smuzhiyun 	tr->comm_changed = true;
1785*4882a593Smuzhiyun 	thread__put(thread);
1786*4882a593Smuzhiyun 
1787*4882a593Smuzhiyun 	return 0;
1788*4882a593Smuzhiyun }
1789*4882a593Smuzhiyun 
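/*
 * Replay a recorded session: bind each sched tracepoint to its handler
 * via the table below, process all events, and keep the event/lost
 * counters for the end-of-run summary. The usual flow is 'perf sched
 * record' to capture these tracepoints, then 'perf sched latency',
 * 'map' or 'timehist' to consume them here.
 */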
1790*4882a593Smuzhiyun static int perf_sched__read_events(struct perf_sched *sched)
1791*4882a593Smuzhiyun {
1792*4882a593Smuzhiyun 	const struct evsel_str_handler handlers[] = {
1793*4882a593Smuzhiyun 		{ "sched:sched_switch",	      process_sched_switch_event, },
1794*4882a593Smuzhiyun 		{ "sched:sched_stat_runtime", process_sched_runtime_event, },
1795*4882a593Smuzhiyun 		{ "sched:sched_wakeup",	      process_sched_wakeup_event, },
1796*4882a593Smuzhiyun 		{ "sched:sched_wakeup_new",   process_sched_wakeup_event, },
1797*4882a593Smuzhiyun 		{ "sched:sched_migrate_task", process_sched_migrate_task_event, },
1798*4882a593Smuzhiyun 	};
1799*4882a593Smuzhiyun 	struct perf_session *session;
1800*4882a593Smuzhiyun 	struct perf_data data = {
1801*4882a593Smuzhiyun 		.path  = input_name,
1802*4882a593Smuzhiyun 		.mode  = PERF_DATA_MODE_READ,
1803*4882a593Smuzhiyun 		.force = sched->force,
1804*4882a593Smuzhiyun 	};
1805*4882a593Smuzhiyun 	int rc = -1;
1806*4882a593Smuzhiyun 
1807*4882a593Smuzhiyun 	session = perf_session__new(&data, false, &sched->tool);
1808*4882a593Smuzhiyun 	if (IS_ERR(session)) {
1809*4882a593Smuzhiyun 		pr_debug("Error creating perf session\n");
1810*4882a593Smuzhiyun 		return PTR_ERR(session);
1811*4882a593Smuzhiyun 	}
1812*4882a593Smuzhiyun 
1813*4882a593Smuzhiyun 	symbol__init(&session->header.env);
1814*4882a593Smuzhiyun 
1815*4882a593Smuzhiyun 	if (perf_session__set_tracepoints_handlers(session, handlers))
1816*4882a593Smuzhiyun 		goto out_delete;
1817*4882a593Smuzhiyun 
1818*4882a593Smuzhiyun 	if (perf_session__has_traces(session, "record -R")) {
1819*4882a593Smuzhiyun 		int err = perf_session__process_events(session);
1820*4882a593Smuzhiyun 		if (err) {
1821*4882a593Smuzhiyun 			pr_err("Failed to process events, error %d\n", err);
1822*4882a593Smuzhiyun 			goto out_delete;
1823*4882a593Smuzhiyun 		}
1824*4882a593Smuzhiyun 
1825*4882a593Smuzhiyun 		sched->nr_events      = session->evlist->stats.nr_events[0];
1826*4882a593Smuzhiyun 		sched->nr_lost_events = session->evlist->stats.total_lost;
1827*4882a593Smuzhiyun 		sched->nr_lost_chunks = session->evlist->stats.nr_events[PERF_RECORD_LOST];
1828*4882a593Smuzhiyun 	}
1829*4882a593Smuzhiyun 
1830*4882a593Smuzhiyun 	rc = 0;
1831*4882a593Smuzhiyun out_delete:
1832*4882a593Smuzhiyun 	perf_session__delete(session);
1833*4882a593Smuzhiyun 	return rc;
1834*4882a593Smuzhiyun }
1835*4882a593Smuzhiyun 
1836*4882a593Smuzhiyun /*
1837*4882a593Smuzhiyun  * scheduling times are printed as msec.usec
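 * (e.g. 12345678 ns is printed as "12.345")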
1838*4882a593Smuzhiyun  */
1839*4882a593Smuzhiyun static inline void print_sched_time(unsigned long long nsecs, int width)
1840*4882a593Smuzhiyun {
1841*4882a593Smuzhiyun 	unsigned long msecs;
1842*4882a593Smuzhiyun 	unsigned long usecs;
1843*4882a593Smuzhiyun 
1844*4882a593Smuzhiyun 	msecs  = nsecs / NSEC_PER_MSEC;
1845*4882a593Smuzhiyun 	nsecs -= msecs * NSEC_PER_MSEC;
1846*4882a593Smuzhiyun 	usecs  = nsecs / NSEC_PER_USEC;
1847*4882a593Smuzhiyun 	printf("%*lu.%03lu ", width, msecs, usecs);
1848*4882a593Smuzhiyun }
1849*4882a593Smuzhiyun 
1850*4882a593Smuzhiyun /*
1851*4882a593Smuzhiyun  * returns runtime data for event, allocating memory for it the
1852*4882a593Smuzhiyun  * first time it is used.
1853*4882a593Smuzhiyun  */
1854*4882a593Smuzhiyun static struct evsel_runtime *evsel__get_runtime(struct evsel *evsel)
1855*4882a593Smuzhiyun {
1856*4882a593Smuzhiyun 	struct evsel_runtime *r = evsel->priv;
1857*4882a593Smuzhiyun 
1858*4882a593Smuzhiyun 	if (r == NULL) {
1859*4882a593Smuzhiyun 		r = zalloc(sizeof(struct evsel_runtime));
1860*4882a593Smuzhiyun 		evsel->priv = r;
1861*4882a593Smuzhiyun 	}
1862*4882a593Smuzhiyun 
1863*4882a593Smuzhiyun 	return r;
1864*4882a593Smuzhiyun }
1865*4882a593Smuzhiyun 
1866*4882a593Smuzhiyun /*
1867*4882a593Smuzhiyun  * save last time event was seen per cpu
1868*4882a593Smuzhiyun  */
1869*4882a593Smuzhiyun static void evsel__save_time(struct evsel *evsel, u64 timestamp, u32 cpu)
1870*4882a593Smuzhiyun {
1871*4882a593Smuzhiyun 	struct evsel_runtime *r = evsel__get_runtime(evsel);
1872*4882a593Smuzhiyun 
1873*4882a593Smuzhiyun 	if (r == NULL)
1874*4882a593Smuzhiyun 		return;
1875*4882a593Smuzhiyun 
1876*4882a593Smuzhiyun 	if ((cpu >= r->ncpu) || (r->last_time == NULL)) {
1877*4882a593Smuzhiyun 		int i, n = __roundup_pow_of_two(cpu+1);
1878*4882a593Smuzhiyun 		void *p;
1879*4882a593Smuzhiyun 
1880*4882a593Smuzhiyun 		p = realloc(r->last_time, n * sizeof(u64));
1881*4882a593Smuzhiyun 		if (!p)
1882*4882a593Smuzhiyun 			return;
1883*4882a593Smuzhiyun 
1884*4882a593Smuzhiyun 		r->last_time = p;
1885*4882a593Smuzhiyun 		for (i = r->ncpu; i < n; ++i)
1886*4882a593Smuzhiyun 			r->last_time[i] = (u64) 0;
1887*4882a593Smuzhiyun 
1888*4882a593Smuzhiyun 		r->ncpu = n;
1889*4882a593Smuzhiyun 	}
1890*4882a593Smuzhiyun 
1891*4882a593Smuzhiyun 	r->last_time[cpu] = timestamp;
1892*4882a593Smuzhiyun }
1893*4882a593Smuzhiyun 
1894*4882a593Smuzhiyun /* returns last time this event was seen on the given cpu */
1895*4882a593Smuzhiyun static u64 evsel__get_time(struct evsel *evsel, u32 cpu)
1896*4882a593Smuzhiyun {
1897*4882a593Smuzhiyun 	struct evsel_runtime *r = evsel__get_runtime(evsel);
1898*4882a593Smuzhiyun 
1899*4882a593Smuzhiyun 	if ((r == NULL) || (r->last_time == NULL) || (cpu >= r->ncpu))
1900*4882a593Smuzhiyun 		return 0;
1901*4882a593Smuzhiyun 
1902*4882a593Smuzhiyun 	return r->last_time[cpu];
1903*4882a593Smuzhiyun }
1904*4882a593Smuzhiyun 
1905*4882a593Smuzhiyun static int comm_width = 30;
1906*4882a593Smuzhiyun 
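/*
 * Format a "comm[tid/pid]" identifier for timehist output; with
 * illustrative (made-up) names, a single-threaded process prints as
 * "bash[1234]" and a thread within it as "worker[1235/1234]", while
 * pid 0 prints the comm alone. comm_width grows to fit the longest
 * string seen so far so later columns stay aligned.
 */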
1907*4882a593Smuzhiyun static char *timehist_get_commstr(struct thread *thread)
1908*4882a593Smuzhiyun {
1909*4882a593Smuzhiyun 	static char str[32];
1910*4882a593Smuzhiyun 	const char *comm = thread__comm_str(thread);
1911*4882a593Smuzhiyun 	pid_t tid = thread->tid;
1912*4882a593Smuzhiyun 	pid_t pid = thread->pid_;
1913*4882a593Smuzhiyun 	int n;
1914*4882a593Smuzhiyun 
1915*4882a593Smuzhiyun 	if (pid == 0)
1916*4882a593Smuzhiyun 		n = scnprintf(str, sizeof(str), "%s", comm);
1917*4882a593Smuzhiyun 
1918*4882a593Smuzhiyun 	else if (tid != pid)
1919*4882a593Smuzhiyun 		n = scnprintf(str, sizeof(str), "%s[%d/%d]", comm, tid, pid);
1920*4882a593Smuzhiyun 
1921*4882a593Smuzhiyun 	else
1922*4882a593Smuzhiyun 		n = scnprintf(str, sizeof(str), "%s[%d]", comm, tid);
1923*4882a593Smuzhiyun 
1924*4882a593Smuzhiyun 	if (n > comm_width)
1925*4882a593Smuzhiyun 		comm_width = n;
1926*4882a593Smuzhiyun 
1927*4882a593Smuzhiyun 	return str;
1928*4882a593Smuzhiyun }
1929*4882a593Smuzhiyun 
1930*4882a593Smuzhiyun static void timehist_header(struct perf_sched *sched)
1931*4882a593Smuzhiyun {
1932*4882a593Smuzhiyun 	u32 ncpus = sched->max_cpu + 1;
1933*4882a593Smuzhiyun 	u32 i, j;
1934*4882a593Smuzhiyun 
1935*4882a593Smuzhiyun 	printf("%15s %6s ", "time", "cpu");
1936*4882a593Smuzhiyun 
1937*4882a593Smuzhiyun 	if (sched->show_cpu_visual) {
1938*4882a593Smuzhiyun 		printf(" ");
1939*4882a593Smuzhiyun 		for (i = 0, j = 0; i < ncpus; ++i) {
1940*4882a593Smuzhiyun 			printf("%x", j++);
1941*4882a593Smuzhiyun 			if (j > 15)
1942*4882a593Smuzhiyun 				j = 0;
1943*4882a593Smuzhiyun 		}
1944*4882a593Smuzhiyun 		printf(" ");
1945*4882a593Smuzhiyun 	}
1946*4882a593Smuzhiyun 
1947*4882a593Smuzhiyun 	printf(" %-*s  %9s  %9s  %9s", comm_width,
1948*4882a593Smuzhiyun 		"task name", "wait time", "sch delay", "run time");
1949*4882a593Smuzhiyun 
1950*4882a593Smuzhiyun 	if (sched->show_state)
1951*4882a593Smuzhiyun 		printf("  %s", "state");
1952*4882a593Smuzhiyun 
1953*4882a593Smuzhiyun 	printf("\n");
1954*4882a593Smuzhiyun 
1955*4882a593Smuzhiyun 	/*
1956*4882a593Smuzhiyun 	 * units row
1957*4882a593Smuzhiyun 	 */
1958*4882a593Smuzhiyun 	printf("%15s %-6s ", "", "");
1959*4882a593Smuzhiyun 
1960*4882a593Smuzhiyun 	if (sched->show_cpu_visual)
1961*4882a593Smuzhiyun 		printf(" %*s ", ncpus, "");
1962*4882a593Smuzhiyun 
1963*4882a593Smuzhiyun 	printf(" %-*s  %9s  %9s  %9s", comm_width,
1964*4882a593Smuzhiyun 	       "[tid/pid]", "(msec)", "(msec)", "(msec)");
1965*4882a593Smuzhiyun 
1966*4882a593Smuzhiyun 	if (sched->show_state)
1967*4882a593Smuzhiyun 		printf("  %5s", "");
1968*4882a593Smuzhiyun 
1969*4882a593Smuzhiyun 	printf("\n");
1970*4882a593Smuzhiyun 
1971*4882a593Smuzhiyun 	/*
1972*4882a593Smuzhiyun 	 * separator
1973*4882a593Smuzhiyun 	 */
1974*4882a593Smuzhiyun 	printf("%.15s %.6s ", graph_dotted_line, graph_dotted_line);
1975*4882a593Smuzhiyun 
1976*4882a593Smuzhiyun 	if (sched->show_cpu_visual)
1977*4882a593Smuzhiyun 		printf(" %.*s ", ncpus, graph_dotted_line);
1978*4882a593Smuzhiyun 
1979*4882a593Smuzhiyun 	printf(" %.*s  %.9s  %.9s  %.9s", comm_width,
1980*4882a593Smuzhiyun 		graph_dotted_line, graph_dotted_line, graph_dotted_line,
1981*4882a593Smuzhiyun 		graph_dotted_line);
1982*4882a593Smuzhiyun 
1983*4882a593Smuzhiyun 	if (sched->show_state)
1984*4882a593Smuzhiyun 		printf("  %.5s", graph_dotted_line);
1985*4882a593Smuzhiyun 
1986*4882a593Smuzhiyun 	printf("\n");
1987*4882a593Smuzhiyun }
1988*4882a593Smuzhiyun 
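
/*
 * Map the prev_state value from sched_switch to the single letter of
 * the 'state' column, using the TASK_STATE_TO_CHAR_STR alphabet
 * ('R' runnable, 'S' sleeping, 'D' uninterruptible, ...); tid 0 is
 * always reported as 'I' for idle, and unknown bits as '?'.
 */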
1989*4882a593Smuzhiyun static char task_state_char(struct thread *thread, int state)
1990*4882a593Smuzhiyun {
1991*4882a593Smuzhiyun 	static const char state_to_char[] = TASK_STATE_TO_CHAR_STR;
1992*4882a593Smuzhiyun 	unsigned bit = state ? ffs(state) : 0;
1993*4882a593Smuzhiyun 
1994*4882a593Smuzhiyun 	/* 'I' for idle */
1995*4882a593Smuzhiyun 	if (thread->tid == 0)
1996*4882a593Smuzhiyun 		return 'I';
1997*4882a593Smuzhiyun 
1998*4882a593Smuzhiyun 	return bit < sizeof(state_to_char) - 1 ? state_to_char[bit] : '?';
1999*4882a593Smuzhiyun }
2000*4882a593Smuzhiyun 
2001*4882a593Smuzhiyun static void timehist_print_sample(struct perf_sched *sched,
2002*4882a593Smuzhiyun 				  struct evsel *evsel,
2003*4882a593Smuzhiyun 				  struct perf_sample *sample,
2004*4882a593Smuzhiyun 				  struct addr_location *al,
2005*4882a593Smuzhiyun 				  struct thread *thread,
2006*4882a593Smuzhiyun 				  u64 t, int state)
2007*4882a593Smuzhiyun {
2008*4882a593Smuzhiyun 	struct thread_runtime *tr = thread__priv(thread);
2009*4882a593Smuzhiyun 	const char *next_comm = evsel__strval(evsel, sample, "next_comm");
2010*4882a593Smuzhiyun 	const u32 next_pid = evsel__intval(evsel, sample, "next_pid");
2011*4882a593Smuzhiyun 	u32 max_cpus = sched->max_cpu + 1;
2012*4882a593Smuzhiyun 	char tstr[64];
2013*4882a593Smuzhiyun 	char nstr[30];
2014*4882a593Smuzhiyun 	u64 wait_time;
2015*4882a593Smuzhiyun 
2016*4882a593Smuzhiyun 	if (cpu_list && !test_bit(sample->cpu, cpu_bitmap))
2017*4882a593Smuzhiyun 		return;
2018*4882a593Smuzhiyun 
2019*4882a593Smuzhiyun 	timestamp__scnprintf_usec(t, tstr, sizeof(tstr));
2020*4882a593Smuzhiyun 	printf("%15s [%04d] ", tstr, sample->cpu);
2021*4882a593Smuzhiyun 
2022*4882a593Smuzhiyun 	if (sched->show_cpu_visual) {
2023*4882a593Smuzhiyun 		u32 i;
2024*4882a593Smuzhiyun 		char c;
2025*4882a593Smuzhiyun 
2026*4882a593Smuzhiyun 		printf(" ");
2027*4882a593Smuzhiyun 		for (i = 0; i < max_cpus; ++i) {
2028*4882a593Smuzhiyun 			/* flag idle times with 'i'; others are sched events */
2029*4882a593Smuzhiyun 			if (i == sample->cpu)
2030*4882a593Smuzhiyun 				c = (thread->tid == 0) ? 'i' : 's';
2031*4882a593Smuzhiyun 			else
2032*4882a593Smuzhiyun 				c = ' ';
2033*4882a593Smuzhiyun 			printf("%c", c);
2034*4882a593Smuzhiyun 		}
2035*4882a593Smuzhiyun 		printf(" ");
2036*4882a593Smuzhiyun 	}
2037*4882a593Smuzhiyun 
2038*4882a593Smuzhiyun 	printf(" %-*s ", comm_width, timehist_get_commstr(thread));
2039*4882a593Smuzhiyun 
2040*4882a593Smuzhiyun 	wait_time = tr->dt_sleep + tr->dt_iowait + tr->dt_preempt;
2041*4882a593Smuzhiyun 	print_sched_time(wait_time, 6);
2042*4882a593Smuzhiyun 
2043*4882a593Smuzhiyun 	print_sched_time(tr->dt_delay, 6);
2044*4882a593Smuzhiyun 	print_sched_time(tr->dt_run, 6);
2045*4882a593Smuzhiyun 
2046*4882a593Smuzhiyun 	if (sched->show_state)
2047*4882a593Smuzhiyun 		printf(" %5c ", task_state_char(thread, state));
2048*4882a593Smuzhiyun 
2049*4882a593Smuzhiyun 	if (sched->show_next) {
2050*4882a593Smuzhiyun 		snprintf(nstr, sizeof(nstr), "next: %s[%d]", next_comm, next_pid);
2051*4882a593Smuzhiyun 		printf(" %-*s", comm_width, nstr);
2052*4882a593Smuzhiyun 	}
2053*4882a593Smuzhiyun 
2054*4882a593Smuzhiyun 	if (sched->show_wakeups && !sched->show_next)
2055*4882a593Smuzhiyun 		printf("  %-*s", comm_width, "");
2056*4882a593Smuzhiyun 
2057*4882a593Smuzhiyun 	if (thread->tid == 0)
2058*4882a593Smuzhiyun 		goto out;
2059*4882a593Smuzhiyun 
2060*4882a593Smuzhiyun 	if (sched->show_callchain)
2061*4882a593Smuzhiyun 		printf("  ");
2062*4882a593Smuzhiyun 
2063*4882a593Smuzhiyun 	sample__fprintf_sym(sample, al, 0,
2064*4882a593Smuzhiyun 			    EVSEL__PRINT_SYM | EVSEL__PRINT_ONELINE |
2065*4882a593Smuzhiyun 			    EVSEL__PRINT_CALLCHAIN_ARROW |
2066*4882a593Smuzhiyun 			    EVSEL__PRINT_SKIP_IGNORED,
2067*4882a593Smuzhiyun 			    &callchain_cursor, symbol_conf.bt_stop_list,  stdout);
2068*4882a593Smuzhiyun 
2069*4882a593Smuzhiyun out:
2070*4882a593Smuzhiyun 	printf("\n");
2071*4882a593Smuzhiyun }
2072*4882a593Smuzhiyun 
2073*4882a593Smuzhiyun /*
2074*4882a593Smuzhiyun  * Explanation of delta-time stats:
2075*4882a593Smuzhiyun  *
2076*4882a593Smuzhiyun  *            t = time of current schedule out event
2077*4882a593Smuzhiyun  *        tprev = time of previous sched out event
2078*4882a593Smuzhiyun  *                also time of schedule-in event for current task
2079*4882a593Smuzhiyun  *    last_time = time of last sched change event for current task
2080*4882a593Smuzhiyun  *                (i.e, time process was last scheduled out)
2081*4882a593Smuzhiyun  * ready_to_run = time of wakeup for current task
2082*4882a593Smuzhiyun  *
2083*4882a593Smuzhiyun  * -----|------------|------------|------------|------
2084*4882a593Smuzhiyun  *    last         ready        tprev          t
2085*4882a593Smuzhiyun  *    time         to run
2086*4882a593Smuzhiyun  *
2087*4882a593Smuzhiyun  *      |-------- dt_wait --------|
2088*4882a593Smuzhiyun  *                   |- dt_delay -|-- dt_run --|
2089*4882a593Smuzhiyun  *
2090*4882a593Smuzhiyun  *   dt_run = run time of current task
2091*4882a593Smuzhiyun  *  dt_wait = time between last schedule out event for task and tprev
2092*4882a593Smuzhiyun  *            represents time spent off the cpu
2093*4882a593Smuzhiyun  * dt_delay = time between wakeup and schedule-in of task
2094*4882a593Smuzhiyun  */
2095*4882a593Smuzhiyun 
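/*
 * A worked example with illustrative (made-up) timestamps: if the task
 * was last switched out at last_time = 100, woken at ready_to_run = 150,
 * the previous sched_switch was at tprev = 180 and the current one is
 * at t = 230, then dt_wait = 180 - 100 = 80 (booked as dt_sleep,
 * dt_iowait or dt_preempt depending on last_state), dt_delay =
 * 180 - 150 = 30 and dt_run = 230 - 180 = 50.
 */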
2096*4882a593Smuzhiyun static void timehist_update_runtime_stats(struct thread_runtime *r,
2097*4882a593Smuzhiyun 					 u64 t, u64 tprev)
2098*4882a593Smuzhiyun {
2099*4882a593Smuzhiyun 	r->dt_delay   = 0;
2100*4882a593Smuzhiyun 	r->dt_sleep   = 0;
2101*4882a593Smuzhiyun 	r->dt_iowait  = 0;
2102*4882a593Smuzhiyun 	r->dt_preempt = 0;
2103*4882a593Smuzhiyun 	r->dt_run     = 0;
2104*4882a593Smuzhiyun 
2105*4882a593Smuzhiyun 	if (tprev) {
2106*4882a593Smuzhiyun 		r->dt_run = t - tprev;
2107*4882a593Smuzhiyun 		if (r->ready_to_run) {
2108*4882a593Smuzhiyun 			if (r->ready_to_run > tprev)
2109*4882a593Smuzhiyun 				pr_debug("time travel: wakeup time for task > previous sched_switch event\n");
2110*4882a593Smuzhiyun 			else
2111*4882a593Smuzhiyun 				r->dt_delay = tprev - r->ready_to_run;
2112*4882a593Smuzhiyun 		}
2113*4882a593Smuzhiyun 
2114*4882a593Smuzhiyun 		if (r->last_time > tprev)
2115*4882a593Smuzhiyun 			pr_debug("time travel: last sched out time for task > previous sched_switch event\n");
2116*4882a593Smuzhiyun 		else if (r->last_time) {
2117*4882a593Smuzhiyun 			u64 dt_wait = tprev - r->last_time;
2118*4882a593Smuzhiyun 
2119*4882a593Smuzhiyun 			if (r->last_state == TASK_RUNNING)
2120*4882a593Smuzhiyun 				r->dt_preempt = dt_wait;
2121*4882a593Smuzhiyun 			else if (r->last_state == TASK_UNINTERRUPTIBLE)
2122*4882a593Smuzhiyun 				r->dt_iowait = dt_wait;
2123*4882a593Smuzhiyun 			else
2124*4882a593Smuzhiyun 				r->dt_sleep = dt_wait;
2125*4882a593Smuzhiyun 		}
2126*4882a593Smuzhiyun 	}
2127*4882a593Smuzhiyun 
2128*4882a593Smuzhiyun 	update_stats(&r->run_stats, r->dt_run);
2129*4882a593Smuzhiyun 
2130*4882a593Smuzhiyun 	r->total_run_time     += r->dt_run;
2131*4882a593Smuzhiyun 	r->total_delay_time   += r->dt_delay;
2132*4882a593Smuzhiyun 	r->total_sleep_time   += r->dt_sleep;
2133*4882a593Smuzhiyun 	r->total_iowait_time  += r->dt_iowait;
2134*4882a593Smuzhiyun 	r->total_preempt_time += r->dt_preempt;
2135*4882a593Smuzhiyun }
2136*4882a593Smuzhiyun 
2137*4882a593Smuzhiyun static bool is_idle_sample(struct perf_sample *sample,
2138*4882a593Smuzhiyun 			   struct evsel *evsel)
2139*4882a593Smuzhiyun {
2140*4882a593Smuzhiyun 	/* pid 0 == swapper == idle task */
2141*4882a593Smuzhiyun 	if (strcmp(evsel__name(evsel), "sched:sched_switch") == 0)
2142*4882a593Smuzhiyun 		return evsel__intval(evsel, sample, "prev_pid") == 0;
2143*4882a593Smuzhiyun 
2144*4882a593Smuzhiyun 	return sample->pid == 0;
2145*4882a593Smuzhiyun }
2146*4882a593Smuzhiyun 
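/*
 * Resolve and cache the callchain for the sample's main thread. The
 * scheduler-internal frames (schedule, __schedule, preempt_schedule)
 * are flagged via sym->ignore so that sample__fprintf_sym(), invoked
 * with EVSEL__PRINT_SKIP_IGNORED, starts the printed chain at the
 * caller that actually blocked.
 */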
2147*4882a593Smuzhiyun static void save_task_callchain(struct perf_sched *sched,
2148*4882a593Smuzhiyun 				struct perf_sample *sample,
2149*4882a593Smuzhiyun 				struct evsel *evsel,
2150*4882a593Smuzhiyun 				struct machine *machine)
2151*4882a593Smuzhiyun {
2152*4882a593Smuzhiyun 	struct callchain_cursor *cursor = &callchain_cursor;
2153*4882a593Smuzhiyun 	struct thread *thread;
2154*4882a593Smuzhiyun 
2155*4882a593Smuzhiyun 	/* want main thread for process - has maps */
2156*4882a593Smuzhiyun 	thread = machine__findnew_thread(machine, sample->pid, sample->pid);
2157*4882a593Smuzhiyun 	if (thread == NULL) {
2158*4882a593Smuzhiyun 		pr_debug("Failed to get thread for pid %d.\n", sample->pid);
2159*4882a593Smuzhiyun 		return;
2160*4882a593Smuzhiyun 	}
2161*4882a593Smuzhiyun 
2162*4882a593Smuzhiyun 	if (!sched->show_callchain || sample->callchain == NULL)
2163*4882a593Smuzhiyun 		return;
2164*4882a593Smuzhiyun 
2165*4882a593Smuzhiyun 	if (thread__resolve_callchain(thread, cursor, evsel, sample,
2166*4882a593Smuzhiyun 				      NULL, NULL, sched->max_stack + 2) != 0) {
2167*4882a593Smuzhiyun 		if (verbose > 0)
2168*4882a593Smuzhiyun 			pr_err("Failed to resolve callchain. Skipping\n");
2169*4882a593Smuzhiyun 
2170*4882a593Smuzhiyun 		return;
2171*4882a593Smuzhiyun 	}
2172*4882a593Smuzhiyun 
2173*4882a593Smuzhiyun 	callchain_cursor_commit(cursor);
2174*4882a593Smuzhiyun 
2175*4882a593Smuzhiyun 	while (true) {
2176*4882a593Smuzhiyun 		struct callchain_cursor_node *node;
2177*4882a593Smuzhiyun 		struct symbol *sym;
2178*4882a593Smuzhiyun 
2179*4882a593Smuzhiyun 		node = callchain_cursor_current(cursor);
2180*4882a593Smuzhiyun 		if (node == NULL)
2181*4882a593Smuzhiyun 			break;
2182*4882a593Smuzhiyun 
2183*4882a593Smuzhiyun 		sym = node->ms.sym;
2184*4882a593Smuzhiyun 		if (sym) {
2185*4882a593Smuzhiyun 			if (!strcmp(sym->name, "schedule") ||
2186*4882a593Smuzhiyun 			    !strcmp(sym->name, "__schedule") ||
2187*4882a593Smuzhiyun 			    !strcmp(sym->name, "preempt_schedule"))
2188*4882a593Smuzhiyun 				sym->ignore = 1;
2189*4882a593Smuzhiyun 		}
2190*4882a593Smuzhiyun 
2191*4882a593Smuzhiyun 		callchain_cursor_advance(cursor);
2192*4882a593Smuzhiyun 	}
2193*4882a593Smuzhiyun }
2194*4882a593Smuzhiyun 
2195*4882a593Smuzhiyun static int init_idle_thread(struct thread *thread)
2196*4882a593Smuzhiyun {
2197*4882a593Smuzhiyun 	struct idle_thread_runtime *itr;
2198*4882a593Smuzhiyun 
2199*4882a593Smuzhiyun 	thread__set_comm(thread, idle_comm, 0);
2200*4882a593Smuzhiyun 
2201*4882a593Smuzhiyun 	itr = zalloc(sizeof(*itr));
2202*4882a593Smuzhiyun 	if (itr == NULL)
2203*4882a593Smuzhiyun 		return -ENOMEM;
2204*4882a593Smuzhiyun 
2205*4882a593Smuzhiyun 	init_stats(&itr->tr.run_stats);
2206*4882a593Smuzhiyun 	callchain_init(&itr->callchain);
2207*4882a593Smuzhiyun 	callchain_cursor_reset(&itr->cursor);
2208*4882a593Smuzhiyun 	thread__set_priv(thread, itr);
2209*4882a593Smuzhiyun 
2210*4882a593Smuzhiyun 	return 0;
2211*4882a593Smuzhiyun }
2212*4882a593Smuzhiyun 
2213*4882a593Smuzhiyun /*
2214*4882a593Smuzhiyun  * Track idle stats per cpu by maintaining a local thread
2215*4882a593Smuzhiyun  * struct for the idle task on each cpu.
2216*4882a593Smuzhiyun  */
2217*4882a593Smuzhiyun static int init_idle_threads(int ncpu)
2218*4882a593Smuzhiyun {
2219*4882a593Smuzhiyun 	int i, ret;
2220*4882a593Smuzhiyun 
2221*4882a593Smuzhiyun 	idle_threads = zalloc(ncpu * sizeof(struct thread *));
2222*4882a593Smuzhiyun 	if (!idle_threads)
2223*4882a593Smuzhiyun 		return -ENOMEM;
2224*4882a593Smuzhiyun 
2225*4882a593Smuzhiyun 	idle_max_cpu = ncpu;
2226*4882a593Smuzhiyun 
2227*4882a593Smuzhiyun 	/* allocate the actual thread struct if needed */
2228*4882a593Smuzhiyun 	for (i = 0; i < ncpu; ++i) {
2229*4882a593Smuzhiyun 		idle_threads[i] = thread__new(0, 0);
2230*4882a593Smuzhiyun 		if (idle_threads[i] == NULL)
2231*4882a593Smuzhiyun 			return -ENOMEM;
2232*4882a593Smuzhiyun 
2233*4882a593Smuzhiyun 		ret = init_idle_thread(idle_threads[i]);
2234*4882a593Smuzhiyun 		if (ret < 0)
2235*4882a593Smuzhiyun 			return ret;
2236*4882a593Smuzhiyun 	}
2237*4882a593Smuzhiyun 
2238*4882a593Smuzhiyun 	return 0;
2239*4882a593Smuzhiyun }
2240*4882a593Smuzhiyun 
2241*4882a593Smuzhiyun static void free_idle_threads(void)
2242*4882a593Smuzhiyun {
2243*4882a593Smuzhiyun 	int i;
2244*4882a593Smuzhiyun 
2245*4882a593Smuzhiyun 	if (idle_threads == NULL)
2246*4882a593Smuzhiyun 		return;
2247*4882a593Smuzhiyun 
2248*4882a593Smuzhiyun 	for (i = 0; i < idle_max_cpu; ++i) {
2249*4882a593Smuzhiyun 		if ((idle_threads[i]))
2250*4882a593Smuzhiyun 			thread__delete(idle_threads[i]);
2251*4882a593Smuzhiyun 	}
2252*4882a593Smuzhiyun 
2253*4882a593Smuzhiyun 	free(idle_threads);
2254*4882a593Smuzhiyun }
2255*4882a593Smuzhiyun 
2256*4882a593Smuzhiyun static struct thread *get_idle_thread(int cpu)
2257*4882a593Smuzhiyun {
2258*4882a593Smuzhiyun 	/*
2259*4882a593Smuzhiyun 	 * expand/allocate array of pointers to local thread
2260*4882a593Smuzhiyun 	 * structs if needed
2261*4882a593Smuzhiyun 	 */
2262*4882a593Smuzhiyun 	if ((cpu >= idle_max_cpu) || (idle_threads == NULL)) {
2263*4882a593Smuzhiyun 		int i, j = __roundup_pow_of_two(cpu+1);
2264*4882a593Smuzhiyun 		void *p;
2265*4882a593Smuzhiyun 
2266*4882a593Smuzhiyun 		p = realloc(idle_threads, j * sizeof(struct thread *));
2267*4882a593Smuzhiyun 		if (!p)
2268*4882a593Smuzhiyun 			return NULL;
2269*4882a593Smuzhiyun 
2270*4882a593Smuzhiyun 		idle_threads = (struct thread **) p;
2271*4882a593Smuzhiyun 		for (i = idle_max_cpu; i < j; ++i)
2272*4882a593Smuzhiyun 			idle_threads[i] = NULL;
2273*4882a593Smuzhiyun 
2274*4882a593Smuzhiyun 		idle_max_cpu = j;
2275*4882a593Smuzhiyun 	}
2276*4882a593Smuzhiyun 
2277*4882a593Smuzhiyun 	/* allocate a new thread struct if needed */
2278*4882a593Smuzhiyun 	if (idle_threads[cpu] == NULL) {
2279*4882a593Smuzhiyun 		idle_threads[cpu] = thread__new(0, 0);
2280*4882a593Smuzhiyun 
2281*4882a593Smuzhiyun 		if (idle_threads[cpu]) {
2282*4882a593Smuzhiyun 			if (init_idle_thread(idle_threads[cpu]) < 0)
2283*4882a593Smuzhiyun 				return NULL;
2284*4882a593Smuzhiyun 		}
2285*4882a593Smuzhiyun 	}
2286*4882a593Smuzhiyun 
2287*4882a593Smuzhiyun 	return idle_threads[cpu];
2288*4882a593Smuzhiyun }
2289*4882a593Smuzhiyun 
2290*4882a593Smuzhiyun static void save_idle_callchain(struct perf_sched *sched,
2291*4882a593Smuzhiyun 				struct idle_thread_runtime *itr,
2292*4882a593Smuzhiyun 				struct perf_sample *sample)
2293*4882a593Smuzhiyun {
2294*4882a593Smuzhiyun 	if (!sched->show_callchain || sample->callchain == NULL)
2295*4882a593Smuzhiyun 		return;
2296*4882a593Smuzhiyun 
2297*4882a593Smuzhiyun 	callchain_cursor__copy(&itr->cursor, &callchain_cursor);
2298*4882a593Smuzhiyun }
2299*4882a593Smuzhiyun 
2300*4882a593Smuzhiyun static struct thread *timehist_get_thread(struct perf_sched *sched,
2301*4882a593Smuzhiyun 					  struct perf_sample *sample,
2302*4882a593Smuzhiyun 					  struct machine *machine,
2303*4882a593Smuzhiyun 					  struct evsel *evsel)
2304*4882a593Smuzhiyun {
2305*4882a593Smuzhiyun 	struct thread *thread;
2306*4882a593Smuzhiyun 
2307*4882a593Smuzhiyun 	if (is_idle_sample(sample, evsel)) {
2308*4882a593Smuzhiyun 		thread = get_idle_thread(sample->cpu);
2309*4882a593Smuzhiyun 		if (thread == NULL)
2310*4882a593Smuzhiyun 			pr_err("Failed to get idle thread for cpu %d.\n", sample->cpu);
2311*4882a593Smuzhiyun 
2312*4882a593Smuzhiyun 	} else {
2313*4882a593Smuzhiyun 		/* there were samples with tid 0 but non-zero pid */
2314*4882a593Smuzhiyun 		thread = machine__findnew_thread(machine, sample->pid,
2315*4882a593Smuzhiyun 						 sample->tid ?: sample->pid);
2316*4882a593Smuzhiyun 		if (thread == NULL) {
2317*4882a593Smuzhiyun 			pr_debug("Failed to get thread for tid %d. skipping sample.\n",
2318*4882a593Smuzhiyun 				 sample->tid);
2319*4882a593Smuzhiyun 		}
2320*4882a593Smuzhiyun 
2321*4882a593Smuzhiyun 		save_task_callchain(sched, sample, evsel, machine);
2322*4882a593Smuzhiyun 		if (sched->idle_hist) {
2323*4882a593Smuzhiyun 			struct thread *idle;
2324*4882a593Smuzhiyun 			struct idle_thread_runtime *itr;
2325*4882a593Smuzhiyun 
2326*4882a593Smuzhiyun 			idle = get_idle_thread(sample->cpu);
2327*4882a593Smuzhiyun 			if (idle == NULL) {
2328*4882a593Smuzhiyun 				pr_err("Failed to get idle thread for cpu %d.\n", sample->cpu);
2329*4882a593Smuzhiyun 				return NULL;
2330*4882a593Smuzhiyun 			}
2331*4882a593Smuzhiyun 
2332*4882a593Smuzhiyun 			itr = thread__priv(idle);
2333*4882a593Smuzhiyun 			if (itr == NULL)
2334*4882a593Smuzhiyun 				return NULL;
2335*4882a593Smuzhiyun 
2336*4882a593Smuzhiyun 			itr->last_thread = thread;
2337*4882a593Smuzhiyun 
2338*4882a593Smuzhiyun 			/* copy the task callchain when the CPU enters idle */
2339*4882a593Smuzhiyun 			if (evsel__intval(evsel, sample, "next_pid") == 0)
2340*4882a593Smuzhiyun 				save_idle_callchain(sched, itr, sample);
2341*4882a593Smuzhiyun 		}
2342*4882a593Smuzhiyun 	}
2343*4882a593Smuzhiyun 
2344*4882a593Smuzhiyun 	return thread;
2345*4882a593Smuzhiyun }
2346*4882a593Smuzhiyun 
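/*
 * A sample is skipped when its thread is filtered out, or, in
 * idle-hist mode, when the event is not a sched_switch with the idle
 * task on at least one side.
 */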
2347*4882a593Smuzhiyun static bool timehist_skip_sample(struct perf_sched *sched,
2348*4882a593Smuzhiyun 				 struct thread *thread,
2349*4882a593Smuzhiyun 				 struct evsel *evsel,
2350*4882a593Smuzhiyun 				 struct perf_sample *sample)
2351*4882a593Smuzhiyun {
2352*4882a593Smuzhiyun 	bool rc = false;
2353*4882a593Smuzhiyun 
2354*4882a593Smuzhiyun 	if (thread__is_filtered(thread)) {
2355*4882a593Smuzhiyun 		rc = true;
2356*4882a593Smuzhiyun 		sched->skipped_samples++;
2357*4882a593Smuzhiyun 	}
2358*4882a593Smuzhiyun 
2359*4882a593Smuzhiyun 	if (sched->idle_hist) {
2360*4882a593Smuzhiyun 		if (strcmp(evsel__name(evsel), "sched:sched_switch"))
2361*4882a593Smuzhiyun 			rc = true;
2362*4882a593Smuzhiyun 		else if (evsel__intval(evsel, sample, "prev_pid") != 0 &&
2363*4882a593Smuzhiyun 			 evsel__intval(evsel, sample, "next_pid") != 0)
2364*4882a593Smuzhiyun 			rc = true;
2365*4882a593Smuzhiyun 	}
2366*4882a593Smuzhiyun 
2367*4882a593Smuzhiyun 	return rc;
2368*4882a593Smuzhiyun }
2369*4882a593Smuzhiyun 
2370*4882a593Smuzhiyun static void timehist_print_wakeup_event(struct perf_sched *sched,
2371*4882a593Smuzhiyun 					struct evsel *evsel,
2372*4882a593Smuzhiyun 					struct perf_sample *sample,
2373*4882a593Smuzhiyun 					struct machine *machine,
2374*4882a593Smuzhiyun 					struct thread *awakened)
2375*4882a593Smuzhiyun {
2376*4882a593Smuzhiyun 	struct thread *thread;
2377*4882a593Smuzhiyun 	char tstr[64];
2378*4882a593Smuzhiyun 
2379*4882a593Smuzhiyun 	thread = machine__findnew_thread(machine, sample->pid, sample->tid);
2380*4882a593Smuzhiyun 	if (thread == NULL)
2381*4882a593Smuzhiyun 		return;
2382*4882a593Smuzhiyun 
2383*4882a593Smuzhiyun 	/* show the wakeup unless both the wakee and the waker are filtered */
2384*4882a593Smuzhiyun 	if (timehist_skip_sample(sched, thread, evsel, sample) &&
2385*4882a593Smuzhiyun 	    timehist_skip_sample(sched, awakened, evsel, sample)) {
2386*4882a593Smuzhiyun 		return;
2387*4882a593Smuzhiyun 	}
2388*4882a593Smuzhiyun 
2389*4882a593Smuzhiyun 	timestamp__scnprintf_usec(sample->time, tstr, sizeof(tstr));
2390*4882a593Smuzhiyun 	printf("%15s [%04d] ", tstr, sample->cpu);
2391*4882a593Smuzhiyun 	if (sched->show_cpu_visual)
2392*4882a593Smuzhiyun 		printf(" %*s ", sched->max_cpu + 1, "");
2393*4882a593Smuzhiyun 
2394*4882a593Smuzhiyun 	printf(" %-*s ", comm_width, timehist_get_commstr(thread));
2395*4882a593Smuzhiyun 
2396*4882a593Smuzhiyun 	/* dt spacer */
2397*4882a593Smuzhiyun 	printf("  %9s  %9s  %9s ", "", "", "");
2398*4882a593Smuzhiyun 
2399*4882a593Smuzhiyun 	printf("awakened: %s", timehist_get_commstr(awakened));
2400*4882a593Smuzhiyun 
2401*4882a593Smuzhiyun 	printf("\n");
2402*4882a593Smuzhiyun }
2403*4882a593Smuzhiyun 
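/*
 * No-op handler, installed for sched_wakeup when sched_waking was
 * also captured so that wakeups are not accounted twice.
 */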
2404*4882a593Smuzhiyun static int timehist_sched_wakeup_ignore(struct perf_tool *tool __maybe_unused,
2405*4882a593Smuzhiyun 					union perf_event *event __maybe_unused,
2406*4882a593Smuzhiyun 					struct evsel *evsel __maybe_unused,
2407*4882a593Smuzhiyun 					struct perf_sample *sample __maybe_unused,
2408*4882a593Smuzhiyun 					struct machine *machine __maybe_unused)
2409*4882a593Smuzhiyun {
2410*4882a593Smuzhiyun 	return 0;
2411*4882a593Smuzhiyun }
2412*4882a593Smuzhiyun 
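/*
 * Record the time the awakened task became ready to run; the delta
 * to its next sched-in is reported as scheduling delay.  A wakeup
 * line is printed when show_wakeups is set.
 */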
2413*4882a593Smuzhiyun static int timehist_sched_wakeup_event(struct perf_tool *tool,
2414*4882a593Smuzhiyun 				       union perf_event *event __maybe_unused,
2415*4882a593Smuzhiyun 				       struct evsel *evsel,
2416*4882a593Smuzhiyun 				       struct perf_sample *sample,
2417*4882a593Smuzhiyun 				       struct machine *machine)
2418*4882a593Smuzhiyun {
2419*4882a593Smuzhiyun 	struct perf_sched *sched = container_of(tool, struct perf_sched, tool);
2420*4882a593Smuzhiyun 	struct thread *thread;
2421*4882a593Smuzhiyun 	struct thread_runtime *tr = NULL;
2422*4882a593Smuzhiyun 	/* want the pid of the awakened task, not the pid in the sample */
2423*4882a593Smuzhiyun 	const u32 pid = evsel__intval(evsel, sample, "pid");
2424*4882a593Smuzhiyun 
2425*4882a593Smuzhiyun 	thread = machine__findnew_thread(machine, 0, pid);
2426*4882a593Smuzhiyun 	if (thread == NULL)
2427*4882a593Smuzhiyun 		return -1;
2428*4882a593Smuzhiyun 
2429*4882a593Smuzhiyun 	tr = thread__get_runtime(thread);
2430*4882a593Smuzhiyun 	if (tr == NULL)
2431*4882a593Smuzhiyun 		return -1;
2432*4882a593Smuzhiyun 
2433*4882a593Smuzhiyun 	if (tr->ready_to_run == 0)
2434*4882a593Smuzhiyun 		tr->ready_to_run = sample->time;
2435*4882a593Smuzhiyun 
2436*4882a593Smuzhiyun 	/* show wakeups if requested */
2437*4882a593Smuzhiyun 	if (sched->show_wakeups &&
2438*4882a593Smuzhiyun 	    !perf_time__skip_sample(&sched->ptime, sample->time))
2439*4882a593Smuzhiyun 		timehist_print_wakeup_event(sched, evsel, sample, machine, thread);
2440*4882a593Smuzhiyun 
2441*4882a593Smuzhiyun 	return 0;
2442*4882a593Smuzhiyun }
2443*4882a593Smuzhiyun 
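/*
 * Print a "migrated: <comm> cpu X => Y" line; in the CPU visual
 * column the sampled CPU is marked with 'm'.
 */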
2444*4882a593Smuzhiyun static void timehist_print_migration_event(struct perf_sched *sched,
2445*4882a593Smuzhiyun 					struct evsel *evsel,
2446*4882a593Smuzhiyun 					struct perf_sample *sample,
2447*4882a593Smuzhiyun 					struct machine *machine,
2448*4882a593Smuzhiyun 					struct thread *migrated)
2449*4882a593Smuzhiyun {
2450*4882a593Smuzhiyun 	struct thread *thread;
2451*4882a593Smuzhiyun 	char tstr[64];
2452*4882a593Smuzhiyun 	u32 max_cpus;
2453*4882a593Smuzhiyun 	u32 ocpu, dcpu;
2454*4882a593Smuzhiyun 
2455*4882a593Smuzhiyun 	if (sched->summary_only)
2456*4882a593Smuzhiyun 		return;
2457*4882a593Smuzhiyun 
2458*4882a593Smuzhiyun 	max_cpus = sched->max_cpu + 1;
2459*4882a593Smuzhiyun 	ocpu = evsel__intval(evsel, sample, "orig_cpu");
2460*4882a593Smuzhiyun 	dcpu = evsel__intval(evsel, sample, "dest_cpu");
2461*4882a593Smuzhiyun 
2462*4882a593Smuzhiyun 	thread = machine__findnew_thread(machine, sample->pid, sample->tid);
2463*4882a593Smuzhiyun 	if (thread == NULL)
2464*4882a593Smuzhiyun 		return;
2465*4882a593Smuzhiyun 
2466*4882a593Smuzhiyun 	if (timehist_skip_sample(sched, thread, evsel, sample) &&
2467*4882a593Smuzhiyun 	    timehist_skip_sample(sched, migrated, evsel, sample)) {
2468*4882a593Smuzhiyun 		return;
2469*4882a593Smuzhiyun 	}
2470*4882a593Smuzhiyun 
2471*4882a593Smuzhiyun 	timestamp__scnprintf_usec(sample->time, tstr, sizeof(tstr));
2472*4882a593Smuzhiyun 	printf("%15s [%04d] ", tstr, sample->cpu);
2473*4882a593Smuzhiyun 
2474*4882a593Smuzhiyun 	if (sched->show_cpu_visual) {
2475*4882a593Smuzhiyun 		u32 i;
2476*4882a593Smuzhiyun 		char c;
2477*4882a593Smuzhiyun 
2478*4882a593Smuzhiyun 		printf("  ");
2479*4882a593Smuzhiyun 		for (i = 0; i < max_cpus; ++i) {
2480*4882a593Smuzhiyun 			c = (i == sample->cpu) ? 'm' : ' ';
2481*4882a593Smuzhiyun 			printf("%c", c);
2482*4882a593Smuzhiyun 		}
2483*4882a593Smuzhiyun 		printf("  ");
2484*4882a593Smuzhiyun 	}
2485*4882a593Smuzhiyun 
2486*4882a593Smuzhiyun 	printf(" %-*s ", comm_width, timehist_get_commstr(thread));
2487*4882a593Smuzhiyun 
2488*4882a593Smuzhiyun 	/* dt spacer */
2489*4882a593Smuzhiyun 	printf("  %9s  %9s  %9s ", "", "", "");
2490*4882a593Smuzhiyun 
2491*4882a593Smuzhiyun 	printf("migrated: %s", timehist_get_commstr(migrated));
2492*4882a593Smuzhiyun 	printf(" cpu %d => %d", ocpu, dcpu);
2493*4882a593Smuzhiyun 
2494*4882a593Smuzhiyun 	printf("\n");
2495*4882a593Smuzhiyun }
2496*4882a593Smuzhiyun 
2497*4882a593Smuzhiyun static int timehist_migrate_task_event(struct perf_tool *tool,
2498*4882a593Smuzhiyun 				       union perf_event *event __maybe_unused,
2499*4882a593Smuzhiyun 				       struct evsel *evsel,
2500*4882a593Smuzhiyun 				       struct perf_sample *sample,
2501*4882a593Smuzhiyun 				       struct machine *machine)
2502*4882a593Smuzhiyun {
2503*4882a593Smuzhiyun 	struct perf_sched *sched = container_of(tool, struct perf_sched, tool);
2504*4882a593Smuzhiyun 	struct thread *thread;
2505*4882a593Smuzhiyun 	struct thread_runtime *tr = NULL;
2506*4882a593Smuzhiyun 	/* want the pid of the migrated task, not the pid in the sample */
2507*4882a593Smuzhiyun 	const u32 pid = evsel__intval(evsel, sample, "pid");
2508*4882a593Smuzhiyun 
2509*4882a593Smuzhiyun 	thread = machine__findnew_thread(machine, 0, pid);
2510*4882a593Smuzhiyun 	if (thread == NULL)
2511*4882a593Smuzhiyun 		return -1;
2512*4882a593Smuzhiyun 
2513*4882a593Smuzhiyun 	tr = thread__get_runtime(thread);
2514*4882a593Smuzhiyun 	if (tr == NULL)
2515*4882a593Smuzhiyun 		return -1;
2516*4882a593Smuzhiyun 
2517*4882a593Smuzhiyun 	tr->migrations++;
2518*4882a593Smuzhiyun 
2519*4882a593Smuzhiyun 	/* show migrations if requested */
2520*4882a593Smuzhiyun 	timehist_print_migration_event(sched, evsel, sample, machine, thread);
2521*4882a593Smuzhiyun 
2522*4882a593Smuzhiyun 	return 0;
2523*4882a593Smuzhiyun }
2524*4882a593Smuzhiyun 
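/*
 * Core of 'perf sched timehist': handle one sched_switch.  Sample
 * and previous-event times are clamped to the requested time window
 * (sched->ptime), the per-thread runtime stats are updated (charged
 * to the previously running task in idle-hist mode), and one line
 * per switch is printed unless only the summary was requested.
 */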
2525*4882a593Smuzhiyun static int timehist_sched_change_event(struct perf_tool *tool,
2526*4882a593Smuzhiyun 				       union perf_event *event,
2527*4882a593Smuzhiyun 				       struct evsel *evsel,
2528*4882a593Smuzhiyun 				       struct perf_sample *sample,
2529*4882a593Smuzhiyun 				       struct machine *machine)
2530*4882a593Smuzhiyun {
2531*4882a593Smuzhiyun 	struct perf_sched *sched = container_of(tool, struct perf_sched, tool);
2532*4882a593Smuzhiyun 	struct perf_time_interval *ptime = &sched->ptime;
2533*4882a593Smuzhiyun 	struct addr_location al;
2534*4882a593Smuzhiyun 	struct thread *thread;
2535*4882a593Smuzhiyun 	struct thread_runtime *tr = NULL;
2536*4882a593Smuzhiyun 	u64 tprev, t = sample->time;
2537*4882a593Smuzhiyun 	int rc = 0;
2538*4882a593Smuzhiyun 	int state = evsel__intval(evsel, sample, "prev_state");
2539*4882a593Smuzhiyun 
2540*4882a593Smuzhiyun 	if (machine__resolve(machine, &al, sample) < 0) {
2541*4882a593Smuzhiyun 		pr_err("problem processing %d event. skipping it\n",
2542*4882a593Smuzhiyun 		       event->header.type);
2543*4882a593Smuzhiyun 		rc = -1;
2544*4882a593Smuzhiyun 		goto out;
2545*4882a593Smuzhiyun 	}
2546*4882a593Smuzhiyun 
2547*4882a593Smuzhiyun 	thread = timehist_get_thread(sched, sample, machine, evsel);
2548*4882a593Smuzhiyun 	if (thread == NULL) {
2549*4882a593Smuzhiyun 		rc = -1;
2550*4882a593Smuzhiyun 		goto out;
2551*4882a593Smuzhiyun 	}
2552*4882a593Smuzhiyun 
2553*4882a593Smuzhiyun 	if (timehist_skip_sample(sched, thread, evsel, sample))
2554*4882a593Smuzhiyun 		goto out;
2555*4882a593Smuzhiyun 
2556*4882a593Smuzhiyun 	tr = thread__get_runtime(thread);
2557*4882a593Smuzhiyun 	if (tr == NULL) {
2558*4882a593Smuzhiyun 		rc = -1;
2559*4882a593Smuzhiyun 		goto out;
2560*4882a593Smuzhiyun 	}
2561*4882a593Smuzhiyun 
2562*4882a593Smuzhiyun 	tprev = evsel__get_time(evsel, sample->cpu);
2563*4882a593Smuzhiyun 
2564*4882a593Smuzhiyun 	/*
2565*4882a593Smuzhiyun 	 * If a start time was given:
2566*4882a593Smuzhiyun 	 * - samples before the window of interest are skipped
2567*4882a593Smuzhiyun 	 * - a tprev before the window is clamped to the window start
2568*4882a593Smuzhiyun 	 */
2569*4882a593Smuzhiyun 	if (ptime->start && ptime->start > t)
2570*4882a593Smuzhiyun 		goto out;
2571*4882a593Smuzhiyun 
2572*4882a593Smuzhiyun 	if (tprev && ptime->start > tprev)
2573*4882a593Smuzhiyun 		tprev = ptime->start;
2574*4882a593Smuzhiyun 
2575*4882a593Smuzhiyun 	/*
2576*4882a593Smuzhiyun 	 * If an end time was given:
2577*4882a593Smuzhiyun 	 * - if the previous sched event is already past the window, we are done
2578*4882a593Smuzhiyun 	 * - a sample time beyond the window is clamped to the window end,
2579*4882a593Smuzhiyun 	 *   closing out the stats for the time span of interest
2580*4882a593Smuzhiyun 	 */
2581*4882a593Smuzhiyun 	if (ptime->end) {
2582*4882a593Smuzhiyun 		if (tprev > ptime->end)
2583*4882a593Smuzhiyun 			goto out;
2584*4882a593Smuzhiyun 
2585*4882a593Smuzhiyun 		if (t > ptime->end)
2586*4882a593Smuzhiyun 			t = ptime->end;
2587*4882a593Smuzhiyun 	}
2588*4882a593Smuzhiyun 
2589*4882a593Smuzhiyun 	if (!sched->idle_hist || thread->tid == 0) {
2590*4882a593Smuzhiyun 		if (!cpu_list || test_bit(sample->cpu, cpu_bitmap))
2591*4882a593Smuzhiyun 			timehist_update_runtime_stats(tr, t, tprev);
2592*4882a593Smuzhiyun 
2593*4882a593Smuzhiyun 		if (sched->idle_hist) {
2594*4882a593Smuzhiyun 			struct idle_thread_runtime *itr = (void *)tr;
2595*4882a593Smuzhiyun 			struct thread_runtime *last_tr;
2596*4882a593Smuzhiyun 
2597*4882a593Smuzhiyun 			BUG_ON(thread->tid != 0);
2598*4882a593Smuzhiyun 
2599*4882a593Smuzhiyun 			if (itr->last_thread == NULL)
2600*4882a593Smuzhiyun 				goto out;
2601*4882a593Smuzhiyun 
2602*4882a593Smuzhiyun 			/* add current idle time as last thread's runtime */
2603*4882a593Smuzhiyun 			last_tr = thread__get_runtime(itr->last_thread);
2604*4882a593Smuzhiyun 			if (last_tr == NULL)
2605*4882a593Smuzhiyun 				goto out;
2606*4882a593Smuzhiyun 
2607*4882a593Smuzhiyun 			timehist_update_runtime_stats(last_tr, t, tprev);
2608*4882a593Smuzhiyun 			/*
2609*4882a593Smuzhiyun 			 * Clear the last thread's delta times: they are not
2610*4882a593Smuzhiyun 			 * updated here and would otherwise show stale values
2611*4882a593Smuzhiyun 			 * next time.  Only total run time and run stats matter.
2612*4882a593Smuzhiyun 			 */
2613*4882a593Smuzhiyun 			last_tr->dt_run = 0;
2614*4882a593Smuzhiyun 			last_tr->dt_delay = 0;
2615*4882a593Smuzhiyun 			last_tr->dt_sleep = 0;
2616*4882a593Smuzhiyun 			last_tr->dt_iowait = 0;
2617*4882a593Smuzhiyun 			last_tr->dt_preempt = 0;
2618*4882a593Smuzhiyun 
2619*4882a593Smuzhiyun 			if (itr->cursor.nr)
2620*4882a593Smuzhiyun 				callchain_append(&itr->callchain, &itr->cursor, t - tprev);
2621*4882a593Smuzhiyun 
2622*4882a593Smuzhiyun 			itr->last_thread = NULL;
2623*4882a593Smuzhiyun 		}
2624*4882a593Smuzhiyun 	}
2625*4882a593Smuzhiyun 
2626*4882a593Smuzhiyun 	if (!sched->summary_only)
2627*4882a593Smuzhiyun 		timehist_print_sample(sched, evsel, sample, &al, thread, t, state);
2628*4882a593Smuzhiyun 
2629*4882a593Smuzhiyun out:
2630*4882a593Smuzhiyun 	if (sched->hist_time.start == 0 && t >= ptime->start)
2631*4882a593Smuzhiyun 		sched->hist_time.start = t;
2632*4882a593Smuzhiyun 	if (ptime->end == 0 || t <= ptime->end)
2633*4882a593Smuzhiyun 		sched->hist_time.end = t;
2634*4882a593Smuzhiyun 
2635*4882a593Smuzhiyun 	if (tr) {
2636*4882a593Smuzhiyun 		/* time of this sched_switch event becomes last time task seen */
2637*4882a593Smuzhiyun 		tr->last_time = sample->time;
2638*4882a593Smuzhiyun 
2639*4882a593Smuzhiyun 		/* last state is used to determine where to account wait time */
2640*4882a593Smuzhiyun 		tr->last_state = state;
2641*4882a593Smuzhiyun 
2642*4882a593Smuzhiyun 		/* sched-out event for the task, so reset its ready-to-run time */
2643*4882a593Smuzhiyun 		tr->ready_to_run = 0;
2644*4882a593Smuzhiyun 	}
2645*4882a593Smuzhiyun 
2646*4882a593Smuzhiyun 	evsel__save_time(evsel, sample->time, sample->cpu);
2647*4882a593Smuzhiyun 
2648*4882a593Smuzhiyun 	return rc;
2649*4882a593Smuzhiyun }
2650*4882a593Smuzhiyun 
2651*4882a593Smuzhiyun static int timehist_sched_switch_event(struct perf_tool *tool,
2652*4882a593Smuzhiyun 			     union perf_event *event,
2653*4882a593Smuzhiyun 			     struct evsel *evsel,
2654*4882a593Smuzhiyun 			     struct perf_sample *sample,
2655*4882a593Smuzhiyun 			     struct machine *machine)
2656*4882a593Smuzhiyun {
2657*4882a593Smuzhiyun 	return timehist_sched_change_event(tool, event, evsel, sample, machine);
2658*4882a593Smuzhiyun }
2659*4882a593Smuzhiyun 
2660*4882a593Smuzhiyun static int process_lost(struct perf_tool *tool __maybe_unused,
2661*4882a593Smuzhiyun 			union perf_event *event,
2662*4882a593Smuzhiyun 			struct perf_sample *sample,
2663*4882a593Smuzhiyun 			struct machine *machine __maybe_unused)
2664*4882a593Smuzhiyun {
2665*4882a593Smuzhiyun 	char tstr[64];
2666*4882a593Smuzhiyun 
2667*4882a593Smuzhiyun 	timestamp__scnprintf_usec(sample->time, tstr, sizeof(tstr));
2668*4882a593Smuzhiyun 	printf("%15s ", tstr);
2669*4882a593Smuzhiyun 	printf("lost %" PRI_lu64 " events on cpu %d\n", event->lost.lost, sample->cpu);
2670*4882a593Smuzhiyun 
2671*4882a593Smuzhiyun 	return 0;
2672*4882a593Smuzhiyun }
2673*4882a593Smuzhiyun 
2674*4882a593Smuzhiyun 
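/*
 * One summary line per task: sched-in count, total run time,
 * min/avg/max run intervals, relative stddev in percent, and the
 * number of migrations.
 */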
2675*4882a593Smuzhiyun static void print_thread_runtime(struct thread *t,
2676*4882a593Smuzhiyun 				 struct thread_runtime *r)
2677*4882a593Smuzhiyun {
2678*4882a593Smuzhiyun 	double mean = avg_stats(&r->run_stats);
2679*4882a593Smuzhiyun 	float stddev;
2680*4882a593Smuzhiyun 
2681*4882a593Smuzhiyun 	printf("%*s   %5d  %9" PRIu64 " ",
2682*4882a593Smuzhiyun 	       comm_width, timehist_get_commstr(t), t->ppid,
2683*4882a593Smuzhiyun 	       (u64) r->run_stats.n);
2684*4882a593Smuzhiyun 
2685*4882a593Smuzhiyun 	print_sched_time(r->total_run_time, 8);
2686*4882a593Smuzhiyun 	stddev = rel_stddev_stats(stddev_stats(&r->run_stats), mean);
2687*4882a593Smuzhiyun 	print_sched_time(r->run_stats.min, 6);
2688*4882a593Smuzhiyun 	printf(" ");
2689*4882a593Smuzhiyun 	print_sched_time((u64) mean, 6);
2690*4882a593Smuzhiyun 	printf(" ");
2691*4882a593Smuzhiyun 	print_sched_time(r->run_stats.max, 6);
2692*4882a593Smuzhiyun 	printf("  ");
2693*4882a593Smuzhiyun 	printf("%5.2f", stddev);
2694*4882a593Smuzhiyun 	printf("   %5" PRIu64, r->migrations);
2695*4882a593Smuzhiyun 	printf("\n");
2696*4882a593Smuzhiyun }
2697*4882a593Smuzhiyun 
2698*4882a593Smuzhiyun static void print_thread_waittime(struct thread *t,
2699*4882a593Smuzhiyun 				  struct thread_runtime *r)
2700*4882a593Smuzhiyun {
2701*4882a593Smuzhiyun 	printf("%*s   %5d  %9" PRIu64 " ",
2702*4882a593Smuzhiyun 	       comm_width, timehist_get_commstr(t), t->ppid,
2703*4882a593Smuzhiyun 	       (u64) r->run_stats.n);
2704*4882a593Smuzhiyun 
2705*4882a593Smuzhiyun 	print_sched_time(r->total_run_time, 8);
2706*4882a593Smuzhiyun 	print_sched_time(r->total_sleep_time, 6);
2707*4882a593Smuzhiyun 	printf(" ");
2708*4882a593Smuzhiyun 	print_sched_time(r->total_iowait_time, 6);
2709*4882a593Smuzhiyun 	printf(" ");
2710*4882a593Smuzhiyun 	print_sched_time(r->total_preempt_time, 6);
2711*4882a593Smuzhiyun 	printf(" ");
2712*4882a593Smuzhiyun 	print_sched_time(r->total_delay_time, 6);
2713*4882a593Smuzhiyun 	printf("\n");
2714*4882a593Smuzhiyun }
2715*4882a593Smuzhiyun 
2716*4882a593Smuzhiyun struct total_run_stats {
2717*4882a593Smuzhiyun 	struct perf_sched *sched;
2718*4882a593Smuzhiyun 	u64  sched_count;
2719*4882a593Smuzhiyun 	u64  task_count;
2720*4882a593Smuzhiyun 	u64  total_run_time;
2721*4882a593Smuzhiyun };
2722*4882a593Smuzhiyun 
2723*4882a593Smuzhiyun static int __show_thread_runtime(struct thread *t, void *priv)
2724*4882a593Smuzhiyun {
2725*4882a593Smuzhiyun 	struct total_run_stats *stats = priv;
2726*4882a593Smuzhiyun 	struct thread_runtime *r;
2727*4882a593Smuzhiyun 
2728*4882a593Smuzhiyun 	if (thread__is_filtered(t))
2729*4882a593Smuzhiyun 		return 0;
2730*4882a593Smuzhiyun 
2731*4882a593Smuzhiyun 	r = thread__priv(t);
2732*4882a593Smuzhiyun 	if (r && r->run_stats.n) {
2733*4882a593Smuzhiyun 		stats->task_count++;
2734*4882a593Smuzhiyun 		stats->sched_count += r->run_stats.n;
2735*4882a593Smuzhiyun 		stats->total_run_time += r->total_run_time;
2736*4882a593Smuzhiyun 
2737*4882a593Smuzhiyun 		if (stats->sched->show_state)
2738*4882a593Smuzhiyun 			print_thread_waittime(t, r);
2739*4882a593Smuzhiyun 		else
2740*4882a593Smuzhiyun 			print_thread_runtime(t, r);
2741*4882a593Smuzhiyun 	}
2742*4882a593Smuzhiyun 
2743*4882a593Smuzhiyun 	return 0;
2744*4882a593Smuzhiyun }
2745*4882a593Smuzhiyun 
2746*4882a593Smuzhiyun static int show_thread_runtime(struct thread *t, void *priv)
2747*4882a593Smuzhiyun {
2748*4882a593Smuzhiyun 	if (t->dead)
2749*4882a593Smuzhiyun 		return 0;
2750*4882a593Smuzhiyun 
2751*4882a593Smuzhiyun 	return __show_thread_runtime(t, priv);
2752*4882a593Smuzhiyun }
2753*4882a593Smuzhiyun 
2754*4882a593Smuzhiyun static int show_deadthread_runtime(struct thread *t, void *priv)
2755*4882a593Smuzhiyun {
2756*4882a593Smuzhiyun 	if (!t->dead)
2757*4882a593Smuzhiyun 		return 0;
2758*4882a593Smuzhiyun 
2759*4882a593Smuzhiyun 	return __show_thread_runtime(t, priv);
2760*4882a593Smuzhiyun }
2761*4882a593Smuzhiyun 
2762*4882a593Smuzhiyun static size_t callchain__fprintf_folded(FILE *fp, struct callchain_node *node)
2763*4882a593Smuzhiyun {
2764*4882a593Smuzhiyun 	const char *sep = " <- ";
2765*4882a593Smuzhiyun 	struct callchain_list *chain;
2766*4882a593Smuzhiyun 	size_t ret = 0;
2767*4882a593Smuzhiyun 	char bf[1024];
2768*4882a593Smuzhiyun 	bool first;
2769*4882a593Smuzhiyun 
2770*4882a593Smuzhiyun 	if (node == NULL)
2771*4882a593Smuzhiyun 		return 0;
2772*4882a593Smuzhiyun 
2773*4882a593Smuzhiyun 	ret = callchain__fprintf_folded(fp, node->parent);
2774*4882a593Smuzhiyun 	first = (ret == 0);
2775*4882a593Smuzhiyun 
2776*4882a593Smuzhiyun 	list_for_each_entry(chain, &node->val, list) {
2777*4882a593Smuzhiyun 		if (chain->ip >= PERF_CONTEXT_MAX)
2778*4882a593Smuzhiyun 			continue;
2779*4882a593Smuzhiyun 		if (chain->ms.sym && chain->ms.sym->ignore)
2780*4882a593Smuzhiyun 			continue;
2781*4882a593Smuzhiyun 		ret += fprintf(fp, "%s%s", first ? "" : sep,
2782*4882a593Smuzhiyun 			       callchain_list__sym_name(chain, bf, sizeof(bf),
2783*4882a593Smuzhiyun 							false));
2784*4882a593Smuzhiyun 		first = false;
2785*4882a593Smuzhiyun 	}
2786*4882a593Smuzhiyun 
2787*4882a593Smuzhiyun 	return ret;
2788*4882a593Smuzhiyun }
2789*4882a593Smuzhiyun 
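/*
 * Print the folded callchains accumulated against a CPU's idle time,
 * e.g. (illustrative):
 *   Idle time (msec)     Count  Callchains
 *         12.345             3  schedule <- do_idle <- cpu_startup_entry
 */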
2790*4882a593Smuzhiyun static size_t timehist_print_idlehist_callchain(struct rb_root_cached *root)
2791*4882a593Smuzhiyun {
2792*4882a593Smuzhiyun 	size_t ret = 0;
2793*4882a593Smuzhiyun 	FILE *fp = stdout;
2794*4882a593Smuzhiyun 	struct callchain_node *chain;
2795*4882a593Smuzhiyun 	struct rb_node *rb_node = rb_first_cached(root);
2796*4882a593Smuzhiyun 
2797*4882a593Smuzhiyun 	printf("  %16s  %8s  %s\n", "Idle time (msec)", "Count", "Callchains");
2798*4882a593Smuzhiyun 	printf("  %.16s  %.8s  %.50s\n", graph_dotted_line, graph_dotted_line,
2799*4882a593Smuzhiyun 	       graph_dotted_line);
2800*4882a593Smuzhiyun 
2801*4882a593Smuzhiyun 	while (rb_node) {
2802*4882a593Smuzhiyun 		chain = rb_entry(rb_node, struct callchain_node, rb_node);
2803*4882a593Smuzhiyun 		rb_node = rb_next(rb_node);
2804*4882a593Smuzhiyun 
2805*4882a593Smuzhiyun 		ret += fprintf(fp, "  ");
2806*4882a593Smuzhiyun 		print_sched_time(chain->hit, 12);
2807*4882a593Smuzhiyun 		ret += 16;  /* print_sched_time() prints its 2nd arg + 4 chars */
2808*4882a593Smuzhiyun 		ret += fprintf(fp, " %8d  ", chain->count);
2809*4882a593Smuzhiyun 		ret += callchain__fprintf_folded(fp, chain);
2810*4882a593Smuzhiyun 		ret += fprintf(fp, "\n");
2811*4882a593Smuzhiyun 	}
2812*4882a593Smuzhiyun 
2813*4882a593Smuzhiyun 	return ret;
2814*4882a593Smuzhiyun }
2815*4882a593Smuzhiyun 
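/*
 * End-of-run summary: a per-task table (runtime, wait time, or idle
 * time depending on mode), the same for terminated tasks, per-CPU
 * idle percentages relative to the observed window (hist_time), and
 * optionally the idle callchains.
 */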
2816*4882a593Smuzhiyun static void timehist_print_summary(struct perf_sched *sched,
2817*4882a593Smuzhiyun 				   struct perf_session *session)
2818*4882a593Smuzhiyun {
2819*4882a593Smuzhiyun 	struct machine *m = &session->machines.host;
2820*4882a593Smuzhiyun 	struct total_run_stats totals;
2821*4882a593Smuzhiyun 	u64 task_count;
2822*4882a593Smuzhiyun 	struct thread *t;
2823*4882a593Smuzhiyun 	struct thread_runtime *r;
2824*4882a593Smuzhiyun 	int i;
2825*4882a593Smuzhiyun 	u64 hist_time = sched->hist_time.end - sched->hist_time.start;
2826*4882a593Smuzhiyun 
2827*4882a593Smuzhiyun 	memset(&totals, 0, sizeof(totals));
2828*4882a593Smuzhiyun 	totals.sched = sched;
2829*4882a593Smuzhiyun 
2830*4882a593Smuzhiyun 	if (sched->idle_hist) {
2831*4882a593Smuzhiyun 		printf("\nIdle-time summary\n");
2832*4882a593Smuzhiyun 		printf("%*s  parent  sched-out  ", comm_width, "comm");
2833*4882a593Smuzhiyun 		printf("  idle-time   min-idle    avg-idle    max-idle  stddev  migrations\n");
2834*4882a593Smuzhiyun 	} else if (sched->show_state) {
2835*4882a593Smuzhiyun 		printf("\nWait-time summary\n");
2836*4882a593Smuzhiyun 		printf("%*s  parent   sched-in  ", comm_width, "comm");
2837*4882a593Smuzhiyun 		printf("   run-time      sleep      iowait     preempt       delay\n");
2838*4882a593Smuzhiyun 	} else {
2839*4882a593Smuzhiyun 		printf("\nRuntime summary\n");
2840*4882a593Smuzhiyun 		printf("%*s  parent   sched-in  ", comm_width, "comm");
2841*4882a593Smuzhiyun 		printf("   run-time    min-run     avg-run     max-run  stddev  migrations\n");
2842*4882a593Smuzhiyun 	}
2843*4882a593Smuzhiyun 	printf("%*s            (count)  ", comm_width, "");
2844*4882a593Smuzhiyun 	printf("     (msec)     (msec)      (msec)      (msec)       %s\n",
2845*4882a593Smuzhiyun 	       sched->show_state ? "(msec)" : "%");
2846*4882a593Smuzhiyun 	printf("%.117s\n", graph_dotted_line);
2847*4882a593Smuzhiyun 
2848*4882a593Smuzhiyun 	machine__for_each_thread(m, show_thread_runtime, &totals);
2849*4882a593Smuzhiyun 	task_count = totals.task_count;
2850*4882a593Smuzhiyun 	if (!task_count)
2851*4882a593Smuzhiyun 		printf("<no still running tasks>\n");
2852*4882a593Smuzhiyun 
2853*4882a593Smuzhiyun 	printf("\nTerminated tasks:\n");
2854*4882a593Smuzhiyun 	machine__for_each_thread(m, show_deadthread_runtime, &totals);
2855*4882a593Smuzhiyun 	if (task_count == totals.task_count)
2856*4882a593Smuzhiyun 		printf("<no terminated tasks>\n");
2857*4882a593Smuzhiyun 
2858*4882a593Smuzhiyun 	/* CPU idle stats not tracked when samples were skipped */
2859*4882a593Smuzhiyun 	if (sched->skipped_samples && !sched->idle_hist)
2860*4882a593Smuzhiyun 		return;
2861*4882a593Smuzhiyun 
2862*4882a593Smuzhiyun 	printf("\nIdle stats:\n");
2863*4882a593Smuzhiyun 	for (i = 0; i < idle_max_cpu; ++i) {
2864*4882a593Smuzhiyun 		if (cpu_list && !test_bit(i, cpu_bitmap))
2865*4882a593Smuzhiyun 			continue;
2866*4882a593Smuzhiyun 
2867*4882a593Smuzhiyun 		t = idle_threads[i];
2868*4882a593Smuzhiyun 		if (!t)
2869*4882a593Smuzhiyun 			continue;
2870*4882a593Smuzhiyun 
2871*4882a593Smuzhiyun 		r = thread__priv(t);
2872*4882a593Smuzhiyun 		if (r && r->run_stats.n) {
2873*4882a593Smuzhiyun 			totals.sched_count += r->run_stats.n;
2874*4882a593Smuzhiyun 			printf("    CPU %2d idle for ", i);
2875*4882a593Smuzhiyun 			print_sched_time(r->total_run_time, 6);
2876*4882a593Smuzhiyun 			printf(" msec  (%6.2f%%)\n", 100.0 * r->total_run_time / hist_time);
2877*4882a593Smuzhiyun 		} else
2878*4882a593Smuzhiyun 			printf("    CPU %2d idle entire time window\n", i);
2879*4882a593Smuzhiyun 	}
2880*4882a593Smuzhiyun 
2881*4882a593Smuzhiyun 	if (sched->idle_hist && sched->show_callchain) {
2882*4882a593Smuzhiyun 		callchain_param.mode  = CHAIN_FOLDED;
2883*4882a593Smuzhiyun 		callchain_param.value = CCVAL_PERIOD;
2884*4882a593Smuzhiyun 
2885*4882a593Smuzhiyun 		callchain_register_param(&callchain_param);
2886*4882a593Smuzhiyun 
2887*4882a593Smuzhiyun 		printf("\nIdle stats by callchain:\n");
2888*4882a593Smuzhiyun 		for (i = 0; i < idle_max_cpu; ++i) {
2889*4882a593Smuzhiyun 			struct idle_thread_runtime *itr;
2890*4882a593Smuzhiyun 
2891*4882a593Smuzhiyun 			t = idle_threads[i];
2892*4882a593Smuzhiyun 			if (!t)
2893*4882a593Smuzhiyun 				continue;
2894*4882a593Smuzhiyun 
2895*4882a593Smuzhiyun 			itr = thread__priv(t);
2896*4882a593Smuzhiyun 			if (itr == NULL)
2897*4882a593Smuzhiyun 				continue;
2898*4882a593Smuzhiyun 
2899*4882a593Smuzhiyun 			callchain_param.sort(&itr->sorted_root.rb_root, &itr->callchain,
2900*4882a593Smuzhiyun 					     0, &callchain_param);
2901*4882a593Smuzhiyun 
2902*4882a593Smuzhiyun 			printf("  CPU %2d:", i);
2903*4882a593Smuzhiyun 			print_sched_time(itr->tr.total_run_time, 6);
2904*4882a593Smuzhiyun 			printf(" msec\n");
2905*4882a593Smuzhiyun 			timehist_print_idlehist_callchain(&itr->sorted_root);
2906*4882a593Smuzhiyun 			printf("\n");
2907*4882a593Smuzhiyun 		}
2908*4882a593Smuzhiyun 	}
2909*4882a593Smuzhiyun 
2910*4882a593Smuzhiyun 	printf("\n"
2911*4882a593Smuzhiyun 	       "    Total number of unique tasks: %" PRIu64 "\n"
2912*4882a593Smuzhiyun 	       "Total number of context switches: %" PRIu64 "\n",
2913*4882a593Smuzhiyun 	       totals.task_count, totals.sched_count);
2914*4882a593Smuzhiyun 
2915*4882a593Smuzhiyun 	printf("           Total run time (msec): ");
2916*4882a593Smuzhiyun 	print_sched_time(totals.total_run_time, 2);
2917*4882a593Smuzhiyun 	printf("\n");
2918*4882a593Smuzhiyun 
2919*4882a593Smuzhiyun 	printf("    Total scheduling time (msec): ");
2920*4882a593Smuzhiyun 	print_sched_time(hist_time, 2);
2921*4882a593Smuzhiyun 	printf(" (x %d)\n", sched->max_cpu);
2922*4882a593Smuzhiyun }
2923*4882a593Smuzhiyun 
2924*4882a593Smuzhiyun typedef int (*sched_handler)(struct perf_tool *tool,
2925*4882a593Smuzhiyun 			  union perf_event *event,
2926*4882a593Smuzhiyun 			  struct evsel *evsel,
2927*4882a593Smuzhiyun 			  struct perf_sample *sample,
2928*4882a593Smuzhiyun 			  struct machine *machine);
2929*4882a593Smuzhiyun 
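/*
 * Generic sample dispatcher: every tracepoint evsel had a handler
 * attached below in perf_sched__timehist(); look it up and call it.
 */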
2930*4882a593Smuzhiyun static int perf_timehist__process_sample(struct perf_tool *tool,
2931*4882a593Smuzhiyun 					 union perf_event *event,
2932*4882a593Smuzhiyun 					 struct perf_sample *sample,
2933*4882a593Smuzhiyun 					 struct evsel *evsel,
2934*4882a593Smuzhiyun 					 struct machine *machine)
2935*4882a593Smuzhiyun {
2936*4882a593Smuzhiyun 	struct perf_sched *sched = container_of(tool, struct perf_sched, tool);
2937*4882a593Smuzhiyun 	int err = 0;
2938*4882a593Smuzhiyun 	int this_cpu = sample->cpu;
2939*4882a593Smuzhiyun 
2940*4882a593Smuzhiyun 	if (this_cpu > sched->max_cpu)
2941*4882a593Smuzhiyun 		sched->max_cpu = this_cpu;
2942*4882a593Smuzhiyun 
2943*4882a593Smuzhiyun 	if (evsel->handler != NULL) {
2944*4882a593Smuzhiyun 		sched_handler f = evsel->handler;
2945*4882a593Smuzhiyun 
2946*4882a593Smuzhiyun 		err = f(tool, event, evsel, sample, machine);
2947*4882a593Smuzhiyun 	}
2948*4882a593Smuzhiyun 
2949*4882a593Smuzhiyun 	return err;
2950*4882a593Smuzhiyun }
2951*4882a593Smuzhiyun 
2952*4882a593Smuzhiyun static int timehist_check_attr(struct perf_sched *sched,
2953*4882a593Smuzhiyun 			       struct evlist *evlist)
2954*4882a593Smuzhiyun {
2955*4882a593Smuzhiyun 	struct evsel *evsel;
2956*4882a593Smuzhiyun 	struct evsel_runtime *er;
2957*4882a593Smuzhiyun 
2958*4882a593Smuzhiyun 	list_for_each_entry(evsel, &evlist->core.entries, core.node) {
2959*4882a593Smuzhiyun 		er = evsel__get_runtime(evsel);
2960*4882a593Smuzhiyun 		if (er == NULL) {
2961*4882a593Smuzhiyun 			pr_err("Failed to allocate memory for evsel runtime data\n");
2962*4882a593Smuzhiyun 			return -1;
2963*4882a593Smuzhiyun 		}
2964*4882a593Smuzhiyun 
2965*4882a593Smuzhiyun 		if (sched->show_callchain && !evsel__has_callchain(evsel)) {
2966*4882a593Smuzhiyun 			pr_info("Samples do not have callchains.\n");
2967*4882a593Smuzhiyun 			sched->show_callchain = 0;
2968*4882a593Smuzhiyun 			symbol_conf.use_callchain = 0;
2969*4882a593Smuzhiyun 		}
2970*4882a593Smuzhiyun 	}
2971*4882a593Smuzhiyun 
2972*4882a593Smuzhiyun 	return 0;
2973*4882a593Smuzhiyun }
2974*4882a593Smuzhiyun 
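/*
 * Driver for 'perf sched timehist': open the perf.data file, install
 * the per-tracepoint handlers (sched_switch must be present), process
 * all events in timestamp order, then print the summary if requested.
 * Typical use:  perf sched record -- <workload>; perf sched timehist
 */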
2975*4882a593Smuzhiyun static int perf_sched__timehist(struct perf_sched *sched)
2976*4882a593Smuzhiyun {
2977*4882a593Smuzhiyun 	struct evsel_str_handler handlers[] = {
2978*4882a593Smuzhiyun 		{ "sched:sched_switch",       timehist_sched_switch_event, },
2979*4882a593Smuzhiyun 		{ "sched:sched_wakeup",	      timehist_sched_wakeup_event, },
2980*4882a593Smuzhiyun 		{ "sched:sched_waking",       timehist_sched_wakeup_event, },
2981*4882a593Smuzhiyun 		{ "sched:sched_wakeup_new",   timehist_sched_wakeup_event, },
2982*4882a593Smuzhiyun 	};
2983*4882a593Smuzhiyun 	const struct evsel_str_handler migrate_handlers[] = {
2984*4882a593Smuzhiyun 		{ "sched:sched_migrate_task", timehist_migrate_task_event, },
2985*4882a593Smuzhiyun 	};
2986*4882a593Smuzhiyun 	struct perf_data data = {
2987*4882a593Smuzhiyun 		.path  = input_name,
2988*4882a593Smuzhiyun 		.mode  = PERF_DATA_MODE_READ,
2989*4882a593Smuzhiyun 		.force = sched->force,
2990*4882a593Smuzhiyun 	};
2991*4882a593Smuzhiyun 
2992*4882a593Smuzhiyun 	struct perf_session *session;
2993*4882a593Smuzhiyun 	struct evlist *evlist;
2994*4882a593Smuzhiyun 	int err = -1;
2995*4882a593Smuzhiyun 
2996*4882a593Smuzhiyun 	/*
2997*4882a593Smuzhiyun 	 * event handlers for timehist option
2998*4882a593Smuzhiyun 	 */
2999*4882a593Smuzhiyun 	sched->tool.sample	 = perf_timehist__process_sample;
3000*4882a593Smuzhiyun 	sched->tool.mmap	 = perf_event__process_mmap;
3001*4882a593Smuzhiyun 	sched->tool.comm	 = perf_event__process_comm;
3002*4882a593Smuzhiyun 	sched->tool.exit	 = perf_event__process_exit;
3003*4882a593Smuzhiyun 	sched->tool.fork	 = perf_event__process_fork;
3004*4882a593Smuzhiyun 	sched->tool.lost	 = process_lost;
3005*4882a593Smuzhiyun 	sched->tool.attr	 = perf_event__process_attr;
3006*4882a593Smuzhiyun 	sched->tool.tracing_data = perf_event__process_tracing_data;
3007*4882a593Smuzhiyun 	sched->tool.build_id	 = perf_event__process_build_id;
3008*4882a593Smuzhiyun 
3009*4882a593Smuzhiyun 	sched->tool.ordered_events = true;
3010*4882a593Smuzhiyun 	sched->tool.ordering_requires_timestamps = true;
3011*4882a593Smuzhiyun 
3012*4882a593Smuzhiyun 	symbol_conf.use_callchain = sched->show_callchain;
3013*4882a593Smuzhiyun 
3014*4882a593Smuzhiyun 	session = perf_session__new(&data, false, &sched->tool);
3015*4882a593Smuzhiyun 	if (IS_ERR(session))
3016*4882a593Smuzhiyun 		return PTR_ERR(session);
3017*4882a593Smuzhiyun 
3018*4882a593Smuzhiyun 	if (cpu_list) {
3019*4882a593Smuzhiyun 		err = perf_session__cpu_bitmap(session, cpu_list, cpu_bitmap);
3020*4882a593Smuzhiyun 		if (err < 0)
3021*4882a593Smuzhiyun 			goto out;
3022*4882a593Smuzhiyun 	}
3023*4882a593Smuzhiyun 
3024*4882a593Smuzhiyun 	evlist = session->evlist;
3025*4882a593Smuzhiyun 
3026*4882a593Smuzhiyun 	symbol__init(&session->header.env);
3027*4882a593Smuzhiyun 
3028*4882a593Smuzhiyun 	if (perf_time__parse_str(&sched->ptime, sched->time_str) != 0) {
3029*4882a593Smuzhiyun 		pr_err("Invalid time string\n");
3030*4882a593Smuzhiyun 		err = -EINVAL; goto out; /* free the session instead of leaking it */
3031*4882a593Smuzhiyun 	}
3032*4882a593Smuzhiyun 
3033*4882a593Smuzhiyun 	if (timehist_check_attr(sched, evlist) != 0)
3034*4882a593Smuzhiyun 		goto out;
3035*4882a593Smuzhiyun 
3036*4882a593Smuzhiyun 	setup_pager();
3037*4882a593Smuzhiyun 
3038*4882a593Smuzhiyun 	/* prefer sched_waking if it is captured */
3039*4882a593Smuzhiyun 	if (perf_evlist__find_tracepoint_by_name(session->evlist,
3040*4882a593Smuzhiyun 						  "sched:sched_waking"))
3041*4882a593Smuzhiyun 		handlers[1].handler = timehist_sched_wakeup_ignore;
3042*4882a593Smuzhiyun 
3043*4882a593Smuzhiyun 	/* setup per-evsel handlers */
3044*4882a593Smuzhiyun 	if (perf_session__set_tracepoints_handlers(session, handlers))
3045*4882a593Smuzhiyun 		goto out;
3046*4882a593Smuzhiyun 
3047*4882a593Smuzhiyun 	/* sched_switch event at a minimum needs to exist */
3048*4882a593Smuzhiyun 	if (!perf_evlist__find_tracepoint_by_name(session->evlist,
3049*4882a593Smuzhiyun 						  "sched:sched_switch")) {
3050*4882a593Smuzhiyun 		pr_err("No sched_switch events found. Have you run 'perf sched record'?\n");
3051*4882a593Smuzhiyun 		goto out;
3052*4882a593Smuzhiyun 	}
3053*4882a593Smuzhiyun 
3054*4882a593Smuzhiyun 	if (sched->show_migrations &&
3055*4882a593Smuzhiyun 	    perf_session__set_tracepoints_handlers(session, migrate_handlers))
3056*4882a593Smuzhiyun 		goto out;
3057*4882a593Smuzhiyun 
3058*4882a593Smuzhiyun 	/* pre-allocate struct for per-CPU idle stats */
3059*4882a593Smuzhiyun 	sched->max_cpu = session->header.env.nr_cpus_online;
3060*4882a593Smuzhiyun 	if (sched->max_cpu == 0)
3061*4882a593Smuzhiyun 		sched->max_cpu = 4;
3062*4882a593Smuzhiyun 	if (init_idle_threads(sched->max_cpu))
3063*4882a593Smuzhiyun 		goto out;
3064*4882a593Smuzhiyun 
3065*4882a593Smuzhiyun 	/* summary_only implies summary option, but don't overwrite summary if set */
3066*4882a593Smuzhiyun 	if (sched->summary_only)
3067*4882a593Smuzhiyun 		sched->summary = sched->summary_only;
3068*4882a593Smuzhiyun 
3069*4882a593Smuzhiyun 	if (!sched->summary_only)
3070*4882a593Smuzhiyun 		timehist_header(sched);
3071*4882a593Smuzhiyun 
3072*4882a593Smuzhiyun 	err = perf_session__process_events(session);
3073*4882a593Smuzhiyun 	if (err) {
3074*4882a593Smuzhiyun 		pr_err("Failed to process events, error %d", err);
3075*4882a593Smuzhiyun 		goto out;
3076*4882a593Smuzhiyun 	}
3077*4882a593Smuzhiyun 
3078*4882a593Smuzhiyun 	sched->nr_events      = evlist->stats.nr_events[0];
3079*4882a593Smuzhiyun 	sched->nr_lost_events = evlist->stats.total_lost;
3080*4882a593Smuzhiyun 	sched->nr_lost_chunks = evlist->stats.nr_events[PERF_RECORD_LOST];
3081*4882a593Smuzhiyun 
3082*4882a593Smuzhiyun 	if (sched->summary)
3083*4882a593Smuzhiyun 		timehist_print_summary(sched, session);
3084*4882a593Smuzhiyun 
3085*4882a593Smuzhiyun out:
3086*4882a593Smuzhiyun 	free_idle_threads();
3087*4882a593Smuzhiyun 	perf_session__delete(session);
3088*4882a593Smuzhiyun 
3089*4882a593Smuzhiyun 	return err;
3090*4882a593Smuzhiyun }
3091*4882a593Smuzhiyun 
3092*4882a593Smuzhiyun 
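/*
 * Report data-quality problems observed while processing: unordered
 * timestamps, lost events, and apparent context-switch bugs.
 */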
3093*4882a593Smuzhiyun static void print_bad_events(struct perf_sched *sched)
3094*4882a593Smuzhiyun {
3095*4882a593Smuzhiyun 	if (sched->nr_unordered_timestamps && sched->nr_timestamps) {
3096*4882a593Smuzhiyun 		printf("  INFO: %.3f%% unordered timestamps (%ld out of %ld)\n",
3097*4882a593Smuzhiyun 			(double)sched->nr_unordered_timestamps/(double)sched->nr_timestamps*100.0,
3098*4882a593Smuzhiyun 			sched->nr_unordered_timestamps, sched->nr_timestamps);
3099*4882a593Smuzhiyun 	}
3100*4882a593Smuzhiyun 	if (sched->nr_lost_events && sched->nr_events) {
3101*4882a593Smuzhiyun 		printf("  INFO: %.3f%% lost events (%ld out of %ld, in %ld chunks)\n",
3102*4882a593Smuzhiyun 			(double)sched->nr_lost_events/(double)sched->nr_events * 100.0,
3103*4882a593Smuzhiyun 			sched->nr_lost_events, sched->nr_events, sched->nr_lost_chunks);
3104*4882a593Smuzhiyun 	}
3105*4882a593Smuzhiyun 	if (sched->nr_context_switch_bugs && sched->nr_timestamps) {
3106*4882a593Smuzhiyun 		printf("  INFO: %.3f%% context switch bugs (%ld out of %ld)",
3107*4882a593Smuzhiyun 			(double)sched->nr_context_switch_bugs/(double)sched->nr_timestamps*100.0,
3108*4882a593Smuzhiyun 			sched->nr_context_switch_bugs, sched->nr_timestamps);
3109*4882a593Smuzhiyun 		if (sched->nr_lost_events)
3110*4882a593Smuzhiyun 			printf(" (due to lost events?)");
3111*4882a593Smuzhiyun 		printf("\n");
3112*4882a593Smuzhiyun 	}
3113*4882a593Smuzhiyun }
3114*4882a593Smuzhiyun 
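/*
 * Insert @data into the comm-sorted rbtree, folding its runtime,
 * atom and latency totals into the existing node when a thread with
 * the same comm is already present.
 */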
3115*4882a593Smuzhiyun static void __merge_work_atoms(struct rb_root_cached *root, struct work_atoms *data)
3116*4882a593Smuzhiyun {
3117*4882a593Smuzhiyun 	struct rb_node **new = &(root->rb_root.rb_node), *parent = NULL;
3118*4882a593Smuzhiyun 	struct work_atoms *this;
3119*4882a593Smuzhiyun 	const char *comm = thread__comm_str(data->thread), *this_comm;
3120*4882a593Smuzhiyun 	bool leftmost = true;
3121*4882a593Smuzhiyun 
3122*4882a593Smuzhiyun 	while (*new) {
3123*4882a593Smuzhiyun 		int cmp;
3124*4882a593Smuzhiyun 
3125*4882a593Smuzhiyun 		this = container_of(*new, struct work_atoms, node);
3126*4882a593Smuzhiyun 		parent = *new;
3127*4882a593Smuzhiyun 
3128*4882a593Smuzhiyun 		this_comm = thread__comm_str(this->thread);
3129*4882a593Smuzhiyun 		cmp = strcmp(comm, this_comm);
3130*4882a593Smuzhiyun 		if (cmp > 0) {
3131*4882a593Smuzhiyun 			new = &((*new)->rb_left);
3132*4882a593Smuzhiyun 		} else if (cmp < 0) {
3133*4882a593Smuzhiyun 			new = &((*new)->rb_right);
3134*4882a593Smuzhiyun 			leftmost = false;
3135*4882a593Smuzhiyun 		} else {
3136*4882a593Smuzhiyun 			this->num_merged++;
3137*4882a593Smuzhiyun 			this->total_runtime += data->total_runtime;
3138*4882a593Smuzhiyun 			this->nb_atoms += data->nb_atoms;
3139*4882a593Smuzhiyun 			this->total_lat += data->total_lat;
3140*4882a593Smuzhiyun 			list_splice(&data->work_list, &this->work_list);
3141*4882a593Smuzhiyun 			if (this->max_lat < data->max_lat) {
3142*4882a593Smuzhiyun 				this->max_lat = data->max_lat;
3143*4882a593Smuzhiyun 				this->max_lat_start = data->max_lat_start;
3144*4882a593Smuzhiyun 				this->max_lat_end = data->max_lat_end;
3145*4882a593Smuzhiyun 			}
3146*4882a593Smuzhiyun 			zfree(&data);
3147*4882a593Smuzhiyun 			return;
3148*4882a593Smuzhiyun 		}
3149*4882a593Smuzhiyun 	}
3150*4882a593Smuzhiyun 
3151*4882a593Smuzhiyun 	data->num_merged++;
3152*4882a593Smuzhiyun 	rb_link_node(&data->node, parent, new);
3153*4882a593Smuzhiyun 	rb_insert_color_cached(&data->node, root, leftmost);
3154*4882a593Smuzhiyun }
3155*4882a593Smuzhiyun 
3156*4882a593Smuzhiyun static void perf_sched__merge_lat(struct perf_sched *sched)
3157*4882a593Smuzhiyun {
3158*4882a593Smuzhiyun 	struct work_atoms *data;
3159*4882a593Smuzhiyun 	struct rb_node *node;
3160*4882a593Smuzhiyun 
3161*4882a593Smuzhiyun 	if (sched->skip_merge)
3162*4882a593Smuzhiyun 		return;
3163*4882a593Smuzhiyun 
3164*4882a593Smuzhiyun 	while ((node = rb_first_cached(&sched->atom_root))) {
3165*4882a593Smuzhiyun 		rb_erase_cached(node, &sched->atom_root);
3166*4882a593Smuzhiyun 		data = rb_entry(node, struct work_atoms, node);
3167*4882a593Smuzhiyun 		__merge_work_atoms(&sched->merged_atom_root, data);
3168*4882a593Smuzhiyun 	}
3169*4882a593Smuzhiyun }
3170*4882a593Smuzhiyun 
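/*
 * 'perf sched latency': read the events, merge and sort the per-task
 * work atoms, and print the latency table plus a data-quality footer.
 */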
3171*4882a593Smuzhiyun static int perf_sched__lat(struct perf_sched *sched)
3172*4882a593Smuzhiyun {
3173*4882a593Smuzhiyun 	struct rb_node *next;
3174*4882a593Smuzhiyun 
3175*4882a593Smuzhiyun 	setup_pager();
3176*4882a593Smuzhiyun 
3177*4882a593Smuzhiyun 	if (perf_sched__read_events(sched))
3178*4882a593Smuzhiyun 		return -1;
3179*4882a593Smuzhiyun 
3180*4882a593Smuzhiyun 	perf_sched__merge_lat(sched);
3181*4882a593Smuzhiyun 	perf_sched__sort_lat(sched);
3182*4882a593Smuzhiyun 
3183*4882a593Smuzhiyun 	printf("\n -------------------------------------------------------------------------------------------------------------------------------------------\n");
3184*4882a593Smuzhiyun 	printf("  Task                  |   Runtime ms  | Switches | Avg delay ms    | Max delay ms    | Max delay start           | Max delay end          |\n");
3185*4882a593Smuzhiyun 	printf(" -------------------------------------------------------------------------------------------------------------------------------------------\n");
3186*4882a593Smuzhiyun 
3187*4882a593Smuzhiyun 	next = rb_first_cached(&sched->sorted_atom_root);
3188*4882a593Smuzhiyun 
3189*4882a593Smuzhiyun 	while (next) {
3190*4882a593Smuzhiyun 		struct work_atoms *work_list;
3191*4882a593Smuzhiyun 
3192*4882a593Smuzhiyun 		work_list = rb_entry(next, struct work_atoms, node);
3193*4882a593Smuzhiyun 		output_lat_thread(sched, work_list);
3194*4882a593Smuzhiyun 		next = rb_next(next);
3195*4882a593Smuzhiyun 		thread__zput(work_list->thread);
3196*4882a593Smuzhiyun 	}
3197*4882a593Smuzhiyun 
3198*4882a593Smuzhiyun 	printf(" -----------------------------------------------------------------------------------------------------------------\n");
3199*4882a593Smuzhiyun 	printf("  TOTAL:                |%11.3f ms |%9" PRIu64 " |\n",
3200*4882a593Smuzhiyun 		(double)sched->all_runtime / NSEC_PER_MSEC, sched->all_count);
3201*4882a593Smuzhiyun 
3202*4882a593Smuzhiyun 	printf(" ---------------------------------------------------\n");
3203*4882a593Smuzhiyun 
3204*4882a593Smuzhiyun 	print_bad_events(sched);
3205*4882a593Smuzhiyun 	printf("\n");
3206*4882a593Smuzhiyun 
3207*4882a593Smuzhiyun 	return 0;
3208*4882a593Smuzhiyun }
3209*4882a593Smuzhiyun 
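/*
 * Resolve the CPU-related options of 'perf sched map': allocate the
 * compact-mode slots and parse the cpus list when one was given.
 */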
3210*4882a593Smuzhiyun static int setup_map_cpus(struct perf_sched *sched)
3211*4882a593Smuzhiyun {
3212*4882a593Smuzhiyun 	struct perf_cpu_map *map;
3213*4882a593Smuzhiyun 
3214*4882a593Smuzhiyun 	sched->max_cpu  = sysconf(_SC_NPROCESSORS_CONF);
3215*4882a593Smuzhiyun 
3216*4882a593Smuzhiyun 	if (sched->map.comp) {
3217*4882a593Smuzhiyun 		sched->map.comp_cpus = zalloc(sched->max_cpu * sizeof(int));
3218*4882a593Smuzhiyun 		if (!sched->map.comp_cpus)
3219*4882a593Smuzhiyun 			return -1;
3220*4882a593Smuzhiyun 	}
3221*4882a593Smuzhiyun 
3222*4882a593Smuzhiyun 	if (!sched->map.cpus_str)
3223*4882a593Smuzhiyun 		return 0;
3224*4882a593Smuzhiyun 
3225*4882a593Smuzhiyun 	map = perf_cpu_map__new(sched->map.cpus_str);
3226*4882a593Smuzhiyun 	if (!map) {
3227*4882a593Smuzhiyun 		pr_err("failed to get cpus map from %s\n", sched->map.cpus_str);
3228*4882a593Smuzhiyun 		return -1;
3229*4882a593Smuzhiyun 	}
3230*4882a593Smuzhiyun 
3231*4882a593Smuzhiyun 	sched->map.cpus = map;
3232*4882a593Smuzhiyun 	return 0;
3233*4882a593Smuzhiyun }
3234*4882a593Smuzhiyun 
3235*4882a593Smuzhiyun static int setup_color_pids(struct perf_sched *sched)
3236*4882a593Smuzhiyun {
3237*4882a593Smuzhiyun 	struct perf_thread_map *map;
3238*4882a593Smuzhiyun 
3239*4882a593Smuzhiyun 	if (!sched->map.color_pids_str)
3240*4882a593Smuzhiyun 		return 0;
3241*4882a593Smuzhiyun 
3242*4882a593Smuzhiyun 	map = thread_map__new_by_tid_str(sched->map.color_pids_str);
3243*4882a593Smuzhiyun 	if (!map) {
3244*4882a593Smuzhiyun 		pr_err("failed to get thread map from %s\n", sched->map.color_pids_str);
3245*4882a593Smuzhiyun 		return -1;
3246*4882a593Smuzhiyun 	}
3247*4882a593Smuzhiyun 
3248*4882a593Smuzhiyun 	sched->map.color_pids = map;
3249*4882a593Smuzhiyun 	return 0;
3250*4882a593Smuzhiyun }
3251*4882a593Smuzhiyun 
3252*4882a593Smuzhiyun static int setup_color_cpus(struct perf_sched *sched)
3253*4882a593Smuzhiyun {
3254*4882a593Smuzhiyun 	struct perf_cpu_map *map;
3255*4882a593Smuzhiyun 
3256*4882a593Smuzhiyun 	if (!sched->map.color_cpus_str)
3257*4882a593Smuzhiyun 		return 0;
3258*4882a593Smuzhiyun 
3259*4882a593Smuzhiyun 	map = perf_cpu_map__new(sched->map.color_cpus_str);
3260*4882a593Smuzhiyun 	if (!map) {
3261*4882a593Smuzhiyun 		pr_err("failed to get cpus map from %s\n", sched->map.color_cpus_str);
3262*4882a593Smuzhiyun 		return -1;
3263*4882a593Smuzhiyun 	}
3264*4882a593Smuzhiyun 
3265*4882a593Smuzhiyun 	sched->map.color_cpus = map;
3266*4882a593Smuzhiyun 	return 0;
3267*4882a593Smuzhiyun }
3268*4882a593Smuzhiyun 
3269*4882a593Smuzhiyun static int perf_sched__map(struct perf_sched *sched)
3270*4882a593Smuzhiyun {
3271*4882a593Smuzhiyun 	if (setup_map_cpus(sched))
3272*4882a593Smuzhiyun 		return -1;
3273*4882a593Smuzhiyun 
3274*4882a593Smuzhiyun 	if (setup_color_pids(sched))
3275*4882a593Smuzhiyun 		return -1;
3276*4882a593Smuzhiyun 
3277*4882a593Smuzhiyun 	if (setup_color_cpus(sched))
3278*4882a593Smuzhiyun 		return -1;
3279*4882a593Smuzhiyun 
3280*4882a593Smuzhiyun 	setup_pager();
3281*4882a593Smuzhiyun 	if (perf_sched__read_events(sched))
3282*4882a593Smuzhiyun 		return -1;
3283*4882a593Smuzhiyun 	print_bad_events(sched);
3284*4882a593Smuzhiyun 	return 0;
3285*4882a593Smuzhiyun }
3286*4882a593Smuzhiyun 
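/*
 * 'perf sched replay': calibrate measurement overhead, spawn one
 * thread per recorded task, and re-execute the captured scheduling
 * pattern replay_repeat times.
 */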
3287*4882a593Smuzhiyun static int perf_sched__replay(struct perf_sched *sched)
3288*4882a593Smuzhiyun {
3289*4882a593Smuzhiyun 	unsigned long i;
3290*4882a593Smuzhiyun 
3291*4882a593Smuzhiyun 	calibrate_run_measurement_overhead(sched);
3292*4882a593Smuzhiyun 	calibrate_sleep_measurement_overhead(sched);
3293*4882a593Smuzhiyun 
3294*4882a593Smuzhiyun 	test_calibrations(sched);
3295*4882a593Smuzhiyun 
3296*4882a593Smuzhiyun 	if (perf_sched__read_events(sched))
3297*4882a593Smuzhiyun 		return -1;
3298*4882a593Smuzhiyun 
3299*4882a593Smuzhiyun 	printf("nr_run_events:        %ld\n", sched->nr_run_events);
3300*4882a593Smuzhiyun 	printf("nr_sleep_events:      %ld\n", sched->nr_sleep_events);
3301*4882a593Smuzhiyun 	printf("nr_wakeup_events:     %ld\n", sched->nr_wakeup_events);
3302*4882a593Smuzhiyun 
3303*4882a593Smuzhiyun 	if (sched->targetless_wakeups)
3304*4882a593Smuzhiyun 		printf("target-less wakeups:  %ld\n", sched->targetless_wakeups);
3305*4882a593Smuzhiyun 	if (sched->multitarget_wakeups)
3306*4882a593Smuzhiyun 		printf("multi-target wakeups: %ld\n", sched->multitarget_wakeups);
3307*4882a593Smuzhiyun 	if (sched->nr_run_events_optimized)
3308*4882a593Smuzhiyun 		printf("run atoms optimized: %ld\n",
3309*4882a593Smuzhiyun 			sched->nr_run_events_optimized);
3310*4882a593Smuzhiyun 
3311*4882a593Smuzhiyun 	print_task_traces(sched);
3312*4882a593Smuzhiyun 	add_cross_task_wakeups(sched);
3313*4882a593Smuzhiyun 
3314*4882a593Smuzhiyun 	create_tasks(sched);
3315*4882a593Smuzhiyun 	printf("------------------------------------------------------------\n");
3316*4882a593Smuzhiyun 	for (i = 0; i < sched->replay_repeat; i++)
3317*4882a593Smuzhiyun 		run_one_test(sched);
3318*4882a593Smuzhiyun 
3319*4882a593Smuzhiyun 	return 0;
3320*4882a593Smuzhiyun }
3321*4882a593Smuzhiyun 
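/*
 * Parse the comma/space separated sort keys into sched->sort_list;
 * an unknown key aborts with the usage message.  "pid" is always
 * added to the internal cmp_pid list.
 */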
3322*4882a593Smuzhiyun static void setup_sorting(struct perf_sched *sched, const struct option *options,
3323*4882a593Smuzhiyun 			  const char * const usage_msg[])
3324*4882a593Smuzhiyun {
3325*4882a593Smuzhiyun 	char *tmp, *tok, *str = strdup(sched->sort_order);
3326*4882a593Smuzhiyun 
3327*4882a593Smuzhiyun 	for (tok = strtok_r(str, ", ", &tmp);
3328*4882a593Smuzhiyun 			tok; tok = strtok_r(NULL, ", ", &tmp)) {
3329*4882a593Smuzhiyun 		if (sort_dimension__add(tok, &sched->sort_list) < 0) {
3330*4882a593Smuzhiyun 			usage_with_options_msg(usage_msg, options,
3331*4882a593Smuzhiyun 					"Unknown --sort key: `%s'", tok);
3332*4882a593Smuzhiyun 		}
3333*4882a593Smuzhiyun 	}
3334*4882a593Smuzhiyun 
3335*4882a593Smuzhiyun 	free(str);
3336*4882a593Smuzhiyun 
3337*4882a593Smuzhiyun 	sort_dimension__add("pid", &sched->cmp_pid);
3338*4882a593Smuzhiyun }
3339*4882a593Smuzhiyun 
3340*4882a593Smuzhiyun static bool schedstat_events_exposed(void)
3341*4882a593Smuzhiyun {
3342*4882a593Smuzhiyun 	/*
3343*4882a593Smuzhiyun 	 * Select "sched:sched_stat_wait" event to check
3344*4882a593Smuzhiyun 	 * whether schedstat tracepoints are exposed.
3345*4882a593Smuzhiyun 	 */
3346*4882a593Smuzhiyun 	return IS_ERR(trace_event__tp_format("sched", "sched_stat_wait")) ?
3347*4882a593Smuzhiyun 		false : true;
3348*4882a593Smuzhiyun }
3349*4882a593Smuzhiyun 
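/*
 * Build an argv for 'perf record' with the sched tracepoints needed
 * by the analysis sub-commands appended, then hand off to
 * cmd_record().
 */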
3350*4882a593Smuzhiyun static int __cmd_record(int argc, const char **argv)
3351*4882a593Smuzhiyun {
3352*4882a593Smuzhiyun 	unsigned int rec_argc, i, j;
3353*4882a593Smuzhiyun 	const char **rec_argv;
3354*4882a593Smuzhiyun 	const char * const record_args[] = {
3355*4882a593Smuzhiyun 		"record",
3356*4882a593Smuzhiyun 		"-a",
3357*4882a593Smuzhiyun 		"-R",
3358*4882a593Smuzhiyun 		"-m", "1024",
3359*4882a593Smuzhiyun 		"-c", "1",
3360*4882a593Smuzhiyun 		"-e", "sched:sched_switch",
3361*4882a593Smuzhiyun 		"-e", "sched:sched_stat_runtime",
3362*4882a593Smuzhiyun 		"-e", "sched:sched_process_fork",
3363*4882a593Smuzhiyun 		"-e", "sched:sched_wakeup_new",
3364*4882a593Smuzhiyun 		"-e", "sched:sched_migrate_task",
3365*4882a593Smuzhiyun 	};
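	/*
	 * The assembled command is roughly (illustrative):
	 *   perf record -a -R -m 1024 -c 1 -e sched:sched_switch ...
	 *        -e sched:sched_waking <user args>
	 * with sched_wakeup substituted when sched_waking is unavailable.
	 */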
3366*4882a593Smuzhiyun 
3367*4882a593Smuzhiyun 	/*
3368*4882a593Smuzhiyun 	 * The tracepoints trace_sched_stat_{wait, sleep, iowait} are not
3369*4882a593Smuzhiyun 	 * exposed to userspace unless CONFIG_SCHEDSTATS is set.  To keep
3370*4882a593Smuzhiyun 	 * "perf sched record" from failing, probe whether the schedstat
3371*4882a593Smuzhiyun 	 * events actually exist before asking to record them.
3372*4882a593Smuzhiyun 	 */
3373*4882a593Smuzhiyun 	const char * const schedstat_args[] = {
3374*4882a593Smuzhiyun 		"-e", "sched:sched_stat_wait",
3375*4882a593Smuzhiyun 		"-e", "sched:sched_stat_sleep",
3376*4882a593Smuzhiyun 		"-e", "sched:sched_stat_iowait",
3377*4882a593Smuzhiyun 	};
3378*4882a593Smuzhiyun 	unsigned int schedstat_argc = schedstat_events_exposed() ?
3379*4882a593Smuzhiyun 		ARRAY_SIZE(schedstat_args) : 0;
3380*4882a593Smuzhiyun 
3381*4882a593Smuzhiyun 	struct tep_event *waking_event;
3382*4882a593Smuzhiyun 
3383*4882a593Smuzhiyun 	/*
3384*4882a593Smuzhiyun 	 * +2 for either "-e", "sched:sched_wakeup" or
3385*4882a593Smuzhiyun 	 * "-e", "sched:sched_waking"
3386*4882a593Smuzhiyun 	 */
3387*4882a593Smuzhiyun 	rec_argc = ARRAY_SIZE(record_args) + 2 + schedstat_argc + argc - 1;
3388*4882a593Smuzhiyun 	rec_argv = calloc(rec_argc + 1, sizeof(char *));
3389*4882a593Smuzhiyun 
3390*4882a593Smuzhiyun 	if (rec_argv == NULL)
3391*4882a593Smuzhiyun 		return -ENOMEM;
3392*4882a593Smuzhiyun 
3393*4882a593Smuzhiyun 	for (i = 0; i < ARRAY_SIZE(record_args); i++)
3394*4882a593Smuzhiyun 		rec_argv[i] = strdup(record_args[i]);
3395*4882a593Smuzhiyun 
3396*4882a593Smuzhiyun 	rec_argv[i++] = "-e";
3397*4882a593Smuzhiyun 	waking_event = trace_event__tp_format("sched", "sched_waking");
3398*4882a593Smuzhiyun 	if (!IS_ERR(waking_event))
3399*4882a593Smuzhiyun 		rec_argv[i++] = strdup("sched:sched_waking");
3400*4882a593Smuzhiyun 	else
3401*4882a593Smuzhiyun 		rec_argv[i++] = strdup("sched:sched_wakeup");
3402*4882a593Smuzhiyun 
3403*4882a593Smuzhiyun 	for (j = 0; j < schedstat_argc; j++)
3404*4882a593Smuzhiyun 		rec_argv[i++] = strdup(schedstat_args[j]);
3405*4882a593Smuzhiyun 
3406*4882a593Smuzhiyun 	for (j = 1; j < (unsigned int)argc; j++, i++)
3407*4882a593Smuzhiyun 		rec_argv[i] = argv[j];
3408*4882a593Smuzhiyun 
3409*4882a593Smuzhiyun 	BUG_ON(i != rec_argc);
3410*4882a593Smuzhiyun 
3411*4882a593Smuzhiyun 	return cmd_record(i, rec_argv);
3412*4882a593Smuzhiyun }
3413*4882a593Smuzhiyun 
3414*4882a593Smuzhiyun int cmd_sched(int argc, const char **argv)
3415*4882a593Smuzhiyun {
3416*4882a593Smuzhiyun 	static const char default_sort_order[] = "avg, max, switch, runtime";
3417*4882a593Smuzhiyun 	struct perf_sched sched = {
3418*4882a593Smuzhiyun 		.tool = {
3419*4882a593Smuzhiyun 			.sample		 = perf_sched__process_tracepoint_sample,
3420*4882a593Smuzhiyun 			.comm		 = perf_sched__process_comm,
3421*4882a593Smuzhiyun 			.namespaces	 = perf_event__process_namespaces,
3422*4882a593Smuzhiyun 			.lost		 = perf_event__process_lost,
3423*4882a593Smuzhiyun 			.fork		 = perf_sched__process_fork_event,
3424*4882a593Smuzhiyun 			.ordered_events = true,
3425*4882a593Smuzhiyun 		},
3426*4882a593Smuzhiyun 		.cmp_pid	      = LIST_HEAD_INIT(sched.cmp_pid),
3427*4882a593Smuzhiyun 		.sort_list	      = LIST_HEAD_INIT(sched.sort_list),
3428*4882a593Smuzhiyun 		.start_work_mutex     = PTHREAD_MUTEX_INITIALIZER,
3429*4882a593Smuzhiyun 		.work_done_wait_mutex = PTHREAD_MUTEX_INITIALIZER,
3430*4882a593Smuzhiyun 		.sort_order	      = default_sort_order,
3431*4882a593Smuzhiyun 		.replay_repeat	      = 10,
3432*4882a593Smuzhiyun 		.profile_cpu	      = -1,
3433*4882a593Smuzhiyun 		.next_shortname1      = 'A',
3434*4882a593Smuzhiyun 		.next_shortname2      = '0',
3435*4882a593Smuzhiyun 		.skip_merge           = 0,
3436*4882a593Smuzhiyun 		.show_callchain	      = 1,
3437*4882a593Smuzhiyun 		.max_stack            = 5,
3438*4882a593Smuzhiyun 	};
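
	/*
	 * Defaults worth noting: replay re-runs the captured workload 10
	 * times, timehist callchains are on and capped at 5 frames, and the
	 * map view derives two-character task shortnames ('A0', 'A1', ...)
	 * from the two next_shortname counters.
	 */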
3439*4882a593Smuzhiyun 	const struct option sched_options[] = {
3440*4882a593Smuzhiyun 	OPT_STRING('i', "input", &input_name, "file",
3441*4882a593Smuzhiyun 		    "input file name"),
3442*4882a593Smuzhiyun 	OPT_INCR('v', "verbose", &verbose,
3443*4882a593Smuzhiyun 		    "be more verbose (show symbol address, etc)"),
3444*4882a593Smuzhiyun 	OPT_BOOLEAN('D', "dump-raw-trace", &dump_trace,
3445*4882a593Smuzhiyun 		    "dump raw trace in ASCII"),
3446*4882a593Smuzhiyun 	OPT_BOOLEAN('f', "force", &sched.force, "don't complain, do it"),
3447*4882a593Smuzhiyun 	OPT_END()
3448*4882a593Smuzhiyun 	};
3449*4882a593Smuzhiyun 	const struct option latency_options[] = {
3450*4882a593Smuzhiyun 	OPT_STRING('s', "sort", &sched.sort_order, "key[,key2...]",
3451*4882a593Smuzhiyun 		   "sort by key(s): runtime, switch, avg, max"),
3452*4882a593Smuzhiyun 	OPT_INTEGER('C', "CPU", &sched.profile_cpu,
3453*4882a593Smuzhiyun 		    "CPU to profile on"),
3454*4882a593Smuzhiyun 	OPT_BOOLEAN('p', "pids", &sched.skip_merge,
3455*4882a593Smuzhiyun 		    "latency stats per pid instead of per comm"),
3456*4882a593Smuzhiyun 	OPT_PARENT(sched_options)
3457*4882a593Smuzhiyun 	};
3458*4882a593Smuzhiyun 	const struct option replay_options[] = {
3459*4882a593Smuzhiyun 	OPT_UINTEGER('r', "repeat", &sched.replay_repeat,
3460*4882a593Smuzhiyun 		     "repeat the workload replay N times (-1: infinite)"),
3461*4882a593Smuzhiyun 	OPT_PARENT(sched_options)
3462*4882a593Smuzhiyun 	};
3463*4882a593Smuzhiyun 	const struct option map_options[] = {
3464*4882a593Smuzhiyun 	OPT_BOOLEAN(0, "compact", &sched.map.comp,
3465*4882a593Smuzhiyun 		    "map output in compact mode"),
3466*4882a593Smuzhiyun 	OPT_STRING(0, "color-pids", &sched.map.color_pids_str, "pids",
3467*4882a593Smuzhiyun 		   "highlight given pids in map"),
3468*4882a593Smuzhiyun 	OPT_STRING(0, "color-cpus", &sched.map.color_cpus_str, "cpus",
3469*4882a593Smuzhiyun                     "highlight given CPUs in map"),
3470*4882a593Smuzhiyun 	OPT_STRING(0, "cpus", &sched.map.cpus_str, "cpus",
3471*4882a593Smuzhiyun                     "display given CPUs in map"),
3472*4882a593Smuzhiyun 	OPT_PARENT(sched_options)
3473*4882a593Smuzhiyun 	};
3474*4882a593Smuzhiyun 	const struct option timehist_options[] = {
3475*4882a593Smuzhiyun 	OPT_STRING('k', "vmlinux", &symbol_conf.vmlinux_name,
3476*4882a593Smuzhiyun 		   "file", "vmlinux pathname"),
3477*4882a593Smuzhiyun 	OPT_STRING(0, "kallsyms", &symbol_conf.kallsyms_name,
3478*4882a593Smuzhiyun 		   "file", "kallsyms pathname"),
3479*4882a593Smuzhiyun 	OPT_BOOLEAN('g', "call-graph", &sched.show_callchain,
3480*4882a593Smuzhiyun 		    "Display call chains if present (default on)"),
3481*4882a593Smuzhiyun 	OPT_UINTEGER(0, "max-stack", &sched.max_stack,
3482*4882a593Smuzhiyun 		   "Maximum number of functions to display in a backtrace"),
3483*4882a593Smuzhiyun 	OPT_STRING(0, "symfs", &symbol_conf.symfs, "directory",
3484*4882a593Smuzhiyun 		    "Look for files with symbols relative to this directory"),
3485*4882a593Smuzhiyun 	OPT_BOOLEAN('s', "summary", &sched.summary_only,
3486*4882a593Smuzhiyun 		    "Show only the summary with statistics"),
3487*4882a593Smuzhiyun 	OPT_BOOLEAN('S', "with-summary", &sched.summary,
3488*4882a593Smuzhiyun 		    "Show all events and the summary with statistics"),
3489*4882a593Smuzhiyun 	OPT_BOOLEAN('w', "wakeups", &sched.show_wakeups, "Show wakeup events"),
3490*4882a593Smuzhiyun 	OPT_BOOLEAN('n', "next", &sched.show_next, "Show next task"),
3491*4882a593Smuzhiyun 	OPT_BOOLEAN('M', "migrations", &sched.show_migrations, "Show migration events"),
3492*4882a593Smuzhiyun 	OPT_BOOLEAN('V', "cpu-visual", &sched.show_cpu_visual, "Add CPU visual"),
3493*4882a593Smuzhiyun 	OPT_BOOLEAN('I', "idle-hist", &sched.idle_hist, "Show idle events only"),
3494*4882a593Smuzhiyun 	OPT_STRING(0, "time", &sched.time_str, "str",
3495*4882a593Smuzhiyun 		   "Time span for analysis (start,stop)"),
3496*4882a593Smuzhiyun 	OPT_BOOLEAN(0, "state", &sched.show_state, "Show task state when scheduled out"),
3497*4882a593Smuzhiyun 	OPT_STRING('p', "pid", &symbol_conf.pid_list_str, "pid[,pid...]",
3498*4882a593Smuzhiyun 		   "analyze events only for given process id(s)"),
3499*4882a593Smuzhiyun 	OPT_STRING('t', "tid", &symbol_conf.tid_list_str, "tid[,tid...]",
3500*4882a593Smuzhiyun 		   "analyze events only for given thread id(s)"),
3501*4882a593Smuzhiyun 	OPT_STRING('C', "cpu", &cpu_list, "cpu", "list of cpus to profile"),
3502*4882a593Smuzhiyun 	OPT_PARENT(sched_options)
3503*4882a593Smuzhiyun 	};
3504*4882a593Smuzhiyun 
3505*4882a593Smuzhiyun 	const char * const latency_usage[] = {
3506*4882a593Smuzhiyun 		"perf sched latency [<options>]",
3507*4882a593Smuzhiyun 		NULL
3508*4882a593Smuzhiyun 	};
3509*4882a593Smuzhiyun 	const char * const replay_usage[] = {
3510*4882a593Smuzhiyun 		"perf sched replay [<options>]",
3511*4882a593Smuzhiyun 		NULL
3512*4882a593Smuzhiyun 	};
3513*4882a593Smuzhiyun 	const char * const map_usage[] = {
3514*4882a593Smuzhiyun 		"perf sched map [<options>]",
3515*4882a593Smuzhiyun 		NULL
3516*4882a593Smuzhiyun 	};
3517*4882a593Smuzhiyun 	const char * const timehist_usage[] = {
3518*4882a593Smuzhiyun 		"perf sched timehist [<options>]",
3519*4882a593Smuzhiyun 		NULL
3520*4882a593Smuzhiyun 	};
3521*4882a593Smuzhiyun 	const char *const sched_subcommands[] = { "record", "latency", "map",
3522*4882a593Smuzhiyun 						  "replay", "script",
3523*4882a593Smuzhiyun 						  "timehist", NULL };
3524*4882a593Smuzhiyun 	const char *sched_usage[] = {
3525*4882a593Smuzhiyun 		NULL,
3526*4882a593Smuzhiyun 		NULL
3527*4882a593Smuzhiyun 	};
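
	/*
	 * sched_usage is left NULL so that parse_options_subcommand() can
	 * synthesize the usage line from sched_subcommands, along the lines
	 * of "perf sched [<options>] {record|latency|map|replay|script|timehist}".
	 */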
3528*4882a593Smuzhiyun 	struct trace_sched_handler lat_ops  = {
3529*4882a593Smuzhiyun 		.wakeup_event	    = latency_wakeup_event,
3530*4882a593Smuzhiyun 		.switch_event	    = latency_switch_event,
3531*4882a593Smuzhiyun 		.runtime_event	    = latency_runtime_event,
3532*4882a593Smuzhiyun 		.migrate_task_event = latency_migrate_task_event,
3533*4882a593Smuzhiyun 	};
3534*4882a593Smuzhiyun 	struct trace_sched_handler map_ops  = {
3535*4882a593Smuzhiyun 		.switch_event	    = map_switch_event,
3536*4882a593Smuzhiyun 	};
3537*4882a593Smuzhiyun 	struct trace_sched_handler replay_ops  = {
3538*4882a593Smuzhiyun 		.wakeup_event	    = replay_wakeup_event,
3539*4882a593Smuzhiyun 		.switch_event	    = replay_switch_event,
3540*4882a593Smuzhiyun 		.fork_event	    = replay_fork_event,
3541*4882a593Smuzhiyun 	};
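
	/*
	 * Each subcommand installs one of these trace_sched_handler vtables;
	 * the shared sample dispatcher then routes sched_switch, wakeup,
	 * runtime and migration tracepoints to whichever callbacks the
	 * selected mode provides, skipping the rest.
	 */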
3542*4882a593Smuzhiyun 	unsigned int i;
3543*4882a593Smuzhiyun 
3544*4882a593Smuzhiyun 	for (i = 0; i < ARRAY_SIZE(sched.curr_pid); i++)
3545*4882a593Smuzhiyun 		sched.curr_pid[i] = -1;
3546*4882a593Smuzhiyun 
3547*4882a593Smuzhiyun 	argc = parse_options_subcommand(argc, argv, sched_options, sched_subcommands,
3548*4882a593Smuzhiyun 					sched_usage, PARSE_OPT_STOP_AT_NON_OPTION);
3549*4882a593Smuzhiyun 	if (!argc)
3550*4882a593Smuzhiyun 		usage_with_options(sched_usage, sched_options);
3551*4882a593Smuzhiyun 
3552*4882a593Smuzhiyun 	/*
3553*4882a593Smuzhiyun 	 * Aliased to 'perf script' for now:
3554*4882a593Smuzhiyun 	 */
3555*4882a593Smuzhiyun 	if (!strcmp(argv[0], "script"))
3556*4882a593Smuzhiyun 		return cmd_script(argc, argv);
3557*4882a593Smuzhiyun 
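	/*
	 * The remaining subcommands are matched on short prefixes ("rec",
	 * "lat", "rep"), so e.g. "perf sched lat" is accepted as an
	 * abbreviation of "perf sched latency".
	 */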
3558*4882a593Smuzhiyun 	if (!strncmp(argv[0], "rec", 3)) {
3559*4882a593Smuzhiyun 		return __cmd_record(argc, argv);
3560*4882a593Smuzhiyun 	} else if (!strncmp(argv[0], "lat", 3)) {
3561*4882a593Smuzhiyun 		sched.tp_handler = &lat_ops;
3562*4882a593Smuzhiyun 		if (argc > 1) {
3563*4882a593Smuzhiyun 			argc = parse_options(argc, argv, latency_options, latency_usage, 0);
3564*4882a593Smuzhiyun 			if (argc)
3565*4882a593Smuzhiyun 				usage_with_options(latency_usage, latency_options);
3566*4882a593Smuzhiyun 		}
3567*4882a593Smuzhiyun 		setup_sorting(&sched, latency_options, latency_usage);
3568*4882a593Smuzhiyun 		return perf_sched__lat(&sched);
3569*4882a593Smuzhiyun 	} else if (!strcmp(argv[0], "map")) {
3570*4882a593Smuzhiyun 		if (argc) {
3571*4882a593Smuzhiyun 			argc = parse_options(argc, argv, map_options, map_usage, 0);
3572*4882a593Smuzhiyun 			if (argc)
3573*4882a593Smuzhiyun 				usage_with_options(map_usage, map_options);
3574*4882a593Smuzhiyun 		}
3575*4882a593Smuzhiyun 		sched.tp_handler = &map_ops;
3576*4882a593Smuzhiyun 		setup_sorting(&sched, latency_options, latency_usage);
3577*4882a593Smuzhiyun 		return perf_sched__map(&sched);
3578*4882a593Smuzhiyun 	} else if (!strncmp(argv[0], "rep", 3)) {
3579*4882a593Smuzhiyun 		sched.tp_handler = &replay_ops;
3580*4882a593Smuzhiyun 		if (argc) {
3581*4882a593Smuzhiyun 			argc = parse_options(argc, argv, replay_options, replay_usage, 0);
3582*4882a593Smuzhiyun 			if (argc)
3583*4882a593Smuzhiyun 				usage_with_options(replay_usage, replay_options);
3584*4882a593Smuzhiyun 		}
3585*4882a593Smuzhiyun 		return perf_sched__replay(&sched);
3586*4882a593Smuzhiyun 	} else if (!strcmp(argv[0], "timehist")) {
3587*4882a593Smuzhiyun 		if (argc) {
3588*4882a593Smuzhiyun 			argc = parse_options(argc, argv, timehist_options,
3589*4882a593Smuzhiyun 					     timehist_usage, 0);
3590*4882a593Smuzhiyun 			if (argc)
3591*4882a593Smuzhiyun 				usage_with_options(timehist_usage, timehist_options);
3592*4882a593Smuzhiyun 		}
3593*4882a593Smuzhiyun 		if ((sched.show_wakeups || sched.show_next) &&
3594*4882a593Smuzhiyun 		    sched.summary_only) {
3595*4882a593Smuzhiyun 			pr_err(" Error: -s and -[n|w] are mutually exclusive.\n");
3596*4882a593Smuzhiyun 			parse_options_usage(timehist_usage, timehist_options, "s", true);
3597*4882a593Smuzhiyun 			if (sched.show_wakeups)
3598*4882a593Smuzhiyun 				parse_options_usage(NULL, timehist_options, "w", true);
3599*4882a593Smuzhiyun 			if (sched.show_next)
3600*4882a593Smuzhiyun 				parse_options_usage(NULL, timehist_options, "n", true);
3601*4882a593Smuzhiyun 			return -EINVAL;
3602*4882a593Smuzhiyun 		}
3603*4882a593Smuzhiyun 
3604*4882a593Smuzhiyun 		return perf_sched__timehist(&sched);
3605*4882a593Smuzhiyun 	} else {
3606*4882a593Smuzhiyun 		usage_with_options(sched_usage, sched_options);
3607*4882a593Smuzhiyun 	}
3608*4882a593Smuzhiyun 
3609*4882a593Smuzhiyun 	return 0;
3610*4882a593Smuzhiyun }
3611
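
/*
 * Typical workflow (a sketch):
 *
 *	perf sched record sleep 10	# capture 10s of system-wide activity
 *	perf sched latency --sort max	# worst wakeup latencies, per comm
 *	perf sched map --compact	# per-CPU context-switch map
 *	perf sched timehist -w		# event timeline including wakeups
 *	perf sched replay -r 5		# re-simulate the workload 5 times
 */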