/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_SCHED_CLOCK_H
#define _LINUX_SCHED_CLOCK_H

#include <linux/smp.h>

/*
 * Do not use outside of architecture code which knows its limitations.
 *
 * sched_clock() makes no promise of monotonicity or of bounded drift
 * between CPUs, and using it directly (which you should not) requires
 * disabling IRQs.
 *
 * Please use one of the three interfaces below.
 */
extern unsigned long long notrace sched_clock(void);

/*
 * See the comment in kernel/sched/clock.c
 */
extern u64 running_clock(void);
extern u64 sched_clock_cpu(int cpu);

extern void sched_clock_init(void);
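/*
 * Illustrative sketch, not part of the original header: a minimal example of
 * the preferred pattern, using local_clock() rather than raw sched_clock()
 * to time a short, CPU-local section of code.  The names foo_measure() and
 * foo_do_work() are hypothetical and exist only for this example.
 *
 *	static void foo_measure(void)
 *	{
 *		u64 t0, t1;
 *
 *		preempt_disable();	// stay on one CPU; deltas are only
 *					// meaningful within a single CPU
 *		t0 = local_clock();
 *		foo_do_work();
 *		t1 = local_clock();
 *		preempt_enable();
 *
 *		pr_info("foo work took %llu ns\n",
 *			(unsigned long long)(t1 - t0));
 *	}
 */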
#ifndef CONFIG_HAVE_UNSTABLE_SCHED_CLOCK
static inline void sched_clock_tick(void)
{
}

static inline void clear_sched_clock_stable(void)
{
}

static inline void sched_clock_idle_sleep_event(void)
{
}

static inline void sched_clock_idle_wakeup_event(void)
{
}

static inline u64 cpu_clock(int cpu)
{
	return sched_clock();
}

static inline u64 local_clock(void)
{
	return sched_clock();
}
#else
extern int sched_clock_stable(void);
extern void clear_sched_clock_stable(void);

/*
 * When sched_clock_stable(), __sched_clock_offset provides the offset
 * between local_clock() and sched_clock().
 */
extern u64 __sched_clock_offset;

extern void sched_clock_tick(void);
extern void sched_clock_tick_stable(void);
extern void sched_clock_idle_sleep_event(void);
extern void sched_clock_idle_wakeup_event(void);

/*
 * As outlined in kernel/sched/clock.c, cpu_clock() and local_clock() provide
 * a fast, high-resolution, nanosecond time source that is monotonic for a
 * given cpu argument and has bounded drift between CPUs.
 *
 * ######################### BIG FAT WARNING ##########################
 * # When comparing cpu_clock(i) to cpu_clock(j) for i != j, time can #
 * # go backwards !!                                                  #
 * ####################################################################
 */
static inline u64 cpu_clock(int cpu)
{
	return sched_clock_cpu(cpu);
}

static inline u64 local_clock(void)
{
	return sched_clock_cpu(raw_smp_processor_id());
}
#endif

#ifdef CONFIG_IRQ_TIME_ACCOUNTING
/*
 * An interface to opt in at runtime to IRQ time accounting based on
 * sched_clock.  The opt-in is explicit so that configurations with a
 * slow sched_clock() do not pay the performance penalty.
 */
extern void enable_sched_clock_irqtime(void);
extern void disable_sched_clock_irqtime(void);
#else
static inline void enable_sched_clock_irqtime(void) {}
static inline void disable_sched_clock_irqtime(void) {}
#endif

#endif /* _LINUX_SCHED_CLOCK_H */
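/*
 * Illustrative sketch, not part of the original header: the IRQ time
 * accounting opt-in above is meant to be exercised from architecture or
 * platform timer setup code once a sufficiently fast sched_clock() is known
 * to be available.  foo_timer_init() and foo_sched_clock_is_fast() are
 * hypothetical names used only for this example.
 *
 *	static void __init foo_timer_init(void)
 *	{
 *		// ... register the platform's sched_clock() source here ...
 *
 *		if (foo_sched_clock_is_fast())
 *			enable_sched_clock_irqtime();
 *	}
 */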