xref: /OK3568_Linux_fs/kernel/arch/x86/include/asm/tsc.h (revision 4882a59341e53eb6f0b4789bf948001014eff981)
1*4882a593Smuzhiyun /* SPDX-License-Identifier: GPL-2.0 */
2*4882a593Smuzhiyun /*
3*4882a593Smuzhiyun  * x86 TSC related functions
4*4882a593Smuzhiyun  */
5*4882a593Smuzhiyun #ifndef _ASM_X86_TSC_H
6*4882a593Smuzhiyun #define _ASM_X86_TSC_H
7*4882a593Smuzhiyun 
8*4882a593Smuzhiyun #include <asm/processor.h>
9*4882a593Smuzhiyun #include <asm/cpufeature.h>
10*4882a593Smuzhiyun 
11*4882a593Smuzhiyun /*
12*4882a593Smuzhiyun  * Standard way to access the cycle counter.
13*4882a593Smuzhiyun  */
14*4882a593Smuzhiyun typedef unsigned long long cycles_t;
15*4882a593Smuzhiyun 
16*4882a593Smuzhiyun extern unsigned int cpu_khz;
17*4882a593Smuzhiyun extern unsigned int tsc_khz;
18*4882a593Smuzhiyun 
19*4882a593Smuzhiyun extern void disable_TSC(void);
20*4882a593Smuzhiyun 
get_cycles(void)21*4882a593Smuzhiyun static inline cycles_t get_cycles(void)
22*4882a593Smuzhiyun {
23*4882a593Smuzhiyun 	if (!IS_ENABLED(CONFIG_X86_TSC) &&
24*4882a593Smuzhiyun 	    !cpu_feature_enabled(X86_FEATURE_TSC))
25*4882a593Smuzhiyun 		return 0;
26*4882a593Smuzhiyun 	return rdtsc();
27*4882a593Smuzhiyun }
28*4882a593Smuzhiyun #define get_cycles get_cycles
29*4882a593Smuzhiyun 
30*4882a593Smuzhiyun extern struct system_counterval_t convert_art_to_tsc(u64 art);
31*4882a593Smuzhiyun extern struct system_counterval_t convert_art_ns_to_tsc(u64 art_ns);
32*4882a593Smuzhiyun 
33*4882a593Smuzhiyun extern void tsc_early_init(void);
34*4882a593Smuzhiyun extern void tsc_init(void);
35*4882a593Smuzhiyun extern unsigned long calibrate_delay_is_known(void);
36*4882a593Smuzhiyun extern void mark_tsc_unstable(char *reason);
37*4882a593Smuzhiyun extern int unsynchronized_tsc(void);
38*4882a593Smuzhiyun extern int check_tsc_unstable(void);
39*4882a593Smuzhiyun extern void mark_tsc_async_resets(char *reason);
40*4882a593Smuzhiyun extern unsigned long native_calibrate_cpu_early(void);
41*4882a593Smuzhiyun extern unsigned long native_calibrate_tsc(void);
42*4882a593Smuzhiyun extern unsigned long long native_sched_clock_from_tsc(u64 tsc);
43*4882a593Smuzhiyun 
44*4882a593Smuzhiyun extern int tsc_clocksource_reliable;
45*4882a593Smuzhiyun #ifdef CONFIG_X86_TSC
46*4882a593Smuzhiyun extern bool tsc_async_resets;
47*4882a593Smuzhiyun #else
48*4882a593Smuzhiyun # define tsc_async_resets	false
49*4882a593Smuzhiyun #endif
50*4882a593Smuzhiyun 
51*4882a593Smuzhiyun /*
52*4882a593Smuzhiyun  * Boot-time check whether the TSCs are synchronized across
53*4882a593Smuzhiyun  * all CPUs/cores:
54*4882a593Smuzhiyun  */
55*4882a593Smuzhiyun #ifdef CONFIG_X86_TSC
56*4882a593Smuzhiyun extern bool tsc_store_and_check_tsc_adjust(bool bootcpu);
57*4882a593Smuzhiyun extern void tsc_verify_tsc_adjust(bool resume);
58*4882a593Smuzhiyun extern void check_tsc_sync_source(int cpu);
59*4882a593Smuzhiyun extern void check_tsc_sync_target(void);
60*4882a593Smuzhiyun #else
/*
 * Stub for !CONFIG_X86_TSC builds: there is no TSC_ADJUST state to
 * store or check, so report "no adjustment seen".
 */
static inline bool tsc_store_and_check_tsc_adjust(bool bootcpu)
{
	return false;
}
/*
 * Stub for !CONFIG_X86_TSC builds: TSC_ADJUST verification (including
 * on resume) is a no-op.
 */
static inline void tsc_verify_tsc_adjust(bool resume)
{
}
/*
 * Stub for !CONFIG_X86_TSC builds: no cross-CPU TSC synchronization
 * check is performed on the source side.
 */
static inline void check_tsc_sync_source(int cpu)
{
}
/*
 * Stub for !CONFIG_X86_TSC builds: no cross-CPU TSC synchronization
 * check is performed on the target side.
 */
static inline void check_tsc_sync_target(void)
{
}
65*4882a593Smuzhiyun #endif
66*4882a593Smuzhiyun 
67*4882a593Smuzhiyun extern int notsc_setup(char *);
68*4882a593Smuzhiyun extern void tsc_save_sched_clock_state(void);
69*4882a593Smuzhiyun extern void tsc_restore_sched_clock_state(void);
70*4882a593Smuzhiyun 
71*4882a593Smuzhiyun unsigned long cpu_khz_from_msr(void);
72*4882a593Smuzhiyun 
73*4882a593Smuzhiyun #endif /* _ASM_X86_TSC_H */
74