/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_X86_PVCLOCK_H
#define _ASM_X86_PVCLOCK_H

#include <asm/clocksource.h>
#include <asm/pvclock-abi.h>

/* some helper functions for xen and kvm pv clock sources */
u64 pvclock_clocksource_read(struct pvclock_vcpu_time_info *src);
u8 pvclock_read_flags(struct pvclock_vcpu_time_info *src);
void pvclock_set_flags(u8 flags);
unsigned long pvclock_tsc_khz(struct pvclock_vcpu_time_info *src);
void pvclock_read_wallclock(struct pvclock_wall_clock *wall,
			    struct pvclock_vcpu_time_info *vcpu,
			    struct timespec64 *ts);
void pvclock_resume(void);

void pvclock_touch_watchdogs(void);

static __always_inline
unsigned pvclock_read_begin(const struct pvclock_vcpu_time_info *src)
{
	unsigned version = src->version & ~1;
	/* Make sure that the version is read before the data. */
	virt_rmb();
	return version;
}

static __always_inline
bool pvclock_read_retry(const struct pvclock_vcpu_time_info *src,
			unsigned version)
{
	/* Make sure that the version is re-read after the data. */
	virt_rmb();
	return unlikely(version != src->version);
}
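
/*
 * Note: pvclock_read_begin()/pvclock_read_retry() implement a
 * seqcount-style read protocol against the version field that the
 * hypervisor bumps around every update of the time fields: readers
 * snapshot an even version, read the data, and retry if the version
 * has changed in the meantime. A usage sketch follows
 * __pvclock_read_cycles() below.
 */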

/*
 * Scale a 64-bit delta by shifting it and then multiplying by a 32-bit
 * fraction, yielding a 64-bit result.
 */
static inline u64 pvclock_scale_delta(u64 delta, u32 mul_frac, int shift)
{
	u64 product;
#ifdef __i386__
	u32 tmp1, tmp2;
#else
	ulong tmp;
#endif

	if (shift < 0)
		delta >>= -shift;
	else
		delta <<= shift;

#ifdef __i386__
	__asm__ (
		"mul %5 ; "
		"mov %4,%%eax ; "
		"mov %%edx,%4 ; "
		"mul %5 ; "
		"xor %5,%5 ; "
		"add %4,%%eax ; "
		"adc %5,%%edx ; "
		: "=A" (product), "=r" (tmp1), "=r" (tmp2)
		: "a" ((u32)delta), "1" ((u32)(delta >> 32)), "2" (mul_frac) );
#elif defined(__x86_64__)
	__asm__ (
		"mulq %[mul_frac] ; shrd $32, %[hi], %[lo]"
		: [lo]"=a"(product),
		  [hi]"=d"(tmp)
		: "0"(delta),
		  [mul_frac]"rm"((u64)mul_frac));
#else
#error implement me!
#endif

	return product;
}
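
/*
 * Illustration only (a hypothetical reference helper, not part of the
 * kernel API): with mul_frac acting as a 32.32 fixed-point multiplier,
 * the asm above computes product = ((delta << shift) * mul_frac) >> 32
 * (or delta >> -shift for negative shifts) without needing a 128-bit
 * intermediate on 32-bit. On compilers that provide unsigned __int128,
 * an equivalent C sketch would be:
 */
#ifdef __SIZEOF_INT128__
static inline u64 pvclock_scale_delta_ref(u64 delta, u32 mul_frac, int shift)
{
	/* Apply the binary shift first, exactly as above. */
	if (shift < 0)
		delta >>= -shift;
	else
		delta <<= shift;
	/* 64x32 -> 96-bit product; keep bits [95:32], i.e. scale by 2^-32. */
	return (u64)(((unsigned __int128)delta * mul_frac) >> 32);
}
#endif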

static __always_inline
u64 __pvclock_read_cycles(const struct pvclock_vcpu_time_info *src, u64 tsc)
{
	u64 delta = tsc - src->tsc_timestamp;
	u64 offset = pvclock_scale_delta(delta, src->tsc_to_system_mul,
					 src->tsc_shift);
	return src->system_time + offset;
}
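
/*
 * Usage sketch (illustrative, not the in-tree reader): callers pair
 * pvclock_read_begin()/pvclock_read_retry() around __pvclock_read_cycles()
 * and loop until they observe a stable version, roughly:
 *
 *	do {
 *		version = pvclock_read_begin(src);
 *		cycles = __pvclock_read_cycles(src, rdtsc_ordered());
 *	} while (pvclock_read_retry(src, version));
 */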

struct pvclock_vsyscall_time_info {
	struct pvclock_vcpu_time_info pvti;
} __attribute__((__aligned__(SMP_CACHE_BYTES)));

#define PVTI_SIZE sizeof(struct pvclock_vsyscall_time_info)

#ifdef CONFIG_PARAVIRT_CLOCK
void pvclock_set_pvti_cpu0_va(struct pvclock_vsyscall_time_info *pvti);
struct pvclock_vsyscall_time_info *pvclock_get_pvti_cpu0_va(void);
#else
static inline struct pvclock_vsyscall_time_info *pvclock_get_pvti_cpu0_va(void)
{
	return NULL;
}
#endif

#endif /* _ASM_X86_PVCLOCK_H */