xref: /OK3568_Linux_fs/kernel/arch/arm/include/asm/vdso/gettimeofday.h (revision 4882a59341e53eb6f0b4789bf948001014eff981)
/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright (C) 2018 ARM Limited
 */
#ifndef __ASM_VDSO_GETTIMEOFDAY_H
#define __ASM_VDSO_GETTIMEOFDAY_H

#ifndef __ASSEMBLY__

#include <asm/barrier.h>
#include <asm/errno.h>
#include <asm/unistd.h>
#include <asm/vdso/cp15.h>
#include <uapi/linux/time.h>

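/*
 * This header supplies the architecture hooks consumed by the generic
 * vDSO time code (the common implementation under lib/vdso/). A rough
 * sketch, not the exact generic code, of how these hooks are used:
 *
 *	const struct vdso_data *vd = __arch_get_vdso_data();
 *	u64 cycles = __arch_get_hw_counter(vd->clock_mode, vd);
 *	...compute the timestamp from vd and cycles...
 *	...if the fast path cannot be used, fall back to the real syscall:
 *	return clock_gettime_fallback(clock, ts);
 */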
#define VDSO_HAS_CLOCK_GETRES		1

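/*
 * Provided by the vDSO itself: returns the address of the vdso_data page
 * that the kernel maps read-only into every process alongside the vDSO.
 */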
extern struct vdso_data *__get_datapage(void);

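/*
 * The *_fallback() helpers below issue the real system call when the vDSO
 * fast path cannot be used. They follow the ARM EABI syscall convention:
 * the syscall number goes in r7, the arguments in r0/r1, "swi #0" traps
 * into the kernel, and the result comes back in r0. The "memory" clobber
 * tells the compiler the kernel may read or write the pointed-to structures.
 *
 * gettimeofday_fallback(): invokes __NR_gettimeofday.
 */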
static __always_inline int gettimeofday_fallback(
				struct __kernel_old_timeval *_tv,
				struct timezone *_tz)
{
	register struct timezone *tz asm("r1") = _tz;
	register struct __kernel_old_timeval *tv asm("r0") = _tv;
	register long ret asm ("r0");
	register long nr asm("r7") = __NR_gettimeofday;

	asm volatile(
	"	swi #0\n"
	: "=r" (ret)
	: "r" (tv), "r" (tz), "r" (nr)
	: "memory");

	return ret;
}

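/*
 * clock_gettime_fallback(): invokes __NR_clock_gettime64, the year-2038
 * safe variant that fills a 64-bit struct __kernel_timespec.
 */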
static __always_inline long clock_gettime_fallback(
					clockid_t _clkid,
					struct __kernel_timespec *_ts)
{
	register struct __kernel_timespec *ts asm("r1") = _ts;
	register clockid_t clkid asm("r0") = _clkid;
	register long ret asm ("r0");
	register long nr asm("r7") = __NR_clock_gettime64;

	asm volatile(
	"	swi #0\n"
	: "=r" (ret)
	: "r" (clkid), "r" (ts), "r" (nr)
	: "memory");

	return ret;
}

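/*
 * clock_gettime32_fallback(): invokes the legacy __NR_clock_gettime, which
 * uses the 32-bit struct old_timespec32 and is kept for compat callers.
 */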
static __always_inline long clock_gettime32_fallback(
					clockid_t _clkid,
					struct old_timespec32 *_ts)
{
	register struct old_timespec32 *ts asm("r1") = _ts;
	register clockid_t clkid asm("r0") = _clkid;
	register long ret asm ("r0");
	register long nr asm("r7") = __NR_clock_gettime;

	asm volatile(
	"	swi #0\n"
	: "=r" (ret)
	: "r" (clkid), "r" (ts), "r" (nr)
	: "memory");

	return ret;
}

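/*
 * clock_getres_fallback(): invokes __NR_clock_getres_time64 to obtain the
 * clock resolution as a 64-bit struct __kernel_timespec.
 */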
static __always_inline int clock_getres_fallback(
					clockid_t _clkid,
					struct __kernel_timespec *_ts)
{
	register struct __kernel_timespec *ts asm("r1") = _ts;
	register clockid_t clkid asm("r0") = _clkid;
	register long ret asm ("r0");
	register long nr asm("r7") = __NR_clock_getres_time64;

	asm volatile(
	"	swi #0\n"
	: "=r" (ret)
	: "r" (clkid), "r" (ts), "r" (nr)
	: "memory");

	return ret;
}

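/*
 * clock_getres32_fallback(): legacy __NR_clock_getres variant using the
 * 32-bit struct old_timespec32.
 */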
static __always_inline int clock_getres32_fallback(
					clockid_t _clkid,
					struct old_timespec32 *_ts)
{
	register struct old_timespec32 *ts asm("r1") = _ts;
	register clockid_t clkid asm("r0") = _clkid;
	register long ret asm ("r0");
	register long nr asm("r7") = __NR_clock_getres;

	asm volatile(
	"	swi #0\n"
	: "=r" (ret)
	: "r" (clkid), "r" (ts), "r" (nr)
	: "memory");

	return ret;
}

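/*
 * High-resolution vDSO clocks are only usable when the ARM architected
 * timer is present, since that is the counter userspace can read directly
 * (via CNTVCT). Without it the generic code always takes the syscall path.
 */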
static inline bool arm_vdso_hres_capable(void)
{
	return IS_ENABLED(CONFIG_ARM_ARCH_TIMER);
}
#define __arch_vdso_hres_capable arm_vdso_hres_capable

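/*
 * Read the raw cycle counter for the vDSO fast path. The isb() keeps the
 * CNTVCT read from being speculated ahead of the preceding vdso_data
 * loads, so the counter value is consistent with the snapshotted data.
 */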
static __always_inline u64 __arch_get_hw_counter(int clock_mode,
						 const struct vdso_data *vd)
{
#ifdef CONFIG_ARM_ARCH_TIMER
	u64 cycle_now;

	/*
	 * The core code checks the mode already, so reaching this point with
	 * VDSO_CLOCKMODE_NONE means we raced against a concurrent update.
	 * Return something; the core will do another round, see the mode
	 * change and fall back to the syscall.
	 */
	if (clock_mode == VDSO_CLOCKMODE_NONE)
		return 0;

	isb();
	cycle_now = read_sysreg(CNTVCT);

	return cycle_now;
#else
	/* Make GCC happy. This is compiled out anyway */
	return 0;
#endif
}

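/* Hand the generic vDSO code the address of the shared data page. */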
static __always_inline const struct vdso_data *__arch_get_vdso_data(void)
{
	return __get_datapage();
}

#endif /* !__ASSEMBLY__ */

#endif /* __ASM_VDSO_GETTIMEOFDAY_H */