/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright (C) 2018 ARM Limited
 */
5*4882a593Smuzhiyun #ifndef __ASM_VDSO_GETTIMEOFDAY_H
6*4882a593Smuzhiyun #define __ASM_VDSO_GETTIMEOFDAY_H
7*4882a593Smuzhiyun
8*4882a593Smuzhiyun #ifndef __ASSEMBLY__
9*4882a593Smuzhiyun
10*4882a593Smuzhiyun #include <asm/barrier.h>
11*4882a593Smuzhiyun #include <asm/unistd.h>
12*4882a593Smuzhiyun #include <asm/errno.h>
13*4882a593Smuzhiyun
14*4882a593Smuzhiyun #include <asm/vdso/compat_barrier.h>
15*4882a593Smuzhiyun
16*4882a593Smuzhiyun #define VDSO_HAS_CLOCK_GETRES 1
17*4882a593Smuzhiyun
18*4882a593Smuzhiyun #define BUILD_VDSO32 1
19*4882a593Smuzhiyun
static __always_inline
int gettimeofday_fallback(struct __kernel_old_timeval *_tv,
			  struct timezone *_tz)
{
	/*
	 * Syscall fallback used when the vDSO fast path cannot be taken.
	 * Marshal the arguments into the AArch32 syscall registers:
	 * r0/r1 carry the arguments, r7 the syscall number, and "swi #0"
	 * is the AArch32 syscall trap; the result comes back in r0.
	 */
	register struct timezone *tz asm("r1") = _tz;
	register struct __kernel_old_timeval *tv asm("r0") = _tv;
	/* ret shares r0 with tv: r0 is both first argument and return value. */
	register long ret asm ("r0");
	register long nr asm("r7") = __NR_compat_gettimeofday;

	asm volatile(
	"	swi #0\n"
	: "=r" (ret)
	: "r" (tv), "r" (tz), "r" (nr)
	: "memory");

	return ret;
}
37*4882a593Smuzhiyun
static __always_inline
long clock_gettime_fallback(clockid_t _clkid, struct __kernel_timespec *_ts)
{
	/*
	 * Syscall fallback using the 64-bit-time clock_gettime64 compat
	 * syscall (note the 64-bit __kernel_timespec argument).
	 * r0/r1 carry the arguments, r7 the syscall number.
	 */
	register struct __kernel_timespec *ts asm("r1") = _ts;
	register clockid_t clkid asm("r0") = _clkid;
	/* ret shares r0 with clkid: r0 is both first argument and return value. */
	register long ret asm ("r0");
	register long nr asm("r7") = __NR_compat_clock_gettime64;

	asm volatile(
	"	swi #0\n"
	: "=r" (ret)
	: "r" (clkid), "r" (ts), "r" (nr)
	: "memory");

	return ret;
}
54*4882a593Smuzhiyun
static __always_inline
long clock_gettime32_fallback(clockid_t _clkid, struct old_timespec32 *_ts)
{
	/*
	 * Syscall fallback for the legacy 32-bit-time clock_gettime compat
	 * syscall (note the old_timespec32 argument).
	 * r0/r1 carry the arguments, r7 the syscall number.
	 */
	register struct old_timespec32 *ts asm("r1") = _ts;
	register clockid_t clkid asm("r0") = _clkid;
	/* ret shares r0 with clkid: r0 is both first argument and return value. */
	register long ret asm ("r0");
	register long nr asm("r7") = __NR_compat_clock_gettime;

	asm volatile(
	"	swi #0\n"
	: "=r" (ret)
	: "r" (clkid), "r" (ts), "r" (nr)
	: "memory");

	return ret;
}
71*4882a593Smuzhiyun
static __always_inline
int clock_getres_fallback(clockid_t _clkid, struct __kernel_timespec *_ts)
{
	/*
	 * Syscall fallback using the 64-bit-time clock_getres_time64 compat
	 * syscall (note the 64-bit __kernel_timespec argument).
	 * r0/r1 carry the arguments, r7 the syscall number.
	 */
	register struct __kernel_timespec *ts asm("r1") = _ts;
	register clockid_t clkid asm("r0") = _clkid;
	/* ret shares r0 with clkid: r0 is both first argument and return value. */
	register long ret asm ("r0");
	register long nr asm("r7") = __NR_compat_clock_getres_time64;

	asm volatile(
	"	swi #0\n"
	: "=r" (ret)
	: "r" (clkid), "r" (ts), "r" (nr)
	: "memory");

	return ret;
}
88*4882a593Smuzhiyun
static __always_inline
int clock_getres32_fallback(clockid_t _clkid, struct old_timespec32 *_ts)
{
	/*
	 * Syscall fallback for the legacy 32-bit-time clock_getres compat
	 * syscall (note the old_timespec32 argument).
	 * r0/r1 carry the arguments, r7 the syscall number.
	 */
	register struct old_timespec32 *ts asm("r1") = _ts;
	register clockid_t clkid asm("r0") = _clkid;
	/* ret shares r0 with clkid: r0 is both first argument and return value. */
	register long ret asm ("r0");
	register long nr asm("r7") = __NR_compat_clock_getres;

	asm volatile(
	"	swi #0\n"
	: "=r" (ret)
	: "r" (clkid), "r" (ts), "r" (nr)
	: "memory");

	return ret;
}
105*4882a593Smuzhiyun
/*
 * Read the raw hardware counter backing the vDSO clocks.  Only valid
 * when the arch timer is the selected clocksource; the isb() fences
 * order the read against the surrounding seqlock protocol.
 */
static __always_inline u64 __arch_get_hw_counter(s32 clock_mode,
						 const struct vdso_data *vd)
{
	u64 res;

	/*
	 * Core checks for mode already, so this raced against a concurrent
	 * update. Return something. Core will do another round and then
	 * see the mode change and fallback to the syscall.
	 */
	if (clock_mode != VDSO_CLOCKMODE_ARCHTIMER)
		return 0;

	/*
	 * This isb() is required to prevent that the counter value
	 * is speculated.
	 */
	isb();
	/*
	 * 64-bit CP15 read of the arch timer counter via the AArch32
	 * encoding (c14 timer block; presumably CNTVCT — see the Arm ARM).
	 */
	asm volatile("mrrc p15, 1, %Q0, %R0, c14" : "=r" (res));
	/*
	 * This isb() is required to prevent that the seq lock is
	 * speculated.
	 */
	isb();

	return res;
}
133*4882a593Smuzhiyun
/* Return the vDSO data page pointer for the generic vDSO library. */
static __always_inline const struct vdso_data *__arch_get_vdso_data(void)
{
	const struct vdso_data *ret;

	/*
	 * This simply puts &_vdso_data into ret. The reason why we don't use
	 * `ret = _vdso_data` is that the compiler tends to optimise this in a
	 * very suboptimal way: instead of keeping &_vdso_data in a register,
	 * it goes through a relocation almost every time _vdso_data must be
	 * accessed (even in subfunctions). This is both time and space
	 * consuming: each relocation uses a word in the code section, and it
	 * has to be loaded at runtime.
	 *
	 * This trick hides the assignment from the compiler. Since it cannot
	 * track where the pointer comes from, it will only use one relocation
	 * where __arch_get_vdso_data() is called, and then keep the result in
	 * a register.
	 */
	asm volatile("mov %0, %1" : "=r"(ret) : "r"(_vdso_data));

	return ret;
}
156*4882a593Smuzhiyun
157*4882a593Smuzhiyun #ifdef CONFIG_TIME_NS
/* Return the time-namespace vDSO data page pointer (CONFIG_TIME_NS only). */
static __always_inline const struct vdso_data *__arch_get_timens_vdso_data(void)
{
	const struct vdso_data *ret;

	/* See __arch_get_vdso_data(). */
	asm volatile("mov %0, %1" : "=r"(ret) : "r"(_timens_data));

	return ret;
}
167*4882a593Smuzhiyun #endif
168*4882a593Smuzhiyun
/*
 * Tell the generic vDSO code whether the current clocksource can be used
 * from userspace: only the arch timer qualifies here.  The #define below
 * marks the generic default as overridden.
 */
static inline bool vdso_clocksource_ok(const struct vdso_data *vd)
{
	return vd->clock_mode == VDSO_CLOCKMODE_ARCHTIMER;
}
#define vdso_clocksource_ok vdso_clocksource_ok
174*4882a593Smuzhiyun
175*4882a593Smuzhiyun #endif /* !__ASSEMBLY__ */
176*4882a593Smuzhiyun
177*4882a593Smuzhiyun #endif /* __ASM_VDSO_GETTIMEOFDAY_H */
178