xref: /OK3568_Linux_fs/kernel/arch/riscv/include/asm/timex.h (revision 4882a59341e53eb6f0b4789bf948001014eff981)
/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2012 Regents of the University of California
 */

#ifndef _ASM_RISCV_TIMEX_H
#define _ASM_RISCV_TIMEX_H

#include <asm/csr.h>

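/*
 * Note that get_cycles() returns the RISC-V "time" counter, which ticks at
 * the platform's fixed timebase frequency rather than the CPU clock.
 */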
typedef unsigned long cycles_t;

#ifdef CONFIG_RISCV_M_MODE

#include <asm/clint.h>

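/*
 * In M-mode (NoMMU) the time CSR is not available, so the counter is read
 * from the CLINT's memory-mapped mtime register via clint_time_val.
 */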
#ifdef CONFIG_64BIT
static inline cycles_t get_cycles(void)
{
	return readq_relaxed(clint_time_val);
}
#else /* !CONFIG_64BIT */
static inline u32 get_cycles(void)
{
	return readl_relaxed(((u32 *)clint_time_val));
}
#define get_cycles get_cycles

static inline u32 get_cycles_hi(void)
{
	return readl_relaxed(((u32 *)clint_time_val) + 1);
}
#define get_cycles_hi get_cycles_hi
#endif /* CONFIG_64BIT */

/*
 * Much like MIPS, we may not have a viable counter to use at an early point
 * in the boot process (clint_time_val is not set up yet), so fall back to
 * random_get_entropy_fallback() until it is.
 */
static inline unsigned long random_get_entropy(void)
{
	if (unlikely(clint_time_val == NULL))
		return random_get_entropy_fallback();
	return get_cycles();
}
#define random_get_entropy()	random_get_entropy()

#else /* CONFIG_RISCV_M_MODE */

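/*
 * In S-mode the time CSR (rdtime) can be read directly; on RV32 the upper
 * half of the counter is exposed through the timeh CSR.
 */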
static inline cycles_t get_cycles(void)
{
	return csr_read(CSR_TIME);
}
#define get_cycles get_cycles

static inline u32 get_cycles_hi(void)
{
	return csr_read(CSR_TIMEH);
}
#define get_cycles_hi get_cycles_hi

#endif /* !CONFIG_RISCV_M_MODE */

#ifdef CONFIG_64BIT
static inline u64 get_cycles64(void)
{
	return get_cycles();
}
#else /* CONFIG_64BIT */
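/*
 * On 32-bit the counter is read as two halves; re-read the high word and
 * retry if it changed, so a carry between the two reads cannot produce a
 * torn value.
 */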
static inline u64 get_cycles64(void)
{
	u32 hi, lo;

	do {
		hi = get_cycles_hi();
		lo = get_cycles();
	} while (hi != get_cycles_hi());

	return ((u64)hi << 32) | lo;
}
#endif /* CONFIG_64BIT */

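/*
 * Advertise a directly readable timer so calibrate_delay() can compute
 * loops_per_jiffy from it instead of timing a busy loop.
 */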
#define ARCH_HAS_READ_CURRENT_TIMER
static inline int read_current_timer(unsigned long *timer_val)
{
	*timer_val = get_cycles();
	return 0;
}

#endif /* _ASM_RISCV_TIMEX_H */