/* SPDX-License-Identifier: GPL-2.0-only */
/*
 *  Extend a 32-bit counter to 63 bits
 *
 *  Author:	Nicolas Pitre
 *  Created:	December 3, 2006
 *  Copyright:	MontaVista Software, Inc.
 */

#ifndef __LINUX_CNT32_TO_63_H__
#define __LINUX_CNT32_TO_63_H__

#include <linux/compiler.h>
#include <linux/types.h>
#include <asm/byteorder.h>

/* this is used only to give gcc a clue about good code generation */
union cnt32_to_63 {
	struct {
#if defined(__LITTLE_ENDIAN)
		u32 lo, hi;
#elif defined(__BIG_ENDIAN)
		u32 hi, lo;
#endif
	};
	u64 val;
};


/**
 * cnt32_to_63 - Expand a 32-bit counter to a 63-bit counter
 * @cnt_lo: The low part of the counter
 *
 * Many hardware clock counters are only 32 bits wide and therefore have
 * a relatively short period, making wrap-arounds rather frequent.  This
 * is a problem when implementing sched_clock() for example, where a
 * 64-bit non-wrapping monotonic value is expected to be returned.
 *
 * To overcome that limitation, let's extend a 32-bit counter to 63 bits
 * in a completely lock-free fashion.  Bits 0 to 31 of the clock are
 * provided by the hardware while bits 32 to 62 are stored in memory.
 * The top bit in memory is used to synchronize with the hardware clock
 * half-period.  When the top bits of the two counters (hardware and in
 * memory) differ, the value in memory is updated, incrementing it when
 * the hardware counter wraps around.
 *
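 * To make the update rule used in the macro below concrete: suppose the
 * value in memory is 0x80000001, i.e. the state bit (bit 31) is set and
 * the upper count is 1.  When the hardware counter wraps around, its
 * top bit becomes 0, the two top bits differ, and the stored value is
 * recomputed as
 *
 *	(0x80000001 ^ 0x80000000) + (0x80000001 >> 31) = 0x00000002
 *
 * so the state bit is toggled to match the hardware again and the upper
 * count is incremented.  When the hardware merely crosses into its
 * second half period, the (hi >> 31) term is 0: only the state bit
 * toggles and the count is unchanged.
 *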
 * Because a word store to memory is atomic, the incremented value will
 * always be in sync with the top bit, which indicates to any potential
 * concurrent reader whether the value in memory is up to date with
 * regard to the needed increment.  And any race in updating the value
 * in memory is harmless, as the same value would simply be stored more
 * than once.
 *
 * The restrictions for the algorithm to work properly are:
 *
 * 1) this code must be called at least once per half period of the
 *    32-bit counter;
 *
 * 2) this code must not be preempted for a duration longer than the
 *    32-bit counter half period minus the longest period between two
 *    calls to this code;
 *
 * Those requirements ensure proper update to the state bit in memory.
 * This is usually not a problem in practice, but if it is then a kernel
 * timer should be scheduled to ensure this code is executed often
 * enough (see the illustrative timer sketch after the macro below).
 *
 * And finally:
 *
 * 3) the cnt_lo argument must be seen as a globally incrementing value,
 *    meaning that it should be a direct reference to the counter data which
 *    can be evaluated according to a specific ordering within the macro,
 *    and not the result of a previous evaluation stored in a variable.
 *
 * For example, this is wrong:
 *
 *	u32 partial = get_hw_count();
 *	u64 full = cnt32_to_63(partial);
 *	return full;
 *
 * This is fine:
 *
 *	u64 full = cnt32_to_63(get_hw_count());
 *	return full;
 *
 * Note that the top bit (bit 63) in the returned value should be considered
 * garbage.  It is not cleared here because callers are likely to use a
 * multiplier on the returned value which can get rid of the top bit
 * implicitly by making the multiplier even, therefore saving on a runtime
 * clear-bit instruction.  Otherwise the caller must remember to clear the
 * top bit explicitly.
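 *
 * For instance (illustrative only), a conversion with an even multiplier
 * needs no extra masking:
 *
 *	u64 ns = cnt32_to_63(get_hw_count()) * 1000;
 *
 * whereas a caller wanting the raw 63-bit count must clear bit 63
 * explicitly:
 *
 *	u64 ticks = cnt32_to_63(get_hw_count()) & ~(1ULL << 63);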
 */
#define cnt32_to_63(cnt_lo) \
({ \
	static u32 __m_cnt_hi; \
	union cnt32_to_63 __x; \
	__x.hi = __m_cnt_hi; \
	/* ensure __m_cnt_hi is read before the hardware counter */ \
	smp_rmb(); \
	__x.lo = (cnt_lo); \
	if (unlikely((s32)(__x.hi ^ __x.lo) < 0)) \
		/* top bits differ: toggle state bit, +1 on hardware wrap */ \
		__m_cnt_hi = __x.hi = (__x.hi ^ 0x80000000) + (__x.hi >> 31); \
	__x.val; \
})
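
/*
 * Example usage, as an illustrative sketch: a sched_clock()-style
 * 63-bit counter built on a hypothetical 32-bit register reader
 * read_hw_counter(), with a periodic kernel timer to satisfy
 * restriction 1 above.  The names and the HZ-based period are
 * placeholders for the example; a real period must be shorter than the
 * counter half period minus the worst-case gap between calls.
 *
 *	static u64 hw_sched_clock(void)
 *	{
 *		return cnt32_to_63(read_hw_counter()) & ~(1ULL << 63);
 *	}
 *
 *	static struct timer_list cnt_keepalive;
 *
 *	static void cnt_keepalive_fn(struct timer_list *t)
 *	{
 *		hw_sched_clock();
 *		mod_timer(t, jiffies + HZ);
 *	}
 *
 *	static void cnt_keepalive_init(void)
 *	{
 *		timer_setup(&cnt_keepalive, cnt_keepalive_fn, 0);
 *		mod_timer(&cnt_keepalive, jiffies + HZ);
 *	}
 */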

#endif