xref: /OK3568_Linux_fs/u-boot/arch/x86/include/asm/msr.h (revision 4882a59341e53eb6f0b4789bf948001014eff981)
1*4882a593Smuzhiyun /*
2*4882a593Smuzhiyun  * Taken from the linux kernel file of the same name
3*4882a593Smuzhiyun  *
4*4882a593Smuzhiyun  * (C) Copyright 2012
5*4882a593Smuzhiyun  * Graeme Russ, <graeme.russ@gmail.com>
6*4882a593Smuzhiyun  *
7*4882a593Smuzhiyun  * SPDX-License-Identifier:	GPL-2.0+
8*4882a593Smuzhiyun  */
9*4882a593Smuzhiyun 
10*4882a593Smuzhiyun #ifndef _ASM_X86_MSR_H
11*4882a593Smuzhiyun #define _ASM_X86_MSR_H
12*4882a593Smuzhiyun 
13*4882a593Smuzhiyun #include <asm/msr-index.h>
14*4882a593Smuzhiyun 
15*4882a593Smuzhiyun #ifndef __ASSEMBLY__
16*4882a593Smuzhiyun 
17*4882a593Smuzhiyun #include <linux/types.h>
18*4882a593Smuzhiyun #include <linux/ioctl.h>
19*4882a593Smuzhiyun 
/*
 * ioctl numbers carried over from the Linux msr driver interface
 * (presumably for /dev/cpu/<n>/msr-style userspace access — unused
 * in U-Boot itself; kept for source compatibility).
 */
#define X86_IOC_RDMSR_REGS	_IOWR('c', 0xA0, __u32[8])
#define X86_IOC_WRMSR_REGS	_IOWR('c', 0xA1, __u32[8])
22*4882a593Smuzhiyun 
23*4882a593Smuzhiyun #ifdef __KERNEL__
24*4882a593Smuzhiyun 
25*4882a593Smuzhiyun #include <linux/errno.h>
26*4882a593Smuzhiyun 
/* A 64-bit MSR value, viewable either as two 32-bit halves or as a whole */
struct msr {
	union {
		struct {
			u32 l;	/* low 32 bits (eax) */
			u32 h;	/* high 32 bits (edx) */
		};
		u64 q;		/* full 64-bit value */
	};
};
36*4882a593Smuzhiyun 
/* Bookkeeping for a single MSR read/write operation */
struct msr_info {
	u32 msr_no;		/* MSR number to access */
	struct msr reg;		/* value read, or value to be written */
	struct msr *msrs;	/* optional array of values (see msrs_alloc()) */
	int err;		/* error status of the operation */
};
43*4882a593Smuzhiyun 
/* Register-array form used with the *_safe_regs() interfaces */
struct msr_regs_info {
	u32 *regs;	/* GPR array as passed to native_{rd,wr}msr_safe_regs() */
	int err;	/* error status */
};
48*4882a593Smuzhiyun 
/*
 * Read the time-stamp counter together with IA32_TSC_AUX via RDTSCP.
 *
 * @aux: output; receives the IA32_TSC_AUX value returned in ecx
 * Return: the 64-bit TSC value (edx:eax)
 */
static inline unsigned long long native_read_tscp(unsigned int *aux)
{
	unsigned long low, high;
	/* 0f 01 f9 is the RDTSCP opcode, emitted as raw bytes so the
	 * code assembles even with assemblers that lack the mnemonic */
	asm volatile(".byte 0x0f,0x01,0xf9"
		     : "=a" (low), "=d" (high), "=c" (*aux));
	return low | ((u64)high << 32);
}
56*4882a593Smuzhiyun 
/*
 * both i386 and x86_64 returns 64-bit value in edx:eax, but gcc's "A"
 * constraint has different meanings. For i386, "A" means exactly
 * edx:eax, while for x86_64 it doesn't mean rdx:rax or edx:eax. Instead,
 * it means rax *or* rdx.
 */
#ifdef CONFIG_X86_64
/* 64-bit: keep the two 32-bit halves in separate named registers */
#define DECLARE_ARGS(val, low, high)	unsigned low, high
#define EAX_EDX_VAL(val, low, high)	((low) | ((u64)(high) << 32))
#define EAX_EDX_ARGS(val, low, high)	"a" (low), "d" (high)
#define EAX_EDX_RET(val, low, high)	"=a" (low), "=d" (high)
#else
/* 32-bit: "A" binds one 64-bit value to the edx:eax register pair */
#define DECLARE_ARGS(val, low, high)	unsigned long long val
#define EAX_EDX_VAL(val, low, high)	(val)
#define EAX_EDX_ARGS(val, low, high)	"A" (val)
#define EAX_EDX_RET(val, low, high)	"=A" (val)
#endif
74*4882a593Smuzhiyun 
/*
 * Read MSR @msr with the RDMSR instruction and return its 64-bit value.
 * No fault handling — contrast the *_safe_regs() interfaces below.
 */
static inline __attribute__((no_instrument_function))
	unsigned long long native_read_msr(unsigned int msr)
{
	DECLARE_ARGS(val, low, high);

	asm volatile("rdmsr" : EAX_EDX_RET(val, low, high) : "c" (msr));
	return EAX_EDX_VAL(val, low, high);
}
83*4882a593Smuzhiyun 
/*
 * Write the value high:low (edx:eax) to MSR @msr with WRMSR.
 * The "memory" clobber keeps the write ordered against memory accesses.
 */
static inline void native_write_msr(unsigned int msr,
				    unsigned low, unsigned high)
{
	asm volatile("wrmsr" : : "c" (msr), "a"(low), "d" (high) : "memory");
}
89*4882a593Smuzhiyun 
/* Read the time-stamp counter; implementation provided out of line */
extern unsigned long long native_read_tsc(void);

/*
 * Fault-tolerant RDMSR/WRMSR taking a GPR array: regs[1] holds the MSR
 * number and regs[0]/regs[2] the low/high data halves (see the
 * *_amd_safe() helpers below). Returns an error code.
 */
extern int native_rdmsr_safe_regs(u32 regs[8]);
extern int native_wrmsr_safe_regs(u32 regs[8]);
94*4882a593Smuzhiyun 
/*
 * Read performance-monitoring counter @counter with RDPMC and return
 * its 64-bit value (edx:eax).
 */
static inline unsigned long long native_read_pmc(int counter)
{
	DECLARE_ARGS(val, low, high);

	asm volatile("rdpmc" : EAX_EDX_RET(val, low, high) : "c" (counter));
	return EAX_EDX_VAL(val, low, high);
}
102*4882a593Smuzhiyun 
103*4882a593Smuzhiyun #ifdef CONFIG_PARAVIRT
104*4882a593Smuzhiyun #include <asm/paravirt.h>
105*4882a593Smuzhiyun #else
106*4882a593Smuzhiyun #include <errno.h>
107*4882a593Smuzhiyun /*
108*4882a593Smuzhiyun  * Access to machine-specific registers (available on 586 and better only)
109*4882a593Smuzhiyun  * Note: the rd* operations modify the parameters directly (without using
110*4882a593Smuzhiyun  * pointer indirection), this allows gcc to optimize better
111*4882a593Smuzhiyun  */
112*4882a593Smuzhiyun 
/*
 * rdmsr(msr, val1, val2): read MSR @msr, storing the low 32 bits in
 * val1 and the high 32 bits in val2. Implemented as a macro so the
 * caller's lvalues are assigned directly, without pointer indirection.
 */
#define rdmsr(msr, val1, val2)					\
do {								\
	u64 __val = native_read_msr((msr));			\
	(void)((val1) = (u32)__val);				\
	(void)((val2) = (u32)(__val >> 32));			\
} while (0)
119*4882a593Smuzhiyun 
/* Write high:low to MSR @msr (thin wrapper around native_write_msr()) */
static inline void wrmsr(unsigned msr, unsigned low, unsigned high)
{
	native_write_msr(msr, low, high);
}
124*4882a593Smuzhiyun 
/* Read MSR @msr into the 64-bit lvalue @val */
#define rdmsrl(msr, val)			\
	((val) = native_read_msr((msr)))

/* Write the 64-bit value @val to MSR @msr */
#define wrmsrl(msr, val)						\
	native_write_msr((msr), (u32)((u64)(val)), (u32)((u64)(val) >> 32))
130*4882a593Smuzhiyun 
/*
 * msr_clrsetbits_64() - read-modify-write an MSR
 *
 * Reads MSR @msr, clears the bits set in @clear, then sets the bits in
 * @set, and writes the result back.
 */
static inline void msr_clrsetbits_64(unsigned msr, u64 clear, u64 set)
{
	u64 value;

	value = native_read_msr(msr);
	value = (value & ~clear) | set;
	wrmsrl(msr, value);
}
140*4882a593Smuzhiyun 
/*
 * msr_setbits_64() - set bits in an MSR
 *
 * Reads MSR @msr, ORs in the bits of @set, and writes the result back.
 */
static inline void msr_setbits_64(unsigned msr, u64 set)
{
	wrmsrl(msr, native_read_msr(msr) | set);
}
149*4882a593Smuzhiyun 
/*
 * msr_clrbits_64() - clear bits in an MSR
 *
 * Reads MSR @msr, masks off the bits of @clear, and writes the result
 * back.
 */
static inline void msr_clrbits_64(unsigned msr, u64 clear)
{
	wrmsrl(msr, native_read_msr(msr) & ~clear);
}
158*4882a593Smuzhiyun 
/* rdmsr with exception handling */
/*
 * NOTE(review): native_read_msr_safe() is not declared anywhere in this
 * header — presumably it is provided elsewhere in the tree; confirm
 * before relying on rdmsr_safe().
 */
#define rdmsr_safe(msr, p1, p2)					\
({								\
	int __err;						\
	u64 __val = native_read_msr_safe((msr), &__err);	\
	(*p1) = (u32)__val;					\
	(*p2) = (u32)(__val >> 32);				\
	__err;							\
})
168*4882a593Smuzhiyun 
rdmsrl_amd_safe(unsigned msr,unsigned long long * p)169*4882a593Smuzhiyun static inline int rdmsrl_amd_safe(unsigned msr, unsigned long long *p)
170*4882a593Smuzhiyun {
171*4882a593Smuzhiyun 	u32 gprs[8] = { 0 };
172*4882a593Smuzhiyun 	int err;
173*4882a593Smuzhiyun 
174*4882a593Smuzhiyun 	gprs[1] = msr;
175*4882a593Smuzhiyun 	gprs[7] = 0x9c5a203a;
176*4882a593Smuzhiyun 
177*4882a593Smuzhiyun 	err = native_rdmsr_safe_regs(gprs);
178*4882a593Smuzhiyun 
179*4882a593Smuzhiyun 	*p = gprs[0] | ((u64)gprs[2] << 32);
180*4882a593Smuzhiyun 
181*4882a593Smuzhiyun 	return err;
182*4882a593Smuzhiyun }
183*4882a593Smuzhiyun 
wrmsrl_amd_safe(unsigned msr,unsigned long long val)184*4882a593Smuzhiyun static inline int wrmsrl_amd_safe(unsigned msr, unsigned long long val)
185*4882a593Smuzhiyun {
186*4882a593Smuzhiyun 	u32 gprs[8] = { 0 };
187*4882a593Smuzhiyun 
188*4882a593Smuzhiyun 	gprs[0] = (u32)val;
189*4882a593Smuzhiyun 	gprs[1] = msr;
190*4882a593Smuzhiyun 	gprs[2] = val >> 32;
191*4882a593Smuzhiyun 	gprs[7] = 0x9c5a203a;
192*4882a593Smuzhiyun 
193*4882a593Smuzhiyun 	return native_wrmsr_safe_regs(gprs);
194*4882a593Smuzhiyun }
195*4882a593Smuzhiyun 
/* Fault-tolerant RDMSR via a GPR array; see native_rdmsr_safe_regs() */
static inline int rdmsr_safe_regs(u32 regs[8])
{
	return native_rdmsr_safe_regs(regs);
}
200*4882a593Smuzhiyun 
/* Fault-tolerant WRMSR via a GPR array; see native_wrmsr_safe_regs() */
static inline int wrmsr_safe_regs(u32 regs[8])
{
	return native_wrmsr_safe_regs(regs);
}
205*4882a593Smuzhiyun 
/* MSR value as an explicit low/high 32-bit pair */
typedef struct msr_t {
	uint32_t lo;	/* low 32 bits */
	uint32_t hi;	/* high 32 bits */
} msr_t;
210*4882a593Smuzhiyun 
msr_read(unsigned msr_num)211*4882a593Smuzhiyun static inline struct msr_t msr_read(unsigned msr_num)
212*4882a593Smuzhiyun {
213*4882a593Smuzhiyun 	struct msr_t msr;
214*4882a593Smuzhiyun 
215*4882a593Smuzhiyun 	rdmsr(msr_num, msr.lo, msr.hi);
216*4882a593Smuzhiyun 
217*4882a593Smuzhiyun 	return msr;
218*4882a593Smuzhiyun }
219*4882a593Smuzhiyun 
/* Write the lo/hi pair in @msr to MSR @msr_num */
static inline void msr_write(unsigned msr_num, msr_t msr)
{
	wrmsr(msr_num, msr.lo, msr.hi);
}
224*4882a593Smuzhiyun 
/*
 * NOTE(review): rdtscl()/rdtscll() call __native_read_tsc(), but this
 * header only declares native_read_tsc() — presumably the double-
 * underscore variant is defined elsewhere; confirm before using these.
 */
/* Read the low 32 bits of the TSC into @low */
#define rdtscl(low)						\
	((low) = (u32)__native_read_tsc())

/* Read the full 64-bit TSC into @val */
#define rdtscll(val)						\
	((val) = __native_read_tsc())

/* Read performance counter @counter, split into @low / @high */
#define rdpmc(counter, low, high)			\
do {							\
	u64 _l = native_read_pmc((counter));		\
	(low)  = (u32)_l;				\
	(high) = (u32)(_l >> 32);			\
} while (0)

/* RDTSCP: TSC split into @low / @high, IA32_TSC_AUX into @aux */
#define rdtscp(low, high, aux)					\
do {                                                            \
	unsigned long long _val = native_read_tscp(&(aux));     \
	(low) = (u32)_val;                                      \
	(high) = (u32)(_val >> 32);                             \
} while (0)

/* RDTSCP: full 64-bit TSC into @val, IA32_TSC_AUX into @aux */
#define rdtscpll(val, aux) (val) = native_read_tscp(&(aux))
246*4882a593Smuzhiyun 
247*4882a593Smuzhiyun #endif	/* !CONFIG_PARAVIRT */
248*4882a593Smuzhiyun 
249*4882a593Smuzhiyun 
/*
 * Write the 64-bit @val to @msr with fault handling.
 * NOTE(review): wrmsr_safe() is not defined in this header; it must be
 * provided elsewhere (or this macro is unused) — verify before use.
 */
#define checking_wrmsrl(msr, val) wrmsr_safe((msr), (u32)(val),		\
					     (u32)((val) >> 32))

/* Load a new value (val2:val1) into the time-stamp counter */
#define write_tsc(val1, val2) wrmsr(MSR_IA32_TSC, (val1), (val2))

/* Set the IA32_TSC_AUX value that RDTSCP returns in ecx */
#define write_rdtscp_aux(val) wrmsr(MSR_TSC_AUX, (val), 0)
256*4882a593Smuzhiyun 
/* Allocate/free struct msr storage; implementations provided out of line */
struct msr *msrs_alloc(void);
void msrs_free(struct msr *msrs);
259*4882a593Smuzhiyun 
260*4882a593Smuzhiyun #endif /* __KERNEL__ */
261*4882a593Smuzhiyun #endif /* __ASSEMBLY__ */
262*4882a593Smuzhiyun #endif /* _ASM_X86_MSR_H */
263