/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_X86_MSR_H
#define _ASM_X86_MSR_H

#include "msr-index.h"

#ifndef __ASSEMBLY__

#include <asm/asm.h>
#include <asm/errno.h>
#include <asm/cpumask.h>
#include <uapi/asm/msr.h>

struct msr {
	union {
		struct {
			u32 l;
			u32 h;
		};
		u64 q;
	};
};

struct msr_info {
	u32 msr_no;
	struct msr reg;
	struct msr *msrs;
	int err;
};

struct msr_regs_info {
	u32 *regs;
	int err;
};

struct saved_msr {
	bool valid;
	struct msr_info info;
};

struct saved_msrs {
	unsigned int num;
	struct saved_msr *array;
};

/*
 * Both i386 and x86_64 return a 64-bit value in edx:eax, but gcc's "A"
 * constraint has different meanings. For i386, "A" means exactly
 * edx:eax, while for x86_64 it doesn't mean rdx:rax or edx:eax. Instead,
 * it means rax *or* rdx.
 */
#ifdef CONFIG_X86_64
/* Using 64-bit values saves one instruction clearing the high half of low */
#define DECLARE_ARGS(val, low, high)	unsigned long low, high
#define EAX_EDX_VAL(val, low, high)	((low) | (high) << 32)
#define EAX_EDX_RET(val, low, high)	"=a" (low), "=d" (high)
#else
#define DECLARE_ARGS(val, low, high)	unsigned long long val
#define EAX_EDX_VAL(val, low, high)	(val)
#define EAX_EDX_RET(val, low, high)	"=A" (val)
#endif
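
/*
 * Illustrative sketch (not part of the kernel API): with the macros
 * above, an RDMSR-style statement is written once and compiles on
 * both architectures:
 *
 *	DECLARE_ARGS(val, low, high);
 *	asm volatile("rdmsr" : EAX_EDX_RET(val, low, high) : "c" (msr));
 *	return EAX_EDX_VAL(val, low, high);
 *
 * On x86_64 this yields two 64-bit temporaries joined by
 * "(low) | (high) << 32"; on i386 the "=A" constraint fills a single
 * unsigned long long directly from edx:eax.
 */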

/*
 * Be very careful with includes. This header is prone to include loops.
 */
#include <asm/atomic.h>
#include <linux/tracepoint-defs.h>

#ifdef CONFIG_TRACEPOINTS
DECLARE_TRACEPOINT(read_msr);
DECLARE_TRACEPOINT(write_msr);
DECLARE_TRACEPOINT(rdpmc);
extern void do_trace_write_msr(unsigned int msr, u64 val, int failed);
extern void do_trace_read_msr(unsigned int msr, u64 val, int failed);
extern void do_trace_rdpmc(unsigned int msr, u64 val, int failed);
#else
static inline void do_trace_write_msr(unsigned int msr, u64 val, int failed) {}
static inline void do_trace_read_msr(unsigned int msr, u64 val, int failed) {}
static inline void do_trace_rdpmc(unsigned int msr, u64 val, int failed) {}
#endif

/*
 * __rdmsr() and __wrmsr() are the two primitives which are the bare minimum MSR
 * accessors and should not have any tracing or other functionality piggybacking
 * on them - those are *purely* for accessing MSRs and nothing more. So don't even
 * think of extending them - you will be slapped with a stinking trout or a frozen
 * shark will reach you, wherever you are! You've been warned.
 */
static __always_inline unsigned long long __rdmsr(unsigned int msr)
{
	DECLARE_ARGS(val, low, high);

	asm volatile("1: rdmsr\n"
		     "2:\n"
		     _ASM_EXTABLE_HANDLE(1b, 2b, ex_handler_rdmsr_unsafe)
		     : EAX_EDX_RET(val, low, high) : "c" (msr));

	return EAX_EDX_VAL(val, low, high);
}

static __always_inline void __wrmsr(unsigned int msr, u32 low, u32 high)
{
	asm volatile("1: wrmsr\n"
		     "2:\n"
		     _ASM_EXTABLE_HANDLE(1b, 2b, ex_handler_wrmsr_unsafe)
		     : : "c" (msr), "a"(low), "d" (high) : "memory");
}

#define native_rdmsr(msr, val1, val2)			\
do {							\
	u64 __val = __rdmsr((msr));			\
	(void)((val1) = (u32)__val);			\
	(void)((val2) = (u32)(__val >> 32));		\
} while (0)

#define native_wrmsr(msr, low, high)			\
	__wrmsr(msr, low, high)

#define native_wrmsrl(msr, val)				\
	__wrmsr((msr), (u32)((u64)(val)),		\
		       (u32)((u64)(val) >> 32))
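
/*
 * Usage sketch (illustrative; the MSR chosen here is an example, not a
 * recommendation). These raw accessors bypass paravirt and tracing;
 * most code wants the rdmsr()/wrmsrl() family further down instead.
 *
 *	u32 lo, hi;
 *
 *	native_rdmsr(MSR_TSC_AUX, lo, hi);
 *	native_wrmsrl(MSR_TSC_AUX, (u64)hi << 32 | lo);
 */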

static inline unsigned long long native_read_msr(unsigned int msr)
{
	unsigned long long val;

	val = __rdmsr(msr);

	if (tracepoint_enabled(read_msr))
		do_trace_read_msr(msr, val, 0);

	return val;
}

static inline unsigned long long native_read_msr_safe(unsigned int msr,
						      int *err)
{
	DECLARE_ARGS(val, low, high);

	asm volatile("2: rdmsr ; xor %[err],%[err]\n"
		     "1:\n\t"
		     ".section .fixup,\"ax\"\n\t"
		     "3: mov %[fault],%[err]\n\t"
		     "xorl %%eax, %%eax\n\t"
		     "xorl %%edx, %%edx\n\t"
		     "jmp 1b\n\t"
		     ".previous\n\t"
		     _ASM_EXTABLE(2b, 3b)
		     : [err] "=r" (*err), EAX_EDX_RET(val, low, high)
		     : "c" (msr), [fault] "i" (-EIO));
	if (tracepoint_enabled(read_msr))
		do_trace_read_msr(msr, EAX_EDX_VAL(val, low, high), *err);
	return EAX_EDX_VAL(val, low, high);
}

/* Can be uninlined because referenced by paravirt */
static inline void notrace
native_write_msr(unsigned int msr, u32 low, u32 high)
{
	__wrmsr(msr, low, high);

	if (tracepoint_enabled(write_msr))
		do_trace_write_msr(msr, ((u64)high << 32 | low), 0);
}

/* Can be uninlined because referenced by paravirt */
static inline int notrace
native_write_msr_safe(unsigned int msr, u32 low, u32 high)
{
	int err;

	asm volatile("2: wrmsr ; xor %[err],%[err]\n"
		     "1:\n\t"
		     ".section .fixup,\"ax\"\n\t"
		     "3:  mov %[fault],%[err] ; jmp 1b\n\t"
		     ".previous\n\t"
		     _ASM_EXTABLE(2b, 3b)
		     : [err] "=a" (err)
		     : "c" (msr), "0" (low), "d" (high),
		       [fault] "i" (-EIO)
		     : "memory");
	if (tracepoint_enabled(write_msr))
		do_trace_write_msr(msr, ((u64)high << 32 | low), err);
	return err;
}
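
/*
 * A minimal sketch of the _safe pattern (illustrative): the return
 * value is 0 on success and -EIO if the WRMSR faulted, e.g. because
 * the MSR does not exist on this CPU.
 *
 *	if (native_write_msr_safe(msr, lo, hi))
 *		pr_warn("MSR 0x%x is not writable here\n", msr);
 */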

extern int rdmsr_safe_regs(u32 regs[8]);
extern int wrmsr_safe_regs(u32 regs[8]);

/**
 * rdtsc() - returns the current TSC without ordering constraints
 *
 * rdtsc() returns the result of RDTSC as a 64-bit integer.  The
 * only ordering constraint it supplies is the ordering implied by
 * "asm volatile": it will put the RDTSC in the place you expect.  The
 * CPU can and will speculatively execute that RDTSC, though, so the
 * results can be non-monotonic if compared on different CPUs.
 */
static __always_inline unsigned long long rdtsc(void)
{
	DECLARE_ARGS(val, low, high);

	asm volatile("rdtsc" : EAX_EDX_RET(val, low, high));

	return EAX_EDX_VAL(val, low, high);
}
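
/*
 * Example (sketch; do_work() is a stand-in): cycle deltas taken on one
 * CPU with plain rdtsc() are cheap but, per the comment above, can be
 * skewed by speculative execution of the RDTSC itself.
 *
 *	u64 t0 = rdtsc();
 *	do_work();
 *	u64 cycles = rdtsc() - t0;
 */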

/**
 * rdtsc_ordered() - read the current TSC in program order
 *
 * rdtsc_ordered() returns the result of RDTSC as a 64-bit integer.
 * It is ordered like a load to a global in-memory counter.  It should
 * be impossible to observe non-monotonic rdtsc_ordered() behavior
 * across multiple CPUs as long as the TSC is synced.
 */
static __always_inline unsigned long long rdtsc_ordered(void)
{
	DECLARE_ARGS(val, low, high);

	/*
	 * The RDTSC instruction is not ordered relative to memory
	 * access.  The Intel SDM and the AMD APM are both vague on this
	 * point, but empirically an RDTSC instruction can be
	 * speculatively executed before prior loads.  An RDTSC
	 * immediately after an appropriate barrier appears to be
	 * ordered as a normal load, that is, it provides the same
	 * ordering guarantees as reading from a global memory location
	 * that some other imaginary CPU is updating continuously with a
	 * time stamp.
	 *
	 * Thus, use the preferred barrier on the respective CPU, aiming for
	 * RDTSCP as the default.
	 */
	asm volatile(ALTERNATIVE_2("rdtsc",
				   "lfence; rdtsc", X86_FEATURE_LFENCE_RDTSC,
				   "rdtscp", X86_FEATURE_RDTSCP)
			: EAX_EDX_RET(val, low, high)
			/* RDTSCP clobbers ECX with MSR_TSC_AUX. */
			:: "ecx");

	return EAX_EDX_VAL(val, low, high);
}
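
/*
 * Sketch: when the measured work must not leak past the timestamps,
 * use the ordered variant on both sides (do_work() is a stand-in):
 *
 *	u64 t0 = rdtsc_ordered();
 *	do_work();
 *	u64 t1 = rdtsc_ordered();
 *
 * t1 - t0 is then ordered like a pair of loads around the work.
 */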

static inline unsigned long long native_read_pmc(int counter)
{
	DECLARE_ARGS(val, low, high);

	asm volatile("rdpmc" : EAX_EDX_RET(val, low, high) : "c" (counter));
	if (tracepoint_enabled(rdpmc))
		do_trace_rdpmc(counter, EAX_EDX_VAL(val, low, high), 0);
	return EAX_EDX_VAL(val, low, high);
}

#ifdef CONFIG_PARAVIRT_XXL
#include <asm/paravirt.h>
#else
#include <linux/errno.h>
/*
 * Access to machine-specific registers (available on 586 and better only).
 * Note: the rd* operations modify the parameters directly (without using
 * pointer indirection); this allows gcc to optimize better.
 */

#define rdmsr(msr, low, high)					\
do {								\
	u64 __val = native_read_msr((msr));			\
	(void)((low) = (u32)__val);				\
	(void)((high) = (u32)(__val >> 32));			\
} while (0)

static inline void wrmsr(unsigned int msr, u32 low, u32 high)
{
	native_write_msr(msr, low, high);
}

#define rdmsrl(msr, val)			\
	((val) = native_read_msr((msr)))

static inline void wrmsrl(unsigned int msr, u64 val)
{
	native_write_msr(msr, (u32)(val & 0xffffffffULL), (u32)(val >> 32));
}
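
/*
 * Usage sketch (illustrative; assumes the MSR exists on this CPU,
 * since the unchecked forms #GP and oops if it does not):
 *
 *	u64 misc;
 *
 *	rdmsrl(MSR_IA32_MISC_ENABLE, misc);
 *	wrmsrl(MSR_IA32_MISC_ENABLE, misc);
 */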

/* wrmsr with exception handling */
static inline int wrmsr_safe(unsigned int msr, u32 low, u32 high)
{
	return native_write_msr_safe(msr, low, high);
}

/* rdmsr with exception handling */
#define rdmsr_safe(msr, low, high)				\
({								\
	int __err;						\
	u64 __val = native_read_msr_safe((msr), &__err);	\
	(*low) = (u32)__val;					\
	(*high) = (u32)(__val >> 32);				\
	__err;							\
})

static inline int rdmsrl_safe(unsigned int msr, unsigned long long *p)
{
	int err;

	*p = native_read_msr_safe(msr, &err);
	return err;
}

#define rdpmc(counter, low, high)			\
do {							\
	u64 _l = native_read_pmc((counter));		\
	(low)  = (u32)_l;				\
	(high) = (u32)(_l >> 32);			\
} while (0)

#define rdpmcl(counter, val) ((val) = native_read_pmc(counter))
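
/*
 * Sketch (illustrative): counter selects a hardware performance
 * counter, and the CPU raises #GP for an invalid index, so callers
 * are expected to have programmed the counter first (assumption:
 * via perf or direct PERFEVTSEL setup).
 *
 *	u64 pmc0;
 *
 *	rdpmcl(0, pmc0);
 */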

#endif	/* !CONFIG_PARAVIRT_XXL */

/*
 * 64-bit version of wrmsr_safe():
 */
static inline int wrmsrl_safe(u32 msr, u64 val)
{
	return wrmsr_safe(msr, (u32)val,  (u32)(val >> 32));
}
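
/*
 * A minimal error-checked round trip (illustrative sketch). Both
 * helpers return 0 on success and -EIO if the access faulted, which
 * is a common way to probe whether an MSR exists at all:
 *
 *	u64 val;
 *
 *	if (rdmsrl_safe(MSR_TSC_AUX, &val) || wrmsrl_safe(MSR_TSC_AUX, val))
 *		pr_info("MSR_TSC_AUX not supported\n");
 */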

#define write_tsc(low, high) wrmsr(MSR_IA32_TSC, (low), (high))

#define write_rdtscp_aux(val) wrmsr(MSR_TSC_AUX, (val), 0)

struct msr *msrs_alloc(void);
void msrs_free(struct msr *msrs);
int msr_set_bit(u32 msr, u8 bit);
int msr_clear_bit(u32 msr, u8 bit);
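
/*
 * Sketch (return-value semantics stated here as an assumption from
 * the arch/x86/lib/msr.c implementation): msr_set_bit()/msr_clear_bit()
 * return a negative errno on failure, 0 if the bit already had the
 * requested value, and 1 if the MSR was actually written.
 *
 *	if (msr_set_bit(MSR_IA32_MISC_ENABLE, 0) < 0)
 *		pr_warn("could not set fast-string enable\n");
 */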

#ifdef CONFIG_SMP
int rdmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 *l, u32 *h);
int wrmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h);
int rdmsrl_on_cpu(unsigned int cpu, u32 msr_no, u64 *q);
int wrmsrl_on_cpu(unsigned int cpu, u32 msr_no, u64 q);
void rdmsr_on_cpus(const struct cpumask *mask, u32 msr_no, struct msr *msrs);
void wrmsr_on_cpus(const struct cpumask *mask, u32 msr_no, struct msr *msrs);
int rdmsr_safe_on_cpu(unsigned int cpu, u32 msr_no, u32 *l, u32 *h);
int wrmsr_safe_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h);
int rdmsrl_safe_on_cpu(unsigned int cpu, u32 msr_no, u64 *q);
int wrmsrl_safe_on_cpu(unsigned int cpu, u32 msr_no, u64 q);
int rdmsr_safe_regs_on_cpu(unsigned int cpu, u32 regs[8]);
int wrmsr_safe_regs_on_cpu(unsigned int cpu, u32 regs[8]);
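
/*
 * The *_on_cpu() helpers perform the access on the named CPU via a
 * cross-CPU call, so the usual smp_call_function_single() context
 * rules apply. Illustrative sketch (MSR choice is an example):
 *
 *	u64 aperf;
 *
 *	if (rdmsrl_safe_on_cpu(1, MSR_IA32_APERF, &aperf))
 *		pr_warn("CPU1: APERF not readable\n");
 */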
#else  /*  CONFIG_SMP  */
static inline int rdmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 *l, u32 *h)
{
	rdmsr(msr_no, *l, *h);
	return 0;
}
static inline int wrmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h)
{
	wrmsr(msr_no, l, h);
	return 0;
}
static inline int rdmsrl_on_cpu(unsigned int cpu, u32 msr_no, u64 *q)
{
	rdmsrl(msr_no, *q);
	return 0;
}
static inline int wrmsrl_on_cpu(unsigned int cpu, u32 msr_no, u64 q)
{
	wrmsrl(msr_no, q);
	return 0;
}
static inline void rdmsr_on_cpus(const struct cpumask *m, u32 msr_no,
				struct msr *msrs)
{
	rdmsr_on_cpu(0, msr_no, &(msrs[0].l), &(msrs[0].h));
}
static inline void wrmsr_on_cpus(const struct cpumask *m, u32 msr_no,
				struct msr *msrs)
{
	wrmsr_on_cpu(0, msr_no, msrs[0].l, msrs[0].h);
}
static inline int rdmsr_safe_on_cpu(unsigned int cpu, u32 msr_no,
				    u32 *l, u32 *h)
{
	return rdmsr_safe(msr_no, l, h);
}
static inline int wrmsr_safe_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h)
{
	return wrmsr_safe(msr_no, l, h);
}
static inline int rdmsrl_safe_on_cpu(unsigned int cpu, u32 msr_no, u64 *q)
{
	return rdmsrl_safe(msr_no, q);
}
static inline int wrmsrl_safe_on_cpu(unsigned int cpu, u32 msr_no, u64 q)
{
	return wrmsrl_safe(msr_no, q);
}
static inline int rdmsr_safe_regs_on_cpu(unsigned int cpu, u32 regs[8])
{
	return rdmsr_safe_regs(regs);
}
static inline int wrmsr_safe_regs_on_cpu(unsigned int cpu, u32 regs[8])
{
	return wrmsr_safe_regs(regs);
}
#endif  /* CONFIG_SMP */
#endif /* __ASSEMBLY__ */
#endif /* _ASM_X86_MSR_H */