/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * X86 specific ACPICA environments and implementation
 *
 * Copyright (C) 2014, Intel Corporation
 *   Author: Lv Zheng <lv.zheng@intel.com>
 */

#ifndef _ASM_X86_ACENV_H
#define _ASM_X86_ACENV_H

#include <asm/special_insns.h>

/* Asm macros */

/*
 * ACPI_FLUSH_CPU_CACHE() flushes caches on entering sleep states.
 * It is required to prevent data loss.
 *
 * While running inside a virtual machine, the kernel can bypass cache
 * flushing. Changing the sleep state in a virtual machine doesn't affect
 * the host system's sleep state and cannot lead to data loss.
 */
#define ACPI_FLUSH_CPU_CACHE()					\
do {								\
	if (!cpu_feature_enabled(X86_FEATURE_HYPERVISOR))	\
		wbinvd();					\
} while (0)
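
/*
 * Illustrative use, as a sketch only (the real call site is ACPICA's
 * sleep-entry code, not this header): caches are flushed immediately
 * before the PM1 control write that enters the sleep state, roughly:
 *
 *	ACPI_FLUSH_CPU_CACHE();
 *	status = acpi_hw_write_pm1_control(pm1a_control, pm1b_control);
 */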

int __acpi_acquire_global_lock(unsigned int *lock);
int __acpi_release_global_lock(unsigned int *lock);

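/*
 * ACPICA expands these macros with "facs" pointing at the mapped FACS
 * table; the helpers (implemented in arch/x86/kernel/acpi/boot.c)
 * operate on its global_lock field. Roughly: on acquire, "Acq" receives
 * nonzero when the lock was obtained and zero when only the pending bit
 * could be set; on release, "Acq" receives the pending bit, i.e. nonzero
 * when firmware must be signalled that the lock has been freed.
 */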
#define ACPI_ACQUIRE_GLOBAL_LOCK(facs, Acq) \
	((Acq) = __acpi_acquire_global_lock(&facs->global_lock))

#define ACPI_RELEASE_GLOBAL_LOCK(facs, Acq) \
	((Acq) = __acpi_release_global_lock(&facs->global_lock))

/*
 * Math helper asm macros
 */
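/*
 * ACPI_DIV_64_BY_32() divides the 64-bit value n_hi:n_lo by the 32-bit
 * divisor d32 using the x86 "divl" instruction (EDX:EAX / r/m32). The
 * "0"(n_lo) and "1"(n_hi) constraints preload EAX and EDX, and "=a"/"=d"
 * return the quotient in q32 and the remainder in r32. Note that divl
 * raises #DE if the quotient does not fit in 32 bits, i.e. callers must
 * guarantee n_hi < d32.
 */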
#define ACPI_DIV_64_BY_32(n_hi, n_lo, d32, q32, r32) \
	asm("divl %2;"				     \
	    : "=a"(q32), "=d"(r32)		     \
	    : "r"(d32),				     \
	      "0"(n_lo), "1"(n_hi))
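
/*
 * A minimal usage sketch with hypothetical values: dividing the 64-bit
 * dividend 0x00000002_00000001 (n_hi = 2, n_lo = 1) by 16 leaves
 * q32 = 0x20000000 and r32 = 1:
 *
 *	u32 hi = 2, lo = 1, q, r;
 *	ACPI_DIV_64_BY_32(hi, lo, 16, q, r);
 */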

#define ACPI_SHIFT_RIGHT_64(n_hi, n_lo) \
	asm("shrl   $1,%2;"		\
	    "rcrl   $1,%3;"		\
	    : "=r"(n_hi), "=r"(n_lo)	\
	    : "0"(n_hi), "1"(n_lo))
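
/*
 * The shift above works a word at a time: "shrl" shifts the high half
 * right by one and moves its low bit into the carry flag, and "rcrl"
 * rotates that carry into the top bit of the low half, giving a 64-bit
 * logical right shift by one. For example (hypothetical values),
 * n_hi = 1, n_lo = 0 becomes n_hi = 0, n_lo = 0x80000000.
 */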

#endif /* _ASM_X86_ACENV_H */