/* SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0 */
/******************************************************************************
 *
 * Name: aclinuxex.h - Extra OS specific defines, etc. for Linux
 *
 * Copyright (C) 2000 - 2020, Intel Corp.
 *
 *****************************************************************************/

#ifndef __ACLINUXEX_H__
#define __ACLINUXEX_H__

#ifdef __KERNEL__

#ifndef ACPI_USE_NATIVE_DIVIDE

#ifndef ACPI_DIV_64_BY_32
#define ACPI_DIV_64_BY_32(n_hi, n_lo, d32, q32, r32) \
	do { \
		u64 (__n) = ((u64) n_hi) << 32 | (n_lo); \
		(r32) = do_div ((__n), (d32)); \
		(q32) = (u32) (__n); \
	} while (0)
#endif

#ifndef ACPI_SHIFT_RIGHT_64
#define ACPI_SHIFT_RIGHT_64(n_hi, n_lo) \
	do { \
		(n_lo) >>= 1; \
		(n_lo) |= (((n_hi) & 1) << 31); \
		(n_hi) >>= 1; \
	} while (0)
#endif

#endif
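
/*
 * Illustrative sketch, not part of the original header: one way a caller
 * could wrap ACPI_DIV_64_BY_32 to divide a 64-bit value by a 32-bit
 * divisor.  The helper name is hypothetical; it only shows how the high
 * and low halves, quotient and remainder map onto the macro arguments.
 */
static inline u32 acpi_example_div_u64_by_u32(u64 dividend, u32 divisor,
					      u32 *remainder)
{
	u32 quotient;
	u32 rem;

	/* Split the dividend into its high and low 32-bit halves. */
	ACPI_DIV_64_BY_32((u32)(dividend >> 32), (u32)dividend,
			  divisor, quotient, rem);

	if (remainder)
		*remainder = rem;
	return quotient;
}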

/*
 * Overrides for in-kernel ACPICA
 */
acpi_status ACPI_INIT_FUNCTION acpi_os_initialize(void);

acpi_status acpi_os_terminate(void);

/*
 * The irqs_disabled() check is for resume from RAM.
 * Interrupts are off during resume, just like they are for boot.
 * However, boot has (system_state != SYSTEM_RUNNING)
 * to quiet __might_sleep() in kmalloc() and resume does not.
 */
static inline void *acpi_os_allocate(acpi_size size)
{
	return kmalloc(size, irqs_disabled()? GFP_ATOMIC : GFP_KERNEL);
}

static inline void *acpi_os_allocate_zeroed(acpi_size size)
{
	return kzalloc(size, irqs_disabled()? GFP_ATOMIC : GFP_KERNEL);
}

static inline void acpi_os_free(void *memory)
{
	kfree(memory);
}

static inline void *acpi_os_acquire_object(acpi_cache_t * cache)
{
	return kmem_cache_zalloc(cache,
				 irqs_disabled()? GFP_ATOMIC : GFP_KERNEL);
}
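
/*
 * Illustrative sketch, not part of the original header: a hypothetical
 * caller that can run both in ordinary process context and in the resume
 * path.  Because the helpers above check irqs_disabled(), the same call
 * transparently uses GFP_ATOMIC while interrupts are still off during
 * resume and GFP_KERNEL otherwise, so no GFP flag needs to be plumbed
 * through the call site.
 */
static inline void *acpi_example_alloc_scratch(acpi_size size)
{
	/* GFP flags are chosen inside acpi_os_allocate_zeroed(). */
	return acpi_os_allocate_zeroed(size);
}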

static inline acpi_thread_id acpi_os_get_thread_id(void)
{
	return (acpi_thread_id) (unsigned long)current;
}

/*
 * When lockdep is enabled, the spin_lock_init() macro stringifies its
 * argument and uses that as a name for the lock in debugging.
 * By executing spin_lock_init() in a macro the key changes from "lock" for
 * all locks to the name of the argument of acpi_os_create_lock(), which
 * prevents lockdep from reporting false positives for ACPICA locks.
 */
#define acpi_os_create_lock(__handle) \
	({ \
		spinlock_t *lock = ACPI_ALLOCATE(sizeof(*lock)); \
		if (lock) { \
			*(__handle) = lock; \
			spin_lock_init(*(__handle)); \
		} \
		lock ? AE_OK : AE_NO_MEMORY; \
	})

#define acpi_os_create_raw_lock(__handle) \
	({ \
		raw_spinlock_t *lock = ACPI_ALLOCATE(sizeof(*lock)); \
		if (lock) { \
			*(__handle) = lock; \
			raw_spin_lock_init(*(__handle)); \
		} \
		lock ? AE_OK : AE_NO_MEMORY; \
	})
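
/*
 * Illustrative sketch, not part of the original header: because
 * acpi_os_create_lock() is a macro, spin_lock_init() is expanded separately
 * at every call site, so each ACPICA lock gets its own lockdep key.  The
 * wrapper and handle names below are hypothetical.
 */
static inline acpi_status acpi_example_create_locks(acpi_spinlock *first_handle,
						    acpi_spinlock *second_handle)
{
	acpi_status status;

	/* Each expansion of the macro below carries a distinct lockdep key. */
	status = acpi_os_create_lock(first_handle);
	if (ACPI_FAILURE(status))
		return status;

	return acpi_os_create_lock(second_handle);
}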

static inline acpi_cpu_flags acpi_os_acquire_raw_lock(acpi_raw_spinlock lockp)
{
	acpi_cpu_flags flags;

	raw_spin_lock_irqsave(lockp, flags);
	return flags;
}

static inline void acpi_os_release_raw_lock(acpi_raw_spinlock lockp,
					    acpi_cpu_flags flags)
{
	raw_spin_unlock_irqrestore(lockp, flags);
}

static inline void acpi_os_delete_raw_lock(acpi_raw_spinlock handle)
{
	ACPI_FREE(handle);
}

static inline u8 acpi_os_readable(void *pointer, acpi_size length)
{
	return TRUE;
}

static inline acpi_status acpi_os_initialize_debugger(void)
{
	return AE_OK;
}

static inline void acpi_os_terminate_debugger(void)
{
	return;
}

/*
 * OSL interfaces added by Linux
 */

#endif /* __KERNEL__ */

#endif /* __ACLINUXEX_H__ */