/* xref: /OK3568_Linux_fs/kernel/arch/x86/include/asm/efi.h (revision 4882a59341e53eb6f0b4789bf948001014eff981) */
1*4882a593Smuzhiyun /* SPDX-License-Identifier: GPL-2.0 */
2*4882a593Smuzhiyun #ifndef _ASM_X86_EFI_H
3*4882a593Smuzhiyun #define _ASM_X86_EFI_H
4*4882a593Smuzhiyun 
5*4882a593Smuzhiyun #include <asm/fpu/api.h>
6*4882a593Smuzhiyun #include <asm/processor-flags.h>
7*4882a593Smuzhiyun #include <asm/tlb.h>
8*4882a593Smuzhiyun #include <asm/nospec-branch.h>
9*4882a593Smuzhiyun #include <asm/mmu_context.h>
10*4882a593Smuzhiyun #include <linux/build_bug.h>
11*4882a593Smuzhiyun #include <linux/kernel.h>
12*4882a593Smuzhiyun #include <linux/pgtable.h>
13*4882a593Smuzhiyun 
/* Cached physical addresses of the firmware vendor string and config table. */
extern unsigned long efi_fw_vendor, efi_config_table;

/*
 * We map the EFI regions needed for runtime services non-contiguously,
 * with preserved alignment on virtual addresses starting from -4G down
 * for a total max space of 64G. This way, we provide for stable runtime
 * services addresses across kernels so that a kexec'd kernel can still
 * use them.
 *
 * This is the main reason why we're doing stable VA mappings for RT
 * services.
 */

/* Loader signatures stored in the boot_params header ("EL32"/"EL64"). */
#define EFI32_LOADER_SIGNATURE	"EL32"
#define EFI64_LOADER_SIGNATURE	"EL64"

/* Only the interrupt flag is expected to differ around an EFI call. */
#define ARCH_EFI_IRQ_FLAGS_MASK	X86_EFLAGS_IF

/*
 * The EFI services are called through variadic functions in many cases. These
 * functions are implemented in assembler and support only a fixed number of
 * arguments. The macros below allow us to check at build time that we don't
 * try to call them with too many arguments.
 *
 * __efi_nargs() will return the number of arguments if it is 7 or less, and
 * cause a BUILD_BUG otherwise. The limitations of the C preprocessor make it
 * impossible to calculate the exact number of arguments beyond some
 * pre-defined limit. The maximum number of arguments currently supported by
 * any of the thunks is 7, so this is good enough for now and can be extended
 * in the obvious way if we ever need more.
 */

#define __efi_nargs(...) __efi_nargs_(__VA_ARGS__)
#define __efi_nargs_(...) __efi_nargs__(0, ##__VA_ARGS__,	\
	__efi_arg_sentinel(7), __efi_arg_sentinel(6),		\
	__efi_arg_sentinel(5), __efi_arg_sentinel(4),		\
	__efi_arg_sentinel(3), __efi_arg_sentinel(2),		\
	__efi_arg_sentinel(1), __efi_arg_sentinel(0))
/*
 * The real arguments shift the sentinels right; whichever sentinel lands in
 * the 'n' slot is the argument count. More than 7 arguments push the
 * BUILD_BUG expression into 'n' instead.
 */
#define __efi_nargs__(_0, _1, _2, _3, _4, _5, _6, _7, n, ...)	\
	__take_second_arg(n,					\
		({ BUILD_BUG_ON_MSG(1, "__efi_nargs limit exceeded"); 8; }))
#define __efi_arg_sentinel(n) , n

/*
 * __efi_nargs_check(f, n, ...) will cause a BUILD_BUG if the ellipsis
 * represents more than n arguments.
 */

#define __efi_nargs_check(f, n, ...)					\
	__efi_nargs_check_(f, __efi_nargs(__VA_ARGS__), n)
/* Extra expansion level so 'p' is the evaluated count when stringified. */
#define __efi_nargs_check_(f, p, n) __efi_nargs_check__(f, p, n)
#define __efi_nargs_check__(f, p, n) ({					\
	BUILD_BUG_ON_MSG(						\
		(p) > (n),						\
		#f " called with too many arguments (" #p ">" #n ")");	\
})

71*4882a593Smuzhiyun #ifdef CONFIG_X86_32
72*4882a593Smuzhiyun #define arch_efi_call_virt_setup()					\
73*4882a593Smuzhiyun ({									\
74*4882a593Smuzhiyun 	kernel_fpu_begin();						\
75*4882a593Smuzhiyun 	firmware_restrict_branch_speculation_start();			\
76*4882a593Smuzhiyun })
77*4882a593Smuzhiyun 
78*4882a593Smuzhiyun #define arch_efi_call_virt_teardown()					\
79*4882a593Smuzhiyun ({									\
80*4882a593Smuzhiyun 	firmware_restrict_branch_speculation_end();			\
81*4882a593Smuzhiyun 	kernel_fpu_end();						\
82*4882a593Smuzhiyun })
83*4882a593Smuzhiyun 
84*4882a593Smuzhiyun #define arch_efi_call_virt(p, f, args...)	p->f(args)
85*4882a593Smuzhiyun 
86*4882a593Smuzhiyun #else /* !CONFIG_X86_32 */
87*4882a593Smuzhiyun 
88*4882a593Smuzhiyun #define EFI_LOADER_SIGNATURE	"EL64"
89*4882a593Smuzhiyun 
90*4882a593Smuzhiyun extern asmlinkage u64 __efi_call(void *fp, ...);
91*4882a593Smuzhiyun 
92*4882a593Smuzhiyun #define efi_call(...) ({						\
93*4882a593Smuzhiyun 	__efi_nargs_check(efi_call, 7, __VA_ARGS__);			\
94*4882a593Smuzhiyun 	__efi_call(__VA_ARGS__);					\
95*4882a593Smuzhiyun })
96*4882a593Smuzhiyun 
97*4882a593Smuzhiyun /*
98*4882a593Smuzhiyun  * struct efi_scratch - Scratch space used while switching to/from efi_mm
99*4882a593Smuzhiyun  * @phys_stack: stack used during EFI Mixed Mode
100*4882a593Smuzhiyun  * @prev_mm:    store/restore stolen mm_struct while switching to/from efi_mm
101*4882a593Smuzhiyun  */
102*4882a593Smuzhiyun struct efi_scratch {
103*4882a593Smuzhiyun 	u64			phys_stack;
104*4882a593Smuzhiyun 	struct mm_struct	*prev_mm;
105*4882a593Smuzhiyun } __packed;
106*4882a593Smuzhiyun 
/*
 * 64-bit runtime call bracketing: sync the EFI page-table mappings, take
 * the FPU, restrict branch speculation, then switch to efi_mm. Teardown
 * undoes those steps in reverse order.
 */
#define arch_efi_call_virt_setup()					\
({									\
	efi_sync_low_kernel_mappings();					\
	kernel_fpu_begin();						\
	firmware_restrict_branch_speculation_start();			\
	efi_switch_mm(&efi_mm);						\
})

/* Note: no stray '\' after the body — it used to drag the blank line in. */
#define arch_efi_call_virt(p, f, args...)				\
	efi_call((void *)p->f, args)

#define arch_efi_call_virt_teardown()					\
({									\
	efi_switch_mm(efi_scratch.prev_mm);				\
	firmware_restrict_branch_speculation_end();			\
	kernel_fpu_end();						\
})

125*4882a593Smuzhiyun #ifdef CONFIG_KASAN
126*4882a593Smuzhiyun /*
127*4882a593Smuzhiyun  * CONFIG_KASAN may redefine memset to __memset.  __memset function is present
128*4882a593Smuzhiyun  * only in kernel binary.  Since the EFI stub linked into a separate binary it
129*4882a593Smuzhiyun  * doesn't have __memset().  So we should use standard memset from
130*4882a593Smuzhiyun  * arch/x86/boot/compressed/string.c.  The same applies to memcpy and memmove.
131*4882a593Smuzhiyun  */
132*4882a593Smuzhiyun #undef memcpy
133*4882a593Smuzhiyun #undef memset
134*4882a593Smuzhiyun #undef memmove
135*4882a593Smuzhiyun #endif
136*4882a593Smuzhiyun 
137*4882a593Smuzhiyun #endif /* CONFIG_X86_32 */
138*4882a593Smuzhiyun 
139*4882a593Smuzhiyun extern struct efi_scratch efi_scratch;
140*4882a593Smuzhiyun extern int __init efi_memblock_x86_reserve_range(void);
141*4882a593Smuzhiyun extern void __init efi_print_memmap(void);
142*4882a593Smuzhiyun extern void __init efi_map_region(efi_memory_desc_t *md);
143*4882a593Smuzhiyun extern void __init efi_map_region_fixed(efi_memory_desc_t *md);
144*4882a593Smuzhiyun extern void efi_sync_low_kernel_mappings(void);
145*4882a593Smuzhiyun extern int __init efi_alloc_page_tables(void);
146*4882a593Smuzhiyun extern int __init efi_setup_page_tables(unsigned long pa_memmap, unsigned num_pages);
147*4882a593Smuzhiyun extern void __init efi_runtime_update_mappings(void);
148*4882a593Smuzhiyun extern void __init efi_dump_pagetable(void);
149*4882a593Smuzhiyun extern void __init efi_apply_memmap_quirks(void);
150*4882a593Smuzhiyun extern int __init efi_reuse_config(u64 tables, int nr_tables);
151*4882a593Smuzhiyun extern void efi_delete_dummy_variable(void);
152*4882a593Smuzhiyun extern void efi_switch_mm(struct mm_struct *mm);
153*4882a593Smuzhiyun extern void efi_recover_from_page_fault(unsigned long phys_addr);
154*4882a593Smuzhiyun extern void efi_free_boot_services(void);
155*4882a593Smuzhiyun 
156*4882a593Smuzhiyun /* kexec external ABI */
157*4882a593Smuzhiyun struct efi_setup_data {
158*4882a593Smuzhiyun 	u64 fw_vendor;
159*4882a593Smuzhiyun 	u64 __unused;
160*4882a593Smuzhiyun 	u64 tables;
161*4882a593Smuzhiyun 	u64 smbios;
162*4882a593Smuzhiyun 	u64 reserved[8];
163*4882a593Smuzhiyun };
164*4882a593Smuzhiyun 
165*4882a593Smuzhiyun extern u64 efi_setup;
166*4882a593Smuzhiyun 
167*4882a593Smuzhiyun #ifdef CONFIG_EFI
168*4882a593Smuzhiyun extern efi_status_t __efi64_thunk(u32, ...);
169*4882a593Smuzhiyun 
170*4882a593Smuzhiyun #define efi64_thunk(...) ({						\
171*4882a593Smuzhiyun 	__efi_nargs_check(efi64_thunk, 6, __VA_ARGS__);			\
172*4882a593Smuzhiyun 	__efi64_thunk(__VA_ARGS__);					\
173*4882a593Smuzhiyun })
174*4882a593Smuzhiyun 
efi_is_mixed(void)175*4882a593Smuzhiyun static inline bool efi_is_mixed(void)
176*4882a593Smuzhiyun {
177*4882a593Smuzhiyun 	if (!IS_ENABLED(CONFIG_EFI_MIXED))
178*4882a593Smuzhiyun 		return false;
179*4882a593Smuzhiyun 	return IS_ENABLED(CONFIG_X86_64) && !efi_enabled(EFI_64BIT);
180*4882a593Smuzhiyun }
181*4882a593Smuzhiyun 
efi_runtime_supported(void)182*4882a593Smuzhiyun static inline bool efi_runtime_supported(void)
183*4882a593Smuzhiyun {
184*4882a593Smuzhiyun 	if (IS_ENABLED(CONFIG_X86_64) == efi_enabled(EFI_64BIT))
185*4882a593Smuzhiyun 		return true;
186*4882a593Smuzhiyun 
187*4882a593Smuzhiyun 	return IS_ENABLED(CONFIG_EFI_MIXED);
188*4882a593Smuzhiyun }
189*4882a593Smuzhiyun 
190*4882a593Smuzhiyun extern void parse_efi_setup(u64 phys_addr, u32 data_len);
191*4882a593Smuzhiyun 
192*4882a593Smuzhiyun extern void efifb_setup_from_dmi(struct screen_info *si, const char *opt);
193*4882a593Smuzhiyun 
194*4882a593Smuzhiyun extern void efi_thunk_runtime_setup(void);
195*4882a593Smuzhiyun efi_status_t efi_set_virtual_address_map(unsigned long memory_map_size,
196*4882a593Smuzhiyun 					 unsigned long descriptor_size,
197*4882a593Smuzhiyun 					 u32 descriptor_version,
198*4882a593Smuzhiyun 					 efi_memory_desc_t *virtual_map,
199*4882a593Smuzhiyun 					 unsigned long systab_phys);
200*4882a593Smuzhiyun 
201*4882a593Smuzhiyun /* arch specific definitions used by the stub code */
202*4882a593Smuzhiyun 
203*4882a593Smuzhiyun #ifdef CONFIG_EFI_MIXED
204*4882a593Smuzhiyun 
205*4882a593Smuzhiyun #define ARCH_HAS_EFISTUB_WRAPPERS
206*4882a593Smuzhiyun 
efi_is_64bit(void)207*4882a593Smuzhiyun static inline bool efi_is_64bit(void)
208*4882a593Smuzhiyun {
209*4882a593Smuzhiyun 	extern const bool efi_is64;
210*4882a593Smuzhiyun 
211*4882a593Smuzhiyun 	return efi_is64;
212*4882a593Smuzhiyun }
213*4882a593Smuzhiyun 
efi_is_native(void)214*4882a593Smuzhiyun static inline bool efi_is_native(void)
215*4882a593Smuzhiyun {
216*4882a593Smuzhiyun 	if (!IS_ENABLED(CONFIG_X86_64))
217*4882a593Smuzhiyun 		return true;
218*4882a593Smuzhiyun 	return efi_is_64bit();
219*4882a593Smuzhiyun }
220*4882a593Smuzhiyun 
/*
 * In the 32-bit table layout, pointer-valued fields are stored as u32.
 * Widen those to unsigned long first so the cast back to the native
 * pointer type is well-formed; non-u32 fields pass through unchanged.
 */
#define efi_mixed_mode_cast(attr)					\
	__builtin_choose_expr(						\
		__builtin_types_compatible_p(u32, __typeof__(attr)),	\
			(unsigned long)(attr), (attr))

/*
 * Read a table field through either the native layout or the 32-bit
 * mixed_mode layout, yielding the native field type in both cases.
 */
#define efi_table_attr(inst, attr)					\
	(efi_is_native()						\
		? inst->attr						\
		: (__typeof__(inst->attr))				\
			efi_mixed_mode_cast(inst->mixed_mode.attr))

232*4882a593Smuzhiyun /*
233*4882a593Smuzhiyun  * The following macros allow translating arguments if necessary from native to
234*4882a593Smuzhiyun  * mixed mode. The use case for this is to initialize the upper 32 bits of
235*4882a593Smuzhiyun  * output parameters, and where the 32-bit method requires a 64-bit argument,
236*4882a593Smuzhiyun  * which must be split up into two arguments to be thunked properly.
237*4882a593Smuzhiyun  *
238*4882a593Smuzhiyun  * As examples, the AllocatePool boot service returns the address of the
239*4882a593Smuzhiyun  * allocation, but it will not set the high 32 bits of the address. To ensure
240*4882a593Smuzhiyun  * that the full 64-bit address is initialized, we zero-init the address before
241*4882a593Smuzhiyun  * calling the thunk.
242*4882a593Smuzhiyun  *
243*4882a593Smuzhiyun  * The FreePages boot service takes a 64-bit physical address even in 32-bit
244*4882a593Smuzhiyun  * mode. For the thunk to work correctly, a native 64-bit call of
245*4882a593Smuzhiyun  * 	free_pages(addr, size)
246*4882a593Smuzhiyun  * must be translated to
247*4882a593Smuzhiyun  * 	efi64_thunk(free_pages, addr & U32_MAX, addr >> 32, size)
248*4882a593Smuzhiyun  * so that the two 32-bit halves of addr get pushed onto the stack separately.
249*4882a593Smuzhiyun  */
250*4882a593Smuzhiyun 
efi64_zero_upper(void * p)251*4882a593Smuzhiyun static inline void *efi64_zero_upper(void *p)
252*4882a593Smuzhiyun {
253*4882a593Smuzhiyun 	((u32 *)p)[1] = 0;
254*4882a593Smuzhiyun 	return p;
255*4882a593Smuzhiyun }
256*4882a593Smuzhiyun 
efi64_convert_status(efi_status_t status)257*4882a593Smuzhiyun static inline u32 efi64_convert_status(efi_status_t status)
258*4882a593Smuzhiyun {
259*4882a593Smuzhiyun 	return (u32)(status | (u64)status >> 32);
260*4882a593Smuzhiyun }
261*4882a593Smuzhiyun 
/* Upper half of addr passed as 0 — presumably allocations sit below 4G;
 * see the FreePages example above. TODO(review): confirm against callers. */
#define __efi64_argmap_free_pages(addr, size)				\
	((addr), 0, (size))

#define __efi64_argmap_get_memory_map(mm_size, mm, key, size, ver)	\
	((mm_size), (mm), efi64_zero_upper(key), efi64_zero_upper(size), (ver))

#define __efi64_argmap_allocate_pool(type, size, buffer)		\
	((type), (size), efi64_zero_upper(buffer))

#define __efi64_argmap_create_event(type, tpl, f, c, event)		\
	((type), (tpl), (f), (c), efi64_zero_upper(event))

/* 64-bit trigger time split into two 32-bit halves for the thunk. */
#define __efi64_argmap_set_timer(event, type, time)			\
	((event), (type), lower_32_bits(time), upper_32_bits(time))

#define __efi64_argmap_wait_for_event(num, event, index)		\
	((num), (event), efi64_zero_upper(index))

#define __efi64_argmap_handle_protocol(handle, protocol, interface)	\
	((handle), (protocol), efi64_zero_upper(interface))

#define __efi64_argmap_locate_protocol(protocol, reg, interface)	\
	((protocol), (reg), efi64_zero_upper(interface))

#define __efi64_argmap_locate_device_path(protocol, path, handle)	\
	((protocol), (path), efi64_zero_upper(handle))

#define __efi64_argmap_exit(handle, status, size, data)			\
	((handle), efi64_convert_status(status), (size), (data))

/* PCI I/O */
#define __efi64_argmap_get_location(protocol, seg, bus, dev, func)	\
	((protocol), efi64_zero_upper(seg), efi64_zero_upper(bus),	\
	 efi64_zero_upper(dev), efi64_zero_upper(func))

/* LoadFile */
#define __efi64_argmap_load_file(protocol, path, policy, bufsize, buf)	\
	((protocol), (path), (policy), efi64_zero_upper(bufsize), (buf))

/* Graphics Output Protocol */
#define __efi64_argmap_query_mode(gop, mode, size, info)		\
	((gop), (mode), efi64_zero_upper(size), efi64_zero_upper(info))

/*
 * The macros below handle the plumbing for the argument mapping. To add a
 * mapping for a specific EFI method, simply define a macro
 * __efi64_argmap_<method name>, following the examples above.
 */

#define __efi64_thunk_map(inst, func, ...)				\
	efi64_thunk(inst->mixed_mode.func,				\
		__efi64_argmap(__efi64_argmap_ ## func(__VA_ARGS__),	\
			       (__VA_ARGS__)))

/*
 * If __efi64_argmap_<func> exists, 'mapped' is a parenthesized list which
 * __efi_eat swallows entirely, so __efi_nargs() yields 0 and the mapped
 * list is used. Otherwise the unexpanded token sequence survives as a
 * single argument (count 1) and the original arguments pass through.
 */
#define __efi64_argmap(mapped, args)					\
	__PASTE(__efi64_argmap__, __efi_nargs(__efi_eat mapped))(mapped, args)
#define __efi64_argmap__0(mapped, args) __efi_eval mapped
#define __efi64_argmap__1(mapped, args) __efi_eval args

#define __efi_eat(...)
#define __efi_eval(...) __VA_ARGS__

/* The three macros below handle dispatching via the thunk if needed */

/* Protocol method call; 'inst' is also passed as the method's first arg. */
#define efi_call_proto(inst, func, ...)					\
	(efi_is_native()						\
		? inst->func(inst, ##__VA_ARGS__)			\
		: __efi64_thunk_map(inst, func, inst, ##__VA_ARGS__))

/* Boot-services call through the system table. */
#define efi_bs_call(func, ...)						\
	(efi_is_native()						\
		? efi_system_table->boottime->func(__VA_ARGS__)		\
		: __efi64_thunk_map(efi_table_attr(efi_system_table,	\
						   boottime),		\
				    func, __VA_ARGS__))

/* Runtime-services call through the system table. */
#define efi_rt_call(func, ...)						\
	(efi_is_native()						\
		? efi_system_table->runtime->func(__VA_ARGS__)		\
		: __efi64_thunk_map(efi_table_attr(efi_system_table,	\
						   runtime),		\
				    func, __VA_ARGS__))

345*4882a593Smuzhiyun #else /* CONFIG_EFI_MIXED */
346*4882a593Smuzhiyun 
efi_is_64bit(void)347*4882a593Smuzhiyun static inline bool efi_is_64bit(void)
348*4882a593Smuzhiyun {
349*4882a593Smuzhiyun 	return IS_ENABLED(CONFIG_X86_64);
350*4882a593Smuzhiyun }
351*4882a593Smuzhiyun 
352*4882a593Smuzhiyun #endif /* CONFIG_EFI_MIXED */
353*4882a593Smuzhiyun 
354*4882a593Smuzhiyun extern bool efi_reboot_required(void);
355*4882a593Smuzhiyun extern bool efi_is_table_address(unsigned long phys_addr);
356*4882a593Smuzhiyun 
357*4882a593Smuzhiyun extern void efi_find_mirror(void);
358*4882a593Smuzhiyun extern void efi_reserve_boot_services(void);
359*4882a593Smuzhiyun #else
parse_efi_setup(u64 phys_addr,u32 data_len)360*4882a593Smuzhiyun static inline void parse_efi_setup(u64 phys_addr, u32 data_len) {}
efi_reboot_required(void)361*4882a593Smuzhiyun static inline bool efi_reboot_required(void)
362*4882a593Smuzhiyun {
363*4882a593Smuzhiyun 	return false;
364*4882a593Smuzhiyun }
efi_is_table_address(unsigned long phys_addr)365*4882a593Smuzhiyun static inline  bool efi_is_table_address(unsigned long phys_addr)
366*4882a593Smuzhiyun {
367*4882a593Smuzhiyun 	return false;
368*4882a593Smuzhiyun }
efi_find_mirror(void)369*4882a593Smuzhiyun static inline void efi_find_mirror(void)
370*4882a593Smuzhiyun {
371*4882a593Smuzhiyun }
efi_reserve_boot_services(void)372*4882a593Smuzhiyun static inline void efi_reserve_boot_services(void)
373*4882a593Smuzhiyun {
374*4882a593Smuzhiyun }
375*4882a593Smuzhiyun #endif /* CONFIG_EFI */
376*4882a593Smuzhiyun 
377*4882a593Smuzhiyun #ifdef CONFIG_EFI_FAKE_MEMMAP
378*4882a593Smuzhiyun extern void __init efi_fake_memmap_early(void);
379*4882a593Smuzhiyun #else
efi_fake_memmap_early(void)380*4882a593Smuzhiyun static inline void efi_fake_memmap_early(void)
381*4882a593Smuzhiyun {
382*4882a593Smuzhiyun }
383*4882a593Smuzhiyun #endif
384*4882a593Smuzhiyun 
385*4882a593Smuzhiyun #endif /* _ASM_X86_EFI_H */
386