1*4882a593Smuzhiyun /* SPDX-License-Identifier: GPL-2.0 */
2*4882a593Smuzhiyun #ifndef _ASM_EFI_H
3*4882a593Smuzhiyun #define _ASM_EFI_H
4*4882a593Smuzhiyun
5*4882a593Smuzhiyun #include <asm/boot.h>
6*4882a593Smuzhiyun #include <asm/cpufeature.h>
7*4882a593Smuzhiyun #include <asm/fpsimd.h>
8*4882a593Smuzhiyun #include <asm/io.h>
9*4882a593Smuzhiyun #include <asm/memory.h>
10*4882a593Smuzhiyun #include <asm/mmu_context.h>
11*4882a593Smuzhiyun #include <asm/neon.h>
12*4882a593Smuzhiyun #include <asm/ptrace.h>
13*4882a593Smuzhiyun #include <asm/tlbflush.h>
14*4882a593Smuzhiyun
#ifdef CONFIG_EFI
/* Arch EFI initialisation entry point (definition lives in arch EFI code). */
extern void efi_init(void);
#else
/* With CONFIG_EFI disabled, efi_init() compiles away to nothing. */
#define efi_init()
#endif

/* Install one UEFI memory descriptor into the EFI page tables of 'mm'. */
int efi_create_mapping(struct mm_struct *mm, efi_memory_desc_t *md);
/* Update permissions on a region that efi_create_mapping() already mapped. */
int efi_set_mapping_permissions(struct mm_struct *mm, efi_memory_desc_t *md);
23*4882a593Smuzhiyun
/*
 * Prepare to invoke an EFI runtime service: activate the EFI virtual
 * mappings, take ownership of the FP/SIMD state, and acquire the lock
 * that serialises runtime service calls.  Must be paired with
 * arch_efi_call_virt_teardown(), which undoes these steps in reverse.
 */
#define arch_efi_call_virt_setup()					\
({									\
	efi_virtmap_load();						\
	__efi_fpsimd_begin();						\
	spin_lock(&efi_rt_lock);					\
})
30*4882a593Smuzhiyun
/*
 * Call runtime service 'f' from the runtime services table 'p'.  The
 * function pointer is routed through __efi_rt_asm_wrapper(), together
 * with the stringified service name (#f) for use in diagnostics.
 */
#define arch_efi_call_virt(p, f, args...)				\
({									\
	efi_##f##_t *__f;						\
	__f = p->f;							\
	__efi_rt_asm_wrapper(__f, #f, args);				\
})
37*4882a593Smuzhiyun
/*
 * Undo arch_efi_call_virt_setup() in exactly the reverse order:
 * release the runtime-call lock, give back the FP/SIMD state, then
 * switch away from the EFI virtual mappings.
 */
#define arch_efi_call_virt_teardown()					\
({									\
	spin_unlock(&efi_rt_lock);					\
	__efi_fpsimd_end();						\
	efi_virtmap_unload();						\
})
44*4882a593Smuzhiyun
/*
 * Serialises EFI runtime service calls (taken in arch_efi_call_virt_setup()).
 * NOTE(review): upstream later converted this to a raw_spinlock for RT
 * correctness; the definition lives in the arch EFI .c file — confirm both
 * sides before changing the type here.
 */
extern spinlock_t efi_rt_lock;
/* Asm trampoline: (service function pointer, service name, service args...). */
efi_status_t __efi_rt_asm_wrapper(void *, const char *, ...);
47*4882a593Smuzhiyun
/* All four DAIF exception-mask bits are saved/restored around EFI calls. */
#define ARCH_EFI_IRQ_FLAGS_MASK (PSR_D_BIT | PSR_A_BIT | PSR_I_BIT | PSR_F_BIT)

/*
 * Even when Linux uses IRQ priorities for IRQ disabling, EFI does not.
 * And EFI shouldn't really play around with priority masking as it is not aware
 * which priorities the OS has assigned to its interrupts.
 * Hence flags are saved/restored directly from/to the DAIF system register
 * rather than via the generic (possibly PMR-based) local_irq_save()/restore().
 */
#define arch_efi_save_flags(state_flags) \
	((void)((state_flags) = read_sysreg(daif)))

#define arch_efi_restore_flags(state_flags) write_sysreg(state_flags, daif)
59*4882a593Smuzhiyun
60*4882a593Smuzhiyun
/* arch specific definitions used by the stub code */

/*
 * In some configurations (e.g. VMAP_STACK && 64K pages), stacks built into the
 * kernel need greater alignment than we require the segments to be padded to.
 * Use whichever of the two alignments is stricter.
 */
#define EFI_KIMG_ALIGN	\
	(SEGMENT_ALIGN > THREAD_ALIGN ? SEGMENT_ALIGN : THREAD_ALIGN)
69*4882a593Smuzhiyun
70*4882a593Smuzhiyun /* on arm64, the FDT may be located anywhere in system RAM */
static inline unsigned long efi_get_max_fdt_addr(unsigned long image_addr)
{
	/*
	 * No placement restriction on arm64: report the largest possible
	 * address so the stub may put the FDT anywhere in system RAM.
	 */
	return ~0UL;
}
75*4882a593Smuzhiyun
76*4882a593Smuzhiyun /*
77*4882a593Smuzhiyun * On arm64, we have to ensure that the initrd ends up in the linear region,
78*4882a593Smuzhiyun * which is a 1 GB aligned region of size '1UL << (VA_BITS_MIN - 1)' that is
79*4882a593Smuzhiyun * guaranteed to cover the kernel Image.
80*4882a593Smuzhiyun *
81*4882a593Smuzhiyun * Since the EFI stub is part of the kernel Image, we can relax the
82*4882a593Smuzhiyun * usual requirements in Documentation/arm64/booting.rst, which still
83*4882a593Smuzhiyun * apply to other bootloaders, and are required for some kernel
84*4882a593Smuzhiyun * configurations.
85*4882a593Smuzhiyun */
efi_get_max_initrd_addr(unsigned long image_addr)86*4882a593Smuzhiyun static inline unsigned long efi_get_max_initrd_addr(unsigned long image_addr)
87*4882a593Smuzhiyun {
88*4882a593Smuzhiyun return (image_addr & ~(SZ_1G - 1UL)) + (1UL << (VA_BITS_MIN - 1));
89*4882a593Smuzhiyun }
90*4882a593Smuzhiyun
91*4882a593Smuzhiyun #define alloc_screen_info(x...) &screen_info
92*4882a593Smuzhiyun
static inline void free_screen_info(struct screen_info *si)
{
	/*
	 * Intentionally empty: alloc_screen_info() returns the address of
	 * the global screen_info, so there is nothing to free.
	 */
}
96*4882a593Smuzhiyun
static inline void efifb_setup_from_dmi(struct screen_info *si, const char *opt)
{
	/* Intentionally a no-op: no DMI-based efifb quirks on this arch. */
}
100*4882a593Smuzhiyun
101*4882a593Smuzhiyun #define EFI_ALLOC_ALIGN SZ_64K
102*4882a593Smuzhiyun
103*4882a593Smuzhiyun /*
104*4882a593Smuzhiyun * On ARM systems, virtually remapped UEFI runtime services are set up in two
105*4882a593Smuzhiyun * distinct stages:
106*4882a593Smuzhiyun * - The stub retrieves the final version of the memory map from UEFI, populates
107*4882a593Smuzhiyun * the virt_addr fields and calls the SetVirtualAddressMap() [SVAM] runtime
108*4882a593Smuzhiyun * service to communicate the new mapping to the firmware (Note that the new
109*4882a593Smuzhiyun * mapping is not live at this time)
110*4882a593Smuzhiyun * - During an early initcall(), the EFI system table is permanently remapped
111*4882a593Smuzhiyun * and the virtual remapping of the UEFI Runtime Services regions is loaded
112*4882a593Smuzhiyun * into a private set of page tables. If this all succeeds, the Runtime
113*4882a593Smuzhiyun * Services are enabled and the EFI_RUNTIME_SERVICES bit set.
114*4882a593Smuzhiyun */
115*4882a593Smuzhiyun
/*
 * Make 'mm' the active translation for TTBR0 (used around EFI runtime
 * service calls) and keep the TTBR0-PAN bookkeeping consistent with the
 * switch.  The two branches below are order-sensitive: the saved ttbr0
 * must match whatever a subsequent exception return would restore.
 */
static inline void efi_set_pgd(struct mm_struct *mm)
{
	__switch_mm(mm);

	if (system_uses_ttbr0_pan()) {
		if (mm != current->active_mm) {
			/*
			 * Update the current thread's saved ttbr0 since it is
			 * restored as part of a return from exception. Enable
			 * access to the valid TTBR0_EL1 and invoke the errata
			 * workaround directly since there is no return from
			 * exception when invoking the EFI run-time services.
			 */
			update_saved_ttbr0(current, mm);
			uaccess_ttbr0_enable();
			post_ttbr_update_workaround();
		} else {
			/*
			 * Defer the switch to the current thread's TTBR0_EL1
			 * until uaccess_enable(). Restore the current
			 * thread's saved ttbr0 corresponding to its active_mm
			 */
			uaccess_ttbr0_disable();
			update_saved_ttbr0(current, current->active_mm);
		}
	}
}
143*4882a593Smuzhiyun
/* Switch to / away from the EFI runtime mappings around service calls. */
void efi_virtmap_load(void);
void efi_virtmap_unload(void);
146*4882a593Smuzhiyun
147*4882a593Smuzhiyun #endif /* _ASM_EFI_H */
148