// SPDX-License-Identifier: GPL-2.0-only
/*
 * Extensible Firmware Interface
 *
 * Based on Extensible Firmware Interface Specification version 2.4
 *
 * Copyright (C) 2013, 2014 Linaro Ltd.
 */

#include <linux/efi.h>
#include <linux/init.h>

#include <asm/efi.h>
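
/*
 * The UEFI memory map describes regions at EFI_PAGE_SIZE (4 KiB)
 * granularity, so with 16K or 64K kernel pages a runtime region may start
 * or end in the middle of an OS page. Report such misaligned regions so
 * that callers can relax permissions and avoid block mappings for them.
 */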
static bool region_is_misaligned(const efi_memory_desc_t *md)
{
	if (PAGE_SIZE == EFI_PAGE_SIZE)
		return false;
	return !PAGE_ALIGNED(md->phys_addr) ||
	       !PAGE_ALIGNED(md->num_pages << EFI_PAGE_SHIFT);
}

/*
 * Only regions of type EFI_RUNTIME_SERVICES_CODE need to be
 * executable, everything else can be mapped with the XN bits
 * set. Also take the new (optional) RO/XP bits into account.
 */
static __init pteval_t create_mapping_protection(efi_memory_desc_t *md)
{
	u64 attr = md->attribute;
	u32 type = md->type;

	if (type == EFI_MEMORY_MAPPED_IO)
		return PROT_DEVICE_nGnRE;

	if (region_is_misaligned(md)) {
		static bool __initdata code_is_misaligned;

		/*
		 * Regions that are not aligned to the OS page size cannot be
		 * mapped with strict permissions, as those might interfere
		 * with the permissions that are needed by the adjacent
		 * region's mapping. However, if we haven't encountered any
		 * misaligned runtime code regions so far, we can safely use
		 * non-executable permissions for non-code regions.
		 */
		code_is_misaligned |= (type == EFI_RUNTIME_SERVICES_CODE);

		return code_is_misaligned ? pgprot_val(PAGE_KERNEL_EXEC)
					  : pgprot_val(PAGE_KERNEL);
	}

	/* R-- */
	if ((attr & (EFI_MEMORY_XP | EFI_MEMORY_RO)) ==
	    (EFI_MEMORY_XP | EFI_MEMORY_RO))
		return pgprot_val(PAGE_KERNEL_RO);

	/* R-X */
	if (attr & EFI_MEMORY_RO)
		return pgprot_val(PAGE_KERNEL_ROX);

	/* RW- */
	if (((attr & (EFI_MEMORY_RP | EFI_MEMORY_WP | EFI_MEMORY_XP)) ==
	     EFI_MEMORY_XP) ||
	    type != EFI_RUNTIME_SERVICES_CODE)
		return pgprot_val(PAGE_KERNEL);

	/* RWX */
	return pgprot_val(PAGE_KERNEL_EXEC);
}

/* we will fill this structure from the stub, so don't put it in .bss */
struct screen_info screen_info __section(".data");
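
/*
 * Install the runtime mapping described by @md into @mm. Runtime code and
 * data regions are always mapped down to pages so that their permissions
 * can be tightened later via efi_set_mapping_permissions().
 */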
int __init efi_create_mapping(struct mm_struct *mm, efi_memory_desc_t *md)
{
	pteval_t prot_val = create_mapping_protection(md);
	bool page_mappings_only = (md->type == EFI_RUNTIME_SERVICES_CODE ||
				   md->type == EFI_RUNTIME_SERVICES_DATA);

	/*
	 * If this region is not aligned to the page size used by the OS, the
	 * mapping will be rounded outwards, and may end up sharing a page
	 * frame with an adjacent runtime memory region. Given that the page
	 * table descriptor covering the shared page will be rewritten when the
	 * adjacent region gets mapped, we must avoid block mappings here so we
	 * don't have to worry about splitting them when that happens.
	 */
	if (region_is_misaligned(md))
		page_mappings_only = true;

	create_pgd_mapping(mm, md->phys_addr, md->virt_addr,
			   md->num_pages << EFI_PAGE_SHIFT,
			   __pgprot(prot_val | PTE_NG), page_mappings_only);
	return 0;
}
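
/*
 * apply_to_page_range() callback: tighten an individual PTE according to
 * the RO/XP attributes of the memory descriptor passed in via @data.
 */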
static int __init set_permissions(pte_t *ptep, unsigned long addr, void *data)
{
	efi_memory_desc_t *md = data;
	pte_t pte = READ_ONCE(*ptep);

	if (md->attribute & EFI_MEMORY_RO)
		pte = set_pte_bit(pte, __pgprot(PTE_RDONLY));
	if (md->attribute & EFI_MEMORY_XP)
		pte = set_pte_bit(pte, __pgprot(PTE_PXN));
	set_pte(ptep, pte);
	return 0;
}

int __init efi_set_mapping_permissions(struct mm_struct *mm,
				       efi_memory_desc_t *md)
{
	BUG_ON(md->type != EFI_RUNTIME_SERVICES_CODE &&
	       md->type != EFI_RUNTIME_SERVICES_DATA);
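
	/*
	 * Misaligned regions were mapped with lenient permissions by
	 * efi_create_mapping(), so leave their PTEs untouched here.
	 */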
	if (region_is_misaligned(md))
		return 0;

	/*
	 * Calling apply_to_page_range() is only safe on regions that are
	 * guaranteed to be mapped down to pages. Since we are only called
	 * for regions that have been mapped using efi_create_mapping() above
	 * (and this is checked by the generic Memory Attributes table parsing
	 * routines), there is no need to check that again here.
	 */
	return apply_to_page_range(mm, md->virt_addr,
				   md->num_pages << EFI_PAGE_SHIFT,
				   set_permissions, md);
}

/*
 * UpdateCapsule() depends on the system being shutdown via
 * ResetSystem().
 */
bool efi_poweroff_required(void)
{
	return efi_enabled(EFI_RUNTIME_SERVICES);
}
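
/*
 * Called from the EFI runtime service call wrapper when the firmware call
 * is found to have clobbered x18 (the platform register, used as the
 * shadow call stack pointer when that feature is enabled). Report the
 * offending service and propagate its return status unchanged.
 */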
asmlinkage efi_status_t efi_handle_corrupted_x18(efi_status_t s, const char *f)
{
	pr_err_ratelimited(FW_BUG "register x18 corrupted by EFI %s\n", f);
	return s;
}
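
/*
 * EFI runtime service calls run on the dedicated stack below and are
 * serialized by efi_rt_lock, so only one caller uses that stack at a time.
 */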
DEFINE_SPINLOCK(efi_rt_lock);

asmlinkage u64 *efi_rt_stack_top __ro_after_init;

/* EFI requires 8 KiB of stack space for runtime services */
static_assert(THREAD_SIZE >= SZ_8K);
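
/*
 * Allocate the dedicated stack used for EFI runtime service calls. If the
 * allocation fails, clear EFI_RUNTIME_SERVICES so that no such calls are
 * attempted. The &&l expression passes the address of the label below as
 * the caller argument of __vmalloc_node(), used only for allocation
 * tracking.
 */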
static int __init arm64_efi_rt_init(void)
{
	void *p;

	if (!efi_enabled(EFI_RUNTIME_SERVICES))
		return 0;

	p = __vmalloc_node(THREAD_SIZE, THREAD_ALIGN, GFP_KERNEL,
			   NUMA_NO_NODE, &&l);
l:	if (!p) {
		pr_warn("Failed to allocate EFI runtime stack\n");
		clear_bit(EFI_RUNTIME_SERVICES, &efi.flags);
		return -ENOMEM;
	}

	efi_rt_stack_top = p + THREAD_SIZE;
	return 0;
}
core_initcall(arm64_efi_rt_init);