// SPDX-License-Identifier: GPL-2.0
/*
 * x86_64 specific EFI support functions
 * Based on Extensible Firmware Interface Specification version 1.0
 *
 * Copyright (C) 2005-2008 Intel Co.
 *	Fenghua Yu <fenghua.yu@intel.com>
 *	Bibo Mao <bibo.mao@intel.com>
 *	Chandramouli Narayanan <mouli@linux.intel.com>
 *	Huang Ying <ying.huang@intel.com>
 *
 * Code to convert EFI to E820 map has been implemented in elilo bootloader
 * based on an EFI patch by Edgar Hucek. Based on the E820 map, the page table
 * is set up appropriately for EFI runtime code.
 * - mouli 06/14/2007.
 *
 */

#define pr_fmt(fmt) "efi: " fmt

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/types.h>
#include <linux/spinlock.h>
#include <linux/memblock.h>
#include <linux/ioport.h>
#include <linux/mc146818rtc.h>
#include <linux/efi.h>
#include <linux/export.h>
#include <linux/uaccess.h>
#include <linux/io.h>
#include <linux/reboot.h>
#include <linux/slab.h>
#include <linux/ucs2_string.h>
#include <linux/mem_encrypt.h>
#include <linux/sched/task.h>

#include <asm/setup.h>
#include <asm/page.h>
#include <asm/e820/api.h>
#include <asm/tlbflush.h>
#include <asm/proto.h>
#include <asm/efi.h>
#include <asm/cacheflush.h>
#include <asm/fixmap.h>
#include <asm/realmode.h>
#include <asm/time.h>
#include <asm/pgalloc.h>
#include <asm/sev-es.h>

/*
 * We allocate runtime services regions top-down, starting from -4G, i.e.
 * 0xffff_ffff_0000_0000 and limit EFI VA mapping space to 64G.
 */
static u64 efi_va = EFI_VA_START;

struct efi_scratch efi_scratch;

EXPORT_SYMBOL_GPL(efi_mm);

/*
 * We need our own copy of the higher levels of the page tables
 * because we want to avoid inserting EFI region mappings (EFI_VA_END
 * to EFI_VA_START) into the standard kernel page tables. Everything
 * else can be shared, see efi_sync_low_kernel_mappings().
 *
 * We don't want the pgd on the pgd_list and cannot use pgd_alloc() for the
 * allocation.
 */
int __init efi_alloc_page_tables(void)
{
	pgd_t *pgd, *efi_pgd;
	p4d_t *p4d;
	pud_t *pud;
	gfp_t gfp_mask;

	gfp_mask = GFP_KERNEL | __GFP_ZERO;
	efi_pgd = (pgd_t *)__get_free_pages(gfp_mask, PGD_ALLOCATION_ORDER);
	if (!efi_pgd)
		goto fail;

	pgd = efi_pgd + pgd_index(EFI_VA_END);
	p4d = p4d_alloc(&init_mm, pgd, EFI_VA_END);
	if (!p4d)
		goto free_pgd;

	pud = pud_alloc(&init_mm, p4d, EFI_VA_END);
	if (!pud)
		goto free_p4d;

	efi_mm.pgd = efi_pgd;
	mm_init_cpumask(&efi_mm);
	init_new_context(NULL, &efi_mm);

	return 0;

free_p4d:
	if (pgtable_l5_enabled())
		free_page((unsigned long)pgd_page_vaddr(*pgd));
free_pgd:
	free_pages((unsigned long)efi_pgd, PGD_ALLOCATION_ORDER);
fail:
	return -ENOMEM;
}

/*
 * Add low kernel mappings for passing arguments to EFI functions.
 */
void efi_sync_low_kernel_mappings(void)
{
	unsigned num_entries;
	pgd_t *pgd_k, *pgd_efi;
	p4d_t *p4d_k, *p4d_efi;
	pud_t *pud_k, *pud_efi;
	pgd_t *efi_pgd = efi_mm.pgd;

	pgd_efi = efi_pgd + pgd_index(PAGE_OFFSET);
	pgd_k = pgd_offset_k(PAGE_OFFSET);

	num_entries = pgd_index(EFI_VA_END) - pgd_index(PAGE_OFFSET);
	memcpy(pgd_efi, pgd_k, sizeof(pgd_t) * num_entries);

	pgd_efi = efi_pgd + pgd_index(EFI_VA_END);
	pgd_k = pgd_offset_k(EFI_VA_END);
	p4d_efi = p4d_offset(pgd_efi, 0);
	p4d_k = p4d_offset(pgd_k, 0);

	num_entries = p4d_index(EFI_VA_END);
	memcpy(p4d_efi, p4d_k, sizeof(p4d_t) * num_entries);

	/*
	 * We share all the PUD entries apart from those that map the
	 * EFI regions. Copy around them.
	 */
	BUILD_BUG_ON((EFI_VA_START & ~PUD_MASK) != 0);
	BUILD_BUG_ON((EFI_VA_END & ~PUD_MASK) != 0);

	p4d_efi = p4d_offset(pgd_efi, EFI_VA_END);
	p4d_k = p4d_offset(pgd_k, EFI_VA_END);
	pud_efi = pud_offset(p4d_efi, 0);
	pud_k = pud_offset(p4d_k, 0);

	num_entries = pud_index(EFI_VA_END);
	memcpy(pud_efi, pud_k, sizeof(pud_t) * num_entries);

	pud_efi = pud_offset(p4d_efi, EFI_VA_START);
	pud_k = pud_offset(p4d_k, EFI_VA_START);

	num_entries = PTRS_PER_PUD - pud_index(EFI_VA_START);
	memcpy(pud_efi, pud_k, sizeof(pud_t) * num_entries);
}

/*
 * Wrapper for slow_virt_to_phys() that handles NULL addresses. Also
 * warns and returns 0 for objects outside the direct map that cross a
 * page boundary, since their physical pages are not guaranteed to be
 * contiguous.
 */
static inline phys_addr_t
virt_to_phys_or_null_size(void *va, unsigned long size)
{
	phys_addr_t pa;

	if (!va)
		return 0;

	if (virt_addr_valid(va))
		return virt_to_phys(va);

	pa = slow_virt_to_phys(va);

	/* check if the object crosses a page boundary */
	if (WARN_ON((pa ^ (pa + size - 1)) & PAGE_MASK))
		return 0;

	return pa;
}

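/* Convenience form for fixed-size objects: uses sizeof(*(addr)) as the size */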
#define virt_to_phys_or_null(addr) \
	virt_to_phys_or_null_size((addr), sizeof(*(addr)))

int __init efi_setup_page_tables(unsigned long pa_memmap, unsigned num_pages)
{
	unsigned long pfn, text, pf, rodata;
	struct page *page;
	unsigned npages;
	pgd_t *pgd = efi_mm.pgd;

	/*
	 * It can happen that the physical address of new_memmap lands in memory
	 * which is not mapped in the EFI page table. Therefore we need to go
	 * and ident-map those pages containing the map before calling
	 * phys_efi_set_virtual_address_map().
	 */
	pfn = pa_memmap >> PAGE_SHIFT;
	pf = _PAGE_NX | _PAGE_RW | _PAGE_ENC;
	if (kernel_map_pages_in_pgd(pgd, pfn, pa_memmap, num_pages, pf)) {
		pr_err("Error ident-mapping new memmap (0x%lx)!\n", pa_memmap);
		return 1;
	}

	/*
	 * Certain firmware versions are way too sentimental and still believe
	 * they are exclusive and unquestionable owners of the first physical page,
	 * even though they explicitly mark it as EFI_CONVENTIONAL_MEMORY
	 * (but then write-access it later during SetVirtualAddressMap()).
	 *
	 * Create a 1:1 mapping for this page, to avoid triple faults during early
	 * boot with such firmware. We are free to hand this page to the BIOS,
	 * as trim_bios_range() will reserve the first page and isolate it away
	 * from memory allocators anyway.
	 */
	if (kernel_map_pages_in_pgd(pgd, 0x0, 0x0, 1, pf)) {
		pr_err("Failed to create 1:1 mapping for the first page!\n");
		return 1;
	}

	/*
	 * When SEV-ES is active, the GHCB as set by the kernel will be used
	 * by firmware. Create a 1:1 unencrypted mapping for each GHCB.
	 */
	if (sev_es_efi_map_ghcbs(pgd)) {
		pr_err("Failed to create 1:1 mapping for the GHCBs!\n");
		return 1;
	}

	/*
	 * When making calls to the firmware everything needs to be 1:1
	 * mapped and addressable with 32-bit pointers. Map the kernel
	 * text and allocate a new stack because we can't rely on the
	 * stack pointer being < 4GB.
	 */
	if (!efi_is_mixed())
		return 0;

	page = alloc_page(GFP_KERNEL|__GFP_DMA32);
	if (!page) {
		pr_err("Unable to allocate EFI runtime stack < 4GB\n");
		return 1;
	}

	efi_scratch.phys_stack = page_to_phys(page + 1); /* stack grows down */

	npages = (_etext - _text) >> PAGE_SHIFT;
	text = __pa(_text);
	pfn = text >> PAGE_SHIFT;

	pf = _PAGE_ENC;
	if (kernel_map_pages_in_pgd(pgd, pfn, text, npages, pf)) {
		pr_err("Failed to map kernel text 1:1\n");
		return 1;
	}

	npages = (__end_rodata - __start_rodata) >> PAGE_SHIFT;
	rodata = __pa(__start_rodata);
	pfn = rodata >> PAGE_SHIFT;

	pf = _PAGE_NX | _PAGE_ENC;
	if (kernel_map_pages_in_pgd(pgd, pfn, rodata, npages, pf)) {
		pr_err("Failed to map kernel rodata 1:1\n");
		return 1;
	}

	return 0;
}

static void __init __map_region(efi_memory_desc_t *md, u64 va)
{
	unsigned long flags = _PAGE_RW;
	unsigned long pfn;
	pgd_t *pgd = efi_mm.pgd;

	/*
	 * EFI_RUNTIME_SERVICES_CODE regions typically cover PE/COFF
	 * executable images in memory that consist of both R-X and
	 * RW- sections, so we cannot apply read-only or non-exec
	 * permissions just yet. However, modern EFI systems provide
	 * a memory attributes table that describes those sections
	 * with the appropriate restricted permissions, which are
	 * applied in efi_runtime_update_mappings() below. All other
	 * regions can be mapped non-executable at this point, with
	 * the exception of boot services code regions, but those will
	 * be unmapped again entirely in efi_free_boot_services().
	 */
	if (md->type != EFI_BOOT_SERVICES_CODE &&
	    md->type != EFI_RUNTIME_SERVICES_CODE)
		flags |= _PAGE_NX;

	if (!(md->attribute & EFI_MEMORY_WB))
		flags |= _PAGE_PCD;

	if (sev_active() && md->type != EFI_MEMORY_MAPPED_IO)
		flags |= _PAGE_ENC;

	pfn = md->phys_addr >> PAGE_SHIFT;
	if (kernel_map_pages_in_pgd(pgd, pfn, va, md->num_pages, flags))
		pr_warn("Error mapping PA 0x%llx -> VA 0x%llx!\n",
			md->phys_addr, va);
}

void __init efi_map_region(efi_memory_desc_t *md)
{
	unsigned long size = md->num_pages << PAGE_SHIFT;
	u64 pa = md->phys_addr;

	/*
	 * Make sure the 1:1 mappings are present as a catch-all for b0rked
	 * firmware which doesn't update all internal pointers after switching
	 * to virtual mode and would otherwise crap on us.
	 */
	__map_region(md, md->phys_addr);

	/*
	 * Enforce the 1:1 mapping as the default virtual address when
	 * booting in EFI mixed mode, because even though we may be
	 * running a 64-bit kernel, the firmware may only be 32-bit.
	 */
	if (efi_is_mixed()) {
		md->virt_addr = md->phys_addr;
		return;
	}

	efi_va -= size;

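	/*
	 * Keep the VA congruent with the PA modulo 2M below: x86 can only
	 * use a PMD-level (2M) mapping when the virtual and physical
	 * addresses share the same offset within a 2M page. E.g. (made-up
	 * numbers) PA 0x7fe345000 sits 0x145000 into its 2M page, so the
	 * chosen VA must end in ...145000 as well.
	 */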
	/* Is PA 2M-aligned? */
	if (!(pa & (PMD_SIZE - 1))) {
		efi_va &= PMD_MASK;
	} else {
		u64 pa_offset = pa & (PMD_SIZE - 1);
		u64 prev_va = efi_va;

		/* get us the same offset within this 2M page */
		efi_va = (efi_va & PMD_MASK) + pa_offset;

		if (efi_va > prev_va)
			efi_va -= PMD_SIZE;
	}

	if (efi_va < EFI_VA_END) {
		pr_warn(FW_WARN "VA address range overflow!\n");
		return;
	}

	/* Do the VA map */
	__map_region(md, efi_va);
	md->virt_addr = efi_va;
}

/*
 * A kexec'ed kernel uses efi_map_region_fixed() to map the EFI runtime
 * memory ranges. md->virt_addr is the original virtual address that was
 * already mapped by the first kernel.
 */
void __init efi_map_region_fixed(efi_memory_desc_t *md)
{
	__map_region(md, md->phys_addr);
	__map_region(md, md->virt_addr);
}

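/*
 * Invoked for SETUP_EFI setup_data passed in by a kexec'ed kernel: stash
 * the physical address of the payload (skipping the setup_data header) so
 * that the previous kernel's EFI configuration can be reused.
 */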
void __init parse_efi_setup(u64 phys_addr, u32 data_len)
{
	efi_setup = phys_addr + sizeof(struct setup_data);
}

static int __init efi_update_mappings(efi_memory_desc_t *md, unsigned long pf)
{
	unsigned long pfn;
	pgd_t *pgd = efi_mm.pgd;
	int err1, err2;

	/* Update the 1:1 mapping */
	pfn = md->phys_addr >> PAGE_SHIFT;
	err1 = kernel_map_pages_in_pgd(pgd, pfn, md->phys_addr, md->num_pages, pf);
	if (err1) {
		pr_err("Error while updating 1:1 mapping PA 0x%llx -> VA 0x%llx!\n",
		       md->phys_addr, md->virt_addr);
	}

	err2 = kernel_map_pages_in_pgd(pgd, pfn, md->virt_addr, md->num_pages, pf);
	if (err2) {
		pr_err("Error while updating VA mapping PA 0x%llx -> VA 0x%llx!\n",
		       md->phys_addr, md->virt_addr);
	}

	return err1 || err2;
}

static int __init efi_update_mem_attr(struct mm_struct *mm, efi_memory_desc_t *md)
{
	unsigned long pf = 0;

	if (md->attribute & EFI_MEMORY_XP)
		pf |= _PAGE_NX;

	if (!(md->attribute & EFI_MEMORY_RO))
		pf |= _PAGE_RW;

	if (sev_active())
		pf |= _PAGE_ENC;

	return efi_update_mappings(md, pf);
}

void __init efi_runtime_update_mappings(void)
{
	efi_memory_desc_t *md;

	/*
	 * Use the EFI Memory Attribute Table for mapping permissions if it
	 * exists, since it is intended to supersede EFI_PROPERTIES_TABLE.
	 */
	if (efi_enabled(EFI_MEM_ATTR)) {
		efi_memattr_apply_permissions(NULL, efi_update_mem_attr);
		return;
	}

	/*
	 * EFI_MEMORY_ATTRIBUTES_TABLE is intended to replace
	 * EFI_PROPERTIES_TABLE. So, use EFI_PROPERTIES_TABLE to update
	 * permissions only if EFI_MEMORY_ATTRIBUTES_TABLE is not
	 * published by the firmware. Even if we find a buggy implementation
	 * of EFI_MEMORY_ATTRIBUTES_TABLE, don't fall back to
	 * EFI_PROPERTIES_TABLE, for the same reason.
	 */

	if (!efi_enabled(EFI_NX_PE_DATA))
		return;

	for_each_efi_memory_desc(md) {
		unsigned long pf = 0;

		if (!(md->attribute & EFI_MEMORY_RUNTIME))
			continue;

		if (!(md->attribute & EFI_MEMORY_WB))
			pf |= _PAGE_PCD;

		if ((md->attribute & EFI_MEMORY_XP) ||
		    (md->type == EFI_RUNTIME_SERVICES_DATA))
			pf |= _PAGE_NX;

		if (!(md->attribute & EFI_MEMORY_RO) &&
		    (md->type != EFI_RUNTIME_SERVICES_CODE))
			pf |= _PAGE_RW;

		if (sev_active())
			pf |= _PAGE_ENC;

		efi_update_mappings(md, pf);
	}
}

void __init efi_dump_pagetable(void)
{
#ifdef CONFIG_EFI_PGT_DUMP
	ptdump_walk_pgd_level(NULL, &efi_mm);
#endif
}

/*
 * Makes the calling thread switch to/from efi_mm context. Can be used
 * in a kernel thread and user context. Preemption needs to remain disabled
 * while the EFI-mm is borrowed. mmgrab()/mmdrop() is not used because the mm
 * cannot change under us.
 * It should be ensured that there are no concurrent calls to this function.
 */
void efi_switch_mm(struct mm_struct *mm)
{
	efi_scratch.prev_mm = current->active_mm;
	current->active_mm = mm;
	switch_mm(efi_scratch.prev_mm, mm, NULL);
}

static DEFINE_SPINLOCK(efi_runtime_lock);

/*
 * DS and ES contain user values. We need to save them.
 * The 32-bit EFI code needs a valid DS, ES, and SS. There's no
 * need to save the old SS: __KERNEL_DS is always acceptable.
 */
#define __efi_thunk(func, ...)						\
({									\
	unsigned short __ds, __es;					\
	efi_status_t ____s;						\
									\
	savesegment(ds, __ds);						\
	savesegment(es, __es);						\
									\
	loadsegment(ss, __KERNEL_DS);					\
	loadsegment(ds, __KERNEL_DS);					\
	loadsegment(es, __KERNEL_DS);					\
									\
	____s = efi64_thunk(efi.runtime->mixed_mode.func, __VA_ARGS__);	\
									\
	loadsegment(ds, __ds);						\
	loadsegment(es, __es);						\
									\
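	/*								\
	 * The 32-bit firmware flags an error in bit 31 of the status;	\
	 * fold that into bit 63, where the 64-bit EFI_STATUS		\
	 * convention expects it.					\
	 */								\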
	____s ^= (____s & BIT(31)) | (____s & BIT_ULL(31)) << 32;	\
	____s;								\
})

/*
 * Switch to the EFI page tables early so that we can access the 1:1
 * runtime services mappings which are not mapped in any other page
 * tables.
 *
 * Also, disable interrupts because the IDT points to 64-bit handlers,
 * which aren't going to function correctly when we switch to 32-bit.
 */
#define efi_thunk(func...)						\
({									\
	efi_status_t __s;						\
									\
	arch_efi_call_virt_setup();					\
									\
	__s = __efi_thunk(func);					\
									\
	arch_efi_call_virt_teardown();					\
									\
	__s;								\
})

static efi_status_t __init __no_sanitize_address
efi_thunk_set_virtual_address_map(unsigned long memory_map_size,
				  unsigned long descriptor_size,
				  u32 descriptor_version,
				  efi_memory_desc_t *virtual_map)
{
	efi_status_t status;
	unsigned long flags;

	efi_sync_low_kernel_mappings();
	local_irq_save(flags);

	efi_switch_mm(&efi_mm);

	status = __efi_thunk(set_virtual_address_map, memory_map_size,
			     descriptor_size, descriptor_version, virtual_map);

	efi_switch_mm(efi_scratch.prev_mm);
	local_irq_restore(flags);

	return status;
}

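/*
 * The time related runtime services are not proxied through the mixed
 * mode thunk (presumably because they are of limited value under an
 * emulated calling convention), so they are simply reported as
 * unsupported.
 */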
static efi_status_t efi_thunk_get_time(efi_time_t *tm, efi_time_cap_t *tc)
{
	return EFI_UNSUPPORTED;
}

static efi_status_t efi_thunk_set_time(efi_time_t *tm)
{
	return EFI_UNSUPPORTED;
}

static efi_status_t
efi_thunk_get_wakeup_time(efi_bool_t *enabled, efi_bool_t *pending,
			  efi_time_t *tm)
{
	return EFI_UNSUPPORTED;
}

static efi_status_t
efi_thunk_set_wakeup_time(efi_bool_t enabled, efi_time_t *tm)
{
	return EFI_UNSUPPORTED;
}

static unsigned long efi_name_size(efi_char16_t *name)
{
	return ucs2_strsize(name, EFI_VAR_NAME_LEN) + 1;
}

static efi_status_t
efi_thunk_get_variable(efi_char16_t *name, efi_guid_t *vendor,
		       u32 *attr, unsigned long *data_size, void *data)
{
	u8 buf[24] __aligned(8);
	efi_guid_t *vnd = PTR_ALIGN((efi_guid_t *)buf, sizeof(*vnd));
	efi_status_t status;
	u32 phys_name, phys_vendor, phys_attr;
	u32 phys_data_size, phys_data;
	unsigned long flags;

	spin_lock_irqsave(&efi_runtime_lock, flags);

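	/*
	 * Copy the vendor GUID into the 16-byte aligned slot of the local
	 * buffer: an aligned 16-byte object can never straddle a page
	 * boundary, so a single physical address is valid for the whole
	 * GUID when it is handed to the 32-bit firmware below.
	 */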
	*vnd = *vendor;

	phys_data_size = virt_to_phys_or_null(data_size);
	phys_vendor = virt_to_phys_or_null(vnd);
	phys_name = virt_to_phys_or_null_size(name, efi_name_size(name));
	phys_attr = virt_to_phys_or_null(attr);
	phys_data = virt_to_phys_or_null_size(data, *data_size);

	if (!phys_name || (data && !phys_data))
		status = EFI_INVALID_PARAMETER;
	else
		status = efi_thunk(get_variable, phys_name, phys_vendor,
				   phys_attr, phys_data_size, phys_data);

	spin_unlock_irqrestore(&efi_runtime_lock, flags);

	return status;
}

static efi_status_t
efi_thunk_set_variable(efi_char16_t *name, efi_guid_t *vendor,
		       u32 attr, unsigned long data_size, void *data)
{
	u8 buf[24] __aligned(8);
	efi_guid_t *vnd = PTR_ALIGN((efi_guid_t *)buf, sizeof(*vnd));
	u32 phys_name, phys_vendor, phys_data;
	efi_status_t status;
	unsigned long flags;

	spin_lock_irqsave(&efi_runtime_lock, flags);

	*vnd = *vendor;

	phys_name = virt_to_phys_or_null_size(name, efi_name_size(name));
	phys_vendor = virt_to_phys_or_null(vnd);
	phys_data = virt_to_phys_or_null_size(data, data_size);

	if (!phys_name || (data && !phys_data))
		status = EFI_INVALID_PARAMETER;
	else
		status = efi_thunk(set_variable, phys_name, phys_vendor,
				   attr, data_size, phys_data);

	spin_unlock_irqrestore(&efi_runtime_lock, flags);

	return status;
}

static efi_status_t
efi_thunk_set_variable_nonblocking(efi_char16_t *name, efi_guid_t *vendor,
				   u32 attr, unsigned long data_size,
				   void *data)
{
	u8 buf[24] __aligned(8);
	efi_guid_t *vnd = PTR_ALIGN((efi_guid_t *)buf, sizeof(*vnd));
	u32 phys_name, phys_vendor, phys_data;
	efi_status_t status;
	unsigned long flags;

	if (!spin_trylock_irqsave(&efi_runtime_lock, flags))
		return EFI_NOT_READY;

	*vnd = *vendor;

	phys_name = virt_to_phys_or_null_size(name, efi_name_size(name));
	phys_vendor = virt_to_phys_or_null(vnd);
	phys_data = virt_to_phys_or_null_size(data, data_size);

	if (!phys_name || (data && !phys_data))
		status = EFI_INVALID_PARAMETER;
	else
		status = efi_thunk(set_variable, phys_name, phys_vendor,
				   attr, data_size, phys_data);

	spin_unlock_irqrestore(&efi_runtime_lock, flags);

	return status;
}

static efi_status_t
efi_thunk_get_next_variable(unsigned long *name_size,
			    efi_char16_t *name,
			    efi_guid_t *vendor)
{
	u8 buf[24] __aligned(8);
	efi_guid_t *vnd = PTR_ALIGN((efi_guid_t *)buf, sizeof(*vnd));
	efi_status_t status;
	u32 phys_name_size, phys_name, phys_vendor;
	unsigned long flags;

	spin_lock_irqsave(&efi_runtime_lock, flags);

	*vnd = *vendor;

	phys_name_size = virt_to_phys_or_null(name_size);
	phys_vendor = virt_to_phys_or_null(vnd);
	phys_name = virt_to_phys_or_null_size(name, *name_size);

	if (!phys_name)
		status = EFI_INVALID_PARAMETER;
	else
		status = efi_thunk(get_next_variable, phys_name_size,
				   phys_name, phys_vendor);

	spin_unlock_irqrestore(&efi_runtime_lock, flags);

	*vendor = *vnd;
	return status;
}

static efi_status_t
efi_thunk_get_next_high_mono_count(u32 *count)
{
	return EFI_UNSUPPORTED;
}

static void
efi_thunk_reset_system(int reset_type, efi_status_t status,
		       unsigned long data_size, efi_char16_t *data)
{
	u32 phys_data;
	unsigned long flags;

	spin_lock_irqsave(&efi_runtime_lock, flags);

	phys_data = virt_to_phys_or_null_size(data, data_size);

	efi_thunk(reset_system, reset_type, status, data_size, phys_data);

	spin_unlock_irqrestore(&efi_runtime_lock, flags);
}

static efi_status_t
efi_thunk_update_capsule(efi_capsule_header_t **capsules,
			 unsigned long count, unsigned long sg_list)
{
	/*
	 * To properly support this function we would need to repackage
	 * 'capsules' because the firmware doesn't understand 64-bit
	 * pointers.
	 */
	return EFI_UNSUPPORTED;
}

static efi_status_t
efi_thunk_query_variable_info(u32 attr, u64 *storage_space,
			      u64 *remaining_space,
			      u64 *max_variable_size)
{
	efi_status_t status;
	u32 phys_storage, phys_remaining, phys_max;
	unsigned long flags;

	if (efi.runtime_version < EFI_2_00_SYSTEM_TABLE_REVISION)
		return EFI_UNSUPPORTED;

	spin_lock_irqsave(&efi_runtime_lock, flags);

	phys_storage = virt_to_phys_or_null(storage_space);
	phys_remaining = virt_to_phys_or_null(remaining_space);
	phys_max = virt_to_phys_or_null(max_variable_size);

	status = efi_thunk(query_variable_info, attr, phys_storage,
			   phys_remaining, phys_max);

	spin_unlock_irqrestore(&efi_runtime_lock, flags);

	return status;
}

static efi_status_t
efi_thunk_query_variable_info_nonblocking(u32 attr, u64 *storage_space,
					  u64 *remaining_space,
					  u64 *max_variable_size)
{
	efi_status_t status;
	u32 phys_storage, phys_remaining, phys_max;
	unsigned long flags;

	if (efi.runtime_version < EFI_2_00_SYSTEM_TABLE_REVISION)
		return EFI_UNSUPPORTED;

	if (!spin_trylock_irqsave(&efi_runtime_lock, flags))
		return EFI_NOT_READY;

	phys_storage = virt_to_phys_or_null(storage_space);
	phys_remaining = virt_to_phys_or_null(remaining_space);
	phys_max = virt_to_phys_or_null(max_variable_size);

	status = efi_thunk(query_variable_info, attr, phys_storage,
			   phys_remaining, phys_max);

	spin_unlock_irqrestore(&efi_runtime_lock, flags);

	return status;
}

static efi_status_t
efi_thunk_query_capsule_caps(efi_capsule_header_t **capsules,
			     unsigned long count, u64 *max_size,
			     int *reset_type)
{
	/*
	 * To properly support this function we would need to repackage
	 * 'capsules' because the firmware doesn't understand 64-bit
	 * pointers.
	 */
	return EFI_UNSUPPORTED;
}

void __init efi_thunk_runtime_setup(void)
{
	if (!IS_ENABLED(CONFIG_EFI_MIXED))
		return;

	efi.get_time = efi_thunk_get_time;
	efi.set_time = efi_thunk_set_time;
	efi.get_wakeup_time = efi_thunk_get_wakeup_time;
	efi.set_wakeup_time = efi_thunk_set_wakeup_time;
	efi.get_variable = efi_thunk_get_variable;
	efi.get_next_variable = efi_thunk_get_next_variable;
	efi.set_variable = efi_thunk_set_variable;
	efi.set_variable_nonblocking = efi_thunk_set_variable_nonblocking;
	efi.get_next_high_mono_count = efi_thunk_get_next_high_mono_count;
	efi.reset_system = efi_thunk_reset_system;
	efi.query_variable_info = efi_thunk_query_variable_info;
	efi.query_variable_info_nonblocking = efi_thunk_query_variable_info_nonblocking;
	efi.update_capsule = efi_thunk_update_capsule;
	efi.query_capsule_caps = efi_thunk_query_capsule_caps;
}

efi_status_t __init __no_sanitize_address
efi_set_virtual_address_map(unsigned long memory_map_size,
			    unsigned long descriptor_size,
			    u32 descriptor_version,
			    efi_memory_desc_t *virtual_map,
			    unsigned long systab_phys)
{
	const efi_system_table_t *systab = (efi_system_table_t *)systab_phys;
	efi_status_t status;
	unsigned long flags;

	if (efi_is_mixed())
		return efi_thunk_set_virtual_address_map(memory_map_size,
							 descriptor_size,
							 descriptor_version,
							 virtual_map);
	efi_switch_mm(&efi_mm);

	kernel_fpu_begin();

	/* Disable interrupts around EFI calls: */
	local_irq_save(flags);
	status = efi_call(efi.runtime->set_virtual_address_map,
			  memory_map_size, descriptor_size,
			  descriptor_version, virtual_map);
	local_irq_restore(flags);

	kernel_fpu_end();

	/* grab the virtually remapped EFI runtime services table pointer */
	efi.runtime = READ_ONCE(systab->runtime);

	efi_switch_mm(efi_scratch.prev_mm);

	return status;
}