// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2015 Linaro Ltd <ard.biesheuvel@linaro.org>
 */

#include <linux/efi.h>
#include <asm/efi.h>
#include <asm/mach/map.h>
#include <asm/mmu_context.h>

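/*
 * apply_to_page_range() callback: restrict a single PTE according to the
 * attributes of the EFI memory descriptor passed in via @data. RO regions
 * drop write permission, XP regions drop execute permission, and the entry
 * is installed non-global (PTE_EXT_NG), as these mappings belong to the EFI
 * runtime address space rather than to the kernel's global mappings.
 */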
static int __init set_permissions(pte_t *ptep, unsigned long addr, void *data)
{
	efi_memory_desc_t *md = data;
	pte_t pte = *ptep;

	if (md->attribute & EFI_MEMORY_RO)
		pte = set_pte_bit(pte, __pgprot(L_PTE_RDONLY));
	if (md->attribute & EFI_MEMORY_XP)
		pte = set_pte_bit(pte, __pgprot(L_PTE_XN));
	set_pte_ext(ptep, pte, PTE_EXT_NG);
	return 0;
}

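/*
 * Tighten the permissions of an existing runtime services mapping to match
 * the RO/XP attributes in @md. Only regions known to be mapped with page
 * granularity are walked; section-mapped regions are left untouched.
 */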
int __init efi_set_mapping_permissions(struct mm_struct *mm,
				       efi_memory_desc_t *md)
{
	unsigned long base, size;

	base = md->virt_addr;
	size = md->num_pages << EFI_PAGE_SHIFT;

	/*
	 * We can only use apply_to_page_range() if we can guarantee that the
	 * entire region was mapped using pages. This should be the case if the
	 * region does not cover any naturally aligned SECTION_SIZE sized
	 * blocks.
	 */
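	/*
	 * Illustration (hypothetical values, 1 MiB sections): the check
	 * below holds exactly when no naturally aligned section fits
	 * entirely inside [base, base + size). [0x1f0000, 0x2f0000)
	 * contains no full section, so it must be page-mapped and is
	 * processed; [0x1f0000, 0x310000) fully covers the section at
	 * 0x200000, so it may be section-mapped and is skipped.
	 */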
	if (round_down(base + size, SECTION_SIZE) <
	    round_up(base, SECTION_SIZE) + SECTION_SIZE)
		return apply_to_page_range(mm, base, size, set_permissions, md);

	return 0;
}

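/*
 * Create a late mapping for one EFI runtime region in @mm, choosing the
 * most capable memory type the region's cacheability attributes allow,
 * then narrowing the permissions if the descriptor also carries RO/XP.
 */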
int __init efi_create_mapping(struct mm_struct *mm, efi_memory_desc_t *md)
{
	struct map_desc desc = {
		.virtual = md->virt_addr,
		.pfn = __phys_to_pfn(md->phys_addr),
		.length = md->num_pages * EFI_PAGE_SIZE,
	};

	/*
	 * Order is important here: memory regions may have all of the
	 * bits below set (and usually do), so we check them in order of
	 * preference.
	 */
	if (md->attribute & EFI_MEMORY_WB)
		desc.type = MT_MEMORY_RWX;
	else if (md->attribute & EFI_MEMORY_WT)
		desc.type = MT_MEMORY_RWX_NONCACHED;
	else if (md->attribute & EFI_MEMORY_WC)
		desc.type = MT_DEVICE_WC;
	else
		desc.type = MT_DEVICE;

	create_mapping_late(mm, &desc, true);

	/*
	 * If stricter permissions were specified, apply them now.
	 */
	if (md->attribute & (EFI_MEMORY_RO | EFI_MEMORY_XP))
		return efi_set_mapping_permissions(mm, md);
	return 0;
}
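
/*
 * A minimal sketch of how these hooks get driven, assuming the generic ARM
 * EFI runtime setup (efi_virtmap_init() in drivers/firmware/efi/arm-runtime.c
 * is the caller in mainline): each runtime descriptor in the EFI memory map
 * is mapped into the dedicated efi_mm address space.
 *
 *	efi_memory_desc_t *md;
 *
 *	for_each_efi_memory_desc(md) {
 *		if (!(md->attribute & EFI_MEMORY_RUNTIME))
 *			continue;
 *		if (efi_create_mapping(&efi_mm, md))
 *			return false;
 *	}
 */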