// SPDX-License-Identifier: GPL-2.0
/*
 * This code is used on x86_64 to create page table identity mappings on
 * demand by building up a new set of page tables (or appending to the
 * existing ones), and then switching over to them when ready.
 *
 * Copyright (C) 2015-2016 Yinghai Lu
 * Copyright (C) 2016 Kees Cook
 */

/*
 * Since we're dealing with identity mappings, physical and virtual
 * addresses are the same, so override these defines which are ultimately
 * used by the headers in misc.h.
 */
#define __pa(x)  ((unsigned long)(x))
#define __va(x)  ((void *)((unsigned long)(x)))

/* No PAGE_TABLE_ISOLATION support needed either: */
#undef CONFIG_PAGE_TABLE_ISOLATION

#include "error.h"
#include "misc.h"

/* These actually do the work of building the kernel identity maps. */
#include <linux/pgtable.h>
#include <asm/cmpxchg.h>
#include <asm/trap_pf.h>
#include <asm/trapnr.h>
#include <asm/init.h>
/* Use the static base for this part of the boot process */
#undef __PAGE_OFFSET
#define __PAGE_OFFSET __PAGE_OFFSET_BASE
#include "../../mm/ident_map.c"

#define _SETUP
#include <asm/setup.h>	/* For COMMAND_LINE_SIZE */
#undef _SETUP
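/* Defined in cmdline.c: */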
extern unsigned long get_cmd_line_ptr(void);

/* Used by PAGE_KERN* macros: */
pteval_t __default_kernel_pte_mask __read_mostly = ~0;

/* Used to track our page table allocation area. */
struct alloc_pgt_data {
	unsigned char *pgt_buf;
	unsigned long pgt_buf_size;
	unsigned long pgt_buf_offset;
};
/*
 * Allocates space for a page table page, using struct alloc_pgt_data
 * above. Besides the local callers, this is used as the allocation
 * callback in mapping_info below.
 */
static void *alloc_pgt_page(void *context)
{
	struct alloc_pgt_data *pages = (struct alloc_pgt_data *)context;
	unsigned char *entry;

	/* Validate there is space available for a new page. */
	if (pages->pgt_buf_offset >= pages->pgt_buf_size) {
		debug_putstr("out of pgt_buf in " __FILE__ "!?\n");
		debug_putaddr(pages->pgt_buf_offset);
		debug_putaddr(pages->pgt_buf_size);
		return NULL;
	}

	entry = pages->pgt_buf + pages->pgt_buf_offset;
	pages->pgt_buf_offset += PAGE_SIZE;

	return entry;
}

/* Used to track our allocated page tables. */
static struct alloc_pgt_data pgt_data;

/* The top level page table entry pointer. */
static unsigned long top_level_pgt;

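/* Consumed by the page-table headers; the SME mask is cleared from it below. */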
phys_addr_t physical_mask = (1ULL << __PHYSICAL_MASK_SHIFT) - 1;

/*
 * Mapping information structure passed to kernel_ident_mapping_init().
 * Due to relocation, pointers must be assigned at run time, not build time.
 */
static struct x86_mapping_info mapping_info;

/*
 * Adds the specified range to the identity mappings.
 */
static void add_identity_map(unsigned long start, unsigned long end)
{
	int ret;

	/* Align boundary to 2M. */
	start = round_down(start, PMD_SIZE);
	end = round_up(end, PMD_SIZE);
	if (start >= end)
		return;

	/* Build the mapping. */
	ret = kernel_ident_mapping_init(&mapping_info, (pgd_t *)top_level_pgt,
					start, end);
	if (ret)
		error("Error: kernel_ident_mapping_init() failed\n");
}

/* Locates and clears a region for a new top level page table. */
void initialize_identity_maps(void *rmode)
{
	unsigned long cmdline;

	/* Exclude the encryption mask from __PHYSICAL_MASK */
	physical_mask &= ~sme_me_mask;

	/* Init mapping_info with run-time function/buffer pointers. */
	mapping_info.alloc_pgt_page = alloc_pgt_page;
	mapping_info.context = &pgt_data;
	mapping_info.page_flag = __PAGE_KERNEL_LARGE_EXEC | sme_me_mask;
	mapping_info.kernpg_flag = _KERNPG_TABLE;

	/*
	 * It should be impossible for this not to already be true,
	 * but since calling this a second time would rewind the other
	 * counters, let's just make sure this is reset too.
	 */
	pgt_data.pgt_buf_offset = 0;

	/*
	 * If we came here via startup_32(), cr3 will be _pgtable already
	 * and we must append to the existing area instead of entirely
	 * overwriting it.
	 *
	 * With 5-level paging, '_pgtable' is used to allocate the p4d page
	 * table; the top-level page table is allocated separately.
	 *
	 * p4d_offset(top_level_pgt, 0) covers both the 4- and 5-level
	 * cases. On 4-level paging it's equal to 'top_level_pgt'.
	 */
	top_level_pgt = read_cr3_pa();
	if (p4d_offset((pgd_t *)top_level_pgt, 0) == (p4d_t *)_pgtable) {
		pgt_data.pgt_buf = _pgtable + BOOT_INIT_PGT_SIZE;
		pgt_data.pgt_buf_size = BOOT_PGT_SIZE - BOOT_INIT_PGT_SIZE;
		memset(pgt_data.pgt_buf, 0, pgt_data.pgt_buf_size);
	} else {
		pgt_data.pgt_buf = _pgtable;
		pgt_data.pgt_buf_size = BOOT_PGT_SIZE;
		memset(pgt_data.pgt_buf, 0, pgt_data.pgt_buf_size);
		top_level_pgt = (unsigned long)alloc_pgt_page(&pgt_data);
	}

	/*
	 * New page-table is set up - map the kernel image, boot_params and
	 * the command line. The uncompressed kernel requires boot_params and
	 * the command line to be mapped in the identity mapping. Map them
	 * explicitly here in case the compressed kernel does not touch them,
	 * or does not touch all the pages covering them.
	 */
	add_identity_map((unsigned long)_head, (unsigned long)_end);
	boot_params = rmode;
	add_identity_map((unsigned long)boot_params, (unsigned long)(boot_params + 1));
	cmdline = get_cmd_line_ptr();
	add_identity_map(cmdline, cmdline + COMMAND_LINE_SIZE);

	/* Load the new page-table. */
	sev_verify_cbit(top_level_pgt);
	write_cr3(top_level_pgt);
}

/*
 * This switches the page tables to the new level4 that has been built
 * via calls to add_identity_map() above. If booted via startup_32(),
 * this is effectively a no-op.
 */
void finalize_identity_maps(void)
{
	write_cr3(top_level_pgt);
}

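/*
 * Split a large (2M) PMD mapping into a page table of 4K PTEs so the
 * attributes of a single 4K page can be changed. Returns a pointer to
 * the PTE covering '__address', or NULL if page-table allocation failed.
 */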
static pte_t *split_large_pmd(struct x86_mapping_info *info,
			      pmd_t *pmdp, unsigned long __address)
{
	unsigned long page_flags;
	unsigned long address;
	pte_t *pte;
	pmd_t pmd;
	int i;

	pte = (pte_t *)info->alloc_pgt_page(info->context);
	if (!pte)
		return NULL;

	address = __address & PMD_MASK;
	/* No large page - clear PSE flag */
	page_flags = info->page_flag & ~_PAGE_PSE;

	/* Populate the PTEs */
	for (i = 0; i < PTRS_PER_PMD; i++) {
		set_pte(&pte[i], __pte(address | page_flags));
		address += PAGE_SIZE;
	}

	/*
	 * Ideally we need to clear the large PMD first and do a TLB
	 * flush before we write the new PMD. But the 2M range of the
	 * PMD might contain the code we execute and/or the stack
	 * we are on, so we can't do that. But that should be safe here
	 * because we are going from large to small mappings and we are
	 * also the only user of the page-table, so there is no chance
	 * of a TLB multihit.
	 */
	pmd = __pmd((unsigned long)pte | info->kernpg_flag);
	set_pmd(pmdp, pmd);
	/* Flush TLB to establish the new PMD */
	write_cr3(top_level_pgt);

	return pte + pte_index(__address);
}

static void clflush_page(unsigned long address)
{
	unsigned int flush_size;
	char *cl, *start, *end;

	/*
	 * Hardcode cl-size to 64 - CPUID can't be used here because that might
	 * cause another #VC exception and the GHCB is not ready to use yet.
	 */
	flush_size = 64;
	start = (char *)(address & PAGE_MASK);
	end = start + PAGE_SIZE;

	/*
	 * First make sure there are no pending writes on the cache-lines to
	 * flush.
	 */
	asm volatile("mfence" : : : "memory");

	for (cl = start; cl != end; cl += flush_size)
		clflush(cl);
}

static int set_clr_page_flags(struct x86_mapping_info *info,
			      unsigned long address,
			      pteval_t set, pteval_t clr)
{
	pgd_t *pgdp = (pgd_t *)top_level_pgt;
	p4d_t *p4dp;
	pud_t *pudp;
	pmd_t *pmdp;
	pte_t *ptep, pte;

	/*
	 * First make sure there is a PMD mapping for 'address'.
	 * It should already exist, but keep things generic.
	 *
	 * To map the page just read from it and fault it in if there is no
	 * mapping yet. add_identity_map() can't be called here because that
	 * would unconditionally map the address on PMD level, destroying any
	 * PTE-level mappings that might already exist. Use assembly here so
	 * the access won't be optimized away.
	 */
	asm volatile("mov %[address], %%r9"
		     :: [address] "g" (*(unsigned long *)address)
		     : "r9", "memory");

	/*
	 * The page is mapped at least with PMD size - so skip checks and walk
	 * directly to the PMD.
	 */
	p4dp = p4d_offset(pgdp, address);
	pudp = pud_offset(p4dp, address);
	pmdp = pmd_offset(pudp, address);

	if (pmd_large(*pmdp))
		ptep = split_large_pmd(info, pmdp, address);
	else
		ptep = pte_offset_kernel(pmdp, address);

	if (!ptep)
		return -ENOMEM;

	/*
	 * Changing encryption attributes of a page requires flushing it from
	 * the caches.
	 */
	if ((set | clr) & _PAGE_ENC)
		clflush_page(address);

	/* Update PTE */
	pte = *ptep;
	pte = pte_set_flags(pte, set);
	pte = pte_clear_flags(pte, clr);
	set_pte(ptep, pte);

	/* Flush TLB after changing encryption attribute */
	write_cr3(top_level_pgt);

	return 0;
}

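/*
 * Helpers to change the encryption or presence attribute of a single
 * 4K page in the identity mapping.
 */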
int set_page_decrypted(unsigned long address)
{
	return set_clr_page_flags(&mapping_info, address, 0, _PAGE_ENC);
}

int set_page_encrypted(unsigned long address)
{
	return set_clr_page_flags(&mapping_info, address, _PAGE_ENC, 0);
}

int set_page_non_present(unsigned long address)
{
	return set_clr_page_flags(&mapping_info, address, 0, _PAGE_PRESENT);
}

static void do_pf_error(const char *msg, unsigned long error_code,
			unsigned long address, unsigned long ip)
{
	error_putstr(msg);

	error_putstr("\nError Code: ");
	error_puthex(error_code);
	error_putstr("\nCR2: 0x");
	error_puthex(address);
	error_putstr("\nRIP relative to _head: 0x");
	error_puthex(ip - (unsigned long)_head);
	error_putstr("\n");

	error("Stopping.\n");
}

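/*
 * Boot-stage #PF handler - identity-maps the 2M region around the
 * faulting address on demand, or stops the boot on unrecoverable faults.
 */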
void do_boot_page_fault(struct pt_regs *regs, unsigned long error_code)
{
	unsigned long address = native_read_cr2();
	unsigned long end;
	bool ghcb_fault;

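	/* A fault on the GHCB page can't be fixed up here and is fatal below. */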
	ghcb_fault = sev_es_check_ghcb_fault(address);

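	/* Align down to a 2M boundary so the whole PMD-sized region is mapped. */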
	address &= PMD_MASK;
	end = address + PMD_SIZE;

	/*
	 * Check for unexpected error codes. Unexpected are:
	 *	- Faults on present pages
	 *	- User faults
	 *	- Reserved bits set
	 */
	if (error_code & (X86_PF_PROT | X86_PF_USER | X86_PF_RSVD))
		do_pf_error("Unexpected page-fault:", error_code, address, regs->ip);
	else if (ghcb_fault)
		do_pf_error("Page-fault on GHCB page:", error_code, address, regs->ip);

	/*
	 * Error code is sane - now identity map the 2M region around
	 * the faulting address.
	 */
	add_identity_map(address, end);
}