xref: /OK3568_Linux_fs/kernel/arch/x86/mm/mem_encrypt_identity.c (revision 4882a59341e53eb6f0b4789bf948001014eff981)
// SPDX-License-Identifier: GPL-2.0-only
/*
 * AMD Memory Encryption Support
 *
 * Copyright (C) 2016 Advanced Micro Devices, Inc.
 *
 * Author: Tom Lendacky <thomas.lendacky@amd.com>
 */

#define DISABLE_BRANCH_PROFILING

/*
 * Since we're dealing with identity mappings, physical and virtual
 * addresses are the same, so override these defines which are ultimately
 * used by the headers in misc.h.
 */
#define __pa(x)  ((unsigned long)(x))
#define __va(x)  ((void *)((unsigned long)(x)))

/*
 * Special hack: we have to be careful, because no indirections are
 * allowed here, and paravirt_ops is a kind of one. As it will only run
 * on bare metal anyway, we just keep it from happening. (This list needs
 * to be extended when new paravirt and debugging variants are added.)
 */
#undef CONFIG_PARAVIRT
#undef CONFIG_PARAVIRT_XXL
#undef CONFIG_PARAVIRT_SPINLOCKS

/*
 * This code runs before CPU feature bits are set. By default, the
 * pgtable_l5_enabled() function uses bit X86_FEATURE_LA57 to determine if
 * 5-level paging is active, so that won't work here. USE_EARLY_PGTABLE_L5
 * is provided to handle this situation and, instead, use a variable that
 * has been set by the early boot code.
 */
#define USE_EARLY_PGTABLE_L5

#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/mem_encrypt.h>

#include <asm/setup.h>
#include <asm/sections.h>
#include <asm/cmdline.h>

#include "mm_internal.h"

#define PGD_FLAGS		_KERNPG_TABLE_NOENC
#define P4D_FLAGS		_KERNPG_TABLE_NOENC
#define PUD_FLAGS		_KERNPG_TABLE_NOENC
#define PMD_FLAGS		_KERNPG_TABLE_NOENC

#define PMD_FLAGS_LARGE		(__PAGE_KERNEL_LARGE_EXEC & ~_PAGE_GLOBAL)

#define PMD_FLAGS_DEC		PMD_FLAGS_LARGE
#define PMD_FLAGS_DEC_WP	((PMD_FLAGS_DEC & ~_PAGE_LARGE_CACHE_MASK) | \
				 (_PAGE_PAT_LARGE | _PAGE_PWT))

#define PMD_FLAGS_ENC		(PMD_FLAGS_LARGE | _PAGE_ENC)

#define PTE_FLAGS		(__PAGE_KERNEL_EXEC & ~_PAGE_GLOBAL)

#define PTE_FLAGS_DEC		PTE_FLAGS
#define PTE_FLAGS_DEC_WP	((PTE_FLAGS_DEC & ~_PAGE_CACHE_MASK) | \
				 (_PAGE_PAT | _PAGE_PWT))

#define PTE_FLAGS_ENC		(PTE_FLAGS | _PAGE_ENC)
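
/*
 * _PAGE_ENC is the encryption mask (the "C-bit" discovered via CPUID in
 * sme_enable() below), so the _ENC variants of these flags produce
 * encrypted mappings and the _DEC variants produce decrypted ones. The
 * _DEC_WP variants use the PAT/PWT bits to make the decrypted mapping
 * write-protected so it is not cached (see sme_encrypt_kernel()).
 */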

struct sme_populate_pgd_data {
	void    *pgtable_area;
	pgd_t   *pgd;

	pmdval_t pmd_flags;
	pteval_t pte_flags;
	unsigned long paddr;

	unsigned long vaddr;
	unsigned long vaddr_end;
};

/*
 * This work area lives in the .init.scratch section, which lives outside of
 * the kernel proper. It is sized to hold the intermediate copy buffer and
 * more than enough pagetable pages.
 *
 * By using this section, the kernel can be encrypted in place and it
 * avoids any possibility of boot parameters or initramfs images being
 * placed such that the in-place encryption logic overwrites them.  This
 * section is 2MB aligned to allow for simple pagetable setup using only
 * PMD entries (see vmlinux.lds.S).
 */
static char sme_workarea[2 * PMD_PAGE_SIZE] __section(".init.scratch");
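
/*
 * sme_encrypt_kernel() below carves this area up into an executable part
 * (a stack page, a page for the relocated encryption routine and a 2MB
 * intermediate copy buffer) followed by the pool from which the new
 * pagetable pages are allocated via ppd->pgtable_area.
 */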

static char sme_cmdline_arg[] __initdata = "mem_encrypt";
static char sme_cmdline_on[]  __initdata = "on";
static char sme_cmdline_off[] __initdata = "off";

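/*
 * Clear the PGD entries covering the ppd->vaddr to ppd->vaddr_end range,
 * removing the entire mapping below them in one step.
 */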
static void __init sme_clear_pgd(struct sme_populate_pgd_data *ppd)
{
	unsigned long pgd_start, pgd_end, pgd_size;
	pgd_t *pgd_p;

	pgd_start = ppd->vaddr & PGDIR_MASK;
	pgd_end = ppd->vaddr_end & PGDIR_MASK;

	pgd_size = (((pgd_end - pgd_start) / PGDIR_SIZE) + 1) * sizeof(pgd_t);

	pgd_p = ppd->pgd + pgd_index(ppd->vaddr);

	memset(pgd_p, 0, pgd_size);
}

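/*
 * Walk the pagetable hierarchy for ppd->vaddr down to the PUD level,
 * allocating any missing P4D/PUD/PMD pages from ppd->pgtable_area.
 * Returns the PUD entry to use, or NULL if the address is already
 * covered by a large PUD mapping.
 */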
static pud_t __init *sme_prepare_pgd(struct sme_populate_pgd_data *ppd)
{
	pgd_t *pgd;
	p4d_t *p4d;
	pud_t *pud;
	pmd_t *pmd;

	pgd = ppd->pgd + pgd_index(ppd->vaddr);
	if (pgd_none(*pgd)) {
		p4d = ppd->pgtable_area;
		memset(p4d, 0, sizeof(*p4d) * PTRS_PER_P4D);
		ppd->pgtable_area += sizeof(*p4d) * PTRS_PER_P4D;
		set_pgd(pgd, __pgd(PGD_FLAGS | __pa(p4d)));
	}

	p4d = p4d_offset(pgd, ppd->vaddr);
	if (p4d_none(*p4d)) {
		pud = ppd->pgtable_area;
		memset(pud, 0, sizeof(*pud) * PTRS_PER_PUD);
		ppd->pgtable_area += sizeof(*pud) * PTRS_PER_PUD;
		set_p4d(p4d, __p4d(P4D_FLAGS | __pa(pud)));
	}

	pud = pud_offset(p4d, ppd->vaddr);
	if (pud_none(*pud)) {
		pmd = ppd->pgtable_area;
		memset(pmd, 0, sizeof(*pmd) * PTRS_PER_PMD);
		ppd->pgtable_area += sizeof(*pmd) * PTRS_PER_PMD;
		set_pud(pud, __pud(PUD_FLAGS | __pa(pmd)));
	}

	if (pud_large(*pud))
		return NULL;

	return pud;
}

static void __init sme_populate_pgd_large(struct sme_populate_pgd_data *ppd)
{
	pud_t *pud;
	pmd_t *pmd;

	pud = sme_prepare_pgd(ppd);
	if (!pud)
		return;

	pmd = pmd_offset(pud, ppd->vaddr);
	if (pmd_large(*pmd))
		return;

	set_pmd(pmd, __pmd(ppd->paddr | ppd->pmd_flags));
}

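/*
 * Map a single 4KB page at ppd->vaddr using ppd->pte_flags, allocating a
 * PTE page from ppd->pgtable_area if the PMD entry is empty. An existing
 * large PMD mapping or PTE entry is left untouched.
 */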
static void __init sme_populate_pgd(struct sme_populate_pgd_data *ppd)
{
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;

	pud = sme_prepare_pgd(ppd);
	if (!pud)
		return;

	pmd = pmd_offset(pud, ppd->vaddr);
	if (pmd_none(*pmd)) {
		pte = ppd->pgtable_area;
		memset(pte, 0, sizeof(*pte) * PTRS_PER_PTE);
		ppd->pgtable_area += sizeof(*pte) * PTRS_PER_PTE;
		set_pmd(pmd, __pmd(PMD_FLAGS | __pa(pte)));
	}

	if (pmd_large(*pmd))
		return;

	pte = pte_offset_map(pmd, ppd->vaddr);
	if (pte_none(*pte))
		set_pte(pte, __pte(ppd->paddr | ppd->pte_flags));
}

static void __init __sme_map_range_pmd(struct sme_populate_pgd_data *ppd)
{
	while (ppd->vaddr < ppd->vaddr_end) {
		sme_populate_pgd_large(ppd);

		ppd->vaddr += PMD_PAGE_SIZE;
		ppd->paddr += PMD_PAGE_SIZE;
	}
}

static void __init __sme_map_range_pte(struct sme_populate_pgd_data *ppd)
{
	while (ppd->vaddr < ppd->vaddr_end) {
		sme_populate_pgd(ppd);

		ppd->vaddr += PAGE_SIZE;
		ppd->paddr += PAGE_SIZE;
	}
}

static void __init __sme_map_range(struct sme_populate_pgd_data *ppd,
				   pmdval_t pmd_flags, pteval_t pte_flags)
{
	unsigned long vaddr_end;

	ppd->pmd_flags = pmd_flags;
	ppd->pte_flags = pte_flags;

	/* Save original end value since we modify the struct value */
	vaddr_end = ppd->vaddr_end;

	/* If start is not 2MB aligned, create PTE entries */
	ppd->vaddr_end = ALIGN(ppd->vaddr, PMD_PAGE_SIZE);
	__sme_map_range_pte(ppd);

	/* Create PMD entries */
	ppd->vaddr_end = vaddr_end & PMD_PAGE_MASK;
	__sme_map_range_pmd(ppd);

	/* If end is not 2MB aligned, create PTE entries */
	ppd->vaddr_end = vaddr_end;
	__sme_map_range_pte(ppd);
}

static void __init sme_map_range_encrypted(struct sme_populate_pgd_data *ppd)
{
	__sme_map_range(ppd, PMD_FLAGS_ENC, PTE_FLAGS_ENC);
}

static void __init sme_map_range_decrypted(struct sme_populate_pgd_data *ppd)
{
	__sme_map_range(ppd, PMD_FLAGS_DEC, PTE_FLAGS_DEC);
}

static void __init sme_map_range_decrypted_wp(struct sme_populate_pgd_data *ppd)
{
	__sme_map_range(ppd, PMD_FLAGS_DEC_WP, PTE_FLAGS_DEC_WP);
}

static unsigned long __init sme_pgtable_calc(unsigned long len)
{
	unsigned long entries = 0, tables = 0;

	/*
	 * Perform a relatively simplistic calculation of the pagetable
	 * entries that are needed. Those mappings will be covered mostly
	 * by 2MB PMD entries so we can conservatively calculate the required
	 * number of P4D, PUD and PMD structures needed to perform the
	 * mappings.  For mappings that are not 2MB aligned, PTE mappings
	 * would be needed for the start and end portions of the address range
	 * that fall outside of the 2MB alignment.  This results in, at most,
	 * two extra pages to hold PTE entries for each range that is mapped.
	 * Incrementing the count for each covers the case where the addresses
	 * cross entries.
	 */

	/* PGDIR_SIZE is equal to P4D_SIZE on a 4-level machine. */
	if (PTRS_PER_P4D > 1)
		entries += (DIV_ROUND_UP(len, PGDIR_SIZE) + 1) * sizeof(p4d_t) * PTRS_PER_P4D;
	entries += (DIV_ROUND_UP(len, P4D_SIZE) + 1) * sizeof(pud_t) * PTRS_PER_PUD;
	entries += (DIV_ROUND_UP(len, PUD_SIZE) + 1) * sizeof(pmd_t) * PTRS_PER_PMD;
	entries += 2 * sizeof(pte_t) * PTRS_PER_PTE;
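	/*
	 * Each "sizeof(x_t) * PTRS_PER_x" term above is exactly one 4KB
	 * pagetable page, so 'entries' accumulates the byte count of whole
	 * pagetable pages needed for the mappings themselves.
	 */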

	/*
	 * Now calculate the added pagetable structures needed to populate
	 * the new pagetables.
	 */

	if (PTRS_PER_P4D > 1)
		tables += DIV_ROUND_UP(entries, PGDIR_SIZE) * sizeof(p4d_t) * PTRS_PER_P4D;
	tables += DIV_ROUND_UP(entries, P4D_SIZE) * sizeof(pud_t) * PTRS_PER_PUD;
	tables += DIV_ROUND_UP(entries, PUD_SIZE) * sizeof(pmd_t) * PTRS_PER_PMD;

	return entries + tables;
}

void __init sme_encrypt_kernel(struct boot_params *bp)
{
	unsigned long workarea_start, workarea_end, workarea_len;
	unsigned long execute_start, execute_end, execute_len;
	unsigned long kernel_start, kernel_end, kernel_len;
	unsigned long initrd_start, initrd_end, initrd_len;
	struct sme_populate_pgd_data ppd;
	unsigned long pgtable_area_len;
	unsigned long decrypted_base;

	if (!sme_active())
		return;

	/*
	 * Prepare for encrypting the kernel and initrd by building new
	 * pagetables with the necessary attributes needed to encrypt the
	 * kernel in place.
	 *
	 *   One range of virtual addresses will map the memory occupied
	 *   by the kernel and initrd as encrypted.
	 *
	 *   Another range of virtual addresses will map the memory occupied
	 *   by the kernel and initrd as decrypted and write-protected.
	 *
	 *     The use of write-protect attribute will prevent any of the
	 *     memory from being cached.
	 */

	/* Physical addresses give us the identity mapped virtual addresses */
	kernel_start = __pa_symbol(_text);
	kernel_end = ALIGN(__pa_symbol(_end), PMD_PAGE_SIZE);
	kernel_len = kernel_end - kernel_start;

	initrd_start = 0;
	initrd_end = 0;
	initrd_len = 0;
#ifdef CONFIG_BLK_DEV_INITRD
	initrd_len = (unsigned long)bp->hdr.ramdisk_size |
		     ((unsigned long)bp->ext_ramdisk_size << 32);
	if (initrd_len) {
		initrd_start = (unsigned long)bp->hdr.ramdisk_image |
			       ((unsigned long)bp->ext_ramdisk_image << 32);
		initrd_end = PAGE_ALIGN(initrd_start + initrd_len);
		initrd_len = initrd_end - initrd_start;
	}
#endif

	/*
	 * We're running identity mapped, so we must obtain the address to the
	 * SME encryption workarea using rip-relative addressing.
	 */
	asm ("lea sme_workarea(%%rip), %0"
	     : "=r" (workarea_start)
	     : "p" (sme_workarea));

	/*
	 * Calculate the number of workarea bytes needed:
	 *   executable encryption area size:
	 *     stack page (PAGE_SIZE)
	 *     encryption routine page (PAGE_SIZE)
	 *     intermediate copy buffer (PMD_PAGE_SIZE)
	 *   pagetable structures for the encryption of the kernel
	 *   pagetable structures for workarea (in case not currently mapped)
	 */
	execute_start = workarea_start;
	execute_end = execute_start + (PAGE_SIZE * 2) + PMD_PAGE_SIZE;
	execute_len = execute_end - execute_start;

	/*
	 * One PGD for both encrypted and decrypted mappings and a set of
	 * PUDs and PMDs for each of the encrypted and decrypted mappings.
	 */
	pgtable_area_len = sizeof(pgd_t) * PTRS_PER_PGD;
	pgtable_area_len += sme_pgtable_calc(execute_end - kernel_start) * 2;
	if (initrd_len)
		pgtable_area_len += sme_pgtable_calc(initrd_len) * 2;

	/* PUDs and PMDs needed in the current pagetables for the workarea */
	pgtable_area_len += sme_pgtable_calc(execute_len + pgtable_area_len);

	/*
	 * The total workarea includes the executable encryption area and
	 * the pagetable area. The start of the workarea is already 2MB
	 * aligned, align the end of the workarea on a 2MB boundary so that
	 * we don't try to create/allocate PTE entries from the workarea
	 * before it is mapped.
	 */
	workarea_len = execute_len + pgtable_area_len;
	workarea_end = ALIGN(workarea_start + workarea_len, PMD_PAGE_SIZE);

	/*
	 * Set the address to the start of where newly created pagetable
	 * structures (PGDs, PUDs and PMDs) will be allocated. New pagetable
	 * structures are created when the workarea is added to the current
	 * pagetables and when the new encrypted and decrypted kernel
	 * mappings are populated.
	 */
	ppd.pgtable_area = (void *)execute_end;

	/*
	 * Make sure the current pagetable structure has entries for
	 * addressing the workarea.
	 */
	ppd.pgd = (pgd_t *)native_read_cr3_pa();
	ppd.paddr = workarea_start;
	ppd.vaddr = workarea_start;
	ppd.vaddr_end = workarea_end;
	sme_map_range_decrypted(&ppd);

	/* Flush the TLB - no globals so cr3 is enough */
	native_write_cr3(__native_read_cr3());

	/*
	 * A new pagetable structure is being built to allow for the kernel
	 * and initrd to be encrypted. It starts with an empty PGD that will
	 * then be populated with new PUDs and PMDs as the encrypted and
	 * decrypted kernel mappings are created.
	 */
	ppd.pgd = ppd.pgtable_area;
	memset(ppd.pgd, 0, sizeof(pgd_t) * PTRS_PER_PGD);
	ppd.pgtable_area += sizeof(pgd_t) * PTRS_PER_PGD;

	/*
	 * A different PGD index/entry must be used to get different
	 * pagetable entries for the decrypted mapping. Choose the next
	 * PGD index and convert it to a virtual address to be used as
	 * the base of the mapping.
	 */
	decrypted_base = (pgd_index(workarea_end) + 1) & (PTRS_PER_PGD - 1);
	if (initrd_len) {
		unsigned long check_base;

		check_base = (pgd_index(initrd_end) + 1) & (PTRS_PER_PGD - 1);
		decrypted_base = max(decrypted_base, check_base);
	}
	decrypted_base <<= PGDIR_SHIFT;
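	/*
	 * For example, with 4-level paging (PGDIR_SHIFT == 39) each PGD
	 * entry covers 512GB, so a decrypted_base index of 1 places the
	 * non-identity mappings starting at 512GB.
	 */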

	/* Add encrypted kernel (identity) mappings */
	ppd.paddr = kernel_start;
	ppd.vaddr = kernel_start;
	ppd.vaddr_end = kernel_end;
	sme_map_range_encrypted(&ppd);

	/* Add decrypted, write-protected kernel (non-identity) mappings */
	ppd.paddr = kernel_start;
	ppd.vaddr = kernel_start + decrypted_base;
	ppd.vaddr_end = kernel_end + decrypted_base;
	sme_map_range_decrypted_wp(&ppd);

	if (initrd_len) {
		/* Add encrypted initrd (identity) mappings */
		ppd.paddr = initrd_start;
		ppd.vaddr = initrd_start;
		ppd.vaddr_end = initrd_end;
		sme_map_range_encrypted(&ppd);
		/*
		 * Add decrypted, write-protected initrd (non-identity) mappings
		 */
		ppd.paddr = initrd_start;
		ppd.vaddr = initrd_start + decrypted_base;
		ppd.vaddr_end = initrd_end + decrypted_base;
		sme_map_range_decrypted_wp(&ppd);
	}

	/* Add decrypted workarea mappings to both kernel mappings */
	ppd.paddr = workarea_start;
	ppd.vaddr = workarea_start;
	ppd.vaddr_end = workarea_end;
	sme_map_range_decrypted(&ppd);

	ppd.paddr = workarea_start;
	ppd.vaddr = workarea_start + decrypted_base;
	ppd.vaddr_end = workarea_end + decrypted_base;
	sme_map_range_decrypted(&ppd);
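	/*
	 * Both pagetable sets map the workarea decrypted: the encryption
	 * routine, its stack, the intermediate copy buffer and these
	 * pagetables all live there and must remain usable while the
	 * kernel image is rewritten in place.
	 */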

	/* Perform the encryption */
	sme_encrypt_execute(kernel_start, kernel_start + decrypted_base,
			    kernel_len, workarea_start, (unsigned long)ppd.pgd);

	if (initrd_len)
		sme_encrypt_execute(initrd_start, initrd_start + decrypted_base,
				    initrd_len, workarea_start,
				    (unsigned long)ppd.pgd);

	/*
	 * At this point we are running encrypted.  Remove the mappings for
	 * the decrypted areas - all that is needed for this is to remove
	 * the PGD entry/entries.
	 */
	ppd.vaddr = kernel_start + decrypted_base;
	ppd.vaddr_end = kernel_end + decrypted_base;
	sme_clear_pgd(&ppd);

	if (initrd_len) {
		ppd.vaddr = initrd_start + decrypted_base;
		ppd.vaddr_end = initrd_end + decrypted_base;
		sme_clear_pgd(&ppd);
	}

	ppd.vaddr = workarea_start + decrypted_base;
	ppd.vaddr_end = workarea_end + decrypted_base;
	sme_clear_pgd(&ppd);

	/* Flush the TLB - no globals so cr3 is enough */
	native_write_cr3(__native_read_cr3());
}

void __init sme_enable(struct boot_params *bp)
{
	const char *cmdline_ptr, *cmdline_arg, *cmdline_on, *cmdline_off;
	unsigned int eax, ebx, ecx, edx;
	unsigned long feature_mask;
	bool active_by_default;
	unsigned long me_mask;
	char buffer[16];
	u64 msr;

	/* Check for the SME/SEV support leaf */
	eax = 0x80000000;
	ecx = 0;
	native_cpuid(&eax, &ebx, &ecx, &edx);
	if (eax < 0x8000001f)
		return;

#define AMD_SME_BIT	BIT(0)
#define AMD_SEV_BIT	BIT(1)

	/*
	 * Check for the SME/SEV feature:
	 *   CPUID Fn8000_001F[EAX]
	 *   - Bit 0 - Secure Memory Encryption support
	 *   - Bit 1 - Secure Encrypted Virtualization support
	 *   CPUID Fn8000_001F[EBX]
	 *   - Bits 5:0 - Pagetable bit position used to indicate encryption
	 */
	eax = 0x8000001f;
	ecx = 0;
	native_cpuid(&eax, &ebx, &ecx, &edx);
	/* Check whether SEV or SME is supported */
	if (!(eax & (AMD_SEV_BIT | AMD_SME_BIT)))
		return;

	me_mask = 1UL << (ebx & 0x3f);
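	/*
	 * For example, an EBX value reporting the encryption bit at
	 * position 47 yields me_mask == 1UL << 47; this mask later becomes
	 * sme_me_mask and thus the _PAGE_ENC pagetable flag.
	 */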

	/* Check the SEV MSR to see whether SEV or SME is enabled */
	sev_status   = __rdmsr(MSR_AMD64_SEV);
	feature_mask = (sev_status & MSR_AMD64_SEV_ENABLED) ? AMD_SEV_BIT : AMD_SME_BIT;

	/* Check if memory encryption is enabled */
	if (feature_mask == AMD_SME_BIT) {
		/*
		 * No SME if Hypervisor bit is set. This check is here to
		 * prevent a guest from trying to enable SME. For running as a
		 * KVM guest the MSR_K8_SYSCFG will be sufficient, but there
		 * might be other hypervisors which emulate that MSR as non-zero
		 * or even pass it through to the guest.
		 * A malicious hypervisor can still trick a guest into this
		 * path, but there is no way to protect against that.
		 */
		eax = 1;
		ecx = 0;
		native_cpuid(&eax, &ebx, &ecx, &edx);
		if (ecx & BIT(31))
			return;

		/* For SME, check the SYSCFG MSR */
		msr = __rdmsr(MSR_K8_SYSCFG);
		if (!(msr & MSR_K8_SYSCFG_MEM_ENCRYPT))
			return;
	} else {
		/* SEV state cannot be controlled by a command line option */
		sme_me_mask = me_mask;
		sev_enabled = true;
		physical_mask &= ~sme_me_mask;
		return;
	}

	/*
	 * Fixups have not been applied to phys_base yet and we're running
	 * identity mapped, so we must obtain the address to the SME command
	 * line argument data using rip-relative addressing.
	 */
	asm ("lea sme_cmdline_arg(%%rip), %0"
	     : "=r" (cmdline_arg)
	     : "p" (sme_cmdline_arg));
	asm ("lea sme_cmdline_on(%%rip), %0"
	     : "=r" (cmdline_on)
	     : "p" (sme_cmdline_on));
	asm ("lea sme_cmdline_off(%%rip), %0"
	     : "=r" (cmdline_off)
	     : "p" (sme_cmdline_off));

	if (IS_ENABLED(CONFIG_AMD_MEM_ENCRYPT_ACTIVE_BY_DEFAULT))
		active_by_default = true;
	else
		active_by_default = false;

	cmdline_ptr = (const char *)((u64)bp->hdr.cmd_line_ptr |
				     ((u64)bp->ext_cmd_line_ptr << 32));

	cmdline_find_option(cmdline_ptr, cmdline_arg, buffer, sizeof(buffer));

	if (!strncmp(buffer, cmdline_on, sizeof(buffer)))
		sme_me_mask = me_mask;
	else if (!strncmp(buffer, cmdline_off, sizeof(buffer)))
		sme_me_mask = 0;
	else
		sme_me_mask = active_by_default ? me_mask : 0;

	physical_mask &= ~sme_me_mask;
}