/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * AMD Memory Encryption Support
 *
 * Copyright (C) 2016 Advanced Micro Devices, Inc.
 *
 * Author: Tom Lendacky <thomas.lendacky@amd.com>
 */

#include <linux/linkage.h>
#include <linux/pgtable.h>
#include <asm/page.h>
#include <asm/processor-flags.h>
#include <asm/msr-index.h>
#include <asm/nospec-branch.h>

	.text
	.code64
SYM_FUNC_START(sme_encrypt_execute)

	/*
	 * Entry parameters:
	 *   RDI - virtual address for the encrypted mapping
	 *   RSI - virtual address for the decrypted mapping
	 *   RDX - length to encrypt
	 *   RCX - virtual address of the encryption workarea, including:
	 *     - stack page (PAGE_SIZE)
	 *     - encryption routine page (PAGE_SIZE)
	 *     - intermediate copy buffer (PMD_PAGE_SIZE)
	 *    R8 - physical address of the pagetables to use for encryption
	 */
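	/*
	 * For reference, the C-side declaration (expected to live in
	 * asm/mem_encrypt.h in this tree; the exact spelling is an
	 * assumption, not verified from this file alone) presumably
	 * looks like:
	 *
	 *   void __init sme_encrypt_execute(unsigned long encrypted_kernel_vaddr,
	 *                                   unsigned long decrypted_kernel_vaddr,
	 *                                   unsigned long kernel_len,
	 *                                   unsigned long encryption_wa,
	 *                                   unsigned long encryption_pgd);
	 *
	 * with the five arguments arriving in RDI, RSI, RDX, RCX and R8
	 * per the SysV AMD64 calling convention.
	 */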

	push	%rbp
	movq	%rsp, %rbp		/* RBP now has original stack pointer */

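	/*
	 * A private stack is set up below, presumably because the original
	 * stack may sit inside the very region being encrypted; its
	 * contents would no longer read back correctly once that region
	 * has been encrypted underneath it.
	 */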
	/* Set up a one page stack in the non-encrypted memory area */
	movq	%rcx, %rax		/* Workarea stack page */
	leaq	PAGE_SIZE(%rax), %rsp	/* Set new stack pointer */
	addq	$PAGE_SIZE, %rax	/* Workarea encryption routine */

	push	%r12
	movq	%rdi, %r10		/* Encrypted area */
	movq	%rsi, %r11		/* Decrypted area */
	movq	%rdx, %r12		/* Area length */

	/* Copy encryption routine into the workarea */
	movq	%rax, %rdi				/* Workarea encryption routine */
	leaq	__enc_copy(%rip), %rsi			/* Encryption routine */
	movq	$(.L__enc_copy_end - __enc_copy), %rcx	/* Encryption routine length */
	rep	movsb
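	/*
	 * __enc_copy is now resident in the workarea; the indirect call
	 * below executes that copy rather than the original, since the
	 * original sits in the kernel image being encrypted (see the
	 * __enc_copy header comment below).
	 */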

	/* Setup registers for call */
	movq	%r10, %rdi		/* Encrypted area */
	movq	%r11, %rsi		/* Decrypted area */
	movq	%r8, %rdx		/* Pagetables used for encryption */
	movq	%r12, %rcx		/* Area length */
	movq	%rax, %r8		/* Workarea encryption routine */
	addq	$PAGE_SIZE, %r8		/* Workarea intermediate copy buffer */

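	/*
	 * ANNOTATE_RETPOLINE_SAFE tells objtool not to flag this bare
	 * indirect call: RAX holds a locally computed address (the fresh
	 * workarea copy made above), not an attacker-influenced pointer,
	 * so a retpoline is deemed unnecessary here.
	 */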
	ANNOTATE_RETPOLINE_SAFE
	call	*%rax			/* Call the encryption routine */

	pop	%r12

	movq	%rbp, %rsp		/* Restore original stack pointer */
	pop	%rbp

	/* Offset to __x86_return_thunk would be wrong here */
	ANNOTATE_UNRET_SAFE
	ret
	int3
SYM_FUNC_END(sme_encrypt_execute)

SYM_FUNC_START(__enc_copy)
/*
 * Routine used to encrypt memory in place.
 *   This routine must be run outside of the kernel proper since
 *   the kernel will be encrypted during the process. So this
 *   routine is defined here and then copied to an area outside
 *   of the kernel where it will remain and run decrypted
 *   during execution.
 *
 *   On entry the registers must be:
 *     RDI - virtual address for the encrypted mapping
 *     RSI - virtual address for the decrypted mapping
 *     RDX - address of the pagetables to use for encryption
 *     RCX - length of area
 *      R8 - intermediate copy buffer
 *
 *     RAX - points to this routine
 *
 * The area will be encrypted by copying from the non-encrypted
 * memory space to an intermediate buffer and then copying from the
 * intermediate buffer back to the encrypted memory space. The physical
 * addresses of the two mappings are the same which results in the area
 * being encrypted "in place".
 */
	/* Enable the new page tables */
	mov	%rdx, %cr3

	/* Flush any global TLBs */
	mov	%cr4, %rdx
	andq	$~X86_CR4_PGE, %rdx
	mov	%rdx, %cr4
	orq	$X86_CR4_PGE, %rdx
	mov	%rdx, %cr4
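	/*
	 * Writing CR3 above flushed non-global TLB entries; toggling
	 * CR4.PGE off and back on flushes global entries as well, so no
	 * stale translations from the previous pagetables remain.
	 */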

	push	%r15
	push	%r12

	movq	%rcx, %r9		/* Save area length */
	movq	%rdi, %r10		/* Save encrypted area address */
	movq	%rsi, %r11		/* Save decrypted area address */

	/* Set the PAT register PA5 entry to write-protect */
	movl	$MSR_IA32_CR_PAT, %ecx
	rdmsr
	mov	%rdx, %r15		/* Save original PAT value */
	andl	$0xffff00ff, %edx	/* Clear PA5 */
	orl	$0x00000500, %edx	/* Set PA5 to WP */
	wrmsr
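	/*
	 * PAT entry 5 is selected by page-table bits PWT=1, PCD=0, PAT=1;
	 * the caller-built encryption pagetables are presumably set up to
	 * reference that entry for a mapping that must get the
	 * write-protect type just programmed. RDMSR/WRMSR use EDX:EAX,
	 * and PA5 is byte 5 of the MSR, i.e. bits 8-15 of EDX, which is
	 * why saving EDX alone (in R15) is sufficient.
	 */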

	wbinvd				/* Write back and invalidate any cache entries */

	/* Copy/encrypt up to 2MB at a time */
	movq	$PMD_PAGE_SIZE, %r12
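	/*
	 * The chunk size in R12 is clamped to min(remaining length,
	 * PMD_PAGE_SIZE) at the top of each pass, so a final partial
	 * chunk is handled by the same loop body.
	 */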
1:
	cmpq	%r12, %r9
	jnb	2f
	movq	%r9, %r12

2:
	movq	%r11, %rsi		/* Source - decrypted area */
	movq	%r8, %rdi		/* Dest   - intermediate copy buffer */
	movq	%r12, %rcx
	rep	movsb

	movq	%r8, %rsi		/* Source - intermediate copy buffer */
	movq	%r10, %rdi		/* Dest   - encrypted area */
	movq	%r12, %rcx
	rep	movsb

	addq	%r12, %r11
	addq	%r12, %r10
	subq	%r12, %r9		/* Decrement area length */
	jnz	1b			/* Loop until area length is zero */

	/* Restore PAT register */
	movl	$MSR_IA32_CR_PAT, %ecx
	rdmsr
	mov	%r15, %rdx		/* Restore original PAT value */
	wrmsr

	pop	%r12
	pop	%r15

	/* Offset to __x86_return_thunk would be wrong here */
	ANNOTATE_UNRET_SAFE
	ret
	int3
.L__enc_copy_end:
SYM_FUNC_END(__enc_copy)