1*4882a593Smuzhiyun // SPDX-License-Identifier: GPL-2.0
2*4882a593Smuzhiyun #include <linux/io.h>
3*4882a593Smuzhiyun #include <linux/slab.h>
4*4882a593Smuzhiyun #include <linux/memblock.h>
5*4882a593Smuzhiyun #include <linux/mem_encrypt.h>
6*4882a593Smuzhiyun #include <linux/pgtable.h>
7*4882a593Smuzhiyun
8*4882a593Smuzhiyun #include <asm/set_memory.h>
9*4882a593Smuzhiyun #include <asm/realmode.h>
10*4882a593Smuzhiyun #include <asm/tlbflush.h>
11*4882a593Smuzhiyun #include <asm/crash.h>
12*4882a593Smuzhiyun #include <asm/sev-es.h>
13*4882a593Smuzhiyun
/* Header of the real-mode blob; set by set_real_mode_mem() once reserved. */
struct real_mode_header *real_mode_header;
/* Points at trampoline_header->cr4; APs load this CR4 value on bring-up. */
u32 *trampoline_cr4_features;

/* Hold the pgd entry used on booting additional CPUs */
pgd_t trampoline_pgd_entry;
19*4882a593Smuzhiyun
/*
 * Switch the CPU onto the trampoline page table, as used right before
 * dropping back to real mode (e.g. for reboot paths).
 */
void load_trampoline_pgtable(void)
{
#ifdef CONFIG_X86_32
	load_cr3(initial_page_table);
#else
	/*
	 * This function is called before exiting to real-mode and that will
	 * fail with CR4.PCIDE still set.
	 */
	if (boot_cpu_has(X86_FEATURE_PCID))
		cr4_clear_bits(X86_CR4_PCIDE);

	write_cr3(real_mode_header->trampoline_pgd);
#endif

	/*
	 * The CR3 write above will not flush global TLB entries.
	 * Stale, global entries from previous page tables may still be
	 * present. Flush those stale entries.
	 *
	 * This ensures that memory accessed while running with
	 * trampoline_pgd is *actually* mapped into trampoline_pgd.
	 */
	__flush_tlb_all();
}
45*4882a593Smuzhiyun
reserve_real_mode(void)46*4882a593Smuzhiyun void __init reserve_real_mode(void)
47*4882a593Smuzhiyun {
48*4882a593Smuzhiyun phys_addr_t mem;
49*4882a593Smuzhiyun size_t size = real_mode_size_needed();
50*4882a593Smuzhiyun
51*4882a593Smuzhiyun if (!size)
52*4882a593Smuzhiyun return;
53*4882a593Smuzhiyun
54*4882a593Smuzhiyun WARN_ON(slab_is_available());
55*4882a593Smuzhiyun
56*4882a593Smuzhiyun /* Has to be under 1M so we can execute real-mode AP code. */
57*4882a593Smuzhiyun mem = memblock_find_in_range(0, 1<<20, size, PAGE_SIZE);
58*4882a593Smuzhiyun if (!mem) {
59*4882a593Smuzhiyun pr_info("No sub-1M memory is available for the trampoline\n");
60*4882a593Smuzhiyun return;
61*4882a593Smuzhiyun }
62*4882a593Smuzhiyun
63*4882a593Smuzhiyun memblock_reserve(mem, size);
64*4882a593Smuzhiyun set_real_mode_mem(mem);
65*4882a593Smuzhiyun crash_reserve_low_1M();
66*4882a593Smuzhiyun }
67*4882a593Smuzhiyun
/*
 * Propagate SME/SEV-ES state into the trampoline header so the AP
 * startup path knows how to behave under memory encryption.
 */
static void sme_sev_setup_real_mode(struct trampoline_header *th)
{
#ifdef CONFIG_AMD_MEM_ENCRYPT
	/* Tell the trampoline that memory encryption is active. */
	if (sme_active())
		th->flags |= TH_FLAGS_SME_ACTIVE;

	if (!sev_es_active())
		return;

	/*
	 * Skip the call to verify_cpu() in secondary_startup_64 as it
	 * will cause #VC exceptions when the AP can't handle them yet.
	 */
	th->start = (u64) secondary_startup_64_no_verify;

	if (sev_es_setup_ap_jump_table(real_mode_header))
		panic("Failed to get/update SEV-ES AP Jump Table");
#endif
}
86*4882a593Smuzhiyun
/*
 * Copy the real-mode blob into its reserved sub-1M home, apply its
 * relocations, and fill in the trampoline header (entry point, GDT or
 * EFER/CR4/page tables depending on bitness).
 */
static void __init setup_real_mode(void)
{
	u16 real_mode_seg;
	const u32 *rel;
	u32 count;
	unsigned char *base;
	unsigned long phys_base;
	struct trampoline_header *trampoline_header;
	size_t size = PAGE_ALIGN(real_mode_blob_end - real_mode_blob);
#ifdef CONFIG_X86_64
	u64 *trampoline_pgd;
	u64 efer;
	int i;
#endif

	base = (unsigned char *)real_mode_header;

	/*
	 * If SME is active, the trampoline area will need to be in
	 * decrypted memory in order to bring up other processors
	 * successfully. This is not needed for SEV.
	 */
	if (sme_active())
		set_memory_decrypted((unsigned long)base, size >> PAGE_SHIFT);

	memcpy(base, real_mode_blob, size);

	/* APs address the blob with 16-bit segments: segment = phys >> 4. */
	phys_base = __pa(base);
	real_mode_seg = phys_base >> 4;

	rel = (u32 *) real_mode_relocs;

	/* 16-bit segment relocations. */
	count = *rel++;
	while (count--) {
		u16 *seg = (u16 *) (base + *rel++);
		*seg = real_mode_seg;
	}

	/* 32-bit linear relocations. */
	count = *rel++;
	while (count--) {
		u32 *ptr = (u32 *) (base + *rel++);
		*ptr += phys_base;
	}

	/* Must be performed *after* relocation. */
	trampoline_header = (struct trampoline_header *)
		__va(real_mode_header->trampoline_header);

#ifdef CONFIG_X86_32
	trampoline_header->start = __pa_symbol(startup_32_smp);
	trampoline_header->gdt_limit = __BOOT_DS + 7;
	trampoline_header->gdt_base = __pa_symbol(boot_gdt);
#else
	/*
	 * Some AMD processors will #GP(0) if EFER.LMA is set in WRMSR
	 * so we need to mask it out.
	 */
	rdmsrl(MSR_EFER, efer);
	trampoline_header->efer = efer & ~EFER_LMA;

	trampoline_header->start = (u64) secondary_startup_64;
	trampoline_cr4_features = &trampoline_header->cr4;
	*trampoline_cr4_features = mmu_cr4_features;

	trampoline_header->flags = 0;

	trampoline_pgd = (u64 *) __va(real_mode_header->trampoline_pgd);

	/* Map the real mode stub as virtual == physical */
	trampoline_pgd[0] = trampoline_pgd_entry.pgd;

	/*
	 * Include the entirety of the kernel mapping into the trampoline
	 * PGD. This way, all mappings present in the normal kernel page
	 * tables are usable while running on trampoline_pgd.
	 */
	for (i = pgd_index(__PAGE_OFFSET); i < PTRS_PER_PGD; i++)
		trampoline_pgd[i] = init_top_pgt[i].pgd;
#endif

	sme_sev_setup_real_mode(trampoline_header);
}
171*4882a593Smuzhiyun
/*
 * reserve_real_mode() gets called very early, to guarantee the
 * availability of low memory. This is before the proper kernel page
 * tables are set up, so we cannot set page permissions in that
 * function. Also, the trampoline code will be executed by APs, so we
 * need to mark it executable at do_pre_smp_initcalls() at the latest,
 * thus run it as an early_initcall().
 */
set_real_mode_permissions(void)180*4882a593Smuzhiyun static void __init set_real_mode_permissions(void)
181*4882a593Smuzhiyun {
182*4882a593Smuzhiyun unsigned char *base = (unsigned char *) real_mode_header;
183*4882a593Smuzhiyun size_t size = PAGE_ALIGN(real_mode_blob_end - real_mode_blob);
184*4882a593Smuzhiyun
185*4882a593Smuzhiyun size_t ro_size =
186*4882a593Smuzhiyun PAGE_ALIGN(real_mode_header->ro_end) -
187*4882a593Smuzhiyun __pa(base);
188*4882a593Smuzhiyun
189*4882a593Smuzhiyun size_t text_size =
190*4882a593Smuzhiyun PAGE_ALIGN(real_mode_header->ro_end) -
191*4882a593Smuzhiyun real_mode_header->text_start;
192*4882a593Smuzhiyun
193*4882a593Smuzhiyun unsigned long text_start =
194*4882a593Smuzhiyun (unsigned long) __va(real_mode_header->text_start);
195*4882a593Smuzhiyun
196*4882a593Smuzhiyun set_memory_nx((unsigned long) base, size >> PAGE_SHIFT);
197*4882a593Smuzhiyun set_memory_ro((unsigned long) base, ro_size >> PAGE_SHIFT);
198*4882a593Smuzhiyun set_memory_x((unsigned long) text_start, text_size >> PAGE_SHIFT);
199*4882a593Smuzhiyun }
200*4882a593Smuzhiyun
init_real_mode(void)201*4882a593Smuzhiyun static int __init init_real_mode(void)
202*4882a593Smuzhiyun {
203*4882a593Smuzhiyun if (!real_mode_header)
204*4882a593Smuzhiyun panic("Real mode trampoline was not allocated");
205*4882a593Smuzhiyun
206*4882a593Smuzhiyun setup_real_mode();
207*4882a593Smuzhiyun set_real_mode_permissions();
208*4882a593Smuzhiyun
209*4882a593Smuzhiyun return 0;
210*4882a593Smuzhiyun }
211*4882a593Smuzhiyun early_initcall(init_real_mode);
212