// SPDX-License-Identifier: GPL-2.0-only
/*
 * Suspend support specific for i386/x86-64.
 *
 * Copyright (c) 2007 Rafael J. Wysocki <rjw@sisk.pl>
 * Copyright (c) 2002 Pavel Machek <pavel@ucw.cz>
 * Copyright (c) 2001 Patrick Mochel <mochel@osdl.org>
 */

#include <linux/suspend.h>
#include <linux/export.h>
#include <linux/smp.h>
#include <linux/perf_event.h>
#include <linux/tboot.h>
#include <linux/dmi.h>
#include <linux/pgtable.h>

#include <asm/proto.h>
#include <asm/mtrr.h>
#include <asm/page.h>
#include <asm/mce.h>
#include <asm/suspend.h>
#include <asm/fpu/internal.h>
#include <asm/debugreg.h>
#include <asm/cpu.h>
#include <asm/mmu_context.h>
#include <asm/cpu_device_id.h>
#include <asm/microcode.h>

#ifdef CONFIG_X86_32
__visible unsigned long saved_context_ebx;
__visible unsigned long saved_context_esp, saved_context_ebp;
__visible unsigned long saved_context_esi, saved_context_edi;
__visible unsigned long saved_context_eflags;
#endif
struct saved_context saved_context;

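/*
 * Snapshot the current values of all MSRs registered via msr_build_context().
 * Only MSRs that probed as readable at registration time are read here.
 */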
static void msr_save_context(struct saved_context *ctxt)
{
	struct saved_msr *msr = ctxt->saved_msrs.array;
	struct saved_msr *end = msr + ctxt->saved_msrs.num;

	while (msr < end) {
		if (msr->valid)
			rdmsrl(msr->info.msr_no, msr->info.reg.q);
		msr++;
	}
}

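/*
 * Write the saved values back to the registered MSRs, skipping any that
 * could not be read when the context was saved.
 */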
static void msr_restore_context(struct saved_context *ctxt)
{
	struct saved_msr *msr = ctxt->saved_msrs.array;
	struct saved_msr *end = msr + ctxt->saved_msrs.num;

	while (msr < end) {
		if (msr->valid)
			wrmsrl(msr->info.msr_no, msr->info.reg.q);
		msr++;
	}
}

/**
 * __save_processor_state - save CPU registers before creating a
 * hibernation image and before restoring the memory state from it
 * @ctxt: structure to store the register contents in
 *
 * NOTE: If there is a CPU register the modification of which by the
 * boot kernel (i.e. the kernel used for loading the hibernation image)
 * might affect the operations of the restored target kernel (i.e. the one
 * saved in the hibernation image), then its contents must be saved by this
 * function. In other words, if kernel A is hibernated and a different
 * kernel B is used for loading the hibernation image into memory, then
 * kernel A's __save_processor_state() function must save all registers
 * needed by kernel A, so that it can operate correctly after the resume
 * regardless of what kernel B does in the meantime.
 */
static void __save_processor_state(struct saved_context *ctxt)
{
#ifdef CONFIG_X86_32
	mtrr_save_fixed_ranges(NULL);
#endif
	kernel_fpu_begin();

	/*
	 * descriptor tables
	 */
	store_idt(&ctxt->idt);

	/*
	 * We save it here, but restore it only in the hibernate case.
	 * For ACPI S3 resume, this is loaded via 'early_gdt_desc' in 64-bit
	 * mode in "secondary_startup_64". In 32-bit mode it is done via
	 * 'pmode_gdt' in wakeup_start.
	 */
	ctxt->gdt_desc.size = GDT_SIZE - 1;
	ctxt->gdt_desc.address = (unsigned long)get_cpu_gdt_rw(smp_processor_id());

	store_tr(ctxt->tr);

	/* XMM0..XMM15 should be handled by kernel_fpu_begin(). */
	/*
	 * segment registers
	 */
#ifdef CONFIG_X86_32_LAZY_GS
	savesegment(gs, ctxt->gs);
#endif
#ifdef CONFIG_X86_64
	savesegment(gs, ctxt->gs);
	savesegment(fs, ctxt->fs);
	savesegment(ds, ctxt->ds);
	savesegment(es, ctxt->es);

	rdmsrl(MSR_FS_BASE, ctxt->fs_base);
	rdmsrl(MSR_GS_BASE, ctxt->kernelmode_gs_base);
	rdmsrl(MSR_KERNEL_GS_BASE, ctxt->usermode_gs_base);
	mtrr_save_fixed_ranges(NULL);

	rdmsrl(MSR_EFER, ctxt->efer);
#endif

	/*
	 * control registers
	 */
	ctxt->cr0 = read_cr0();
	ctxt->cr2 = read_cr2();
	ctxt->cr3 = __read_cr3();
	ctxt->cr4 = __read_cr4();
	ctxt->misc_enable_saved = !rdmsrl_safe(MSR_IA32_MISC_ENABLE,
					       &ctxt->misc_enable);
	msr_save_context(ctxt);
}

/* Needed by apm.c */
void save_processor_state(void)
{
	__save_processor_state(&saved_context);
	x86_platform.save_sched_clock_state();
}
#ifdef CONFIG_X86_32
EXPORT_SYMBOL(save_processor_state);
#endif

static void do_fpu_end(void)
{
	/*
	 * Restore FPU regs if necessary.
	 */
	kernel_fpu_end();
}

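/*
 * Reload the descriptor-table state that the low-level wakeup/restore code
 * cannot bring back by itself: TR (via a freshly re-armed TSS descriptor),
 * the LDT, the TLB state and the per-CPU FPU state, finishing by switching
 * back to the read-only fixmap GDT alias.
 */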
static void fix_processor_context(void)
{
	int cpu = smp_processor_id();
#ifdef CONFIG_X86_64
	struct desc_struct *desc = get_cpu_gdt_rw(cpu);
	tss_desc tss;
#endif

	/*
	 * We need to reload TR, which requires that we change the
	 * GDT entry to indicate "available" first.
	 *
	 * XXX: This could probably all be replaced by a call to
	 * force_reload_TR().
	 */
	set_tss_desc(cpu, &get_cpu_entry_area(cpu)->tss.x86_tss);

#ifdef CONFIG_X86_64
	memcpy(&tss, &desc[GDT_ENTRY_TSS], sizeof(tss_desc));
	tss.type = 0x9; /* The available 64-bit TSS (see AMD vol 2, pg 91) */
	write_gdt_entry(desc, GDT_ENTRY_TSS, &tss, DESC_TSS);

	syscall_init(); /* This sets MSR_*STAR and related */
#else
	if (boot_cpu_has(X86_FEATURE_SEP))
		enable_sep_cpu();
#endif
	load_TR_desc(); /* This does ltr */
	load_mm_ldt(current->active_mm); /* This does lldt */
	initialize_tlbstate_and_flush();

	fpu__resume_cpu();

	/* The processor is back on the direct GDT, load back the fixmap */
	load_fixmap_gdt(cpu);
}

/**
 * __restore_processor_state - restore the contents of CPU registers saved
 * by __save_processor_state()
 * @ctxt: structure to load the register contents from
 *
 * The asm code that gets us here will have restored a usable GDT, although
 * it will be pointing to the wrong alias.
 */
static void notrace __restore_processor_state(struct saved_context *ctxt)
{
	struct cpuinfo_x86 *c;

	if (ctxt->misc_enable_saved)
		wrmsrl(MSR_IA32_MISC_ENABLE, ctxt->misc_enable);
	/*
	 * control registers
	 */
	/* cr4 was introduced in the Pentium CPU */
#ifdef CONFIG_X86_32
	if (ctxt->cr4)
		__write_cr4(ctxt->cr4);
#else
/* CONFIG X86_64 */
	wrmsrl(MSR_EFER, ctxt->efer);
	__write_cr4(ctxt->cr4);
#endif
	write_cr3(ctxt->cr3);
	write_cr2(ctxt->cr2);
	write_cr0(ctxt->cr0);

	/* Restore the IDT. */
	load_idt(&ctxt->idt);

	/*
	 * Just in case the asm code got us here with the SS, DS, or ES
	 * out of sync with the GDT, update them.
	 */
	loadsegment(ss, __KERNEL_DS);
	loadsegment(ds, __USER_DS);
	loadsegment(es, __USER_DS);

	/*
	 * Restore percpu access. Percpu access can happen in exception
	 * handlers or in complicated helpers like load_gs_index().
	 */
#ifdef CONFIG_X86_64
	wrmsrl(MSR_GS_BASE, ctxt->kernelmode_gs_base);
#else
	loadsegment(fs, __KERNEL_PERCPU);
	loadsegment(gs, __KERNEL_STACK_CANARY);
#endif

	/* Restore the TSS, RO GDT, LDT, and usermode-relevant MSRs. */
	fix_processor_context();

	/*
	 * Now that we have descriptor tables fully restored and working
	 * exception handling, restore the usermode segments.
	 */
#ifdef CONFIG_X86_64
	loadsegment(ds, ctxt->es);
	loadsegment(es, ctxt->es);
	loadsegment(fs, ctxt->fs);
	load_gs_index(ctxt->gs);

	/*
	 * Restore FSBASE and GSBASE after restoring the selectors, since
	 * restoring the selectors clobbers the bases. Keep in mind
	 * that MSR_KERNEL_GS_BASE is horribly misnamed.
	 */
	wrmsrl(MSR_FS_BASE, ctxt->fs_base);
	wrmsrl(MSR_KERNEL_GS_BASE, ctxt->usermode_gs_base);
#elif defined(CONFIG_X86_32_LAZY_GS)
	loadsegment(gs, ctxt->gs);
#endif

	do_fpu_end();
	tsc_verify_tsc_adjust(true);
	x86_platform.restore_sched_clock_state();
	mtrr_bp_restore();
	perf_restore_debug_store();

	c = &cpu_data(smp_processor_id());
	if (cpu_has(c, X86_FEATURE_MSR_IA32_FEAT_CTL))
		init_ia32_feat_ctl(c);

	microcode_bsp_resume();

	/*
	 * This needs to happen after the microcode has been updated upon resume
	 * because some of the MSRs are "emulated" in microcode.
	 */
	msr_restore_context(ctxt);
}

/* Needed by apm.c */
void notrace restore_processor_state(void)
{
#ifdef __clang__
	/*
	 * The following code snippet is copied from __restore_processor_state.
	 * Its purpose is to prepare the GS segment before that function is
	 * called, because the function is compiled with SCS on and will use
	 * GS at its entry.
	 * TODO: Hack to be removed later when the compiler bug is fixed.
	 */
#ifdef CONFIG_X86_64
	wrmsrl(MSR_GS_BASE, saved_context.kernelmode_gs_base);
#else
	loadsegment(fs, __KERNEL_PERCPU);
	loadsegment(gs, __KERNEL_STACK_CANARY);
#endif
#endif
	__restore_processor_state(&saved_context);
}
#ifdef CONFIG_X86_32
EXPORT_SYMBOL(restore_processor_state);
#endif

#if defined(CONFIG_HIBERNATION) && defined(CONFIG_HOTPLUG_CPU)
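/*
 * Park an offlined CPU in a plain HLT loop for the duration of image
 * restoration. Unlike the regular play_dead path, this deliberately avoids
 * MONITOR/MWAIT; see the comment in hibernate_resume_nonboot_cpu_disable()
 * below for why that matters.
 */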
static void resume_play_dead(void)
{
	play_dead_common();
	tboot_shutdown(TB_SHUTDOWN_WFS);
	hlt_play_dead();
}

int hibernate_resume_nonboot_cpu_disable(void)
{
	void (*play_dead)(void) = smp_ops.play_dead;
	int ret;

	/*
	 * Ensure that MONITOR/MWAIT will not be used in the "play dead" loop
	 * during hibernate image restoration, because it is likely that the
	 * monitored address will be actually written to at that time and then
	 * the "dead" CPU will attempt to execute instructions again, but the
	 * address in its instruction pointer may not be possible to resolve
	 * any more at that point (the page tables used by it previously may
	 * have been overwritten by hibernate image data).
	 *
	 * First, make sure that we wake up all the potentially disabled SMT
	 * threads which have been initially brought up and then put into
	 * mwait/cpuidle sleep.
	 * Those will be put to proper (not interfering with hibernation
	 * resume) sleep afterwards, and the resumed kernel will decide itself
	 * what to do with them.
	 */
	ret = cpuhp_smt_enable();
	if (ret)
		return ret;
	smp_ops.play_dead = resume_play_dead;
	ret = freeze_secondary_cpus(0);
	smp_ops.play_dead = play_dead;
	return ret;
}
#endif

/*
 * When bsp_check() is called in hibernate and suspend, cpu hotplug
 * is disabled already, so it's unnecessary to handle race conditions
 * between cpumask queries and cpu hotplug.
 */
static int bsp_check(void)
{
	if (cpumask_first(cpu_online_mask) != 0) {
		pr_warn("CPU0 is offline.\n");
		return -ENODEV;
	}

	return 0;
}

static int bsp_pm_callback(struct notifier_block *nb, unsigned long action,
			   void *ptr)
{
	int ret = 0;

	switch (action) {
	case PM_SUSPEND_PREPARE:
	case PM_HIBERNATION_PREPARE:
		ret = bsp_check();
		break;
#ifdef CONFIG_DEBUG_HOTPLUG_CPU0
	case PM_RESTORE_PREPARE:
		/*
		 * When the system resumes from hibernation, online CPU0 because
		 * 1. it's required for resume, and
		 * 2. the CPU was online before hibernation.
		 */
		if (!cpu_online(0))
			_debug_hotplug_cpu(0, 1);
		break;
	case PM_POST_RESTORE:
		/*
		 * When a resume really happens, this code won't be called.
		 *
		 * This code is called only when user space hibernation software
		 * prepares the snapshot device during boot time. So we just
		 * call _debug_hotplug_cpu() to restore CPU0's state prior to
		 * preparing the snapshot device.
		 *
		 * This works for the normal boot case in our CPU0 hotplug debug
		 * mode, i.e. CPU0 is offline and user mode hibernation
		 * software initializes during boot time.
		 *
		 * If CPU0 is online and a user application accesses the snapshot
		 * device after boot time, this will offline CPU0 and the user may
		 * see different CPU0 states before and after accessing
		 * the snapshot device. But hopefully that is not the case when
		 * the user is debugging CPU0 hotplug. Even if users hit this
		 * case, they can easily online CPU0 again.
		 *
		 * To simplify this debug code, we only consider the normal boot
		 * case. Otherwise we would need to remember CPU0's state, restore
		 * to that state, resolve race conditions, etc.
		 */
		_debug_hotplug_cpu(0, 0);
		break;
#endif
	default:
		break;
	}
	return notifier_from_errno(ret);
}

static int __init bsp_pm_check_init(void)
{
	/*
	 * Set this bsp_pm_callback as lower priority than
	 * cpu_hotplug_pm_callback, so cpu_hotplug_pm_callback will be called
	 * earlier to disable cpu hotplug before the bsp online check.
	 */
	pm_notifier(bsp_pm_callback, -INT_MAX);
	return 0;
}

core_initcall(bsp_pm_check_init);

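/*
 * Append @num MSR ids from @msr_id to the global list of MSRs to be saved
 * across suspend/resume. The existing list, if any, is preserved, and each
 * new MSR is probed with rdmsrl_safe() so that MSRs which are not actually
 * present on this CPU are marked invalid and skipped later on.
 */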
static int msr_build_context(const u32 *msr_id, const int num)
{
	struct saved_msrs *saved_msrs = &saved_context.saved_msrs;
	struct saved_msr *msr_array;
	int total_num;
	int i, j;

	total_num = saved_msrs->num + num;

	msr_array = kmalloc_array(total_num, sizeof(struct saved_msr), GFP_KERNEL);
	if (!msr_array) {
		pr_err("x86/pm: Can not allocate memory to save/restore MSRs during suspend.\n");
		return -ENOMEM;
	}

	if (saved_msrs->array) {
		/*
		 * Multiple callbacks can invoke this function, so copy any
		 * MSR save requests from previous invocations.
		 */
		memcpy(msr_array, saved_msrs->array,
		       sizeof(struct saved_msr) * saved_msrs->num);

		kfree(saved_msrs->array);
	}

	for (i = saved_msrs->num, j = 0; i < total_num; i++, j++) {
		u64 dummy;

		msr_array[i].info.msr_no = msr_id[j];
		msr_array[i].valid = !rdmsrl_safe(msr_id[j], &dummy);
		msr_array[i].info.reg.q = 0;
	}
	saved_msrs->num = total_num;
	saved_msrs->array = msr_array;

	return 0;
}

/*
 * The following sections are a quirk framework for problematic BIOSen:
 * Sometimes MSRs are modified by the BIOS after suspend to RAM, which
 * might cause unexpected behavior after wakeup.
 * Thus we save/restore these specified MSRs across suspend/resume
 * in order to work around it.
 *
 * For any further problematic BIOSen/platforms,
 * please add your own function similar to msr_initialize_bdw.
 */
static int msr_initialize_bdw(const struct dmi_system_id *d)
{
	/* Add any extra MSR ids into this array. */
	u32 bdw_msr_id[] = { MSR_IA32_THERM_CONTROL };

	pr_info("x86/pm: %s detected, MSR saving is needed during suspending.\n", d->ident);
	return msr_build_context(bdw_msr_id, ARRAY_SIZE(bdw_msr_id));
}

static const struct dmi_system_id msr_save_dmi_table[] = {
	{
		.callback = msr_initialize_bdw,
		.ident = "BROADWELL BDX_EP",
		.matches = {
			DMI_MATCH(DMI_PRODUCT_NAME, "GRANTLEY"),
			DMI_MATCH(DMI_PRODUCT_VERSION, "E63448-400"),
		},
	},
	{}
};

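/*
 * A minimal sketch of what a further DMI-based quirk could look like; the
 * "foo" names below are placeholders, not a real platform:
 *
 *	static int msr_initialize_foo(const struct dmi_system_id *d)
 *	{
 *		u32 foo_msr_id[] = { MSR_IA32_THERM_CONTROL };
 *
 *		pr_info("x86/pm: %s detected, MSR saving is needed during suspending.\n",
 *			d->ident);
 *		return msr_build_context(foo_msr_id, ARRAY_SIZE(foo_msr_id));
 *	}
 *
 * together with a matching .callback/.ident/.matches entry added to
 * msr_save_dmi_table above.
 */

/*
 * Some AMD family 0x15/0x16 BIOSes do not restore MSR_AMD64_CPUID_FN_1
 * (which can override CPUID feature bits, e.g. RDRAND) after resume, so
 * register it for save/restore on those CPUs.
 */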
static int msr_save_cpuid_features(const struct x86_cpu_id *c)
{
	u32 cpuid_msr_id[] = {
		MSR_AMD64_CPUID_FN_1,
	};

	pr_info("x86/pm: family %#hx cpu detected, MSR saving is needed during suspending.\n",
		c->family);

	return msr_build_context(cpuid_msr_id, ARRAY_SIZE(cpuid_msr_id));
}

static const struct x86_cpu_id msr_save_cpu_table[] = {
	X86_MATCH_VENDOR_FAM(AMD, 0x15, &msr_save_cpuid_features),
	X86_MATCH_VENDOR_FAM(AMD, 0x16, &msr_save_cpuid_features),
	{}
};

typedef int (*pm_cpu_match_t)(const struct x86_cpu_id *);
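/*
 * Match the running CPU against msr_save_cpu_table and, on a hit, run the
 * registration callback stashed in the entry's driver_data.
 */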
static int pm_cpu_check(const struct x86_cpu_id *c)
{
	const struct x86_cpu_id *m;
	int ret = 0;

	m = x86_match_cpu(msr_save_cpu_table);
	if (m) {
		pm_cpu_match_t fn;

		fn = (pm_cpu_match_t)m->driver_data;
		ret = fn(m);
	}

	return ret;
}

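/*
 * Register the speculation-control MSRs for save/restore, but only those
 * whose corresponding feature flag is set on the boot CPU, so that absent
 * MSRs are never touched.
 */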
static void pm_save_spec_msr(void)
{
	struct msr_enumeration {
		u32 msr_no;
		u32 feature;
	} msr_enum[] = {
		{ MSR_IA32_SPEC_CTRL, X86_FEATURE_MSR_SPEC_CTRL },
		{ MSR_IA32_TSX_CTRL, X86_FEATURE_MSR_TSX_CTRL },
		{ MSR_TSX_FORCE_ABORT, X86_FEATURE_TSX_FORCE_ABORT },
		{ MSR_IA32_MCU_OPT_CTRL, X86_FEATURE_SRBDS_CTRL },
		{ MSR_AMD64_LS_CFG, X86_FEATURE_LS_CFG_SSBD },
		{ MSR_AMD64_DE_CFG, X86_FEATURE_LFENCE_RDTSC },
	};
	int i;

	for (i = 0; i < ARRAY_SIZE(msr_enum); i++) {
		if (boot_cpu_has(msr_enum[i].feature))
			msr_build_context(&msr_enum[i].msr_no, 1);
	}
}

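/*
 * Collect all MSR save requests at boot: DMI quirks, CPU-model quirks and
 * the speculation-control MSRs. Runs once as a device_initcall.
 */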
static int pm_check_save_msr(void)
{
	dmi_check_system(msr_save_dmi_table);
	pm_cpu_check(msr_save_cpu_table);
	pm_save_spec_msr();

	return 0;
}

device_initcall(pm_check_save_msr);