// SPDX-License-Identifier: GPL-2.0-only
/*
 * Suspend support specific for i386/x86-64.
 *
 * Copyright (c) 2007 Rafael J. Wysocki <rjw@sisk.pl>
 * Copyright (c) 2002 Pavel Machek <pavel@ucw.cz>
 * Copyright (c) 2001 Patrick Mochel <mochel@osdl.org>
 */

#include <linux/suspend.h>
#include <linux/export.h>
#include <linux/smp.h>
#include <linux/perf_event.h>
#include <linux/tboot.h>
#include <linux/dmi.h>
#include <linux/pgtable.h>

#include <asm/proto.h>
#include <asm/mtrr.h>
#include <asm/page.h>
#include <asm/mce.h>
#include <asm/suspend.h>
#include <asm/fpu/api.h>
#include <asm/debugreg.h>
#include <asm/cpu.h>
#include <asm/cacheinfo.h>
#include <asm/mmu_context.h>
#include <asm/cpu_device_id.h>
#include <asm/microcode.h>

#ifdef CONFIG_X86_32
__visible unsigned long saved_context_ebx;
__visible unsigned long saved_context_esp, saved_context_ebp;
__visible unsigned long saved_context_esi, saved_context_edi;
__visible unsigned long saved_context_eflags;
#endif
struct saved_context saved_context;

static void msr_save_context(struct saved_context *ctxt)
{
	struct saved_msr *msr = ctxt->saved_msrs.array;
	struct saved_msr *end = msr + ctxt->saved_msrs.num;

	while (msr < end) {
		if (msr->valid)
			rdmsrl(msr->info.msr_no, msr->info.reg.q);
		msr++;
	}
}

static void msr_restore_context(struct saved_context *ctxt)
{
	struct saved_msr *msr = ctxt->saved_msrs.array;
	struct saved_msr *end = msr + ctxt->saved_msrs.num;

	while (msr < end) {
		if (msr->valid)
			wrmsrl(msr->info.msr_no, msr->info.reg.q);
		msr++;
	}
}
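
/*
 * Illustrative sketch (not compiled; "example_msrs" is a hypothetical
 * name): the pair above operates on MSRs registered beforehand via
 * msr_build_context(), e.g.:
 *
 *	static const u32 example_msrs[] = { MSR_IA32_THERM_CONTROL };
 *	msr_build_context(example_msrs, ARRAY_SIZE(example_msrs));
 *
 * Registered MSRs are snapshotted by msr_save_context() on suspend and
 * written back by msr_restore_context() on resume, after the microcode
 * reload (see __restore_processor_state() below).
 */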

/**
 * __save_processor_state() - Save CPU registers before creating a
 *			      hibernation image and before restoring
 *			      the memory state from it
 * @ctxt: Structure to store the registers contents in.
 *
 * NOTE: If there is a CPU register the modification of which by the
 * boot kernel (ie. the kernel used for loading the hibernation image)
 * might affect the operations of the restored target kernel (ie. the one
 * saved in the hibernation image), then its contents must be saved by this
 * function.  In other words, if kernel A is hibernated and different
 * kernel B is used for loading the hibernation image into memory, the
 * kernel A's __save_processor_state() function must save all registers
 * needed by kernel A, so that it can operate correctly after the resume
 * regardless of what kernel B does in the meantime.
 */
static void __save_processor_state(struct saved_context *ctxt)
{
#ifdef CONFIG_X86_32
	mtrr_save_fixed_ranges(NULL);
#endif
	kernel_fpu_begin();

	/*
	 * descriptor tables
	 */
	store_idt(&ctxt->idt);

	/*
	 * We save it here, but restore it only in the hibernate case.
	 * For ACPI S3 resume, this is loaded via 'early_gdt_desc' in 64-bit
	 * mode in "secondary_startup_64". In 32-bit mode it is done via
	 * 'pmode_gdt' in wakeup_start.
	 */
	ctxt->gdt_desc.size = GDT_SIZE - 1;
	ctxt->gdt_desc.address = (unsigned long)get_cpu_gdt_rw(smp_processor_id());

	store_tr(ctxt->tr);

	/* XMM0..XMM15 should be handled by kernel_fpu_begin(). */
	/*
	 * segment registers
	 */
	savesegment(gs, ctxt->gs);
#ifdef CONFIG_X86_64
	savesegment(fs, ctxt->fs);
	savesegment(ds, ctxt->ds);
	savesegment(es, ctxt->es);

	rdmsrl(MSR_FS_BASE, ctxt->fs_base);
	rdmsrl(MSR_GS_BASE, ctxt->kernelmode_gs_base);
	rdmsrl(MSR_KERNEL_GS_BASE, ctxt->usermode_gs_base);
	mtrr_save_fixed_ranges(NULL);

	rdmsrl(MSR_EFER, ctxt->efer);
#endif

	/*
	 * control registers
	 */
	ctxt->cr0 = read_cr0();
	ctxt->cr2 = read_cr2();
	ctxt->cr3 = __read_cr3();
	ctxt->cr4 = __read_cr4();
	ctxt->misc_enable_saved = !rdmsrl_safe(MSR_IA32_MISC_ENABLE,
					       &ctxt->misc_enable);
	msr_save_context(ctxt);
}

/* Needed by apm.c */
void save_processor_state(void)
{
	__save_processor_state(&saved_context);
	x86_platform.save_sched_clock_state();
}
#ifdef CONFIG_X86_32
EXPORT_SYMBOL(save_processor_state);
#endif

static void do_fpu_end(void)
{
	/*
	 * Restore FPU regs if necessary.
	 */
	kernel_fpu_end();
}

static void fix_processor_context(void)
{
	int cpu = smp_processor_id();
#ifdef CONFIG_X86_64
	struct desc_struct *desc = get_cpu_gdt_rw(cpu);
	tss_desc tss;
#endif

	/*
	 * We need to reload TR, which requires that we change the
	 * GDT entry to indicate "available" first.
	 *
	 * XXX: This could probably all be replaced by a call to
	 * force_reload_TR().
	 */
	set_tss_desc(cpu, &get_cpu_entry_area(cpu)->tss.x86_tss);

#ifdef CONFIG_X86_64
	memcpy(&tss, &desc[GDT_ENTRY_TSS], sizeof(tss_desc));
	tss.type = 0x9;	/* The available 64-bit TSS (see AMD vol 2, pg 91) */
	write_gdt_entry(desc, GDT_ENTRY_TSS, &tss, DESC_TSS);

	syscall_init();	/* This sets MSR_*STAR and related */
#else
	if (boot_cpu_has(X86_FEATURE_SEP))
		enable_sep_cpu();
#endif
	load_TR_desc();	/* This does ltr */
	load_mm_ldt(current->active_mm);	/* This does lldt */
	initialize_tlbstate_and_flush();

	fpu__resume_cpu();

	/* The processor is back on the direct GDT, load back the fixmap */
	load_fixmap_gdt(cpu);
}
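
/*
 * Illustrative sketch of the XXX note above (not compiled here): using
 * force_reload_TR() from <asm/desc.h>, which performs the same
 * mark-TSS-available-then-LTR dance internally, the TR reload could
 * presumably be reduced to:
 *
 *	set_tss_desc(cpu, &get_cpu_entry_area(cpu)->tss.x86_tss);
 *	force_reload_TR();
 */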

/**
 * __restore_processor_state() - Restore the contents of CPU registers saved
 *				 by __save_processor_state()
 * @ctxt: Structure to load the registers contents from.
 *
 * The asm code that gets us here will have restored a usable GDT, although
 * it will be pointing to the wrong alias.
 */
static void notrace __restore_processor_state(struct saved_context *ctxt)
{
	struct cpuinfo_x86 *c;

	if (ctxt->misc_enable_saved)
		wrmsrl(MSR_IA32_MISC_ENABLE, ctxt->misc_enable);
	/*
	 * control registers
	 */
	/* cr4 was introduced in the Pentium CPU */
#ifdef CONFIG_X86_32
	if (ctxt->cr4)
		__write_cr4(ctxt->cr4);
#else
/* CONFIG X86_64 */
	wrmsrl(MSR_EFER, ctxt->efer);
	__write_cr4(ctxt->cr4);
#endif
	write_cr3(ctxt->cr3);
	write_cr2(ctxt->cr2);
	write_cr0(ctxt->cr0);

	/* Restore the IDT. */
	load_idt(&ctxt->idt);

	/*
	 * Just in case the asm code got us here with the SS, DS, or ES
	 * out of sync with the GDT, update them.
	 */
	loadsegment(ss, __KERNEL_DS);
	loadsegment(ds, __USER_DS);
	loadsegment(es, __USER_DS);

	/*
	 * Restore percpu access.  Percpu access can happen in exception
	 * handlers or in complicated helpers like load_gs_index().
	 */
#ifdef CONFIG_X86_64
	wrmsrl(MSR_GS_BASE, ctxt->kernelmode_gs_base);
#else
	loadsegment(fs, __KERNEL_PERCPU);
#endif

	/* Restore the TSS, RO GDT, LDT, and usermode-relevant MSRs. */
	fix_processor_context();

	/*
	 * Now that we have descriptor tables fully restored and working
	 * exception handling, restore the usermode segments.
	 */
#ifdef CONFIG_X86_64
	loadsegment(ds, ctxt->ds);
	loadsegment(es, ctxt->es);
	loadsegment(fs, ctxt->fs);
	load_gs_index(ctxt->gs);

	/*
	 * Restore FSBASE and GSBASE after restoring the selectors, since
	 * restoring the selectors clobbers the bases.  Keep in mind
	 * that MSR_KERNEL_GS_BASE is horribly misnamed.
	 */
	wrmsrl(MSR_FS_BASE, ctxt->fs_base);
	wrmsrl(MSR_KERNEL_GS_BASE, ctxt->usermode_gs_base);
#else
	loadsegment(gs, ctxt->gs);
#endif

	do_fpu_end();
	tsc_verify_tsc_adjust(true);
	x86_platform.restore_sched_clock_state();
	cache_bp_restore();
	perf_restore_debug_store();

	c = &cpu_data(smp_processor_id());
	if (cpu_has(c, X86_FEATURE_MSR_IA32_FEAT_CTL))
		init_ia32_feat_ctl(c);

	microcode_bsp_resume();

	/*
	 * This needs to happen after the microcode has been updated upon resume
	 * because some of the MSRs are "emulated" in microcode.
	 */
	msr_restore_context(ctxt);
}

/* Needed by apm.c */
void notrace restore_processor_state(void)
{
	__restore_processor_state(&saved_context);
}
#ifdef CONFIG_X86_32
EXPORT_SYMBOL(restore_processor_state);
#endif

#if defined(CONFIG_HIBERNATION) && defined(CONFIG_HOTPLUG_CPU)
static void __noreturn resume_play_dead(void)
{
	play_dead_common();
	tboot_shutdown(TB_SHUTDOWN_WFS);
	hlt_play_dead();
}

int hibernate_resume_nonboot_cpu_disable(void)
{
	void (*play_dead)(void) = smp_ops.play_dead;
	int ret;

	/*
	 * Ensure that MONITOR/MWAIT will not be used in the "play dead" loop
	 * during hibernate image restoration, because it is likely that the
	 * monitored address will be actually written to at that time and then
	 * the "dead" CPU will attempt to execute instructions again, but the
	 * address in its instruction pointer may not be possible to resolve
	 * any more at that point (the page tables used by it previously may
	 * have been overwritten by hibernate image data).
	 *
	 * First, make sure that we wake up all the potentially disabled SMT
	 * threads which have been initially brought up and then put into
	 * mwait/cpuidle sleep.
	 * Those will be put to proper (not interfering with hibernation
	 * resume) sleep afterwards, and the resumed kernel will decide itself
	 * what to do with them.
	 */
	ret = cpuhp_smt_enable();
	if (ret)
		return ret;
	smp_ops.play_dead = resume_play_dead;
	ret = freeze_secondary_cpus(0);
	smp_ops.play_dead = play_dead;
	return ret;
}
#endif

/*
 * When bsp_check() is called in hibernate and suspend, cpu hotplug
 * is disabled already. So it's unnecessary to handle race condition between
 * cpumask query and cpu hotplug.
 */
static int bsp_check(void)
{
	if (cpumask_first(cpu_online_mask) != 0) {
		pr_warn("CPU0 is offline.\n");
		return -ENODEV;
	}

	return 0;
}

static int bsp_pm_callback(struct notifier_block *nb, unsigned long action,
			   void *ptr)
{
	int ret = 0;

	switch (action) {
	case PM_SUSPEND_PREPARE:
	case PM_HIBERNATION_PREPARE:
		ret = bsp_check();
		break;
	default:
		break;
	}
	return notifier_from_errno(ret);
}

static int __init bsp_pm_check_init(void)
{
	/*
	 * Set this bsp_pm_callback as lower priority than
	 * cpu_hotplug_pm_callback. So cpu_hotplug_pm_callback will be called
	 * earlier to disable cpu hotplug before bsp online check.
	 */
	pm_notifier(bsp_pm_callback, -INT_MAX);
	return 0;
}

core_initcall(bsp_pm_check_init);

static int msr_build_context(const u32 *msr_id, const int num)
{
	struct saved_msrs *saved_msrs = &saved_context.saved_msrs;
	struct saved_msr *msr_array;
	int total_num;
	int i, j;

	total_num = saved_msrs->num + num;

	msr_array = kmalloc_array(total_num, sizeof(struct saved_msr), GFP_KERNEL);
	if (!msr_array) {
		pr_err("x86/pm: Can not allocate memory to save/restore MSRs during suspend.\n");
		return -ENOMEM;
	}

	if (saved_msrs->array) {
		/*
		 * Multiple callbacks can invoke this function, so copy any
		 * MSR save requests from previous invocations.
		 */
		memcpy(msr_array, saved_msrs->array,
		       sizeof(struct saved_msr) * saved_msrs->num);

		kfree(saved_msrs->array);
	}

	for (i = saved_msrs->num, j = 0; i < total_num; i++, j++) {
		u64 dummy;

		msr_array[i].info.msr_no = msr_id[j];
		msr_array[i].valid = !rdmsrl_safe(msr_id[j], &dummy);
		msr_array[i].info.reg.q = 0;
	}
	saved_msrs->num = total_num;
	saved_msrs->array = msr_array;

	return 0;
}

/*
 * The following sections are a quirk framework for problematic BIOSen:
 * sometimes MSRs are modified by the BIOS after suspend to RAM, which
 * might cause unexpected behavior after wakeup.
 * Thus we save/restore these specified MSRs across suspend/resume
 * in order to work around it.
 *
 * For any further problematic BIOSen/platforms,
 * please add your own function similar to msr_initialize_bdw.
 */
static int msr_initialize_bdw(const struct dmi_system_id *d)
{
	/* Add any extra MSR ids into this array. */
	u32 bdw_msr_id[] = { MSR_IA32_THERM_CONTROL };

	pr_info("x86/pm: %s detected, MSR saving is needed during suspending.\n", d->ident);
	return msr_build_context(bdw_msr_id, ARRAY_SIZE(bdw_msr_id));
}

static const struct dmi_system_id msr_save_dmi_table[] = {
	{
		.callback = msr_initialize_bdw,
		.ident = "BROADWELL BDX_EP",
		.matches = {
			DMI_MATCH(DMI_PRODUCT_NAME, "GRANTLEY"),
			DMI_MATCH(DMI_PRODUCT_VERSION, "E63448-400"),
		},
	},
	{}
};
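
/*
 * Illustrative sketch (hypothetical platform, not part of this file):
 * a quirk for a further problematic BIOS would mirror the
 * msr_initialize_bdw pattern above, i.e. a callback that registers the
 * affected MSRs:
 *
 *	static int msr_initialize_foo(const struct dmi_system_id *d)
 *	{
 *		u32 foo_msr_id[] = { MSR_IA32_THERM_CONTROL };
 *
 *		pr_info("x86/pm: %s detected, MSR saving is needed during suspending.\n",
 *			d->ident);
 *		return msr_build_context(foo_msr_id, ARRAY_SIZE(foo_msr_id));
 *	}
 *
 * plus a matching { .callback = msr_initialize_foo, ... } entry with the
 * platform's DMI strings in msr_save_dmi_table above.
 */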

static int msr_save_cpuid_features(const struct x86_cpu_id *c)
{
	u32 cpuid_msr_id[] = {
		MSR_AMD64_CPUID_FN_1,
	};

	pr_info("x86/pm: family %#hx cpu detected, MSR saving is needed during suspending.\n",
		c->family);

	return msr_build_context(cpuid_msr_id, ARRAY_SIZE(cpuid_msr_id));
}

static const struct x86_cpu_id msr_save_cpu_table[] = {
	X86_MATCH_VENDOR_FAM(AMD, 0x15, &msr_save_cpuid_features),
	X86_MATCH_VENDOR_FAM(AMD, 0x16, &msr_save_cpuid_features),
	{}
};

typedef int (*pm_cpu_match_t)(const struct x86_cpu_id *);
static int pm_cpu_check(const struct x86_cpu_id *c)
{
	const struct x86_cpu_id *m;
	int ret = 0;

	m = x86_match_cpu(msr_save_cpu_table);
	if (m) {
		pm_cpu_match_t fn;

		fn = (pm_cpu_match_t)m->driver_data;
		ret = fn(m);
	}

	return ret;
}

static void pm_save_spec_msr(void)
{
	struct msr_enumeration {
		u32 msr_no;
		u32 feature;
	} msr_enum[] = {
		{ MSR_IA32_SPEC_CTRL, X86_FEATURE_MSR_SPEC_CTRL },
		{ MSR_IA32_TSX_CTRL, X86_FEATURE_MSR_TSX_CTRL },
		{ MSR_TSX_FORCE_ABORT, X86_FEATURE_TSX_FORCE_ABORT },
		{ MSR_IA32_MCU_OPT_CTRL, X86_FEATURE_SRBDS_CTRL },
		{ MSR_AMD64_LS_CFG, X86_FEATURE_LS_CFG_SSBD },
		{ MSR_AMD64_DE_CFG, X86_FEATURE_LFENCE_RDTSC },
	};
	int i;

	for (i = 0; i < ARRAY_SIZE(msr_enum); i++) {
		if (boot_cpu_has(msr_enum[i].feature))
			msr_build_context(&msr_enum[i].msr_no, 1);
	}
}
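
/*
 * Extending the list is a matter of appending another { msr, feature }
 * pair to msr_enum above, e.g. (hypothetical names, for illustration
 * only):
 *
 *	{ MSR_SOME_NEW_CTRL, X86_FEATURE_SOME_NEW_CTRL },
 *
 * The boot_cpu_has() check here plus the rdmsrl_safe() probe in
 * msr_build_context() ensure that unsupported MSRs are never touched.
 */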

static int pm_check_save_msr(void)
{
	dmi_check_system(msr_save_dmi_table);
	pm_cpu_check(msr_save_cpu_table);
	pm_save_spec_msr();

	return 0;
}

device_initcall(pm_check_save_msr);