1/* SPDX-License-Identifier: GPL-2.0-only */
2.text
3#include <linux/linkage.h>
4#include <linux/objtool.h>
5#include <asm/segment.h>
6#include <asm/pgtable_types.h>
7#include <asm/page_types.h>
8#include <asm/msr.h>
9#include <asm/asm-offsets.h>
10#include <asm/frame.h>
11#include <asm/nospec-branch.h>
12
# Copyright 2003 Pavel Machek <pavel@suse.cz>
14
15.code64
16 /*
17 * Hooray, we are in Long 64-bit mode (but still running in low memory)
18 */
/*
 * wakeup_long64() - 64-bit resume entry after ACPI sleep.
 *
 * Reached once the CPU is back in long mode but still executing from
 * low memory (see comment above).  Verifies the in-memory wakeup
 * signature, reloads the flat kernel data segments, then restores the
 * stack and registers stashed by do_suspend_lowlevel() and jumps to
 * the saved resume address.
 *
 * Clobbers: %rax, %rdx (%rcx only on the error path).
 */
SYM_FUNC_START(wakeup_long64)
	movq	saved_magic(%rip), %rax
	movq	$0x123456789abcdef0, %rdx	/* expected wakeup signature */
	cmpq	%rdx, %rax
	je	2f

	/* stop here on a saved_magic mismatch */
	movq	$0xbad6d61676963, %rcx	/* 0xbad + "magic" in ASCII hex; debugger hint */
1:
	jmp	1b			/* spin: memory image cannot be trusted */
2:
	movw	$__KERNEL_DS, %ax	/* reload kernel data segments */
	movw	%ax, %ss
	movw	%ax, %ds
	movw	%ax, %es
	movw	%ax, %fs
	movw	%ax, %gs
	movq	saved_rsp(%rip), %rsp	/* back onto the pre-suspend stack */

	movq	saved_rbx(%rip), %rbx
	movq	saved_rdi(%rip), %rdi
	movq	saved_rsi(%rip), %rsi
	movq	saved_rbp(%rip), %rbp

	movq	saved_rip(%rip), %rax	/* resume target stored at suspend time */
	ANNOTATE_RETPOLINE_SAFE
	jmp	*%rax
SYM_FUNC_END(wakeup_long64)
47
/*
 * do_suspend_lowlevel() - low-level entry into ACPI S3 sleep.
 *
 * Saves processor state and a pt_regs-style register snapshot into
 * saved_context, records .Lresume_point as the address wakeup_long64()
 * should jump back to, and calls x86_acpi_enter_sleep_state(3).
 * Execution continues at .Lresume_point either on wakeup (via
 * wakeup_long64()) or directly if entering the sleep state failed.
 * Exits by tail-calling restore_processor_state(), whose return goes
 * back to our caller.
 */
SYM_FUNC_START(do_suspend_lowlevel)
	FRAME_BEGIN
	subq	$8, %rsp		/* scratch slot, matched by addq $8 below */
	xorl	%eax, %eax
	call	save_processor_state

	/*
	 * Snapshot the general-purpose registers and flags.  Use
	 * %rip-relative addressing (not a 32-bit absolute immediate) so
	 * the code stays position independent, matching the rest of the
	 * file's (%rip) accesses.
	 */
	leaq	saved_context(%rip), %rax
	movq	%rsp, pt_regs_sp(%rax)
	movq	%rbp, pt_regs_bp(%rax)
	movq	%rsi, pt_regs_si(%rax)
	movq	%rdi, pt_regs_di(%rax)
	movq	%rbx, pt_regs_bx(%rax)
	movq	%rcx, pt_regs_cx(%rax)
	movq	%rdx, pt_regs_dx(%rax)
	movq	%r8, pt_regs_r8(%rax)
	movq	%r9, pt_regs_r9(%rax)
	movq	%r10, pt_regs_r10(%rax)
	movq	%r11, pt_regs_r11(%rax)
	movq	%r12, pt_regs_r12(%rax)
	movq	%r13, pt_regs_r13(%rax)
	movq	%r14, pt_regs_r14(%rax)
	movq	%r15, pt_regs_r15(%rax)
	pushfq
	popq	pt_regs_flags(%rax)

	/* where wakeup_long64() resumes us; %rax is dead past this point */
	leaq	.Lresume_point(%rip), %rax
	movq	%rax, saved_rip(%rip)

	movq	%rsp, saved_rsp(%rip)
	movq	%rbp, saved_rbp(%rip)
	movq	%rbx, saved_rbx(%rip)
	movq	%rdi, saved_rdi(%rip)
	movq	%rsi, saved_rsi(%rip)

	addq	$8, %rsp
	movl	$3, %edi		/* sleep state: ACPI S3 */
	xorl	%eax, %eax
	call	x86_acpi_enter_sleep_state
	/* in case something went wrong, restore the machine status and go on */
	jmp	.Lresume_point

	.align 4
.Lresume_point:
	/* We don't restore %rax, it must be 0 anyway */
	leaq	saved_context(%rip), %rax
	/* control registers first, before touching the saved stack/regs */
	movq	saved_context_cr4(%rax), %rbx
	movq	%rbx, %cr4
	movq	saved_context_cr3(%rax), %rbx
	movq	%rbx, %cr3
	movq	saved_context_cr2(%rax), %rbx
	movq	%rbx, %cr2
	movq	saved_context_cr0(%rax), %rbx
	movq	%rbx, %cr0
	pushq	pt_regs_flags(%rax)
	popfq
	movq	pt_regs_sp(%rax), %rsp
	movq	pt_regs_bp(%rax), %rbp
	movq	pt_regs_si(%rax), %rsi
	movq	pt_regs_di(%rax), %rdi
	movq	pt_regs_bx(%rax), %rbx
	movq	pt_regs_cx(%rax), %rcx
	movq	pt_regs_dx(%rax), %rdx
	movq	pt_regs_r8(%rax), %r8
	movq	pt_regs_r9(%rax), %r9
	movq	pt_regs_r10(%rax), %r10
	movq	pt_regs_r11(%rax), %r11
	movq	pt_regs_r12(%rax), %r12
	movq	pt_regs_r13(%rax), %r13
	movq	pt_regs_r14(%rax), %r14
	movq	pt_regs_r15(%rax), %r15

#if defined(CONFIG_KASAN) && defined(CONFIG_KASAN_STACK)
	/*
	 * The suspend path may have poisoned some areas deeper in the stack,
	 * which we now need to unpoison.
	 */
	movq	%rsp, %rdi
	call	kasan_unpoison_task_stack_below
#endif

	xorl	%eax, %eax		/* keep the "%rax == 0" promise above */
	addq	$8, %rsp		/* drop the scratch slot reserved on entry */
	FRAME_END
	jmp	restore_processor_state	/* tail call; its ret returns to our caller */
SYM_FUNC_END(do_suspend_lowlevel)
132STACK_FRAME_NON_STANDARD do_suspend_lowlevel
133
.data
/*
 * Minimal resume context: stored by do_suspend_lowlevel() before
 * entering sleep, consumed by wakeup_long64() once the CPU is back in
 * long mode.
 */
saved_rbp:	.quad	0
saved_rsi:	.quad	0
saved_rdi:	.quad	0
saved_rbx:	.quad	0

saved_rip:	.quad	0	/* resume target (.Lresume_point) */
saved_rsp:	.quad	0	/* pre-suspend stack pointer */

/*
 * Wakeup signature slot: wakeup_long64() refuses to resume unless this
 * holds 0x123456789abcdef0.  Presumably written by the suspend code
 * outside this file before sleeping — confirm against sleep.c.
 */
SYM_DATA(saved_magic, .quad 0)
144

/* source: linux/arch/x86/kernel/acpi/wakeup_64.S */