/* SPDX-License-Identifier: GPL-2.0 */
/*
 *
 *	Trampoline.S	Derived from Setup.S by Linus Torvalds
 *
 *	4 Jan 1997 Michael Chastain: changed to GNU as.
 *	15 Sept 2005 Eric Biederman: 64bit PIC support
 *
 *	Entry: CS:IP point to the start of our code.  We are in real
 *	mode with no stack; the rest of the trampoline page is free
 *	for us to build a stack on, but everything else about the
 *	machine state is a mystery.
 *
 *	On entry to trampoline_start, the processor is in real mode
 *	with 16-bit addressing and 16-bit data.  CS has some value
 *	and IP is zero.  Thus, data addresses need to be absolute
 *	(no relocation) and are taken with regard to r_base.
 *
 *	With the addition of trampoline_pgd this code can now enter
 *	a 64bit kernel that lives at arbitrary 64bit physical
 *	addresses.
 *
 *	If you work on this file, check the object module with objdump
 *	--full-contents --reloc to make sure there are no relocation
 *	entries.
 */

#include <linux/linkage.h>
#include <asm/pgtable_types.h>
#include <asm/page_types.h>
#include <asm/msr.h>
#include <asm/segment.h>
#include <asm/processor-flags.h>
#include <asm/realmode.h>
#include "realmode.h"

	.text
	.code16

.macro LOCK_AND_LOAD_REALMODE_ESP lock_pa=0 lock_rip=0
	/*
	 * Make sure only one CPU fiddles with the realmode stack
	 */
.Llock_rm\@:
	.if \lock_pa
	lock btsl	$0, pa_tr_lock
	.elseif \lock_rip
	lock btsl	$0, tr_lock(%rip)
	.else
	lock btsl	$0, tr_lock
	.endif
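	/*
	 * A minimal spinlock: lock bts atomically copies bit 0 into CF
	 * and then sets it.  CF clear means the bit was free and we now
	 * own the lock; CF set means another CPU holds it, so pause and
	 * retry.
	 */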
	jnc	2f
	pause
	jmp	.Llock_rm\@
2:
	# Set up the stack
	movl	$rm_stack_end, %esp
.endm

	.balign	PAGE_SIZE
SYM_CODE_START(trampoline_start)
	cli			# We should be safe anyway
	wbinvd

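	/*
	 * LJMPW_RM (realmode.h) emits a far jump to the next line; its
	 * purpose is to reload CS with the trampoline's real-mode segment
	 * so the mov from %cs below reads a known-good value.
	 */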
	LJMPW_RM(1f)
1:
	mov	%cs, %ax	# Code and data in the same place
	mov	%ax, %ds
	mov	%ax, %es
	mov	%ax, %ss

	LOCK_AND_LOAD_REALMODE_ESP

	call	verify_cpu		# Verify the CPU supports long mode
	testl	%eax, %eax		# Check for return code
	jnz	no_longmode

.Lswitch_to_protected:
	/*
	 * When the kernel is at a non-default location, the GDT can end
	 * up beyond 16MB, and a plain lgdt cannot load such an address
	 * because the default operand size in real mode is 16-bit.  Use
	 * lgdtl instead to force a 32-bit operand size.
	 */

	lidtl	tr_idt	# load idt with 0, 0
	lgdtl	tr_gdt	# load gdt with whatever is appropriate

	movw	$__KERNEL_DS, %dx	# Data segment selector

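	/*
	 * CR0_STATE (processor-flags.h) is the kernel's canonical CR0
	 * value and includes X86_CR0_PG; masking PG off enables
	 * protection while leaving paging disabled.
	 */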
	# Enable protected mode
	movl	$(CR0_STATE & ~X86_CR0_PG), %eax
	movl	%eax, %cr0	# into protected mode

	# flush prefetch and jump to startup_32
	ljmpl	$__KERNEL32_CS, $pa_startup_32

no_longmode:
	hlt			# Park the CPU: long mode is unsupported
	jmp no_longmode
SYM_CODE_END(trampoline_start)

#ifdef CONFIG_AMD_MEM_ENCRYPT
/* SEV-ES supports non-zero IP for entry points - no alignment needed */
SYM_CODE_START(sev_es_trampoline_start)
	cli			# We should be safe anyway

	LJMPW_RM(1f)
1:
	mov	%cs, %ax	# Code and data in the same place
	mov	%ax, %ds
	mov	%ax, %es
	mov	%ax, %ss

	LOCK_AND_LOAD_REALMODE_ESP

	jmp	.Lswitch_to_protected
SYM_CODE_END(sev_es_trampoline_start)
#endif	/* CONFIG_AMD_MEM_ENCRYPT */

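/*
 * verify_cpu is shared with the main kernel; it returns zero in %eax
 * when the CPU supports long mode.
 */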
#include "../kernel/verify_cpu.S"

	.section ".text32","ax"
	.code32
	.balign 4
SYM_CODE_START(startup_32)
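	/*
	 * %dx was loaded with __KERNEL_DS before the far jump from the
	 * 16-bit (or compatibility-mode) side, and %esp still holds the
	 * stack pointer relative to the real-mode base, so rebase it.
	 */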
	movl	%edx, %ss
	addl	$pa_real_mode_base, %esp
	movl	%edx, %ds
	movl	%edx, %es
	movl	%edx, %fs
	movl	%edx, %gs

	/*
	 * Check for memory encryption support.  This is a safety net in
	 * case BIOS hasn't done the necessary step of setting the bit in
	 * the MSR for this AP.  If SME is active and we've gotten this
	 * far then it is safe for us to set the MSR bit and continue.
	 * If we don't we'll eventually crash trying to execute encrypted
	 * instructions.
	 */
	btl	$TH_FLAGS_SME_ACTIVE_BIT, pa_tr_flags
	jnc	.Ldone		# SME not active, nothing to do
	movl	$MSR_AMD64_SYSCFG, %ecx
	rdmsr
	bts	$MSR_AMD64_SYSCFG_MEM_ENCRYPT_BIT, %eax
	jc	.Ldone		# already set, e.g. by the BIOS

	/*
	 * Memory encryption is enabled but the SME enable bit for this
	 * CPU has not been set.  It is safe to set it, so do so.
	 */
	wrmsr
.Ldone:

	movl	pa_tr_cr4, %eax
	movl	%eax, %cr4		# Enable PAE mode

	# Set up trampoline 4-level pagetables
	movl	$pa_trampoline_pgd, %eax
	movl	%eax, %cr3

	# Set up EFER
	movl	$MSR_EFER, %ecx
	rdmsr
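	/*
	 * rdmsr/wrmsr move the 64-bit MSR value through %edx:%eax, hence
	 * the two 32-bit compares against the stored tr_efer below.
	 */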
	/*
	 * Skip writing to EFER if the register already has the desired
	 * value (to avoid #VE for the TDX guest).
	 */
	cmp	pa_tr_efer, %eax
	jne	.Lwrite_efer
	cmp	pa_tr_efer + 4, %edx
	je	.Ldone_efer
.Lwrite_efer:
	movl	pa_tr_efer, %eax
	movl	pa_tr_efer + 4, %edx
	wrmsr

.Ldone_efer:
	# Enable paging and in turn activate Long Mode.
	movl	$CR0_STATE, %eax
	movl	%eax, %cr0

	/*
	 * At this point we're in long mode but in 32bit compatibility
	 * mode with EFER.LME = 1, CS.L = 0, CS.D = 1 (and in turn
	 * EFER.LMA = 1).  To jump into 64bit mode, we use the new GDT
	 * entry __KERNEL_CS, which has CS.L = 1.
	 */
	ljmpl	$__KERNEL_CS, $pa_startup_64
SYM_CODE_END(startup_32)

SYM_CODE_START(pa_trampoline_compat)
	/*
	 * In compatibility mode.  Prep ESP and DX for startup_32, then
	 * disable paging and complete the switch to legacy 32-bit mode.
	 * Reached via the indirect ljmpl through tr_compat below.
	 */
	LOCK_AND_LOAD_REALMODE_ESP lock_pa=1
	movw	$__KERNEL_DS, %dx

	movl	$(CR0_STATE & ~X86_CR0_PG), %eax
	movl	%eax, %cr0
	ljmpl	$__KERNEL32_CS, $pa_startup_32
SYM_CODE_END(pa_trampoline_compat)

	.section ".text64","ax"
	.code64
	.balign 4
SYM_CODE_START(startup_64)
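	/*
	 * tr_start is filled in by the kernel before the AP is kicked
	 * off (normally with the address of secondary_startup_64).
	 */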
	# Now jump into the kernel using virtual addresses
	jmpq	*tr_start(%rip)
SYM_CODE_END(startup_64)

SYM_CODE_START(trampoline_start64)
	/*
	 * APs start here on a direct transfer from 64-bit BIOS with identity
	 * mapped page tables.  Load the kernel's GDT in order to gear down to
	 * 32-bit mode (to handle 4-level vs. 5-level paging), and to (re)load
	 * segment registers.  Load the zero IDT so any fault triggers a
	 * shutdown instead of jumping back into BIOS.
	 */
	lidt	tr_idt(%rip)
	lgdt	tr_gdt64(%rip)

	/* Check if paging mode has to be changed */
	movq	%cr4, %rax
	xorl	tr_cr4(%rip), %eax
	testl	$X86_CR4_LA57, %eax
	jnz	.L_switch_paging
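	/*
	 * The xor leaves only the bits that differ between the live CR4
	 * and the kernel's tr_cr4; a differing X86_CR4_LA57 means the
	 * paging depth (4- vs 5-level) must change, which requires
	 * disabling paging from compatibility mode first.
	 */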

	/* Paging mode is correct, proceed in 64-bit mode */

	LOCK_AND_LOAD_REALMODE_ESP lock_rip=1

	movw	$__KERNEL_DS, %dx
	movl	%edx, %ss
	addl	$pa_real_mode_base, %esp
	movl	%edx, %ds
	movl	%edx, %es
	movl	%edx, %fs
	movl	%edx, %gs

	movl	$pa_trampoline_pgd, %eax
	movq	%rax, %cr3

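	/*
	 * Far "return" into the kernel: lretq pops RIP and then CS,
	 * loading both at once.  There is no direct far jump to a 64-bit
	 * target, so this is the idiomatic way to switch CS in long mode.
	 */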
	pushq	$__KERNEL_CS
	pushq	tr_start(%rip)
	lretq
.L_switch_paging:
	/*
	 * To switch between 4- and 5-level paging modes, it is necessary
	 * to disable paging.  This must be done in compatibility mode.
	 */
	ljmpl	*tr_compat(%rip)
SYM_CODE_END(trampoline_start64)

	.section ".rodata","a"
	# Duplicate the global descriptor table
	# so the kernel can live anywhere
	.balign	16
SYM_DATA_START(tr_gdt)
	.short	tr_gdt_end - tr_gdt - 1	# gdt limit
	.long	pa_tr_gdt
	.short	0
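	# The null descriptor is never dereferenced, so its slot doubles
	# as the lgdt operand above (16-bit limit + 32-bit base).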
	.quad	0x00cf9b000000ffff	# __KERNEL32_CS
	.quad	0x00af9b000000ffff	# __KERNEL_CS
	.quad	0x00cf93000000ffff	# __KERNEL_DS
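	# All three descriptors cover base 0 with limit 0xfffff in 4K
	# pages (4GiB).  Access byte 0x9b = present, DPL 0, execute/read
	# code; 0x93 = present, DPL 0, read/write data.  Flag nibble
	# 0xc = G=1, D/B=1 (32-bit); 0xa = G=1, L=1 (64-bit code).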
SYM_DATA_END_LABEL(tr_gdt, SYM_L_LOCAL, tr_gdt_end)

SYM_DATA_START(tr_gdt64)
	.short	tr_gdt_end - tr_gdt - 1	# gdt limit
	.long	pa_tr_gdt
	.long	0
SYM_DATA_END(tr_gdt64)
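	# tr_gdt64 is the 10-byte lgdt operand used from 64-bit mode:
	# 16-bit limit plus 64-bit base (pa_tr_gdt zero-extended by the
	# trailing .long 0).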

SYM_DATA_START(tr_compat)
	.long	pa_trampoline_compat
	.short	__KERNEL32_CS
SYM_DATA_END(tr_compat)
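	# tr_compat is the m16:32 far pointer consumed by the indirect
	# ljmpl in trampoline_start64: a 32-bit offset followed by the
	# __KERNEL32_CS selector.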

	.bss
	.balign	PAGE_SIZE
SYM_DATA(trampoline_pgd, .space PAGE_SIZE)

	.balign	8
SYM_DATA_START(trampoline_header)
	SYM_DATA_LOCAL(tr_start,	.space 8)
	SYM_DATA(tr_efer,		.space 8)
	SYM_DATA(tr_cr4,		.space 4)
	SYM_DATA(tr_flags,		.space 4)
	SYM_DATA(tr_lock,		.space 4)
SYM_DATA_END(trampoline_header)
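	# This layout must stay in sync with struct trampoline_header in
	# asm/realmode.h; the kernel fills these fields in before
	# starting APs.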

#include "trampoline_common.S"

