/* SPDX-License-Identifier: GPL-2.0 */
#include <linux/linkage.h>
#include <asm/asm.h>
#include <asm/asm-offsets.h>
#include <asm/bitsperlong.h>
#include <asm/frame.h>
#include <asm/kvm_vcpu_regs.h>
#include <asm/nospec-branch.h>
#include "kvm-asm-offsets.h"

#define WORD_SIZE (BITS_PER_LONG / 8)

/* Intentionally omit RAX as it's context switched by hardware */
#define VCPU_RCX (SVM_vcpu_arch_regs + __VCPU_REGS_RCX * WORD_SIZE)
#define VCPU_RDX (SVM_vcpu_arch_regs + __VCPU_REGS_RDX * WORD_SIZE)
#define VCPU_RBX (SVM_vcpu_arch_regs + __VCPU_REGS_RBX * WORD_SIZE)
/* Intentionally omit RSP as it's context switched by hardware */
#define VCPU_RBP (SVM_vcpu_arch_regs + __VCPU_REGS_RBP * WORD_SIZE)
#define VCPU_RSI (SVM_vcpu_arch_regs + __VCPU_REGS_RSI * WORD_SIZE)
#define VCPU_RDI (SVM_vcpu_arch_regs + __VCPU_REGS_RDI * WORD_SIZE)

#ifdef CONFIG_X86_64
#define VCPU_R8  (SVM_vcpu_arch_regs + __VCPU_REGS_R8  * WORD_SIZE)
#define VCPU_R9  (SVM_vcpu_arch_regs + __VCPU_REGS_R9  * WORD_SIZE)
#define VCPU_R10 (SVM_vcpu_arch_regs + __VCPU_REGS_R10 * WORD_SIZE)
#define VCPU_R11 (SVM_vcpu_arch_regs + __VCPU_REGS_R11 * WORD_SIZE)
#define VCPU_R12 (SVM_vcpu_arch_regs + __VCPU_REGS_R12 * WORD_SIZE)
#define VCPU_R13 (SVM_vcpu_arch_regs + __VCPU_REGS_R13 * WORD_SIZE)
#define VCPU_R14 (SVM_vcpu_arch_regs + __VCPU_REGS_R14 * WORD_SIZE)
#define VCPU_R15 (SVM_vcpu_arch_regs + __VCPU_REGS_R15 * WORD_SIZE)
#endif

#define SVM_vmcb01_pa (SVM_vmcb01 + KVM_VMCB_pa)

.section .noinstr.text, "ax"

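/*
 * The RESTORE_*_SPEC_CTRL macros below emit only a patched jump at the call
 * site; the matching RESTORE_*_SPEC_CTRL_BODY macros provide the out-of-line
 * slow path and are instantiated after each function's RET.  Numeric labels
 * 800/801 (guest) and 900/901 (host) link the jump with its return point.
 */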
.macro RESTORE_GUEST_SPEC_CTRL
	/* No need to do anything if SPEC_CTRL is unset or V_SPEC_CTRL is set */
	ALTERNATIVE_2 "", \
		"jmp 800f", X86_FEATURE_MSR_SPEC_CTRL, \
		"", X86_FEATURE_V_SPEC_CTRL
801:
.endm
.macro RESTORE_GUEST_SPEC_CTRL_BODY
800:
	/*
	 * SPEC_CTRL handling: if the guest's SPEC_CTRL value differs from the
	 * host's, write the MSR.  This is kept out-of-line so that the common
	 * case does not have to jump.
	 *
	 * IMPORTANT: To avoid RSB underflow attacks and any other nastiness,
	 * there must not be any returns or indirect branches between this code
	 * and vmentry.
	 */
	movl SVM_spec_ctrl(%_ASM_DI), %eax
	cmp PER_CPU_VAR(x86_spec_ctrl_current), %eax
	je 801b
	mov $MSR_IA32_SPEC_CTRL, %ecx
	xor %edx, %edx
	wrmsr
	jmp 801b
.endm

.macro RESTORE_HOST_SPEC_CTRL
	/* No need to do anything if SPEC_CTRL is unset or V_SPEC_CTRL is set */
	ALTERNATIVE_2 "", \
		"jmp 900f", X86_FEATURE_MSR_SPEC_CTRL, \
		"", X86_FEATURE_V_SPEC_CTRL
901:
.endm
.macro RESTORE_HOST_SPEC_CTRL_BODY spec_ctrl_intercepted:req
900:
	/* Same for after vmexit. */
	mov $MSR_IA32_SPEC_CTRL, %ecx

	/*
	 * Load the value that the guest had written into MSR_IA32_SPEC_CTRL,
	 * if it was not intercepted during guest execution.
	 */
	cmpb $0, \spec_ctrl_intercepted
	jnz 998f
	rdmsr
	movl %eax, SVM_spec_ctrl(%_ASM_DI)
998:

	/* Now restore the host value of the MSR if different from the guest's. */
	movl PER_CPU_VAR(x86_spec_ctrl_current), %eax
	cmp SVM_spec_ctrl(%_ASM_DI), %eax
	je 901b
	xor %edx, %edx
	wrmsr
	jmp 901b
.endm


/**
 * __svm_vcpu_run - Run a vCPU via a transition to SVM guest mode
 * @svm: struct vcpu_svm *
 * @spec_ctrl_intercepted: bool
 */
SYM_FUNC_START(__svm_vcpu_run)
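	/*
	 * Save callee-saved GPRs.  Other than RAX and RSP, which are context
	 * switched by hardware, the GPRs hold guest values across
	 * VMRUN/#VMEXIT, so they must be preserved manually for the C caller.
	 */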
	push %_ASM_BP
	mov %_ASM_SP, %_ASM_BP
#ifdef CONFIG_X86_64
	push %r15
	push %r14
	push %r13
	push %r12
#else
	push %edi
	push %esi
#endif
	push %_ASM_BX

	/*
	 * Save variables needed after vmexit on the stack, in inverse
	 * order compared to when they are needed.
	 */

	/* Accessed directly from the stack in RESTORE_HOST_SPEC_CTRL. */
	push %_ASM_ARG2

	/* Needed to restore access to percpu variables. */
	__ASM_SIZE(push) PER_CPU_VAR(svm_data + SD_save_area_pa)

	/* Finally save @svm. */
	push %_ASM_ARG1

.ifnc _ASM_ARG1, _ASM_DI
	/*
	 * Stash @svm in RDI early. On 32-bit, arguments are in RAX, RCX
	 * and RDX which are clobbered by RESTORE_GUEST_SPEC_CTRL.
	 */
	mov %_ASM_ARG1, %_ASM_DI
.endif

	/* Clobbers RAX, RCX, RDX. */
	RESTORE_GUEST_SPEC_CTRL

	/*
	 * Use a single vmcb (vmcb01 because it's always valid) for
	 * context switching guest state via VMLOAD/VMSAVE, that way
	 * the state doesn't need to be copied between vmcb01 and
	 * vmcb02 when switching vmcbs for nested virtualization.
	 */
	mov SVM_vmcb01_pa(%_ASM_DI), %_ASM_AX
1:	vmload %_ASM_AX
2:

	/* Get svm->current_vmcb->pa into RAX. */
	mov SVM_current_vmcb(%_ASM_DI), %_ASM_AX
	mov KVM_VMCB_pa(%_ASM_AX), %_ASM_AX

	/* Load guest registers. */
	mov VCPU_RCX(%_ASM_DI), %_ASM_CX
	mov VCPU_RDX(%_ASM_DI), %_ASM_DX
	mov VCPU_RBX(%_ASM_DI), %_ASM_BX
	mov VCPU_RBP(%_ASM_DI), %_ASM_BP
	mov VCPU_RSI(%_ASM_DI), %_ASM_SI
#ifdef CONFIG_X86_64
	mov VCPU_R8 (%_ASM_DI), %r8
	mov VCPU_R9 (%_ASM_DI), %r9
	mov VCPU_R10(%_ASM_DI), %r10
	mov VCPU_R11(%_ASM_DI), %r11
	mov VCPU_R12(%_ASM_DI), %r12
	mov VCPU_R13(%_ASM_DI), %r13
	mov VCPU_R14(%_ASM_DI), %r14
	mov VCPU_R15(%_ASM_DI), %r15
#endif
	mov VCPU_RDI(%_ASM_DI), %_ASM_DI

	/* Enter guest mode */
	sti

3:	vmrun %_ASM_AX
4:
	cli

	/* Pop @svm to RAX while it's the only available register. */
	pop %_ASM_AX

	/* Save all guest registers. */
	mov %_ASM_CX, VCPU_RCX(%_ASM_AX)
	mov %_ASM_DX, VCPU_RDX(%_ASM_AX)
	mov %_ASM_BX, VCPU_RBX(%_ASM_AX)
	mov %_ASM_BP, VCPU_RBP(%_ASM_AX)
	mov %_ASM_SI, VCPU_RSI(%_ASM_AX)
	mov %_ASM_DI, VCPU_RDI(%_ASM_AX)
#ifdef CONFIG_X86_64
	mov %r8,  VCPU_R8 (%_ASM_AX)
	mov %r9,  VCPU_R9 (%_ASM_AX)
	mov %r10, VCPU_R10(%_ASM_AX)
	mov %r11, VCPU_R11(%_ASM_AX)
	mov %r12, VCPU_R12(%_ASM_AX)
	mov %r13, VCPU_R13(%_ASM_AX)
	mov %r14, VCPU_R14(%_ASM_AX)
	mov %r15, VCPU_R15(%_ASM_AX)
#endif

	/* @svm can stay in RDI from now on. */
	mov %_ASM_AX, %_ASM_DI

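	/* VMSAVE the guest state that VMLOAD loaded above back into vmcb01. */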
	mov SVM_vmcb01_pa(%_ASM_DI), %_ASM_AX
5:	vmsave %_ASM_AX
6:

	/* Restores GSBASE among other things, allowing access to percpu data. */
	pop %_ASM_AX
7:	vmload %_ASM_AX
8:

#ifdef CONFIG_MITIGATION_RETPOLINE
	/* IMPORTANT: Stuff the RSB immediately after VM-Exit, before RET! */
	FILL_RETURN_BUFFER %_ASM_AX, RSB_CLEAR_LOOPS, X86_FEATURE_RETPOLINE
#endif

	/* Clobbers RAX, RCX, RDX. */
	RESTORE_HOST_SPEC_CTRL

	/*
	 * Mitigate RETBleed for AMD/Hygon Zen uarch. RET should be
	 * untrained as soon as we exit the VM and are back to the
	 * kernel. This should be done before re-enabling interrupts
	 * because interrupt handlers won't sanitize 'ret' if the return is
	 * from the kernel.
	 */
	UNTRAIN_RET_VM

	/*
	 * Clear all general purpose registers except RSP and RAX to prevent
	 * speculative use of the guest's values, even those that are reloaded
	 * via the stack.  In theory, an L1 cache miss when restoring registers
	 * could lead to speculative execution with the guest's values.
	 * Zeroing XORs are dirt cheap, i.e. the extra paranoia is essentially
	 * free.  RSP and RAX are exempt as they are restored by hardware
	 * during VM-Exit.
	 */
	xor %ecx, %ecx
	xor %edx, %edx
	xor %ebx, %ebx
	xor %ebp, %ebp
	xor %esi, %esi
	xor %edi, %edi
#ifdef CONFIG_X86_64
	xor %r8d,  %r8d
	xor %r9d,  %r9d
	xor %r10d, %r10d
	xor %r11d, %r11d
	xor %r12d, %r12d
	xor %r13d, %r13d
	xor %r14d, %r14d
	xor %r15d, %r15d
#endif

	/* "Pop" @spec_ctrl_intercepted. */
	pop %_ASM_BX

	pop %_ASM_BX

#ifdef CONFIG_X86_64
	pop %r12
	pop %r13
	pop %r14
	pop %r15
#else
	pop %esi
	pop %edi
#endif
	pop %_ASM_BP
	RET

	RESTORE_GUEST_SPEC_CTRL_BODY
	RESTORE_HOST_SPEC_CTRL_BODY (%_ASM_SP)

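	/*
	 * Out-of-line fault handlers for the VMLOAD (1), VMRUN (3), VMSAVE (5)
	 * and host VMLOAD (7) instructions above, wired up via the exception
	 * table entries below.  If KVM is being torn down (kvm_rebooting), the
	 * fault is forgiven and execution resumes after the faulting
	 * instruction; otherwise, die with ud2.
	 */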
10:	cmpb $0, _ASM_RIP(kvm_rebooting)
	jne 2b
	ud2
30:	cmpb $0, _ASM_RIP(kvm_rebooting)
	jne 4b
	ud2
50:	cmpb $0, _ASM_RIP(kvm_rebooting)
	jne 6b
	ud2
70:	cmpb $0, _ASM_RIP(kvm_rebooting)
	jne 8b
	ud2

	_ASM_EXTABLE(1b, 10b)
	_ASM_EXTABLE(3b, 30b)
	_ASM_EXTABLE(5b, 50b)
	_ASM_EXTABLE(7b, 70b)

SYM_FUNC_END(__svm_vcpu_run)

#ifdef CONFIG_KVM_AMD_SEV


#ifdef CONFIG_X86_64
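/*
 * Offsets into the host save area used to stash GPRs across VMRUN.
 * SEV_ES_GPRS_BASE is assumed to be the byte offset of the GPR block within
 * the save area, with individual registers laid out in __VCPU_REGS_* order.
 */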
#define SEV_ES_GPRS_BASE 0x300
#define SEV_ES_RBX (SEV_ES_GPRS_BASE + __VCPU_REGS_RBX * WORD_SIZE)
#define SEV_ES_RBP (SEV_ES_GPRS_BASE + __VCPU_REGS_RBP * WORD_SIZE)
#define SEV_ES_RSI (SEV_ES_GPRS_BASE + __VCPU_REGS_RSI * WORD_SIZE)
#define SEV_ES_RDI (SEV_ES_GPRS_BASE + __VCPU_REGS_RDI * WORD_SIZE)
#define SEV_ES_R12 (SEV_ES_GPRS_BASE + __VCPU_REGS_R12 * WORD_SIZE)
#define SEV_ES_R13 (SEV_ES_GPRS_BASE + __VCPU_REGS_R13 * WORD_SIZE)
#define SEV_ES_R14 (SEV_ES_GPRS_BASE + __VCPU_REGS_R14 * WORD_SIZE)
#define SEV_ES_R15 (SEV_ES_GPRS_BASE + __VCPU_REGS_R15 * WORD_SIZE)
#endif

/**
 * __svm_sev_es_vcpu_run - Run a SEV-ES vCPU via a transition to SVM guest mode
 * @svm: struct vcpu_svm *
 * @spec_ctrl_intercepted: bool
 * @hostsa: host save area (referenced via RDX)
 */
SYM_FUNC_START(__svm_sev_es_vcpu_run)
	FRAME_BEGIN

	/*
	 * Save non-volatile (callee-saved) registers to the host save area.
	 * Except for RAX and RSP, all GPRs are restored on #VMEXIT, but not
	 * saved on VMRUN.
	 */
	mov %rbp, SEV_ES_RBP (%rdx)
	mov %r15, SEV_ES_R15 (%rdx)
	mov %r14, SEV_ES_R14 (%rdx)
	mov %r13, SEV_ES_R13 (%rdx)
	mov %r12, SEV_ES_R12 (%rdx)
	mov %rbx, SEV_ES_RBX (%rdx)

	/*
	 * Save volatile registers that hold arguments that are needed after
	 * #VMEXIT (RDI=@svm and RSI=@spec_ctrl_intercepted).
	 */
	mov %rdi, SEV_ES_RDI (%rdx)
	mov %rsi, SEV_ES_RSI (%rdx)

	/* Clobbers RAX, RCX, RDX (@hostsa). */
	RESTORE_GUEST_SPEC_CTRL

	/* Get svm->current_vmcb->pa into RAX. */
	mov SVM_current_vmcb(%rdi), %rax
	mov KVM_VMCB_pa(%rax), %rax

	/* Enter guest mode */
	sti

1:	vmrun %rax

2:	cli

#ifdef CONFIG_MITIGATION_RETPOLINE
	/* IMPORTANT: Stuff the RSB immediately after VM-Exit, before RET! */
	FILL_RETURN_BUFFER %rax, RSB_CLEAR_LOOPS, X86_FEATURE_RETPOLINE
#endif

	/* Clobbers RAX, RCX, RDX, consumes RDI (@svm) and RSI (@spec_ctrl_intercepted). */
	RESTORE_HOST_SPEC_CTRL

	/*
	 * Mitigate RETBleed for AMD/Hygon Zen uarch. RET should be
	 * untrained as soon as we exit the VM and are back to the
	 * kernel. This should be done before re-enabling interrupts
	 * because interrupt handlers won't sanitize RET if the return is
	 * from the kernel.
	 */
	UNTRAIN_RET_VM

	FRAME_END
	RET

	RESTORE_GUEST_SPEC_CTRL_BODY
	RESTORE_HOST_SPEC_CTRL_BODY %sil

3:	cmpb $0, kvm_rebooting(%rip)
	jne 2b
	ud2

	_ASM_EXTABLE(1b, 3b)

SYM_FUNC_END(__svm_sev_es_vcpu_run)
#endif /* CONFIG_KVM_AMD_SEV */
