// SPDX-License-Identifier: GPL-2.0-only
/*
 * Kernel-based Virtual Machine driver for Linux
 *
 * AMD SVM support
 *
 * Copyright (C) 2006 Qumranet, Inc.
 * Copyright 2010 Red Hat, Inc. and/or its affiliates.
 *
 * Authors:
 *   Yaniv Kamay  <yaniv@qumranet.com>
 *   Avi Kivity   <avi@qumranet.com>
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/kvm_types.h>
#include <linux/kvm_host.h>
#include <linux/kernel.h>

#include <asm/msr-index.h>
#include <asm/debugreg.h>

#include "kvm_emulate.h"
#include "trace.h"
#include "mmu.h"
#include "x86.h"
#include "smm.h"
#include "cpuid.h"
#include "lapic.h"
#include "svm.h"
#include "hyperv.h"

#define CC KVM_NESTED_VMENTER_CONSISTENCY_CHECK
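/*
 * CC() (KVM_NESTED_VMENTER_CONSISTENCY_CHECK, see kvm's x86.h) evaluates a
 * vmcb12 consistency check and, if it fails, records the failing expression
 * via tracepoint, so rejected nested VM-Enters can be diagnosed from traces.
 */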

static void nested_svm_inject_npf_exit(struct kvm_vcpu *vcpu,
				       struct x86_exception *fault)
{
	struct vcpu_svm *svm = to_svm(vcpu);
	struct vmcb *vmcb = svm->vmcb;

	if (vmcb->control.exit_code != SVM_EXIT_NPF) {
		/*
		 * TODO: track the cause of the nested page fault, and
		 * correctly fill in the high bits of exit_info_1.
		 */
		vmcb->control.exit_code = SVM_EXIT_NPF;
		vmcb->control.exit_code_hi = 0;
		vmcb->control.exit_info_1 = (1ULL << 32);
		vmcb->control.exit_info_2 = fault->address;
	}

	vmcb->control.exit_info_1 &= ~0xffffffffULL;
	vmcb->control.exit_info_1 |= fault->error_code;

	nested_svm_vmexit(svm);
}

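/*
 * Read a PDPTE from the page-directory-pointer table that L1's nested_cr3
 * references; installed below as the ->get_pdptr() hook for shadowing L1's
 * NPT when L1 uses PAE paging for its nested page tables.
 */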
static u64 nested_svm_get_tdp_pdptr(struct kvm_vcpu *vcpu, int index)
{
	struct vcpu_svm *svm = to_svm(vcpu);
	u64 cr3 = svm->nested.ctl.nested_cr3;
	u64 pdpte;
	int ret;

	ret = kvm_vcpu_read_guest_page(vcpu, gpa_to_gfn(cr3), &pdpte,
				       offset_in_page(cr3) + index * 8, 8);
	if (ret)
		return 0;
	return pdpte;
}

static unsigned long nested_svm_get_tdp_cr3(struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	return svm->nested.ctl.nested_cr3;
}

static void nested_svm_init_mmu_context(struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	WARN_ON(mmu_is_nested(vcpu));

	vcpu->arch.mmu = &vcpu->arch.guest_mmu;

	/*
	 * The NPT format depends on L1's CR4 and EFER, which are in vmcb01.
	 * Note, when called via KVM_SET_NESTED_STATE, that state may _not_
	 * match current vCPU state.  CR0.WP is explicitly ignored, while
	 * CR0.PG is required.
	 */
	kvm_init_shadow_npt_mmu(vcpu, X86_CR0_PG, svm->vmcb01.ptr->save.cr4,
				svm->vmcb01.ptr->save.efer,
				svm->nested.ctl.nested_cr3);
	vcpu->arch.mmu->get_guest_pgd = nested_svm_get_tdp_cr3;
	vcpu->arch.mmu->get_pdptr = nested_svm_get_tdp_pdptr;
	vcpu->arch.mmu->inject_page_fault = nested_svm_inject_npf_exit;
	vcpu->arch.walk_mmu = &vcpu->arch.nested_mmu;
}

static void nested_svm_uninit_mmu_context(struct kvm_vcpu *vcpu)
{
	vcpu->arch.mmu = &vcpu->arch.root_mmu;
	vcpu->arch.walk_mmu = &vcpu->arch.root_mmu;
}

static bool nested_vmcb_needs_vls_intercept(struct vcpu_svm *svm)
{
	if (!guest_can_use(&svm->vcpu, X86_FEATURE_V_VMSAVE_VMLOAD))
		return true;

	if (!nested_npt_enabled(svm))
		return true;

	if (!(svm->nested.ctl.virt_ext & VIRTUAL_VMLOAD_VMSAVE_ENABLE_MASK))
		return true;

	return false;
}

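/*
 * Recompute the effective (vmcb02) intercepts as the union of KVM's own
 * intercepts from vmcb01 and L1's intercepts from vmcb12, with the
 * adjustments below (CR8/VINTR, VMMCALL, SMI, VMLOAD/VMSAVE).
 */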
void recalc_intercepts(struct vcpu_svm *svm)
{
	struct vmcb_control_area *c, *h;
	struct vmcb_ctrl_area_cached *g;
	unsigned int i;

	vmcb_mark_dirty(svm->vmcb, VMCB_INTERCEPTS);

	if (!is_guest_mode(&svm->vcpu))
		return;

	c = &svm->vmcb->control;
	h = &svm->vmcb01.ptr->control;
	g = &svm->nested.ctl;

	for (i = 0; i < MAX_INTERCEPT; i++)
		c->intercepts[i] = h->intercepts[i];

	if (g->int_ctl & V_INTR_MASKING_MASK) {
		/*
		 * If L2 is active and V_INTR_MASKING is enabled in vmcb12,
		 * disable intercept of CR8 writes as L2's CR8 does not affect
		 * any interrupt KVM may want to inject.
		 *
		 * Similarly, disable intercept of virtual interrupts (used to
		 * detect interrupt windows) if the saved RFLAGS.IF is '0', as
		 * the effective RFLAGS.IF for L1 interrupts will never be set
		 * while L2 is running (L2's RFLAGS.IF doesn't affect L1 IRQs).
		 */
		vmcb_clr_intercept(c, INTERCEPT_CR8_WRITE);
		if (!(svm->vmcb01.ptr->save.rflags & X86_EFLAGS_IF))
			vmcb_clr_intercept(c, INTERCEPT_VINTR);
	}

	/*
	 * We want to see VMMCALLs from a nested guest only when the Hyper-V
	 * L2 TLB flush feature is enabled.
	 */
	if (!nested_svm_l2_tlb_flush_enabled(&svm->vcpu))
		vmcb_clr_intercept(c, INTERCEPT_VMMCALL);

	for (i = 0; i < MAX_INTERCEPT; i++)
		c->intercepts[i] |= g->intercepts[i];

	/* If SMI is not intercepted, ignore guest SMI intercept as well */
	if (!intercept_smi)
		vmcb_clr_intercept(c, INTERCEPT_SMI);

	if (nested_vmcb_needs_vls_intercept(svm)) {
		/*
		 * If virtual VMLOAD/VMSAVE is not enabled for L2, intercept
		 * these instructions so that KVM can emulate them correctly
		 * in case L1 doesn't intercept them.
		 */
		vmcb_set_intercept(c, INTERCEPT_VMLOAD);
		vmcb_set_intercept(c, INTERCEPT_VMSAVE);
	} else {
		WARN_ON(!(c->virt_ext & VIRTUAL_VMLOAD_VMSAVE_ENABLE_MASK));
	}
}

/*
 * Merge L0's (KVM) and L1's (Nested VMCB) MSR permission bitmaps.  The
 * function is optimized in that it only merges the parts where the KVM
 * MSR permission bitmap may contain zero bits.
 */
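/*
 * Bitmap layout reminder: each MSR takes two adjacent bits (read intercept,
 * then write intercept), and msrpm_offsets[] holds offsets in 32-bit words,
 * hence the "p * 4" byte offset used when reading vmcb12's bitmap below.
 */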
static bool nested_svm_vmrun_msrpm(struct vcpu_svm *svm)
{
	int i;

	/*
	 * MSR bitmap update can be skipped when:
	 * - MSR bitmap for L1 hasn't changed.
	 * - Nested hypervisor (L1) is attempting to launch the same L2 as
	 *   before.
	 * - Nested hypervisor (L1) is using Hyper-V emulation interface and
	 *   tells KVM (L0) there were no changes in MSR bitmap for L2.
	 */
#ifdef CONFIG_KVM_HYPERV
	if (!svm->nested.force_msr_bitmap_recalc) {
		struct hv_vmcb_enlightenments *hve = &svm->nested.ctl.hv_enlightenments;

		if (kvm_hv_hypercall_enabled(&svm->vcpu) &&
		    hve->hv_enlightenments_control.msr_bitmap &&
		    (svm->nested.ctl.clean & BIT(HV_VMCB_NESTED_ENLIGHTENMENTS)))
			goto set_msrpm_base_pa;
	}
#endif

	if (!(vmcb12_is_intercept(&svm->nested.ctl, INTERCEPT_MSR_PROT)))
		return true;

	for (i = 0; i < MSRPM_OFFSETS; i++) {
		u32 value, p;
		u64 offset;

		if (msrpm_offsets[i] == 0xffffffff)
			break;

		p = msrpm_offsets[i];

		/* x2apic MSRs are always intercepted for the nested guest */
		if (is_x2apic_msrpm_offset(p))
			continue;

		offset = svm->nested.ctl.msrpm_base_pa + (p * 4);

		if (kvm_vcpu_read_guest(&svm->vcpu, offset, &value, 4))
			return false;

		svm->nested.msrpm[p] = svm->msrpm[p] | value;
	}

	svm->nested.force_msr_bitmap_recalc = false;

#ifdef CONFIG_KVM_HYPERV
set_msrpm_base_pa:
#endif
	svm->vmcb->control.msrpm_base_pa = __sme_set(__pa(svm->nested.msrpm));

	return true;
}

/*
 * Bits 11:0 of the bitmap address are ignored by hardware
 */
static bool nested_svm_check_bitmap_pa(struct kvm_vcpu *vcpu, u64 pa, u32 size)
{
	u64 addr = PAGE_ALIGN(pa);

	return kvm_vcpu_is_legal_gpa(vcpu, addr) &&
	       kvm_vcpu_is_legal_gpa(vcpu, addr + size - 1);
}

static bool __nested_vmcb_check_controls(struct kvm_vcpu *vcpu,
					 struct vmcb_ctrl_area_cached *control)
{
	if (CC(!vmcb12_is_intercept(control, INTERCEPT_VMRUN)))
		return false;

	if (CC(control->asid == 0))
		return false;

	if (CC((control->nested_ctl & SVM_NESTED_CTL_NP_ENABLE) && !npt_enabled))
		return false;

	if (CC(!nested_svm_check_bitmap_pa(vcpu, control->msrpm_base_pa,
					   MSRPM_SIZE)))
		return false;
	if (CC(!nested_svm_check_bitmap_pa(vcpu, control->iopm_base_pa,
					   IOPM_SIZE)))
		return false;

	if (CC((control->int_ctl & V_NMI_ENABLE_MASK) &&
	       !vmcb12_is_intercept(control, INTERCEPT_NMI))) {
		return false;
	}

	return true;
}

/* Common checks that apply to both L1 and L2 state.  */
static bool __nested_vmcb_check_save(struct kvm_vcpu *vcpu,
				     struct vmcb_save_area_cached *save)
{
	if (CC(!(save->efer & EFER_SVME)))
		return false;

	if (CC((save->cr0 & X86_CR0_CD) == 0 && (save->cr0 & X86_CR0_NW)) ||
	    CC(save->cr0 & ~0xffffffffULL))
		return false;

	if (CC(!kvm_dr6_valid(save->dr6)) || CC(!kvm_dr7_valid(save->dr7)))
		return false;

	/*
	 * These checks are also performed by KVM_SET_SREGS,
	 * except that EFER.LMA is not checked by SVM against
	 * CR0.PG && EFER.LME.
	 */
	if ((save->efer & EFER_LME) && (save->cr0 & X86_CR0_PG)) {
		if (CC(!(save->cr4 & X86_CR4_PAE)) ||
		    CC(!(save->cr0 & X86_CR0_PE)) ||
		    CC(!kvm_vcpu_is_legal_cr3(vcpu, save->cr3)))
			return false;
	}

	/* Note, SVM doesn't have any additional restrictions on CR4. */
	if (CC(!__kvm_is_valid_cr4(vcpu, save->cr4)))
		return false;

	if (CC(!kvm_valid_efer(vcpu, save->efer)))
		return false;

	return true;
}

static bool nested_vmcb_check_save(struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);
	struct vmcb_save_area_cached *save = &svm->nested.save;

	return __nested_vmcb_check_save(vcpu, save);
}

static bool nested_vmcb_check_controls(struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);
	struct vmcb_ctrl_area_cached *ctl = &svm->nested.ctl;

	return __nested_vmcb_check_controls(vcpu, ctl);
}

static
void __nested_copy_vmcb_control_to_cache(struct kvm_vcpu *vcpu,
					 struct vmcb_ctrl_area_cached *to,
					 struct vmcb_control_area *from)
{
	unsigned int i;

	for (i = 0; i < MAX_INTERCEPT; i++)
		to->intercepts[i] = from->intercepts[i];

	to->iopm_base_pa = from->iopm_base_pa;
	to->msrpm_base_pa = from->msrpm_base_pa;
	to->tsc_offset = from->tsc_offset;
	to->tlb_ctl = from->tlb_ctl;
	to->int_ctl = from->int_ctl;
	to->int_vector = from->int_vector;
	to->int_state = from->int_state;
	to->exit_code = from->exit_code;
	to->exit_code_hi = from->exit_code_hi;
	to->exit_info_1 = from->exit_info_1;
	to->exit_info_2 = from->exit_info_2;
	to->exit_int_info = from->exit_int_info;
	to->exit_int_info_err = from->exit_int_info_err;
	to->nested_ctl = from->nested_ctl;
	to->event_inj = from->event_inj;
	to->event_inj_err = from->event_inj_err;
	to->next_rip = from->next_rip;
	to->nested_cr3 = from->nested_cr3;
	to->virt_ext = from->virt_ext;
	to->pause_filter_count = from->pause_filter_count;
	to->pause_filter_thresh = from->pause_filter_thresh;

	/* Copy asid here because nested_vmcb_check_controls will check it. */
	to->asid = from->asid;
	to->msrpm_base_pa &= ~0x0fffULL;
	to->iopm_base_pa &= ~0x0fffULL;

#ifdef CONFIG_KVM_HYPERV
	/* Hyper-V extensions (Enlightened VMCB) */
	if (kvm_hv_hypercall_enabled(vcpu)) {
		to->clean = from->clean;
		memcpy(&to->hv_enlightenments, &from->hv_enlightenments,
		       sizeof(to->hv_enlightenments));
	}
#endif
}

void nested_copy_vmcb_control_to_cache(struct vcpu_svm *svm,
				       struct vmcb_control_area *control)
{
	__nested_copy_vmcb_control_to_cache(&svm->vcpu, &svm->nested.ctl, control);
}

static void __nested_copy_vmcb_save_to_cache(struct vmcb_save_area_cached *to,
					     struct vmcb_save_area *from)
{
	/*
	 * Copy only fields that are validated, as we need them
	 * to avoid TOC/TOU races.
	 */
	to->efer = from->efer;
	to->cr0 = from->cr0;
	to->cr3 = from->cr3;
	to->cr4 = from->cr4;

	to->dr6 = from->dr6;
	to->dr7 = from->dr7;
}

void nested_copy_vmcb_save_to_cache(struct vcpu_svm *svm,
				    struct vmcb_save_area *save)
{
	__nested_copy_vmcb_save_to_cache(&svm->nested.save, save);
}

/*
 * Synchronize fields that are written by the processor, so that
 * they can be copied back into the vmcb12.
 */
void nested_sync_control_from_vmcb02(struct vcpu_svm *svm)
{
	u32 mask;
	svm->nested.ctl.event_inj = svm->vmcb->control.event_inj;
	svm->nested.ctl.event_inj_err = svm->vmcb->control.event_inj_err;

	/* Only a few fields of int_ctl are written by the processor.  */
	mask = V_IRQ_MASK | V_TPR_MASK;
	/*
	 * Don't sync vmcb02 V_IRQ back to vmcb12 if KVM (L0) is intercepting
	 * virtual interrupts in order to request an interrupt window, as KVM
	 * has usurped vmcb02's int_ctl.  If an interrupt window opens before
	 * the next VM-Exit, svm_clear_vintr() will restore vmcb12's int_ctl.
	 * If no window opens, V_IRQ will be correctly preserved in vmcb12's
	 * int_ctl (because it was never recognized while L2 was running).
	 */
	if (svm_is_intercept(svm, INTERCEPT_VINTR) &&
	    !test_bit(INTERCEPT_VINTR, (unsigned long *)svm->nested.ctl.intercepts))
		mask &= ~V_IRQ_MASK;

	if (nested_vgif_enabled(svm))
		mask |= V_GIF_MASK;

	if (nested_vnmi_enabled(svm))
		mask |= V_NMI_BLOCKING_MASK | V_NMI_PENDING_MASK;

	svm->nested.ctl.int_ctl &= ~mask;
	svm->nested.ctl.int_ctl |= svm->vmcb->control.int_ctl & mask;
}

/*
 * Transfer any event that L0 or L1 wanted to inject into L2 to
 * EXIT_INT_INFO.
 */
static void nested_save_pending_event_to_vmcb12(struct vcpu_svm *svm,
						struct vmcb *vmcb12)
{
	struct kvm_vcpu *vcpu = &svm->vcpu;
	u32 exit_int_info = 0;
	unsigned int nr;

	if (vcpu->arch.exception.injected) {
		nr = vcpu->arch.exception.vector;
		exit_int_info = nr | SVM_EVTINJ_VALID | SVM_EVTINJ_TYPE_EXEPT;

		if (vcpu->arch.exception.has_error_code) {
			exit_int_info |= SVM_EVTINJ_VALID_ERR;
			vmcb12->control.exit_int_info_err =
				vcpu->arch.exception.error_code;
		}

	} else if (vcpu->arch.nmi_injected) {
		exit_int_info = SVM_EVTINJ_VALID | SVM_EVTINJ_TYPE_NMI;

	} else if (vcpu->arch.interrupt.injected) {
		nr = vcpu->arch.interrupt.nr;
		exit_int_info = nr | SVM_EVTINJ_VALID;

		if (vcpu->arch.interrupt.soft)
			exit_int_info |= SVM_EVTINJ_TYPE_SOFT;
		else
			exit_int_info |= SVM_EVTINJ_TYPE_INTR;
	}

	vmcb12->control.exit_int_info = exit_int_info;
}

static void nested_svm_transition_tlb_flush(struct kvm_vcpu *vcpu)
{
	/* Handle pending Hyper-V TLB flush requests */
	kvm_hv_nested_transtion_tlb_flush(vcpu, npt_enabled);

	/*
	 * TODO: optimize unconditional TLB flush/MMU sync.  A partial list of
	 * things to fix before this can be conditional:
	 *
	 *  - Flush TLBs for both L1 and L2 remote TLB flush
	 *  - Honor L1's request to flush an ASID on nested VMRUN
	 *  - Sync nested NPT MMU on VMRUN that flushes L2's ASID[*]
	 *  - Don't crush a pending TLB flush in vmcb02 on nested VMRUN
	 *  - Flush L1's ASID on KVM_REQ_TLB_FLUSH_GUEST
	 *
	 * [*] Unlike nested EPT, SVM's ASID management can invalidate nested
	 *     NPT guest-physical mappings on VMRUN.
	 */
	kvm_make_request(KVM_REQ_MMU_SYNC, vcpu);
	kvm_make_request(KVM_REQ_TLB_FLUSH_CURRENT, vcpu);
}

/*
 * Load guest's/host's cr3 on nested vmentry or vmexit. @nested_npt is true
 * if we are emulating VM-Entry into a guest with NPT enabled.
 */
static int nested_svm_load_cr3(struct kvm_vcpu *vcpu, unsigned long cr3,
			       bool nested_npt, bool reload_pdptrs)
{
	if (CC(!kvm_vcpu_is_legal_cr3(vcpu, cr3)))
		return -EINVAL;

	if (reload_pdptrs && !nested_npt && is_pae_paging(vcpu) &&
	    CC(!load_pdptrs(vcpu, cr3)))
		return -EINVAL;

	vcpu->arch.cr3 = cr3;

	/* Re-initialize the MMU, e.g. to pick up CR4 MMU role changes. */
	kvm_init_mmu(vcpu);

	if (!nested_npt)
		kvm_mmu_new_pgd(vcpu, cr3);

	return 0;
}

void nested_vmcb02_compute_g_pat(struct vcpu_svm *svm)
{
	if (!svm->nested.vmcb02.ptr)
		return;

	/* FIXME: merge g_pat from vmcb01 and vmcb12.  */
	svm->nested.vmcb02.ptr->save.g_pat = svm->vmcb01.ptr->save.g_pat;
}

static void nested_vmcb02_prepare_save(struct vcpu_svm *svm, struct vmcb *vmcb12)
{
	bool new_vmcb12 = false;
	struct vmcb *vmcb01 = svm->vmcb01.ptr;
	struct vmcb *vmcb02 = svm->nested.vmcb02.ptr;
	struct kvm_vcpu *vcpu = &svm->vcpu;

	nested_vmcb02_compute_g_pat(svm);

	/* Load the nested guest state */
	if (svm->nested.vmcb12_gpa != svm->nested.last_vmcb12_gpa) {
		new_vmcb12 = true;
		svm->nested.last_vmcb12_gpa = svm->nested.vmcb12_gpa;
		svm->nested.force_msr_bitmap_recalc = true;
	}

	if (unlikely(new_vmcb12 || vmcb_is_dirty(vmcb12, VMCB_SEG))) {
		vmcb02->save.es = vmcb12->save.es;
		vmcb02->save.cs = vmcb12->save.cs;
		vmcb02->save.ss = vmcb12->save.ss;
		vmcb02->save.ds = vmcb12->save.ds;
		vmcb02->save.cpl = vmcb12->save.cpl;
		vmcb_mark_dirty(vmcb02, VMCB_SEG);
	}

	if (unlikely(new_vmcb12 || vmcb_is_dirty(vmcb12, VMCB_DT))) {
		vmcb02->save.gdtr = vmcb12->save.gdtr;
		vmcb02->save.idtr = vmcb12->save.idtr;
		vmcb_mark_dirty(vmcb02, VMCB_DT);
	}

	kvm_set_rflags(vcpu, vmcb12->save.rflags | X86_EFLAGS_FIXED);

	svm_set_efer(vcpu, svm->nested.save.efer);

	svm_set_cr0(vcpu, svm->nested.save.cr0);
	svm_set_cr4(vcpu, svm->nested.save.cr4);

	svm->vcpu.arch.cr2 = vmcb12->save.cr2;

	kvm_rax_write(vcpu, vmcb12->save.rax);
	kvm_rsp_write(vcpu, vmcb12->save.rsp);
	kvm_rip_write(vcpu, vmcb12->save.rip);

	/* In case we don't even reach vcpu_run, the fields are not updated */
	vmcb02->save.rax = vmcb12->save.rax;
	vmcb02->save.rsp = vmcb12->save.rsp;
	vmcb02->save.rip = vmcb12->save.rip;

	/* These bits will be set properly on the first execution when new_vmcb12 is true */
	if (unlikely(new_vmcb12 || vmcb_is_dirty(vmcb12, VMCB_DR))) {
		vmcb02->save.dr7 = svm->nested.save.dr7 | DR7_FIXED_1;
		svm->vcpu.arch.dr6 = svm->nested.save.dr6 | DR6_ACTIVE_LOW;
		vmcb_mark_dirty(vmcb02, VMCB_DR);
	}

	if (unlikely(guest_can_use(vcpu, X86_FEATURE_LBRV) &&
		     (svm->nested.ctl.virt_ext & LBR_CTL_ENABLE_MASK))) {
		/*
		 * Reserved bits of DEBUGCTL are ignored.  Be consistent with
		 * svm_set_msr's definition of reserved bits.
		 */
		svm_copy_lbrs(vmcb02, vmcb12);
		vmcb02->save.dbgctl &= ~DEBUGCTL_RESERVED_BITS;
		svm_update_lbrv(&svm->vcpu);

	} else if (unlikely(vmcb01->control.virt_ext & LBR_CTL_ENABLE_MASK)) {
		svm_copy_lbrs(vmcb02, vmcb01);
	}
}

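/*
 * EVENTINJ classification helpers: soft interrupts and soft exceptions need
 * next_rip/csbase bookkeeping at VMRUN (see nested_vmcb02_prepare_control()),
 * while injected NMIs need nmi_l1_to_l2 tracking.
 */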
static inline bool is_evtinj_soft(u32 evtinj)
{
	u32 type = evtinj & SVM_EVTINJ_TYPE_MASK;
	u8 vector = evtinj & SVM_EVTINJ_VEC_MASK;

	if (!(evtinj & SVM_EVTINJ_VALID))
		return false;

	if (type == SVM_EVTINJ_TYPE_SOFT)
		return true;

	return type == SVM_EVTINJ_TYPE_EXEPT && kvm_exception_is_soft(vector);
}

static bool is_evtinj_nmi(u32 evtinj)
{
	u32 type = evtinj & SVM_EVTINJ_TYPE_MASK;

	if (!(evtinj & SVM_EVTINJ_VALID))
		return false;

	return type == SVM_EVTINJ_TYPE_NMI;
}

static void nested_vmcb02_prepare_control(struct vcpu_svm *svm,
					  unsigned long vmcb12_rip,
					  unsigned long vmcb12_csbase)
{
	u32 int_ctl_vmcb01_bits = V_INTR_MASKING_MASK;
	u32 int_ctl_vmcb12_bits = V_TPR_MASK | V_IRQ_INJECTION_BITS_MASK;

	struct kvm_vcpu *vcpu = &svm->vcpu;
	struct vmcb *vmcb01 = svm->vmcb01.ptr;
	struct vmcb *vmcb02 = svm->nested.vmcb02.ptr;
	u32 pause_count12;
	u32 pause_thresh12;

	/*
	 * Filled at exit: exit_code, exit_code_hi, exit_info_1, exit_info_2,
	 * exit_int_info, exit_int_info_err, next_rip, insn_len, insn_bytes.
	 */

	if (guest_can_use(vcpu, X86_FEATURE_VGIF) &&
	    (svm->nested.ctl.int_ctl & V_GIF_ENABLE_MASK))
		int_ctl_vmcb12_bits |= (V_GIF_MASK | V_GIF_ENABLE_MASK);
	else
		int_ctl_vmcb01_bits |= (V_GIF_MASK | V_GIF_ENABLE_MASK);

	if (vnmi) {
		if (vmcb01->control.int_ctl & V_NMI_PENDING_MASK) {
			svm->vcpu.arch.nmi_pending++;
			kvm_make_request(KVM_REQ_EVENT, &svm->vcpu);
		}
		if (nested_vnmi_enabled(svm))
			int_ctl_vmcb12_bits |= (V_NMI_PENDING_MASK |
						V_NMI_ENABLE_MASK |
						V_NMI_BLOCKING_MASK);
	}

	/* Copied from vmcb01.  msrpm_base can be overwritten later.  */
	vmcb02->control.nested_ctl = vmcb01->control.nested_ctl;
	vmcb02->control.iopm_base_pa = vmcb01->control.iopm_base_pa;
	vmcb02->control.msrpm_base_pa = vmcb01->control.msrpm_base_pa;

	/* Done at vmrun: asid.  */

	/* Also overwritten later if necessary.  */
	vmcb02->control.tlb_ctl = TLB_CONTROL_DO_NOTHING;

	/* nested_cr3.  */
	if (nested_npt_enabled(svm))
		nested_svm_init_mmu_context(vcpu);

	vcpu->arch.tsc_offset = kvm_calc_nested_tsc_offset(
			vcpu->arch.l1_tsc_offset,
			svm->nested.ctl.tsc_offset,
			svm->tsc_ratio_msr);

	vmcb02->control.tsc_offset = vcpu->arch.tsc_offset;

	if (guest_can_use(vcpu, X86_FEATURE_TSCRATEMSR) &&
	    svm->tsc_ratio_msr != kvm_caps.default_tsc_scaling_ratio)
		nested_svm_update_tsc_ratio_msr(vcpu);

	vmcb02->control.int_ctl =
		(svm->nested.ctl.int_ctl & int_ctl_vmcb12_bits) |
		(vmcb01->control.int_ctl & int_ctl_vmcb01_bits);

	vmcb02->control.int_vector = svm->nested.ctl.int_vector;
	vmcb02->control.int_state = svm->nested.ctl.int_state;
	vmcb02->control.event_inj = svm->nested.ctl.event_inj;
	vmcb02->control.event_inj_err = svm->nested.ctl.event_inj_err;

	/*
	 * next_rip is consumed on VMRUN as the return address pushed on the
	 * stack for injected soft exceptions/interrupts.  If nrips is exposed
	 * to L1, take it verbatim from vmcb12.  If nrips is supported in
	 * hardware but not exposed to L1, stuff the actual L2 RIP to emulate
	 * what a nrips=0 CPU would do (L1 is responsible for advancing RIP
	 * prior to injecting the event).
	 */
	if (guest_can_use(vcpu, X86_FEATURE_NRIPS))
		vmcb02->control.next_rip = svm->nested.ctl.next_rip;
	else if (boot_cpu_has(X86_FEATURE_NRIPS))
		vmcb02->control.next_rip = vmcb12_rip;

	svm->nmi_l1_to_l2 = is_evtinj_nmi(vmcb02->control.event_inj);
	if (is_evtinj_soft(vmcb02->control.event_inj)) {
		svm->soft_int_injected = true;
		svm->soft_int_csbase = vmcb12_csbase;
		svm->soft_int_old_rip = vmcb12_rip;
		if (guest_can_use(vcpu, X86_FEATURE_NRIPS))
			svm->soft_int_next_rip = svm->nested.ctl.next_rip;
		else
			svm->soft_int_next_rip = vmcb12_rip;
	}

	vmcb02->control.virt_ext = vmcb01->control.virt_ext &
				   LBR_CTL_ENABLE_MASK;
	if (guest_can_use(vcpu, X86_FEATURE_LBRV))
		vmcb02->control.virt_ext |=
			(svm->nested.ctl.virt_ext & LBR_CTL_ENABLE_MASK);

	if (!nested_vmcb_needs_vls_intercept(svm))
		vmcb02->control.virt_ext |= VIRTUAL_VMLOAD_VMSAVE_ENABLE_MASK;

	if (guest_can_use(vcpu, X86_FEATURE_PAUSEFILTER))
		pause_count12 = svm->nested.ctl.pause_filter_count;
	else
		pause_count12 = 0;
	if (guest_can_use(vcpu, X86_FEATURE_PFTHRESHOLD))
		pause_thresh12 = svm->nested.ctl.pause_filter_thresh;
	else
		pause_thresh12 = 0;
	if (kvm_pause_in_guest(svm->vcpu.kvm)) {
		/* use guest values since host doesn't intercept PAUSE */
		vmcb02->control.pause_filter_count = pause_count12;
		vmcb02->control.pause_filter_thresh = pause_thresh12;

	} else {
		/* start from host values otherwise */
		vmcb02->control.pause_filter_count = vmcb01->control.pause_filter_count;
		vmcb02->control.pause_filter_thresh = vmcb01->control.pause_filter_thresh;

		/* ... but ensure filtering is disabled if so requested.  */
		if (vmcb12_is_intercept(&svm->nested.ctl, INTERCEPT_PAUSE)) {
			if (!pause_count12)
				vmcb02->control.pause_filter_count = 0;
			if (!pause_thresh12)
				vmcb02->control.pause_filter_thresh = 0;
		}
	}

	nested_svm_transition_tlb_flush(vcpu);

	/* Enter Guest-Mode */
	enter_guest_mode(vcpu);

	/*
	 * Merge guest and host intercepts - must be called with the vcpu in
	 * guest-mode to take effect.
	 */
	recalc_intercepts(svm);
}

static void nested_svm_copy_common_state(struct vmcb *from_vmcb, struct vmcb *to_vmcb)
{
	/*
	 * Some VMCB state is shared between L1 and L2 and thus has to be
	 * moved at the time of nested vmrun and vmexit.
	 *
	 * VMLOAD/VMSAVE state would also belong in this category, but KVM
	 * always performs VMLOAD and VMSAVE from the VMCB01.
	 */
	to_vmcb->save.spec_ctrl = from_vmcb->save.spec_ctrl;
}

int enter_svm_guest_mode(struct kvm_vcpu *vcpu, u64 vmcb12_gpa,
			 struct vmcb *vmcb12, bool from_vmrun)
{
	struct vcpu_svm *svm = to_svm(vcpu);
	int ret;

	trace_kvm_nested_vmenter(svm->vmcb->save.rip,
				 vmcb12_gpa,
				 vmcb12->save.rip,
				 vmcb12->control.int_ctl,
				 vmcb12->control.event_inj,
				 vmcb12->control.nested_ctl,
				 vmcb12->control.nested_cr3,
				 vmcb12->save.cr3,
				 KVM_ISA_SVM);

	trace_kvm_nested_intercepts(vmcb12->control.intercepts[INTERCEPT_CR] & 0xffff,
				    vmcb12->control.intercepts[INTERCEPT_CR] >> 16,
				    vmcb12->control.intercepts[INTERCEPT_EXCEPTION],
				    vmcb12->control.intercepts[INTERCEPT_WORD3],
				    vmcb12->control.intercepts[INTERCEPT_WORD4],
				    vmcb12->control.intercepts[INTERCEPT_WORD5]);

	svm->nested.vmcb12_gpa = vmcb12_gpa;

	WARN_ON(svm->vmcb == svm->nested.vmcb02.ptr);

	nested_svm_copy_common_state(svm->vmcb01.ptr, svm->nested.vmcb02.ptr);

	svm_switch_vmcb(svm, &svm->nested.vmcb02);
	nested_vmcb02_prepare_control(svm, vmcb12->save.rip, vmcb12->save.cs.base);
	nested_vmcb02_prepare_save(svm, vmcb12);

	ret = nested_svm_load_cr3(&svm->vcpu, svm->nested.save.cr3,
				  nested_npt_enabled(svm), from_vmrun);
	if (ret)
		return ret;

	if (!from_vmrun)
		kvm_make_request(KVM_REQ_GET_NESTED_STATE_PAGES, vcpu);

	svm_set_gif(svm, true);

	if (kvm_vcpu_apicv_active(vcpu))
		kvm_make_request(KVM_REQ_APICV_UPDATE, vcpu);

	nested_svm_hv_update_vm_vp_ids(vcpu);

	return 0;
}

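/*
 * Emulate VMRUN: map vmcb12, cache and validate its control and save fields,
 * stash L1 state in vmcb01, then switch to vmcb02 and enter guest mode.  A
 * failed consistency check results in a VMEXIT with SVM_EXIT_ERR.
 */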
int nested_svm_vmrun(struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);
	int ret;
	struct vmcb *vmcb12;
	struct kvm_host_map map;
	u64 vmcb12_gpa;
	struct vmcb *vmcb01 = svm->vmcb01.ptr;

	if (!svm->nested.hsave_msr) {
		kvm_inject_gp(vcpu, 0);
		return 1;
	}

	if (is_smm(vcpu)) {
		kvm_queue_exception(vcpu, UD_VECTOR);
		return 1;
	}

	/* This fails when the VP assist page is enabled but the supplied GPA is bogus */
	ret = kvm_hv_verify_vp_assist(vcpu);
	if (ret) {
		kvm_inject_gp(vcpu, 0);
		return ret;
	}

	vmcb12_gpa = svm->vmcb->save.rax;
	ret = kvm_vcpu_map(vcpu, gpa_to_gfn(vmcb12_gpa), &map);
	if (ret == -EINVAL) {
		kvm_inject_gp(vcpu, 0);
		return 1;
	} else if (ret) {
		return kvm_skip_emulated_instruction(vcpu);
	}

	ret = kvm_skip_emulated_instruction(vcpu);

	vmcb12 = map.hva;

	if (WARN_ON_ONCE(!svm->nested.initialized))
		return -EINVAL;

	nested_copy_vmcb_control_to_cache(svm, &vmcb12->control);
	nested_copy_vmcb_save_to_cache(svm, &vmcb12->save);

	if (!nested_vmcb_check_save(vcpu) ||
	    !nested_vmcb_check_controls(vcpu)) {
		vmcb12->control.exit_code = SVM_EXIT_ERR;
		vmcb12->control.exit_code_hi = 0;
		vmcb12->control.exit_info_1 = 0;
		vmcb12->control.exit_info_2 = 0;
		goto out;
	}

	/*
	 * Since vmcb01 is not in use, we can use it to store some of the L1
	 * state.
	 */
	vmcb01->save.efer = vcpu->arch.efer;
	vmcb01->save.cr0 = kvm_read_cr0(vcpu);
	vmcb01->save.cr4 = vcpu->arch.cr4;
	vmcb01->save.rflags = kvm_get_rflags(vcpu);
	vmcb01->save.rip = kvm_rip_read(vcpu);

	if (!npt_enabled)
		vmcb01->save.cr3 = kvm_read_cr3(vcpu);

	svm->nested.nested_run_pending = 1;

	if (enter_svm_guest_mode(vcpu, vmcb12_gpa, vmcb12, true))
		goto out_exit_err;

	if (nested_svm_vmrun_msrpm(svm))
		goto out;

out_exit_err:
	svm->nested.nested_run_pending = 0;
	svm->nmi_l1_to_l2 = false;
	svm->soft_int_injected = false;

	svm->vmcb->control.exit_code = SVM_EXIT_ERR;
	svm->vmcb->control.exit_code_hi = 0;
	svm->vmcb->control.exit_info_1 = 0;
	svm->vmcb->control.exit_info_2 = 0;

	nested_svm_vmexit(svm);

out:
	kvm_vcpu_unmap(vcpu, &map, true);

	return ret;
}

/* Copy state save area fields which are handled by VMRUN */
void svm_copy_vmrun_state(struct vmcb_save_area *to_save,
			  struct vmcb_save_area *from_save)
{
	to_save->es = from_save->es;
	to_save->cs = from_save->cs;
	to_save->ss = from_save->ss;
	to_save->ds = from_save->ds;
	to_save->gdtr = from_save->gdtr;
	to_save->idtr = from_save->idtr;
	to_save->rflags = from_save->rflags | X86_EFLAGS_FIXED;
	to_save->efer = from_save->efer;
	to_save->cr0 = from_save->cr0;
	to_save->cr3 = from_save->cr3;
	to_save->cr4 = from_save->cr4;
	to_save->rax = from_save->rax;
	to_save->rsp = from_save->rsp;
	to_save->rip = from_save->rip;
	to_save->cpl = 0;
}

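/* Copy the state save area fields that VMLOAD and VMSAVE transfer. */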
void svm_copy_vmloadsave_state(struct vmcb *to_vmcb, struct vmcb *from_vmcb)
{
	to_vmcb->save.fs = from_vmcb->save.fs;
	to_vmcb->save.gs = from_vmcb->save.gs;
	to_vmcb->save.tr = from_vmcb->save.tr;
	to_vmcb->save.ldtr = from_vmcb->save.ldtr;
	to_vmcb->save.kernel_gs_base = from_vmcb->save.kernel_gs_base;
	to_vmcb->save.star = from_vmcb->save.star;
	to_vmcb->save.lstar = from_vmcb->save.lstar;
	to_vmcb->save.cstar = from_vmcb->save.cstar;
	to_vmcb->save.sfmask = from_vmcb->save.sfmask;
	to_vmcb->save.sysenter_cs = from_vmcb->save.sysenter_cs;
	to_vmcb->save.sysenter_esp = from_vmcb->save.sysenter_esp;
	to_vmcb->save.sysenter_eip = from_vmcb->save.sysenter_eip;
}

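/*
 * Emulate #VMEXIT from L2 to L1: propagate the exit state that hardware
 * would save into vmcb12, switch back to vmcb01, restore the L1 state
 * stashed there at VMRUN, and reload L1's CR3/MMU context.
 */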
int nested_svm_vmexit(struct vcpu_svm *svm)
{
	struct kvm_vcpu *vcpu = &svm->vcpu;
	struct vmcb *vmcb01 = svm->vmcb01.ptr;
	struct vmcb *vmcb02 = svm->nested.vmcb02.ptr;
	struct vmcb *vmcb12;
	struct kvm_host_map map;
	int rc;

	rc = kvm_vcpu_map(vcpu, gpa_to_gfn(svm->nested.vmcb12_gpa), &map);
	if (rc) {
		if (rc == -EINVAL)
			kvm_inject_gp(vcpu, 0);
		return 1;
	}

	vmcb12 = map.hva;

	/* Exit Guest-Mode */
	leave_guest_mode(vcpu);
	svm->nested.vmcb12_gpa = 0;
	WARN_ON_ONCE(svm->nested.nested_run_pending);

	kvm_clear_request(KVM_REQ_GET_NESTED_STATE_PAGES, vcpu);

	/* in case we halted in L2 */
	svm->vcpu.arch.mp_state = KVM_MP_STATE_RUNNABLE;

	/* Give the current vmcb to the guest */

	vmcb12->save.es = vmcb02->save.es;
	vmcb12->save.cs = vmcb02->save.cs;
	vmcb12->save.ss = vmcb02->save.ss;
	vmcb12->save.ds = vmcb02->save.ds;
	vmcb12->save.gdtr = vmcb02->save.gdtr;
	vmcb12->save.idtr = vmcb02->save.idtr;
	vmcb12->save.efer = svm->vcpu.arch.efer;
	vmcb12->save.cr0 = kvm_read_cr0(vcpu);
	vmcb12->save.cr3 = kvm_read_cr3(vcpu);
	vmcb12->save.cr2 = vmcb02->save.cr2;
	vmcb12->save.cr4 = svm->vcpu.arch.cr4;
	vmcb12->save.rflags = kvm_get_rflags(vcpu);
	vmcb12->save.rip = kvm_rip_read(vcpu);
	vmcb12->save.rsp = kvm_rsp_read(vcpu);
	vmcb12->save.rax = kvm_rax_read(vcpu);
	vmcb12->save.dr7 = vmcb02->save.dr7;
	vmcb12->save.dr6 = svm->vcpu.arch.dr6;
	vmcb12->save.cpl = vmcb02->save.cpl;

	vmcb12->control.int_state = vmcb02->control.int_state;
	vmcb12->control.exit_code = vmcb02->control.exit_code;
	vmcb12->control.exit_code_hi = vmcb02->control.exit_code_hi;
	vmcb12->control.exit_info_1 = vmcb02->control.exit_info_1;
	vmcb12->control.exit_info_2 = vmcb02->control.exit_info_2;

	if (vmcb12->control.exit_code != SVM_EXIT_ERR)
		nested_save_pending_event_to_vmcb12(svm, vmcb12);

	if (guest_can_use(vcpu, X86_FEATURE_NRIPS))
		vmcb12->control.next_rip = vmcb02->control.next_rip;

	vmcb12->control.int_ctl = svm->nested.ctl.int_ctl;
	vmcb12->control.event_inj = svm->nested.ctl.event_inj;
	vmcb12->control.event_inj_err = svm->nested.ctl.event_inj_err;

	if (!kvm_pause_in_guest(vcpu->kvm)) {
		vmcb01->control.pause_filter_count = vmcb02->control.pause_filter_count;
		vmcb_mark_dirty(vmcb01, VMCB_INTERCEPTS);
	}

	nested_svm_copy_common_state(svm->nested.vmcb02.ptr, svm->vmcb01.ptr);

	svm_switch_vmcb(svm, &svm->vmcb01);

	/*
	 * Rules for synchronizing int_ctl bits from vmcb02 to vmcb01:
	 *
	 * V_IRQ, V_IRQ_VECTOR, V_INTR_PRIO_MASK, V_IGN_TPR: If L1 doesn't
	 * intercept interrupts, then KVM will use vmcb02's V_IRQ (and related
	 * flags) to detect interrupt windows for L1 IRQs (even if L1 uses
	 * virtual interrupt masking).  Raise KVM_REQ_EVENT to ensure that
	 * KVM re-requests an interrupt window if necessary, which implicitly
	 * copies these bits from vmcb02 to vmcb01.
	 *
	 * V_TPR: If L1 doesn't use virtual interrupt masking, then L1's vTPR
	 * is stored in vmcb02, but its value doesn't need to be copied from/to
	 * vmcb01 because it is copied from/to the virtual APIC's TPR register
	 * on each VM entry/exit.
	 *
	 * V_GIF: If nested vGIF is not used, KVM uses vmcb02's V_GIF for L1's
	 * V_GIF.  However, GIF is architecturally cleared on each VM exit,
	 * thus there is no need to copy V_GIF from vmcb02 to vmcb01.
	 */
	if (!nested_exit_on_intr(svm))
		kvm_make_request(KVM_REQ_EVENT, &svm->vcpu);

	if (unlikely(guest_can_use(vcpu, X86_FEATURE_LBRV) &&
		     (svm->nested.ctl.virt_ext & LBR_CTL_ENABLE_MASK))) {
		svm_copy_lbrs(vmcb12, vmcb02);
		svm_update_lbrv(vcpu);
	} else if (unlikely(vmcb01->control.virt_ext & LBR_CTL_ENABLE_MASK)) {
		svm_copy_lbrs(vmcb01, vmcb02);
		svm_update_lbrv(vcpu);
	}

	if (vnmi) {
		if (vmcb02->control.int_ctl & V_NMI_BLOCKING_MASK)
			vmcb01->control.int_ctl |= V_NMI_BLOCKING_MASK;
		else
			vmcb01->control.int_ctl &= ~V_NMI_BLOCKING_MASK;

		if (vcpu->arch.nmi_pending) {
			vcpu->arch.nmi_pending--;
			vmcb01->control.int_ctl |= V_NMI_PENDING_MASK;
		} else {
			vmcb01->control.int_ctl &= ~V_NMI_PENDING_MASK;
		}
	}

	/*
	 * On vmexit the GIF is set to false and
	 * no event can be injected into L1.
	 */
	svm_set_gif(svm, false);
	vmcb01->control.exit_int_info = 0;

	svm->vcpu.arch.tsc_offset = svm->vcpu.arch.l1_tsc_offset;
	if (vmcb01->control.tsc_offset != svm->vcpu.arch.tsc_offset) {
		vmcb01->control.tsc_offset = svm->vcpu.arch.tsc_offset;
		vmcb_mark_dirty(vmcb01, VMCB_INTERCEPTS);
	}

	if (kvm_caps.has_tsc_control &&
	    vcpu->arch.tsc_scaling_ratio != vcpu->arch.l1_tsc_scaling_ratio) {
		vcpu->arch.tsc_scaling_ratio = vcpu->arch.l1_tsc_scaling_ratio;
		svm_write_tsc_multiplier(vcpu);
	}

	svm->nested.ctl.nested_cr3 = 0;

	/*
	 * Restore processor state that had been saved in vmcb01.
	 */
	kvm_set_rflags(vcpu, vmcb01->save.rflags);
	svm_set_efer(vcpu, vmcb01->save.efer);
	svm_set_cr0(vcpu, vmcb01->save.cr0 | X86_CR0_PE);
	svm_set_cr4(vcpu, vmcb01->save.cr4);
	kvm_rax_write(vcpu, vmcb01->save.rax);
	kvm_rsp_write(vcpu, vmcb01->save.rsp);
	kvm_rip_write(vcpu, vmcb01->save.rip);

	svm->vcpu.arch.dr7 = DR7_FIXED_1;
	kvm_update_dr7(&svm->vcpu);

	trace_kvm_nested_vmexit_inject(vmcb12->control.exit_code,
				       vmcb12->control.exit_info_1,
				       vmcb12->control.exit_info_2,
				       vmcb12->control.exit_int_info,
				       vmcb12->control.exit_int_info_err,
				       KVM_ISA_SVM);

	kvm_vcpu_unmap(vcpu, &map, true);

	nested_svm_transition_tlb_flush(vcpu);

	nested_svm_uninit_mmu_context(vcpu);

	rc = nested_svm_load_cr3(vcpu, vmcb01->save.cr3, false, true);
	if (rc)
		return 1;

	/*
	 * Drop what we picked up for L2 via svm_complete_interrupts() so it
	 * doesn't end up in L1.
	 */
	svm->vcpu.arch.nmi_injected = false;
	kvm_clear_exception_queue(vcpu);
	kvm_clear_interrupt_queue(vcpu);

	/*
	 * If we are here following the completion of a VMRUN that
	 * is being single-stepped, queue the pending #DB intercept
	 * right now so that it can be accounted for before we execute
	 * L1's next instruction.
	 */
	if (unlikely(vmcb01->save.rflags & X86_EFLAGS_TF))
		kvm_queue_exception(&(svm->vcpu), DB_VECTOR);

	/*
	 * Un-inhibit the AVIC right away, so that other vCPUs can start
	 * to benefit from it right away.
	 */
	if (kvm_apicv_activated(vcpu->kvm))
		__kvm_vcpu_update_apicv(vcpu);

	return 0;
}

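/*
 * Turn a pending triple fault in L2 into an SVM_EXIT_SHUTDOWN VMEXIT to L1
 * if L1 intercepts SHUTDOWN; otherwise leave the request pending so that
 * L0 handles the shutdown itself.
 */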
static void nested_svm_triple_fault(struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	if (!vmcb12_is_intercept(&svm->nested.ctl, INTERCEPT_SHUTDOWN))
		return;

	kvm_clear_request(KVM_REQ_TRIPLE_FAULT, vcpu);
	nested_svm_simple_vmexit(to_svm(vcpu), SVM_EXIT_SHUTDOWN);
}

int svm_allocate_nested(struct vcpu_svm *svm)
{
	struct page *vmcb02_page;

	if (svm->nested.initialized)
		return 0;

	vmcb02_page = snp_safe_alloc_page(&svm->vcpu);
	if (!vmcb02_page)
		return -ENOMEM;
	svm->nested.vmcb02.ptr = page_address(vmcb02_page);
	svm->nested.vmcb02.pa = __sme_set(page_to_pfn(vmcb02_page) << PAGE_SHIFT);

	svm->nested.msrpm = svm_vcpu_alloc_msrpm();
	if (!svm->nested.msrpm)
		goto err_free_vmcb02;
	svm_vcpu_init_msrpm(&svm->vcpu, svm->nested.msrpm);

	svm->nested.initialized = true;
	return 0;

err_free_vmcb02:
	__free_page(vmcb02_page);
	return -ENOMEM;
}

void svm_free_nested(struct vcpu_svm *svm)
{
	if (!svm->nested.initialized)
		return;

	if (WARN_ON_ONCE(svm->vmcb != svm->vmcb01.ptr))
		svm_switch_vmcb(svm, &svm->vmcb01);

	svm_vcpu_free_msrpm(svm->nested.msrpm);
	svm->nested.msrpm = NULL;

	__free_page(virt_to_page(svm->nested.vmcb02.ptr));
	svm->nested.vmcb02.ptr = NULL;

	/*
	 * When last_vmcb12_gpa matches the current vmcb12 gpa,
	 * some vmcb12 fields are not loaded if they are marked clean
	 * in the vmcb12, since in this case they are up to date already.
	 *
	 * When the vmcb02 is freed, this optimization becomes invalid.
	 */
	svm->nested.last_vmcb12_gpa = INVALID_GPA;

	svm->nested.initialized = false;
}

void svm_leave_nested(struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	if (is_guest_mode(vcpu)) {
		svm->nested.nested_run_pending = 0;
		svm->nested.vmcb12_gpa = INVALID_GPA;

		leave_guest_mode(vcpu);

		svm_switch_vmcb(svm, &svm->vmcb01);

		nested_svm_uninit_mmu_context(vcpu);
		vmcb_mark_all_dirty(svm->vmcb);

		if (kvm_apicv_activated(vcpu->kvm))
			kvm_make_request(KVM_REQ_APICV_UPDATE, vcpu);
	}

	kvm_clear_request(KVM_REQ_GET_NESTED_STATE_PAGES, vcpu);
}

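/*
 * Consult L1's MSR permission bitmap for the MSR access that just exited.
 * Each 32-bit chunk of the bitmap covers 16 MSRs at two bits apiece, hence
 * the "msr & 0xf" and "2 * ... + write" bit selection below.
 */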
static int nested_svm_exit_handled_msr(struct vcpu_svm *svm)
{
	u32 offset, msr, value;
	int write, mask;

	if (!(vmcb12_is_intercept(&svm->nested.ctl, INTERCEPT_MSR_PROT)))
		return NESTED_EXIT_HOST;

	msr = svm->vcpu.arch.regs[VCPU_REGS_RCX];
	offset = svm_msrpm_offset(msr);
	write = svm->vmcb->control.exit_info_1 & 1;
	mask = 1 << ((2 * (msr & 0xf)) + write);

	if (offset == MSR_INVALID)
		return NESTED_EXIT_DONE;

	/* The offset is in 32-bit units, but we need it in 8-bit units. */
	offset *= 4;

	if (kvm_vcpu_read_guest(&svm->vcpu, svm->nested.ctl.msrpm_base_pa + offset, &value, 4))
		return NESTED_EXIT_DONE;

	return (value & mask) ? NESTED_EXIT_DONE : NESTED_EXIT_HOST;
}

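/*
 * Consult L1's I/O permission bitmap: one bit per port, so an access of
 * "size" bytes spans "size" consecutive bits and may straddle a byte
 * boundary, hence up to two bytes of the bitmap are read.
 */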
static int nested_svm_intercept_ioio(struct vcpu_svm *svm)
{
	unsigned port, size, iopm_len;
	u16 val, mask;
	u8 start_bit;
	u64 gpa;

	if (!(vmcb12_is_intercept(&svm->nested.ctl, INTERCEPT_IOIO_PROT)))
		return NESTED_EXIT_HOST;

	port = svm->vmcb->control.exit_info_1 >> 16;
	size = (svm->vmcb->control.exit_info_1 & SVM_IOIO_SIZE_MASK) >>
		SVM_IOIO_SIZE_SHIFT;
	gpa = svm->nested.ctl.iopm_base_pa + (port / 8);
	start_bit = port % 8;
	iopm_len = (start_bit + size > 8) ? 2 : 1;
	mask = (0xf >> (4 - size)) << start_bit;
	val = 0;

	if (kvm_vcpu_read_guest(&svm->vcpu, gpa, &val, iopm_len))
		return NESTED_EXIT_DONE;

	return (val & mask) ? NESTED_EXIT_DONE : NESTED_EXIT_HOST;
}

static int nested_svm_intercept(struct vcpu_svm *svm)
{
	u32 exit_code = svm->vmcb->control.exit_code;
	int vmexit = NESTED_EXIT_HOST;

	switch (exit_code) {
	case SVM_EXIT_MSR:
		vmexit = nested_svm_exit_handled_msr(svm);
		break;
	case SVM_EXIT_IOIO:
		vmexit = nested_svm_intercept_ioio(svm);
		break;
	case SVM_EXIT_READ_CR0 ... SVM_EXIT_WRITE_CR8: {
		if (vmcb12_is_intercept(&svm->nested.ctl, exit_code))
			vmexit = NESTED_EXIT_DONE;
		break;
	}
	case SVM_EXIT_READ_DR0 ... SVM_EXIT_WRITE_DR7: {
		if (vmcb12_is_intercept(&svm->nested.ctl, exit_code))
			vmexit = NESTED_EXIT_DONE;
		break;
	}
	case SVM_EXIT_EXCP_BASE ... SVM_EXIT_EXCP_BASE + 0x1f: {
		/*
		 * Host-intercepted exceptions have been checked already in
		 * nested_svm_exit_special.  There is nothing to do here,
		 * the vmexit is injected by svm_check_nested_events.
		 */
		vmexit = NESTED_EXIT_DONE;
		break;
	}
	case SVM_EXIT_ERR: {
		vmexit = NESTED_EXIT_DONE;
		break;
	}
	default: {
		if (vmcb12_is_intercept(&svm->nested.ctl, exit_code))
			vmexit = NESTED_EXIT_DONE;
	}
	}

	return vmexit;
}

int nested_svm_exit_handled(struct vcpu_svm *svm)
{
	int vmexit;

	vmexit = nested_svm_intercept(svm);

	if (vmexit == NESTED_EXIT_DONE)
		nested_svm_vmexit(svm);

	return vmexit;
}

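/*
 * Common permission checks for SVM instructions (VMRUN, VMLOAD, VMSAVE,
 * STGI, CLGI, ...): #UD if SVM is disabled or paging is off, #GP if not
 * executed at CPL0.
 */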
int nested_svm_check_permissions(struct kvm_vcpu *vcpu)
{
	if (!(vcpu->arch.efer & EFER_SVME) || !is_paging(vcpu)) {
		kvm_queue_exception(vcpu, UD_VECTOR);
		return 1;
	}

	if (to_svm(vcpu)->vmcb->save.cpl) {
		kvm_inject_gp(vcpu, 0);
		return 1;
	}

	return 0;
}

static bool nested_svm_is_exception_vmexit(struct kvm_vcpu *vcpu, u8 vector,
					   u32 error_code)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	return (svm->nested.ctl.intercepts[INTERCEPT_EXCEPTION] & BIT(vector));
}

static void nested_svm_inject_exception_vmexit(struct kvm_vcpu *vcpu)
{
	struct kvm_queued_exception *ex = &vcpu->arch.exception_vmexit;
	struct vcpu_svm *svm = to_svm(vcpu);
	struct vmcb *vmcb = svm->vmcb;

	vmcb->control.exit_code = SVM_EXIT_EXCP_BASE + ex->vector;
	vmcb->control.exit_code_hi = 0;

	if (ex->has_error_code)
		vmcb->control.exit_info_1 = ex->error_code;

	/*
	 * EXITINFO2 is undefined for all exception intercepts other
	 * than #PF.
	 */
	if (ex->vector == PF_VECTOR) {
		if (ex->has_payload)
			vmcb->control.exit_info_2 = ex->payload;
		else
			vmcb->control.exit_info_2 = vcpu->arch.cr2;
	} else if (ex->vector == DB_VECTOR) {
		/* See kvm_check_and_inject_events().  */
		kvm_deliver_exception_payload(vcpu, ex);

		if (vcpu->arch.dr7 & DR7_GD) {
			vcpu->arch.dr7 &= ~DR7_GD;
			kvm_update_dr7(vcpu);
		}
	} else {
		WARN_ON(ex->has_payload);
	}

	nested_svm_vmexit(svm);
}

static inline bool nested_exit_on_init(struct vcpu_svm *svm)
{
	return vmcb12_is_intercept(&svm->nested.ctl, INTERCEPT_INIT);
}

static int svm_check_nested_events(struct kvm_vcpu *vcpu)
{
	struct kvm_lapic *apic = vcpu->arch.apic;
	struct vcpu_svm *svm = to_svm(vcpu);
	/*
	 * Only a pending nested run blocks a pending exception.  If there is a
	 * previously injected event, the pending exception occurred while said
	 * event was being delivered and thus needs to be handled.
	 */
	bool block_nested_exceptions = svm->nested.nested_run_pending;
	/*
	 * New events (not exceptions) are only recognized at instruction
	 * boundaries.  If an event needs reinjection, then KVM is handling a
	 * VM-Exit that occurred _during_ instruction execution; new events are
	 * blocked until the instruction completes.
	 */
	bool block_nested_events = block_nested_exceptions ||
				   kvm_event_needs_reinjection(vcpu);

	if (lapic_in_kernel(vcpu) &&
	    test_bit(KVM_APIC_INIT, &apic->pending_events)) {
		if (block_nested_events)
			return -EBUSY;
		if (!nested_exit_on_init(svm))
			return 0;
		nested_svm_simple_vmexit(svm, SVM_EXIT_INIT);
		return 0;
	}

	if (vcpu->arch.exception_vmexit.pending) {
		if (block_nested_exceptions)
			return -EBUSY;
		nested_svm_inject_exception_vmexit(vcpu);
		return 0;
	}

	if (vcpu->arch.exception.pending) {
		if (block_nested_exceptions)
			return -EBUSY;
		return 0;
	}

#ifdef CONFIG_KVM_SMM
	if (vcpu->arch.smi_pending && !svm_smi_blocked(vcpu)) {
		if (block_nested_events)
			return -EBUSY;
		if (!nested_exit_on_smi(svm))
			return 0;
		nested_svm_simple_vmexit(svm, SVM_EXIT_SMI);
		return 0;
	}
#endif

	if (vcpu->arch.nmi_pending && !svm_nmi_blocked(vcpu)) {
		if (block_nested_events)
			return -EBUSY;
		if (!nested_exit_on_nmi(svm))
			return 0;
		nested_svm_simple_vmexit(svm, SVM_EXIT_NMI);
		return 0;
	}

	if (kvm_cpu_has_interrupt(vcpu) && !svm_interrupt_blocked(vcpu)) {
		if (block_nested_events)
			return -EBUSY;
		if (!nested_exit_on_intr(svm))
			return 0;
		trace_kvm_nested_intr_vmexit(svm->vmcb->save.rip);
		nested_svm_simple_vmexit(svm, SVM_EXIT_INTR);
		return 0;
	}

	return 0;
}

int nested_svm_exit_special(struct vcpu_svm *svm)
{
	u32 exit_code = svm->vmcb->control.exit_code;
	struct kvm_vcpu *vcpu = &svm->vcpu;

	switch (exit_code) {
	case SVM_EXIT_INTR:
	case SVM_EXIT_NMI:
	case SVM_EXIT_NPF:
		return NESTED_EXIT_HOST;
	case SVM_EXIT_EXCP_BASE ... SVM_EXIT_EXCP_BASE + 0x1f: {
		u32 excp_bits = 1 << (exit_code - SVM_EXIT_EXCP_BASE);

		if (svm->vmcb01.ptr->control.intercepts[INTERCEPT_EXCEPTION] &
		    excp_bits)
			return NESTED_EXIT_HOST;
		else if (exit_code == SVM_EXIT_EXCP_BASE + PF_VECTOR &&
			 svm->vcpu.arch.apf.host_apf_flags)
			/* Trap async PF even if not shadowing */
			return NESTED_EXIT_HOST;
		break;
	}
	case SVM_EXIT_VMMCALL:
		/* Hyper-V L2 TLB flush hypercall is handled by L0 */
		if (guest_hv_cpuid_has_l2_tlb_flush(vcpu) &&
		    nested_svm_l2_tlb_flush_enabled(vcpu) &&
		    kvm_hv_is_tlb_flush_hcall(vcpu))
			return NESTED_EXIT_HOST;
		break;
	default:
		break;
	}

	return NESTED_EXIT_CONTINUE;
}

void nested_svm_update_tsc_ratio_msr(struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	vcpu->arch.tsc_scaling_ratio =
		kvm_calc_nested_tsc_multiplier(vcpu->arch.l1_tsc_scaling_ratio,
					       svm->tsc_ratio_msr);
	svm_write_tsc_multiplier(vcpu);
}

/* Inverse operation of nested_copy_vmcb_control_to_cache().  asid is copied too. */
static void nested_copy_vmcb_cache_to_control(struct vmcb_control_area *dst,
					      struct vmcb_ctrl_area_cached *from)
{
	unsigned int i;

	memset(dst, 0, sizeof(struct vmcb_control_area));

	for (i = 0; i < MAX_INTERCEPT; i++)
		dst->intercepts[i] = from->intercepts[i];

	dst->iopm_base_pa = from->iopm_base_pa;
	dst->msrpm_base_pa = from->msrpm_base_pa;
	dst->tsc_offset = from->tsc_offset;
	dst->asid = from->asid;
	dst->tlb_ctl = from->tlb_ctl;
	dst->int_ctl = from->int_ctl;
	dst->int_vector = from->int_vector;
	dst->int_state = from->int_state;
	dst->exit_code = from->exit_code;
	dst->exit_code_hi = from->exit_code_hi;
	dst->exit_info_1 = from->exit_info_1;
	dst->exit_info_2 = from->exit_info_2;
	dst->exit_int_info = from->exit_int_info;
	dst->exit_int_info_err = from->exit_int_info_err;
	dst->nested_ctl = from->nested_ctl;
	dst->event_inj = from->event_inj;
	dst->event_inj_err = from->event_inj_err;
	dst->next_rip = from->next_rip;
	dst->nested_cr3 = from->nested_cr3;
	dst->virt_ext = from->virt_ext;
	dst->pause_filter_count = from->pause_filter_count;
	dst->pause_filter_thresh = from->pause_filter_thresh;
	/* 'clean' and 'hv_enlightenments' are not changed by KVM */
}

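/*
 * The SVM nested state blob is the kvm_nested_state header followed by
 * KVM_STATE_NESTED_SVM_VMCB_SIZE bytes holding vmcb12's control and save
 * areas (see the clear_user()/copy_to_user() calls below).
 */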
static int svm_get_nested_state(struct kvm_vcpu *vcpu,
				struct kvm_nested_state __user *user_kvm_nested_state,
				u32 user_data_size)
{
	struct vcpu_svm *svm;
	struct vmcb_control_area *ctl;
	unsigned long r;
	struct kvm_nested_state kvm_state = {
		.flags = 0,
		.format = KVM_STATE_NESTED_FORMAT_SVM,
		.size = sizeof(kvm_state),
	};
	struct vmcb __user *user_vmcb = (struct vmcb __user *)
		&user_kvm_nested_state->data.svm[0];

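	/*
	 * A NULL vCPU is the KVM_CHECK_EXTENSION(KVM_CAP_NESTED_STATE) size
	 * query: report the buffer size needed for the header plus one VMCB.
	 */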
	if (!vcpu)
		return kvm_state.size + KVM_STATE_NESTED_SVM_VMCB_SIZE;

	svm = to_svm(vcpu);

	if (user_data_size < kvm_state.size)
		goto out;

	/* First fill in the header and copy it out. */
	if (is_guest_mode(vcpu)) {
		kvm_state.hdr.svm.vmcb_pa = svm->nested.vmcb12_gpa;
		kvm_state.size += KVM_STATE_NESTED_SVM_VMCB_SIZE;
		kvm_state.flags |= KVM_STATE_NESTED_GUEST_MODE;

		if (svm->nested.nested_run_pending)
			kvm_state.flags |= KVM_STATE_NESTED_RUN_PENDING;
	}

	if (gif_set(svm))
		kvm_state.flags |= KVM_STATE_NESTED_GIF_SET;

	if (copy_to_user(user_kvm_nested_state, &kvm_state, sizeof(kvm_state)))
		return -EFAULT;

	if (!is_guest_mode(vcpu))
		goto out;

	/*
	 * Copy over the full size of the VMCB rather than just the size
	 * of the structs.
	 */
	if (clear_user(user_vmcb, KVM_STATE_NESTED_SVM_VMCB_SIZE))
		return -EFAULT;

	ctl = kzalloc(sizeof(*ctl), GFP_KERNEL);
	if (!ctl)
		return -ENOMEM;

	nested_copy_vmcb_cache_to_control(ctl, &svm->nested.ctl);
	r = copy_to_user(&user_vmcb->control, ctl,
			 sizeof(user_vmcb->control));
	kfree(ctl);
	if (r)
		return -EFAULT;

	if (copy_to_user(&user_vmcb->save, &svm->vmcb01.ptr->save,
			 sizeof(user_vmcb->save)))
		return -EFAULT;
out:
	return kvm_state.size;
}

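/*
 * Implements KVM_SET_NESTED_STATE: validate the flags and the vmcb12
 * control/save areas provided by userspace, then re-enter guest mode by
 * rebuilding vmcb02. Guest memory is not touched here; anything that needs
 * it is deferred to svm_get_nested_state_pages() via
 * KVM_REQ_GET_NESTED_STATE_PAGES.
 */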
static int svm_set_nested_state(struct kvm_vcpu *vcpu,
				struct kvm_nested_state __user *user_kvm_nested_state,
				struct kvm_nested_state *kvm_state)
{
	struct vcpu_svm *svm = to_svm(vcpu);
	struct vmcb __user *user_vmcb = (struct vmcb __user *)
		&user_kvm_nested_state->data.svm[0];
	struct vmcb_control_area *ctl;
	struct vmcb_save_area *save;
	struct vmcb_save_area_cached save_cached;
	struct vmcb_ctrl_area_cached ctl_cached;
	unsigned long cr0;
	int ret;

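	/* The fixed-size VMCB blob must hold both the control and save areas. */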
	BUILD_BUG_ON(sizeof(struct vmcb_control_area) + sizeof(struct vmcb_save_area) >
		     KVM_STATE_NESTED_SVM_VMCB_SIZE);

	if (kvm_state->format != KVM_STATE_NESTED_FORMAT_SVM)
		return -EINVAL;

	if (kvm_state->flags & ~(KVM_STATE_NESTED_GUEST_MODE |
				 KVM_STATE_NESTED_RUN_PENDING |
				 KVM_STATE_NESTED_GIF_SET))
		return -EINVAL;

	/*
	 * If in guest mode, vcpu->arch.efer actually refers to the L2 guest's
	 * EFER.SVME, but EFER.SVME still has to be 1 for VMRUN to succeed.
	 */
	if (!(vcpu->arch.efer & EFER_SVME)) {
		/* GIF=1 and no guest mode are required if SVME=0. */
		if (kvm_state->flags != KVM_STATE_NESTED_GIF_SET)
			return -EINVAL;
	}

	/* SMM temporarily disables SVM, so we cannot be in guest mode. */
	if (is_smm(vcpu) && (kvm_state->flags & KVM_STATE_NESTED_GUEST_MODE))
		return -EINVAL;

	if (!(kvm_state->flags & KVM_STATE_NESTED_GUEST_MODE)) {
		svm_leave_nested(vcpu);
		svm_set_gif(svm, !!(kvm_state->flags & KVM_STATE_NESTED_GIF_SET));
		return 0;
	}

	if (!page_address_valid(vcpu, kvm_state->hdr.svm.vmcb_pa))
		return -EINVAL;
	if (kvm_state->size < sizeof(*kvm_state) + KVM_STATE_NESTED_SVM_VMCB_SIZE)
		return -EINVAL;

	ret = -ENOMEM;
	ctl = kzalloc(sizeof(*ctl), GFP_KERNEL_ACCOUNT);
	save = kzalloc(sizeof(*save), GFP_KERNEL_ACCOUNT);
	if (!ctl || !save)
		goto out_free;

	ret = -EFAULT;
	if (copy_from_user(ctl, &user_vmcb->control, sizeof(*ctl)))
		goto out_free;
	if (copy_from_user(save, &user_vmcb->save, sizeof(*save)))
		goto out_free;

	ret = -EINVAL;
	__nested_copy_vmcb_control_to_cache(vcpu, &ctl_cached, ctl);
	if (!__nested_vmcb_check_controls(vcpu, &ctl_cached))
		goto out_free;

	/*
	 * Processor state contains L2 state. Check that it is
	 * valid for guest mode (see nested_vmcb_check_save).
	 */
	cr0 = kvm_read_cr0(vcpu);
	if (((cr0 & X86_CR0_CD) == 0) && (cr0 & X86_CR0_NW))
		goto out_free;

	/*
	 * Validate host state saved from before VMRUN (see
	 * nested_svm_check_permissions).
	 */
	__nested_copy_vmcb_save_to_cache(&save_cached, save);
	if (!(save->cr0 & X86_CR0_PG) ||
	    !(save->cr0 & X86_CR0_PE) ||
	    (save->rflags & X86_EFLAGS_VM) ||
	    !__nested_vmcb_check_save(vcpu, &save_cached))
		goto out_free;

	/*
	 * All checks done, we can enter guest mode. Userspace provides
	 * vmcb12.control, which will be combined with L1's controls and
	 * stored into vmcb02, and the L1 save state, which we store in
	 * vmcb01. If needed, L2 registers are moved from the current VMCB
	 * to vmcb02.
	 */

	if (is_guest_mode(vcpu))
		svm_leave_nested(vcpu);
	else
		svm->nested.vmcb02.ptr->save = svm->vmcb01.ptr->save;

	svm_set_gif(svm, !!(kvm_state->flags & KVM_STATE_NESTED_GIF_SET));

	svm->nested.nested_run_pending =
		!!(kvm_state->flags & KVM_STATE_NESTED_RUN_PENDING);

	svm->nested.vmcb12_gpa = kvm_state->hdr.svm.vmcb_pa;

	svm_copy_vmrun_state(&svm->vmcb01.ptr->save, save);
	nested_copy_vmcb_control_to_cache(svm, ctl);

	svm_switch_vmcb(svm, &svm->nested.vmcb02);
	nested_vmcb02_prepare_control(svm, svm->vmcb->save.rip, svm->vmcb->save.cs.base);

	/*
	 * While the nested guest CR3 is already checked and set by
	 * KVM_SET_SREGS, it was set while the nested state was not yet
	 * loaded, thus the MMU might not be initialized correctly.
	 * Set it again to fix this.
	 */

	ret = nested_svm_load_cr3(&svm->vcpu, vcpu->arch.cr3,
				  nested_npt_enabled(svm), false);
	if (WARN_ON_ONCE(ret))
		goto out_free;

	svm->nested.force_msr_bitmap_recalc = true;

	kvm_make_request(KVM_REQ_GET_NESTED_STATE_PAGES, vcpu);
	ret = 0;
out_free:
	kfree(save);
	kfree(ctl);

	return ret;
}

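/*
 * Deferred from svm_set_nested_state() (via KVM_REQ_GET_NESTED_STATE_PAGES)
 * to the first KVM_RUN, when guest memory is accessible: reload the PDPTRs
 * if necessary, merge the nested MSR permission bitmap, and verify the
 * Hyper-V VP assist page if one is configured.
 */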
static bool svm_get_nested_state_pages(struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	if (WARN_ON(!is_guest_mode(vcpu)))
		return true;

	if (!vcpu->arch.pdptrs_from_userspace &&
	    !nested_npt_enabled(svm) && is_pae_paging(vcpu))
		/*
		 * Reload the guest's PDPTRs since after a migration
		 * the guest CR3 might be restored prior to setting the nested
		 * state, which can lead to a load of wrong PDPTRs.
		 */
		if (CC(!load_pdptrs(vcpu, vcpu->arch.cr3)))
			return false;

	if (!nested_svm_vmrun_msrpm(svm)) {
		vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
		vcpu->run->internal.suberror =
			KVM_INTERNAL_ERROR_EMULATION;
		vcpu->run->internal.ndata = 0;
		return false;
	}

	if (kvm_hv_verify_vp_assist(vcpu))
		return false;

	return true;
}

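/* Nested virtualization callbacks invoked by the common x86 KVM code. */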
struct kvm_x86_nested_ops svm_nested_ops = {
	.leave_nested = svm_leave_nested,
	.is_exception_vmexit = nested_svm_is_exception_vmexit,
	.check_events = svm_check_nested_events,
	.triple_fault = nested_svm_triple_fault,
	.get_nested_state_pages = svm_get_nested_state_pages,
	.get_state = svm_get_nested_state,
	.set_state = svm_set_nested_state,
	.hv_inject_synthetic_vmexit_post_tlb_flush = svm_hv_inject_synthetic_vmexit_post_tlb_flush,
};