// SPDX-License-Identifier: GPL-2.0-only
/*
 * Kernel-based Virtual Machine driver for Linux
 *
 * AMD SVM support
 *
 * Copyright (C) 2006 Qumranet, Inc.
 * Copyright 2010 Red Hat, Inc. and/or its affiliates.
 *
 * Authors:
 *   Yaniv Kamay  <yaniv@qumranet.com>
 *   Avi Kivity   <avi@qumranet.com>
 */

#ifndef __SVM_SVM_H
#define __SVM_SVM_H

#include <linux/kvm_types.h>
#include <linux/kvm_host.h>
#include <linux/bits.h>

#include <asm/svm.h>
#include <asm/sev-common.h>

#include "cpuid.h"
#include "kvm_cache_regs.h"

#define __sme_page_pa(x) __sme_set(page_to_pfn(x) << PAGE_SHIFT)

#define IOPM_SIZE	(PAGE_SIZE * 3)
#define MSRPM_SIZE	(PAGE_SIZE * 2)

#define MAX_DIRECT_ACCESS_MSRS	47
#define MSRPM_OFFSETS		32
extern u32 msrpm_offsets[MSRPM_OFFSETS] __read_mostly;
extern bool npt_enabled;
extern int nrips;
extern int vgif;
extern bool intercept_smi;
extern bool x2avic_enabled;
extern bool vnmi;

/*
 * Clean bits in VMCB.
 * VMCB_ALL_CLEAN_MASK might also need to
 * be updated if this enum is modified.
 */
enum {
	VMCB_INTERCEPTS, /* Intercept vectors, TSC offset,
			    pause filter count */
	VMCB_PERM_MAP,	 /* IOPM Base and MSRPM Base */
	VMCB_ASID,	 /* ASID */
	VMCB_INTR,	 /* int_ctl, int_vector */
	VMCB_NPT,	 /* npt_en, nCR3, gPAT */
	VMCB_CR,	 /* CR0, CR3, CR4, EFER */
	VMCB_DR,	 /* DR6, DR7 */
	VMCB_DT,	 /* GDT, IDT */
	VMCB_SEG,	 /* CS, DS, SS, ES, CPL */
	VMCB_CR2,	 /* CR2 only */
	VMCB_LBR,	 /* DBGCTL, BR_FROM, BR_TO, LAST_EX_FROM, LAST_EX_TO */
	VMCB_AVIC,	 /* AVIC APIC_BAR, AVIC APIC_BACKING_PAGE,
			  * AVIC PHYSICAL_TABLE pointer,
			  * AVIC LOGICAL_TABLE pointer
			  */
	VMCB_SW = 31,	 /* Reserved for hypervisor/software use */
};

#define VMCB_ALL_CLEAN_MASK (					\
	(1U << VMCB_INTERCEPTS) | (1U << VMCB_PERM_MAP) |	\
	(1U << VMCB_ASID) | (1U << VMCB_INTR) |			\
	(1U << VMCB_NPT) | (1U << VMCB_CR) | (1U << VMCB_DR) |	\
	(1U << VMCB_DT) | (1U << VMCB_SEG) | (1U << VMCB_CR2) |	\
	(1U << VMCB_LBR) | (1U << VMCB_AVIC) |			\
	(1U << VMCB_SW))

/* TPR and CR2 are always written before VMRUN */
#define VMCB_ALWAYS_DIRTY_MASK	((1U << VMCB_INTR) | (1U << VMCB_CR2))
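
/*
 * Illustrative usage (a sketch; V_IRQ_MASK comes from <asm/svm.h>): after
 * software modifies VMCB state that the CPU may have cached, the matching
 * clean bit must be cleared so hardware reloads that area on the next
 * VMRUN:
 *
 *	svm->vmcb->control.int_ctl |= V_IRQ_MASK;
 *	vmcb_mark_dirty(svm->vmcb, VMCB_INTR);
 */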

struct kvm_sev_info {
	bool active;		/* SEV enabled guest */
	bool es_active;		/* SEV-ES enabled guest */
	unsigned int asid;	/* ASID used for this guest */
	unsigned int handle;	/* SEV firmware handle */
	int fd;			/* SEV device fd */
	unsigned long pages_locked; /* Number of pages locked */
	struct list_head regions_list;	/* List of registered regions */
	u64 ap_jump_table;	/* SEV-ES AP Jump Table address */
	struct kvm *enc_context_owner; /* Owner of copied encryption context */
	struct list_head mirror_vms; /* List of VMs mirroring */
	struct list_head mirror_entry; /* Use as a list entry of mirrors */
	struct misc_cg *misc_cg; /* For misc cgroup accounting */
	atomic_t migration_in_progress;
};

struct kvm_svm {
	struct kvm kvm;

	/* Struct members for AVIC */
	u32 avic_vm_id;
	struct page *avic_logical_id_table_page;
	struct page *avic_physical_id_table_page;
	struct hlist_node hnode;

	struct kvm_sev_info sev_info;
};

struct kvm_vcpu;

struct kvm_vmcb_info {
	struct vmcb *ptr;
	unsigned long pa;
	int cpu;
	uint64_t asid_generation;
};

struct vmcb_save_area_cached {
	u64 efer;
	u64 cr4;
	u64 cr3;
	u64 cr0;
	u64 dr7;
	u64 dr6;
};

struct vmcb_ctrl_area_cached {
	u32 intercepts[MAX_INTERCEPT];
	u16 pause_filter_thresh;
	u16 pause_filter_count;
	u64 iopm_base_pa;
	u64 msrpm_base_pa;
	u64 tsc_offset;
	u32 asid;
	u8 tlb_ctl;
	u32 int_ctl;
	u32 int_vector;
	u32 int_state;
	u32 exit_code;
	u32 exit_code_hi;
	u64 exit_info_1;
	u64 exit_info_2;
	u32 exit_int_info;
	u32 exit_int_info_err;
	u64 nested_ctl;
	u32 event_inj;
	u32 event_inj_err;
	u64 next_rip;
	u64 nested_cr3;
	u64 virt_ext;
	u32 clean;
	union {
#if IS_ENABLED(CONFIG_HYPERV) || IS_ENABLED(CONFIG_KVM_HYPERV)
		struct hv_vmcb_enlightenments hv_enlightenments;
#endif
		u8 reserved_sw[32];
	};
};

struct svm_nested_state {
	struct kvm_vmcb_info vmcb02;
	u64 hsave_msr;
	u64 vm_cr_msr;
	u64 vmcb12_gpa;
	u64 last_vmcb12_gpa;

	/* These are the merged vectors */
	u32 *msrpm;

	/* A VMRUN has started but has not yet been performed, so
	 * we cannot inject a nested vmexit yet.  */
	bool nested_run_pending;

	/* cache for control fields of the guest */
	struct vmcb_ctrl_area_cached ctl;

	/*
	 * Note: this struct is not kept up-to-date while L2 runs; it is only
	 * valid within nested_svm_vmrun.
	 */
	struct vmcb_save_area_cached save;

	bool initialized;

	/*
	 * Indicates whether MSR bitmap for L2 needs to be rebuilt due to
	 * changes in MSR bitmap for L1 or switching to a different L2. Note,
	 * this flag can only be used reliably in conjunction with a paravirt L1
	 * which informs L0 whether any changes to MSR bitmap for L2 were done
	 * on its side.
	 */
	bool force_msr_bitmap_recalc;
};
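
/*
 * Illustrative flow for force_msr_bitmap_recalc (a sketch of the intended
 * usage, not a definition from this file): when L0 changes an MSR intercept
 * for L1, e.g. via
 *
 *	set_msr_interception(vcpu, svm->msrpm, MSR_IA32_SPEC_CTRL, 1, 1);
 *
 * the cached merge of L0's and L1's bitmaps goes stale, so the flag is set
 * and the next nested VMRUN rebuilds nested.msrpm instead of reusing it.
 */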

struct vcpu_sev_es_state {
	/* SEV-ES support */
	struct sev_es_save_area *vmsa;
	struct ghcb *ghcb;
	u8 valid_bitmap[16];
	struct kvm_host_map ghcb_map;
	bool received_first_sipi;

	/* SEV-ES scratch area support */
	u64 sw_scratch;
	void *ghcb_sa;
	u32 ghcb_sa_len;
	bool ghcb_sa_sync;
	bool ghcb_sa_free;
};

struct vcpu_svm {
	struct kvm_vcpu vcpu;
	/* vmcb always points at current_vmcb->ptr, it's purely a shorthand. */
	struct vmcb *vmcb;
	struct kvm_vmcb_info vmcb01;
	struct kvm_vmcb_info *current_vmcb;
	u32 asid;
	u32 sysenter_esp_hi;
	u32 sysenter_eip_hi;
	uint64_t tsc_aux;

	u64 msr_decfg;

	u64 next_rip;

	u64 spec_ctrl;

	u64 tsc_ratio_msr;
	/*
	 * Contains guest-controlled bits of VIRT_SPEC_CTRL, which will be
	 * translated into the appropriate L2_CFG bits on the host to
	 * perform speculative control.
	 */
	u64 virt_spec_ctrl;

	u32 *msrpm;

	ulong nmi_iret_rip;

	struct svm_nested_state nested;

	/* NMI mask value, used when vNMI is not enabled */
	bool nmi_masked;

	/*
	 * True when NMIs are still masked but guest IRET was just intercepted
	 * and KVM is waiting for RIP to change, which will signal that the
	 * intercepted IRET was retired and thus NMI can be unmasked.
	 */
	bool awaiting_iret_completion;

	/*
	 * Set when KVM is awaiting IRET completion and needs to inject NMIs as
	 * soon as the IRET completes (e.g. NMI is pending injection).  KVM
	 * temporarily steals RFLAGS.TF to single-step the guest in this case
	 * in order to regain control as soon as the NMI-blocking condition
	 * goes away.
	 */
	bool nmi_singlestep;
	u64 nmi_singlestep_guest_rflags;

	bool nmi_l1_to_l2;

	unsigned long soft_int_csbase;
	unsigned long soft_int_old_rip;
	unsigned long soft_int_next_rip;
	bool soft_int_injected;

	u32 ldr_reg;
	u32 dfr_reg;
	struct page *avic_backing_page;
	u64 *avic_physical_id_cache;

	/*
	 * Per-vcpu list of struct amd_svm_iommu_ir:
	 * This is used mainly to store interrupt remapping information used
	 * when updating the vcpu affinity.  This avoids the need to scan for
	 * the IRTE and try to match ga_tag in the IOMMU driver.
	 */
	struct list_head ir_list;
	spinlock_t ir_list_lock;

	/* Save desired MSR intercept (read: pass-through) state */
	struct {
		DECLARE_BITMAP(read, MAX_DIRECT_ACCESS_MSRS);
		DECLARE_BITMAP(write, MAX_DIRECT_ACCESS_MSRS);
	} shadow_msr_intercept;

	struct vcpu_sev_es_state sev_es;

	bool guest_state_loaded;

	bool x2avic_msrs_intercepted;

	/* Guest GIF value, used when vGIF is not enabled */
	bool guest_gif;
};

struct svm_cpu_data {
	u64 asid_generation;
	u32 max_asid;
	u32 next_asid;
	u32 min_asid;

	struct page *save_area;
	unsigned long save_area_pa;

	struct vmcb *current_vmcb;

	/* index = sev_asid, value = vmcb pointer */
	struct vmcb **sev_vmcbs;
};

DECLARE_PER_CPU(struct svm_cpu_data, svm_data);

void recalc_intercepts(struct vcpu_svm *svm);

static __always_inline struct kvm_svm *to_kvm_svm(struct kvm *kvm)
{
	return container_of(kvm, struct kvm_svm, kvm);
}

static __always_inline bool sev_guest(struct kvm *kvm)
{
#ifdef CONFIG_KVM_AMD_SEV
	struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;

	return sev->active;
#else
	return false;
#endif
}

static __always_inline bool sev_es_guest(struct kvm *kvm)
{
#ifdef CONFIG_KVM_AMD_SEV
	struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;

	return sev->es_active && !WARN_ON_ONCE(!sev->active);
#else
	return false;
#endif
}

static inline void vmcb_mark_all_dirty(struct vmcb *vmcb)
{
	vmcb->control.clean = 0;
}

static inline void vmcb_mark_all_clean(struct vmcb *vmcb)
{
	vmcb->control.clean = VMCB_ALL_CLEAN_MASK
			       & ~VMCB_ALWAYS_DIRTY_MASK;
}

static inline void vmcb_mark_dirty(struct vmcb *vmcb, int bit)
{
	vmcb->control.clean &= ~(1 << bit);
}

static inline bool vmcb_is_dirty(struct vmcb *vmcb, int bit)
{
	return !test_bit(bit, (unsigned long *)&vmcb->control.clean);
}
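
/*
 * Illustrative usage (a sketch): pair every write of cached VMCB state with
 * vmcb_mark_dirty(), and mark everything clean once the CPU has consumed
 * the VMCB after VMRUN:
 *
 *	svm->vmcb->save.dr7 = dr7;
 *	vmcb_mark_dirty(svm->vmcb, VMCB_DR);
 *	...
 *	vmcb_mark_all_clean(svm->vmcb);
 */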

static __always_inline struct vcpu_svm *to_svm(struct kvm_vcpu *vcpu)
{
	return container_of(vcpu, struct vcpu_svm, vcpu);
}

/*
 * Only the PDPTRs are loaded on demand into the shadow MMU.  All other
 * fields are synchronized on VM-Exit, because accessing the VMCB is cheap.
 *
 * CR3 might be out of date in the VMCB but it is not marked dirty; instead,
 * KVM_REQ_LOAD_MMU_PGD is always requested when the cached vcpu->arch.cr3
 * is changed.  svm_load_mmu_pgd() then syncs the new CR3 value into the VMCB.
 */
#define SVM_REGS_LAZY_LOAD_SET	(1 << VCPU_EXREG_PDPTR)
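
/*
 * A minimal sketch of the CR3 sync described above (simplified; the real
 * svm_load_mmu_pgd() in svm.c also handles the NPT and PCID cases):
 *
 *	svm->vmcb->save.cr3 = root_hpa;
 *	vmcb_mark_dirty(svm->vmcb, VMCB_CR);
 */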

static inline void vmcb_set_intercept(struct vmcb_control_area *control, u32 bit)
{
	WARN_ON_ONCE(bit >= 32 * MAX_INTERCEPT);
	__set_bit(bit, (unsigned long *)&control->intercepts);
}

static inline void vmcb_clr_intercept(struct vmcb_control_area *control, u32 bit)
{
	WARN_ON_ONCE(bit >= 32 * MAX_INTERCEPT);
	__clear_bit(bit, (unsigned long *)&control->intercepts);
}

static inline bool vmcb_is_intercept(struct vmcb_control_area *control, u32 bit)
{
	WARN_ON_ONCE(bit >= 32 * MAX_INTERCEPT);
	return test_bit(bit, (unsigned long *)&control->intercepts);
}

static inline bool vmcb12_is_intercept(struct vmcb_ctrl_area_cached *control, u32 bit)
{
	WARN_ON_ONCE(bit >= 32 * MAX_INTERCEPT);
	return test_bit(bit, (unsigned long *)&control->intercepts);
}
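
/*
 * Example (illustrative): 'bit' is one of the INTERCEPT_* vector encodings
 * from <asm/svm.h>, which index into the intercepts[] words above:
 *
 *	vmcb_set_intercept(&vmcb->control, INTERCEPT_CPUID);
 *	if (vmcb_is_intercept(&vmcb->control, INTERCEPT_CPUID))
 *		...
 */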

static inline void set_exception_intercept(struct vcpu_svm *svm, u32 bit)
{
	struct vmcb *vmcb = svm->vmcb01.ptr;

	WARN_ON_ONCE(bit >= 32);
	vmcb_set_intercept(&vmcb->control, INTERCEPT_EXCEPTION_OFFSET + bit);

	recalc_intercepts(svm);
}

static inline void clr_exception_intercept(struct vcpu_svm *svm, u32 bit)
{
	struct vmcb *vmcb = svm->vmcb01.ptr;

	WARN_ON_ONCE(bit >= 32);
	vmcb_clr_intercept(&vmcb->control, INTERCEPT_EXCEPTION_OFFSET + bit);

	recalc_intercepts(svm);
}

static inline void svm_set_intercept(struct vcpu_svm *svm, int bit)
{
	struct vmcb *vmcb = svm->vmcb01.ptr;

	vmcb_set_intercept(&vmcb->control, bit);

	recalc_intercepts(svm);
}

static inline void svm_clr_intercept(struct vcpu_svm *svm, int bit)
{
	struct vmcb *vmcb = svm->vmcb01.ptr;

	vmcb_clr_intercept(&vmcb->control, bit);

	recalc_intercepts(svm);
}

static inline bool svm_is_intercept(struct vcpu_svm *svm, int bit)
{
	return vmcb_is_intercept(&svm->vmcb->control, bit);
}
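
/*
 * Illustrative usage (a sketch): the svm_* wrappers edit vmcb01 and then
 * recalculate the effective intercepts for the VMCB actually in use, e.g.
 * to trap the guest's IRET while an injected NMI is being handled:
 *
 *	svm_set_intercept(svm, INTERCEPT_IRET);
 *	...
 *	svm_clr_intercept(svm, INTERCEPT_IRET);
 */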

static inline bool nested_vgif_enabled(struct vcpu_svm *svm)
{
	return guest_can_use(&svm->vcpu, X86_FEATURE_VGIF) &&
	       (svm->nested.ctl.int_ctl & V_GIF_ENABLE_MASK);
}

static inline struct vmcb *get_vgif_vmcb(struct vcpu_svm *svm)
{
	if (!vgif)
		return NULL;

	if (is_guest_mode(&svm->vcpu) && !nested_vgif_enabled(svm))
		return svm->nested.vmcb02.ptr;
	else
		return svm->vmcb01.ptr;
}

static inline void enable_gif(struct vcpu_svm *svm)
{
	struct vmcb *vmcb = get_vgif_vmcb(svm);

	if (vmcb)
		vmcb->control.int_ctl |= V_GIF_MASK;
	else
		svm->guest_gif = true;
}

static inline void disable_gif(struct vcpu_svm *svm)
{
	struct vmcb *vmcb = get_vgif_vmcb(svm);

	if (vmcb)
		vmcb->control.int_ctl &= ~V_GIF_MASK;
	else
		svm->guest_gif = false;
}

static inline bool gif_set(struct vcpu_svm *svm)
{
	struct vmcb *vmcb = get_vgif_vmcb(svm);

	if (vmcb)
		return !!(vmcb->control.int_ctl & V_GIF_MASK);
	else
		return svm->guest_gif;
}
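
/*
 * Example (illustrative): with vGIF, STGI/CLGI emulation reduces to
 * enable_gif()/disable_gif(), and event injection checks are gated on
 * gif_set(), e.g. in a "blocked?" predicate:
 *
 *	if (!gif_set(svm))
 *		return true;
 */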

static inline bool nested_npt_enabled(struct vcpu_svm *svm)
{
	return svm->nested.ctl.nested_ctl & SVM_NESTED_CTL_NP_ENABLE;
}

static inline bool nested_vnmi_enabled(struct vcpu_svm *svm)
{
	return guest_can_use(&svm->vcpu, X86_FEATURE_VNMI) &&
	       (svm->nested.ctl.int_ctl & V_NMI_ENABLE_MASK);
}

static inline bool is_x2apic_msrpm_offset(u32 offset)
{
	/* 4 msrs per u8, and 4 u8 in u32 */
	u32 msr = offset * 16;

	return (msr >= APIC_BASE_MSR) &&
	       (msr < (APIC_BASE_MSR + 0x100));
}
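
/*
 * Worked example: each MSR uses two permission bits, so one byte covers
 * 4 MSRs and one u32 offset covers 16.  Offset 0x80 thus corresponds to
 * MSR 0x80 * 16 == 0x800 == APIC_BASE_MSR, and offsets 0x80..0x8f cover
 * the x2APIC MSR range 0x800..0x8ff tested above.
 */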

static inline struct vmcb *get_vnmi_vmcb_l1(struct vcpu_svm *svm)
{
	if (!vnmi)
		return NULL;

	if (is_guest_mode(&svm->vcpu))
		return NULL;
	else
		return svm->vmcb01.ptr;
}

static inline bool is_vnmi_enabled(struct vcpu_svm *svm)
{
	struct vmcb *vmcb = get_vnmi_vmcb_l1(svm);

	if (vmcb)
		return !!(vmcb->control.int_ctl & V_NMI_ENABLE_MASK);
	else
		return false;
}

/* svm.c */
#define MSR_INVALID			0xffffffffU

#define DEBUGCTL_RESERVED_BITS (~(0x3fULL))

extern bool dump_invalid_vmcb;

u32 svm_msrpm_offset(u32 msr);
u32 *svm_vcpu_alloc_msrpm(void);
void svm_vcpu_init_msrpm(struct kvm_vcpu *vcpu, u32 *msrpm);
void svm_vcpu_free_msrpm(u32 *msrpm);
void svm_copy_lbrs(struct vmcb *to_vmcb, struct vmcb *from_vmcb);
void svm_update_lbrv(struct kvm_vcpu *vcpu);

int svm_set_efer(struct kvm_vcpu *vcpu, u64 efer);
void svm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0);
void svm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4);
void disable_nmi_singlestep(struct vcpu_svm *svm);
bool svm_smi_blocked(struct kvm_vcpu *vcpu);
bool svm_nmi_blocked(struct kvm_vcpu *vcpu);
bool svm_interrupt_blocked(struct kvm_vcpu *vcpu);
void svm_set_gif(struct vcpu_svm *svm, bool value);
int svm_invoke_exit_handler(struct kvm_vcpu *vcpu, u64 exit_code);
void set_msr_interception(struct kvm_vcpu *vcpu, u32 *msrpm, u32 msr,
			  int read, int write);
void svm_set_x2apic_msr_interception(struct vcpu_svm *svm, bool disable);
void svm_complete_interrupt_delivery(struct kvm_vcpu *vcpu, int delivery_mode,
				     int trig_mode, int vec);

/* nested.c */

#define NESTED_EXIT_HOST	0	/* Exit handled on host level */
#define NESTED_EXIT_DONE	1	/* Exit caused nested vmexit  */
#define NESTED_EXIT_CONTINUE	2	/* Further checks needed      */

static inline bool nested_svm_virtualize_tpr(struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	return is_guest_mode(vcpu) && (svm->nested.ctl.int_ctl & V_INTR_MASKING_MASK);
}

static inline bool nested_exit_on_smi(struct vcpu_svm *svm)
{
	return vmcb12_is_intercept(&svm->nested.ctl, INTERCEPT_SMI);
}

static inline bool nested_exit_on_intr(struct vcpu_svm *svm)
{
	return vmcb12_is_intercept(&svm->nested.ctl, INTERCEPT_INTR);
}

static inline bool nested_exit_on_nmi(struct vcpu_svm *svm)
{
	return vmcb12_is_intercept(&svm->nested.ctl, INTERCEPT_NMI);
}

int enter_svm_guest_mode(struct kvm_vcpu *vcpu,
			 u64 vmcb_gpa, struct vmcb *vmcb12, bool from_vmrun);
void svm_leave_nested(struct kvm_vcpu *vcpu);
void svm_free_nested(struct vcpu_svm *svm);
int svm_allocate_nested(struct vcpu_svm *svm);
int nested_svm_vmrun(struct kvm_vcpu *vcpu);
void svm_copy_vmrun_state(struct vmcb_save_area *to_save,
			  struct vmcb_save_area *from_save);
void svm_copy_vmloadsave_state(struct vmcb *to_vmcb, struct vmcb *from_vmcb);
int nested_svm_vmexit(struct vcpu_svm *svm);

static inline int nested_svm_simple_vmexit(struct vcpu_svm *svm, u32 exit_code)
{
	svm->vmcb->control.exit_code = exit_code;
	svm->vmcb->control.exit_info_1 = 0;
	svm->vmcb->control.exit_info_2 = 0;
	return nested_svm_vmexit(svm);
}
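
/*
 * Illustrative usage (a sketch): reflect an exit that carries no exit
 * information to L1, e.g. when L1 intercepts physical interrupts:
 *
 *	if (nested_exit_on_intr(svm))
 *		return nested_svm_simple_vmexit(svm, SVM_EXIT_INTR);
 */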

int nested_svm_exit_handled(struct vcpu_svm *svm);
int nested_svm_check_permissions(struct kvm_vcpu *vcpu);
int nested_svm_check_exception(struct vcpu_svm *svm, unsigned nr,
			       bool has_error_code, u32 error_code);
int nested_svm_exit_special(struct vcpu_svm *svm);
void nested_svm_update_tsc_ratio_msr(struct kvm_vcpu *vcpu);
void svm_write_tsc_multiplier(struct kvm_vcpu *vcpu);
void nested_copy_vmcb_control_to_cache(struct vcpu_svm *svm,
				       struct vmcb_control_area *control);
void nested_copy_vmcb_save_to_cache(struct vcpu_svm *svm,
				    struct vmcb_save_area *save);
void nested_sync_control_from_vmcb02(struct vcpu_svm *svm);
void nested_vmcb02_compute_g_pat(struct vcpu_svm *svm);
void svm_switch_vmcb(struct vcpu_svm *svm, struct kvm_vmcb_info *target_vmcb);

extern struct kvm_x86_nested_ops svm_nested_ops;

/* avic.c */
#define AVIC_REQUIRED_APICV_INHIBITS			\
(							\
	BIT(APICV_INHIBIT_REASON_DISABLE) |		\
	BIT(APICV_INHIBIT_REASON_ABSENT) |		\
	BIT(APICV_INHIBIT_REASON_HYPERV) |		\
	BIT(APICV_INHIBIT_REASON_NESTED) |		\
	BIT(APICV_INHIBIT_REASON_IRQWIN) |		\
	BIT(APICV_INHIBIT_REASON_PIT_REINJ) |		\
	BIT(APICV_INHIBIT_REASON_BLOCKIRQ) |		\
	BIT(APICV_INHIBIT_REASON_SEV) |			\
	BIT(APICV_INHIBIT_REASON_PHYSICAL_ID_ALIASED) |	\
	BIT(APICV_INHIBIT_REASON_APIC_ID_MODIFIED) |	\
	BIT(APICV_INHIBIT_REASON_APIC_BASE_MODIFIED) |	\
	BIT(APICV_INHIBIT_REASON_LOGICAL_ID_ALIASED)	\
)

bool avic_hardware_setup(void);
int avic_ga_log_notifier(u32 ga_tag);
void avic_vm_destroy(struct kvm *kvm);
int avic_vm_init(struct kvm *kvm);
void avic_init_vmcb(struct vcpu_svm *svm, struct vmcb *vmcb);
int avic_incomplete_ipi_interception(struct kvm_vcpu *vcpu);
int avic_unaccelerated_access_interception(struct kvm_vcpu *vcpu);
int avic_init_vcpu(struct vcpu_svm *svm);
void avic_vcpu_load(struct kvm_vcpu *vcpu, int cpu);
void avic_vcpu_put(struct kvm_vcpu *vcpu);
void avic_apicv_post_state_restore(struct kvm_vcpu *vcpu);
void avic_refresh_apicv_exec_ctrl(struct kvm_vcpu *vcpu);
int avic_pi_update_irte(struct kvm *kvm, unsigned int host_irq,
			uint32_t guest_irq, bool set);
void avic_vcpu_blocking(struct kvm_vcpu *vcpu);
void avic_vcpu_unblocking(struct kvm_vcpu *vcpu);
void avic_ring_doorbell(struct kvm_vcpu *vcpu);
unsigned long avic_vcpu_get_apicv_inhibit_reasons(struct kvm_vcpu *vcpu);
void avic_refresh_virtual_apic_mode(struct kvm_vcpu *vcpu);

/* sev.c */

#define GHCB_VERSION_MAX	1ULL
#define GHCB_VERSION_MIN	1ULL

extern unsigned int max_sev_asid;

void sev_vm_destroy(struct kvm *kvm);
int sev_mem_enc_ioctl(struct kvm *kvm, void __user *argp);
int sev_mem_enc_register_region(struct kvm *kvm,
				struct kvm_enc_region *range);
int sev_mem_enc_unregister_region(struct kvm *kvm,
				  struct kvm_enc_region *range);
int sev_vm_copy_enc_context_from(struct kvm *kvm, unsigned int source_fd);
int sev_vm_move_enc_context_from(struct kvm *kvm, unsigned int source_fd);
void sev_guest_memory_reclaimed(struct kvm *kvm);

void pre_sev_run(struct vcpu_svm *svm, int cpu);
void __init sev_set_cpu_caps(void);
void __init sev_hardware_setup(void);
void sev_hardware_unsetup(void);
int sev_cpu_init(struct svm_cpu_data *sd);
void sev_init_vmcb(struct vcpu_svm *svm);
void sev_vcpu_after_set_cpuid(struct vcpu_svm *svm);
void sev_free_vcpu(struct kvm_vcpu *vcpu);
int sev_handle_vmgexit(struct kvm_vcpu *vcpu);
int sev_es_string_io(struct vcpu_svm *svm, int size, unsigned int port, int in);
void sev_es_vcpu_reset(struct vcpu_svm *svm);
void sev_vcpu_deliver_sipi_vector(struct kvm_vcpu *vcpu, u8 vector);
void sev_es_prepare_switch_to_guest(struct sev_es_save_area *hostsa);
void sev_es_unmap_ghcb(struct vcpu_svm *svm);
struct page *snp_safe_alloc_page(struct kvm_vcpu *vcpu);

/* vmenter.S */

void __svm_sev_es_vcpu_run(struct vcpu_svm *svm, bool spec_ctrl_intercepted,
			   struct sev_es_save_area *hostsa);
void __svm_vcpu_run(struct vcpu_svm *svm, bool spec_ctrl_intercepted);

#define DEFINE_KVM_GHCB_ACCESSORS(field)					\
	static __always_inline bool kvm_ghcb_##field##_is_valid(const struct vcpu_svm *svm) \
	{									\
		return test_bit(GHCB_BITMAP_IDX(field),				\
				(unsigned long *)&svm->sev_es.valid_bitmap);	\
	}									\
										\
	static __always_inline u64 kvm_ghcb_get_##field##_if_valid(struct vcpu_svm *svm, struct ghcb *ghcb) \
	{									\
		return kvm_ghcb_##field##_is_valid(svm) ? ghcb->save.field : 0;	\
	}

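/*
 * For reference, DEFINE_KVM_GHCB_ACCESSORS(rax) expands to:
 *
 *	static __always_inline bool kvm_ghcb_rax_is_valid(const struct vcpu_svm *svm)
 *	{
 *		return test_bit(GHCB_BITMAP_IDX(rax),
 *				(unsigned long *)&svm->sev_es.valid_bitmap);
 *	}
 *
 *	static __always_inline u64 kvm_ghcb_get_rax_if_valid(struct vcpu_svm *svm, struct ghcb *ghcb)
 *	{
 *		return kvm_ghcb_rax_is_valid(svm) ? ghcb->save.rax : 0;
 *	}
 */
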
DEFINE_KVM_GHCB_ACCESSORS(cpl)
DEFINE_KVM_GHCB_ACCESSORS(rax)
DEFINE_KVM_GHCB_ACCESSORS(rcx)
DEFINE_KVM_GHCB_ACCESSORS(rdx)
DEFINE_KVM_GHCB_ACCESSORS(rbx)
DEFINE_KVM_GHCB_ACCESSORS(rsi)
DEFINE_KVM_GHCB_ACCESSORS(sw_exit_code)
DEFINE_KVM_GHCB_ACCESSORS(sw_exit_info_1)
DEFINE_KVM_GHCB_ACCESSORS(sw_exit_info_2)
DEFINE_KVM_GHCB_ACCESSORS(sw_scratch)
DEFINE_KVM_GHCB_ACCESSORS(xcr0)

#endif /* __SVM_SVM_H */