/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Kernel-based Virtual Machine driver for Linux
 *
 * This header defines architecture specific interfaces, x86 version
 */

#ifndef _ASM_X86_KVM_HOST_H
#define _ASM_X86_KVM_HOST_H

#include <linux/types.h>
#include <linux/mm.h>
#include <linux/mmu_notifier.h>
#include <linux/tracepoint.h>
#include <linux/cpumask.h>
#include <linux/irq_work.h>
#include <linux/irq.h>
#include <linux/workqueue.h>

#include <linux/kvm.h>
#include <linux/kvm_para.h>
#include <linux/kvm_types.h>
#include <linux/perf_event.h>
#include <linux/pvclock_gtod.h>
#include <linux/clocksource.h>
#include <linux/irqbypass.h>
#include <linux/hyperv.h>
#include <linux/kfifo.h>

#include <asm/apic.h>
#include <asm/pvclock-abi.h>
#include <asm/desc.h>
#include <asm/mtrr.h>
#include <asm/msr-index.h>
#include <asm/asm.h>
#include <asm/kvm_page_track.h>
#include <asm/kvm_vcpu_regs.h>
#include <asm/hyperv-tlfs.h>

#define __KVM_HAVE_ARCH_VCPU_DEBUGFS

/*
 * CONFIG_KVM_MAX_NR_VCPUS is defined iff CONFIG_KVM!=n, provide a dummy max if
 * KVM is disabled (arbitrarily use the default from CONFIG_KVM_MAX_NR_VCPUS).
 */
#ifdef CONFIG_KVM_MAX_NR_VCPUS
#define KVM_MAX_VCPUS CONFIG_KVM_MAX_NR_VCPUS
#else
#define KVM_MAX_VCPUS 1024
#endif

/*
 * In x86, the VCPU ID corresponds to the APIC ID, and APIC IDs
 * might be larger than the actual number of VCPUs because the
 * APIC ID encodes CPU topology information.
 *
 * In the worst case, we'll need less than one extra bit for the
 * Core ID, and less than one extra bit for the Package (Die) ID,
 * so a ratio of 4 should be enough.
 */
#define KVM_VCPU_ID_RATIO 4
#define KVM_MAX_VCPU_IDS (KVM_MAX_VCPUS * KVM_VCPU_ID_RATIO)
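
/*
 * Illustrative arithmetic (not part of the ABI): with the default
 * CONFIG_KVM_MAX_NR_VCPUS of 1024, KVM_MAX_VCPU_IDS = 1024 * 4 = 4096,
 * i.e. APIC IDs 0..4095 are accepted even though at most 1024 vCPUs exist.
 */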

/* memory slots that are not exposed to userspace */
#define KVM_INTERNAL_MEM_SLOTS 3

#define KVM_HALT_POLL_NS_DEFAULT 200000

#define KVM_IRQCHIP_NUM_PINS  KVM_IOAPIC_NUM_PINS

#define KVM_DIRTY_LOG_MANUAL_CAPS (KVM_DIRTY_LOG_MANUAL_PROTECT_ENABLE | \
				   KVM_DIRTY_LOG_INITIALLY_SET)

#define KVM_BUS_LOCK_DETECTION_VALID_MODE (KVM_BUS_LOCK_DETECTION_OFF | \
					   KVM_BUS_LOCK_DETECTION_EXIT)

#define KVM_X86_NOTIFY_VMEXIT_VALID_BITS (KVM_X86_NOTIFY_VMEXIT_ENABLED | \
					  KVM_X86_NOTIFY_VMEXIT_USER)

/* x86-specific vcpu->requests bit members */
#define KVM_REQ_MIGRATE_TIMER		KVM_ARCH_REQ(0)
#define KVM_REQ_REPORT_TPR_ACCESS	KVM_ARCH_REQ(1)
#define KVM_REQ_TRIPLE_FAULT		KVM_ARCH_REQ(2)
#define KVM_REQ_MMU_SYNC		KVM_ARCH_REQ(3)
#define KVM_REQ_CLOCK_UPDATE		KVM_ARCH_REQ(4)
#define KVM_REQ_LOAD_MMU_PGD		KVM_ARCH_REQ(5)
#define KVM_REQ_EVENT			KVM_ARCH_REQ(6)
#define KVM_REQ_APF_HALT		KVM_ARCH_REQ(7)
#define KVM_REQ_STEAL_UPDATE		KVM_ARCH_REQ(8)
#define KVM_REQ_NMI			KVM_ARCH_REQ(9)
#define KVM_REQ_PMU			KVM_ARCH_REQ(10)
#define KVM_REQ_PMI			KVM_ARCH_REQ(11)
#ifdef CONFIG_KVM_SMM
#define KVM_REQ_SMI			KVM_ARCH_REQ(12)
#endif
#define KVM_REQ_MASTERCLOCK_UPDATE	KVM_ARCH_REQ(13)
#define KVM_REQ_MCLOCK_INPROGRESS \
	KVM_ARCH_REQ_FLAGS(14, KVM_REQUEST_WAIT | KVM_REQUEST_NO_WAKEUP)
#define KVM_REQ_SCAN_IOAPIC \
	KVM_ARCH_REQ_FLAGS(15, KVM_REQUEST_WAIT | KVM_REQUEST_NO_WAKEUP)
#define KVM_REQ_GLOBAL_CLOCK_UPDATE	KVM_ARCH_REQ(16)
#define KVM_REQ_APIC_PAGE_RELOAD \
	KVM_ARCH_REQ_FLAGS(17, KVM_REQUEST_WAIT | KVM_REQUEST_NO_WAKEUP)
#define KVM_REQ_HV_CRASH		KVM_ARCH_REQ(18)
#define KVM_REQ_IOAPIC_EOI_EXIT		KVM_ARCH_REQ(19)
#define KVM_REQ_HV_RESET		KVM_ARCH_REQ(20)
#define KVM_REQ_HV_EXIT			KVM_ARCH_REQ(21)
#define KVM_REQ_HV_STIMER		KVM_ARCH_REQ(22)
#define KVM_REQ_LOAD_EOI_EXITMAP	KVM_ARCH_REQ(23)
#define KVM_REQ_GET_NESTED_STATE_PAGES	KVM_ARCH_REQ(24)
#define KVM_REQ_APICV_UPDATE \
	KVM_ARCH_REQ_FLAGS(25, KVM_REQUEST_WAIT | KVM_REQUEST_NO_WAKEUP)
#define KVM_REQ_TLB_FLUSH_CURRENT	KVM_ARCH_REQ(26)
#define KVM_REQ_TLB_FLUSH_GUEST \
	KVM_ARCH_REQ_FLAGS(27, KVM_REQUEST_WAIT | KVM_REQUEST_NO_WAKEUP)
#define KVM_REQ_APF_READY		KVM_ARCH_REQ(28)
#define KVM_REQ_MSR_FILTER_CHANGED	KVM_ARCH_REQ(29)
#define KVM_REQ_UPDATE_CPU_DIRTY_LOGGING \
	KVM_ARCH_REQ_FLAGS(30, KVM_REQUEST_WAIT | KVM_REQUEST_NO_WAKEUP)
#define KVM_REQ_MMU_FREE_OBSOLETE_ROOTS \
	KVM_ARCH_REQ_FLAGS(31, KVM_REQUEST_WAIT | KVM_REQUEST_NO_WAKEUP)
#define KVM_REQ_HV_TLB_FLUSH \
	KVM_ARCH_REQ_FLAGS(32, KVM_REQUEST_WAIT | KVM_REQUEST_NO_WAKEUP)

#define CR0_RESERVED_BITS \
	(~(unsigned long)(X86_CR0_PE | X86_CR0_MP | X86_CR0_EM | X86_CR0_TS \
			  | X86_CR0_ET | X86_CR0_NE | X86_CR0_WP | X86_CR0_AM \
			  | X86_CR0_NW | X86_CR0_CD | X86_CR0_PG))

#define CR4_RESERVED_BITS \
	(~(unsigned long)(X86_CR4_VME | X86_CR4_PVI | X86_CR4_TSD | X86_CR4_DE \
			  | X86_CR4_PSE | X86_CR4_PAE | X86_CR4_MCE \
			  | X86_CR4_PGE | X86_CR4_PCE | X86_CR4_OSFXSR | X86_CR4_PCIDE \
			  | X86_CR4_OSXSAVE | X86_CR4_SMEP | X86_CR4_FSGSBASE \
			  | X86_CR4_OSXMMEXCPT | X86_CR4_LA57 | X86_CR4_VMXE \
			  | X86_CR4_SMAP | X86_CR4_PKE | X86_CR4_UMIP \
			  | X86_CR4_LAM_SUP))

#define CR8_RESERVED_BITS (~(unsigned long)X86_CR8_TPR)

#define INVALID_PAGE	(~(hpa_t)0)
#define VALID_PAGE(x)	((x) != INVALID_PAGE)

/* KVM Hugepage definitions for x86 */
#define KVM_MAX_HUGEPAGE_LEVEL	PG_LEVEL_1G
#define KVM_NR_PAGE_SIZES	(KVM_MAX_HUGEPAGE_LEVEL - PG_LEVEL_4K + 1)
#define KVM_HPAGE_GFN_SHIFT(x)	(((x) - 1) * 9)
#define KVM_HPAGE_SHIFT(x)	(PAGE_SHIFT + KVM_HPAGE_GFN_SHIFT(x))
#define KVM_HPAGE_SIZE(x)	(1UL << KVM_HPAGE_SHIFT(x))
#define KVM_HPAGE_MASK(x)	(~(KVM_HPAGE_SIZE(x) - 1))
#define KVM_PAGES_PER_HPAGE(x)	(KVM_HPAGE_SIZE(x) / PAGE_SIZE)
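
/*
 * Worked example: for a 2MiB page (x == PG_LEVEL_2M == 2, PAGE_SHIFT == 12
 * on x86):
 *
 *	KVM_HPAGE_GFN_SHIFT(2)	== 9
 *	KVM_HPAGE_SHIFT(2)	== 21
 *	KVM_HPAGE_SIZE(2)	== 1UL << 21	(2MiB)
 *	KVM_PAGES_PER_HPAGE(2)	== 512
 */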

#define KVM_MEMSLOT_PAGES_TO_MMU_PAGES_RATIO 50
#define KVM_MIN_ALLOC_MMU_PAGES 64UL
#define KVM_MMU_HASH_SHIFT 12
#define KVM_NUM_MMU_PAGES (1 << KVM_MMU_HASH_SHIFT)
#define KVM_MIN_FREE_MMU_PAGES 5
#define KVM_REFILL_PAGES 25
#define KVM_MAX_CPUID_ENTRIES 256
#define KVM_NR_FIXED_MTRR_REGION 88
#define KVM_NR_VAR_MTRR 8

#define ASYNC_PF_PER_VCPU 64

enum kvm_reg {
	VCPU_REGS_RAX = __VCPU_REGS_RAX,
	VCPU_REGS_RCX = __VCPU_REGS_RCX,
	VCPU_REGS_RDX = __VCPU_REGS_RDX,
	VCPU_REGS_RBX = __VCPU_REGS_RBX,
	VCPU_REGS_RSP = __VCPU_REGS_RSP,
	VCPU_REGS_RBP = __VCPU_REGS_RBP,
	VCPU_REGS_RSI = __VCPU_REGS_RSI,
	VCPU_REGS_RDI = __VCPU_REGS_RDI,
#ifdef CONFIG_X86_64
	VCPU_REGS_R8 = __VCPU_REGS_R8,
	VCPU_REGS_R9 = __VCPU_REGS_R9,
	VCPU_REGS_R10 = __VCPU_REGS_R10,
	VCPU_REGS_R11 = __VCPU_REGS_R11,
	VCPU_REGS_R12 = __VCPU_REGS_R12,
	VCPU_REGS_R13 = __VCPU_REGS_R13,
	VCPU_REGS_R14 = __VCPU_REGS_R14,
	VCPU_REGS_R15 = __VCPU_REGS_R15,
#endif
	VCPU_REGS_RIP,
	NR_VCPU_REGS,

	VCPU_EXREG_PDPTR = NR_VCPU_REGS,
	VCPU_EXREG_CR0,
	VCPU_EXREG_CR3,
	VCPU_EXREG_CR4,
	VCPU_EXREG_RFLAGS,
	VCPU_EXREG_SEGMENTS,
	VCPU_EXREG_EXIT_INFO_1,
	VCPU_EXREG_EXIT_INFO_2,
};

enum {
	VCPU_SREG_ES,
	VCPU_SREG_CS,
	VCPU_SREG_SS,
	VCPU_SREG_DS,
	VCPU_SREG_FS,
	VCPU_SREG_GS,
	VCPU_SREG_TR,
	VCPU_SREG_LDTR,
};

enum exit_fastpath_completion {
	EXIT_FASTPATH_NONE,
	EXIT_FASTPATH_REENTER_GUEST,
	EXIT_FASTPATH_EXIT_HANDLED,
};
typedef enum exit_fastpath_completion fastpath_t;

struct x86_emulate_ctxt;
struct x86_exception;
union kvm_smram;
enum x86_intercept;
enum x86_intercept_stage;

#define KVM_NR_DB_REGS	4

#define DR6_BUS_LOCK	(1 << 11)
#define DR6_BD		(1 << 13)
#define DR6_BS		(1 << 14)
#define DR6_BT		(1 << 15)
#define DR6_RTM		(1 << 16)
/*
 * DR6_ACTIVE_LOW combines fixed-1 and active-low bits.
 * We can regard all the bits in DR6_FIXED_1 as active_low bits;
 * they will never be 0 for now, but when they are defined
 * in the future it will require no code change.
 *
 * DR6_ACTIVE_LOW is also used as the init/reset value for DR6.
 */
#define DR6_ACTIVE_LOW	0xffff0ff0
#define DR6_VOLATILE	0x0001e80f
#define DR6_FIXED_1	(DR6_ACTIVE_LOW & ~DR6_VOLATILE)
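
/*
 * Sanity-check arithmetic: DR6_FIXED_1 == 0xffff0ff0 & ~0x0001e80f
 * == 0xfffe07f0, i.e. the always-1 bits of DR6 minus the bits that the
 * CPU (or KVM) may legitimately change.
 */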

#define DR7_BP_EN_MASK	0x000000ff
#define DR7_GE		(1 << 9)
#define DR7_GD		(1 << 13)
#define DR7_FIXED_1	0x00000400
#define DR7_VOLATILE	0xffff2bff

#define KVM_GUESTDBG_VALID_MASK \
	(KVM_GUESTDBG_ENABLE | \
	 KVM_GUESTDBG_SINGLESTEP | \
	 KVM_GUESTDBG_USE_HW_BP | \
	 KVM_GUESTDBG_USE_SW_BP | \
	 KVM_GUESTDBG_INJECT_BP | \
	 KVM_GUESTDBG_INJECT_DB | \
	 KVM_GUESTDBG_BLOCKIRQ)

#define PFERR_PRESENT_BIT 0
#define PFERR_WRITE_BIT 1
#define PFERR_USER_BIT 2
#define PFERR_RSVD_BIT 3
#define PFERR_FETCH_BIT 4
#define PFERR_PK_BIT 5
#define PFERR_SGX_BIT 15
#define PFERR_GUEST_FINAL_BIT 32
#define PFERR_GUEST_PAGE_BIT 33
#define PFERR_IMPLICIT_ACCESS_BIT 48

#define PFERR_PRESENT_MASK	BIT(PFERR_PRESENT_BIT)
#define PFERR_WRITE_MASK	BIT(PFERR_WRITE_BIT)
#define PFERR_USER_MASK		BIT(PFERR_USER_BIT)
#define PFERR_RSVD_MASK		BIT(PFERR_RSVD_BIT)
#define PFERR_FETCH_MASK	BIT(PFERR_FETCH_BIT)
#define PFERR_PK_MASK		BIT(PFERR_PK_BIT)
#define PFERR_SGX_MASK		BIT(PFERR_SGX_BIT)
#define PFERR_GUEST_FINAL_MASK	BIT_ULL(PFERR_GUEST_FINAL_BIT)
#define PFERR_GUEST_PAGE_MASK	BIT_ULL(PFERR_GUEST_PAGE_BIT)
#define PFERR_IMPLICIT_ACCESS	BIT_ULL(PFERR_IMPLICIT_ACCESS_BIT)

#define PFERR_NESTED_GUEST_PAGE (PFERR_GUEST_PAGE_MASK |	\
				 PFERR_WRITE_MASK |		\
				 PFERR_PRESENT_MASK)
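
/*
 * Illustrative decode (a sketch, not code KVM itself uses): a user-mode
 * write that faults on a present page for permission reasons arrives with
 *
 *	PFERR_PRESENT_MASK | PFERR_WRITE_MASK | PFERR_USER_MASK	(0x7)
 *
 * and individual causes can be tested bit by bit, e.g.:
 *
 *	bool is_write = error_code & PFERR_WRITE_MASK;
 *	bool is_fetch = error_code & PFERR_FETCH_MASK;
 */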

/* apic attention bits */
#define KVM_APIC_CHECK_VAPIC	0
/*
 * The following bit is set with PV-EOI, unset on EOI.
 * We detect guest-initiated PV-EOI changes by comparing
 * this bit with PV-EOI in guest memory.
 * See the implementation in apic_update_pv_eoi.
 */
#define KVM_APIC_PV_EOI_PENDING	1

struct kvm_kernel_irq_routing_entry;

/*
 * kvm_mmu_page_role tracks the properties of a shadow page (where shadow page
 * also includes TDP pages) to determine whether or not a page can be used in
 * the given MMU context. This is a subset of the overall kvm_cpu_role to
 * minimize the size of kvm_memory_slot.arch.gfn_write_track, i.e. allows
 * allocating 2 bytes per gfn instead of 4 bytes per gfn.
 *
 * Upper-level shadow pages having gptes are tracked for write-protection via
 * gfn_write_track. As above, gfn_write_track is a 16-bit counter, so KVM must
 * not create more than 2^16-1 upper-level shadow pages at a single gfn,
 * otherwise gfn_write_track will overflow and explosions will ensue.
 *
 * A unique shadow page (SP) for a gfn is created if and only if an existing SP
 * cannot be reused. The ability to reuse a SP is tracked by its role, which
 * incorporates various mode bits and properties of the SP. Roughly speaking,
 * the number of unique SPs that can theoretically be created is 2^n, where n
 * is the number of bits that are used to compute the role.
 *
 * But, even though there are 19 bits in the mask below, not all combinations
 * of modes and flags are possible:
 *
 *   - invalid shadow pages are not accounted, so the bits are effectively 18
 *
 *   - quadrant will only be used if has_4_byte_gpte=1 (non-PAE paging);
 *     execonly and ad_disabled are only used for nested EPT which has
 *     has_4_byte_gpte=0. Therefore, 2 bits are always unused.
 *
 *   - the 4 bits of level are effectively limited to the values 2/3/4/5,
 *     as 4k SPs are not tracked (allowed to go unsync). In addition non-PAE
 *     paging has exactly one upper level, making level completely redundant
 *     when has_4_byte_gpte=1.
 *
 *   - on top of this, smep_andnot_wp and smap_andnot_wp are only set if
 *     cr0_wp=0, therefore these three bits only give rise to 5 possibilities.
 *
 * Therefore, the maximum number of possible upper-level shadow pages for a
 * single gfn is a bit less than 2^13.
 */
union kvm_mmu_page_role {
	u32 word;
	struct {
		unsigned level:4;
		unsigned has_4_byte_gpte:1;
		unsigned quadrant:2;
		unsigned direct:1;
		unsigned access:3;
		unsigned invalid:1;
		unsigned efer_nx:1;
		unsigned cr0_wp:1;
		unsigned smep_andnot_wp:1;
		unsigned smap_andnot_wp:1;
		unsigned ad_disabled:1;
		unsigned guest_mode:1;
		unsigned passthrough:1;
		unsigned :5;

		/*
		 * This is left at the top of the word so that
		 * kvm_memslots_for_spte_role can extract it with a
		 * simple shift. While there is room, give it a whole
		 * byte so it is also faster to load it from memory.
		 */
		unsigned smm:8;
	};
};
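
/*
 * Because kvm_mmu_page_role is a union over a single u32, role equality
 * (and thus shadow page reuse) is a plain integer compare; a lookup can
 * simply test
 *
 *	if (sp->role.word == role.word)
 *
 * instead of comparing each field individually.
 */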

/*
 * kvm_mmu_extended_role complements kvm_mmu_page_role, tracking properties
 * relevant to the current MMU configuration. When loading CR0, CR4, or EFER,
 * including on nested transitions, if nothing in the full role changes then
 * MMU re-configuration can be skipped. @valid bit is set on first usage so we
 * don't treat all-zero structure as valid data.
 *
 * The properties that are tracked in the extended role but not the page role
 * are for things that either (a) do not affect the validity of the shadow page
 * or (b) are indirectly reflected in the shadow page's role. For example,
 * CR4.PKE only affects permission checks for software walks of the guest page
 * tables (because KVM doesn't support Protection Keys with shadow paging), and
 * CR0.PG, CR4.PAE, and CR4.PSE are indirectly reflected in role.level.
 *
 * Note, SMEP and SMAP are not redundant with sm*p_andnot_wp in the page role.
 * If CR0.WP=1, KVM can reuse shadow pages for the guest regardless of SMEP and
 * SMAP, but the MMU's permission checks for software walks need to be SMEP and
 * SMAP aware regardless of CR0.WP.
 */
union kvm_mmu_extended_role {
	u32 word;
	struct {
		unsigned int valid:1;
		unsigned int execonly:1;
		unsigned int cr4_pse:1;
		unsigned int cr4_pke:1;
		unsigned int cr4_smap:1;
		unsigned int cr4_smep:1;
		unsigned int cr4_la57:1;
		unsigned int efer_lma:1;
	};
};

union kvm_cpu_role {
	u64 as_u64;
	struct {
		union kvm_mmu_page_role base;
		union kvm_mmu_extended_role ext;
	};
};

struct kvm_rmap_head {
	unsigned long val;
};

struct kvm_pio_request {
	unsigned long linear_rip;
	unsigned long count;
	int in;
	int port;
	int size;
};

#define PT64_ROOT_MAX_LEVEL 5

struct rsvd_bits_validate {
	u64 rsvd_bits_mask[2][PT64_ROOT_MAX_LEVEL];
	u64 bad_mt_xwr;
};

struct kvm_mmu_root_info {
	gpa_t pgd;
	hpa_t hpa;
};

#define KVM_MMU_ROOT_INFO_INVALID \
	((struct kvm_mmu_root_info) { .pgd = INVALID_PAGE, .hpa = INVALID_PAGE })

#define KVM_MMU_NUM_PREV_ROOTS 3

#define KVM_MMU_ROOT_CURRENT		BIT(0)
#define KVM_MMU_ROOT_PREVIOUS(i)	BIT(1 + i)
#define KVM_MMU_ROOTS_ALL		(BIT(1 + KVM_MMU_NUM_PREV_ROOTS) - 1)
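
/*
 * With KVM_MMU_NUM_PREV_ROOTS == 3 this works out to:
 *
 *	KVM_MMU_ROOT_CURRENT	 == BIT(0)	(0x1)
 *	KVM_MMU_ROOT_PREVIOUS(0) == BIT(1)	(0x2)
 *	...
 *	KVM_MMU_ROOTS_ALL	 == BIT(4) - 1	(0xf)
 *
 * i.e. a mask covering the current root plus all three cached previous roots.
 */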

#define KVM_HAVE_MMU_RWLOCK

struct kvm_mmu_page;
struct kvm_page_fault;

/*
 * x86 supports 4 paging modes (5-level 64-bit, 4-level 64-bit, 3-level 32-bit,
 * and 2-level 32-bit). The kvm_mmu structure abstracts the details of the
 * current mmu mode.
 */
struct kvm_mmu {
	unsigned long (*get_guest_pgd)(struct kvm_vcpu *vcpu);
	u64 (*get_pdptr)(struct kvm_vcpu *vcpu, int index);
	int (*page_fault)(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault);
	void (*inject_page_fault)(struct kvm_vcpu *vcpu,
				  struct x86_exception *fault);
	gpa_t (*gva_to_gpa)(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
			    gpa_t gva_or_gpa, u64 access,
			    struct x86_exception *exception);
	int (*sync_spte)(struct kvm_vcpu *vcpu,
			 struct kvm_mmu_page *sp, int i);
	struct kvm_mmu_root_info root;
	union kvm_cpu_role cpu_role;
	union kvm_mmu_page_role root_role;

	/*
	 * The pkru_mask indicates if protection key checks are needed. It
	 * consists of 16 domains indexed by page fault error code bits [4:1],
	 * with PFEC.RSVD replaced by ACC_USER_MASK from the page tables.
	 * Each domain has 2 bits which are ANDed with AD and WD from PKRU.
	 */
	u32 pkru_mask;

	struct kvm_mmu_root_info prev_roots[KVM_MMU_NUM_PREV_ROOTS];

	/*
	 * Bitmap; bit set = permission fault
	 * Byte index: page fault error code [4:1]
	 * Bit index: pte permissions in ACC_* format
	 */
	u8 permissions[16];

	u64 *pae_root;
	u64 *pml4_root;
	u64 *pml5_root;

	/*
	 * Check zero bits on shadow page table entries. These bits include
	 * not only hardware-reserved bits but also bits that SPTEs never use.
	 */
	struct rsvd_bits_validate shadow_zero_check;

	struct rsvd_bits_validate guest_rsvd_check;

	u64 pdptrs[4]; /* pae */
};
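
/*
 * Usage sketch (hedged; the real callers live in x86.c/mmu.c): translating a
 * guest virtual address through the MMU currently used for software walks:
 *
 *	struct x86_exception fault;
 *	gpa_t gpa = vcpu->arch.walk_mmu->gva_to_gpa(vcpu, vcpu->arch.walk_mmu,
 *						    gva, PFERR_WRITE_MASK,
 *						    &fault);
 *
 * A failed walk fills in the exception so the caller can inject a page fault.
 */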

enum pmc_type {
	KVM_PMC_GP = 0,
	KVM_PMC_FIXED,
};

struct kvm_pmc {
	enum pmc_type type;
	u8 idx;
	bool is_paused;
	bool intr;
	/*
	 * Base value of the PMC counter, relative to the *consumed* count in
	 * the associated perf_event. This value includes counter updates from
	 * the perf_event and emulated_count since the last time the counter
	 * was reprogrammed, but it is *not* the current value as seen by the
	 * guest or userspace.
	 *
	 * The count is relative to the associated perf_event so that KVM
	 * doesn't need to reprogram the perf_event every time the guest writes
	 * to the counter.
	 */
	u64 counter;
	/*
	 * PMC events triggered by KVM emulation that haven't been fully
	 * processed, i.e. haven't undergone overflow detection.
	 */
	u64 emulated_counter;
	u64 eventsel;
	struct perf_event *perf_event;
	struct kvm_vcpu *vcpu;
	/*
	 * only for creating or reusing perf_event,
	 * eventsel value for general purpose counters,
	 * ctrl value for fixed counters.
	 */
	u64 current_config;
};
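
/*
 * Sketch of how a guest-visible counter value is composed from the fields
 * above (modeled on pmu.c's pmc_read_counter(); treat as illustrative):
 *
 *	u64 val = pmc->counter + pmc->emulated_counter;
 *	if (pmc->perf_event && !pmc->is_paused)
 *		val += <count accumulated by the perf_event>;
 *
 * with the result truncated to the counter's architectural width.
 */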

/* More counters may conflict with other existing Architectural MSRs */
#define KVM_INTEL_PMC_MAX_GENERIC	8
#define MSR_ARCH_PERFMON_PERFCTR_MAX	(MSR_ARCH_PERFMON_PERFCTR0 + KVM_INTEL_PMC_MAX_GENERIC - 1)
#define MSR_ARCH_PERFMON_EVENTSEL_MAX	(MSR_ARCH_PERFMON_EVENTSEL0 + KVM_INTEL_PMC_MAX_GENERIC - 1)
#define KVM_PMC_MAX_FIXED	3
#define MSR_ARCH_PERFMON_FIXED_CTR_MAX	(MSR_ARCH_PERFMON_FIXED_CTR0 + KVM_PMC_MAX_FIXED - 1)
#define KVM_AMD_PMC_MAX_GENERIC	6

struct kvm_pmu {
	u8 version;
	unsigned nr_arch_gp_counters;
	unsigned nr_arch_fixed_counters;
	unsigned available_event_types;
	u64 fixed_ctr_ctrl;
	u64 fixed_ctr_ctrl_mask;
	u64 global_ctrl;
	u64 global_status;
	u64 counter_bitmask[2];
	u64 global_ctrl_mask;
	u64 global_status_mask;
	u64 reserved_bits;
	u64 raw_event_mask;
	struct kvm_pmc gp_counters[KVM_INTEL_PMC_MAX_GENERIC];
	struct kvm_pmc fixed_counters[KVM_PMC_MAX_FIXED];

	/*
	 * Overlay the bitmap with a 64-bit atomic so that all bits can be
	 * set in a single access, e.g. to reprogram all counters when the PMU
	 * filter changes.
	 */
	union {
		DECLARE_BITMAP(reprogram_pmi, X86_PMC_IDX_MAX);
		atomic64_t __reprogram_pmi;
	};
	DECLARE_BITMAP(all_valid_pmc_idx, X86_PMC_IDX_MAX);
	DECLARE_BITMAP(pmc_in_use, X86_PMC_IDX_MAX);

	u64 ds_area;
	u64 pebs_enable;
	u64 pebs_enable_mask;
	u64 pebs_data_cfg;
	u64 pebs_data_cfg_mask;

	/*
	 * If a guest counter is cross-mapped to a host counter with a
	 * different index, its PEBS capability will be temporarily disabled.
	 *
	 * The user should make sure that this mask is updated after
	 * disabling interrupts and before perf_guest_get_msrs().
	 */
	u64 host_cross_mapped_mask;

	/*
	 * Gate to release perf_events not marked in pmc_in_use at most once
	 * per vCPU time slice.
	 */
	bool need_cleanup;

	/*
	 * The total number of programmed perf_events; used to avoid a
	 * redundant check before cleanup if the guest doesn't use the vPMU
	 * at all.
	 */
	u8 event_count;
};

struct kvm_pmu_ops;

enum {
	KVM_DEBUGREG_BP_ENABLED = 1,
	KVM_DEBUGREG_WONT_EXIT = 2,
};

struct kvm_mtrr_range {
	u64 base;
	u64 mask;
	struct list_head node;
};

struct kvm_mtrr {
	struct kvm_mtrr_range var_ranges[KVM_NR_VAR_MTRR];
	mtrr_type fixed_ranges[KVM_NR_FIXED_MTRR_REGION];
	u64 deftype;

	struct list_head head;
};

/* Hyper-V SynIC timer */
struct kvm_vcpu_hv_stimer {
	struct hrtimer timer;
	int index;
	union hv_stimer_config config;
	u64 count;
	u64 exp_time;
	struct hv_message msg;
	bool msg_pending;
};

/* Hyper-V synthetic interrupt controller (SynIC) */
struct kvm_vcpu_hv_synic {
	u64 version;
	u64 control;
	u64 msg_page;
	u64 evt_page;
	atomic64_t sint[HV_SYNIC_SINT_COUNT];
	atomic_t sint_to_gsi[HV_SYNIC_SINT_COUNT];
	DECLARE_BITMAP(auto_eoi_bitmap, 256);
	DECLARE_BITMAP(vec_bitmap, 256);
	bool active;
	bool dont_zero_synic_pages;
};

/* The maximum number of entries on the TLB flush fifo. */
#define KVM_HV_TLB_FLUSH_FIFO_SIZE (16)
/*
 * Note: the following 'magic' entry is made up by KVM to avoid putting
 * anything besides GVA on the TLB flush fifo. It is theoretically possible
 * to observe a request to flush 4095 PFNs starting from 0xfffffffffffff000
 * which will look identical. KVM's action to 'flush everything' instead of
 * flushing these particular addresses is, however, fully legitimate as
 * flushing more than requested is always OK.
 */
#define KVM_HV_TLB_FLUSHALL_ENTRY  ((u64)-1)

enum hv_tlb_flush_fifos {
	HV_L1_TLB_FLUSH_FIFO,
	HV_L2_TLB_FLUSH_FIFO,
	HV_NR_TLB_FLUSH_FIFOS,
};

struct kvm_vcpu_hv_tlb_flush_fifo {
	spinlock_t write_lock;
	DECLARE_KFIFO(entries, u64, KVM_HV_TLB_FLUSH_FIFO_SIZE);
};
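
/*
 * Producer-side sketch (hedged; the real logic lives in hyperv.c): a
 * "flush everything" request queues the magic entry under the write lock:
 *
 *	u64 entry = KVM_HV_TLB_FLUSHALL_ENTRY;
 *
 *	kfifo_in_spinlocked_noirqsave(&fifo->entries, &entry, 1,
 *				      &fifo->write_lock);
 *
 * A consumer that dequeues KVM_HV_TLB_FLUSHALL_ENTRY (or fails to drain the
 * fifo) simply flushes the whole guest TLB, which is always safe.
 */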

/* Hyper-V per vcpu emulation context */
struct kvm_vcpu_hv {
	struct kvm_vcpu *vcpu;
	u32 vp_index;
	u64 hv_vapic;
	s64 runtime_offset;
	struct kvm_vcpu_hv_synic synic;
	struct kvm_hyperv_exit exit;
	struct kvm_vcpu_hv_stimer stimer[HV_SYNIC_STIMER_COUNT];
	DECLARE_BITMAP(stimer_pending_bitmap, HV_SYNIC_STIMER_COUNT);
	bool enforce_cpuid;
	struct {
		u32 features_eax; /* HYPERV_CPUID_FEATURES.EAX */
		u32 features_ebx; /* HYPERV_CPUID_FEATURES.EBX */
		u32 features_edx; /* HYPERV_CPUID_FEATURES.EDX */
		u32 enlightenments_eax; /* HYPERV_CPUID_ENLIGHTMENT_INFO.EAX */
		u32 enlightenments_ebx; /* HYPERV_CPUID_ENLIGHTMENT_INFO.EBX */
		u32 syndbg_cap_eax; /* HYPERV_CPUID_SYNDBG_PLATFORM_CAPABILITIES.EAX */
		u32 nested_eax; /* HYPERV_CPUID_NESTED_FEATURES.EAX */
		u32 nested_ebx; /* HYPERV_CPUID_NESTED_FEATURES.EBX */
	} cpuid_cache;

	struct kvm_vcpu_hv_tlb_flush_fifo tlb_flush_fifo[HV_NR_TLB_FLUSH_FIFOS];

	/* Preallocated buffer for handling hypercalls passing sparse vCPU set */
	u64 sparse_banks[HV_MAX_SPARSE_VCPU_BANKS];

	struct hv_vp_assist_page vp_assist_page;

	struct {
		u64 pa_page_gpa;
		u64 vm_id;
		u32 vp_id;
	} nested;
};

struct kvm_hypervisor_cpuid {
	u32 base;
	u32 limit;
};

#ifdef CONFIG_KVM_XEN
/* Xen HVM per vcpu emulation context */
struct kvm_vcpu_xen {
	u64 hypercall_rip;
	u32 current_runstate;
	u8 upcall_vector;
	struct gfn_to_pfn_cache vcpu_info_cache;
	struct gfn_to_pfn_cache vcpu_time_info_cache;
	struct gfn_to_pfn_cache runstate_cache;
	struct gfn_to_pfn_cache runstate2_cache;
	u64 last_steal;
	u64 runstate_entry_time;
	u64 runstate_times[4];
	unsigned long evtchn_pending_sel;
	u32 vcpu_id; /* The Xen / ACPI vCPU ID */
	u32 timer_virq;
	u64 timer_expires; /* In guest epoch */
	atomic_t timer_pending;
	struct hrtimer timer;
	int poll_evtchn;
	struct timer_list poll_timer;
	struct kvm_hypervisor_cpuid cpuid;
};
#endif

struct kvm_queued_exception {
	bool pending;
	bool injected;
	bool has_error_code;
	u8 vector;
	u32 error_code;
	unsigned long payload;
	bool has_payload;
};

struct kvm_vcpu_arch {
	/*
	 * rip and regs accesses must go through
	 * kvm_{register,rip}_{read,write} functions.
	 */
	unsigned long regs[NR_VCPU_REGS];
	u32 regs_avail;
	u32 regs_dirty;

	unsigned long cr0;
	unsigned long cr0_guest_owned_bits;
	unsigned long cr2;
	unsigned long cr3;
	unsigned long cr4;
	unsigned long cr4_guest_owned_bits;
	unsigned long cr4_guest_rsvd_bits;
	unsigned long cr8;
	u32 host_pkru;
	u32 pkru;
	u32 hflags;
	u64 efer;
	u64 apic_base;
	struct kvm_lapic *apic; /* kernel irqchip context */
	bool load_eoi_exitmap_pending;
	DECLARE_BITMAP(ioapic_handled_vectors, 256);
	unsigned long apic_attention;
	int32_t apic_arb_prio;
	int mp_state;
	u64 ia32_misc_enable_msr;
	u64 smbase;
	u64 smi_count;
	bool at_instruction_boundary;
	bool tpr_access_reporting;
	bool xfd_no_write_intercept;
	u64 ia32_xss;
	u64 microcode_version;
	u64 arch_capabilities;
	u64 perf_capabilities;

	/*
	 * Paging state of the vcpu
	 *
	 * If the vcpu runs in guest mode with two level paging this still saves
	 * the paging mode of the l1 guest. This context is always used to
	 * handle faults.
	 */
	struct kvm_mmu *mmu;

	/* Non-nested MMU for L1 */
	struct kvm_mmu root_mmu;

	/* L1 MMU when running nested */
	struct kvm_mmu guest_mmu;

	/*
	 * Paging state of an L2 guest (used for nested npt)
	 *
	 * This context will save all necessary information to walk page tables
	 * of an L2 guest. This context is only initialized for page table
	 * walking and not for faulting since we never handle l2 page faults on
	 * the host.
	 */
	struct kvm_mmu nested_mmu;

	/*
	 * Pointer to the mmu context currently used for
	 * gva_to_gpa translations.
	 */
	struct kvm_mmu *walk_mmu;

	struct kvm_mmu_memory_cache mmu_pte_list_desc_cache;
	struct kvm_mmu_memory_cache mmu_shadow_page_cache;
	struct kvm_mmu_memory_cache mmu_shadowed_info_cache;
	struct kvm_mmu_memory_cache mmu_page_header_cache;

	/*
	 * QEMU userspace and the guest each have their own FPU state.
	 * In vcpu_run, we switch between the user and guest FPU contexts.
	 * While running a VCPU, the VCPU thread will have the guest FPU
	 * context.
	 *
	 * Note that while the PKRU state lives inside the fpu registers,
	 * it is switched out separately at VMENTER and VMEXIT time. The
	 * "guest_fpstate" state here contains the guest FPU context, with the
	 * host PKRU bits.
	 */
	struct fpu_guest guest_fpu;

	u64 xcr0;
	u64 guest_supported_xcr0;

	struct kvm_pio_request pio;
	void *pio_data;
	void *sev_pio_data;
	unsigned sev_pio_count;

	u8 event_exit_inst_len;

	bool exception_from_userspace;

	/* Exceptions to be injected to the guest. */
	struct kvm_queued_exception exception;
	/* Exception VM-Exits to be synthesized to L1. */
	struct kvm_queued_exception exception_vmexit;

	struct kvm_queued_interrupt {
		bool injected;
		bool soft;
		u8 nr;
	} interrupt;

	int halt_request; /* real mode on Intel only */

	int cpuid_nent;
	struct kvm_cpuid_entry2 *cpuid_entries;
	struct kvm_hypervisor_cpuid kvm_cpuid;
	bool is_amd_compatible;

	/*
	 * FIXME: Drop this macro and use KVM_NR_GOVERNED_FEATURES directly
	 * when "struct kvm_vcpu_arch" is no longer defined in an
	 * arch/x86/include/asm header. The max is mostly arbitrary, i.e.
	 * can be increased as necessary.
	 */
#define KVM_MAX_NR_GOVERNED_FEATURES BITS_PER_LONG

	/*
	 * Track whether or not the guest is allowed to use features that are
	 * governed by KVM, where "governed" means KVM needs to manage state
	 * and/or explicitly enable the feature in hardware. Typically, but
	 * not always, governed features can be used by the guest if and only
	 * if both KVM and userspace want to expose the feature to the guest.
	 */
	struct {
		DECLARE_BITMAP(enabled, KVM_MAX_NR_GOVERNED_FEATURES);
	} governed_features;

	u64 reserved_gpa_bits;
	int maxphyaddr;

	/* emulate context */

	struct x86_emulate_ctxt *emulate_ctxt;
	bool emulate_regs_need_sync_to_vcpu;
	bool emulate_regs_need_sync_from_vcpu;
	int (*complete_userspace_io)(struct kvm_vcpu *vcpu);

	gpa_t time;
	struct pvclock_vcpu_time_info hv_clock;
	unsigned int hw_tsc_khz;
	struct gfn_to_pfn_cache pv_time;
	/* set guest stopped flag in pvclock flags field */
	bool pvclock_set_guest_stopped_request;

	struct {
		u8 preempted;
		u64 msr_val;
		u64 last_steal;
		struct gfn_to_hva_cache cache;
	} st;

	u64 l1_tsc_offset;
	u64 tsc_offset; /* current tsc offset */
	u64 last_guest_tsc;
	u64 last_host_tsc;
	u64 tsc_offset_adjustment;
	u64 this_tsc_nsec;
	u64 this_tsc_write;
	u64 this_tsc_generation;
	bool tsc_catchup;
	bool tsc_always_catchup;
	s8 virtual_tsc_shift;
	u32 virtual_tsc_mult;
	u32 virtual_tsc_khz;
	s64 ia32_tsc_adjust_msr;
	u64 msr_ia32_power_ctl;
	u64 l1_tsc_scaling_ratio;
	u64 tsc_scaling_ratio; /* current scaling ratio */

	atomic_t nmi_queued; /* unprocessed asynchronous NMIs */
	/* Number of NMIs pending injection, not including hardware vNMIs. */
	unsigned int nmi_pending;
	bool nmi_injected; /* Trying to inject an NMI this entry */
	bool smi_pending; /* SMI queued after currently running handler */
	u8 handling_intr_from_guest;

	struct kvm_mtrr mtrr_state;
	u64 pat;

	unsigned switch_db_regs;
	unsigned long db[KVM_NR_DB_REGS];
	unsigned long dr6;
	unsigned long dr7;
	unsigned long eff_db[KVM_NR_DB_REGS];
	unsigned long guest_debug_dr7;
	u64 msr_platform_info;
	u64 msr_misc_features_enables;

	u64 mcg_cap;
	u64 mcg_status;
	u64 mcg_ctl;
	u64 mcg_ext_ctl;
	u64 *mce_banks;
	u64 *mci_ctl2_banks;

	/* Cache MMIO info */
	u64 mmio_gva;
	unsigned mmio_access;
	gfn_t mmio_gfn;
	u64 mmio_gen;

	struct kvm_pmu pmu;

	/* used for guest single stepping over the given code position */
	unsigned long singlestep_rip;

#ifdef CONFIG_KVM_HYPERV
	bool hyperv_enabled;
	struct kvm_vcpu_hv *hyperv;
#endif
#ifdef CONFIG_KVM_XEN
	struct kvm_vcpu_xen xen;
#endif
	cpumask_var_t wbinvd_dirty_mask;

	unsigned long last_retry_eip;
	unsigned long last_retry_addr;

	struct {
		bool halted;
		gfn_t gfns[ASYNC_PF_PER_VCPU];
		struct gfn_to_hva_cache data;
		u64 msr_en_val; /* MSR_KVM_ASYNC_PF_EN */
		u64 msr_int_val; /* MSR_KVM_ASYNC_PF_INT */
		u16 vec;
		u32 id;
		bool send_user_only;
		u32 host_apf_flags;
		bool delivery_as_pf_vmexit;
		bool pageready_pending;
	} apf;

	/* OSVW MSRs (AMD only) */
	struct {
		u64 length;
		u64 status;
	} osvw;

	struct {
		u64 msr_val;
		struct gfn_to_hva_cache data;
	} pv_eoi;

	u64 msr_kvm_poll_control;

	/* set at EPT violation at this point */
	unsigned long exit_qualification;

	/* pv related host specific info */
	struct {
		bool pv_unhalted;
	} pv;

	int pending_ioapic_eoi;
	int pending_external_vector;

	/* set when the vCPU was preempted while in kernel mode (CPL = 0) */
	bool preempted_in_kernel;

	/* Flush the L1 Data cache for L1TF mitigation on VMENTER */
	bool l1tf_flush_l1d;

	/* Host CPU on which VM-entry was most recently attempted */
	int last_vmentry_cpu;

	/* AMD MSRC001_0015 Hardware Configuration */
	u64 msr_hwcr;

	/* pv related cpuid info */
	struct {
		/*
		 * value of the eax register in the KVM_CPUID_FEATURES CPUID
		 * leaf.
		 */
		u32 features;

		/*
		 * indicates whether pv emulation should be disabled if features
		 * are not present in the guest's cpuid
		 */
		bool enforce;
	} pv_cpuid;

	/* Protected Guests */
	bool guest_state_protected;

	/*
	 * Set when PDPTS were loaded directly by the userspace without
	 * reading the guest memory
	 */
	bool pdptrs_from_userspace;

#if IS_ENABLED(CONFIG_HYPERV)
	hpa_t hv_root_tdp;
#endif
};

struct kvm_lpage_info {
	int disallow_lpage;
};

struct kvm_arch_memory_slot {
	struct kvm_rmap_head *rmap[KVM_NR_PAGE_SIZES];
	struct kvm_lpage_info *lpage_info[KVM_NR_PAGE_SIZES - 1];
	unsigned short *gfn_write_track;
};

/*
 * Track the mode of the optimized logical map, as the rules for decoding the
 * destination vary per mode. Enabling the optimized logical map requires all
 * software-enabled local APICs to be in the same mode, each addressable APIC
 * to be mapped to only one MDA, and each MDA to map to at most one APIC.
 */
enum kvm_apic_logical_mode {
	/* All local APICs are software disabled. */
	KVM_APIC_MODE_SW_DISABLED,
	/* All software enabled local APICs in xAPIC cluster addressing mode. */
	KVM_APIC_MODE_XAPIC_CLUSTER,
	/* All software enabled local APICs in xAPIC flat addressing mode. */
	KVM_APIC_MODE_XAPIC_FLAT,
	/* All software enabled local APICs in x2APIC mode. */
	KVM_APIC_MODE_X2APIC,
	/*
	 * Optimized map disabled, e.g. not all local APICs in the same logical
	 * mode, same logical ID assigned to multiple APICs, etc.
	 */
	KVM_APIC_MODE_MAP_DISABLED,
};

struct kvm_apic_map {
	struct rcu_head rcu;
	enum kvm_apic_logical_mode logical_mode;
	u32 max_apic_id;
	union {
		struct kvm_lapic *xapic_flat_map[8];
		struct kvm_lapic *xapic_cluster_map[16][4];
	};
	struct kvm_lapic *phys_map[];
};
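
/*
 * Lookup sketch (hedged; the authoritative decoding is in lapic.c): in xAPIC
 * flat logical mode each bit of the 8-bit logical ID selects one entry, e.g.
 *
 *	struct kvm_lapic *dest = map->xapic_flat_map[ffs(ldr_bits) - 1];
 *
 * while cluster mode splits the logical ID into a 4-bit cluster number and a
 * 4-bit member bitmap, indexing xapic_cluster_map[cluster][member].
 */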

/* Hyper-V synthetic debugger (SynDbg) */
struct kvm_hv_syndbg {
	struct {
		u64 control;
		u64 status;
		u64 send_page;
		u64 recv_page;
		u64 pending_page;
	} control;
	u64 options;
};

/* Current state of Hyper-V TSC page clocksource */
enum hv_tsc_page_status {
	/* TSC page was not set up or disabled */
	HV_TSC_PAGE_UNSET = 0,
	/* TSC page MSR was written by the guest, update pending */
	HV_TSC_PAGE_GUEST_CHANGED,
	/* TSC page update was triggered from the host side */
	HV_TSC_PAGE_HOST_CHANGED,
	/* TSC page was properly set up and is currently active */
	HV_TSC_PAGE_SET,
	/* TSC page was set up with an inaccessible GPA */
	HV_TSC_PAGE_BROKEN,
};

#ifdef CONFIG_KVM_HYPERV
/* Hyper-V emulation context */
struct kvm_hv {
	struct mutex hv_lock;
	u64 hv_guest_os_id;
	u64 hv_hypercall;
	u64 hv_tsc_page;
	enum hv_tsc_page_status hv_tsc_page_status;

	/* Hyper-v based guest crash (NT kernel bugcheck) parameters */
	u64 hv_crash_param[HV_X64_MSR_CRASH_PARAMS];
	u64 hv_crash_ctl;

	struct ms_hyperv_tsc_page tsc_ref;

	struct idr conn_to_evt;

	u64 hv_reenlightenment_control;
	u64 hv_tsc_emulation_control;
	u64 hv_tsc_emulation_status;
	u64 hv_invtsc_control;

	/* How many vCPUs have VP index != vCPU index */
	atomic_t num_mismatched_vp_indexes;

	/*
	 * How many SynICs use 'AutoEOI' feature
	 * (protected by arch.apicv_update_lock)
	 */
	unsigned int synic_auto_eoi_used;

	struct kvm_hv_syndbg hv_syndbg;

	bool xsaves_xsavec_checked;
};
#endif

struct msr_bitmap_range {
	u32 flags;
	u32 nmsrs;
	u32 base;
	unsigned long *bitmap;
};

#ifdef CONFIG_KVM_XEN
/* Xen emulation context */
struct kvm_xen {
	struct mutex xen_lock;
	u32 xen_version;
	bool long_mode;
	bool runstate_update_flag;
	u8 upcall_vector;
	struct gfn_to_pfn_cache shinfo_cache;
	struct idr evtchn_ports;
	unsigned long poll_mask[BITS_TO_LONGS(KVM_MAX_VCPUS)];
};
#endif

enum kvm_irqchip_mode {
	KVM_IRQCHIP_NONE,
	KVM_IRQCHIP_KERNEL,	/* created with KVM_CREATE_IRQCHIP */
	KVM_IRQCHIP_SPLIT,	/* created with KVM_CAP_SPLIT_IRQCHIP */
};

struct kvm_x86_msr_filter {
	u8 count;
	bool default_allow:1;
	struct msr_bitmap_range ranges[16];
};
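
/*
 * Matching sketch (hedged; the authoritative logic is kvm_msr_allowed() in
 * x86.c): an MSR index hits a range when it falls in [base, base + nmsrs),
 * and the per-MSR bit then selects allow/deny:
 *
 *	if (msr >= range->base && msr < range->base + range->nmsrs)
 *		allowed = test_bit(msr - range->base, range->bitmap);
 *
 * subject to range->flags (KVM_MSR_FILTER_READ / KVM_MSR_FILTER_WRITE).
 */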

struct kvm_x86_pmu_event_filter {
	__u32 action;
	__u32 nevents;
	__u32 fixed_counter_bitmap;
	__u32 flags;
	__u32 nr_includes;
	__u32 nr_excludes;
	__u64 *includes;
	__u64 *excludes;
	__u64 events[];
};

enum kvm_apicv_inhibit {

	/********************************************************************/
	/* INHIBITs that are relevant to both Intel's APICv and AMD's AVIC. */
	/********************************************************************/

	/*
	 * APIC acceleration is disabled by a module parameter
	 * and/or not supported in hardware.
	 */
	APICV_INHIBIT_REASON_DISABLE,

	/*
	 * APIC acceleration is inhibited because AutoEOI feature is
	 * being used by a HyperV guest.
	 */
	APICV_INHIBIT_REASON_HYPERV,

	/*
	 * APIC acceleration is inhibited because userspace hasn't yet
	 * enabled the kernel/split irqchip.
	 */
	APICV_INHIBIT_REASON_ABSENT,

	/*
	 * APIC acceleration is inhibited because KVM_GUESTDBG_BLOCKIRQ
	 * (out of band, debug measure of blocking all interrupts on this vCPU)
	 * was enabled, to avoid AVIC/APICv bypassing it.
	 */
	APICV_INHIBIT_REASON_BLOCKIRQ,

	/*
	 * APICv is disabled because not all vCPUs have a 1:1 mapping between
	 * APIC ID and vCPU, _and_ KVM is not applying its x2APIC hotplug hack.
	 */
	APICV_INHIBIT_REASON_PHYSICAL_ID_ALIASED,

	/*
	 * For simplicity, the APIC acceleration is inhibited the first time
	 * either the APIC ID or the APIC base is changed by the guest from
	 * its reset value.
	 */
	APICV_INHIBIT_REASON_APIC_ID_MODIFIED,
	APICV_INHIBIT_REASON_APIC_BASE_MODIFIED,

	/******************************************************/
	/* INHIBITs that are relevant only to the AMD's AVIC. */
	/******************************************************/

	/*
	 * AVIC is inhibited on a vCPU because it runs a nested guest.
	 *
	 * This is needed because unlike APICv, the peers of this vCPU
	 * cannot use the doorbell mechanism to signal interrupts via AVIC when
	 * a vCPU runs nested.
	 */
	APICV_INHIBIT_REASON_NESTED,

	/*
	 * On SVM, the wait for the IRQ window is implemented with pending vIRQ,
	 * which cannot be injected when the AVIC is enabled, thus AVIC
	 * is inhibited while KVM waits for IRQ window.
	 */
	APICV_INHIBIT_REASON_IRQWIN,

	/*
	 * PIT (i8254) 're-inject' mode, relies on EOI intercept,
	 * which AVIC doesn't support for edge triggered interrupts.
	 */
	APICV_INHIBIT_REASON_PIT_REINJ,

	/*
	 * AVIC is disabled because SEV doesn't support it.
	 */
	APICV_INHIBIT_REASON_SEV,

	/*
	 * AVIC is disabled because not all vCPUs with a valid LDR have a 1:1
	 * mapping between logical ID and vCPU.
	 */
	APICV_INHIBIT_REASON_LOGICAL_ID_ALIASED,
};

struct kvm_arch {
	unsigned long vm_type;
	unsigned long n_used_mmu_pages;
	unsigned long n_requested_mmu_pages;
	unsigned long n_max_mmu_pages;
	unsigned int indirect_shadow_pages;
	u8 mmu_valid_gen;
	struct hlist_head mmu_page_hash[KVM_NUM_MMU_PAGES];
	struct list_head active_mmu_pages;
	struct list_head zapped_obsolete_pages;
	/*
	 * A list of kvm_mmu_page structs that, if zapped, could possibly be
	 * replaced by an NX huge page. A shadow page is on this list if its
	 * existence disallows an NX huge page (nx_huge_page_disallowed is set)
	 * and there are no other conditions that prevent a huge page, e.g.
	 * the backing host page is huge, dirty logging is not enabled for its
	 * memslot, etc... Note, zapping shadow pages on this list doesn't
	 * guarantee an NX huge page will be created in its stead, e.g. if the
	 * guest attempts to execute from the region then KVM obviously can't
	 * create an NX huge page (without hanging the guest).
	 */
	struct list_head possible_nx_huge_pages;
#ifdef CONFIG_KVM_EXTERNAL_WRITE_TRACKING
	struct kvm_page_track_notifier_head track_notifier_head;
#endif
	/*
	 * Protects marking pages unsync during page faults, as TDP MMU page
	 * faults only take mmu_lock for read. For simplicity, the unsync
	 * pages lock is always taken when marking pages unsync regardless of
	 * whether mmu_lock is held for read or write.
	 */
	spinlock_t mmu_unsync_pages_lock;

	struct iommu_domain *iommu_domain;
	bool iommu_noncoherent;
#define __KVM_HAVE_ARCH_NONCOHERENT_DMA
	atomic_t noncoherent_dma_count;
#define __KVM_HAVE_ARCH_ASSIGNED_DEVICE
	atomic_t assigned_device_count;
	struct kvm_pic *vpic;
	struct kvm_ioapic *vioapic;
	struct kvm_pit *vpit;
	atomic_t vapics_in_nmi_mode;
	struct mutex apic_map_lock;
	struct kvm_apic_map __rcu *apic_map;
	atomic_t apic_map_dirty;

	bool apic_access_memslot_enabled;
	bool apic_access_memslot_inhibited;

	/* Protects apicv_inhibit_reasons */
	struct rw_semaphore apicv_update_lock;
	unsigned long apicv_inhibit_reasons;

	gpa_t wall_clock;

	bool mwait_in_guest;
	bool hlt_in_guest;
	bool pause_in_guest;
	bool cstate_in_guest;

	unsigned long irq_sources_bitmap;
	s64 kvmclock_offset;

	/*
	 * This also protects nr_vcpus_matched_tsc which is read from a
	 * preemption-disabled region, so it must be a raw spinlock.
	 */
	raw_spinlock_t tsc_write_lock;
	u64 last_tsc_nsec;
	u64 last_tsc_write;
	u32 last_tsc_khz;
	u64 last_tsc_offset;
	u64 cur_tsc_nsec;
	u64 cur_tsc_write;
	u64 cur_tsc_offset;
	u64 cur_tsc_generation;
	int nr_vcpus_matched_tsc;

	u32 default_tsc_khz;
	bool user_set_tsc;

	seqcount_raw_spinlock_t pvclock_sc;
	bool use_master_clock;
	u64 master_kernel_ns;
	u64 master_cycle_now;
	struct delayed_work kvmclock_update_work;
	struct delayed_work kvmclock_sync_work;

	struct kvm_xen_hvm_config xen_hvm_config;

	/* reads protected by irq_srcu, writes by irq_lock */
	struct hlist_head mask_notifier_list;

#ifdef CONFIG_KVM_HYPERV
	struct kvm_hv hyperv;
#endif

#ifdef CONFIG_KVM_XEN
	struct kvm_xen xen;
#endif

	bool backwards_tsc_observed;
	bool boot_vcpu_runs_old_kvmclock;
	u32 bsp_vcpu_id;

	u64 disabled_quirks;

	enum kvm_irqchip_mode irqchip_mode;
	u8 nr_reserved_ioapic_pins;

	bool disabled_lapic_found;

	bool x2apic_format;
	bool x2apic_broadcast_quirk_disabled;

	bool guest_can_read_msr_platform_info;
	bool exception_payload_enabled;

	bool triple_fault_event;

	bool bus_lock_detection_enabled;
	bool enable_pmu;

	u32 notify_window;
	u32 notify_vmexit_flags;
	/*
	 * If exit_on_emulation_error is set, and the in-kernel instruction
	 * emulator fails to emulate an instruction, allow userspace
	 * the opportunity to look at it.
	 */
	bool exit_on_emulation_error;

	/* Deflect RDMSR and WRMSR to user space when they trigger a #GP */
	u32 user_space_msr_mask;
	struct kvm_x86_msr_filter __rcu *msr_filter;

	u32 hypercall_exit_enabled;

	/* Guest can access the SGX PROVISIONKEY. */
	bool sgx_provisioning_allowed;

	struct kvm_x86_pmu_event_filter __rcu *pmu_event_filter;
	struct task_struct *nx_huge_page_recovery_thread;

#ifdef CONFIG_X86_64
	/* The number of TDP MMU pages across all roots. */
	atomic64_t tdp_mmu_pages;

	/*
	 * List of struct kvm_mmu_pages being used as roots.
	 * All struct kvm_mmu_pages in the list should have
	 * tdp_mmu_page set.
	 *
	 * For reads, this list is protected by:
	 *	the MMU lock in read mode + RCU or
	 *	the MMU lock in write mode
	 *
	 * For writes, this list is protected by tdp_mmu_pages_lock; see
	 * below for the details.
	 *
	 * Roots will remain in the list until their tdp_mmu_root_count
	 * drops to zero, at which point the thread that decremented the
	 * count to zero should remove the root from the list and clean
	 * it up, freeing the root after an RCU grace period.
	 */
	struct list_head tdp_mmu_roots;

	/*
	 * Protects accesses to the following fields when the MMU lock
	 * is held in read mode:
	 *  - tdp_mmu_roots (above)
	 *  - the link field of kvm_mmu_page structs used by the TDP MMU
	 *  - possible_nx_huge_pages;
	 *  - the possible_nx_huge_page_link field of kvm_mmu_page structs used
	 *    by the TDP MMU
	 * Because the lock is only taken within the MMU lock, strictly
	 * speaking it is redundant to acquire this lock when the thread
	 * holds the MMU lock in write mode. However it often simplifies
	 * the code to do so.
	 */
	spinlock_t tdp_mmu_pages_lock;
#endif /* CONFIG_X86_64 */

	/*
	 * If set, at least one shadow root has been allocated. This flag
	 * is used as one input when determining whether certain memslot
	 * related allocations are necessary.
	 */
	bool shadow_root_allocated;

#ifdef CONFIG_KVM_EXTERNAL_WRITE_TRACKING
	/*
	 * If set, the VM has (or had) an external write tracking user, and
	 * thus all write tracking metadata has been allocated, even if KVM
	 * itself isn't using write tracking.
	 */
	bool external_write_tracking_enabled;
#endif

#if IS_ENABLED(CONFIG_HYPERV)
	hpa_t hv_root_tdp;
	spinlock_t hv_root_tdp_lock;
	struct hv_partition_assist_pg *hv_pa_pg;
#endif
	/*
	 * VM-scope maximum vCPU ID. Used to determine the size of structures
	 * that increase along with the maximum vCPU ID, in which case, using
	 * the global KVM_MAX_VCPU_IDS may lead to significant memory waste.
	 */
	u32 max_vcpu_ids;

	bool disable_nx_huge_pages;

	/*
	 * Memory caches used to allocate shadow pages when performing eager
	 * page splitting. No need for a shadowed_info_cache since eager page
	 * splitting only allocates direct shadow pages.
	 *
	 * Protected by kvm->slots_lock.
	 */
	struct kvm_mmu_memory_cache split_shadow_page_cache;
	struct kvm_mmu_memory_cache split_page_header_cache;

	/*
	 * Memory cache used to allocate pte_list_desc structs while splitting
	 * huge pages. In the worst case, to split one huge page, 512
	 * pte_list_desc structs are needed to add each lower level leaf sptep
	 * to the rmap plus 1 to extend the parent_ptes rmap of the lower level
	 * page table.
	 *
	 * Protected by kvm->slots_lock.
	 */
#define SPLIT_DESC_CACHE_MIN_NR_OBJECTS (SPTE_ENT_PER_PAGE + 1)
	struct kvm_mmu_memory_cache split_desc_cache;
};
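
/*
 * Worked example for the define above: with SPTE_ENT_PER_PAGE == 512,
 * splitting one huge page needs at most 512 pte_list_desc structs for the
 * new leaf sptes plus 1 to extend the lower level page table's parent_ptes
 * rmap, i.e. SPLIT_DESC_CACHE_MIN_NR_OBJECTS == 513.
 */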

struct kvm_vm_stat {
	struct kvm_vm_stat_generic generic;
	u64 mmu_shadow_zapped;
	u64 mmu_pte_write;
	u64 mmu_pde_zapped;
	u64 mmu_flooded;
	u64 mmu_recycled;
	u64 mmu_cache_miss;
	u64 mmu_unsync;
	union {
		struct {
			atomic64_t pages_4k;
			atomic64_t pages_2m;
			atomic64_t pages_1g;
		};
		atomic64_t pages[KVM_NR_PAGE_SIZES];
	};
	u64 nx_lpage_splits;
	u64 max_mmu_page_hash_collisions;
	u64 max_mmu_rmap_size;
};

struct kvm_vcpu_stat {
	struct kvm_vcpu_stat_generic generic;
	u64 pf_taken;
	u64 pf_fixed;
	u64 pf_emulate;
	u64 pf_spurious;
	u64 pf_fast;
	u64 pf_mmio_spte_created;
	u64 pf_guest;
	u64 tlb_flush;
	u64 invlpg;

	u64 exits;
	u64 io_exits;
	u64 mmio_exits;
	u64 signal_exits;
	u64 irq_window_exits;
	u64 nmi_window_exits;
	u64 l1d_flush;
	u64 halt_exits;
	u64 request_irq_exits;
	u64 irq_exits;
	u64 host_state_reload;
	u64 fpu_reload;
	u64 insn_emulation;
	u64 insn_emulation_fail;
	u64 hypercalls;
	u64 irq_injections;
	u64 nmi_injections;
	u64 req_event;
	u64 nested_run;
	u64 directed_yield_attempted;
	u64 directed_yield_successful;
	u64 preemption_reported;
	u64 preemption_other;
	u64 guest_mode;
	u64 notify_window_exits;
};

struct x86_instruction_info;

struct msr_data {
	bool host_initiated;
	u32 index;
	u64 data;
};

struct kvm_lapic_irq {
	u32 vector;
	u16 delivery_mode;
	u16 dest_mode;
	bool level;
	u16 trig_mode;
	u32 shorthand;
	u32 dest_id;
	bool msi_redir_hint;
};

static inline u16 kvm_lapic_irq_dest_mode(bool dest_mode_logical)
{
	return dest_mode_logical ? APIC_DEST_LOGICAL : APIC_DEST_PHYSICAL;
}
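
/*
 * Usage sketch (hedged, illustrative values): building a physical-mode IRQ
 * from a "is the destination logical?" flag:
 *
 *	struct kvm_lapic_irq irq = {
 *		.vector		= vector,
 *		.dest_mode	= kvm_lapic_irq_dest_mode(false),
 *		.dest_id	= dest_apic_id,
 *	};
 *
 * where passing false yields APIC_DEST_PHYSICAL and true APIC_DEST_LOGICAL.
 */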

struct kvm_x86_ops {
	const char *name;

	int (*check_processor_compatibility)(void);

	int (*hardware_enable)(void);
	void (*hardware_disable)(void);
	void (*hardware_unsetup)(void);
	bool (*has_emulated_msr)(struct kvm *kvm, u32 index);
	void (*vcpu_after_set_cpuid)(struct kvm_vcpu *vcpu);

	unsigned int vm_size;
	int (*vm_init)(struct kvm *kvm);
	void (*vm_destroy)(struct kvm *kvm);

	/* Create, but do not attach this VCPU */
	int (*vcpu_precreate)(struct kvm *kvm);
	int (*vcpu_create)(struct kvm_vcpu *vcpu);
	void (*vcpu_free)(struct kvm_vcpu *vcpu);
	void (*vcpu_reset)(struct kvm_vcpu *vcpu, bool init_event);

	void (*prepare_switch_to_guest)(struct kvm_vcpu *vcpu);
	void (*vcpu_load)(struct kvm_vcpu *vcpu, int cpu);
	void (*vcpu_put)(struct kvm_vcpu *vcpu);

	void (*update_exception_bitmap)(struct kvm_vcpu *vcpu);
	int (*get_msr)(struct kvm_vcpu *vcpu, struct msr_data *msr);
	int (*set_msr)(struct kvm_vcpu *vcpu, struct msr_data *msr);
	u64 (*get_segment_base)(struct kvm_vcpu *vcpu, int seg);
	void (*get_segment)(struct kvm_vcpu *vcpu,
			    struct kvm_segment *var, int seg);
	int (*get_cpl)(struct kvm_vcpu *vcpu);
	void (*set_segment)(struct kvm_vcpu *vcpu,
			    struct kvm_segment *var, int seg);
	void (*get_cs_db_l_bits)(struct kvm_vcpu *vcpu, int *db, int *l);
	bool (*is_valid_cr0)(struct kvm_vcpu *vcpu, unsigned long cr0);
	void (*set_cr0)(struct kvm_vcpu *vcpu, unsigned long cr0);
	void (*post_set_cr3)(struct kvm_vcpu *vcpu, unsigned long cr3);
	bool (*is_valid_cr4)(struct kvm_vcpu *vcpu, unsigned long cr4);
	void (*set_cr4)(struct kvm_vcpu *vcpu, unsigned long cr4);
	int (*set_efer)(struct kvm_vcpu *vcpu, u64 efer);
	void (*get_idt)(struct kvm_vcpu *vcpu, struct desc_ptr *dt);
	void (*set_idt)(struct kvm_vcpu *vcpu, struct desc_ptr *dt);
	void (*get_gdt)(struct kvm_vcpu *vcpu, struct desc_ptr *dt);
	void (*set_gdt)(struct kvm_vcpu *vcpu, struct desc_ptr *dt);
	void (*sync_dirty_debug_regs)(struct kvm_vcpu *vcpu);
	void (*set_dr7)(struct kvm_vcpu *vcpu, unsigned long value);
	void (*cache_reg)(struct kvm_vcpu *vcpu, enum kvm_reg reg);
	unsigned long (*get_rflags)(struct kvm_vcpu *vcpu);
	void (*set_rflags)(struct kvm_vcpu *vcpu, unsigned long rflags);
	bool (*get_if_flag)(struct kvm_vcpu *vcpu);

	void (*flush_tlb_all)(struct kvm_vcpu *vcpu);
	void (*flush_tlb_current)(struct kvm_vcpu *vcpu);
#if IS_ENABLED(CONFIG_HYPERV)
	int (*flush_remote_tlbs)(struct kvm *kvm);
	int (*flush_remote_tlbs_range)(struct kvm *kvm, gfn_t gfn,
				       gfn_t nr_pages);
#endif

	/*
	 * Flush any TLB entries associated with the given GVA.
	 * Does not need to flush GPA->HPA mappings.
	 * Can potentially get non-canonical addresses through INVLPGs, which
	 * the implementation may choose to ignore if appropriate.
	 */
	void (*flush_tlb_gva)(struct kvm_vcpu *vcpu, gva_t addr);

	/*
	 * Flush any TLB entries created by the guest. Like tlb_flush_gva(),
	 * does not need to flush GPA->HPA mappings.
	 */
	void (*flush_tlb_guest)(struct kvm_vcpu *vcpu);

	int (*vcpu_pre_run)(struct kvm_vcpu *vcpu);
	enum exit_fastpath_completion (*vcpu_run)(struct kvm_vcpu *vcpu,
						  bool force_immediate_exit);
	int (*handle_exit)(struct kvm_vcpu *vcpu,
			   enum exit_fastpath_completion exit_fastpath);
	int (*skip_emulated_instruction)(struct kvm_vcpu *vcpu);
	void (*update_emulated_instruction)(struct kvm_vcpu *vcpu);
	void (*set_interrupt_shadow)(struct kvm_vcpu *vcpu, int mask);
	u32 (*get_interrupt_shadow)(struct kvm_vcpu *vcpu);
	void (*patch_hypercall)(struct kvm_vcpu *vcpu,
				unsigned char *hypercall_addr);
	void (*inject_irq)(struct kvm_vcpu *vcpu, bool reinjected);
	void (*inject_nmi)(struct kvm_vcpu *vcpu);
	void (*inject_exception)(struct kvm_vcpu *vcpu);
	void (*cancel_injection)(struct kvm_vcpu *vcpu);
	int (*interrupt_allowed)(struct kvm_vcpu *vcpu, bool for_injection);
	int (*nmi_allowed)(struct kvm_vcpu *vcpu, bool for_injection);
1695 | bool (*get_nmi_mask)(struct kvm_vcpu *vcpu); |
1696 | void (*set_nmi_mask)(struct kvm_vcpu *vcpu, bool masked); |
1697 | /* Whether or not a virtual NMI is pending in hardware. */ |
1698 | bool (*is_vnmi_pending)(struct kvm_vcpu *vcpu); |
1699 | /* |
1700 | * Attempt to pend a virtual NMI in hardware. Returns %true on success |
1701 | * to allow using static_call_ret0 as the fallback. |
1702 | */ |
1703 | bool (*set_vnmi_pending)(struct kvm_vcpu *vcpu); |
1704 | void (*enable_nmi_window)(struct kvm_vcpu *vcpu); |
1705 | void (*enable_irq_window)(struct kvm_vcpu *vcpu); |
1706 | void (*update_cr8_intercept)(struct kvm_vcpu *vcpu, int tpr, int irr); |
1707 | bool (*check_apicv_inhibit_reasons)(enum kvm_apicv_inhibit reason); |
1708 | const unsigned long required_apicv_inhibits; |
1709 | bool allow_apicv_in_x2apic_without_x2apic_virtualization; |
1710 | void (*refresh_apicv_exec_ctrl)(struct kvm_vcpu *vcpu); |
1711 | void (*hwapic_irr_update)(struct kvm_vcpu *vcpu, int max_irr); |
1712 | void (*hwapic_isr_update)(int isr); |
1713 | bool (*guest_apic_has_interrupt)(struct kvm_vcpu *vcpu); |
1714 | void (*load_eoi_exitmap)(struct kvm_vcpu *vcpu, u64 *eoi_exit_bitmap); |
1715 | void (*set_virtual_apic_mode)(struct kvm_vcpu *vcpu); |
1716 | void (*set_apic_access_page_addr)(struct kvm_vcpu *vcpu); |
1717 | void (*deliver_interrupt)(struct kvm_lapic *apic, int delivery_mode, |
1718 | int trig_mode, int vector); |
1719 | int (*sync_pir_to_irr)(struct kvm_vcpu *vcpu); |
1720 | int (*set_tss_addr)(struct kvm *kvm, unsigned int addr); |
1721 | int (*set_identity_map_addr)(struct kvm *kvm, u64 ident_addr); |
1722 | u8 (*get_mt_mask)(struct kvm_vcpu *vcpu, gfn_t gfn, bool is_mmio); |
1723 | |
1724 | void (*load_mmu_pgd)(struct kvm_vcpu *vcpu, hpa_t root_hpa, |
1725 | int root_level); |
1726 | |
1727 | bool (*has_wbinvd_exit)(void); |
1728 | |
1729 | u64 (*get_l2_tsc_offset)(struct kvm_vcpu *vcpu); |
1730 | u64 (*get_l2_tsc_multiplier)(struct kvm_vcpu *vcpu); |
1731 | void (*write_tsc_offset)(struct kvm_vcpu *vcpu); |
1732 | void (*write_tsc_multiplier)(struct kvm_vcpu *vcpu); |
1733 | |
1734 | /* |
1735 | * Retrieve somewhat arbitrary exit information. Intended to |
1736 | * be used only from within tracepoints or error paths. |
1737 | */ |
1738 | void (*get_exit_info)(struct kvm_vcpu *vcpu, u32 *reason, |
1739 | u64 *info1, u64 *info2, |
1740 | u32 *exit_int_info, u32 *exit_int_info_err_code); |
1741 | |
1742 | int (*check_intercept)(struct kvm_vcpu *vcpu, |
1743 | struct x86_instruction_info *info, |
1744 | enum x86_intercept_stage stage, |
1745 | struct x86_exception *exception); |
1746 | void (*handle_exit_irqoff)(struct kvm_vcpu *vcpu); |
1747 | |
1748 | void (*sched_in)(struct kvm_vcpu *vcpu, int cpu); |
1749 | |
1750 | /* |
1751 | * Size of the CPU's dirty log buffer, i.e. VMX's PML buffer. A zero |
1752 | * value indicates CPU dirty logging is unsupported or disabled. |
1753 | */ |
1754 | int cpu_dirty_log_size; |
1755 | void (*update_cpu_dirty_logging)(struct kvm_vcpu *vcpu); |
1756 | |
1757 | const struct kvm_x86_nested_ops *nested_ops; |
1758 | |
1759 | void (*vcpu_blocking)(struct kvm_vcpu *vcpu); |
1760 | void (*vcpu_unblocking)(struct kvm_vcpu *vcpu); |
1761 | |
1762 | int (*pi_update_irte)(struct kvm *kvm, unsigned int host_irq, |
1763 | uint32_t guest_irq, bool set); |
1764 | void (*pi_start_assignment)(struct kvm *kvm); |
1765 | void (*apicv_pre_state_restore)(struct kvm_vcpu *vcpu); |
1766 | void (*apicv_post_state_restore)(struct kvm_vcpu *vcpu); |
1767 | bool (*dy_apicv_has_pending_interrupt)(struct kvm_vcpu *vcpu); |
1768 | |
1769 | int (*set_hv_timer)(struct kvm_vcpu *vcpu, u64 guest_deadline_tsc, |
1770 | bool *expired); |
1771 | void (*cancel_hv_timer)(struct kvm_vcpu *vcpu); |
1772 | |
1773 | void (*setup_mce)(struct kvm_vcpu *vcpu); |
1774 | |
1775 | #ifdef CONFIG_KVM_SMM |
1776 | int (*smi_allowed)(struct kvm_vcpu *vcpu, bool for_injection); |
1777 | int (*enter_smm)(struct kvm_vcpu *vcpu, union kvm_smram *smram); |
1778 | int (*leave_smm)(struct kvm_vcpu *vcpu, const union kvm_smram *smram); |
1779 | void (*enable_smi_window)(struct kvm_vcpu *vcpu); |
1780 | #endif |
1781 | |
1782 | int (*mem_enc_ioctl)(struct kvm *kvm, void __user *argp); |
1783 | int (*mem_enc_register_region)(struct kvm *kvm, struct kvm_enc_region *argp); |
1784 | int (*mem_enc_unregister_region)(struct kvm *kvm, struct kvm_enc_region *argp); |
1785 | int (*vm_copy_enc_context_from)(struct kvm *kvm, unsigned int source_fd); |
1786 | int (*vm_move_enc_context_from)(struct kvm *kvm, unsigned int source_fd); |
1787 | void (*guest_memory_reclaimed)(struct kvm *kvm); |
1788 | |
1789 | int (*get_msr_feature)(struct kvm_msr_entry *entry); |
1790 | |
1791 | int (*check_emulate_instruction)(struct kvm_vcpu *vcpu, int emul_type, |
1792 | void *insn, int insn_len); |
1793 | |
1794 | bool (*apic_init_signal_blocked)(struct kvm_vcpu *vcpu); |
1795 | int (*enable_l2_tlb_flush)(struct kvm_vcpu *vcpu); |
1796 | |
1797 | void (*migrate_timers)(struct kvm_vcpu *vcpu); |
1798 | void (*msr_filter_changed)(struct kvm_vcpu *vcpu); |
1799 | int (*complete_emulated_msr)(struct kvm_vcpu *vcpu, int err); |
1800 | |
1801 | void (*vcpu_deliver_sipi_vector)(struct kvm_vcpu *vcpu, u8 vector); |
1802 | |
1803 | /* |
1804 | * Returns vCPU specific APICv inhibit reasons |
1805 | */ |
1806 | unsigned long (*vcpu_get_apicv_inhibit_reasons)(struct kvm_vcpu *vcpu); |
1807 | |
1808 | gva_t (*get_untagged_addr)(struct kvm_vcpu *vcpu, gva_t gva, unsigned int flags); |
1809 | void *(*alloc_apic_backing_page)(struct kvm_vcpu *vcpu); |
1810 | }; |
1811 | |
1812 | struct kvm_x86_nested_ops { |
1813 | void (*leave_nested)(struct kvm_vcpu *vcpu); |
1814 | bool (*is_exception_vmexit)(struct kvm_vcpu *vcpu, u8 vector, |
1815 | u32 error_code); |
1816 | int (*check_events)(struct kvm_vcpu *vcpu); |
1817 | bool (*has_events)(struct kvm_vcpu *vcpu); |
1818 | void (*triple_fault)(struct kvm_vcpu *vcpu); |
1819 | int (*get_state)(struct kvm_vcpu *vcpu, |
1820 | struct kvm_nested_state __user *user_kvm_nested_state, |
1821 | unsigned user_data_size); |
1822 | int (*set_state)(struct kvm_vcpu *vcpu, |
1823 | struct kvm_nested_state __user *user_kvm_nested_state, |
1824 | struct kvm_nested_state *kvm_state); |
1825 | bool (*get_nested_state_pages)(struct kvm_vcpu *vcpu); |
1826 | int (*write_log_dirty)(struct kvm_vcpu *vcpu, gpa_t l2_gpa); |
1827 | |
1828 | int (*enable_evmcs)(struct kvm_vcpu *vcpu, |
1829 | uint16_t *vmcs_version); |
1830 | uint16_t (*get_evmcs_version)(struct kvm_vcpu *vcpu); |
1831 | void (*hv_inject_synthetic_vmexit_post_tlb_flush)(struct kvm_vcpu *vcpu); |
1832 | }; |
1833 | |
1834 | struct kvm_x86_init_ops { |
1835 | int (*hardware_setup)(void); |
1836 | unsigned int (*handle_intel_pt_intr)(void); |
1837 | |
1838 | struct kvm_x86_ops *runtime_ops; |
1839 | struct kvm_pmu_ops *pmu_ops; |
1840 | }; |
1841 | |
1842 | struct kvm_arch_async_pf { |
1843 | u32 token; |
1844 | gfn_t gfn; |
1845 | unsigned long cr3; |
1846 | bool direct_map; |
1847 | }; |
1848 | |
1849 | extern u32 __read_mostly kvm_nr_uret_msrs; |
1850 | extern u64 __read_mostly host_efer; |
1851 | extern bool __read_mostly allow_smaller_maxphyaddr; |
1852 | extern bool __read_mostly enable_apicv; |
1853 | extern struct kvm_x86_ops kvm_x86_ops; |
1854 | |
1855 | #define KVM_X86_OP(func) \ |
1856 | DECLARE_STATIC_CALL(kvm_x86_##func, *(((struct kvm_x86_ops *)0)->func)); |
1857 | #define KVM_X86_OP_OPTIONAL KVM_X86_OP |
1858 | #define KVM_X86_OP_OPTIONAL_RET0 KVM_X86_OP |
1859 | #include <asm/kvm-x86-ops.h> |
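
/*
 * The include above is an X-macro expansion: asm/kvm-x86-ops.h invokes
 * KVM_X86_OP() once per vendor hook, so every member of struct kvm_x86_ops
 * gets a matching static call declaration.  E.g. KVM_X86_OP(vcpu_create)
 * expands (roughly) to:
 *
 *	DECLARE_STATIC_CALL(kvm_x86_vcpu_create,
 *			    *(((struct kvm_x86_ops *)0)->vcpu_create));
 *
 * where the null pointer is never dereferenced at runtime; it only supplies
 * the function-pointer type for the static call.
 */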
1860 | |
1861 | int kvm_x86_vendor_init(struct kvm_x86_init_ops *ops); |
1862 | void kvm_x86_vendor_exit(void); |
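
/*
 * A sketch of how a vendor module is expected to plug in, assuming
 * hypothetical "example_*" symbols (the real equivalents live in vmx.c and
 * svm.c):
 *
 *	static struct kvm_x86_init_ops example_init_ops __initdata = {
 *		.hardware_setup	= example_hardware_setup,
 *		.runtime_ops	= &example_x86_ops,
 *		.pmu_ops	= &example_pmu_ops,
 *	};
 *
 *	static int __init example_module_init(void)
 *	{
 *		return kvm_x86_vendor_init(&example_init_ops);
 *	}
 *
 *	static void __exit example_module_exit(void)
 *	{
 *		kvm_x86_vendor_exit();
 *	}
 */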
1863 | |
1864 | #define __KVM_HAVE_ARCH_VM_ALLOC |
1865 | static inline struct kvm *kvm_arch_alloc_vm(void) |
1866 | { |
	return __vmalloc(kvm_x86_ops.vm_size, GFP_KERNEL_ACCOUNT | __GFP_ZERO);
1868 | } |
1869 | |
1870 | #define __KVM_HAVE_ARCH_VM_FREE |
1871 | void kvm_arch_free_vm(struct kvm *kvm); |
1872 | |
1873 | #if IS_ENABLED(CONFIG_HYPERV) |
1874 | #define __KVM_HAVE_ARCH_FLUSH_REMOTE_TLBS |
1875 | static inline int kvm_arch_flush_remote_tlbs(struct kvm *kvm) |
1876 | { |
1877 | if (kvm_x86_ops.flush_remote_tlbs && |
1878 | !static_call(kvm_x86_flush_remote_tlbs)(kvm)) |
1879 | return 0; |
1880 | else |
		return -EOPNOTSUPP;
1882 | } |
1883 | |
1884 | #define __KVM_HAVE_ARCH_FLUSH_REMOTE_TLBS_RANGE |
1885 | static inline int kvm_arch_flush_remote_tlbs_range(struct kvm *kvm, gfn_t gfn, |
1886 | u64 nr_pages) |
1887 | { |
1888 | if (!kvm_x86_ops.flush_remote_tlbs_range) |
1889 | return -EOPNOTSUPP; |
1890 | |
1891 | return static_call(kvm_x86_flush_remote_tlbs_range)(kvm, gfn, nr_pages); |
1892 | } |
1893 | #endif /* CONFIG_HYPERV */ |
1894 | |
1895 | enum kvm_intr_type { |
1896 | /* Values are arbitrary, but must be non-zero. */ |
1897 | KVM_HANDLING_IRQ = 1, |
1898 | KVM_HANDLING_NMI, |
1899 | }; |
1900 | |
1901 | /* Enable perf NMI and timer modes to work, and minimise false positives. */ |
1902 | #define kvm_arch_pmi_in_guest(vcpu) \ |
1903 | ((vcpu) && (vcpu)->arch.handling_intr_from_guest && \ |
1904 | (!!in_nmi() == ((vcpu)->arch.handling_intr_from_guest == KVM_HANDLING_NMI))) |
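
/*
 * Reading the check above: a PMI counts as "in guest" only if the vCPU was
 * handling an interrupt that arrived while the guest was running *and* the
 * context matches, i.e. an NMI-sourced PMI pairs with KVM_HANDLING_NMI and a
 * non-NMI PMI with KVM_HANDLING_IRQ.  A sketch of the intended call site,
 * e.g. perf's guest-state callback:
 *
 *	if (kvm_arch_pmi_in_guest(kvm_get_running_vcpu()))
 *		...treat the sample as guest-owned...
 */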
1905 | |
1906 | void __init kvm_mmu_x86_module_init(void); |
1907 | int kvm_mmu_vendor_module_init(void); |
1908 | void kvm_mmu_vendor_module_exit(void); |
1909 | |
1910 | void kvm_mmu_destroy(struct kvm_vcpu *vcpu); |
1911 | int kvm_mmu_create(struct kvm_vcpu *vcpu); |
1912 | void kvm_mmu_init_vm(struct kvm *kvm); |
1913 | void kvm_mmu_uninit_vm(struct kvm *kvm); |
1914 | |
1915 | void kvm_mmu_init_memslot_memory_attributes(struct kvm *kvm, |
1916 | struct kvm_memory_slot *slot); |
1917 | |
1918 | void kvm_mmu_after_set_cpuid(struct kvm_vcpu *vcpu); |
1919 | void kvm_mmu_reset_context(struct kvm_vcpu *vcpu); |
1920 | void kvm_mmu_slot_remove_write_access(struct kvm *kvm, |
1921 | const struct kvm_memory_slot *memslot, |
1922 | int start_level); |
1923 | void kvm_mmu_slot_try_split_huge_pages(struct kvm *kvm, |
1924 | const struct kvm_memory_slot *memslot, |
1925 | int target_level); |
1926 | void kvm_mmu_try_split_huge_pages(struct kvm *kvm, |
1927 | const struct kvm_memory_slot *memslot, |
1928 | u64 start, u64 end, |
1929 | int target_level); |
1930 | void kvm_mmu_zap_collapsible_sptes(struct kvm *kvm, |
1931 | const struct kvm_memory_slot *memslot); |
1932 | void kvm_mmu_slot_leaf_clear_dirty(struct kvm *kvm, |
1933 | const struct kvm_memory_slot *memslot); |
1934 | void kvm_mmu_invalidate_mmio_sptes(struct kvm *kvm, u64 gen); |
1935 | void kvm_mmu_change_mmu_pages(struct kvm *kvm, unsigned long kvm_nr_mmu_pages); |
1936 | |
1937 | int load_pdptrs(struct kvm_vcpu *vcpu, unsigned long cr3); |
1938 | |
1939 | int emulator_write_phys(struct kvm_vcpu *vcpu, gpa_t gpa, |
1940 | const void *val, int bytes); |
1941 | |
1942 | struct kvm_irq_mask_notifier { |
1943 | void (*func)(struct kvm_irq_mask_notifier *kimn, bool masked); |
1944 | int irq; |
1945 | struct hlist_node link; |
1946 | }; |
1947 | |
1948 | void kvm_register_irq_mask_notifier(struct kvm *kvm, int irq, |
1949 | struct kvm_irq_mask_notifier *kimn); |
1950 | void kvm_unregister_irq_mask_notifier(struct kvm *kvm, int irq, |
1951 | struct kvm_irq_mask_notifier *kimn); |
1952 | void kvm_fire_mask_notifiers(struct kvm *kvm, unsigned irqchip, unsigned pin, |
1953 | bool mask); |
1954 | |
1955 | extern bool tdp_enabled; |
1956 | |
1957 | u64 vcpu_tsc_khz(struct kvm_vcpu *vcpu); |
1958 | |
1959 | /* |
1960 | * EMULTYPE_NO_DECODE - Set when re-emulating an instruction (after completing |
1961 | * userspace I/O) to indicate that the emulation context |
1962 | * should be reused as is, i.e. skip initialization of |
1963 | * emulation context, instruction fetch and decode. |
1964 | * |
1965 | * EMULTYPE_TRAP_UD - Set when emulating an intercepted #UD from hardware. |
1966 | * Indicates that only select instructions (tagged with |
1967 | * EmulateOnUD) should be emulated (to minimize the emulator |
1968 | * attack surface). See also EMULTYPE_TRAP_UD_FORCED. |
1969 | * |
1970 | * EMULTYPE_SKIP - Set when emulating solely to skip an instruction, i.e. to |
1971 | * decode the instruction length. For use *only* by |
1972 | * kvm_x86_ops.skip_emulated_instruction() implementations if |
1973 | * EMULTYPE_COMPLETE_USER_EXIT is not set. |
1974 | * |
1975 | * EMULTYPE_ALLOW_RETRY_PF - Set when the emulator should resume the guest to |
1976 | * retry native execution under certain conditions, |
1977 | * Can only be set in conjunction with EMULTYPE_PF. |
1978 | * |
1979 | * EMULTYPE_TRAP_UD_FORCED - Set when emulating an intercepted #UD that was |
1980 | * triggered by KVM's magic "force emulation" prefix, |
1981 | * which is opt in via module param (off by default). |
1982 | * Bypasses EmulateOnUD restriction despite emulating |
1983 | * due to an intercepted #UD (see EMULTYPE_TRAP_UD). |
1984 | * Used to test the full emulator from userspace. |
1985 | * |
1986 | * EMULTYPE_VMWARE_GP - Set when emulating an intercepted #GP for VMware |
1987 | * backdoor emulation, which is opt in via module param. |
1988 | * VMware backdoor emulation handles select instructions |
1989 | * and reinjects the #GP for all other cases. |
1990 | * |
1991 | * EMULTYPE_PF - Set when emulating MMIO by way of an intercepted #PF, in which |
1992 | * case the CR2/GPA value pass on the stack is valid. |
1993 | * |
1994 | * EMULTYPE_COMPLETE_USER_EXIT - Set when the emulator should update interruptibility |
1995 | * state and inject single-step #DBs after skipping |
1996 | * an instruction (after completing userspace I/O). |
1997 | * |
1998 | * EMULTYPE_WRITE_PF_TO_SP - Set when emulating an intercepted page fault that |
1999 | * is attempting to write a gfn that contains one or |
2000 | * more of the PTEs used to translate the write itself, |
2001 | * and the owning page table is being shadowed by KVM. |
2002 | * If emulation of the faulting instruction fails and |
2003 | * this flag is set, KVM will exit to userspace instead |
2004 | * of retrying emulation as KVM cannot make forward |
2005 | * progress. |
2006 | * |
2007 | * If emulation fails for a write to guest page tables, |
2008 | * KVM unprotects (zaps) the shadow page for the target |
2009 | * gfn and resumes the guest to retry the non-emulatable |
2010 | * instruction (on hardware). Unprotecting the gfn |
2011 | * doesn't allow forward progress for a self-changing |
2012 | * access because doing so also zaps the translation for |
2013 | * the gfn, i.e. retrying the instruction will hit a |
2014 | * !PRESENT fault, which results in a new shadow page |
2015 | * and sends KVM back to square one. |
2016 | */ |
2017 | #define EMULTYPE_NO_DECODE (1 << 0) |
2018 | #define EMULTYPE_TRAP_UD (1 << 1) |
2019 | #define EMULTYPE_SKIP (1 << 2) |
2020 | #define EMULTYPE_ALLOW_RETRY_PF (1 << 3) |
2021 | #define EMULTYPE_TRAP_UD_FORCED (1 << 4) |
2022 | #define EMULTYPE_VMWARE_GP (1 << 5) |
2023 | #define EMULTYPE_PF (1 << 6) |
2024 | #define EMULTYPE_COMPLETE_USER_EXIT (1 << 7) |
2025 | #define EMULTYPE_WRITE_PF_TO_SP (1 << 8) |
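
/*
 * A sketch of how the flags compose (the retry_possible condition is
 * hypothetical): MMIO/page-table emulation triggered by an intercepted #PF
 * passes EMULTYPE_PF, optionally allowing a native retry:
 *
 *	int emulation_type = EMULTYPE_PF;
 *
 *	if (retry_possible)
 *		emulation_type |= EMULTYPE_ALLOW_RETRY_PF;
 *
 *	return kvm_emulate_instruction(vcpu, emulation_type);
 */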
2026 | |
2027 | int kvm_emulate_instruction(struct kvm_vcpu *vcpu, int emulation_type); |
2028 | int kvm_emulate_instruction_from_buffer(struct kvm_vcpu *vcpu, |
2029 | void *insn, int insn_len); |
2030 | void __kvm_prepare_emulation_failure_exit(struct kvm_vcpu *vcpu, |
2031 | u64 *data, u8 ndata); |
2032 | void kvm_prepare_emulation_failure_exit(struct kvm_vcpu *vcpu); |
2033 | |
2034 | void kvm_enable_efer_bits(u64); |
2035 | bool kvm_valid_efer(struct kvm_vcpu *vcpu, u64 efer); |
2036 | int __kvm_get_msr(struct kvm_vcpu *vcpu, u32 index, u64 *data, bool host_initiated); |
2037 | int kvm_get_msr(struct kvm_vcpu *vcpu, u32 index, u64 *data); |
2038 | int kvm_set_msr(struct kvm_vcpu *vcpu, u32 index, u64 data); |
2039 | int kvm_emulate_rdmsr(struct kvm_vcpu *vcpu); |
2040 | int kvm_emulate_wrmsr(struct kvm_vcpu *vcpu); |
2041 | int kvm_emulate_as_nop(struct kvm_vcpu *vcpu); |
2042 | int kvm_emulate_invd(struct kvm_vcpu *vcpu); |
2043 | int kvm_emulate_mwait(struct kvm_vcpu *vcpu); |
2044 | int kvm_handle_invalid_op(struct kvm_vcpu *vcpu); |
2045 | int kvm_emulate_monitor(struct kvm_vcpu *vcpu); |
2046 | |
2047 | int kvm_fast_pio(struct kvm_vcpu *vcpu, int size, unsigned short port, int in); |
2048 | int kvm_emulate_cpuid(struct kvm_vcpu *vcpu); |
2049 | int kvm_emulate_halt(struct kvm_vcpu *vcpu); |
2050 | int kvm_emulate_halt_noskip(struct kvm_vcpu *vcpu); |
2051 | int kvm_emulate_ap_reset_hold(struct kvm_vcpu *vcpu); |
2052 | int kvm_emulate_wbinvd(struct kvm_vcpu *vcpu); |
2053 | |
2054 | void kvm_get_segment(struct kvm_vcpu *vcpu, struct kvm_segment *var, int seg); |
2055 | void kvm_set_segment(struct kvm_vcpu *vcpu, struct kvm_segment *var, int seg); |
2056 | int kvm_load_segment_descriptor(struct kvm_vcpu *vcpu, u16 selector, int seg); |
2057 | void kvm_vcpu_deliver_sipi_vector(struct kvm_vcpu *vcpu, u8 vector); |
2058 | |
2059 | int kvm_task_switch(struct kvm_vcpu *vcpu, u16 tss_selector, int idt_index, |
2060 | int reason, bool has_error_code, u32 error_code); |
2061 | |
2062 | void kvm_post_set_cr0(struct kvm_vcpu *vcpu, unsigned long old_cr0, unsigned long cr0); |
2063 | void kvm_post_set_cr4(struct kvm_vcpu *vcpu, unsigned long old_cr4, unsigned long cr4); |
2064 | int kvm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0); |
2065 | int kvm_set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3); |
2066 | int kvm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4); |
2067 | int kvm_set_cr8(struct kvm_vcpu *vcpu, unsigned long cr8); |
2068 | int kvm_set_dr(struct kvm_vcpu *vcpu, int dr, unsigned long val); |
2069 | unsigned long kvm_get_dr(struct kvm_vcpu *vcpu, int dr); |
2070 | unsigned long kvm_get_cr8(struct kvm_vcpu *vcpu); |
2071 | void kvm_lmsw(struct kvm_vcpu *vcpu, unsigned long msw); |
2072 | int kvm_emulate_xsetbv(struct kvm_vcpu *vcpu); |
2073 | |
2074 | int kvm_get_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr); |
2075 | int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr); |
2076 | |
2077 | unsigned long kvm_get_rflags(struct kvm_vcpu *vcpu); |
2078 | void kvm_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags); |
2079 | int kvm_emulate_rdpmc(struct kvm_vcpu *vcpu); |
2080 | |
2081 | void kvm_queue_exception(struct kvm_vcpu *vcpu, unsigned nr); |
2082 | void kvm_queue_exception_e(struct kvm_vcpu *vcpu, unsigned nr, u32 error_code); |
2083 | void kvm_queue_exception_p(struct kvm_vcpu *vcpu, unsigned nr, unsigned long payload); |
2084 | void kvm_requeue_exception(struct kvm_vcpu *vcpu, unsigned nr); |
2085 | void kvm_requeue_exception_e(struct kvm_vcpu *vcpu, unsigned nr, u32 error_code); |
2086 | void kvm_inject_page_fault(struct kvm_vcpu *vcpu, struct x86_exception *fault); |
2087 | void kvm_inject_emulated_page_fault(struct kvm_vcpu *vcpu, |
2088 | struct x86_exception *fault); |
2089 | bool kvm_require_cpl(struct kvm_vcpu *vcpu, int required_cpl); |
2090 | bool kvm_require_dr(struct kvm_vcpu *vcpu, int dr); |
2091 | |
2092 | static inline int __kvm_irq_line_state(unsigned long *irq_state, |
2093 | int irq_source_id, int level) |
2094 | { |
	/* Logical OR for level-triggered interrupts */
2096 | if (level) |
2097 | __set_bit(irq_source_id, irq_state); |
2098 | else |
2099 | __clear_bit(irq_source_id, irq_state); |
2100 | |
2101 | return !!(*irq_state); |
2102 | } |
2103 | |
2104 | int kvm_pic_set_irq(struct kvm_pic *pic, int irq, int irq_source_id, int level); |
2105 | void kvm_pic_clear_all(struct kvm_pic *pic, int irq_source_id); |
2106 | |
2107 | void kvm_inject_nmi(struct kvm_vcpu *vcpu); |
2108 | int kvm_get_nr_pending_nmis(struct kvm_vcpu *vcpu); |
2109 | |
2110 | void kvm_update_dr7(struct kvm_vcpu *vcpu); |
2111 | |
2112 | int kvm_mmu_unprotect_page(struct kvm *kvm, gfn_t gfn); |
2113 | void kvm_mmu_free_roots(struct kvm *kvm, struct kvm_mmu *mmu, |
2114 | ulong roots_to_free); |
2115 | void kvm_mmu_free_guest_mode_roots(struct kvm *kvm, struct kvm_mmu *mmu); |
2116 | gpa_t kvm_mmu_gva_to_gpa_read(struct kvm_vcpu *vcpu, gva_t gva, |
2117 | struct x86_exception *exception); |
2118 | gpa_t kvm_mmu_gva_to_gpa_write(struct kvm_vcpu *vcpu, gva_t gva, |
2119 | struct x86_exception *exception); |
2120 | gpa_t kvm_mmu_gva_to_gpa_system(struct kvm_vcpu *vcpu, gva_t gva, |
2121 | struct x86_exception *exception); |
2122 | |
2123 | bool kvm_apicv_activated(struct kvm *kvm); |
2124 | bool kvm_vcpu_apicv_activated(struct kvm_vcpu *vcpu); |
2125 | void __kvm_vcpu_update_apicv(struct kvm_vcpu *vcpu); |
2126 | void __kvm_set_or_clear_apicv_inhibit(struct kvm *kvm, |
2127 | enum kvm_apicv_inhibit reason, bool set); |
2128 | void kvm_set_or_clear_apicv_inhibit(struct kvm *kvm, |
2129 | enum kvm_apicv_inhibit reason, bool set); |
2130 | |
2131 | static inline void kvm_set_apicv_inhibit(struct kvm *kvm, |
2132 | enum kvm_apicv_inhibit reason) |
2133 | { |
	kvm_set_or_clear_apicv_inhibit(kvm, reason, true);
2135 | } |
2136 | |
2137 | static inline void kvm_clear_apicv_inhibit(struct kvm *kvm, |
2138 | enum kvm_apicv_inhibit reason) |
2139 | { |
	kvm_set_or_clear_apicv_inhibit(kvm, reason, false);
2141 | } |
2142 | |
2143 | int kvm_emulate_hypercall(struct kvm_vcpu *vcpu); |
2144 | |
2145 | int kvm_mmu_page_fault(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa, u64 error_code, |
2146 | void *insn, int insn_len); |
2147 | void kvm_mmu_invlpg(struct kvm_vcpu *vcpu, gva_t gva); |
2148 | void kvm_mmu_invalidate_addr(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu, |
2149 | u64 addr, unsigned long roots); |
2150 | void kvm_mmu_invpcid_gva(struct kvm_vcpu *vcpu, gva_t gva, unsigned long pcid); |
2151 | void kvm_mmu_new_pgd(struct kvm_vcpu *vcpu, gpa_t new_pgd); |
2152 | |
2153 | void kvm_configure_mmu(bool enable_tdp, int tdp_forced_root_level, |
2154 | int tdp_max_root_level, int tdp_huge_page_level); |
2155 | |
2156 | #ifdef CONFIG_KVM_PRIVATE_MEM |
2157 | #define kvm_arch_has_private_mem(kvm) ((kvm)->arch.vm_type != KVM_X86_DEFAULT_VM) |
2158 | #else |
2159 | #define kvm_arch_has_private_mem(kvm) false |
2160 | #endif |
2161 | |
2162 | static inline u16 kvm_read_ldt(void) |
2163 | { |
2164 | u16 ldt; |
2165 | asm("sldt %0" : "=g" (ldt)); |
2166 | return ldt; |
2167 | } |
2168 | |
2169 | static inline void kvm_load_ldt(u16 sel) |
2170 | { |
2171 | asm("lldt %0" : : "rm" (sel)); |
2172 | } |
2173 | |
2174 | #ifdef CONFIG_X86_64 |
2175 | static inline unsigned long read_msr(unsigned long msr) |
2176 | { |
2177 | u64 value; |
2178 | |
2179 | rdmsrl(msr, value); |
2180 | return value; |
2181 | } |
2182 | #endif |
2183 | |
2184 | static inline void kvm_inject_gp(struct kvm_vcpu *vcpu, u32 error_code) |
2185 | { |
2186 | kvm_queue_exception_e(vcpu, GP_VECTOR, error_code); |
2187 | } |
2188 | |
2189 | #define TSS_IOPB_BASE_OFFSET 0x66 |
2190 | #define TSS_BASE_SIZE 0x68 |
2191 | #define TSS_IOPB_SIZE (65536 / 8) |
2192 | #define TSS_REDIRECTION_SIZE (256 / 8) |
2193 | #define RMODE_TSS_SIZE \ |
2194 | (TSS_BASE_SIZE + TSS_REDIRECTION_SIZE + TSS_IOPB_SIZE + 1) |
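
/*
 * RMODE_TSS_SIZE worked out: 0x68 (104) bytes of base TSS + 32 bytes of
 * interrupt redirection bitmap (256 bits) + 8192 bytes of I/O permission
 * bitmap (65536 bits) + 1 terminator byte = 8329 bytes.
 */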
2195 | |
2196 | enum { |
2197 | TASK_SWITCH_CALL = 0, |
2198 | TASK_SWITCH_IRET = 1, |
2199 | TASK_SWITCH_JMP = 2, |
2200 | TASK_SWITCH_GATE = 3, |
2201 | }; |
2202 | |
2203 | #define HF_GUEST_MASK (1 << 0) /* VCPU is in guest-mode */ |
2204 | |
2205 | #ifdef CONFIG_KVM_SMM |
2206 | #define HF_SMM_MASK (1 << 1) |
2207 | #define HF_SMM_INSIDE_NMI_MASK (1 << 2) |
2208 | |
2209 | # define KVM_MAX_NR_ADDRESS_SPACES 2 |
2210 | /* SMM is currently unsupported for guests with private memory. */ |
2211 | # define kvm_arch_nr_memslot_as_ids(kvm) (kvm_arch_has_private_mem(kvm) ? 1 : 2) |
2212 | # define kvm_arch_vcpu_memslots_id(vcpu) ((vcpu)->arch.hflags & HF_SMM_MASK ? 1 : 0) |
2213 | # define kvm_memslots_for_spte_role(kvm, role) __kvm_memslots(kvm, (role).smm) |
2214 | #else |
2215 | # define kvm_memslots_for_spte_role(kvm, role) __kvm_memslots(kvm, 0) |
2216 | #endif |
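
/*
 * Illustration of the SMM address-space split: with CONFIG_KVM_SMM, a vCPU
 * in SMM resolves memslots from address space 1 instead of 0, e.g. (sketch):
 *
 *	struct kvm_memslots *slots =
 *		__kvm_memslots(kvm, kvm_arch_vcpu_memslots_id(vcpu));
 *
 * so SMRAM can shadow normal memory without the two views colliding.
 */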
2217 | |
2218 | int kvm_cpu_has_injectable_intr(struct kvm_vcpu *v); |
2219 | int kvm_cpu_has_interrupt(struct kvm_vcpu *vcpu); |
2220 | int kvm_cpu_has_extint(struct kvm_vcpu *v); |
2221 | int kvm_arch_interrupt_allowed(struct kvm_vcpu *vcpu); |
2222 | int kvm_cpu_get_interrupt(struct kvm_vcpu *v); |
2223 | void kvm_vcpu_reset(struct kvm_vcpu *vcpu, bool init_event); |
2224 | |
2225 | int kvm_pv_send_ipi(struct kvm *kvm, unsigned long ipi_bitmap_low, |
2226 | unsigned long ipi_bitmap_high, u32 min, |
2227 | unsigned long icr, int op_64_bit); |
2228 | |
2229 | int kvm_add_user_return_msr(u32 msr); |
2230 | int kvm_find_user_return_msr(u32 msr); |
2231 | int kvm_set_user_return_msr(unsigned index, u64 val, u64 mask); |
2232 | |
2233 | static inline bool kvm_is_supported_user_return_msr(u32 msr) |
2234 | { |
2235 | return kvm_find_user_return_msr(msr) >= 0; |
2236 | } |
2237 | |
2238 | u64 kvm_scale_tsc(u64 tsc, u64 ratio); |
2239 | u64 kvm_read_l1_tsc(struct kvm_vcpu *vcpu, u64 host_tsc); |
2240 | u64 kvm_calc_nested_tsc_offset(u64 l1_offset, u64 l2_offset, u64 l2_multiplier); |
2241 | u64 kvm_calc_nested_tsc_multiplier(u64 l1_multiplier, u64 l2_multiplier); |
2242 | |
2243 | unsigned long kvm_get_linear_rip(struct kvm_vcpu *vcpu); |
2244 | bool kvm_is_linear_rip(struct kvm_vcpu *vcpu, unsigned long linear_rip); |
2245 | |
2246 | void kvm_make_scan_ioapic_request(struct kvm *kvm); |
2247 | void kvm_make_scan_ioapic_request_mask(struct kvm *kvm, |
2248 | unsigned long *vcpu_bitmap); |
2249 | |
2250 | bool kvm_arch_async_page_not_present(struct kvm_vcpu *vcpu, |
2251 | struct kvm_async_pf *work); |
2252 | void kvm_arch_async_page_present(struct kvm_vcpu *vcpu, |
2253 | struct kvm_async_pf *work); |
2254 | void kvm_arch_async_page_ready(struct kvm_vcpu *vcpu, |
2255 | struct kvm_async_pf *work); |
2256 | void kvm_arch_async_page_present_queued(struct kvm_vcpu *vcpu); |
2257 | bool kvm_arch_can_dequeue_async_page_present(struct kvm_vcpu *vcpu); |
2258 | extern bool kvm_find_async_pf_gfn(struct kvm_vcpu *vcpu, gfn_t gfn); |
2259 | |
2260 | int kvm_skip_emulated_instruction(struct kvm_vcpu *vcpu); |
2261 | int kvm_complete_insn_gp(struct kvm_vcpu *vcpu, int err); |
2262 | |
2263 | void __user *__x86_set_memory_region(struct kvm *kvm, int id, gpa_t gpa, |
2264 | u32 size); |
2265 | bool kvm_vcpu_is_reset_bsp(struct kvm_vcpu *vcpu); |
2266 | bool kvm_vcpu_is_bsp(struct kvm_vcpu *vcpu); |
2267 | |
2268 | bool kvm_intr_is_single_vcpu(struct kvm *kvm, struct kvm_lapic_irq *irq, |
2269 | struct kvm_vcpu **dest_vcpu); |
2270 | |
2271 | void kvm_set_msi_irq(struct kvm *kvm, struct kvm_kernel_irq_routing_entry *e, |
2272 | struct kvm_lapic_irq *irq); |
2273 | |
2274 | static inline bool kvm_irq_is_postable(struct kvm_lapic_irq *irq) |
2275 | { |
2276 | /* We can only post Fixed and LowPrio IRQs */ |
2277 | return (irq->delivery_mode == APIC_DM_FIXED || |
2278 | irq->delivery_mode == APIC_DM_LOWEST); |
2279 | } |
2280 | |
2281 | static inline void kvm_arch_vcpu_blocking(struct kvm_vcpu *vcpu) |
2282 | { |
2283 | static_call_cond(kvm_x86_vcpu_blocking)(vcpu); |
2284 | } |
2285 | |
2286 | static inline void kvm_arch_vcpu_unblocking(struct kvm_vcpu *vcpu) |
2287 | { |
2288 | static_call_cond(kvm_x86_vcpu_unblocking)(vcpu); |
2289 | } |
2290 | |
2291 | static inline int kvm_cpu_get_apicid(int mps_cpu) |
2292 | { |
2293 | #ifdef CONFIG_X86_LOCAL_APIC |
2294 | return default_cpu_present_to_apicid(mps_cpu); |
2295 | #else |
2296 | WARN_ON_ONCE(1); |
2297 | return BAD_APICID; |
2298 | #endif |
2299 | } |
2300 | |
2301 | int memslot_rmap_alloc(struct kvm_memory_slot *slot, unsigned long npages); |
2302 | |
2303 | #define KVM_CLOCK_VALID_FLAGS \ |
2304 | (KVM_CLOCK_TSC_STABLE | KVM_CLOCK_REALTIME | KVM_CLOCK_HOST_TSC) |
2305 | |
2306 | #define KVM_X86_VALID_QUIRKS \ |
2307 | (KVM_X86_QUIRK_LINT0_REENABLED | \ |
2308 | KVM_X86_QUIRK_CD_NW_CLEARED | \ |
2309 | KVM_X86_QUIRK_LAPIC_MMIO_HOLE | \ |
2310 | KVM_X86_QUIRK_OUT_7E_INC_RIP | \ |
2311 | KVM_X86_QUIRK_MISC_ENABLE_NO_MWAIT | \ |
2312 | KVM_X86_QUIRK_FIX_HYPERCALL_INSN | \ |
2313 | KVM_X86_QUIRK_MWAIT_NEVER_UD_FAULTS) |
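
/*
 * These are the quirks userspace may disable via KVM_CAP_DISABLE_QUIRKS2;
 * a sketch of the validation KVM is expected to perform on the cap argument:
 *
 *	if (cap->args[0] & ~KVM_X86_VALID_QUIRKS)
 *		return -EINVAL;
 */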
2314 | |
2315 | /* |
2316 | * KVM previously used a u32 field in kvm_run to indicate the hypercall was |
2317 | * initiated from long mode. KVM now sets bit 0 to indicate long mode, but the |
2318 | * remaining 31 lower bits must be 0 to preserve ABI. |
2319 | */ |
2320 | #define KVM_EXIT_HYPERCALL_MBZ GENMASK_ULL(31, 1) |
2321 | |
2322 | #endif /* _ASM_X86_KVM_HOST_H */ |
2323 | |