/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * KVM Microsoft Hyper-V emulation
 *
 * derived from arch/x86/kvm/x86.c
 *
 * Copyright (C) 2006 Qumranet, Inc.
 * Copyright (C) 2008 Qumranet, Inc.
 * Copyright IBM Corporation, 2008
 * Copyright 2010 Red Hat, Inc. and/or its affiliates.
 * Copyright (C) 2015 Andrey Smetanin <asmetanin@virtuozzo.com>
 *
 * Authors:
 *   Avi Kivity <avi@qumranet.com>
 *   Yaniv Kamay <yaniv@qumranet.com>
 *   Amit Shah <amit.shah@qumranet.com>
 *   Ben-Ami Yassour <benami@il.ibm.com>
 *   Andrey Smetanin <asmetanin@virtuozzo.com>
 */

#ifndef __ARCH_X86_KVM_HYPERV_H__
#define __ARCH_X86_KVM_HYPERV_H__

#include <linux/kvm_host.h>
#include "x86.h"

#ifdef CONFIG_KVM_HYPERV

/* "Hv#1" signature */
#define HYPERV_CPUID_SIGNATURE_EAX 0x31237648

/*
 * The #defines related to the synthetic debugger are required by KDNet, but
 * they are not documented in the Hyper-V TLFS because the synthetic debugger
 * functionality has been deprecated and is subject to removal in future
 * versions of Windows.
 */
#define HYPERV_CPUID_SYNDBG_VENDOR_AND_MAX_FUNCTIONS	0x40000080
#define HYPERV_CPUID_SYNDBG_INTERFACE			0x40000081
#define HYPERV_CPUID_SYNDBG_PLATFORM_CAPABILITIES	0x40000082

/*
 * Hyper-V synthetic debugger platform capabilities
 * These are HYPERV_CPUID_SYNDBG_PLATFORM_CAPABILITIES.EAX bits.
 */
#define HV_X64_SYNDBG_CAP_ALLOW_KERNEL_DEBUGGING	BIT(1)

/* Hyper-V Synthetic debug options MSR */
#define HV_X64_MSR_SYNDBG_CONTROL		0x400000F1
#define HV_X64_MSR_SYNDBG_STATUS		0x400000F2
#define HV_X64_MSR_SYNDBG_SEND_BUFFER		0x400000F3
#define HV_X64_MSR_SYNDBG_RECV_BUFFER		0x400000F4
#define HV_X64_MSR_SYNDBG_PENDING_BUFFER	0x400000F5
#define HV_X64_MSR_SYNDBG_OPTIONS		0x400000FF

/* Hyper-V HV_X64_MSR_SYNDBG_OPTIONS bits */
#define HV_X64_SYNDBG_OPTION_USE_HCALLS		BIT(2)

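/* VM-wide Hyper-V emulation context, embedded in struct kvm_arch. */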
static inline struct kvm_hv *to_kvm_hv(struct kvm *kvm)
{
	return &kvm->arch.hyperv;
}

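/*
 * Per-vCPU Hyper-V context; allocated lazily, so this can return NULL and
 * callers must check for it.
 */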
static inline struct kvm_vcpu_hv *to_hv_vcpu(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.hyperv;
}

static inline struct kvm_vcpu_hv_synic *to_hv_synic(struct kvm_vcpu *vcpu)
{
	struct kvm_vcpu_hv *hv_vcpu = to_hv_vcpu(vcpu);

	return &hv_vcpu->synic;
}

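/* Recover the owning vCPU from an embedded SynIC via container_of(). */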
static inline struct kvm_vcpu *hv_synic_to_vcpu(struct kvm_vcpu_hv_synic *synic)
{
	struct kvm_vcpu_hv *hv_vcpu = container_of(synic, struct kvm_vcpu_hv, synic);

	return hv_vcpu->vcpu;
}

static inline struct kvm_hv_syndbg *to_hv_syndbg(struct kvm_vcpu *vcpu)
{
	return &vcpu->kvm->arch.hyperv.hv_syndbg;
}

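/*
 * The VP index defaults to the vCPU index; the host may override it via
 * HV_X64_MSR_VP_INDEX.
 */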
static inline u32 kvm_hv_get_vpindex(struct kvm_vcpu *vcpu)
{
	struct kvm_vcpu_hv *hv_vcpu = to_hv_vcpu(vcpu);

	return hv_vcpu ? hv_vcpu->vp_index : vcpu->vcpu_idx;
}

int kvm_hv_set_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 data, bool host);
int kvm_hv_get_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata, bool host);

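/*
 * Hypercalls are usable only once the guest has identified itself by writing
 * a non-zero value to HV_X64_MSR_GUEST_OS_ID.
 */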
static inline bool kvm_hv_hypercall_enabled(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.hyperv_enabled && to_kvm_hv(vcpu->kvm)->hv_guest_os_id;
}

int kvm_hv_hypercall(struct kvm_vcpu *vcpu);

void kvm_hv_irq_routing_update(struct kvm *kvm);
int kvm_hv_synic_set_irq(struct kvm *kvm, u32 vcpu_id, u32 sint);
void kvm_hv_synic_send_eoi(struct kvm_vcpu *vcpu, int vector);
int kvm_hv_activate_synic(struct kvm_vcpu *vcpu, bool dont_zero_synic_pages);

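/* True if @vector is claimed by one of the vCPU's SynIC interrupt sources. */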
static inline bool kvm_hv_synic_has_vector(struct kvm_vcpu *vcpu, int vector)
{
	return to_hv_vcpu(vcpu) && test_bit(vector, to_hv_synic(vcpu)->vec_bitmap);
}

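/* True if the SINT backing @vector is configured for automatic EOI. */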
static inline bool kvm_hv_synic_auto_eoi_set(struct kvm_vcpu *vcpu, int vector)
{
	return to_hv_vcpu(vcpu) &&
	       test_bit(vector, to_hv_synic(vcpu)->auto_eoi_bitmap);
}

void kvm_hv_vcpu_uninit(struct kvm_vcpu *vcpu);

bool kvm_hv_assist_page_enabled(struct kvm_vcpu *vcpu);
int kvm_hv_get_assist_page(struct kvm_vcpu *vcpu);

static inline struct kvm_vcpu_hv_stimer *to_hv_stimer(struct kvm_vcpu *vcpu,
						      int timer_index)
{
	return &to_hv_vcpu(vcpu)->stimer[timer_index];
}

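/*
 * Recover the owning vCPU from a synthetic timer: stimer->index is the
 * timer's position in the stimer[] array, so subtracting it yields a pointer
 * to stimer[0], from which container_of() finds the enclosing kvm_vcpu_hv.
 */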
static inline struct kvm_vcpu *hv_stimer_to_vcpu(struct kvm_vcpu_hv_stimer *stimer)
{
	struct kvm_vcpu_hv *hv_vcpu;

	hv_vcpu = container_of(stimer - stimer->index, struct kvm_vcpu_hv,
			       stimer[0]);
	return hv_vcpu->vcpu;
}

static inline bool kvm_hv_has_stimer_pending(struct kvm_vcpu *vcpu)
{
	struct kvm_vcpu_hv *hv_vcpu = to_hv_vcpu(vcpu);

	if (!hv_vcpu)
		return false;

	return !bitmap_empty(hv_vcpu->stimer_pending_bitmap,
			     HV_SYNIC_STIMER_COUNT);
}

/*
 * With the HV_ACCESS_TSC_INVARIANT feature, the invariant TSC bit
 * (CPUID.80000007H:EDX[8]) is only observed after
 * HV_X64_MSR_TSC_INVARIANT_CONTROL has been written to.
 */
static inline bool kvm_hv_invtsc_suppressed(struct kvm_vcpu *vcpu)
{
	struct kvm_vcpu_hv *hv_vcpu = to_hv_vcpu(vcpu);

	/*
	 * If Hyper-V's invariant TSC control is not exposed to the guest,
	 * the invariant TSC CPUID flag is not suppressed; Windows guests were
	 * observed to handle it correctly. Going forward, VMMs are encouraged
	 * to enable Hyper-V's invariant TSC control when the invariant TSC
	 * CPUID flag is set, to make KVM's behavior match genuine Hyper-V.
	 */
	if (!hv_vcpu ||
	    !(hv_vcpu->cpuid_cache.features_eax & HV_ACCESS_TSC_INVARIANT))
		return false;

	/*
	 * If Hyper-V's invariant TSC control is exposed to the guest, KVM is
	 * responsible for suppressing the invariant TSC CPUID flag if the
	 * Hyper-V control is not enabled.
	 */
	return !(to_kvm_hv(vcpu->kvm)->hv_invtsc_control & HV_EXPOSE_INVARIANT_TSC);
}

void kvm_hv_process_stimers(struct kvm_vcpu *vcpu);

void kvm_hv_setup_tsc_page(struct kvm *kvm,
			   struct pvclock_vcpu_time_info *hv_clock);
void kvm_hv_request_tsc_page_update(struct kvm *kvm);

void kvm_hv_xsaves_xsavec_maybe_warn(struct kvm_vcpu *vcpu);

void kvm_hv_init_vm(struct kvm *kvm);
void kvm_hv_destroy_vm(struct kvm *kvm);
int kvm_hv_vcpu_init(struct kvm_vcpu *vcpu);
void kvm_hv_set_cpuid(struct kvm_vcpu *vcpu, bool hyperv_enabled);
int kvm_hv_set_enforce_cpuid(struct kvm_vcpu *vcpu, bool enforce);
int kvm_vm_ioctl_hv_eventfd(struct kvm *kvm, struct kvm_hyperv_eventfd *args);
int kvm_get_hv_cpuid(struct kvm_vcpu *vcpu, struct kvm_cpuid2 *cpuid,
		     struct kvm_cpuid_entry2 __user *entries);

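/*
 * Each vCPU keeps one TLB flush FIFO for L1 and one for L2; pick the FIFO
 * matching the mode the vCPU is currently in.
 */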
static inline struct kvm_vcpu_hv_tlb_flush_fifo *kvm_hv_get_tlb_flush_fifo(struct kvm_vcpu *vcpu,
									    bool is_guest_mode)
{
	struct kvm_vcpu_hv *hv_vcpu = to_hv_vcpu(vcpu);
	int i = is_guest_mode ? HV_L2_TLB_FLUSH_FIFO :
				HV_L1_TLB_FLUSH_FIFO;

	return &hv_vcpu->tlb_flush_fifo[i];
}

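/*
 * Drop any queued flush entries: clear a pending KVM_REQ_HV_TLB_FLUSH and
 * drain the current mode's FIFO, e.g. when a full guest TLB flush makes the
 * queued fine-grained entries redundant.
 */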
static inline void kvm_hv_vcpu_purge_flush_tlb(struct kvm_vcpu *vcpu)
{
	struct kvm_vcpu_hv_tlb_flush_fifo *tlb_flush_fifo;

	if (!to_hv_vcpu(vcpu) || !kvm_check_request(KVM_REQ_HV_TLB_FLUSH, vcpu))
		return;

	tlb_flush_fifo = kvm_hv_get_tlb_flush_fifo(vcpu, is_guest_mode(vcpu));

	kfifo_reset_out(&tlb_flush_fifo->entries);
}

static inline bool guest_hv_cpuid_has_l2_tlb_flush(struct kvm_vcpu *vcpu)
{
	struct kvm_vcpu_hv *hv_vcpu = to_hv_vcpu(vcpu);

	return hv_vcpu &&
	       (hv_vcpu->cpuid_cache.nested_eax & HV_X64_NESTED_DIRECT_FLUSH);
}

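/*
 * The hypercall input value is passed in RCX by 64-bit callers and in
 * EDX:EAX by 32-bit ones; the call code occupies its low 16 bits.
 */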
static inline bool kvm_hv_is_tlb_flush_hcall(struct kvm_vcpu *vcpu)
{
	struct kvm_vcpu_hv *hv_vcpu = to_hv_vcpu(vcpu);
	u16 code;

	if (!hv_vcpu)
		return false;

	code = is_64_bit_hypercall(vcpu) ? kvm_rcx_read(vcpu) :
					   kvm_rax_read(vcpu);

	return (code == HVCALL_FLUSH_VIRTUAL_ADDRESS_SPACE ||
		code == HVCALL_FLUSH_VIRTUAL_ADDRESS_LIST ||
		code == HVCALL_FLUSH_VIRTUAL_ADDRESS_SPACE_EX ||
		code == HVCALL_FLUSH_VIRTUAL_ADDRESS_LIST_EX);
}

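/* Refresh the cached VP assist page if one is enabled for the vCPU. */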
static inline int kvm_hv_verify_vp_assist(struct kvm_vcpu *vcpu)
{
	if (!to_hv_vcpu(vcpu))
		return 0;

	if (!kvm_hv_assist_page_enabled(vcpu))
		return 0;

	return kvm_hv_get_assist_page(vcpu);
}

static inline void kvm_hv_nested_transtion_tlb_flush(struct kvm_vcpu *vcpu,
						     bool tdp_enabled)
{
	/*
	 * KVM_REQ_HV_TLB_FLUSH flushes entries from either L1's VP_ID or
	 * L2's VP_ID upon request from the guest. Make sure we check for
	 * pending entries in the right FIFO upon L1/L2 transition, as these
	 * requests are queued by other vCPUs asynchronously.
	 */
	if (to_hv_vcpu(vcpu) && tdp_enabled)
		kvm_make_request(KVM_REQ_HV_TLB_FLUSH, vcpu);
}

int kvm_hv_vcpu_flush_tlb(struct kvm_vcpu *vcpu);
#else /* CONFIG_KVM_HYPERV */
static inline void kvm_hv_setup_tsc_page(struct kvm *kvm,
					 struct pvclock_vcpu_time_info *hv_clock) {}
static inline void kvm_hv_request_tsc_page_update(struct kvm *kvm) {}
static inline void kvm_hv_xsaves_xsavec_maybe_warn(struct kvm_vcpu *vcpu) {}
static inline void kvm_hv_init_vm(struct kvm *kvm) {}
static inline void kvm_hv_destroy_vm(struct kvm *kvm) {}
static inline int kvm_hv_vcpu_init(struct kvm_vcpu *vcpu)
{
	return 0;
}
static inline void kvm_hv_vcpu_uninit(struct kvm_vcpu *vcpu) {}
static inline bool kvm_hv_hypercall_enabled(struct kvm_vcpu *vcpu)
{
	return false;
}
static inline int kvm_hv_hypercall(struct kvm_vcpu *vcpu)
{
	return HV_STATUS_INVALID_HYPERCALL_CODE;
}
static inline void kvm_hv_vcpu_purge_flush_tlb(struct kvm_vcpu *vcpu) {}
static inline void kvm_hv_free_pa_page(struct kvm *kvm) {}
static inline bool kvm_hv_synic_has_vector(struct kvm_vcpu *vcpu, int vector)
{
	return false;
}
static inline bool kvm_hv_synic_auto_eoi_set(struct kvm_vcpu *vcpu, int vector)
{
	return false;
}
static inline void kvm_hv_synic_send_eoi(struct kvm_vcpu *vcpu, int vector) {}
static inline bool kvm_hv_invtsc_suppressed(struct kvm_vcpu *vcpu)
{
	return false;
}
static inline void kvm_hv_set_cpuid(struct kvm_vcpu *vcpu, bool hyperv_enabled) {}
static inline bool kvm_hv_has_stimer_pending(struct kvm_vcpu *vcpu)
{
	return false;
}
static inline bool kvm_hv_is_tlb_flush_hcall(struct kvm_vcpu *vcpu)
{
	return false;
}
static inline bool guest_hv_cpuid_has_l2_tlb_flush(struct kvm_vcpu *vcpu)
{
	return false;
}
static inline int kvm_hv_verify_vp_assist(struct kvm_vcpu *vcpu)
{
	return 0;
}
static inline u32 kvm_hv_get_vpindex(struct kvm_vcpu *vcpu)
{
	return vcpu->vcpu_idx;
}
static inline void kvm_hv_nested_transtion_tlb_flush(struct kvm_vcpu *vcpu, bool tdp_enabled) {}
#endif /* CONFIG_KVM_HYPERV */

#endif /* __ARCH_X86_KVM_HYPERV_H__ */