// SPDX-License-Identifier: GPL-2.0-only
/*
 * KVM Microsoft Hyper-V emulation
 *
 * derived from arch/x86/kvm/x86.c
 *
 * Copyright (C) 2006 Qumranet, Inc.
 * Copyright (C) 2008 Qumranet, Inc.
 * Copyright IBM Corporation, 2008
 * Copyright 2010 Red Hat, Inc. and/or its affiliates.
 * Copyright (C) 2015 Andrey Smetanin <asmetanin@virtuozzo.com>
 *
 * Authors:
 *   Avi Kivity   <avi@qumranet.com>
 *   Yaniv Kamay  <yaniv@qumranet.com>
 *   Amit Shah    <amit.shah@qumranet.com>
 *   Ben-Ami Yassour <benami@il.ibm.com>
 *   Andrey Smetanin <asmetanin@virtuozzo.com>
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include "x86.h"
#include "lapic.h"
#include "ioapic.h"
#include "cpuid.h"
#include "hyperv.h"
#include "mmu.h"
#include "xen.h"

#include <linux/cpu.h>
#include <linux/kvm_host.h>
#include <linux/highmem.h>
#include <linux/sched/cputime.h>
#include <linux/spinlock.h>
#include <linux/eventfd.h>

#include <asm/apicdef.h>
#include <asm/mshyperv.h>
#include <trace/events/kvm.h>

#include "trace.h"
#include "irq.h"
#include "fpu.h"

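/*
 * Number of 64-bit words needed to describe a sparse vCPU set covering all
 * possible vCPUs, e.g. 16 banks for KVM_MAX_VCPUS == 1024 with 64 vCPUs per
 * sparse bank (illustrative numbers; both constants are config-dependent).
 */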
#define KVM_HV_MAX_SPARSE_VCPU_SET_BITS DIV_ROUND_UP(KVM_MAX_VCPUS, HV_VCPUS_PER_SPARSE_BANK)

/*
 * As per the Hyper-V TLFS, extended hypercalls start at 0x8001
 * (HvExtCallQueryCapabilities). The response to this hypercall is a 64-bit
 * value where each bit tells which extended hypercall is available besides
 * HvExtCallQueryCapabilities.
 *
 * 0x8001 - First extended hypercall, HvExtCallQueryCapabilities, no bit
 * assigned.
 *
 * 0x8002 - Bit 0
 * 0x8003 - Bit 1
 * ..
 * 0x8041 - Bit 63
 *
 * Therefore, HV_EXT_CALL_MAX = 0x8001 + 64
 */
#define HV_EXT_CALL_MAX (HV_EXT_CALL_QUERY_CAPABILITIES + 64)
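
/*
 * Illustrative sketch (not used below): under the numbering above, the
 * capability bit for an extended hypercall 'code' strictly between
 * HV_EXT_CALL_QUERY_CAPABILITIES and HV_EXT_CALL_MAX is simply
 *
 *        bit = code - HV_EXT_CALL_QUERY_CAPABILITIES - 1;
 *
 * e.g. 0x8002 -> bit 0 and 0x8041 -> bit 63.
 */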

static void stimer_mark_pending(struct kvm_vcpu_hv_stimer *stimer,
				bool vcpu_kick);

static inline u64 synic_read_sint(struct kvm_vcpu_hv_synic *synic, int sint)
{
	return atomic64_read(&synic->sint[sint]);
}

static inline int synic_get_sint_vector(u64 sint_value)
{
	if (sint_value & HV_SYNIC_SINT_MASKED)
		return -1;
	return sint_value & HV_SYNIC_SINT_VECTOR_MASK;
}

static bool synic_has_vector_connected(struct kvm_vcpu_hv_synic *synic,
				       int vector)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(synic->sint); i++) {
		if (synic_get_sint_vector(synic_read_sint(synic, i)) == vector)
			return true;
	}
	return false;
}

static bool synic_has_vector_auto_eoi(struct kvm_vcpu_hv_synic *synic,
				      int vector)
{
	int i;
	u64 sint_value;

	for (i = 0; i < ARRAY_SIZE(synic->sint); i++) {
		sint_value = synic_read_sint(synic, i);
		if (synic_get_sint_vector(sint_value) == vector &&
		    sint_value & HV_SYNIC_SINT_AUTO_EOI)
			return true;
	}
	return false;
}

static void synic_update_vector(struct kvm_vcpu_hv_synic *synic,
				int vector)
{
	struct kvm_vcpu *vcpu = hv_synic_to_vcpu(synic);
	struct kvm_hv *hv = to_kvm_hv(vcpu->kvm);
	bool auto_eoi_old, auto_eoi_new;

	if (vector < HV_SYNIC_FIRST_VALID_VECTOR)
		return;

	if (synic_has_vector_connected(synic, vector))
		__set_bit(vector, synic->vec_bitmap);
	else
		__clear_bit(vector, synic->vec_bitmap);

	auto_eoi_old = !bitmap_empty(synic->auto_eoi_bitmap, 256);

	if (synic_has_vector_auto_eoi(synic, vector))
		__set_bit(vector, synic->auto_eoi_bitmap);
	else
		__clear_bit(vector, synic->auto_eoi_bitmap);

	auto_eoi_new = !bitmap_empty(synic->auto_eoi_bitmap, 256);

	if (auto_eoi_old == auto_eoi_new)
		return;

	if (!enable_apicv)
		return;

	down_write(&vcpu->kvm->arch.apicv_update_lock);

	if (auto_eoi_new)
		hv->synic_auto_eoi_used++;
	else
		hv->synic_auto_eoi_used--;

	/*
	 * Inhibit APICv if any vCPU is using SynIC's AutoEOI, which relies on
	 * the hypervisor to manually inject IRQs.
	 */
	__kvm_set_or_clear_apicv_inhibit(vcpu->kvm,
					 APICV_INHIBIT_REASON_HYPERV,
					 !!hv->synic_auto_eoi_used);

	up_write(&vcpu->kvm->arch.apicv_update_lock);
}

static int synic_set_sint(struct kvm_vcpu_hv_synic *synic, int sint,
			  u64 data, bool host)
{
	int vector, old_vector;
	bool masked;

	vector = data & HV_SYNIC_SINT_VECTOR_MASK;
	masked = data & HV_SYNIC_SINT_MASKED;

	/*
	 * Valid vectors are 16-255; however, nested Hyper-V attempts to write
	 * the default '0x10000' value on boot, and this should not #GP. We
	 * also need to allow zero-initializing the register from the host.
	 */
	if (vector < HV_SYNIC_FIRST_VALID_VECTOR && !host && !masked)
		return 1;
	/*
	 * The guest may configure multiple SINTs to use the same vector, so
	 * we maintain a bitmap of vectors handled by the SynIC, and a
	 * bitmap of vectors with auto-EOI behavior. The bitmaps are
	 * updated here, and atomically queried on fast paths.
	 */
	old_vector = synic_read_sint(synic, sint) & HV_SYNIC_SINT_VECTOR_MASK;

	atomic64_set(&synic->sint[sint], data);

	synic_update_vector(synic, old_vector);

	synic_update_vector(synic, vector);

	/* Load SynIC vectors into EOI exit bitmap */
	kvm_make_request(KVM_REQ_SCAN_IOAPIC, hv_synic_to_vcpu(synic));
	return 0;
}

static struct kvm_vcpu *get_vcpu_by_vpidx(struct kvm *kvm, u32 vpidx)
{
	struct kvm_vcpu *vcpu = NULL;
	unsigned long i;

	if (vpidx >= KVM_MAX_VCPUS)
		return NULL;

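	/*
	 * Fast path: in the common case the VP index was never changed from
	 * its default and still equals vcpu_idx, so the vCPU can be looked up
	 * directly; fall back to a linear scan otherwise.
	 */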
	vcpu = kvm_get_vcpu(kvm, vpidx);
	if (vcpu && kvm_hv_get_vpindex(vcpu) == vpidx)
		return vcpu;
	kvm_for_each_vcpu(i, vcpu, kvm)
		if (kvm_hv_get_vpindex(vcpu) == vpidx)
			return vcpu;
	return NULL;
}

static struct kvm_vcpu_hv_synic *synic_get(struct kvm *kvm, u32 vpidx)
{
	struct kvm_vcpu *vcpu;
	struct kvm_vcpu_hv_synic *synic;

	vcpu = get_vcpu_by_vpidx(kvm, vpidx);
	if (!vcpu || !to_hv_vcpu(vcpu))
		return NULL;
	synic = to_hv_synic(vcpu);
	return (synic->active) ? synic : NULL;
}

static void kvm_hv_notify_acked_sint(struct kvm_vcpu *vcpu, u32 sint)
{
	struct kvm *kvm = vcpu->kvm;
	struct kvm_vcpu_hv_synic *synic = to_hv_synic(vcpu);
	struct kvm_vcpu_hv *hv_vcpu = to_hv_vcpu(vcpu);
	struct kvm_vcpu_hv_stimer *stimer;
	int gsi, idx;

	trace_kvm_hv_notify_acked_sint(vcpu->vcpu_id, sint);

	/* Try to deliver pending Hyper-V SynIC timer messages */
	for (idx = 0; idx < ARRAY_SIZE(hv_vcpu->stimer); idx++) {
		stimer = &hv_vcpu->stimer[idx];
		if (stimer->msg_pending && stimer->config.enable &&
		    !stimer->config.direct_mode &&
		    stimer->config.sintx == sint)
			stimer_mark_pending(stimer, false);
	}

	idx = srcu_read_lock(&kvm->irq_srcu);
	gsi = atomic_read(&synic->sint_to_gsi[sint]);
	if (gsi != -1)
		kvm_notify_acked_gsi(kvm, gsi);
	srcu_read_unlock(&kvm->irq_srcu, idx);
}

static void synic_exit(struct kvm_vcpu_hv_synic *synic, u32 msr)
{
	struct kvm_vcpu *vcpu = hv_synic_to_vcpu(synic);
	struct kvm_vcpu_hv *hv_vcpu = to_hv_vcpu(vcpu);

	hv_vcpu->exit.type = KVM_EXIT_HYPERV_SYNIC;
	hv_vcpu->exit.u.synic.msr = msr;
	hv_vcpu->exit.u.synic.control = synic->control;
	hv_vcpu->exit.u.synic.evt_page = synic->evt_page;
	hv_vcpu->exit.u.synic.msg_page = synic->msg_page;

	kvm_make_request(KVM_REQ_HV_EXIT, vcpu);
}

static int synic_set_msr(struct kvm_vcpu_hv_synic *synic,
			 u32 msr, u64 data, bool host)
{
	struct kvm_vcpu *vcpu = hv_synic_to_vcpu(synic);
	int ret;

	if (!synic->active && (!host || data))
		return 1;

	trace_kvm_hv_synic_set_msr(vcpu->vcpu_id, msr, data, host);

	ret = 0;
	switch (msr) {
	case HV_X64_MSR_SCONTROL:
		synic->control = data;
		if (!host)
			synic_exit(synic, msr);
		break;
	case HV_X64_MSR_SVERSION:
		if (!host) {
			ret = 1;
			break;
		}
		synic->version = data;
		break;
	case HV_X64_MSR_SIEFP:
		if ((data & HV_SYNIC_SIEFP_ENABLE) && !host &&
		    !synic->dont_zero_synic_pages)
			if (kvm_clear_guest(vcpu->kvm,
					    data & PAGE_MASK, PAGE_SIZE)) {
				ret = 1;
				break;
			}
		synic->evt_page = data;
		if (!host)
			synic_exit(synic, msr);
		break;
	case HV_X64_MSR_SIMP:
		if ((data & HV_SYNIC_SIMP_ENABLE) && !host &&
		    !synic->dont_zero_synic_pages)
			if (kvm_clear_guest(vcpu->kvm,
					    data & PAGE_MASK, PAGE_SIZE)) {
				ret = 1;
				break;
			}
		synic->msg_page = data;
		if (!host)
			synic_exit(synic, msr);
		break;
	case HV_X64_MSR_EOM: {
		int i;

		if (!synic->active)
			break;

		for (i = 0; i < ARRAY_SIZE(synic->sint); i++)
			kvm_hv_notify_acked_sint(vcpu, i);
		break;
	}
	case HV_X64_MSR_SINT0 ... HV_X64_MSR_SINT15:
		ret = synic_set_sint(synic, msr - HV_X64_MSR_SINT0, data, host);
		break;
	default:
		ret = 1;
		break;
	}
	return ret;
}

static bool kvm_hv_is_syndbg_enabled(struct kvm_vcpu *vcpu)
{
	struct kvm_vcpu_hv *hv_vcpu = to_hv_vcpu(vcpu);

	return hv_vcpu->cpuid_cache.syndbg_cap_eax &
		HV_X64_SYNDBG_CAP_ALLOW_KERNEL_DEBUGGING;
}

static int kvm_hv_syndbg_complete_userspace(struct kvm_vcpu *vcpu)
{
	struct kvm_hv *hv = to_kvm_hv(vcpu->kvm);

	if (vcpu->run->hyperv.u.syndbg.msr == HV_X64_MSR_SYNDBG_CONTROL)
		hv->hv_syndbg.control.status =
			vcpu->run->hyperv.u.syndbg.status;
	return 1;
}

static void syndbg_exit(struct kvm_vcpu *vcpu, u32 msr)
{
	struct kvm_hv_syndbg *syndbg = to_hv_syndbg(vcpu);
	struct kvm_vcpu_hv *hv_vcpu = to_hv_vcpu(vcpu);

	hv_vcpu->exit.type = KVM_EXIT_HYPERV_SYNDBG;
	hv_vcpu->exit.u.syndbg.msr = msr;
	hv_vcpu->exit.u.syndbg.control = syndbg->control.control;
	hv_vcpu->exit.u.syndbg.send_page = syndbg->control.send_page;
	hv_vcpu->exit.u.syndbg.recv_page = syndbg->control.recv_page;
	hv_vcpu->exit.u.syndbg.pending_page = syndbg->control.pending_page;
	vcpu->arch.complete_userspace_io =
		kvm_hv_syndbg_complete_userspace;

	kvm_make_request(KVM_REQ_HV_EXIT, vcpu);
}

static int syndbg_set_msr(struct kvm_vcpu *vcpu, u32 msr, u64 data, bool host)
{
	struct kvm_hv_syndbg *syndbg = to_hv_syndbg(vcpu);

	if (!kvm_hv_is_syndbg_enabled(vcpu) && !host)
		return 1;

	trace_kvm_hv_syndbg_set_msr(vcpu->vcpu_id,
				    to_hv_vcpu(vcpu)->vp_index, msr, data);
	switch (msr) {
	case HV_X64_MSR_SYNDBG_CONTROL:
		syndbg->control.control = data;
		if (!host)
			syndbg_exit(vcpu, msr);
		break;
	case HV_X64_MSR_SYNDBG_STATUS:
		syndbg->control.status = data;
		break;
	case HV_X64_MSR_SYNDBG_SEND_BUFFER:
		syndbg->control.send_page = data;
		break;
	case HV_X64_MSR_SYNDBG_RECV_BUFFER:
		syndbg->control.recv_page = data;
		break;
	case HV_X64_MSR_SYNDBG_PENDING_BUFFER:
		syndbg->control.pending_page = data;
		if (!host)
			syndbg_exit(vcpu, msr);
		break;
	case HV_X64_MSR_SYNDBG_OPTIONS:
		syndbg->options = data;
		break;
	default:
		break;
	}

	return 0;
}

static int syndbg_get_msr(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata, bool host)
{
	struct kvm_hv_syndbg *syndbg = to_hv_syndbg(vcpu);

	if (!kvm_hv_is_syndbg_enabled(vcpu) && !host)
		return 1;

	switch (msr) {
	case HV_X64_MSR_SYNDBG_CONTROL:
		*pdata = syndbg->control.control;
		break;
	case HV_X64_MSR_SYNDBG_STATUS:
		*pdata = syndbg->control.status;
		break;
	case HV_X64_MSR_SYNDBG_SEND_BUFFER:
		*pdata = syndbg->control.send_page;
		break;
	case HV_X64_MSR_SYNDBG_RECV_BUFFER:
		*pdata = syndbg->control.recv_page;
		break;
	case HV_X64_MSR_SYNDBG_PENDING_BUFFER:
		*pdata = syndbg->control.pending_page;
		break;
	case HV_X64_MSR_SYNDBG_OPTIONS:
		*pdata = syndbg->options;
		break;
	default:
		break;
	}

	trace_kvm_hv_syndbg_get_msr(vcpu->vcpu_id, kvm_hv_get_vpindex(vcpu), msr, *pdata);

	return 0;
}

static int synic_get_msr(struct kvm_vcpu_hv_synic *synic, u32 msr, u64 *pdata,
			 bool host)
{
	int ret;

	if (!synic->active && !host)
		return 1;

	ret = 0;
	switch (msr) {
	case HV_X64_MSR_SCONTROL:
		*pdata = synic->control;
		break;
	case HV_X64_MSR_SVERSION:
		*pdata = synic->version;
		break;
	case HV_X64_MSR_SIEFP:
		*pdata = synic->evt_page;
		break;
	case HV_X64_MSR_SIMP:
		*pdata = synic->msg_page;
		break;
	case HV_X64_MSR_EOM:
		*pdata = 0;
		break;
	case HV_X64_MSR_SINT0 ... HV_X64_MSR_SINT15:
		*pdata = atomic64_read(&synic->sint[msr - HV_X64_MSR_SINT0]);
		break;
	default:
		ret = 1;
		break;
	}
	return ret;
}

static int synic_set_irq(struct kvm_vcpu_hv_synic *synic, u32 sint)
{
	struct kvm_vcpu *vcpu = hv_synic_to_vcpu(synic);
	struct kvm_lapic_irq irq;
	int ret, vector;

	if (KVM_BUG_ON(!lapic_in_kernel(vcpu), vcpu->kvm))
		return -EINVAL;

	if (sint >= ARRAY_SIZE(synic->sint))
		return -EINVAL;

	vector = synic_get_sint_vector(synic_read_sint(synic, sint));
	if (vector < 0)
		return -ENOENT;

	memset(&irq, 0, sizeof(irq));
	irq.shorthand = APIC_DEST_SELF;
	irq.dest_mode = APIC_DEST_PHYSICAL;
	irq.delivery_mode = APIC_DM_FIXED;
	irq.vector = vector;
	irq.level = 1;

	ret = kvm_irq_delivery_to_apic(vcpu->kvm, vcpu->arch.apic, &irq, NULL);
	trace_kvm_hv_synic_set_irq(vcpu->vcpu_id, sint, irq.vector, ret);
	return ret;
}

int kvm_hv_synic_set_irq(struct kvm *kvm, u32 vpidx, u32 sint)
{
	struct kvm_vcpu_hv_synic *synic;

	synic = synic_get(kvm, vpidx);
	if (!synic)
		return -EINVAL;

	return synic_set_irq(synic, sint);
}

void kvm_hv_synic_send_eoi(struct kvm_vcpu *vcpu, int vector)
{
	struct kvm_vcpu_hv_synic *synic = to_hv_synic(vcpu);
	int i;

	trace_kvm_hv_synic_send_eoi(vcpu->vcpu_id, vector);

	for (i = 0; i < ARRAY_SIZE(synic->sint); i++)
		if (synic_get_sint_vector(synic_read_sint(synic, i)) == vector)
			kvm_hv_notify_acked_sint(vcpu, i);
}

static int kvm_hv_set_sint_gsi(struct kvm *kvm, u32 vpidx, u32 sint, int gsi)
{
	struct kvm_vcpu_hv_synic *synic;

	synic = synic_get(kvm, vpidx);
	if (!synic)
		return -EINVAL;

	if (sint >= ARRAY_SIZE(synic->sint_to_gsi))
		return -EINVAL;

	atomic_set(&synic->sint_to_gsi[sint], gsi);
	return 0;
}

void kvm_hv_irq_routing_update(struct kvm *kvm)
{
	struct kvm_irq_routing_table *irq_rt;
	struct kvm_kernel_irq_routing_entry *e;
	u32 gsi;

	irq_rt = srcu_dereference_check(kvm->irq_routing, &kvm->irq_srcu,
					lockdep_is_held(&kvm->irq_lock));

	for (gsi = 0; gsi < irq_rt->nr_rt_entries; gsi++) {
		hlist_for_each_entry(e, &irq_rt->map[gsi], link) {
			if (e->type == KVM_IRQ_ROUTING_HV_SINT)
				kvm_hv_set_sint_gsi(kvm, e->hv_sint.vcpu,
						    e->hv_sint.sint, gsi);
		}
	}
}

static void synic_init(struct kvm_vcpu_hv_synic *synic)
{
	int i;

	memset(synic, 0, sizeof(*synic));
	synic->version = HV_SYNIC_VERSION_1;
	for (i = 0; i < ARRAY_SIZE(synic->sint); i++) {
		atomic64_set(&synic->sint[i], HV_SYNIC_SINT_MASKED);
		atomic_set(&synic->sint_to_gsi[i], -1);
	}
}

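/*
 * TLFS reference time formula: ReferenceTime =
 * ((VirtualTsc * TscScale) >> 64) + TscOffset, in 100ns units.
 * Illustrative numbers (assuming a 100 MHz guest TSC): one TSC tick is
 * 10ns, i.e. 0.1 of a reference unit, so tsc_scale would be ~0.1 * 2^64.
 */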
static u64 get_time_ref_counter(struct kvm *kvm)
{
	struct kvm_hv *hv = to_kvm_hv(kvm);
	struct kvm_vcpu *vcpu;
	u64 tsc;

	/*
	 * Fall back to get_kvmclock_ns() when the TSC page hasn't been set
	 * up, or is broken, disabled, or being updated.
	 */
	if (hv->hv_tsc_page_status != HV_TSC_PAGE_SET)
		return div_u64(get_kvmclock_ns(kvm), 100);

	vcpu = kvm_get_vcpu(kvm, 0);
	tsc = kvm_read_l1_tsc(vcpu, rdtsc());
	return mul_u64_u64_shr(tsc, hv->tsc_ref.tsc_scale, 64)
		+ hv->tsc_ref.tsc_offset;
}

static void stimer_mark_pending(struct kvm_vcpu_hv_stimer *stimer,
				bool vcpu_kick)
{
	struct kvm_vcpu *vcpu = hv_stimer_to_vcpu(stimer);

	set_bit(stimer->index,
		to_hv_vcpu(vcpu)->stimer_pending_bitmap);
	kvm_make_request(KVM_REQ_HV_STIMER, vcpu);
	if (vcpu_kick)
		kvm_vcpu_kick(vcpu);
}

static void stimer_cleanup(struct kvm_vcpu_hv_stimer *stimer)
{
	struct kvm_vcpu *vcpu = hv_stimer_to_vcpu(stimer);

	trace_kvm_hv_stimer_cleanup(hv_stimer_to_vcpu(stimer)->vcpu_id,
				    stimer->index);

	hrtimer_cancel(&stimer->timer);
	clear_bit(stimer->index,
		  to_hv_vcpu(vcpu)->stimer_pending_bitmap);
	stimer->msg_pending = false;
	stimer->exp_time = 0;
}

static enum hrtimer_restart stimer_timer_callback(struct hrtimer *timer)
{
	struct kvm_vcpu_hv_stimer *stimer;

	stimer = container_of(timer, struct kvm_vcpu_hv_stimer, timer);
	trace_kvm_hv_stimer_callback(hv_stimer_to_vcpu(stimer)->vcpu_id,
				     stimer->index);
	stimer_mark_pending(stimer, true);

	return HRTIMER_NORESTART;
}

/*
 * stimer_start() assumptions:
 * a) stimer->count is not equal to 0
 * b) stimer->config has HV_STIMER_ENABLE flag
 */
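
/*
 * Illustrative catch-up arithmetic for the periodic case (made-up numbers,
 * all in 100ns units): count = 1000, exp_time = 5000, time_now = 7350 ->
 * remainder of (7350 - 5000) / 1000 is 350, so the new exp_time becomes
 * 7350 + (1000 - 350) = 8000 = 5000 + 3 * 1000, i.e. the next expiry stays
 * phase-aligned with the original deadline instead of drifting by however
 * late processing happened to be.
 */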
static int stimer_start(struct kvm_vcpu_hv_stimer *stimer)
{
	u64 time_now;
	ktime_t ktime_now;

	time_now = get_time_ref_counter(hv_stimer_to_vcpu(stimer)->kvm);
	ktime_now = ktime_get();

	if (stimer->config.periodic) {
		if (stimer->exp_time) {
			if (time_now >= stimer->exp_time) {
				u64 remainder;

				div64_u64_rem(time_now - stimer->exp_time,
					      stimer->count, &remainder);
				stimer->exp_time =
					time_now + (stimer->count - remainder);
			}
		} else
			stimer->exp_time = time_now + stimer->count;

		trace_kvm_hv_stimer_start_periodic(
					hv_stimer_to_vcpu(stimer)->vcpu_id,
					stimer->index,
					time_now, stimer->exp_time);

		hrtimer_start(&stimer->timer,
			      ktime_add_ns(ktime_now,
					   100 * (stimer->exp_time - time_now)),
			      HRTIMER_MODE_ABS);
		return 0;
	}
	stimer->exp_time = stimer->count;
	if (time_now >= stimer->count) {
		/*
		 * Expire the timer per the Hypervisor Top-Level Functional
		 * Specification v4 (15.3.1):
		 * "If a one shot is enabled and the specified count is in
		 * the past, it will expire immediately."
		 */
		stimer_mark_pending(stimer, false);
		return 0;
	}

	trace_kvm_hv_stimer_start_one_shot(hv_stimer_to_vcpu(stimer)->vcpu_id,
					   stimer->index,
					   time_now, stimer->count);

	hrtimer_start(&stimer->timer,
		      ktime_add_ns(ktime_now, 100 * (stimer->count - time_now)),
		      HRTIMER_MODE_ABS);
	return 0;
}

static int stimer_set_config(struct kvm_vcpu_hv_stimer *stimer, u64 config,
			     bool host)
{
	union hv_stimer_config new_config = {.as_uint64 = config},
		old_config = {.as_uint64 = stimer->config.as_uint64};
	struct kvm_vcpu *vcpu = hv_stimer_to_vcpu(stimer);
	struct kvm_vcpu_hv *hv_vcpu = to_hv_vcpu(vcpu);
	struct kvm_vcpu_hv_synic *synic = to_hv_synic(vcpu);

	if (!synic->active && (!host || config))
		return 1;

	if (unlikely(!host && hv_vcpu->enforce_cpuid && new_config.direct_mode &&
		     !(hv_vcpu->cpuid_cache.features_edx &
		       HV_STIMER_DIRECT_MODE_AVAILABLE)))
		return 1;

	trace_kvm_hv_stimer_set_config(hv_stimer_to_vcpu(stimer)->vcpu_id,
				       stimer->index, config, host);

	stimer_cleanup(stimer);
	if (old_config.enable &&
	    !new_config.direct_mode && new_config.sintx == 0)
		new_config.enable = 0;
	stimer->config.as_uint64 = new_config.as_uint64;

	if (stimer->config.enable)
		stimer_mark_pending(stimer, false);

	return 0;
}

static int stimer_set_count(struct kvm_vcpu_hv_stimer *stimer, u64 count,
			    bool host)
{
	struct kvm_vcpu *vcpu = hv_stimer_to_vcpu(stimer);
	struct kvm_vcpu_hv_synic *synic = to_hv_synic(vcpu);

	if (!synic->active && (!host || count))
		return 1;

	trace_kvm_hv_stimer_set_count(hv_stimer_to_vcpu(stimer)->vcpu_id,
				      stimer->index, count, host);

	stimer_cleanup(stimer);
	stimer->count = count;
	if (!host) {
		if (stimer->count == 0)
			stimer->config.enable = 0;
		else if (stimer->config.auto_enable)
			stimer->config.enable = 1;
	}

	if (stimer->config.enable)
		stimer_mark_pending(stimer, false);

	return 0;
}

static int stimer_get_config(struct kvm_vcpu_hv_stimer *stimer, u64 *pconfig)
{
	*pconfig = stimer->config.as_uint64;
	return 0;
}

static int stimer_get_count(struct kvm_vcpu_hv_stimer *stimer, u64 *pcount)
{
	*pcount = stimer->count;
	return 0;
}

static int synic_deliver_msg(struct kvm_vcpu_hv_synic *synic, u32 sint,
			     struct hv_message *src_msg, bool no_retry)
{
	struct kvm_vcpu *vcpu = hv_synic_to_vcpu(synic);
	int msg_off = offsetof(struct hv_message_page, sint_message[sint]);
	gfn_t msg_page_gfn;
	struct hv_message_header hv_hdr;
	int r;

	if (!(synic->msg_page & HV_SYNIC_SIMP_ENABLE))
		return -ENOENT;

	msg_page_gfn = synic->msg_page >> PAGE_SHIFT;

	/*
	 * Strictly following the spec-mandated ordering would assume setting
	 * .msg_pending before checking .message_type. However, this function
	 * is only called in vcpu context so the entire update is atomic from
	 * guest POV and thus the exact order here doesn't matter.
	 */
	r = kvm_vcpu_read_guest_page(vcpu, msg_page_gfn, &hv_hdr.message_type,
				     msg_off + offsetof(struct hv_message,
							header.message_type),
				     sizeof(hv_hdr.message_type));
	if (r < 0)
		return r;

	if (hv_hdr.message_type != HVMSG_NONE) {
		if (no_retry)
			return 0;

		hv_hdr.message_flags.msg_pending = 1;
		r = kvm_vcpu_write_guest_page(vcpu, msg_page_gfn,
					      &hv_hdr.message_flags,
					      msg_off +
					      offsetof(struct hv_message,
						       header.message_flags),
					      sizeof(hv_hdr.message_flags));
		if (r < 0)
			return r;
		return -EAGAIN;
	}

	r = kvm_vcpu_write_guest_page(vcpu, msg_page_gfn, src_msg, msg_off,
				      sizeof(src_msg->header) +
				      src_msg->header.payload_size);
	if (r < 0)
		return r;

	r = synic_set_irq(synic, sint);
	if (r < 0)
		return r;
	if (r == 0)
		return -EFAULT;
	return 0;
}
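
/*
 * Note on the retry path above: when the slot is busy, .msg_pending is set
 * and -EAGAIN is returned. Once the guest consumes the message (writes
 * HVMSG_NONE) and writes HV_X64_MSR_EOM, the EOM handler in synic_set_msr()
 * calls kvm_hv_notify_acked_sint(), which re-marks the matching stimer
 * pending so delivery is retried.
 */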

static int stimer_send_msg(struct kvm_vcpu_hv_stimer *stimer)
{
	struct kvm_vcpu *vcpu = hv_stimer_to_vcpu(stimer);
	struct hv_message *msg = &stimer->msg;
	struct hv_timer_message_payload *payload =
		(struct hv_timer_message_payload *)&msg->u.payload;

	/*
	 * To avoid piling up periodic ticks, don't retry message delivery
	 * for periodic timers (in line with the "lazy" lost-ticks policy).
	 */
	bool no_retry = stimer->config.periodic;

	payload->expiration_time = stimer->exp_time;
	payload->delivery_time = get_time_ref_counter(vcpu->kvm);
	return synic_deliver_msg(to_hv_synic(vcpu),
				 stimer->config.sintx, msg,
				 no_retry);
}

static int stimer_notify_direct(struct kvm_vcpu_hv_stimer *stimer)
{
	struct kvm_vcpu *vcpu = hv_stimer_to_vcpu(stimer);
	struct kvm_lapic_irq irq = {
		.delivery_mode = APIC_DM_FIXED,
		.vector = stimer->config.apic_vector
	};

	if (lapic_in_kernel(vcpu))
		return !kvm_apic_set_irq(vcpu, &irq, NULL);
	return 0;
}

static void stimer_expiration(struct kvm_vcpu_hv_stimer *stimer)
{
	int r, direct = stimer->config.direct_mode;

	stimer->msg_pending = true;
	if (!direct)
		r = stimer_send_msg(stimer);
	else
		r = stimer_notify_direct(stimer);
	trace_kvm_hv_stimer_expiration(hv_stimer_to_vcpu(stimer)->vcpu_id,
				       stimer->index, direct, r);
	if (!r) {
		stimer->msg_pending = false;
		if (!(stimer->config.periodic))
			stimer->config.enable = 0;
	}
}

void kvm_hv_process_stimers(struct kvm_vcpu *vcpu)
{
	struct kvm_vcpu_hv *hv_vcpu = to_hv_vcpu(vcpu);
	struct kvm_vcpu_hv_stimer *stimer;
	u64 time_now, exp_time;
	int i;

	if (!hv_vcpu)
		return;

	for (i = 0; i < ARRAY_SIZE(hv_vcpu->stimer); i++)
		if (test_and_clear_bit(i, hv_vcpu->stimer_pending_bitmap)) {
			stimer = &hv_vcpu->stimer[i];
			if (stimer->config.enable) {
				exp_time = stimer->exp_time;

				if (exp_time) {
					time_now =
						get_time_ref_counter(vcpu->kvm);
					if (time_now >= exp_time)
						stimer_expiration(stimer);
				}

				if ((stimer->config.enable) &&
				    stimer->count) {
					if (!stimer->msg_pending)
						stimer_start(stimer);
				} else
					stimer_cleanup(stimer);
			}
		}
}

void kvm_hv_vcpu_uninit(struct kvm_vcpu *vcpu)
{
	struct kvm_vcpu_hv *hv_vcpu = to_hv_vcpu(vcpu);
	int i;

	if (!hv_vcpu)
		return;

	for (i = 0; i < ARRAY_SIZE(hv_vcpu->stimer); i++)
		stimer_cleanup(&hv_vcpu->stimer[i]);

	kfree(hv_vcpu);
	vcpu->arch.hyperv = NULL;
}

bool kvm_hv_assist_page_enabled(struct kvm_vcpu *vcpu)
{
	struct kvm_vcpu_hv *hv_vcpu = to_hv_vcpu(vcpu);

	if (!hv_vcpu)
		return false;

	if (!(hv_vcpu->hv_vapic & HV_X64_MSR_VP_ASSIST_PAGE_ENABLE))
		return false;
	return vcpu->arch.pv_eoi.msr_val & KVM_MSR_ENABLED;
}
EXPORT_SYMBOL_GPL(kvm_hv_assist_page_enabled);

int kvm_hv_get_assist_page(struct kvm_vcpu *vcpu)
{
	struct kvm_vcpu_hv *hv_vcpu = to_hv_vcpu(vcpu);

	if (!hv_vcpu || !kvm_hv_assist_page_enabled(vcpu))
		return -EFAULT;

	return kvm_read_guest_cached(vcpu->kvm, &vcpu->arch.pv_eoi.data,
				     &hv_vcpu->vp_assist_page,
				     sizeof(struct hv_vp_assist_page));
}
EXPORT_SYMBOL_GPL(kvm_hv_get_assist_page);

static void stimer_prepare_msg(struct kvm_vcpu_hv_stimer *stimer)
{
	struct hv_message *msg = &stimer->msg;
	struct hv_timer_message_payload *payload =
		(struct hv_timer_message_payload *)&msg->u.payload;

	memset(&msg->header, 0, sizeof(msg->header));
	msg->header.message_type = HVMSG_TIMER_EXPIRED;
	msg->header.payload_size = sizeof(*payload);

	payload->timer_index = stimer->index;
	payload->expiration_time = 0;
	payload->delivery_time = 0;
}

static void stimer_init(struct kvm_vcpu_hv_stimer *stimer, int timer_index)
{
	memset(stimer, 0, sizeof(*stimer));
	stimer->index = timer_index;
	hrtimer_init(&stimer->timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
	stimer->timer.function = stimer_timer_callback;
	stimer_prepare_msg(stimer);
}

int kvm_hv_vcpu_init(struct kvm_vcpu *vcpu)
{
	struct kvm_vcpu_hv *hv_vcpu = to_hv_vcpu(vcpu);
	int i;

	if (hv_vcpu)
		return 0;

	hv_vcpu = kzalloc(sizeof(struct kvm_vcpu_hv), GFP_KERNEL_ACCOUNT);
	if (!hv_vcpu)
		return -ENOMEM;

	vcpu->arch.hyperv = hv_vcpu;
	hv_vcpu->vcpu = vcpu;

	synic_init(&hv_vcpu->synic);

	bitmap_zero(hv_vcpu->stimer_pending_bitmap, HV_SYNIC_STIMER_COUNT);
	for (i = 0; i < ARRAY_SIZE(hv_vcpu->stimer); i++)
		stimer_init(&hv_vcpu->stimer[i], i);

	hv_vcpu->vp_index = vcpu->vcpu_idx;

	for (i = 0; i < HV_NR_TLB_FLUSH_FIFOS; i++) {
		INIT_KFIFO(hv_vcpu->tlb_flush_fifo[i].entries);
		spin_lock_init(&hv_vcpu->tlb_flush_fifo[i].write_lock);
	}

	return 0;
}

int kvm_hv_activate_synic(struct kvm_vcpu *vcpu, bool dont_zero_synic_pages)
{
	struct kvm_vcpu_hv_synic *synic;
	int r;

	r = kvm_hv_vcpu_init(vcpu);
	if (r)
		return r;

	synic = to_hv_synic(vcpu);

	synic->active = true;
	synic->dont_zero_synic_pages = dont_zero_synic_pages;
	synic->control = HV_SYNIC_CONTROL_ENABLE;
	return 0;
}

static bool kvm_hv_msr_partition_wide(u32 msr)
{
	bool r = false;

	switch (msr) {
	case HV_X64_MSR_GUEST_OS_ID:
	case HV_X64_MSR_HYPERCALL:
	case HV_X64_MSR_REFERENCE_TSC:
	case HV_X64_MSR_TIME_REF_COUNT:
	case HV_X64_MSR_CRASH_CTL:
	case HV_X64_MSR_CRASH_P0 ... HV_X64_MSR_CRASH_P4:
	case HV_X64_MSR_RESET:
	case HV_X64_MSR_REENLIGHTENMENT_CONTROL:
	case HV_X64_MSR_TSC_EMULATION_CONTROL:
	case HV_X64_MSR_TSC_EMULATION_STATUS:
	case HV_X64_MSR_TSC_INVARIANT_CONTROL:
	case HV_X64_MSR_SYNDBG_OPTIONS:
	case HV_X64_MSR_SYNDBG_CONTROL ... HV_X64_MSR_SYNDBG_PENDING_BUFFER:
		r = true;
		break;
	}

	return r;
}

static int kvm_hv_msr_get_crash_data(struct kvm *kvm, u32 index, u64 *pdata)
{
	struct kvm_hv *hv = to_kvm_hv(kvm);
	size_t size = ARRAY_SIZE(hv->hv_crash_param);

	if (WARN_ON_ONCE(index >= size))
		return -EINVAL;

	*pdata = hv->hv_crash_param[array_index_nospec(index, size)];
	return 0;
}

static int kvm_hv_msr_get_crash_ctl(struct kvm *kvm, u64 *pdata)
{
	struct kvm_hv *hv = to_kvm_hv(kvm);

	*pdata = hv->hv_crash_ctl;
	return 0;
}

static int kvm_hv_msr_set_crash_ctl(struct kvm *kvm, u64 data)
{
	struct kvm_hv *hv = to_kvm_hv(kvm);

	hv->hv_crash_ctl = data & HV_CRASH_CTL_CRASH_NOTIFY;

	return 0;
}

static int kvm_hv_msr_set_crash_data(struct kvm *kvm, u32 index, u64 data)
{
	struct kvm_hv *hv = to_kvm_hv(kvm);
	size_t size = ARRAY_SIZE(hv->hv_crash_param);

	if (WARN_ON_ONCE(index >= size))
		return -EINVAL;

	hv->hv_crash_param[array_index_nospec(index, size)] = data;
	return 0;
}

/*
 * The kvmclock and Hyper-V TSC page use similar formulas, and converting
 * between them is possible:
 *
 * kvmclock formula:
 *    nsec = (ticks - tsc_timestamp) * tsc_to_system_mul * 2^(tsc_shift-32)
 *           + system_time
 *
 * Hyper-V formula:
 *    nsec/100 = ticks * scale / 2^64 + offset
 *
 * When tsc_timestamp = system_time = 0, offset is zero in the Hyper-V formula.
 * By dividing the kvmclock formula by 100 and equating what's left we get:
 *    ticks * scale / 2^64 = ticks * tsc_to_system_mul * 2^(tsc_shift-32) / 100
 *    scale / 2^64         =         tsc_to_system_mul * 2^(tsc_shift-32) / 100
 *    scale                =         tsc_to_system_mul * 2^(32+tsc_shift) / 100
 *
 * Now expand the kvmclock formula and divide by 100:
 *    nsec = ticks * tsc_to_system_mul * 2^(tsc_shift-32)
 *           - tsc_timestamp * tsc_to_system_mul * 2^(tsc_shift-32)
 *           + system_time
 *    nsec/100 = ticks * tsc_to_system_mul * 2^(tsc_shift-32) / 100
 *               - tsc_timestamp * tsc_to_system_mul * 2^(tsc_shift-32) / 100
 *               + system_time / 100
 *
 * Replace tsc_to_system_mul * 2^(tsc_shift-32) / 100 by scale / 2^64:
 *    nsec/100 = ticks * scale / 2^64
 *               - tsc_timestamp * scale / 2^64
 *               + system_time / 100
 *
 * Equate with the Hyper-V formula so that ticks * scale / 2^64 cancels out:
 *    offset = system_time / 100 - tsc_timestamp * scale / 2^64
 *
 * These two equivalencies are implemented in this function.
 */
static bool compute_tsc_page_parameters(struct pvclock_vcpu_time_info *hv_clock,
					struct ms_hyperv_tsc_page *tsc_ref)
{
	u64 max_mul;

	if (!(hv_clock->flags & PVCLOCK_TSC_STABLE_BIT))
		return false;

	/*
	 * Check if the scale would overflow; if so, fall back to the time
	 * reference counter:
	 *    tsc_to_system_mul * 2^(tsc_shift+32) / 100 >= 2^64
	 *    tsc_to_system_mul / 100 >= 2^(32-tsc_shift)
	 *    tsc_to_system_mul >= 100 * 2^(32-tsc_shift)
	 */
	max_mul = 100ull << (32 - hv_clock->tsc_shift);
	if (hv_clock->tsc_to_system_mul >= max_mul)
		return false;

	/*
	 * Otherwise compute the scale and offset according to the formulas
	 * derived above.
	 */
	tsc_ref->tsc_scale =
		mul_u64_u32_div(1ULL << (32 + hv_clock->tsc_shift),
				hv_clock->tsc_to_system_mul,
				100);

	tsc_ref->tsc_offset = hv_clock->system_time;
	do_div(tsc_ref->tsc_offset, 100);
	tsc_ref->tsc_offset -=
		mul_u64_u64_shr(hv_clock->tsc_timestamp, tsc_ref->tsc_scale, 64);
	return true;
}
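
/*
 * Worked example (made-up kvmclock parameters for a 1 GHz guest TSC):
 * tsc_shift = 1 and tsc_to_system_mul = 2^31 give mul * 2^(shift-32) = 1,
 * i.e. 1ns per tick. The overflow check passes (2^31 < 100 * 2^31) and
 * tsc_scale = 2^31 * 2^33 / 100 = 2^64 / 100, so the reference time
 * advances by one 100ns unit per 100 TSC ticks, as expected.
 */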

/*
 * Don't touch TSC page values if the guest has opted for TSC emulation after
 * migration. KVM doesn't fully support reenlightenment notifications and TSC
 * access emulation and Hyper-V is known to expect the values in TSC page to
 * stay constant before TSC access emulation is disabled from guest side
 * (HV_X64_MSR_TSC_EMULATION_STATUS). KVM userspace is expected to preserve TSC
 * frequency and guest visible TSC value across migration (and prevent it when
 * TSC scaling is unsupported).
 */
static inline bool tsc_page_update_unsafe(struct kvm_hv *hv)
{
	return (hv->hv_tsc_page_status != HV_TSC_PAGE_GUEST_CHANGED) &&
		hv->hv_tsc_emulation_control;
}

void kvm_hv_setup_tsc_page(struct kvm *kvm,
			   struct pvclock_vcpu_time_info *hv_clock)
{
	struct kvm_hv *hv = to_kvm_hv(kvm);
	u32 tsc_seq;
	u64 gfn;

	BUILD_BUG_ON(sizeof(tsc_seq) != sizeof(hv->tsc_ref.tsc_sequence));
	BUILD_BUG_ON(offsetof(struct ms_hyperv_tsc_page, tsc_sequence) != 0);

	mutex_lock(&hv->hv_lock);

	if (hv->hv_tsc_page_status == HV_TSC_PAGE_BROKEN ||
	    hv->hv_tsc_page_status == HV_TSC_PAGE_SET ||
	    hv->hv_tsc_page_status == HV_TSC_PAGE_UNSET)
		goto out_unlock;

	if (!(hv->hv_tsc_page & HV_X64_MSR_TSC_REFERENCE_ENABLE))
		goto out_unlock;

	gfn = hv->hv_tsc_page >> HV_X64_MSR_TSC_REFERENCE_ADDRESS_SHIFT;
	/*
	 * Because the TSC parameters only vary when there is a
	 * change in the master clock, do not bother with caching.
	 */
	if (unlikely(kvm_read_guest(kvm, gfn_to_gpa(gfn),
				    &tsc_seq, sizeof(tsc_seq))))
		goto out_err;

	if (tsc_seq && tsc_page_update_unsafe(hv)) {
		if (kvm_read_guest(kvm, gfn_to_gpa(gfn), &hv->tsc_ref, sizeof(hv->tsc_ref)))
			goto out_err;

		hv->hv_tsc_page_status = HV_TSC_PAGE_SET;
		goto out_unlock;
	}

	/*
	 * While we're computing and writing the parameters, force the
	 * guest to use the time reference count MSR.
	 */
	hv->tsc_ref.tsc_sequence = 0;
	if (kvm_write_guest(kvm, gfn_to_gpa(gfn),
			    &hv->tsc_ref, sizeof(hv->tsc_ref.tsc_sequence)))
		goto out_err;

	if (!compute_tsc_page_parameters(hv_clock, &hv->tsc_ref))
		goto out_err;

	/* Ensure sequence is zero before writing the rest of the struct. */
	smp_wmb();
	if (kvm_write_guest(kvm, gfn_to_gpa(gfn), &hv->tsc_ref, sizeof(hv->tsc_ref)))
		goto out_err;

	/*
	 * Now switch to the TSC page mechanism by writing the sequence.
	 */
	tsc_seq++;
	if (tsc_seq == 0xFFFFFFFF || tsc_seq == 0)
		tsc_seq = 1;

	/* Write the struct entirely before the non-zero sequence. */
	smp_wmb();

	hv->tsc_ref.tsc_sequence = tsc_seq;
	if (kvm_write_guest(kvm, gfn_to_gpa(gfn),
			    &hv->tsc_ref, sizeof(hv->tsc_ref.tsc_sequence)))
		goto out_err;

	hv->hv_tsc_page_status = HV_TSC_PAGE_SET;
	goto out_unlock;

out_err:
	hv->hv_tsc_page_status = HV_TSC_PAGE_BROKEN;
out_unlock:
	mutex_unlock(&hv->hv_lock);
}
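
/*
 * Sketch of the guest-side reader this sequence dance pairs with (hedged
 * pseudo-code, not KVM code; see the TLFS reference TSC page definition):
 *
 *	do {
 *		seq = tsc_page->tsc_sequence;
 *		if (seq == 0)	// invalid, fall back to the MSR
 *			return rdmsr(HV_X64_MSR_TIME_REF_COUNT);
 *		time = mul_u64_u64_shr(rdtsc(), tsc_page->tsc_scale, 64) +
 *		       tsc_page->tsc_offset;
 *	} while (READ_ONCE(tsc_page->tsc_sequence) != seq);
 *
 * which is why the struct is published with a zeroed sequence first, then
 * fully written, and only then given a non-zero sequence above.
 */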

void kvm_hv_request_tsc_page_update(struct kvm *kvm)
{
	struct kvm_hv *hv = to_kvm_hv(kvm);

	mutex_lock(&hv->hv_lock);

	if (hv->hv_tsc_page_status == HV_TSC_PAGE_SET &&
	    !tsc_page_update_unsafe(hv))
		hv->hv_tsc_page_status = HV_TSC_PAGE_HOST_CHANGED;

	mutex_unlock(&hv->hv_lock);
}

static bool hv_check_msr_access(struct kvm_vcpu_hv *hv_vcpu, u32 msr)
{
	if (!hv_vcpu->enforce_cpuid)
		return true;

	switch (msr) {
	case HV_X64_MSR_GUEST_OS_ID:
	case HV_X64_MSR_HYPERCALL:
		return hv_vcpu->cpuid_cache.features_eax &
			HV_MSR_HYPERCALL_AVAILABLE;
	case HV_X64_MSR_VP_RUNTIME:
		return hv_vcpu->cpuid_cache.features_eax &
			HV_MSR_VP_RUNTIME_AVAILABLE;
	case HV_X64_MSR_TIME_REF_COUNT:
		return hv_vcpu->cpuid_cache.features_eax &
			HV_MSR_TIME_REF_COUNT_AVAILABLE;
	case HV_X64_MSR_VP_INDEX:
		return hv_vcpu->cpuid_cache.features_eax &
			HV_MSR_VP_INDEX_AVAILABLE;
	case HV_X64_MSR_RESET:
		return hv_vcpu->cpuid_cache.features_eax &
			HV_MSR_RESET_AVAILABLE;
	case HV_X64_MSR_REFERENCE_TSC:
		return hv_vcpu->cpuid_cache.features_eax &
			HV_MSR_REFERENCE_TSC_AVAILABLE;
	case HV_X64_MSR_SCONTROL:
	case HV_X64_MSR_SVERSION:
	case HV_X64_MSR_SIEFP:
	case HV_X64_MSR_SIMP:
	case HV_X64_MSR_EOM:
	case HV_X64_MSR_SINT0 ... HV_X64_MSR_SINT15:
		return hv_vcpu->cpuid_cache.features_eax &
			HV_MSR_SYNIC_AVAILABLE;
	case HV_X64_MSR_STIMER0_CONFIG:
	case HV_X64_MSR_STIMER1_CONFIG:
	case HV_X64_MSR_STIMER2_CONFIG:
	case HV_X64_MSR_STIMER3_CONFIG:
	case HV_X64_MSR_STIMER0_COUNT:
	case HV_X64_MSR_STIMER1_COUNT:
	case HV_X64_MSR_STIMER2_COUNT:
	case HV_X64_MSR_STIMER3_COUNT:
		return hv_vcpu->cpuid_cache.features_eax &
			HV_MSR_SYNTIMER_AVAILABLE;
	case HV_X64_MSR_EOI:
	case HV_X64_MSR_ICR:
	case HV_X64_MSR_TPR:
	case HV_X64_MSR_VP_ASSIST_PAGE:
		return hv_vcpu->cpuid_cache.features_eax &
			HV_MSR_APIC_ACCESS_AVAILABLE;
	case HV_X64_MSR_TSC_FREQUENCY:
	case HV_X64_MSR_APIC_FREQUENCY:
		return hv_vcpu->cpuid_cache.features_eax &
			HV_ACCESS_FREQUENCY_MSRS;
	case HV_X64_MSR_REENLIGHTENMENT_CONTROL:
	case HV_X64_MSR_TSC_EMULATION_CONTROL:
	case HV_X64_MSR_TSC_EMULATION_STATUS:
		return hv_vcpu->cpuid_cache.features_eax &
			HV_ACCESS_REENLIGHTENMENT;
	case HV_X64_MSR_TSC_INVARIANT_CONTROL:
		return hv_vcpu->cpuid_cache.features_eax &
			HV_ACCESS_TSC_INVARIANT;
	case HV_X64_MSR_CRASH_P0 ... HV_X64_MSR_CRASH_P4:
	case HV_X64_MSR_CRASH_CTL:
		return hv_vcpu->cpuid_cache.features_edx &
			HV_FEATURE_GUEST_CRASH_MSR_AVAILABLE;
	case HV_X64_MSR_SYNDBG_OPTIONS:
	case HV_X64_MSR_SYNDBG_CONTROL ... HV_X64_MSR_SYNDBG_PENDING_BUFFER:
		return hv_vcpu->cpuid_cache.features_edx &
			HV_FEATURE_DEBUG_MSRS_AVAILABLE;
	default:
		break;
	}

	return false;
}

#define KVM_HV_WIN2016_GUEST_ID 0x1040a00003839
#define KVM_HV_WIN2016_GUEST_ID_MASK (~GENMASK_ULL(23, 16)) /* mask out the service version */
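
/*
 * Decoding the ID above (Microsoft guest OS ID layout): bits 15:0 hold the
 * build number, 0x3839 == 14393, i.e. Windows Server 2016; bits 23:16 hold
 * the service version, which the mask ignores so all service releases match.
 */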

/*
 * Hyper-V enabled Windows Server 2016 SMP VMs fail to boot in a !XSAVES &&
 * XSAVEC configuration. Such a configuration can result from, for example,
 * the AMD Erratum 1386 workaround.
 *
 * Print a notice so users aren't left wondering what's suddenly gone wrong.
 */
static void __kvm_hv_xsaves_xsavec_maybe_warn(struct kvm_vcpu *vcpu)
{
	struct kvm *kvm = vcpu->kvm;
	struct kvm_hv *hv = to_kvm_hv(kvm);

	/* Check again under the hv_lock. */
	if (hv->xsaves_xsavec_checked)
		return;

	if ((hv->hv_guest_os_id & KVM_HV_WIN2016_GUEST_ID_MASK) !=
	    KVM_HV_WIN2016_GUEST_ID)
		return;

	hv->xsaves_xsavec_checked = true;

	/* UP configurations aren't affected */
	if (atomic_read(&kvm->online_vcpus) < 2)
		return;

	if (guest_cpuid_has(vcpu, X86_FEATURE_XSAVES) ||
	    !guest_cpuid_has(vcpu, X86_FEATURE_XSAVEC))
		return;

	pr_notice_ratelimited("Booting SMP Windows KVM VM with !XSAVES && XSAVEC. "
			      "If it fails to boot try disabling XSAVEC in the VM config.\n");
}

void kvm_hv_xsaves_xsavec_maybe_warn(struct kvm_vcpu *vcpu)
{
	struct kvm_hv *hv = to_kvm_hv(vcpu->kvm);

	if (!vcpu->arch.hyperv_enabled ||
	    hv->xsaves_xsavec_checked)
		return;

	mutex_lock(&hv->hv_lock);
	__kvm_hv_xsaves_xsavec_maybe_warn(vcpu);
	mutex_unlock(&hv->hv_lock);
}

static int kvm_hv_set_msr_pw(struct kvm_vcpu *vcpu, u32 msr, u64 data,
			     bool host)
{
	struct kvm *kvm = vcpu->kvm;
	struct kvm_hv *hv = to_kvm_hv(kvm);

	if (unlikely(!host && !hv_check_msr_access(to_hv_vcpu(vcpu), msr)))
		return 1;

	switch (msr) {
	case HV_X64_MSR_GUEST_OS_ID:
		hv->hv_guest_os_id = data;
		/* setting guest os id to zero disables hypercall page */
		if (!hv->hv_guest_os_id)
			hv->hv_hypercall &= ~HV_X64_MSR_HYPERCALL_ENABLE;
		break;
	case HV_X64_MSR_HYPERCALL: {
		u8 instructions[9];
		int i = 0;
		u64 addr;

		/* if guest os id is not set hypercall should remain disabled */
		if (!hv->hv_guest_os_id)
			break;
		if (!(data & HV_X64_MSR_HYPERCALL_ENABLE)) {
			hv->hv_hypercall = data;
			break;
		}

		/*
		 * If Xen and Hyper-V hypercalls are both enabled, disambiguate
		 * the same way Xen itself does, by setting bit 31 of EAX,
		 * which is RsvdZ in the 32-bit Hyper-V hypercall ABI and is
		 * simply going to be clobbered on 64-bit.
		 */
		if (kvm_xen_hypercall_enabled(kvm)) {
			/* orl $0x80000000, %eax */
			instructions[i++] = 0x0d;
			instructions[i++] = 0x00;
			instructions[i++] = 0x00;
			instructions[i++] = 0x00;
			instructions[i++] = 0x80;
		}

		/* vmcall/vmmcall */
		static_call(kvm_x86_patch_hypercall)(vcpu, instructions + i);
		i += 3;

		/* ret */
		instructions[i++] = 0xc3;
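
		/*
		 * Resulting page contents, for illustration (Intel host with
		 * Xen hypercalls also enabled): 0d 00 00 00 80 (orl),
		 * 0f 01 c1 (vmcall), c3 (ret); on an AMD host the patched
		 * instruction would be vmmcall (0f 01 d9) instead.
		 */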

		addr = data & HV_X64_MSR_HYPERCALL_PAGE_ADDRESS_MASK;
		if (kvm_vcpu_write_guest(vcpu, addr, instructions, i))
			return 1;
		hv->hv_hypercall = data;
		break;
	}
	case HV_X64_MSR_REFERENCE_TSC:
		hv->hv_tsc_page = data;
		if (hv->hv_tsc_page & HV_X64_MSR_TSC_REFERENCE_ENABLE) {
			if (!host)
				hv->hv_tsc_page_status = HV_TSC_PAGE_GUEST_CHANGED;
			else
				hv->hv_tsc_page_status = HV_TSC_PAGE_HOST_CHANGED;
			kvm_make_request(KVM_REQ_MASTERCLOCK_UPDATE, vcpu);
		} else {
			hv->hv_tsc_page_status = HV_TSC_PAGE_UNSET;
		}
		break;
	case HV_X64_MSR_CRASH_P0 ... HV_X64_MSR_CRASH_P4:
		return kvm_hv_msr_set_crash_data(kvm,
						 msr - HV_X64_MSR_CRASH_P0,
						 data);
	case HV_X64_MSR_CRASH_CTL:
		if (host)
			return kvm_hv_msr_set_crash_ctl(kvm, data);

		if (data & HV_CRASH_CTL_CRASH_NOTIFY) {
			vcpu_debug(vcpu, "hv crash (0x%llx 0x%llx 0x%llx 0x%llx 0x%llx)\n",
				   hv->hv_crash_param[0],
				   hv->hv_crash_param[1],
				   hv->hv_crash_param[2],
				   hv->hv_crash_param[3],
				   hv->hv_crash_param[4]);

			/* Send notification about crash to user space */
			kvm_make_request(KVM_REQ_HV_CRASH, vcpu);
		}
		break;
	case HV_X64_MSR_RESET:
		if (data == 1) {
			vcpu_debug(vcpu, "hyper-v reset requested\n");
			kvm_make_request(KVM_REQ_HV_RESET, vcpu);
		}
		break;
	case HV_X64_MSR_REENLIGHTENMENT_CONTROL:
		hv->hv_reenlightenment_control = data;
		break;
	case HV_X64_MSR_TSC_EMULATION_CONTROL:
		hv->hv_tsc_emulation_control = data;
		break;
	case HV_X64_MSR_TSC_EMULATION_STATUS:
		if (data && !host)
			return 1;

		hv->hv_tsc_emulation_status = data;
		break;
	case HV_X64_MSR_TIME_REF_COUNT:
		/* read-only, but still ignore it if host-initiated */
		if (!host)
			return 1;
		break;
	case HV_X64_MSR_TSC_INVARIANT_CONTROL:
		/* Only bit 0 is supported */
		if (data & ~HV_EXPOSE_INVARIANT_TSC)
			return 1;

		/* The feature can't be disabled from the guest */
		if (!host && hv->hv_invtsc_control && !data)
			return 1;

		hv->hv_invtsc_control = data;
		break;
	case HV_X64_MSR_SYNDBG_OPTIONS:
	case HV_X64_MSR_SYNDBG_CONTROL ... HV_X64_MSR_SYNDBG_PENDING_BUFFER:
		return syndbg_set_msr(vcpu, msr, data, host);
	default:
		kvm_pr_unimpl_wrmsr(vcpu, msr, data);
		return 1;
	}
	return 0;
}

/* Calculate CPU time spent by the current task, in 100ns units */
static u64 current_task_runtime_100ns(void)
{
	u64 utime, stime;

	task_cputime_adjusted(current, &utime, &stime);

	return div_u64(utime + stime, 100);
}

static int kvm_hv_set_msr(struct kvm_vcpu *vcpu, u32 msr, u64 data, bool host)
{
	struct kvm_vcpu_hv *hv_vcpu = to_hv_vcpu(vcpu);

	if (unlikely(!host && !hv_check_msr_access(hv_vcpu, msr)))
		return 1;

	switch (msr) {
	case HV_X64_MSR_VP_INDEX: {
		struct kvm_hv *hv = to_kvm_hv(vcpu->kvm);
		u32 new_vp_index = (u32)data;

		if (!host || new_vp_index >= KVM_MAX_VCPUS)
			return 1;

		if (new_vp_index == hv_vcpu->vp_index)
			return 0;

		/*
		 * The VP index is initialized to vcpu_idx by
		 * kvm_hv_vcpu_init(), so they initially match. Now that the
		 * VP index is changing, adjust num_mismatched_vp_indexes if
		 * it now matches or no longer matches vcpu_idx.
		 */
		if (hv_vcpu->vp_index == vcpu->vcpu_idx)
			atomic_inc(&hv->num_mismatched_vp_indexes);
		else if (new_vp_index == vcpu->vcpu_idx)
			atomic_dec(&hv->num_mismatched_vp_indexes);

		hv_vcpu->vp_index = new_vp_index;
		break;
	}
	case HV_X64_MSR_VP_ASSIST_PAGE: {
		u64 gfn;
		unsigned long addr;

		if (!(data & HV_X64_MSR_VP_ASSIST_PAGE_ENABLE)) {
			hv_vcpu->hv_vapic = data;
			if (kvm_lapic_set_pv_eoi(vcpu, 0, 0))
				return 1;
			break;
		}
		gfn = data >> HV_X64_MSR_VP_ASSIST_PAGE_ADDRESS_SHIFT;
		addr = kvm_vcpu_gfn_to_hva(vcpu, gfn);
		if (kvm_is_error_hva(addr))
			return 1;

		/*
		 * Clear apic_assist portion of struct hv_vp_assist_page
		 * only, there can be valuable data in the rest which needs
		 * to be preserved e.g. on migration.
		 */
		if (__put_user(0, (u32 __user *)addr))
			return 1;
		hv_vcpu->hv_vapic = data;
		kvm_vcpu_mark_page_dirty(vcpu, gfn);
		if (kvm_lapic_set_pv_eoi(vcpu,
					 gfn_to_gpa(gfn) | KVM_MSR_ENABLED,
					 sizeof(struct hv_vp_assist_page)))
			return 1;
		break;
	}
	case HV_X64_MSR_EOI:
		return kvm_hv_vapic_msr_write(vcpu, APIC_EOI, data);
	case HV_X64_MSR_ICR:
		return kvm_hv_vapic_msr_write(vcpu, APIC_ICR, data);
	case HV_X64_MSR_TPR:
		return kvm_hv_vapic_msr_write(vcpu, APIC_TASKPRI, data);
	case HV_X64_MSR_VP_RUNTIME:
		if (!host)
			return 1;
		hv_vcpu->runtime_offset = data - current_task_runtime_100ns();
		break;
	case HV_X64_MSR_SCONTROL:
	case HV_X64_MSR_SVERSION:
	case HV_X64_MSR_SIEFP:
	case HV_X64_MSR_SIMP:
	case HV_X64_MSR_EOM:
	case HV_X64_MSR_SINT0 ... HV_X64_MSR_SINT15:
		return synic_set_msr(to_hv_synic(vcpu), msr, data, host);
	case HV_X64_MSR_STIMER0_CONFIG:
	case HV_X64_MSR_STIMER1_CONFIG:
	case HV_X64_MSR_STIMER2_CONFIG:
	case HV_X64_MSR_STIMER3_CONFIG: {
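		/*
		 * STIMERn_CONFIG and STIMERn_COUNT MSRs are interleaved
		 * (0x400000B0, 0x400000B1, 0x400000B2, ...), hence the
		 * divide-by-two to recover the timer index.
		 */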
1602 | int timer_index = (msr - HV_X64_MSR_STIMER0_CONFIG)/2; |
1603 | |
1604 | return stimer_set_config(stimer: to_hv_stimer(vcpu, timer_index), |
1605 | config: data, host); |
1606 | } |
1607 | case HV_X64_MSR_STIMER0_COUNT: |
1608 | case HV_X64_MSR_STIMER1_COUNT: |
1609 | case HV_X64_MSR_STIMER2_COUNT: |
1610 | case HV_X64_MSR_STIMER3_COUNT: { |
1611 | int timer_index = (msr - HV_X64_MSR_STIMER0_COUNT)/2; |
1612 | |
1613 | return stimer_set_count(stimer: to_hv_stimer(vcpu, timer_index), |
1614 | count: data, host); |
1615 | } |
1616 | case HV_X64_MSR_TSC_FREQUENCY: |
1617 | case HV_X64_MSR_APIC_FREQUENCY: |
1618 | /* read-only, but still ignore it if host-initiated */ |
1619 | if (!host) |
1620 | return 1; |
1621 | break; |
1622 | default: |
1623 | kvm_pr_unimpl_wrmsr(vcpu, msr, data); |
1624 | return 1; |
1625 | } |
1626 | |
1627 | return 0; |
1628 | } |
1629 | |
1630 | static int kvm_hv_get_msr_pw(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata, |
1631 | bool host) |
1632 | { |
1633 | u64 data = 0; |
1634 | struct kvm *kvm = vcpu->kvm; |
1635 | struct kvm_hv *hv = to_kvm_hv(kvm); |
1636 | |
1637 | if (unlikely(!host && !hv_check_msr_access(to_hv_vcpu(vcpu), msr))) |
1638 | return 1; |
1639 | |
1640 | switch (msr) { |
1641 | case HV_X64_MSR_GUEST_OS_ID: |
1642 | data = hv->hv_guest_os_id; |
1643 | break; |
1644 | case HV_X64_MSR_HYPERCALL: |
1645 | data = hv->hv_hypercall; |
1646 | break; |
1647 | case HV_X64_MSR_TIME_REF_COUNT: |
1648 | data = get_time_ref_counter(kvm); |
1649 | break; |
1650 | case HV_X64_MSR_REFERENCE_TSC: |
1651 | data = hv->hv_tsc_page; |
1652 | break; |
1653 | case HV_X64_MSR_CRASH_P0 ... HV_X64_MSR_CRASH_P4: |
1654 | return kvm_hv_msr_get_crash_data(kvm, |
1655 | index: msr - HV_X64_MSR_CRASH_P0, |
1656 | pdata); |
1657 | case HV_X64_MSR_CRASH_CTL: |
1658 | return kvm_hv_msr_get_crash_ctl(kvm, pdata); |
1659 | case HV_X64_MSR_RESET: |
1660 | data = 0; |
1661 | break; |
1662 | case HV_X64_MSR_REENLIGHTENMENT_CONTROL: |
1663 | data = hv->hv_reenlightenment_control; |
1664 | break; |
1665 | case HV_X64_MSR_TSC_EMULATION_CONTROL: |
1666 | data = hv->hv_tsc_emulation_control; |
1667 | break; |
1668 | case HV_X64_MSR_TSC_EMULATION_STATUS: |
1669 | data = hv->hv_tsc_emulation_status; |
1670 | break; |
1671 | case HV_X64_MSR_TSC_INVARIANT_CONTROL: |
1672 | data = hv->hv_invtsc_control; |
1673 | break; |
1674 | case HV_X64_MSR_SYNDBG_OPTIONS: |
1675 | case HV_X64_MSR_SYNDBG_CONTROL ... HV_X64_MSR_SYNDBG_PENDING_BUFFER: |
1676 | return syndbg_get_msr(vcpu, msr, pdata, host); |
1677 | default: |
1678 | kvm_pr_unimpl_rdmsr(vcpu, msr); |
1679 | return 1; |
1680 | } |
1681 | |
1682 | *pdata = data; |
1683 | return 0; |
1684 | } |
1685 | |
1686 | static int kvm_hv_get_msr(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata, |
1687 | bool host) |
1688 | { |
1689 | u64 data = 0; |
1690 | struct kvm_vcpu_hv *hv_vcpu = to_hv_vcpu(vcpu); |
1691 | |
1692 | if (unlikely(!host && !hv_check_msr_access(hv_vcpu, msr))) |
1693 | return 1; |
1694 | |
1695 | switch (msr) { |
1696 | case HV_X64_MSR_VP_INDEX: |
1697 | data = hv_vcpu->vp_index; |
1698 | break; |
	case HV_X64_MSR_EOI:
		return kvm_hv_vapic_msr_read(vcpu, APIC_EOI, pdata);
	case HV_X64_MSR_ICR:
		return kvm_hv_vapic_msr_read(vcpu, APIC_ICR, pdata);
	case HV_X64_MSR_TPR:
		return kvm_hv_vapic_msr_read(vcpu, APIC_TASKPRI, pdata);
1705 | case HV_X64_MSR_VP_ASSIST_PAGE: |
1706 | data = hv_vcpu->hv_vapic; |
1707 | break; |
1708 | case HV_X64_MSR_VP_RUNTIME: |
1709 | data = current_task_runtime_100ns() + hv_vcpu->runtime_offset; |
1710 | break; |
1711 | case HV_X64_MSR_SCONTROL: |
1712 | case HV_X64_MSR_SVERSION: |
1713 | case HV_X64_MSR_SIEFP: |
1714 | case HV_X64_MSR_SIMP: |
1715 | case HV_X64_MSR_EOM: |
1716 | case HV_X64_MSR_SINT0 ... HV_X64_MSR_SINT15: |
		return synic_get_msr(to_hv_synic(vcpu), msr, pdata, host);
1718 | case HV_X64_MSR_STIMER0_CONFIG: |
1719 | case HV_X64_MSR_STIMER1_CONFIG: |
1720 | case HV_X64_MSR_STIMER2_CONFIG: |
1721 | case HV_X64_MSR_STIMER3_CONFIG: { |
1722 | int timer_index = (msr - HV_X64_MSR_STIMER0_CONFIG)/2; |
1723 | |
		return stimer_get_config(to_hv_stimer(vcpu, timer_index),
					 pdata);
1726 | } |
1727 | case HV_X64_MSR_STIMER0_COUNT: |
1728 | case HV_X64_MSR_STIMER1_COUNT: |
1729 | case HV_X64_MSR_STIMER2_COUNT: |
1730 | case HV_X64_MSR_STIMER3_COUNT: { |
1731 | int timer_index = (msr - HV_X64_MSR_STIMER0_COUNT)/2; |
1732 | |
		return stimer_get_count(to_hv_stimer(vcpu, timer_index),
					pdata);
1735 | } |
1736 | case HV_X64_MSR_TSC_FREQUENCY: |
1737 | data = (u64)vcpu->arch.virtual_tsc_khz * 1000; |
1738 | break; |
1739 | case HV_X64_MSR_APIC_FREQUENCY: |
1740 | data = APIC_BUS_FREQUENCY; |
1741 | break; |
1742 | default: |
1743 | kvm_pr_unimpl_rdmsr(vcpu, msr); |
1744 | return 1; |
1745 | } |
1746 | *pdata = data; |
1747 | return 0; |
1748 | } |
1749 | |
1750 | int kvm_hv_set_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 data, bool host) |
1751 | { |
	struct kvm_hv *hv = to_kvm_hv(vcpu->kvm);
1753 | |
1754 | if (!host && !vcpu->arch.hyperv_enabled) |
1755 | return 1; |
1756 | |
1757 | if (kvm_hv_vcpu_init(vcpu)) |
1758 | return 1; |
1759 | |
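	/*
	 * Partition-wide MSRs are backed by shared, per-VM state, so writes
	 * are serialized with hv->hv_lock; per-vCPU MSRs only touch this
	 * vCPU's state and need no such locking.
	 */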
1760 | if (kvm_hv_msr_partition_wide(msr)) { |
1761 | int r; |
1762 | |
1763 | mutex_lock(&hv->hv_lock); |
1764 | r = kvm_hv_set_msr_pw(vcpu, msr, data, host); |
		mutex_unlock(&hv->hv_lock);
1766 | return r; |
1767 | } else |
1768 | return kvm_hv_set_msr(vcpu, msr, data, host); |
1769 | } |
1770 | |
1771 | int kvm_hv_get_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata, bool host) |
1772 | { |
	struct kvm_hv *hv = to_kvm_hv(vcpu->kvm);
1774 | |
1775 | if (!host && !vcpu->arch.hyperv_enabled) |
1776 | return 1; |
1777 | |
1778 | if (kvm_hv_vcpu_init(vcpu)) |
1779 | return 1; |
1780 | |
1781 | if (kvm_hv_msr_partition_wide(msr)) { |
1782 | int r; |
1783 | |
1784 | mutex_lock(&hv->hv_lock); |
1785 | r = kvm_hv_get_msr_pw(vcpu, msr, pdata, host); |
		mutex_unlock(&hv->hv_lock);
1787 | return r; |
1788 | } else |
1789 | return kvm_hv_get_msr(vcpu, msr, pdata, host); |
1790 | } |
1791 | |
1792 | static void sparse_set_to_vcpu_mask(struct kvm *kvm, u64 *sparse_banks, |
1793 | u64 valid_bank_mask, unsigned long *vcpu_mask) |
1794 | { |
1795 | struct kvm_hv *hv = to_kvm_hv(kvm); |
	bool has_mismatch = atomic_read(&hv->num_mismatched_vp_indexes);
1797 | u64 vp_bitmap[KVM_HV_MAX_SPARSE_VCPU_SET_BITS]; |
1798 | struct kvm_vcpu *vcpu; |
1799 | int bank, sbank = 0; |
1800 | unsigned long i; |
1801 | u64 *bitmap; |
1802 | |
1803 | BUILD_BUG_ON(sizeof(vp_bitmap) > |
1804 | sizeof(*vcpu_mask) * BITS_TO_LONGS(KVM_MAX_VCPUS)); |
1805 | |
1806 | /* |
1807 | * If vp_index == vcpu_idx for all vCPUs, fill vcpu_mask directly, else |
1808 | * fill a temporary buffer and manually test each vCPU's VP index. |
1809 | */ |
1810 | if (likely(!has_mismatch)) |
1811 | bitmap = (u64 *)vcpu_mask; |
1812 | else |
1813 | bitmap = vp_bitmap; |
1814 | |
1815 | /* |
1816 | * Each set of 64 VPs is packed into sparse_banks, with valid_bank_mask |
1817 | * having a '1' for each bank that exists in sparse_banks. Sets must |
1818 | * be in ascending order, i.e. bank0..bankN. |
1819 | */ |
1820 | memset(bitmap, 0, sizeof(vp_bitmap)); |
1821 | for_each_set_bit(bank, (unsigned long *)&valid_bank_mask, |
1822 | KVM_HV_MAX_SPARSE_VCPU_SET_BITS) |
1823 | bitmap[bank] = sparse_banks[sbank++]; |
1824 | |
1825 | if (likely(!has_mismatch)) |
1826 | return; |
1827 | |
	bitmap_zero(vcpu_mask, KVM_MAX_VCPUS);
1829 | kvm_for_each_vcpu(i, vcpu, kvm) { |
1830 | if (test_bit(kvm_hv_get_vpindex(vcpu), (unsigned long *)vp_bitmap)) |
1831 | __set_bit(i, vcpu_mask); |
1832 | } |
1833 | } |
1834 | |
1835 | static bool hv_is_vp_in_sparse_set(u32 vp_id, u64 valid_bank_mask, u64 sparse_banks[]) |
1836 | { |
1837 | int valid_bit_nr = vp_id / HV_VCPUS_PER_SPARSE_BANK; |
1838 | unsigned long sbank; |
1839 | |
1840 | if (!test_bit(valid_bit_nr, (unsigned long *)&valid_bank_mask)) |
1841 | return false; |
1842 | |
1843 | /* |
1844 | * The index into the sparse bank is the number of preceding bits in |
1845 | * the valid mask. Optimize for VMs with <64 vCPUs by skipping the |
1846 | * fancy math if there can't possibly be preceding bits. |
1847 | */ |
1848 | if (valid_bit_nr) |
1849 | sbank = hweight64(valid_bank_mask & GENMASK_ULL(valid_bit_nr - 1, 0)); |
1850 | else |
1851 | sbank = 0; |
1852 | |
1853 | return test_bit(vp_id % HV_VCPUS_PER_SPARSE_BANK, |
1854 | (unsigned long *)&sparse_banks[sbank]); |
1855 | } |
1856 | |
1857 | struct kvm_hv_hcall { |
1858 | /* Hypercall input data */ |
1859 | u64 param; |
1860 | u64 ingpa; |
1861 | u64 outgpa; |
1862 | u16 code; |
1863 | u16 var_cnt; |
1864 | u16 rep_cnt; |
1865 | u16 rep_idx; |
1866 | bool fast; |
1867 | bool rep; |
1868 | sse128_t xmm[HV_HYPERCALL_MAX_XMM_REGISTERS]; |
1869 | |
1870 | /* |
1871 | * Current read offset when KVM reads hypercall input data gradually, |
	 * either the offset in bytes from 'ingpa' for regular hypercalls or the
1873 | * number of already consumed 'XMM halves' for 'fast' hypercalls. |
1874 | */ |
1875 | union { |
1876 | gpa_t data_offset; |
1877 | int consumed_xmm_halves; |
1878 | }; |
1879 | }; |
1880 | |
1881 | |
1882 | static int kvm_hv_get_hc_data(struct kvm *kvm, struct kvm_hv_hcall *hc, |
1883 | u16 orig_cnt, u16 cnt_cap, u64 *data) |
1884 | { |
1885 | /* |
	 * Preserve the original count when ignoring entries via a "cap"; KVM
1887 | * still needs to validate the guest input (though the non-XMM path |
1888 | * punts on the checks). |
1889 | */ |
1890 | u16 cnt = min(orig_cnt, cnt_cap); |
1891 | int i, j; |
1892 | |
1893 | if (hc->fast) { |
1894 | /* |
1895 | * Each XMM holds two sparse banks, but do not count halves that |
1896 | * have already been consumed for hypercall parameters. |
1897 | */ |
1898 | if (orig_cnt > 2 * HV_HYPERCALL_MAX_XMM_REGISTERS - hc->consumed_xmm_halves) |
1899 | return HV_STATUS_INVALID_HYPERCALL_INPUT; |
1900 | |
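		/*
		 * Consume the remaining XMM halves in order: even-numbered
		 * halves come from the low 64 bits of a register, odd ones
		 * from the high 64 bits.
		 */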
1901 | for (i = 0; i < cnt; i++) { |
1902 | j = i + hc->consumed_xmm_halves; |
1903 | if (j % 2) |
1904 | data[i] = sse128_hi(hc->xmm[j / 2]); |
1905 | else |
1906 | data[i] = sse128_lo(hc->xmm[j / 2]); |
1907 | } |
1908 | return 0; |
1909 | } |
1910 | |
	return kvm_read_guest(kvm, hc->ingpa + hc->data_offset, data,
			      cnt * sizeof(*data));
1913 | } |
1914 | |
1915 | static u64 kvm_get_sparse_vp_set(struct kvm *kvm, struct kvm_hv_hcall *hc, |
1916 | u64 *sparse_banks) |
1917 | { |
1918 | if (hc->var_cnt > HV_MAX_SPARSE_VCPU_BANKS) |
1919 | return -EINVAL; |
1920 | |
1921 | /* Cap var_cnt to ignore banks that cannot contain a legal VP index. */ |
	return kvm_hv_get_hc_data(kvm, hc, hc->var_cnt,
				  KVM_HV_MAX_SPARSE_VCPU_SET_BITS, sparse_banks);
1924 | } |
1925 | |
1926 | static int kvm_hv_get_tlb_flush_entries(struct kvm *kvm, struct kvm_hv_hcall *hc, u64 entries[]) |
1927 | { |
	return kvm_hv_get_hc_data(kvm, hc, hc->rep_cnt, hc->rep_cnt, entries);
1929 | } |
1930 | |
1931 | static void hv_tlb_flush_enqueue(struct kvm_vcpu *vcpu, |
1932 | struct kvm_vcpu_hv_tlb_flush_fifo *tlb_flush_fifo, |
1933 | u64 *entries, int count) |
1934 | { |
1935 | struct kvm_vcpu_hv *hv_vcpu = to_hv_vcpu(vcpu); |
1936 | u64 flush_all_entry = KVM_HV_TLB_FLUSHALL_ENTRY; |
1937 | |
1938 | if (!hv_vcpu) |
1939 | return; |
1940 | |
	spin_lock(&tlb_flush_fifo->write_lock);
1942 | |
1943 | /* |
	 * All entries should fit on the fifo while leaving one slot free for
	 * the 'flush all' entry in case another request comes in. If there's
	 * not enough space, just put the 'flush all' entry there.
1947 | */ |
1948 | if (count && entries && count < kfifo_avail(&tlb_flush_fifo->entries)) { |
1949 | WARN_ON(kfifo_in(&tlb_flush_fifo->entries, entries, count) != count); |
1950 | goto out_unlock; |
1951 | } |
1952 | |
1953 | /* |
	 * Note: a full fifo always contains the 'flush all' entry, so there
	 * is no need to check the return value.
1956 | */ |
1957 | kfifo_in(&tlb_flush_fifo->entries, &flush_all_entry, 1); |
1958 | |
1959 | out_unlock: |
	spin_unlock(&tlb_flush_fifo->write_lock);
1961 | } |
1962 | |
1963 | int kvm_hv_vcpu_flush_tlb(struct kvm_vcpu *vcpu) |
1964 | { |
1965 | struct kvm_vcpu_hv_tlb_flush_fifo *tlb_flush_fifo; |
1966 | struct kvm_vcpu_hv *hv_vcpu = to_hv_vcpu(vcpu); |
1967 | u64 entries[KVM_HV_TLB_FLUSH_FIFO_SIZE]; |
1968 | int i, j, count; |
1969 | gva_t gva; |
1970 | |
1971 | if (!tdp_enabled || !hv_vcpu) |
1972 | return -EINVAL; |
1973 | |
	tlb_flush_fifo = kvm_hv_get_tlb_flush_fifo(vcpu, is_guest_mode(vcpu));
1975 | |
1976 | count = kfifo_out(&tlb_flush_fifo->entries, entries, KVM_HV_TLB_FLUSH_FIFO_SIZE); |
1977 | |
1978 | for (i = 0; i < count; i++) { |
1979 | if (entries[i] == KVM_HV_TLB_FLUSHALL_ENTRY) |
1980 | goto out_flush_all; |
1981 | |
1982 | /* |
1983 | * Lower 12 bits of 'address' encode the number of additional |
1984 | * pages to flush. |
1985 | */ |
1986 | gva = entries[i] & PAGE_MASK; |
1987 | for (j = 0; j < (entries[i] & ~PAGE_MASK) + 1; j++) |
1988 | static_call(kvm_x86_flush_tlb_gva)(vcpu, gva + j * PAGE_SIZE); |
1989 | |
1990 | ++vcpu->stat.tlb_flush; |
1991 | } |
1992 | return 0; |
1993 | |
1994 | out_flush_all: |
1995 | kfifo_reset_out(&tlb_flush_fifo->entries); |
1996 | |
1997 | /* Fall back to full flush. */ |
1998 | return -ENOSPC; |
1999 | } |
2000 | |
2001 | static u64 kvm_hv_flush_tlb(struct kvm_vcpu *vcpu, struct kvm_hv_hcall *hc) |
2002 | { |
2003 | struct kvm_vcpu_hv *hv_vcpu = to_hv_vcpu(vcpu); |
2004 | u64 *sparse_banks = hv_vcpu->sparse_banks; |
2005 | struct kvm *kvm = vcpu->kvm; |
2006 | struct hv_tlb_flush_ex flush_ex; |
2007 | struct hv_tlb_flush flush; |
2008 | DECLARE_BITMAP(vcpu_mask, KVM_MAX_VCPUS); |
2009 | struct kvm_vcpu_hv_tlb_flush_fifo *tlb_flush_fifo; |
2010 | /* |
2011 | * Normally, there can be no more than 'KVM_HV_TLB_FLUSH_FIFO_SIZE' |
	 * entries on the TLB flush fifo. The last entry, however, always
	 * needs to be left free for the 'flush all' entry, which is placed
	 * when there is not enough space for all the requested entries.
2015 | */ |
2016 | u64 __tlb_flush_entries[KVM_HV_TLB_FLUSH_FIFO_SIZE - 1]; |
2017 | u64 *tlb_flush_entries; |
2018 | u64 valid_bank_mask; |
2019 | struct kvm_vcpu *v; |
2020 | unsigned long i; |
2021 | bool all_cpus; |
2022 | |
2023 | /* |
2024 | * The Hyper-V TLFS doesn't allow more than HV_MAX_SPARSE_VCPU_BANKS |
2025 | * sparse banks. Fail the build if KVM's max allowed number of |
2026 | * vCPUs (>4096) exceeds this limit. |
2027 | */ |
2028 | BUILD_BUG_ON(KVM_HV_MAX_SPARSE_VCPU_SET_BITS > HV_MAX_SPARSE_VCPU_BANKS); |
2029 | |
2030 | /* |
2031 | * 'Slow' hypercall's first parameter is the address in guest's memory |
2032 | * where hypercall parameters are placed. This is either a GPA or a |
2033 | * nested GPA when KVM is handling the call from L2 ('direct' TLB |
2034 | * flush). Translate the address here so the memory can be uniformly |
2035 | * read with kvm_read_guest(). |
2036 | */ |
2037 | if (!hc->fast && is_guest_mode(vcpu)) { |
		hc->ingpa = translate_nested_gpa(vcpu, hc->ingpa, 0, NULL);
2039 | if (unlikely(hc->ingpa == INVALID_GPA)) |
2040 | return HV_STATUS_INVALID_HYPERCALL_INPUT; |
2041 | } |
2042 | |
2043 | if (hc->code == HVCALL_FLUSH_VIRTUAL_ADDRESS_LIST || |
2044 | hc->code == HVCALL_FLUSH_VIRTUAL_ADDRESS_SPACE) { |
2045 | if (hc->fast) { |
2046 | flush.address_space = hc->ingpa; |
2047 | flush.flags = hc->outgpa; |
2048 | flush.processor_mask = sse128_lo(hc->xmm[0]); |
2049 | hc->consumed_xmm_halves = 1; |
2050 | } else { |
2051 | if (unlikely(kvm_read_guest(kvm, hc->ingpa, |
2052 | &flush, sizeof(flush)))) |
2053 | return HV_STATUS_INVALID_HYPERCALL_INPUT; |
2054 | hc->data_offset = sizeof(flush); |
2055 | } |
2056 | |
		trace_kvm_hv_flush_tlb(flush.processor_mask,
				       flush.address_space, flush.flags,
				       is_guest_mode(vcpu));
2060 | |
2061 | valid_bank_mask = BIT_ULL(0); |
2062 | sparse_banks[0] = flush.processor_mask; |
2063 | |
2064 | /* |
2065 | * Work around possible WS2012 bug: it sends hypercalls |
2066 | * with processor_mask = 0x0 and HV_FLUSH_ALL_PROCESSORS clear, |
2067 | * while also expecting us to flush something and crashing if |
		 * we don't. Let's treat processor_mask == 0 the same as
2069 | * HV_FLUSH_ALL_PROCESSORS. |
2070 | */ |
2071 | all_cpus = (flush.flags & HV_FLUSH_ALL_PROCESSORS) || |
2072 | flush.processor_mask == 0; |
2073 | } else { |
2074 | if (hc->fast) { |
2075 | flush_ex.address_space = hc->ingpa; |
2076 | flush_ex.flags = hc->outgpa; |
2077 | memcpy(&flush_ex.hv_vp_set, |
2078 | &hc->xmm[0], sizeof(hc->xmm[0])); |
2079 | hc->consumed_xmm_halves = 2; |
2080 | } else { |
2081 | if (unlikely(kvm_read_guest(kvm, hc->ingpa, &flush_ex, |
2082 | sizeof(flush_ex)))) |
2083 | return HV_STATUS_INVALID_HYPERCALL_INPUT; |
2084 | hc->data_offset = sizeof(flush_ex); |
2085 | } |
2086 | |
		trace_kvm_hv_flush_tlb_ex(flush_ex.hv_vp_set.valid_bank_mask,
					  flush_ex.hv_vp_set.format,
					  flush_ex.address_space,
					  flush_ex.flags, is_guest_mode(vcpu));
2091 | |
2092 | valid_bank_mask = flush_ex.hv_vp_set.valid_bank_mask; |
2093 | all_cpus = flush_ex.hv_vp_set.format != |
2094 | HV_GENERIC_SET_SPARSE_4K; |
2095 | |
2096 | if (hc->var_cnt != hweight64(valid_bank_mask)) |
2097 | return HV_STATUS_INVALID_HYPERCALL_INPUT; |
2098 | |
2099 | if (!all_cpus) { |
2100 | if (!hc->var_cnt) |
2101 | goto ret_success; |
2102 | |
2103 | if (kvm_get_sparse_vp_set(kvm, hc, sparse_banks)) |
2104 | return HV_STATUS_INVALID_HYPERCALL_INPUT; |
2105 | } |
2106 | |
2107 | /* |
2108 | * Hyper-V TLFS doesn't explicitly forbid non-empty sparse vCPU |
2109 | * banks (and, thus, non-zero 'var_cnt') for the 'all vCPUs' |
2110 | * case (HV_GENERIC_SET_ALL). Always adjust data_offset and |
2111 | * consumed_xmm_halves to make sure TLB flush entries are read |
2112 | * from the correct offset. |
2113 | */ |
2114 | if (hc->fast) |
2115 | hc->consumed_xmm_halves += hc->var_cnt; |
2116 | else |
2117 | hc->data_offset += hc->var_cnt * sizeof(sparse_banks[0]); |
2118 | } |
2119 | |
2120 | if (hc->code == HVCALL_FLUSH_VIRTUAL_ADDRESS_SPACE || |
2121 | hc->code == HVCALL_FLUSH_VIRTUAL_ADDRESS_SPACE_EX || |
2122 | hc->rep_cnt > ARRAY_SIZE(__tlb_flush_entries)) { |
2123 | tlb_flush_entries = NULL; |
2124 | } else { |
		if (kvm_hv_get_tlb_flush_entries(kvm, hc, __tlb_flush_entries))
2126 | return HV_STATUS_INVALID_HYPERCALL_INPUT; |
2127 | tlb_flush_entries = __tlb_flush_entries; |
2128 | } |
2129 | |
2130 | /* |
	 * vcpu->arch.cr3 may not be up-to-date for running vCPUs, so we can't
	 * analyze it here; flush the TLB regardless of the specified address
	 * space.
2133 | */ |
2134 | if (all_cpus && !is_guest_mode(vcpu)) { |
2135 | kvm_for_each_vcpu(i, v, kvm) { |
			tlb_flush_fifo = kvm_hv_get_tlb_flush_fifo(v, false);
			hv_tlb_flush_enqueue(v, tlb_flush_fifo,
					     tlb_flush_entries, hc->rep_cnt);
2139 | } |
2140 | |
2141 | kvm_make_all_cpus_request(kvm, KVM_REQ_HV_TLB_FLUSH); |
2142 | } else if (!is_guest_mode(vcpu)) { |
2143 | sparse_set_to_vcpu_mask(kvm, sparse_banks, valid_bank_mask, vcpu_mask); |
2144 | |
2145 | for_each_set_bit(i, vcpu_mask, KVM_MAX_VCPUS) { |
2146 | v = kvm_get_vcpu(kvm, i); |
2147 | if (!v) |
2148 | continue; |
			tlb_flush_fifo = kvm_hv_get_tlb_flush_fifo(v, false);
			hv_tlb_flush_enqueue(v, tlb_flush_fifo,
					     tlb_flush_entries, hc->rep_cnt);
2152 | } |
2153 | |
		kvm_make_vcpus_request_mask(kvm, KVM_REQ_HV_TLB_FLUSH, vcpu_mask);
2155 | } else { |
2156 | struct kvm_vcpu_hv *hv_v; |
2157 | |
		bitmap_zero(vcpu_mask, KVM_MAX_VCPUS);
2159 | |
2160 | kvm_for_each_vcpu(i, v, kvm) { |
			hv_v = to_hv_vcpu(v);
2162 | |
2163 | /* |
2164 | * The following check races with nested vCPUs entering/exiting |
2165 | * and/or migrating between L1's vCPUs, however the only case when |
2166 | * KVM *must* flush the TLB is when the target L2 vCPU keeps |
2167 | * running on the same L1 vCPU from the moment of the request until |
2168 | * kvm_hv_flush_tlb() returns. TLB is fully flushed in all other |
2169 | * cases, e.g. when the target L2 vCPU migrates to a different L1 |
			 * vCPU or when the corresponding L1 vCPU temporarily switches to a
2171 | * different L2 vCPU while the request is being processed. |
2172 | */ |
2173 | if (!hv_v || hv_v->nested.vm_id != hv_vcpu->nested.vm_id) |
2174 | continue; |
2175 | |
2176 | if (!all_cpus && |
			    !hv_is_vp_in_sparse_set(hv_v->nested.vp_id, valid_bank_mask,
						    sparse_banks))
2179 | continue; |
2180 | |
2181 | __set_bit(i, vcpu_mask); |
			tlb_flush_fifo = kvm_hv_get_tlb_flush_fifo(v, true);
			hv_tlb_flush_enqueue(v, tlb_flush_fifo,
					     tlb_flush_entries, hc->rep_cnt);
2185 | } |
2186 | |
		kvm_make_vcpus_request_mask(kvm, KVM_REQ_HV_TLB_FLUSH, vcpu_mask);
2188 | } |
2189 | |
2190 | ret_success: |
	/* We always do a full TLB flush, set 'Reps completed' = 'Rep Count' */
2192 | return (u64)HV_STATUS_SUCCESS | |
2193 | ((u64)hc->rep_cnt << HV_HYPERCALL_REP_COMP_OFFSET); |
2194 | } |
2195 | |
2196 | static void kvm_hv_send_ipi_to_many(struct kvm *kvm, u32 vector, |
2197 | u64 *sparse_banks, u64 valid_bank_mask) |
2198 | { |
2199 | struct kvm_lapic_irq irq = { |
2200 | .delivery_mode = APIC_DM_FIXED, |
2201 | .vector = vector |
2202 | }; |
2203 | struct kvm_vcpu *vcpu; |
2204 | unsigned long i; |
2205 | |
2206 | kvm_for_each_vcpu(i, vcpu, kvm) { |
2207 | if (sparse_banks && |
		    !hv_is_vp_in_sparse_set(kvm_hv_get_vpindex(vcpu),
					    valid_bank_mask, sparse_banks))
2210 | continue; |
2211 | |
2212 | /* We fail only when APIC is disabled */ |
		kvm_apic_set_irq(vcpu, &irq, NULL);
2214 | } |
2215 | } |
2216 | |
2217 | static u64 kvm_hv_send_ipi(struct kvm_vcpu *vcpu, struct kvm_hv_hcall *hc) |
2218 | { |
2219 | struct kvm_vcpu_hv *hv_vcpu = to_hv_vcpu(vcpu); |
2220 | u64 *sparse_banks = hv_vcpu->sparse_banks; |
2221 | struct kvm *kvm = vcpu->kvm; |
2222 | struct hv_send_ipi_ex send_ipi_ex; |
2223 | struct hv_send_ipi send_ipi; |
2224 | u64 valid_bank_mask; |
2225 | u32 vector; |
2226 | bool all_cpus; |
2227 | |
2228 | if (hc->code == HVCALL_SEND_IPI) { |
2229 | if (!hc->fast) { |
2230 | if (unlikely(kvm_read_guest(kvm, hc->ingpa, &send_ipi, |
2231 | sizeof(send_ipi)))) |
2232 | return HV_STATUS_INVALID_HYPERCALL_INPUT; |
2233 | sparse_banks[0] = send_ipi.cpu_mask; |
2234 | vector = send_ipi.vector; |
2235 | } else { |
2236 | /* 'reserved' part of hv_send_ipi should be 0 */ |
2237 | if (unlikely(hc->ingpa >> 32 != 0)) |
2238 | return HV_STATUS_INVALID_HYPERCALL_INPUT; |
2239 | sparse_banks[0] = hc->outgpa; |
2240 | vector = (u32)hc->ingpa; |
2241 | } |
2242 | all_cpus = false; |
2243 | valid_bank_mask = BIT_ULL(0); |
2244 | |
		trace_kvm_hv_send_ipi(vector, sparse_banks[0]);
2246 | } else { |
2247 | if (!hc->fast) { |
2248 | if (unlikely(kvm_read_guest(kvm, hc->ingpa, &send_ipi_ex, |
2249 | sizeof(send_ipi_ex)))) |
2250 | return HV_STATUS_INVALID_HYPERCALL_INPUT; |
2251 | } else { |
2252 | send_ipi_ex.vector = (u32)hc->ingpa; |
2253 | send_ipi_ex.vp_set.format = hc->outgpa; |
2254 | send_ipi_ex.vp_set.valid_bank_mask = sse128_lo(hc->xmm[0]); |
2255 | } |
2256 | |
		trace_kvm_hv_send_ipi_ex(send_ipi_ex.vector,
					 send_ipi_ex.vp_set.format,
					 send_ipi_ex.vp_set.valid_bank_mask);
2260 | |
2261 | vector = send_ipi_ex.vector; |
2262 | valid_bank_mask = send_ipi_ex.vp_set.valid_bank_mask; |
2263 | all_cpus = send_ipi_ex.vp_set.format == HV_GENERIC_SET_ALL; |
2264 | |
2265 | if (hc->var_cnt != hweight64(valid_bank_mask)) |
2266 | return HV_STATUS_INVALID_HYPERCALL_INPUT; |
2267 | |
2268 | if (all_cpus) |
2269 | goto check_and_send_ipi; |
2270 | |
2271 | if (!hc->var_cnt) |
2272 | goto ret_success; |
2273 | |
2274 | if (!hc->fast) |
2275 | hc->data_offset = offsetof(struct hv_send_ipi_ex, |
2276 | vp_set.bank_contents); |
2277 | else |
2278 | hc->consumed_xmm_halves = 1; |
2279 | |
2280 | if (kvm_get_sparse_vp_set(kvm, hc, sparse_banks)) |
2281 | return HV_STATUS_INVALID_HYPERCALL_INPUT; |
2282 | } |
2283 | |
2284 | check_and_send_ipi: |
2285 | if ((vector < HV_IPI_LOW_VECTOR) || (vector > HV_IPI_HIGH_VECTOR)) |
2286 | return HV_STATUS_INVALID_HYPERCALL_INPUT; |
2287 | |
2288 | if (all_cpus) |
		kvm_hv_send_ipi_to_many(kvm, vector, NULL, 0);
2290 | else |
2291 | kvm_hv_send_ipi_to_many(kvm, vector, sparse_banks, valid_bank_mask); |
2292 | |
2293 | ret_success: |
2294 | return HV_STATUS_SUCCESS; |
2295 | } |
2296 | |
2297 | void kvm_hv_set_cpuid(struct kvm_vcpu *vcpu, bool hyperv_enabled) |
2298 | { |
2299 | struct kvm_vcpu_hv *hv_vcpu = to_hv_vcpu(vcpu); |
2300 | struct kvm_cpuid_entry2 *entry; |
2301 | |
2302 | vcpu->arch.hyperv_enabled = hyperv_enabled; |
2303 | |
2304 | if (!hv_vcpu) { |
2305 | /* |
2306 | * KVM should have already allocated kvm_vcpu_hv if Hyper-V is |
2307 | * enabled in CPUID. |
2308 | */ |
2309 | WARN_ON_ONCE(vcpu->arch.hyperv_enabled); |
2310 | return; |
2311 | } |
2312 | |
2313 | memset(&hv_vcpu->cpuid_cache, 0, sizeof(hv_vcpu->cpuid_cache)); |
2314 | |
2315 | if (!vcpu->arch.hyperv_enabled) |
2316 | return; |
2317 | |
2318 | entry = kvm_find_cpuid_entry(vcpu, HYPERV_CPUID_FEATURES); |
2319 | if (entry) { |
2320 | hv_vcpu->cpuid_cache.features_eax = entry->eax; |
2321 | hv_vcpu->cpuid_cache.features_ebx = entry->ebx; |
2322 | hv_vcpu->cpuid_cache.features_edx = entry->edx; |
2323 | } |
2324 | |
2325 | entry = kvm_find_cpuid_entry(vcpu, HYPERV_CPUID_ENLIGHTMENT_INFO); |
2326 | if (entry) { |
2327 | hv_vcpu->cpuid_cache.enlightenments_eax = entry->eax; |
2328 | hv_vcpu->cpuid_cache.enlightenments_ebx = entry->ebx; |
2329 | } |
2330 | |
2331 | entry = kvm_find_cpuid_entry(vcpu, HYPERV_CPUID_SYNDBG_PLATFORM_CAPABILITIES); |
2332 | if (entry) |
2333 | hv_vcpu->cpuid_cache.syndbg_cap_eax = entry->eax; |
2334 | |
2335 | entry = kvm_find_cpuid_entry(vcpu, HYPERV_CPUID_NESTED_FEATURES); |
2336 | if (entry) { |
2337 | hv_vcpu->cpuid_cache.nested_eax = entry->eax; |
2338 | hv_vcpu->cpuid_cache.nested_ebx = entry->ebx; |
2339 | } |
2340 | } |
2341 | |
2342 | int kvm_hv_set_enforce_cpuid(struct kvm_vcpu *vcpu, bool enforce) |
2343 | { |
2344 | struct kvm_vcpu_hv *hv_vcpu; |
2345 | int ret = 0; |
2346 | |
2347 | if (!to_hv_vcpu(vcpu)) { |
2348 | if (enforce) { |
2349 | ret = kvm_hv_vcpu_init(vcpu); |
2350 | if (ret) |
2351 | return ret; |
2352 | } else { |
2353 | return 0; |
2354 | } |
2355 | } |
2356 | |
2357 | hv_vcpu = to_hv_vcpu(vcpu); |
2358 | hv_vcpu->enforce_cpuid = enforce; |
2359 | |
2360 | return ret; |
2361 | } |
2362 | |
2363 | static void kvm_hv_hypercall_set_result(struct kvm_vcpu *vcpu, u64 result) |
2364 | { |
2365 | bool longmode; |
2366 | |
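	/*
	 * A 64-bit caller gets the result in RAX; a 32-bit caller gets it
	 * split across EDX (high half) and EAX (low half).
	 */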
2367 | longmode = is_64_bit_hypercall(vcpu); |
2368 | if (longmode) |
		kvm_rax_write(vcpu, result);
2370 | else { |
		kvm_rdx_write(vcpu, result >> 32);
		kvm_rax_write(vcpu, result & 0xffffffff);
2373 | } |
2374 | } |
2375 | |
2376 | static int kvm_hv_hypercall_complete(struct kvm_vcpu *vcpu, u64 result) |
2377 | { |
2378 | u32 tlb_lock_count = 0; |
2379 | int ret; |
2380 | |
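	/*
	 * For a successful TLB flush hypercall from L2, read 'tlb_lock_count'
	 * from the partition assist page; a failed read fails the hypercall,
	 * and a non-zero count requires a synthetic VM-exit to be injected
	 * once the flush has completed.
	 */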
	if (hv_result_success(result) && is_guest_mode(vcpu) &&
	    kvm_hv_is_tlb_flush_hcall(vcpu) &&
	    kvm_read_guest(vcpu->kvm, to_hv_vcpu(vcpu)->nested.pa_page_gpa,
			   &tlb_lock_count, sizeof(tlb_lock_count)))
		result = HV_STATUS_INVALID_HYPERCALL_INPUT;
2386 | |
2387 | trace_kvm_hv_hypercall_done(result); |
2388 | kvm_hv_hypercall_set_result(vcpu, result); |
2389 | ++vcpu->stat.hypercalls; |
2390 | |
2391 | ret = kvm_skip_emulated_instruction(vcpu); |
2392 | |
2393 | if (tlb_lock_count) |
2394 | kvm_x86_ops.nested_ops->hv_inject_synthetic_vmexit_post_tlb_flush(vcpu); |
2395 | |
2396 | return ret; |
2397 | } |
2398 | |
2399 | static int kvm_hv_hypercall_complete_userspace(struct kvm_vcpu *vcpu) |
2400 | { |
	return kvm_hv_hypercall_complete(vcpu, vcpu->run->hyperv.u.hcall.result);
2402 | } |
2403 | |
2404 | static u16 kvm_hvcall_signal_event(struct kvm_vcpu *vcpu, struct kvm_hv_hcall *hc) |
2405 | { |
	struct kvm_hv *hv = to_kvm_hv(vcpu->kvm);
2407 | struct eventfd_ctx *eventfd; |
2408 | |
2409 | if (unlikely(!hc->fast)) { |
2410 | int ret; |
2411 | gpa_t gpa = hc->ingpa; |
2412 | |
2413 | if ((gpa & (__alignof__(hc->ingpa) - 1)) || |
2414 | offset_in_page(gpa) + sizeof(hc->ingpa) > PAGE_SIZE) |
2415 | return HV_STATUS_INVALID_ALIGNMENT; |
2416 | |
		ret = kvm_vcpu_read_guest(vcpu, gpa,
					  &hc->ingpa, sizeof(hc->ingpa));
2419 | if (ret < 0) |
2420 | return HV_STATUS_INVALID_ALIGNMENT; |
2421 | } |
2422 | |
2423 | /* |
2424 | * Per spec, bits 32-47 contain the extra "flag number". However, we |
	 * have no use for it, and in all known use cases it is zero, so just
2426 | * report lookup failure if it isn't. |
2427 | */ |
2428 | if (hc->ingpa & 0xffff00000000ULL) |
2429 | return HV_STATUS_INVALID_PORT_ID; |
2430 | /* remaining bits are reserved-zero */ |
2431 | if (hc->ingpa & ~KVM_HYPERV_CONN_ID_MASK) |
2432 | return HV_STATUS_INVALID_HYPERCALL_INPUT; |
2433 | |
2434 | /* the eventfd is protected by vcpu->kvm->srcu, but conn_to_evt isn't */ |
2435 | rcu_read_lock(); |
	eventfd = idr_find(&hv->conn_to_evt, hc->ingpa);
2437 | rcu_read_unlock(); |
2438 | if (!eventfd) |
2439 | return HV_STATUS_INVALID_PORT_ID; |
2440 | |
	eventfd_signal(eventfd);
2442 | return HV_STATUS_SUCCESS; |
2443 | } |
2444 | |
2445 | static bool is_xmm_fast_hypercall(struct kvm_hv_hcall *hc) |
2446 | { |
2447 | switch (hc->code) { |
2448 | case HVCALL_FLUSH_VIRTUAL_ADDRESS_LIST: |
2449 | case HVCALL_FLUSH_VIRTUAL_ADDRESS_SPACE: |
2450 | case HVCALL_FLUSH_VIRTUAL_ADDRESS_LIST_EX: |
2451 | case HVCALL_FLUSH_VIRTUAL_ADDRESS_SPACE_EX: |
2452 | case HVCALL_SEND_IPI_EX: |
2453 | return true; |
2454 | } |
2455 | |
2456 | return false; |
2457 | } |
2458 | |
2459 | static void kvm_hv_hypercall_read_xmm(struct kvm_hv_hcall *hc) |
2460 | { |
2461 | int reg; |
2462 | |
2463 | kvm_fpu_get(); |
2464 | for (reg = 0; reg < HV_HYPERCALL_MAX_XMM_REGISTERS; reg++) |
		_kvm_read_sse_reg(reg, &hc->xmm[reg]);
2466 | kvm_fpu_put(); |
2467 | } |
2468 | |
2469 | static bool hv_check_hypercall_access(struct kvm_vcpu_hv *hv_vcpu, u16 code) |
2470 | { |
2471 | if (!hv_vcpu->enforce_cpuid) |
2472 | return true; |
2473 | |
2474 | switch (code) { |
2475 | case HVCALL_NOTIFY_LONG_SPIN_WAIT: |
2476 | return hv_vcpu->cpuid_cache.enlightenments_ebx && |
2477 | hv_vcpu->cpuid_cache.enlightenments_ebx != U32_MAX; |
2478 | case HVCALL_POST_MESSAGE: |
2479 | return hv_vcpu->cpuid_cache.features_ebx & HV_POST_MESSAGES; |
2480 | case HVCALL_SIGNAL_EVENT: |
2481 | return hv_vcpu->cpuid_cache.features_ebx & HV_SIGNAL_EVENTS; |
2482 | case HVCALL_POST_DEBUG_DATA: |
2483 | case HVCALL_RETRIEVE_DEBUG_DATA: |
2484 | case HVCALL_RESET_DEBUG_SESSION: |
2485 | /* |
2486 | * Return 'true' when SynDBG is disabled so the resulting code |
2487 | * will be HV_STATUS_INVALID_HYPERCALL_CODE. |
2488 | */ |
		return !kvm_hv_is_syndbg_enabled(hv_vcpu->vcpu) ||
2490 | hv_vcpu->cpuid_cache.features_ebx & HV_DEBUGGING; |
2491 | case HVCALL_FLUSH_VIRTUAL_ADDRESS_LIST_EX: |
2492 | case HVCALL_FLUSH_VIRTUAL_ADDRESS_SPACE_EX: |
2493 | if (!(hv_vcpu->cpuid_cache.enlightenments_eax & |
2494 | HV_X64_EX_PROCESSOR_MASKS_RECOMMENDED)) |
2495 | return false; |
2496 | fallthrough; |
2497 | case HVCALL_FLUSH_VIRTUAL_ADDRESS_LIST: |
2498 | case HVCALL_FLUSH_VIRTUAL_ADDRESS_SPACE: |
2499 | return hv_vcpu->cpuid_cache.enlightenments_eax & |
2500 | HV_X64_REMOTE_TLB_FLUSH_RECOMMENDED; |
2501 | case HVCALL_SEND_IPI_EX: |
2502 | if (!(hv_vcpu->cpuid_cache.enlightenments_eax & |
2503 | HV_X64_EX_PROCESSOR_MASKS_RECOMMENDED)) |
2504 | return false; |
2505 | fallthrough; |
2506 | case HVCALL_SEND_IPI: |
2507 | return hv_vcpu->cpuid_cache.enlightenments_eax & |
2508 | HV_X64_CLUSTER_IPI_RECOMMENDED; |
2509 | case HV_EXT_CALL_QUERY_CAPABILITIES ... HV_EXT_CALL_MAX: |
2510 | return hv_vcpu->cpuid_cache.features_ebx & |
2511 | HV_ENABLE_EXTENDED_HYPERCALLS; |
2512 | default: |
2513 | break; |
2514 | } |
2515 | |
2516 | return true; |
2517 | } |
2518 | |
2519 | int kvm_hv_hypercall(struct kvm_vcpu *vcpu) |
2520 | { |
2521 | struct kvm_vcpu_hv *hv_vcpu = to_hv_vcpu(vcpu); |
2522 | struct kvm_hv_hcall hc; |
2523 | u64 ret = HV_STATUS_SUCCESS; |
2524 | |
2525 | /* |
	 * Per the Hyper-V spec, a hypercall generates a #UD from non-zero
	 * CPL and from real mode.
2528 | */ |
2529 | if (static_call(kvm_x86_get_cpl)(vcpu) != 0 || !is_protmode(vcpu)) { |
2530 | kvm_queue_exception(vcpu, UD_VECTOR); |
2531 | return 1; |
2532 | } |
2533 | |
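	/*
	 * Per the TLFS hypercall ABI, 64-bit callers pass the hypercall
	 * input value in RCX, the input parameters GPA in RDX and the output
	 * parameters GPA in R8; 32-bit callers split each of these across
	 * EDX:EAX, EBX:ECX and EDI:ESI respectively.
	 */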
2534 | #ifdef CONFIG_X86_64 |
2535 | if (is_64_bit_hypercall(vcpu)) { |
2536 | hc.param = kvm_rcx_read(vcpu); |
2537 | hc.ingpa = kvm_rdx_read(vcpu); |
2538 | hc.outgpa = kvm_r8_read(vcpu); |
2539 | } else |
2540 | #endif |
2541 | { |
2542 | hc.param = ((u64)kvm_rdx_read(vcpu) << 32) | |
2543 | (kvm_rax_read(vcpu) & 0xffffffff); |
2544 | hc.ingpa = ((u64)kvm_rbx_read(vcpu) << 32) | |
2545 | (kvm_rcx_read(vcpu) & 0xffffffff); |
2546 | hc.outgpa = ((u64)kvm_rdi_read(vcpu) << 32) | |
2547 | (kvm_rsi_read(vcpu) & 0xffffffff); |
2548 | } |
2549 | |
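	/*
	 * Decode the hypercall input value: bits 15:0 hold the call code,
	 * bit 16 selects the 'fast' (register-based) calling convention,
	 * bits 26:17 give the variable header size, and bits 43:32 and
	 * 59:48 hold the rep count and rep start index respectively.
	 */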
2550 | hc.code = hc.param & 0xffff; |
2551 | hc.var_cnt = (hc.param & HV_HYPERCALL_VARHEAD_MASK) >> HV_HYPERCALL_VARHEAD_OFFSET; |
2552 | hc.fast = !!(hc.param & HV_HYPERCALL_FAST_BIT); |
2553 | hc.rep_cnt = (hc.param >> HV_HYPERCALL_REP_COMP_OFFSET) & 0xfff; |
2554 | hc.rep_idx = (hc.param >> HV_HYPERCALL_REP_START_OFFSET) & 0xfff; |
2555 | hc.rep = !!(hc.rep_cnt || hc.rep_idx); |
2556 | |
	trace_kvm_hv_hypercall(hc.code, hc.fast, hc.var_cnt, hc.rep_cnt,
			       hc.rep_idx, hc.ingpa, hc.outgpa);
2559 | |
2560 | if (unlikely(!hv_check_hypercall_access(hv_vcpu, hc.code))) { |
2561 | ret = HV_STATUS_ACCESS_DENIED; |
2562 | goto hypercall_complete; |
2563 | } |
2564 | |
2565 | if (unlikely(hc.param & HV_HYPERCALL_RSVD_MASK)) { |
2566 | ret = HV_STATUS_INVALID_HYPERCALL_INPUT; |
2567 | goto hypercall_complete; |
2568 | } |
2569 | |
	if (hc.fast && is_xmm_fast_hypercall(&hc)) {
2571 | if (unlikely(hv_vcpu->enforce_cpuid && |
2572 | !(hv_vcpu->cpuid_cache.features_edx & |
2573 | HV_X64_HYPERCALL_XMM_INPUT_AVAILABLE))) { |
2574 | kvm_queue_exception(vcpu, UD_VECTOR); |
2575 | return 1; |
2576 | } |
2577 | |
		kvm_hv_hypercall_read_xmm(&hc);
2579 | } |
2580 | |
2581 | switch (hc.code) { |
2582 | case HVCALL_NOTIFY_LONG_SPIN_WAIT: |
2583 | if (unlikely(hc.rep || hc.var_cnt)) { |
2584 | ret = HV_STATUS_INVALID_HYPERCALL_INPUT; |
2585 | break; |
2586 | } |
		kvm_vcpu_on_spin(vcpu, true);
2588 | break; |
2589 | case HVCALL_SIGNAL_EVENT: |
2590 | if (unlikely(hc.rep || hc.var_cnt)) { |
2591 | ret = HV_STATUS_INVALID_HYPERCALL_INPUT; |
2592 | break; |
2593 | } |
		ret = kvm_hvcall_signal_event(vcpu, &hc);
2595 | if (ret != HV_STATUS_INVALID_PORT_ID) |
2596 | break; |
2597 | fallthrough; /* maybe userspace knows this conn_id */ |
2598 | case HVCALL_POST_MESSAGE: |
2599 | /* don't bother userspace if it has no way to handle it */ |
2600 | if (unlikely(hc.rep || hc.var_cnt || !to_hv_synic(vcpu)->active)) { |
2601 | ret = HV_STATUS_INVALID_HYPERCALL_INPUT; |
2602 | break; |
2603 | } |
2604 | goto hypercall_userspace_exit; |
2605 | case HVCALL_FLUSH_VIRTUAL_ADDRESS_LIST: |
2606 | if (unlikely(hc.var_cnt)) { |
2607 | ret = HV_STATUS_INVALID_HYPERCALL_INPUT; |
2608 | break; |
2609 | } |
2610 | fallthrough; |
2611 | case HVCALL_FLUSH_VIRTUAL_ADDRESS_LIST_EX: |
2612 | if (unlikely(!hc.rep_cnt || hc.rep_idx)) { |
2613 | ret = HV_STATUS_INVALID_HYPERCALL_INPUT; |
2614 | break; |
2615 | } |
		ret = kvm_hv_flush_tlb(vcpu, &hc);
2617 | break; |
2618 | case HVCALL_FLUSH_VIRTUAL_ADDRESS_SPACE: |
2619 | if (unlikely(hc.var_cnt)) { |
2620 | ret = HV_STATUS_INVALID_HYPERCALL_INPUT; |
2621 | break; |
2622 | } |
2623 | fallthrough; |
2624 | case HVCALL_FLUSH_VIRTUAL_ADDRESS_SPACE_EX: |
2625 | if (unlikely(hc.rep)) { |
2626 | ret = HV_STATUS_INVALID_HYPERCALL_INPUT; |
2627 | break; |
2628 | } |
		ret = kvm_hv_flush_tlb(vcpu, &hc);
2630 | break; |
2631 | case HVCALL_SEND_IPI: |
2632 | if (unlikely(hc.var_cnt)) { |
2633 | ret = HV_STATUS_INVALID_HYPERCALL_INPUT; |
2634 | break; |
2635 | } |
2636 | fallthrough; |
2637 | case HVCALL_SEND_IPI_EX: |
2638 | if (unlikely(hc.rep)) { |
2639 | ret = HV_STATUS_INVALID_HYPERCALL_INPUT; |
2640 | break; |
2641 | } |
		ret = kvm_hv_send_ipi(vcpu, &hc);
2643 | break; |
2644 | case HVCALL_POST_DEBUG_DATA: |
2645 | case HVCALL_RETRIEVE_DEBUG_DATA: |
2646 | if (unlikely(hc.fast)) { |
2647 | ret = HV_STATUS_INVALID_PARAMETER; |
2648 | break; |
2649 | } |
2650 | fallthrough; |
2651 | case HVCALL_RESET_DEBUG_SESSION: { |
2652 | struct kvm_hv_syndbg *syndbg = to_hv_syndbg(vcpu); |
2653 | |
2654 | if (!kvm_hv_is_syndbg_enabled(vcpu)) { |
2655 | ret = HV_STATUS_INVALID_HYPERCALL_CODE; |
2656 | break; |
2657 | } |
2658 | |
2659 | if (!(syndbg->options & HV_X64_SYNDBG_OPTION_USE_HCALLS)) { |
2660 | ret = HV_STATUS_OPERATION_DENIED; |
2661 | break; |
2662 | } |
2663 | goto hypercall_userspace_exit; |
2664 | } |
2665 | case HV_EXT_CALL_QUERY_CAPABILITIES ... HV_EXT_CALL_MAX: |
2666 | if (unlikely(hc.fast)) { |
2667 | ret = HV_STATUS_INVALID_PARAMETER; |
2668 | break; |
2669 | } |
2670 | goto hypercall_userspace_exit; |
2671 | default: |
2672 | ret = HV_STATUS_INVALID_HYPERCALL_CODE; |
2673 | break; |
2674 | } |
2675 | |
2676 | hypercall_complete: |
	return kvm_hv_hypercall_complete(vcpu, ret);
2678 | |
2679 | hypercall_userspace_exit: |
2680 | vcpu->run->exit_reason = KVM_EXIT_HYPERV; |
2681 | vcpu->run->hyperv.type = KVM_EXIT_HYPERV_HCALL; |
2682 | vcpu->run->hyperv.u.hcall.input = hc.param; |
2683 | vcpu->run->hyperv.u.hcall.params[0] = hc.ingpa; |
2684 | vcpu->run->hyperv.u.hcall.params[1] = hc.outgpa; |
2685 | vcpu->arch.complete_userspace_io = kvm_hv_hypercall_complete_userspace; |
2686 | return 0; |
2687 | } |
2688 | |
2689 | void kvm_hv_init_vm(struct kvm *kvm) |
2690 | { |
2691 | struct kvm_hv *hv = to_kvm_hv(kvm); |
2692 | |
2693 | mutex_init(&hv->hv_lock); |
	idr_init(&hv->conn_to_evt);
2695 | } |
2696 | |
2697 | void kvm_hv_destroy_vm(struct kvm *kvm) |
2698 | { |
2699 | struct kvm_hv *hv = to_kvm_hv(kvm); |
2700 | struct eventfd_ctx *eventfd; |
2701 | int i; |
2702 | |
2703 | idr_for_each_entry(&hv->conn_to_evt, eventfd, i) |
		eventfd_ctx_put(eventfd);
2705 | idr_destroy(&hv->conn_to_evt); |
2706 | } |
2707 | |
2708 | static int kvm_hv_eventfd_assign(struct kvm *kvm, u32 conn_id, int fd) |
2709 | { |
2710 | struct kvm_hv *hv = to_kvm_hv(kvm); |
2711 | struct eventfd_ctx *eventfd; |
2712 | int ret; |
2713 | |
2714 | eventfd = eventfd_ctx_fdget(fd); |
	if (IS_ERR(eventfd))
		return PTR_ERR(eventfd);
2717 | |
2718 | mutex_lock(&hv->hv_lock); |
	ret = idr_alloc(&hv->conn_to_evt, eventfd, conn_id, conn_id + 1,
			GFP_KERNEL_ACCOUNT);
	mutex_unlock(&hv->hv_lock);
2722 | |
2723 | if (ret >= 0) |
2724 | return 0; |
2725 | |
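	/*
	 * idr_alloc() was given the single-ID range [conn_id, conn_id + 1),
	 * so -ENOSPC means the connection ID is already in use; report
	 * -EEXIST instead.
	 */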
2726 | if (ret == -ENOSPC) |
2727 | ret = -EEXIST; |
	eventfd_ctx_put(eventfd);
2729 | return ret; |
2730 | } |
2731 | |
2732 | static int kvm_hv_eventfd_deassign(struct kvm *kvm, u32 conn_id) |
2733 | { |
2734 | struct kvm_hv *hv = to_kvm_hv(kvm); |
2735 | struct eventfd_ctx *eventfd; |
2736 | |
2737 | mutex_lock(&hv->hv_lock); |
	eventfd = idr_remove(&hv->conn_to_evt, conn_id);
	mutex_unlock(&hv->hv_lock);
2740 | |
2741 | if (!eventfd) |
2742 | return -ENOENT; |
2743 | |
	synchronize_srcu(&kvm->srcu);
	eventfd_ctx_put(eventfd);
2746 | return 0; |
2747 | } |
2748 | |
2749 | int kvm_vm_ioctl_hv_eventfd(struct kvm *kvm, struct kvm_hyperv_eventfd *args) |
2750 | { |
2751 | if ((args->flags & ~KVM_HYPERV_EVENTFD_DEASSIGN) || |
2752 | (args->conn_id & ~KVM_HYPERV_CONN_ID_MASK)) |
2753 | return -EINVAL; |
2754 | |
2755 | if (args->flags == KVM_HYPERV_EVENTFD_DEASSIGN) |
		return kvm_hv_eventfd_deassign(kvm, args->conn_id);
	return kvm_hv_eventfd_assign(kvm, args->conn_id, args->fd);
2758 | } |
2759 | |
2760 | int kvm_get_hv_cpuid(struct kvm_vcpu *vcpu, struct kvm_cpuid2 *cpuid, |
2761 | struct kvm_cpuid_entry2 __user *entries) |
2762 | { |
2763 | uint16_t evmcs_ver = 0; |
2764 | struct kvm_cpuid_entry2 cpuid_entries[] = { |
2765 | { .function = HYPERV_CPUID_VENDOR_AND_MAX_FUNCTIONS }, |
2766 | { .function = HYPERV_CPUID_INTERFACE }, |
2767 | { .function = HYPERV_CPUID_VERSION }, |
2768 | { .function = HYPERV_CPUID_FEATURES }, |
2769 | { .function = HYPERV_CPUID_ENLIGHTMENT_INFO }, |
2770 | { .function = HYPERV_CPUID_IMPLEMENT_LIMITS }, |
2771 | { .function = HYPERV_CPUID_SYNDBG_VENDOR_AND_MAX_FUNCTIONS }, |
2772 | { .function = HYPERV_CPUID_SYNDBG_INTERFACE }, |
2773 | { .function = HYPERV_CPUID_SYNDBG_PLATFORM_CAPABILITIES }, |
2774 | { .function = HYPERV_CPUID_NESTED_FEATURES }, |
2775 | }; |
2776 | int i, nent = ARRAY_SIZE(cpuid_entries); |
2777 | |
2778 | if (kvm_x86_ops.nested_ops->get_evmcs_version) |
2779 | evmcs_ver = kvm_x86_ops.nested_ops->get_evmcs_version(vcpu); |
2780 | |
2781 | if (cpuid->nent < nent) |
2782 | return -E2BIG; |
2783 | |
2784 | if (cpuid->nent > nent) |
2785 | cpuid->nent = nent; |
2786 | |
2787 | for (i = 0; i < nent; i++) { |
2788 | struct kvm_cpuid_entry2 *ent = &cpuid_entries[i]; |
2789 | u32 signature[3]; |
2790 | |
2791 | switch (ent->function) { |
2792 | case HYPERV_CPUID_VENDOR_AND_MAX_FUNCTIONS: |
2793 | memcpy(signature, "Linux KVM Hv" , 12); |
2794 | |
2795 | ent->eax = HYPERV_CPUID_SYNDBG_PLATFORM_CAPABILITIES; |
2796 | ent->ebx = signature[0]; |
2797 | ent->ecx = signature[1]; |
2798 | ent->edx = signature[2]; |
2799 | break; |
2800 | |
2801 | case HYPERV_CPUID_INTERFACE: |
2802 | ent->eax = HYPERV_CPUID_SIGNATURE_EAX; |
2803 | break; |
2804 | |
2805 | case HYPERV_CPUID_VERSION: |
2806 | /* |
			 * We implement some Hyper-V 2016 functions, so let's use
2808 | * this version. |
2809 | */ |
2810 | ent->eax = 0x00003839; |
2811 | ent->ebx = 0x000A0000; |
2812 | break; |
2813 | |
2814 | case HYPERV_CPUID_FEATURES: |
2815 | ent->eax |= HV_MSR_VP_RUNTIME_AVAILABLE; |
2816 | ent->eax |= HV_MSR_TIME_REF_COUNT_AVAILABLE; |
2817 | ent->eax |= HV_MSR_SYNIC_AVAILABLE; |
2818 | ent->eax |= HV_MSR_SYNTIMER_AVAILABLE; |
2819 | ent->eax |= HV_MSR_APIC_ACCESS_AVAILABLE; |
2820 | ent->eax |= HV_MSR_HYPERCALL_AVAILABLE; |
2821 | ent->eax |= HV_MSR_VP_INDEX_AVAILABLE; |
2822 | ent->eax |= HV_MSR_RESET_AVAILABLE; |
2823 | ent->eax |= HV_MSR_REFERENCE_TSC_AVAILABLE; |
2824 | ent->eax |= HV_ACCESS_FREQUENCY_MSRS; |
2825 | ent->eax |= HV_ACCESS_REENLIGHTENMENT; |
2826 | ent->eax |= HV_ACCESS_TSC_INVARIANT; |
2827 | |
2828 | ent->ebx |= HV_POST_MESSAGES; |
2829 | ent->ebx |= HV_SIGNAL_EVENTS; |
2830 | ent->ebx |= HV_ENABLE_EXTENDED_HYPERCALLS; |
2831 | |
2832 | ent->edx |= HV_X64_HYPERCALL_XMM_INPUT_AVAILABLE; |
2833 | ent->edx |= HV_FEATURE_FREQUENCY_MSRS_AVAILABLE; |
2834 | ent->edx |= HV_FEATURE_GUEST_CRASH_MSR_AVAILABLE; |
2835 | |
2836 | ent->ebx |= HV_DEBUGGING; |
2837 | ent->edx |= HV_X64_GUEST_DEBUGGING_AVAILABLE; |
2838 | ent->edx |= HV_FEATURE_DEBUG_MSRS_AVAILABLE; |
2839 | ent->edx |= HV_FEATURE_EXT_GVA_RANGES_FLUSH; |
2840 | |
2841 | /* |
			 * Direct Synthetic timers only make sense with an
			 * in-kernel LAPIC.
2844 | */ |
2845 | if (!vcpu || lapic_in_kernel(vcpu)) |
2846 | ent->edx |= HV_STIMER_DIRECT_MODE_AVAILABLE; |
2847 | |
2848 | break; |
2849 | |
2850 | case HYPERV_CPUID_ENLIGHTMENT_INFO: |
2851 | ent->eax |= HV_X64_REMOTE_TLB_FLUSH_RECOMMENDED; |
2852 | ent->eax |= HV_X64_APIC_ACCESS_RECOMMENDED; |
2853 | ent->eax |= HV_X64_RELAXED_TIMING_RECOMMENDED; |
2854 | ent->eax |= HV_X64_CLUSTER_IPI_RECOMMENDED; |
2855 | ent->eax |= HV_X64_EX_PROCESSOR_MASKS_RECOMMENDED; |
2856 | if (evmcs_ver) |
2857 | ent->eax |= HV_X64_ENLIGHTENED_VMCS_RECOMMENDED; |
2858 | if (!cpu_smt_possible()) |
2859 | ent->eax |= HV_X64_NO_NONARCH_CORESHARING; |
2860 | |
2861 | ent->eax |= HV_DEPRECATING_AEOI_RECOMMENDED; |
2862 | /* |
2863 | * Default number of spinlock retry attempts, matches |
2864 | * HyperV 2016. |
2865 | */ |
2866 | ent->ebx = 0x00000FFF; |
2867 | |
2868 | break; |
2869 | |
2870 | case HYPERV_CPUID_IMPLEMENT_LIMITS: |
2871 | /* Maximum number of virtual processors */ |
2872 | ent->eax = KVM_MAX_VCPUS; |
2873 | /* |
2874 | * Maximum number of logical processors, matches |
2875 | * HyperV 2016. |
2876 | */ |
2877 | ent->ebx = 64; |
2878 | |
2879 | break; |
2880 | |
2881 | case HYPERV_CPUID_NESTED_FEATURES: |
2882 | ent->eax = evmcs_ver; |
2883 | ent->eax |= HV_X64_NESTED_DIRECT_FLUSH; |
2884 | ent->eax |= HV_X64_NESTED_MSR_BITMAP; |
2885 | ent->ebx |= HV_X64_NESTED_EVMCS1_PERF_GLOBAL_CTRL; |
2886 | break; |
2887 | |
2888 | case HYPERV_CPUID_SYNDBG_VENDOR_AND_MAX_FUNCTIONS: |
2889 | memcpy(signature, "Linux KVM Hv" , 12); |
2890 | |
2891 | ent->eax = 0; |
2892 | ent->ebx = signature[0]; |
2893 | ent->ecx = signature[1]; |
2894 | ent->edx = signature[2]; |
2895 | break; |
2896 | |
2897 | case HYPERV_CPUID_SYNDBG_INTERFACE: |
2898 | memcpy(signature, "VS#1\0\0\0\0\0\0\0\0" , 12); |
2899 | ent->eax = signature[0]; |
2900 | break; |
2901 | |
2902 | case HYPERV_CPUID_SYNDBG_PLATFORM_CAPABILITIES: |
2903 | ent->eax |= HV_X64_SYNDBG_CAP_ALLOW_KERNEL_DEBUGGING; |
2904 | break; |
2905 | |
2906 | default: |
2907 | break; |
2908 | } |
2909 | } |
2910 | |
	if (copy_to_user(entries, cpuid_entries,
			 nent * sizeof(struct kvm_cpuid_entry2)))
2913 | return -EFAULT; |
2914 | |
2915 | return 0; |
2916 | } |
2917 | |