1 | // SPDX-License-Identifier: GPL-2.0-only |
2 | /* |
3 | * Kernel-based Virtual Machine driver for Linux |
4 | * cpuid support routines |
5 | * |
6 | * derived from arch/x86/kvm/x86.c |
7 | * |
8 | * Copyright 2011 Red Hat, Inc. and/or its affiliates. |
9 | * Copyright IBM Corporation, 2008 |
10 | */ |
11 | #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt |
12 | |
13 | #include <linux/kvm_host.h> |
14 | #include "linux/lockdep.h" |
15 | #include <linux/export.h> |
16 | #include <linux/vmalloc.h> |
17 | #include <linux/uaccess.h> |
18 | #include <linux/sched/stat.h> |
19 | |
20 | #include <asm/processor.h> |
21 | #include <asm/user.h> |
22 | #include <asm/fpu/xstate.h> |
23 | #include <asm/sgx.h> |
24 | #include <asm/cpuid.h> |
25 | #include "cpuid.h" |
26 | #include "lapic.h" |
27 | #include "mmu.h" |
28 | #include "trace.h" |
29 | #include "pmu.h" |
30 | #include "xen.h" |
31 | |
32 | /* |
33 | * Unlike "struct cpuinfo_x86.x86_capability", kvm_cpu_caps doesn't need to be |
34 | * aligned to sizeof(unsigned long) because it's not accessed via bitops. |
35 | */ |
36 | u32 kvm_cpu_caps[NR_KVM_CPU_CAPS] __read_mostly; |
37 | EXPORT_SYMBOL_GPL(kvm_cpu_caps); |
38 | |
39 | u32 xstate_required_size(u64 xstate_bv, bool compacted) |
40 | { |
41 | int feature_bit = 0; |
42 | u32 ret = XSAVE_HDR_SIZE + XSAVE_HDR_OFFSET; |
43 | |
44 | xstate_bv &= XFEATURE_MASK_EXTEND; |
45 | while (xstate_bv) { |
46 | if (xstate_bv & 0x1) { |
47 | u32 eax, ebx, ecx, edx, offset; |
			cpuid_count(0xD, feature_bit, &eax, &ebx, &ecx, &edx);
49 | /* ECX[1]: 64B alignment in compacted form */ |
50 | if (compacted) |
51 | offset = (ecx & 0x2) ? ALIGN(ret, 64) : ret; |
52 | else |
53 | offset = ebx; |
54 | ret = max(ret, offset + eax); |
55 | } |
56 | |
57 | xstate_bv >>= 1; |
58 | feature_bit++; |
59 | } |
60 | |
61 | return ret; |
62 | } |
63 | |
64 | #define F feature_bit |
65 | |
66 | /* Scattered Flag - For features that are scattered by cpufeatures.h. */ |
67 | #define SF(name) \ |
68 | ({ \ |
69 | BUILD_BUG_ON(X86_FEATURE_##name >= MAX_CPU_FEATURES); \ |
70 | (boot_cpu_has(X86_FEATURE_##name) ? F(name) : 0); \ |
71 | }) |
72 | |
73 | /* |
74 | * Magic value used by KVM when querying userspace-provided CPUID entries and |
 * doesn't care about the CPUID index because the index of the function in
76 | * question is not significant. Note, this magic value must have at least one |
77 | * bit set in bits[63:32] and must be consumed as a u64 by cpuid_entry2_find() |
78 | * to avoid false positives when processing guest CPUID input. |
79 | */ |
80 | #define KVM_CPUID_INDEX_NOT_SIGNIFICANT -1ull |
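
/*
 * Sketch of why the magic value is safe: in cpuid_entry2_find(), @index is a
 * u64 while e->index is a u32, so "e->index == index" zero-extends e->index
 * to 64 bits.  Because KVM_CPUID_INDEX_NOT_SIGNIFICANT has bits[63:32] set,
 * no legitimate 32-bit guest index can ever compare equal to it.
 */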
81 | |
82 | static inline struct kvm_cpuid_entry2 *cpuid_entry2_find( |
83 | struct kvm_cpuid_entry2 *entries, int nent, u32 function, u64 index) |
84 | { |
85 | struct kvm_cpuid_entry2 *e; |
86 | int i; |
87 | |
88 | /* |
89 | * KVM has a semi-arbitrary rule that querying the guest's CPUID model |
90 | * with IRQs disabled is disallowed. The CPUID model can legitimately |
91 | * have over one hundred entries, i.e. the lookup is slow, and IRQs are |
92 | * typically disabled in KVM only when KVM is in a performance critical |
93 | * path, e.g. the core VM-Enter/VM-Exit run loop. Nothing will break |
94 | * if this rule is violated, this assertion is purely to flag potential |
95 | * performance issues. If this fires, consider moving the lookup out |
96 | * of the hotpath, e.g. by caching information during CPUID updates. |
97 | */ |
98 | lockdep_assert_irqs_enabled(); |
99 | |
100 | for (i = 0; i < nent; i++) { |
101 | e = &entries[i]; |
102 | |
103 | if (e->function != function) |
104 | continue; |
105 | |
106 | /* |
107 | * If the index isn't significant, use the first entry with a |
		 * matching function. It's userspace's responsibility to not
109 | * provide "duplicate" entries in all cases. |
110 | */ |
		if (!(e->flags & KVM_CPUID_FLAG_SIGNIFCANT_INDEX) || e->index == index)
			return e;

115 | /* |
116 | * Similarly, use the first matching entry if KVM is doing a |
117 | * lookup (as opposed to emulating CPUID) for a function that's |
118 | * architecturally defined as not having a significant index. |
119 | */ |
120 | if (index == KVM_CPUID_INDEX_NOT_SIGNIFICANT) { |
121 | /* |
122 | * Direct lookups from KVM should not diverge from what |
123 | * KVM defines internally (the architectural behavior). |
124 | */ |
125 | WARN_ON_ONCE(cpuid_function_is_indexed(function)); |
126 | return e; |
127 | } |
128 | } |
129 | |
130 | return NULL; |
131 | } |
132 | |
133 | static int kvm_check_cpuid(struct kvm_vcpu *vcpu, |
134 | struct kvm_cpuid_entry2 *entries, |
135 | int nent) |
136 | { |
137 | struct kvm_cpuid_entry2 *best; |
138 | u64 xfeatures; |
139 | |
140 | /* |
141 | * The existing code assumes virtual address is 48-bit or 57-bit in the |
142 | * canonical address checks; exit if it is ever changed. |
143 | */ |
	best = cpuid_entry2_find(entries, nent, 0x80000008,
145 | KVM_CPUID_INDEX_NOT_SIGNIFICANT); |
146 | if (best) { |
147 | int vaddr_bits = (best->eax & 0xff00) >> 8; |
148 | |
149 | if (vaddr_bits != 48 && vaddr_bits != 57 && vaddr_bits != 0) |
150 | return -EINVAL; |
151 | } |
152 | |
153 | /* |
154 | * Exposing dynamic xfeatures to the guest requires additional |
155 | * enabling in the FPU, e.g. to expand the guest XSAVE state size. |
156 | */ |
	best = cpuid_entry2_find(entries, nent, 0xd, 0);
158 | if (!best) |
159 | return 0; |
160 | |
161 | xfeatures = best->eax | ((u64)best->edx << 32); |
162 | xfeatures &= XFEATURE_MASK_USER_DYNAMIC; |
163 | if (!xfeatures) |
164 | return 0; |
165 | |
	return fpu_enable_guest_xfd_features(&vcpu->arch.guest_fpu, xfeatures);
167 | } |
168 | |
169 | /* Check whether the supplied CPUID data is equal to what is already set for the vCPU. */ |
170 | static int kvm_cpuid_check_equal(struct kvm_vcpu *vcpu, struct kvm_cpuid_entry2 *e2, |
171 | int nent) |
172 | { |
173 | struct kvm_cpuid_entry2 *orig; |
174 | int i; |
175 | |
176 | if (nent != vcpu->arch.cpuid_nent) |
177 | return -EINVAL; |
178 | |
179 | for (i = 0; i < nent; i++) { |
180 | orig = &vcpu->arch.cpuid_entries[i]; |
181 | if (e2[i].function != orig->function || |
182 | e2[i].index != orig->index || |
183 | e2[i].flags != orig->flags || |
184 | e2[i].eax != orig->eax || e2[i].ebx != orig->ebx || |
185 | e2[i].ecx != orig->ecx || e2[i].edx != orig->edx) |
186 | return -EINVAL; |
187 | } |
188 | |
189 | return 0; |
190 | } |
191 | |
192 | static struct kvm_hypervisor_cpuid kvm_get_hypervisor_cpuid(struct kvm_vcpu *vcpu, |
193 | const char *sig) |
194 | { |
195 | struct kvm_hypervisor_cpuid cpuid = {}; |
196 | struct kvm_cpuid_entry2 *entry; |
197 | u32 base; |
198 | |
199 | for_each_possible_hypervisor_cpuid_base(base) { |
		entry = kvm_find_cpuid_entry(vcpu, base);
201 | |
202 | if (entry) { |
203 | u32 signature[3]; |
204 | |
205 | signature[0] = entry->ebx; |
206 | signature[1] = entry->ecx; |
207 | signature[2] = entry->edx; |
208 | |
			if (!memcmp(signature, sig, sizeof(signature))) {
210 | cpuid.base = base; |
211 | cpuid.limit = entry->eax; |
212 | break; |
213 | } |
214 | } |
215 | } |
216 | |
217 | return cpuid; |
218 | } |
219 | |
220 | static struct kvm_cpuid_entry2 *__kvm_find_kvm_cpuid_features(struct kvm_vcpu *vcpu, |
221 | struct kvm_cpuid_entry2 *entries, int nent) |
222 | { |
223 | u32 base = vcpu->arch.kvm_cpuid.base; |
224 | |
225 | if (!base) |
226 | return NULL; |
227 | |
	return cpuid_entry2_find(entries, nent, base | KVM_CPUID_FEATURES,
229 | KVM_CPUID_INDEX_NOT_SIGNIFICANT); |
230 | } |
231 | |
232 | static struct kvm_cpuid_entry2 *kvm_find_kvm_cpuid_features(struct kvm_vcpu *vcpu) |
233 | { |
	return __kvm_find_kvm_cpuid_features(vcpu, vcpu->arch.cpuid_entries,
					     vcpu->arch.cpuid_nent);
236 | } |
237 | |
238 | void kvm_update_pv_runtime(struct kvm_vcpu *vcpu) |
239 | { |
240 | struct kvm_cpuid_entry2 *best = kvm_find_kvm_cpuid_features(vcpu); |
241 | |
242 | /* |
243 | * save the feature bitmap to avoid cpuid lookup for every PV |
244 | * operation |
245 | */ |
246 | if (best) |
247 | vcpu->arch.pv_cpuid.features = best->eax; |
248 | } |
249 | |
250 | /* |
251 | * Calculate guest's supported XCR0 taking into account guest CPUID data and |
252 | * KVM's supported XCR0 (comprised of host's XCR0 and KVM_SUPPORTED_XCR0). |
253 | */ |
254 | static u64 cpuid_get_supported_xcr0(struct kvm_cpuid_entry2 *entries, int nent) |
255 | { |
256 | struct kvm_cpuid_entry2 *best; |
257 | |
	best = cpuid_entry2_find(entries, nent, 0xd, 0);
259 | if (!best) |
260 | return 0; |
261 | |
262 | return (best->eax | ((u64)best->edx << 32)) & kvm_caps.supported_xcr0; |
263 | } |
264 | |
265 | static void __kvm_update_cpuid_runtime(struct kvm_vcpu *vcpu, struct kvm_cpuid_entry2 *entries, |
266 | int nent) |
267 | { |
268 | struct kvm_cpuid_entry2 *best; |
269 | |
	best = cpuid_entry2_find(entries, nent, 1, KVM_CPUID_INDEX_NOT_SIGNIFICANT);
271 | if (best) { |
272 | /* Update OSXSAVE bit */ |
273 | if (boot_cpu_has(X86_FEATURE_XSAVE)) |
			cpuid_entry_change(best, X86_FEATURE_OSXSAVE,
					   kvm_is_cr4_bit_set(vcpu, X86_CR4_OSXSAVE));
276 | |
		cpuid_entry_change(best, X86_FEATURE_APIC,
				   vcpu->arch.apic_base & MSR_IA32_APICBASE_ENABLE);
279 | } |
280 | |
	best = cpuid_entry2_find(entries, nent, 7, 0);
282 | if (best && boot_cpu_has(X86_FEATURE_PKU) && best->function == 0x7) |
		cpuid_entry_change(best, X86_FEATURE_OSPKE,
				   kvm_is_cr4_bit_set(vcpu, X86_CR4_PKE));
285 | |
	best = cpuid_entry2_find(entries, nent, 0xD, 0);
287 | if (best) |
		best->ebx = xstate_required_size(vcpu->arch.xcr0, false);
289 | |
	best = cpuid_entry2_find(entries, nent, 0xD, 1);
	if (best && (cpuid_entry_has(best, X86_FEATURE_XSAVES) ||
		     cpuid_entry_has(best, X86_FEATURE_XSAVEC)))
		best->ebx = xstate_required_size(vcpu->arch.xcr0, true);
294 | |
295 | best = __kvm_find_kvm_cpuid_features(vcpu, entries, nent); |
	if (kvm_hlt_in_guest(vcpu->kvm) && best &&
297 | (best->eax & (1 << KVM_FEATURE_PV_UNHALT))) |
298 | best->eax &= ~(1 << KVM_FEATURE_PV_UNHALT); |
299 | |
	if (!kvm_check_has_quirk(vcpu->kvm, KVM_X86_QUIRK_MISC_ENABLE_NO_MWAIT)) {
		best = cpuid_entry2_find(entries, nent, 0x1, KVM_CPUID_INDEX_NOT_SIGNIFICANT);
		if (best)
			cpuid_entry_change(best, X86_FEATURE_MWAIT,
					   vcpu->arch.ia32_misc_enable_msr &
					   MSR_IA32_MISC_ENABLE_MWAIT);
	}
306 | } |
307 | } |
308 | |
309 | void kvm_update_cpuid_runtime(struct kvm_vcpu *vcpu) |
310 | { |
	__kvm_update_cpuid_runtime(vcpu, vcpu->arch.cpuid_entries, vcpu->arch.cpuid_nent);
312 | } |
313 | EXPORT_SYMBOL_GPL(kvm_update_cpuid_runtime); |
314 | |
315 | static bool kvm_cpuid_has_hyperv(struct kvm_cpuid_entry2 *entries, int nent) |
316 | { |
317 | struct kvm_cpuid_entry2 *entry; |
318 | |
319 | entry = cpuid_entry2_find(entries, nent, HYPERV_CPUID_INTERFACE, |
320 | KVM_CPUID_INDEX_NOT_SIGNIFICANT); |
321 | return entry && entry->eax == HYPERV_CPUID_SIGNATURE_EAX; |
322 | } |
323 | |
324 | static void kvm_vcpu_after_set_cpuid(struct kvm_vcpu *vcpu) |
325 | { |
326 | struct kvm_lapic *apic = vcpu->arch.apic; |
327 | struct kvm_cpuid_entry2 *best; |
328 | bool allow_gbpages; |
329 | |
330 | BUILD_BUG_ON(KVM_NR_GOVERNED_FEATURES > KVM_MAX_NR_GOVERNED_FEATURES); |
	bitmap_zero(vcpu->arch.governed_features.enabled,
332 | KVM_MAX_NR_GOVERNED_FEATURES); |
333 | |
334 | /* |
335 | * If TDP is enabled, let the guest use GBPAGES if they're supported in |
336 | * hardware. The hardware page walker doesn't let KVM disable GBPAGES, |
337 | * i.e. won't treat them as reserved, and KVM doesn't redo the GVA->GPA |
338 | * walk for performance and complexity reasons. Not to mention KVM |
339 | * _can't_ solve the problem because GVA->GPA walks aren't visible to |
340 | * KVM once a TDP translation is installed. Mimic hardware behavior so |
341 | * that KVM's is at least consistent, i.e. doesn't randomly inject #PF. |
342 | * If TDP is disabled, honor *only* guest CPUID as KVM has full control |
343 | * and can install smaller shadow pages if the host lacks 1GiB support. |
344 | */ |
345 | allow_gbpages = tdp_enabled ? boot_cpu_has(X86_FEATURE_GBPAGES) : |
346 | guest_cpuid_has(vcpu, X86_FEATURE_GBPAGES); |
347 | if (allow_gbpages) |
348 | kvm_governed_feature_set(vcpu, X86_FEATURE_GBPAGES); |
349 | |
	best = kvm_find_cpuid_entry(vcpu, 1);
351 | if (best && apic) { |
		if (cpuid_entry_has(best, X86_FEATURE_TSC_DEADLINE_TIMER))
353 | apic->lapic_timer.timer_mode_mask = 3 << 17; |
354 | else |
355 | apic->lapic_timer.timer_mode_mask = 1 << 17; |
356 | |
357 | kvm_apic_set_version(vcpu); |
358 | } |
359 | |
360 | vcpu->arch.guest_supported_xcr0 = |
		cpuid_get_supported_xcr0(vcpu->arch.cpuid_entries, vcpu->arch.cpuid_nent);
362 | |
363 | kvm_update_pv_runtime(vcpu); |
364 | |
365 | vcpu->arch.maxphyaddr = cpuid_query_maxphyaddr(vcpu); |
366 | vcpu->arch.reserved_gpa_bits = kvm_vcpu_reserved_gpa_bits_raw(vcpu); |
367 | |
368 | kvm_pmu_refresh(vcpu); |
369 | vcpu->arch.cr4_guest_rsvd_bits = |
370 | __cr4_reserved_bits(guest_cpuid_has, vcpu); |
371 | |
	kvm_hv_set_cpuid(vcpu, kvm_cpuid_has_hyperv(vcpu->arch.cpuid_entries,
						    vcpu->arch.cpuid_nent));
374 | |
375 | /* Invoke the vendor callback only after the above state is updated. */ |
376 | static_call(kvm_x86_vcpu_after_set_cpuid)(vcpu); |
377 | |
378 | /* |
379 | * Except for the MMU, which needs to do its thing any vendor specific |
380 | * adjustments to the reserved GPA bits. |
381 | */ |
382 | kvm_mmu_after_set_cpuid(vcpu); |
383 | } |
384 | |
385 | int cpuid_query_maxphyaddr(struct kvm_vcpu *vcpu) |
386 | { |
387 | struct kvm_cpuid_entry2 *best; |
388 | |
	best = kvm_find_cpuid_entry(vcpu, 0x80000000);
390 | if (!best || best->eax < 0x80000008) |
391 | goto not_found; |
	best = kvm_find_cpuid_entry(vcpu, 0x80000008);
393 | if (best) |
394 | return best->eax & 0xff; |
395 | not_found: |
396 | return 36; |
397 | } |
398 | |
399 | /* |
400 | * This "raw" version returns the reserved GPA bits without any adjustments for |
401 | * encryption technologies that usurp bits. The raw mask should be used if and |
402 | * only if hardware does _not_ strip the usurped bits, e.g. in virtual MTRRs. |
403 | */ |
404 | u64 kvm_vcpu_reserved_gpa_bits_raw(struct kvm_vcpu *vcpu) |
405 | { |
	return rsvd_bits(cpuid_maxphyaddr(vcpu), 63);
407 | } |
408 | |
409 | static int kvm_set_cpuid(struct kvm_vcpu *vcpu, struct kvm_cpuid_entry2 *e2, |
410 | int nent) |
411 | { |
412 | int r; |
413 | |
	__kvm_update_cpuid_runtime(vcpu, e2, nent);
415 | |
416 | /* |
417 | * KVM does not correctly handle changing guest CPUID after KVM_RUN, as |
418 | * MAXPHYADDR, GBPAGES support, AMD reserved bit behavior, etc.. aren't |
419 | * tracked in kvm_mmu_page_role. As a result, KVM may miss guest page |
420 | * faults due to reusing SPs/SPTEs. In practice no sane VMM mucks with |
421 | * the core vCPU model on the fly. It would've been better to forbid any |
422 | * KVM_SET_CPUID{,2} calls after KVM_RUN altogether but unfortunately |
423 | * some VMMs (e.g. QEMU) reuse vCPU fds for CPU hotplug/unplug and do |
424 | * KVM_SET_CPUID{,2} again. To support this legacy behavior, check |
425 | * whether the supplied CPUID data is equal to what's already set. |
426 | */ |
427 | if (kvm_vcpu_has_run(vcpu)) { |
428 | r = kvm_cpuid_check_equal(vcpu, e2, nent); |
429 | if (r) |
430 | return r; |
431 | |
		kvfree(e2);
433 | return 0; |
434 | } |
435 | |
436 | if (kvm_cpuid_has_hyperv(entries: e2, nent)) { |
437 | r = kvm_hv_vcpu_init(vcpu); |
438 | if (r) |
439 | return r; |
440 | } |
441 | |
	r = kvm_check_cpuid(vcpu, e2, nent);
443 | if (r) |
444 | return r; |
445 | |
	kvfree(vcpu->arch.cpuid_entries);
447 | vcpu->arch.cpuid_entries = e2; |
448 | vcpu->arch.cpuid_nent = nent; |
449 | |
450 | vcpu->arch.kvm_cpuid = kvm_get_hypervisor_cpuid(vcpu, KVM_SIGNATURE); |
451 | #ifdef CONFIG_KVM_XEN |
452 | vcpu->arch.xen.cpuid = kvm_get_hypervisor_cpuid(vcpu, XEN_SIGNATURE); |
453 | #endif |
454 | kvm_vcpu_after_set_cpuid(vcpu); |
455 | |
456 | return 0; |
457 | } |
458 | |
/* Legacy ioctl, used when an old userspace passes the original kvm_cpuid_entry format to a newer kernel. */
460 | int kvm_vcpu_ioctl_set_cpuid(struct kvm_vcpu *vcpu, |
461 | struct kvm_cpuid *cpuid, |
462 | struct kvm_cpuid_entry __user *entries) |
463 | { |
464 | int r, i; |
465 | struct kvm_cpuid_entry *e = NULL; |
466 | struct kvm_cpuid_entry2 *e2 = NULL; |
467 | |
468 | if (cpuid->nent > KVM_MAX_CPUID_ENTRIES) |
469 | return -E2BIG; |
470 | |
471 | if (cpuid->nent) { |
472 | e = vmemdup_user(entries, array_size(sizeof(*e), cpuid->nent)); |
		if (IS_ERR(e))
			return PTR_ERR(e);
475 | |
		e2 = kvmalloc_array(cpuid->nent, sizeof(*e2), GFP_KERNEL_ACCOUNT);
477 | if (!e2) { |
478 | r = -ENOMEM; |
479 | goto out_free_cpuid; |
480 | } |
481 | } |
482 | for (i = 0; i < cpuid->nent; i++) { |
483 | e2[i].function = e[i].function; |
484 | e2[i].eax = e[i].eax; |
485 | e2[i].ebx = e[i].ebx; |
486 | e2[i].ecx = e[i].ecx; |
487 | e2[i].edx = e[i].edx; |
488 | e2[i].index = 0; |
489 | e2[i].flags = 0; |
490 | e2[i].padding[0] = 0; |
491 | e2[i].padding[1] = 0; |
492 | e2[i].padding[2] = 0; |
493 | } |
494 | |
	r = kvm_set_cpuid(vcpu, e2, cpuid->nent);
496 | if (r) |
		kvfree(e2);
498 | |
499 | out_free_cpuid: |
	kvfree(e);
501 | |
502 | return r; |
503 | } |
504 | |
505 | int kvm_vcpu_ioctl_set_cpuid2(struct kvm_vcpu *vcpu, |
506 | struct kvm_cpuid2 *cpuid, |
507 | struct kvm_cpuid_entry2 __user *entries) |
508 | { |
509 | struct kvm_cpuid_entry2 *e2 = NULL; |
510 | int r; |
511 | |
512 | if (cpuid->nent > KVM_MAX_CPUID_ENTRIES) |
513 | return -E2BIG; |
514 | |
515 | if (cpuid->nent) { |
516 | e2 = vmemdup_user(entries, array_size(sizeof(*e2), cpuid->nent)); |
		if (IS_ERR(e2))
			return PTR_ERR(e2);
519 | } |
520 | |
	r = kvm_set_cpuid(vcpu, e2, cpuid->nent);
522 | if (r) |
		kvfree(e2);
524 | |
525 | return r; |
526 | } |
527 | |
528 | int kvm_vcpu_ioctl_get_cpuid2(struct kvm_vcpu *vcpu, |
529 | struct kvm_cpuid2 *cpuid, |
530 | struct kvm_cpuid_entry2 __user *entries) |
531 | { |
532 | if (cpuid->nent < vcpu->arch.cpuid_nent) |
533 | return -E2BIG; |
534 | |
	if (copy_to_user(entries, vcpu->arch.cpuid_entries,
			 vcpu->arch.cpuid_nent * sizeof(struct kvm_cpuid_entry2)))
537 | return -EFAULT; |
538 | |
539 | cpuid->nent = vcpu->arch.cpuid_nent; |
540 | return 0; |
541 | } |
542 | |
543 | /* Mask kvm_cpu_caps for @leaf with the raw CPUID capabilities of this CPU. */ |
544 | static __always_inline void __kvm_cpu_cap_mask(unsigned int leaf) |
545 | { |
	const struct cpuid_reg cpuid = x86_feature_cpuid(leaf * 32);
547 | struct kvm_cpuid_entry2 entry; |
548 | |
	reverse_cpuid_check(leaf);
550 | |
	cpuid_count(cpuid.function, cpuid.index,
		    &entry.eax, &entry.ebx, &entry.ecx, &entry.edx);
553 | |
	kvm_cpu_caps[leaf] &= *__cpuid_entry_get_reg(&entry, cpuid.reg);
555 | } |
556 | |
557 | static __always_inline |
558 | void kvm_cpu_cap_init_kvm_defined(enum kvm_only_cpuid_leafs leaf, u32 mask) |
559 | { |
560 | /* Use kvm_cpu_cap_mask for leafs that aren't KVM-only. */ |
561 | BUILD_BUG_ON(leaf < NCAPINTS); |
562 | |
563 | kvm_cpu_caps[leaf] = mask; |
564 | |
565 | __kvm_cpu_cap_mask(leaf); |
566 | } |
567 | |
568 | static __always_inline void kvm_cpu_cap_mask(enum cpuid_leafs leaf, u32 mask) |
569 | { |
570 | /* Use kvm_cpu_cap_init_kvm_defined for KVM-only leafs. */ |
571 | BUILD_BUG_ON(leaf >= NCAPINTS); |
572 | |
573 | kvm_cpu_caps[leaf] &= mask; |
574 | |
575 | __kvm_cpu_cap_mask(leaf); |
576 | } |
577 | |
578 | void kvm_set_cpu_caps(void) |
579 | { |
580 | #ifdef CONFIG_X86_64 |
581 | unsigned int f_gbpages = F(GBPAGES); |
582 | unsigned int f_lm = F(LM); |
583 | unsigned int f_xfd = F(XFD); |
584 | #else |
585 | unsigned int f_gbpages = 0; |
586 | unsigned int f_lm = 0; |
587 | unsigned int f_xfd = 0; |
588 | #endif |
589 | memset(kvm_cpu_caps, 0, sizeof(kvm_cpu_caps)); |
590 | |
591 | BUILD_BUG_ON(sizeof(kvm_cpu_caps) - (NKVMCAPINTS * sizeof(*kvm_cpu_caps)) > |
592 | sizeof(boot_cpu_data.x86_capability)); |
593 | |
594 | memcpy(&kvm_cpu_caps, &boot_cpu_data.x86_capability, |
595 | sizeof(kvm_cpu_caps) - (NKVMCAPINTS * sizeof(*kvm_cpu_caps))); |
596 | |
	kvm_cpu_cap_mask(CPUID_1_ECX,
598 | /* |
599 | * NOTE: MONITOR (and MWAIT) are emulated as NOP, but *not* |
600 | * advertised to guests via CPUID! |
601 | */ |
602 | F(XMM3) | F(PCLMULQDQ) | 0 /* DTES64, MONITOR */ | |
603 | 0 /* DS-CPL, VMX, SMX, EST */ | |
604 | 0 /* TM2 */ | F(SSSE3) | 0 /* CNXT-ID */ | 0 /* Reserved */ | |
605 | F(FMA) | F(CX16) | 0 /* xTPR Update */ | F(PDCM) | |
606 | F(PCID) | 0 /* Reserved, DCA */ | F(XMM4_1) | |
607 | F(XMM4_2) | F(X2APIC) | F(MOVBE) | F(POPCNT) | |
608 | 0 /* Reserved*/ | F(AES) | F(XSAVE) | 0 /* OSXSAVE */ | F(AVX) | |
609 | F(F16C) | F(RDRAND) |
610 | ); |
611 | /* KVM emulates x2apic in software irrespective of host support. */ |
612 | kvm_cpu_cap_set(X86_FEATURE_X2APIC); |
613 | |
	kvm_cpu_cap_mask(CPUID_1_EDX,
615 | F(FPU) | F(VME) | F(DE) | F(PSE) | |
616 | F(TSC) | F(MSR) | F(PAE) | F(MCE) | |
617 | F(CX8) | F(APIC) | 0 /* Reserved */ | F(SEP) | |
618 | F(MTRR) | F(PGE) | F(MCA) | F(CMOV) | |
619 | F(PAT) | F(PSE36) | 0 /* PSN */ | F(CLFLUSH) | |
620 | 0 /* Reserved, DS, ACPI */ | F(MMX) | |
621 | F(FXSR) | F(XMM) | F(XMM2) | F(SELFSNOOP) | |
622 | 0 /* HTT, TM, Reserved, PBE */ |
623 | ); |
624 | |
	kvm_cpu_cap_mask(CPUID_7_0_EBX,
626 | F(FSGSBASE) | F(SGX) | F(BMI1) | F(HLE) | F(AVX2) | |
627 | F(FDP_EXCPTN_ONLY) | F(SMEP) | F(BMI2) | F(ERMS) | F(INVPCID) | |
628 | F(RTM) | F(ZERO_FCS_FDS) | 0 /*MPX*/ | F(AVX512F) | |
629 | F(AVX512DQ) | F(RDSEED) | F(ADX) | F(SMAP) | F(AVX512IFMA) | |
630 | F(CLFLUSHOPT) | F(CLWB) | 0 /*INTEL_PT*/ | F(AVX512PF) | |
631 | F(AVX512ER) | F(AVX512CD) | F(SHA_NI) | F(AVX512BW) | |
632 | F(AVX512VL)); |
633 | |
	kvm_cpu_cap_mask(CPUID_7_ECX,
635 | F(AVX512VBMI) | F(LA57) | F(PKU) | 0 /*OSPKE*/ | F(RDPID) | |
636 | F(AVX512_VPOPCNTDQ) | F(UMIP) | F(AVX512_VBMI2) | F(GFNI) | |
637 | F(VAES) | F(VPCLMULQDQ) | F(AVX512_VNNI) | F(AVX512_BITALG) | |
638 | F(CLDEMOTE) | F(MOVDIRI) | F(MOVDIR64B) | 0 /*WAITPKG*/ | |
639 | F(SGX_LC) | F(BUS_LOCK_DETECT) |
640 | ); |
641 | /* Set LA57 based on hardware capability. */ |
	if (cpuid_ecx(7) & F(LA57))
643 | kvm_cpu_cap_set(X86_FEATURE_LA57); |
644 | |
645 | /* |
646 | * PKU not yet implemented for shadow paging and requires OSPKE |
647 | * to be set on the host. Clear it if that is not the case |
648 | */ |
649 | if (!tdp_enabled || !boot_cpu_has(X86_FEATURE_OSPKE)) |
650 | kvm_cpu_cap_clear(X86_FEATURE_PKU); |
651 | |
	kvm_cpu_cap_mask(CPUID_7_EDX,
653 | F(AVX512_4VNNIW) | F(AVX512_4FMAPS) | F(SPEC_CTRL) | |
654 | F(SPEC_CTRL_SSBD) | F(ARCH_CAPABILITIES) | F(INTEL_STIBP) | |
655 | F(MD_CLEAR) | F(AVX512_VP2INTERSECT) | F(FSRM) | |
656 | F(SERIALIZE) | F(TSXLDTRK) | F(AVX512_FP16) | |
657 | F(AMX_TILE) | F(AMX_INT8) | F(AMX_BF16) | F(FLUSH_L1D) |
658 | ); |
659 | |
660 | /* TSC_ADJUST and ARCH_CAPABILITIES are emulated in software. */ |
661 | kvm_cpu_cap_set(X86_FEATURE_TSC_ADJUST); |
662 | kvm_cpu_cap_set(X86_FEATURE_ARCH_CAPABILITIES); |
663 | |
664 | if (boot_cpu_has(X86_FEATURE_IBPB) && boot_cpu_has(X86_FEATURE_IBRS)) |
665 | kvm_cpu_cap_set(X86_FEATURE_SPEC_CTRL); |
666 | if (boot_cpu_has(X86_FEATURE_STIBP)) |
667 | kvm_cpu_cap_set(X86_FEATURE_INTEL_STIBP); |
668 | if (boot_cpu_has(X86_FEATURE_AMD_SSBD)) |
669 | kvm_cpu_cap_set(X86_FEATURE_SPEC_CTRL_SSBD); |
670 | |
	kvm_cpu_cap_mask(CPUID_7_1_EAX,
672 | F(AVX_VNNI) | F(AVX512_BF16) | F(CMPCCXADD) | |
673 | F(FZRM) | F(FSRS) | F(FSRC) | |
674 | F(AMX_FP16) | F(AVX_IFMA) |
675 | ); |
676 | |
	kvm_cpu_cap_init_kvm_defined(CPUID_7_1_EDX,
678 | F(AVX_VNNI_INT8) | F(AVX_NE_CONVERT) | F(PREFETCHITI) | |
679 | F(AMX_COMPLEX) |
680 | ); |
681 | |
	kvm_cpu_cap_mask(CPUID_D_1_EAX,
683 | F(XSAVEOPT) | F(XSAVEC) | F(XGETBV1) | F(XSAVES) | f_xfd |
684 | ); |
685 | |
	kvm_cpu_cap_init_kvm_defined(CPUID_12_EAX,
687 | SF(SGX1) | SF(SGX2) | SF(SGX_EDECCSSA) |
688 | ); |
689 | |
	kvm_cpu_cap_mask(CPUID_8000_0001_ECX,
691 | F(LAHF_LM) | F(CMP_LEGACY) | 0 /*SVM*/ | 0 /* ExtApicSpace */ | |
692 | F(CR8_LEGACY) | F(ABM) | F(SSE4A) | F(MISALIGNSSE) | |
693 | F(3DNOWPREFETCH) | F(OSVW) | 0 /* IBS */ | F(XOP) | |
694 | 0 /* SKINIT, WDT, LWP */ | F(FMA4) | F(TBM) | |
695 | F(TOPOEXT) | 0 /* PERFCTR_CORE */ |
696 | ); |
697 | |
	kvm_cpu_cap_mask(CPUID_8000_0001_EDX,
699 | F(FPU) | F(VME) | F(DE) | F(PSE) | |
700 | F(TSC) | F(MSR) | F(PAE) | F(MCE) | |
701 | F(CX8) | F(APIC) | 0 /* Reserved */ | F(SYSCALL) | |
702 | F(MTRR) | F(PGE) | F(MCA) | F(CMOV) | |
703 | F(PAT) | F(PSE36) | 0 /* Reserved */ | |
704 | F(NX) | 0 /* Reserved */ | F(MMXEXT) | F(MMX) | |
705 | F(FXSR) | F(FXSR_OPT) | f_gbpages | F(RDTSCP) | |
706 | 0 /* Reserved */ | f_lm | F(3DNOWEXT) | F(3DNOW) |
707 | ); |
708 | |
709 | if (!tdp_enabled && IS_ENABLED(CONFIG_X86_64)) |
710 | kvm_cpu_cap_set(X86_FEATURE_GBPAGES); |
711 | |
	kvm_cpu_cap_init_kvm_defined(CPUID_8000_0007_EDX,
713 | SF(CONSTANT_TSC) |
714 | ); |
715 | |
	kvm_cpu_cap_mask(CPUID_8000_0008_EBX,
717 | F(CLZERO) | F(XSAVEERPTR) | |
718 | F(WBNOINVD) | F(AMD_IBPB) | F(AMD_IBRS) | F(AMD_SSBD) | F(VIRT_SSBD) | |
719 | F(AMD_SSB_NO) | F(AMD_STIBP) | F(AMD_STIBP_ALWAYS_ON) | |
720 | F(AMD_PSFD) |
721 | ); |
722 | |
723 | /* |
724 | * AMD has separate bits for each SPEC_CTRL bit. |
725 | * arch/x86/kernel/cpu/bugs.c is kind enough to |
726 | * record that in cpufeatures so use them. |
727 | */ |
728 | if (boot_cpu_has(X86_FEATURE_IBPB)) |
729 | kvm_cpu_cap_set(X86_FEATURE_AMD_IBPB); |
730 | if (boot_cpu_has(X86_FEATURE_IBRS)) |
731 | kvm_cpu_cap_set(X86_FEATURE_AMD_IBRS); |
732 | if (boot_cpu_has(X86_FEATURE_STIBP)) |
733 | kvm_cpu_cap_set(X86_FEATURE_AMD_STIBP); |
734 | if (boot_cpu_has(X86_FEATURE_SPEC_CTRL_SSBD)) |
735 | kvm_cpu_cap_set(X86_FEATURE_AMD_SSBD); |
736 | if (!boot_cpu_has_bug(X86_BUG_SPEC_STORE_BYPASS)) |
737 | kvm_cpu_cap_set(X86_FEATURE_AMD_SSB_NO); |
738 | /* |
739 | * The preference is to use SPEC CTRL MSR instead of the |
740 | * VIRT_SPEC MSR. |
741 | */ |
742 | if (boot_cpu_has(X86_FEATURE_LS_CFG_SSBD) && |
743 | !boot_cpu_has(X86_FEATURE_AMD_SSBD)) |
744 | kvm_cpu_cap_set(X86_FEATURE_VIRT_SSBD); |
745 | |
746 | /* |
747 | * Hide all SVM features by default, SVM will set the cap bits for |
748 | * features it emulates and/or exposes for L1. |
749 | */ |
	kvm_cpu_cap_mask(CPUID_8000_000A_EDX, 0);
751 | |
	kvm_cpu_cap_mask(CPUID_8000_001F_EAX,
		0 /* SME */ | F(SEV) | 0 /* VM_PAGE_FLUSH */ | F(SEV_ES) |
754 | F(SME_COHERENT)); |
755 | |
	kvm_cpu_cap_mask(CPUID_8000_0021_EAX,
757 | F(NO_NESTED_DATA_BP) | F(LFENCE_RDTSC) | 0 /* SmmPgCfgLock */ | |
758 | F(NULL_SEL_CLR_BASE) | F(AUTOIBRS) | 0 /* PrefetchCtlMsr */ | |
759 | F(WRMSR_XX_BASE_NS) |
760 | ); |
761 | |
762 | kvm_cpu_cap_check_and_set(X86_FEATURE_SBPB); |
763 | kvm_cpu_cap_check_and_set(X86_FEATURE_IBPB_BRTYPE); |
764 | kvm_cpu_cap_check_and_set(X86_FEATURE_SRSO_NO); |
765 | |
	kvm_cpu_cap_init_kvm_defined(CPUID_8000_0022_EAX,
767 | F(PERFMON_V2) |
768 | ); |
769 | |
770 | /* |
771 | * Synthesize "LFENCE is serializing" into the AMD-defined entry in |
772 | * KVM's supported CPUID if the feature is reported as supported by the |
773 | * kernel. LFENCE_RDTSC was a Linux-defined synthetic feature long |
774 | * before AMD joined the bandwagon, e.g. LFENCE is serializing on most |
775 | * CPUs that support SSE2. On CPUs that don't support AMD's leaf, |
776 | * kvm_cpu_cap_mask() will unfortunately drop the flag due to ANDing |
777 | * the mask with the raw host CPUID, and reporting support in AMD's |
778 | * leaf can make it easier for userspace to detect the feature. |
779 | */ |
780 | if (cpu_feature_enabled(X86_FEATURE_LFENCE_RDTSC)) |
781 | kvm_cpu_cap_set(X86_FEATURE_LFENCE_RDTSC); |
782 | if (!static_cpu_has_bug(X86_BUG_NULL_SEG)) |
783 | kvm_cpu_cap_set(X86_FEATURE_NULL_SEL_CLR_BASE); |
784 | kvm_cpu_cap_set(X86_FEATURE_NO_SMM_CTL_MSR); |
785 | |
	kvm_cpu_cap_mask(CPUID_C000_0001_EDX,
787 | F(XSTORE) | F(XSTORE_EN) | F(XCRYPT) | F(XCRYPT_EN) | |
788 | F(ACE2) | F(ACE2_EN) | F(PHE) | F(PHE_EN) | |
789 | F(PMM) | F(PMM_EN) |
790 | ); |
791 | |
792 | /* |
793 | * Hide RDTSCP and RDPID if either feature is reported as supported but |
794 | * probing MSR_TSC_AUX failed. This is purely a sanity check and |
795 | * should never happen, but the guest will likely crash if RDTSCP or |
796 | * RDPID is misreported, and KVM has botched MSR_TSC_AUX emulation in |
797 | * the past. For example, the sanity check may fire if this instance of |
798 | * KVM is running as L1 on top of an older, broken KVM. |
799 | */ |
800 | if (WARN_ON((kvm_cpu_cap_has(X86_FEATURE_RDTSCP) || |
801 | kvm_cpu_cap_has(X86_FEATURE_RDPID)) && |
802 | !kvm_is_supported_user_return_msr(MSR_TSC_AUX))) { |
803 | kvm_cpu_cap_clear(X86_FEATURE_RDTSCP); |
804 | kvm_cpu_cap_clear(X86_FEATURE_RDPID); |
805 | } |
806 | } |
807 | EXPORT_SYMBOL_GPL(kvm_set_cpu_caps); |
808 | |
809 | struct kvm_cpuid_array { |
810 | struct kvm_cpuid_entry2 *entries; |
811 | int maxnent; |
812 | int nent; |
813 | }; |
814 | |
815 | static struct kvm_cpuid_entry2 *get_next_cpuid(struct kvm_cpuid_array *array) |
816 | { |
817 | if (array->nent >= array->maxnent) |
818 | return NULL; |
819 | |
820 | return &array->entries[array->nent++]; |
821 | } |
822 | |
823 | static struct kvm_cpuid_entry2 *do_host_cpuid(struct kvm_cpuid_array *array, |
824 | u32 function, u32 index) |
825 | { |
826 | struct kvm_cpuid_entry2 *entry = get_next_cpuid(array); |
827 | |
828 | if (!entry) |
829 | return NULL; |
830 | |
831 | memset(entry, 0, sizeof(*entry)); |
832 | entry->function = function; |
833 | entry->index = index; |
834 | switch (function & 0xC0000000) { |
835 | case 0x40000000: |
836 | /* Hypervisor leaves are always synthesized by __do_cpuid_func. */ |
837 | return entry; |
838 | |
839 | case 0x80000000: |
840 | /* |
841 | * 0x80000021 is sometimes synthesized by __do_cpuid_func, which |
842 | * would result in out-of-bounds calls to do_host_cpuid. |
843 | */ |
844 | { |
845 | static int max_cpuid_80000000; |
846 | if (!READ_ONCE(max_cpuid_80000000)) |
847 | WRITE_ONCE(max_cpuid_80000000, cpuid_eax(0x80000000)); |
848 | if (function > READ_ONCE(max_cpuid_80000000)) |
849 | return entry; |
850 | } |
851 | break; |
852 | |
853 | default: |
854 | break; |
855 | } |
856 | |
	cpuid_count(entry->function, entry->index,
		    &entry->eax, &entry->ebx, &entry->ecx, &entry->edx);
859 | |
860 | if (cpuid_function_is_indexed(function)) |
861 | entry->flags |= KVM_CPUID_FLAG_SIGNIFCANT_INDEX; |
862 | |
863 | return entry; |
864 | } |
865 | |
866 | static int __do_cpuid_func_emulated(struct kvm_cpuid_array *array, u32 func) |
867 | { |
868 | struct kvm_cpuid_entry2 *entry; |
869 | |
870 | if (array->nent >= array->maxnent) |
871 | return -E2BIG; |
872 | |
873 | entry = &array->entries[array->nent]; |
874 | entry->function = func; |
875 | entry->index = 0; |
876 | entry->flags = 0; |
877 | |
878 | switch (func) { |
879 | case 0: |
880 | entry->eax = 7; |
881 | ++array->nent; |
882 | break; |
883 | case 1: |
884 | entry->ecx = F(MOVBE); |
885 | ++array->nent; |
886 | break; |
887 | case 7: |
888 | entry->flags |= KVM_CPUID_FLAG_SIGNIFCANT_INDEX; |
889 | entry->eax = 0; |
890 | if (kvm_cpu_cap_has(X86_FEATURE_RDTSCP)) |
891 | entry->ecx = F(RDPID); |
892 | ++array->nent; |
893 | break; |
894 | default: |
895 | break; |
896 | } |
897 | |
898 | return 0; |
899 | } |
900 | |
901 | static inline int __do_cpuid_func(struct kvm_cpuid_array *array, u32 function) |
902 | { |
903 | struct kvm_cpuid_entry2 *entry; |
904 | int r, i, max_idx; |
905 | |
906 | /* all calls to cpuid_count() should be made on the same cpu */ |
907 | get_cpu(); |
908 | |
909 | r = -E2BIG; |
910 | |
	entry = do_host_cpuid(array, function, 0);
912 | if (!entry) |
913 | goto out; |
914 | |
915 | switch (function) { |
916 | case 0: |
917 | /* Limited to the highest leaf implemented in KVM. */ |
918 | entry->eax = min(entry->eax, 0x1fU); |
919 | break; |
920 | case 1: |
		cpuid_entry_override(entry, CPUID_1_EDX);
		cpuid_entry_override(entry, CPUID_1_ECX);
923 | break; |
924 | case 2: |
925 | /* |
926 | * On ancient CPUs, function 2 entries are STATEFUL. That is, |
927 | * CPUID(function=2, index=0) may return different results each |
928 | * time, with the least-significant byte in EAX enumerating the |
929 | * number of times software should do CPUID(2, 0). |
930 | * |
		 * Modern CPUs, i.e. every CPU KVM has *ever* run on, are less
932 | * idiotic. Intel's SDM states that EAX & 0xff "will always |
933 | * return 01H. Software should ignore this value and not |
934 | * interpret it as an informational descriptor", while AMD's |
935 | * APM states that CPUID(2) is reserved. |
936 | * |
937 | * WARN if a frankenstein CPU that supports virtualization and |
938 | * a stateful CPUID.0x2 is encountered. |
939 | */ |
940 | WARN_ON_ONCE((entry->eax & 0xff) > 1); |
941 | break; |
942 | /* functions 4 and 0x8000001d have additional index. */ |
943 | case 4: |
944 | case 0x8000001d: |
945 | /* |
946 | * Read entries until the cache type in the previous entry is |
947 | * zero, i.e. indicates an invalid entry. |
948 | */ |
949 | for (i = 1; entry->eax & 0x1f; ++i) { |
			entry = do_host_cpuid(array, function, i);
951 | if (!entry) |
952 | goto out; |
953 | } |
954 | break; |
955 | case 6: /* Thermal management */ |
956 | entry->eax = 0x4; /* allow ARAT */ |
957 | entry->ebx = 0; |
958 | entry->ecx = 0; |
959 | entry->edx = 0; |
960 | break; |
961 | /* function 7 has additional index. */ |
962 | case 7: |
963 | entry->eax = min(entry->eax, 1u); |
		cpuid_entry_override(entry, CPUID_7_0_EBX);
		cpuid_entry_override(entry, CPUID_7_ECX);
		cpuid_entry_override(entry, CPUID_7_EDX);
967 | |
968 | /* KVM only supports 0x7.0 and 0x7.1, capped above via min(). */ |
969 | if (entry->eax == 1) { |
			entry = do_host_cpuid(array, function, 1);
971 | if (!entry) |
972 | goto out; |
973 | |
			cpuid_entry_override(entry, CPUID_7_1_EAX);
			cpuid_entry_override(entry, CPUID_7_1_EDX);
976 | entry->ebx = 0; |
977 | entry->ecx = 0; |
978 | } |
979 | break; |
980 | case 0xa: { /* Architectural Performance Monitoring */ |
981 | union cpuid10_eax eax; |
982 | union cpuid10_edx edx; |
983 | |
984 | if (!enable_pmu || !static_cpu_has(X86_FEATURE_ARCH_PERFMON)) { |
985 | entry->eax = entry->ebx = entry->ecx = entry->edx = 0; |
986 | break; |
987 | } |
988 | |
989 | eax.split.version_id = kvm_pmu_cap.version; |
990 | eax.split.num_counters = kvm_pmu_cap.num_counters_gp; |
991 | eax.split.bit_width = kvm_pmu_cap.bit_width_gp; |
992 | eax.split.mask_length = kvm_pmu_cap.events_mask_len; |
993 | edx.split.num_counters_fixed = kvm_pmu_cap.num_counters_fixed; |
994 | edx.split.bit_width_fixed = kvm_pmu_cap.bit_width_fixed; |
995 | |
996 | if (kvm_pmu_cap.version) |
997 | edx.split.anythread_deprecated = 1; |
998 | edx.split.reserved1 = 0; |
999 | edx.split.reserved2 = 0; |
1000 | |
1001 | entry->eax = eax.full; |
1002 | entry->ebx = kvm_pmu_cap.events_mask; |
1003 | entry->ecx = 0; |
1004 | entry->edx = edx.full; |
1005 | break; |
1006 | } |
1007 | case 0x1f: |
1008 | case 0xb: |
1009 | /* |
1010 | * No topology; a valid topology is indicated by the presence |
1011 | * of subleaf 1. |
1012 | */ |
1013 | entry->eax = entry->ebx = entry->ecx = 0; |
1014 | break; |
1015 | case 0xd: { |
1016 | u64 permitted_xcr0 = kvm_get_filtered_xcr0(); |
1017 | u64 permitted_xss = kvm_caps.supported_xss; |
1018 | |
1019 | entry->eax &= permitted_xcr0; |
		entry->ebx = xstate_required_size(permitted_xcr0, false);
1021 | entry->ecx = entry->ebx; |
1022 | entry->edx &= permitted_xcr0 >> 32; |
1023 | if (!permitted_xcr0) |
1024 | break; |
1025 | |
		entry = do_host_cpuid(array, function, 1);
1027 | if (!entry) |
1028 | goto out; |
1029 | |
		cpuid_entry_override(entry, CPUID_D_1_EAX);
1031 | if (entry->eax & (F(XSAVES)|F(XSAVEC))) |
			entry->ebx = xstate_required_size(permitted_xcr0 | permitted_xss,
							  true);
1034 | else { |
1035 | WARN_ON_ONCE(permitted_xss != 0); |
1036 | entry->ebx = 0; |
1037 | } |
1038 | entry->ecx &= permitted_xss; |
1039 | entry->edx &= permitted_xss >> 32; |
1040 | |
1041 | for (i = 2; i < 64; ++i) { |
1042 | bool s_state; |
1043 | if (permitted_xcr0 & BIT_ULL(i)) |
1044 | s_state = false; |
1045 | else if (permitted_xss & BIT_ULL(i)) |
1046 | s_state = true; |
1047 | else |
1048 | continue; |
1049 | |
			entry = do_host_cpuid(array, function, i);
1051 | if (!entry) |
1052 | goto out; |
1053 | |
1054 | /* |
1055 | * The supported check above should have filtered out |
1056 | * invalid sub-leafs. Only valid sub-leafs should |
1057 | * reach this point, and they should have a non-zero |
1058 | * save state size. Furthermore, check whether the |
1059 | * processor agrees with permitted_xcr0/permitted_xss |
1060 | * on whether this is an XCR0- or IA32_XSS-managed area. |
1061 | */ |
1062 | if (WARN_ON_ONCE(!entry->eax || (entry->ecx & 0x1) != s_state)) { |
1063 | --array->nent; |
1064 | continue; |
1065 | } |
1066 | |
1067 | if (!kvm_cpu_cap_has(X86_FEATURE_XFD)) |
1068 | entry->ecx &= ~BIT_ULL(2); |
1069 | entry->edx = 0; |
1070 | } |
1071 | break; |
1072 | } |
1073 | case 0x12: |
1074 | /* Intel SGX */ |
1075 | if (!kvm_cpu_cap_has(X86_FEATURE_SGX)) { |
1076 | entry->eax = entry->ebx = entry->ecx = entry->edx = 0; |
1077 | break; |
1078 | } |
1079 | |
1080 | /* |
1081 | * Index 0: Sub-features, MISCSELECT (a.k.a extended features) |
1082 | * and max enclave sizes. The SGX sub-features and MISCSELECT |
1083 | * are restricted by kernel and KVM capabilities (like most |
1084 | * feature flags), while enclave size is unrestricted. |
1085 | */ |
		cpuid_entry_override(entry, CPUID_12_EAX);
1087 | entry->ebx &= SGX_MISC_EXINFO; |
1088 | |
		entry = do_host_cpuid(array, function, 1);
1090 | if (!entry) |
1091 | goto out; |
1092 | |
1093 | /* |
1094 | * Index 1: SECS.ATTRIBUTES. ATTRIBUTES are restricted a la |
1095 | * feature flags. Advertise all supported flags, including |
1096 | * privileged attributes that require explicit opt-in from |
1097 | * userspace. ATTRIBUTES.XFRM is not adjusted as userspace is |
1098 | * expected to derive it from supported XCR0. |
1099 | */ |
1100 | entry->eax &= SGX_ATTR_PRIV_MASK | SGX_ATTR_UNPRIV_MASK; |
1101 | entry->ebx &= 0; |
1102 | break; |
1103 | /* Intel PT */ |
1104 | case 0x14: |
1105 | if (!kvm_cpu_cap_has(X86_FEATURE_INTEL_PT)) { |
1106 | entry->eax = entry->ebx = entry->ecx = entry->edx = 0; |
1107 | break; |
1108 | } |
1109 | |
1110 | for (i = 1, max_idx = entry->eax; i <= max_idx; ++i) { |
			if (!do_host_cpuid(array, function, i))
1112 | goto out; |
1113 | } |
1114 | break; |
1115 | /* Intel AMX TILE */ |
1116 | case 0x1d: |
1117 | if (!kvm_cpu_cap_has(X86_FEATURE_AMX_TILE)) { |
1118 | entry->eax = entry->ebx = entry->ecx = entry->edx = 0; |
1119 | break; |
1120 | } |
1121 | |
1122 | for (i = 1, max_idx = entry->eax; i <= max_idx; ++i) { |
			if (!do_host_cpuid(array, function, i))
1124 | goto out; |
1125 | } |
1126 | break; |
1127 | case 0x1e: /* TMUL information */ |
1128 | if (!kvm_cpu_cap_has(X86_FEATURE_AMX_TILE)) { |
1129 | entry->eax = entry->ebx = entry->ecx = entry->edx = 0; |
1130 | break; |
1131 | } |
1132 | break; |
1133 | case KVM_CPUID_SIGNATURE: { |
1134 | const u32 *sigptr = (const u32 *)KVM_SIGNATURE; |
1135 | entry->eax = KVM_CPUID_FEATURES; |
1136 | entry->ebx = sigptr[0]; |
1137 | entry->ecx = sigptr[1]; |
1138 | entry->edx = sigptr[2]; |
1139 | break; |
1140 | } |
1141 | case KVM_CPUID_FEATURES: |
1142 | entry->eax = (1 << KVM_FEATURE_CLOCKSOURCE) | |
1143 | (1 << KVM_FEATURE_NOP_IO_DELAY) | |
1144 | (1 << KVM_FEATURE_CLOCKSOURCE2) | |
1145 | (1 << KVM_FEATURE_ASYNC_PF) | |
1146 | (1 << KVM_FEATURE_PV_EOI) | |
1147 | (1 << KVM_FEATURE_CLOCKSOURCE_STABLE_BIT) | |
1148 | (1 << KVM_FEATURE_PV_UNHALT) | |
1149 | (1 << KVM_FEATURE_PV_TLB_FLUSH) | |
1150 | (1 << KVM_FEATURE_ASYNC_PF_VMEXIT) | |
1151 | (1 << KVM_FEATURE_PV_SEND_IPI) | |
1152 | (1 << KVM_FEATURE_POLL_CONTROL) | |
1153 | (1 << KVM_FEATURE_PV_SCHED_YIELD) | |
1154 | (1 << KVM_FEATURE_ASYNC_PF_INT); |
1155 | |
1156 | if (sched_info_on()) |
1157 | entry->eax |= (1 << KVM_FEATURE_STEAL_TIME); |
1158 | |
1159 | entry->ebx = 0; |
1160 | entry->ecx = 0; |
1161 | entry->edx = 0; |
1162 | break; |
1163 | case 0x80000000: |
1164 | entry->eax = min(entry->eax, 0x80000022); |
1165 | /* |
1166 | * Serializing LFENCE is reported in a multitude of ways, and |
1167 | * NullSegClearsBase is not reported in CPUID on Zen2; help |
1168 | * userspace by providing the CPUID leaf ourselves. |
1169 | * |
1170 | * However, only do it if the host has CPUID leaf 0x8000001d. |
1171 | * QEMU thinks that it can query the host blindly for that |
1172 | * CPUID leaf if KVM reports that it supports 0x8000001d or |
1173 | * above. The processor merrily returns values from the |
1174 | * highest Intel leaf which QEMU tries to use as the guest's |
1175 | * 0x8000001d. Even worse, this can result in an infinite |
1176 | * loop if said highest leaf has no subleaves indexed by ECX. |
1177 | */ |
1178 | if (entry->eax >= 0x8000001d && |
1179 | (static_cpu_has(X86_FEATURE_LFENCE_RDTSC) |
1180 | || !static_cpu_has_bug(X86_BUG_NULL_SEG))) |
1181 | entry->eax = max(entry->eax, 0x80000021); |
1182 | break; |
1183 | case 0x80000001: |
1184 | entry->ebx &= ~GENMASK(27, 16); |
		cpuid_entry_override(entry, CPUID_8000_0001_EDX);
		cpuid_entry_override(entry, CPUID_8000_0001_ECX);
1187 | break; |
1188 | case 0x80000005: |
1189 | /* Pass host L1 cache and TLB info. */ |
1190 | break; |
1191 | case 0x80000006: |
1192 | /* Drop reserved bits, pass host L2 cache and TLB info. */ |
1193 | entry->edx &= ~GENMASK(17, 16); |
1194 | break; |
1195 | case 0x80000007: /* Advanced power management */ |
		cpuid_entry_override(entry, CPUID_8000_0007_EDX);
1197 | |
1198 | /* mask against host */ |
1199 | entry->edx &= boot_cpu_data.x86_power; |
1200 | entry->eax = entry->ebx = entry->ecx = 0; |
1201 | break; |
1202 | case 0x80000008: { |
1203 | unsigned g_phys_as = (entry->eax >> 16) & 0xff; |
1204 | unsigned virt_as = max((entry->eax >> 8) & 0xff, 48U); |
1205 | unsigned phys_as = entry->eax & 0xff; |
1206 | |
1207 | /* |
1208 | * If TDP (NPT) is disabled use the adjusted host MAXPHYADDR as |
1209 | * the guest operates in the same PA space as the host, i.e. |
1210 | * reductions in MAXPHYADDR for memory encryption affect shadow |
1211 | * paging, too. |
1212 | * |
1213 | * If TDP is enabled but an explicit guest MAXPHYADDR is not |
1214 | * provided, use the raw bare metal MAXPHYADDR as reductions to |
1215 | * the HPAs do not affect GPAs. |
1216 | */ |
1217 | if (!tdp_enabled) |
1218 | g_phys_as = boot_cpu_data.x86_phys_bits; |
1219 | else if (!g_phys_as) |
1220 | g_phys_as = phys_as; |
1221 | |
1222 | entry->eax = g_phys_as | (virt_as << 8); |
1223 | entry->ecx &= ~(GENMASK(31, 16) | GENMASK(11, 8)); |
1224 | entry->edx = 0; |
		cpuid_entry_override(entry, CPUID_8000_0008_EBX);
1226 | break; |
1227 | } |
1228 | case 0x8000000A: |
1229 | if (!kvm_cpu_cap_has(X86_FEATURE_SVM)) { |
1230 | entry->eax = entry->ebx = entry->ecx = entry->edx = 0; |
1231 | break; |
1232 | } |
1233 | entry->eax = 1; /* SVM revision 1 */ |
		entry->ebx = 8; /* Let's support 8 ASIDs in case we add proper
				   ASID emulation to nested SVM */
1236 | entry->ecx = 0; /* Reserved */ |
		cpuid_entry_override(entry, CPUID_8000_000A_EDX);
1238 | break; |
1239 | case 0x80000019: |
1240 | entry->ecx = entry->edx = 0; |
1241 | break; |
1242 | case 0x8000001a: |
1243 | entry->eax &= GENMASK(2, 0); |
1244 | entry->ebx = entry->ecx = entry->edx = 0; |
1245 | break; |
1246 | case 0x8000001e: |
1247 | /* Do not return host topology information. */ |
1248 | entry->eax = entry->ebx = entry->ecx = 0; |
1249 | entry->edx = 0; /* reserved */ |
1250 | break; |
1251 | case 0x8000001F: |
1252 | if (!kvm_cpu_cap_has(X86_FEATURE_SEV)) { |
1253 | entry->eax = entry->ebx = entry->ecx = entry->edx = 0; |
1254 | } else { |
			cpuid_entry_override(entry, CPUID_8000_001F_EAX);
1256 | /* Clear NumVMPL since KVM does not support VMPL. */ |
1257 | entry->ebx &= ~GENMASK(31, 12); |
1258 | /* |
1259 | * Enumerate '0' for "PA bits reduction", the adjusted |
1260 | * MAXPHYADDR is enumerated directly (see 0x80000008). |
1261 | */ |
1262 | entry->ebx &= ~GENMASK(11, 6); |
1263 | } |
1264 | break; |
1265 | case 0x80000020: |
1266 | entry->eax = entry->ebx = entry->ecx = entry->edx = 0; |
1267 | break; |
1268 | case 0x80000021: |
1269 | entry->ebx = entry->ecx = entry->edx = 0; |
		cpuid_entry_override(entry, CPUID_8000_0021_EAX);
1271 | break; |
1272 | /* AMD Extended Performance Monitoring and Debug */ |
1273 | case 0x80000022: { |
1274 | union cpuid_0x80000022_ebx ebx; |
1275 | |
1276 | entry->ecx = entry->edx = 0; |
1277 | if (!enable_pmu || !kvm_cpu_cap_has(X86_FEATURE_PERFMON_V2)) { |
			entry->eax = entry->ebx = 0;
1279 | break; |
1280 | } |
1281 | |
		cpuid_entry_override(entry, CPUID_8000_0022_EAX);
1283 | |
1284 | if (kvm_cpu_cap_has(X86_FEATURE_PERFMON_V2)) |
1285 | ebx.split.num_core_pmc = kvm_pmu_cap.num_counters_gp; |
1286 | else if (kvm_cpu_cap_has(X86_FEATURE_PERFCTR_CORE)) |
1287 | ebx.split.num_core_pmc = AMD64_NUM_COUNTERS_CORE; |
1288 | else |
1289 | ebx.split.num_core_pmc = AMD64_NUM_COUNTERS; |
1290 | |
1291 | entry->ebx = ebx.full; |
1292 | break; |
1293 | } |
	/* Add support for Centaur's CPUID instruction. */
1295 | case 0xC0000000: |
		/* Just support up to 0xC0000004 now. */
1297 | entry->eax = min(entry->eax, 0xC0000004); |
1298 | break; |
1299 | case 0xC0000001: |
		cpuid_entry_override(entry, CPUID_C000_0001_EDX);
1301 | break; |
1302 | case 3: /* Processor serial number */ |
1303 | case 5: /* MONITOR/MWAIT */ |
1304 | case 0xC0000002: |
1305 | case 0xC0000003: |
1306 | case 0xC0000004: |
1307 | default: |
1308 | entry->eax = entry->ebx = entry->ecx = entry->edx = 0; |
1309 | break; |
1310 | } |
1311 | |
1312 | r = 0; |
1313 | |
1314 | out: |
1315 | put_cpu(); |
1316 | |
1317 | return r; |
1318 | } |
1319 | |
1320 | static int do_cpuid_func(struct kvm_cpuid_array *array, u32 func, |
1321 | unsigned int type) |
1322 | { |
1323 | if (type == KVM_GET_EMULATED_CPUID) |
1324 | return __do_cpuid_func_emulated(array, func); |
1325 | |
	return __do_cpuid_func(array, func);
1327 | } |
1328 | |
1329 | #define CENTAUR_CPUID_SIGNATURE 0xC0000000 |
1330 | |
1331 | static int get_cpuid_func(struct kvm_cpuid_array *array, u32 func, |
1332 | unsigned int type) |
1333 | { |
1334 | u32 limit; |
1335 | int r; |
1336 | |
1337 | if (func == CENTAUR_CPUID_SIGNATURE && |
1338 | boot_cpu_data.x86_vendor != X86_VENDOR_CENTAUR) |
1339 | return 0; |
1340 | |
1341 | r = do_cpuid_func(array, func, type); |
1342 | if (r) |
1343 | return r; |
1344 | |
1345 | limit = array->entries[array->nent - 1].eax; |
1346 | for (func = func + 1; func <= limit; ++func) { |
1347 | r = do_cpuid_func(array, func, type); |
1348 | if (r) |
1349 | break; |
1350 | } |
1351 | |
1352 | return r; |
1353 | } |
1354 | |
1355 | static bool sanity_check_entries(struct kvm_cpuid_entry2 __user *entries, |
1356 | __u32 num_entries, unsigned int ioctl_type) |
1357 | { |
1358 | int i; |
1359 | __u32 pad[3]; |
1360 | |
1361 | if (ioctl_type != KVM_GET_EMULATED_CPUID) |
1362 | return false; |
1363 | |
1364 | /* |
1365 | * We want to make sure that ->padding is being passed clean from |
1366 | * userspace in case we want to use it for something in the future. |
1367 | * |
1368 | * Sadly, this wasn't enforced for KVM_GET_SUPPORTED_CPUID and so we |
	 * have to content ourselves with checking only the emulated side. /me
1370 | * sheds a tear. |
1371 | */ |
1372 | for (i = 0; i < num_entries; i++) { |
		if (copy_from_user(pad, entries[i].padding, sizeof(pad)))
1374 | return true; |
1375 | |
1376 | if (pad[0] || pad[1] || pad[2]) |
1377 | return true; |
1378 | } |
1379 | return false; |
1380 | } |
1381 | |
1382 | int kvm_dev_ioctl_get_cpuid(struct kvm_cpuid2 *cpuid, |
1383 | struct kvm_cpuid_entry2 __user *entries, |
1384 | unsigned int type) |
1385 | { |
1386 | static const u32 funcs[] = { |
1387 | 0, 0x80000000, CENTAUR_CPUID_SIGNATURE, KVM_CPUID_SIGNATURE, |
1388 | }; |
1389 | |
1390 | struct kvm_cpuid_array array = { |
1391 | .nent = 0, |
1392 | }; |
1393 | int r, i; |
1394 | |
1395 | if (cpuid->nent < 1) |
1396 | return -E2BIG; |
1397 | if (cpuid->nent > KVM_MAX_CPUID_ENTRIES) |
1398 | cpuid->nent = KVM_MAX_CPUID_ENTRIES; |
1399 | |
	if (sanity_check_entries(entries, cpuid->nent, type))
1401 | return -EINVAL; |
1402 | |
	array.entries = kvcalloc(cpuid->nent, sizeof(struct kvm_cpuid_entry2), GFP_KERNEL);
1404 | if (!array.entries) |
1405 | return -ENOMEM; |
1406 | |
1407 | array.maxnent = cpuid->nent; |
1408 | |
1409 | for (i = 0; i < ARRAY_SIZE(funcs); i++) { |
		r = get_cpuid_func(&array, funcs[i], type);
1411 | if (r) |
1412 | goto out_free; |
1413 | } |
1414 | cpuid->nent = array.nent; |
1415 | |
	if (copy_to_user(entries, array.entries,
			 array.nent * sizeof(struct kvm_cpuid_entry2)))
1418 | r = -EFAULT; |
1419 | |
1420 | out_free: |
	kvfree(array.entries);
1422 | return r; |
1423 | } |
1424 | |
1425 | struct kvm_cpuid_entry2 *kvm_find_cpuid_entry_index(struct kvm_vcpu *vcpu, |
1426 | u32 function, u32 index) |
1427 | { |
	return cpuid_entry2_find(vcpu->arch.cpuid_entries, vcpu->arch.cpuid_nent,
				 function, index);
1430 | } |
1431 | EXPORT_SYMBOL_GPL(kvm_find_cpuid_entry_index); |
1432 | |
1433 | struct kvm_cpuid_entry2 *kvm_find_cpuid_entry(struct kvm_vcpu *vcpu, |
1434 | u32 function) |
1435 | { |
	return cpuid_entry2_find(vcpu->arch.cpuid_entries, vcpu->arch.cpuid_nent,
				 function, KVM_CPUID_INDEX_NOT_SIGNIFICANT);
1438 | } |
1439 | EXPORT_SYMBOL_GPL(kvm_find_cpuid_entry); |
1440 | |
1441 | /* |
1442 | * Intel CPUID semantics treats any query for an out-of-range leaf as if the |
1443 | * highest basic leaf (i.e. CPUID.0H:EAX) were requested. AMD CPUID semantics |
1444 | * returns all zeroes for any undefined leaf, whether or not the leaf is in |
1445 | * range. Centaur/VIA follows Intel semantics. |
1446 | * |
1447 | * A leaf is considered out-of-range if its function is higher than the maximum |
1448 | * supported leaf of its associated class or if its associated class does not |
1449 | * exist. |
1450 | * |
1451 | * There are three primary classes to be considered, with their respective |
 * ranges described as "<base> - <top>[,<base2> - <top2>]" inclusive. A primary
1453 | * class exists if a guest CPUID entry for its <base> leaf exists. For a given |
1454 | * class, CPUID.<base>.EAX contains the max supported leaf for the class. |
1455 | * |
1456 | * - Basic: 0x00000000 - 0x3fffffff, 0x50000000 - 0x7fffffff |
1457 | * - Hypervisor: 0x40000000 - 0x4fffffff |
1458 | * - Extended: 0x80000000 - 0xbfffffff |
1459 | * - Centaur: 0xc0000000 - 0xcfffffff |
1460 | * |
1461 | * The Hypervisor class is further subdivided into sub-classes that each act as |
1462 | * their own independent class associated with a 0x100 byte range. E.g. if Qemu |
1463 | * is advertising support for both HyperV and KVM, the resulting Hypervisor |
1464 | * CPUID sub-classes are: |
1465 | * |
1466 | * - HyperV: 0x40000000 - 0x400000ff |
1467 | * - KVM: 0x40000100 - 0x400001ff |
1468 | */ |
1469 | static struct kvm_cpuid_entry2 * |
1470 | get_out_of_range_cpuid_entry(struct kvm_vcpu *vcpu, u32 *fn_ptr, u32 index) |
1471 | { |
1472 | struct kvm_cpuid_entry2 *basic, *class; |
1473 | u32 function = *fn_ptr; |
1474 | |
1475 | basic = kvm_find_cpuid_entry(vcpu, 0); |
1476 | if (!basic) |
1477 | return NULL; |
1478 | |
	if (is_guest_vendor_amd(basic->ebx, basic->ecx, basic->edx) ||
	    is_guest_vendor_hygon(basic->ebx, basic->ecx, basic->edx))
1481 | return NULL; |
1482 | |
1483 | if (function >= 0x40000000 && function <= 0x4fffffff) |
1484 | class = kvm_find_cpuid_entry(vcpu, function & 0xffffff00); |
1485 | else if (function >= 0xc0000000) |
1486 | class = kvm_find_cpuid_entry(vcpu, 0xc0000000); |
1487 | else |
1488 | class = kvm_find_cpuid_entry(vcpu, function & 0x80000000); |
1489 | |
1490 | if (class && function <= class->eax) |
1491 | return NULL; |
1492 | |
1493 | /* |
1494 | * Leaf specific adjustments are also applied when redirecting to the |
1495 | * max basic entry, e.g. if the max basic leaf is 0xb but there is no |
1496 | * entry for CPUID.0xb.index (see below), then the output value for EDX |
1497 | * needs to be pulled from CPUID.0xb.1. |
1498 | */ |
1499 | *fn_ptr = basic->eax; |
1500 | |
1501 | /* |
1502 | * The class does not exist or the requested function is out of range; |
1503 | * the effective CPUID entry is the max basic leaf. Note, the index of |
1504 | * the original requested leaf is observed! |
1505 | */ |
1506 | return kvm_find_cpuid_entry_index(vcpu, basic->eax, index); |
1507 | } |
1508 | |
1509 | bool kvm_cpuid(struct kvm_vcpu *vcpu, u32 *eax, u32 *ebx, |
1510 | u32 *ecx, u32 *edx, bool exact_only) |
1511 | { |
1512 | u32 orig_function = *eax, function = *eax, index = *ecx; |
1513 | struct kvm_cpuid_entry2 *entry; |
1514 | bool exact, used_max_basic = false; |
1515 | |
1516 | entry = kvm_find_cpuid_entry_index(vcpu, function, index); |
1517 | exact = !!entry; |
1518 | |
1519 | if (!entry && !exact_only) { |
		entry = get_out_of_range_cpuid_entry(vcpu, &function, index);
1521 | used_max_basic = !!entry; |
1522 | } |
1523 | |
1524 | if (entry) { |
1525 | *eax = entry->eax; |
1526 | *ebx = entry->ebx; |
1527 | *ecx = entry->ecx; |
1528 | *edx = entry->edx; |
1529 | if (function == 7 && index == 0) { |
1530 | u64 data; |
			if (!__kvm_get_msr(vcpu, MSR_IA32_TSX_CTRL, &data, true) &&
1532 | (data & TSX_CTRL_CPUID_CLEAR)) |
1533 | *ebx &= ~(F(RTM) | F(HLE)); |
1534 | } else if (function == 0x80000007) { |
1535 | if (kvm_hv_invtsc_suppressed(vcpu)) |
1536 | *edx &= ~SF(CONSTANT_TSC); |
1537 | } |
1538 | } else { |
1539 | *eax = *ebx = *ecx = *edx = 0; |
1540 | /* |
1541 | * When leaf 0BH or 1FH is defined, CL is pass-through |
1542 | * and EDX is always the x2APIC ID, even for undefined |
1543 | * subleaves. Index 1 will exist iff the leaf is |
1544 | * implemented, so we pass through CL iff leaf 1 |
1545 | * exists. EDX can be copied from any existing index. |
1546 | */ |
1547 | if (function == 0xb || function == 0x1f) { |
1548 | entry = kvm_find_cpuid_entry_index(vcpu, function, 1); |
1549 | if (entry) { |
1550 | *ecx = index & 0xff; |
1551 | *edx = entry->edx; |
1552 | } |
1553 | } |
1554 | } |
	trace_kvm_cpuid(orig_function, index, *eax, *ebx, *ecx, *edx, exact,
			used_max_basic);
1557 | return exact; |
1558 | } |
1559 | EXPORT_SYMBOL_GPL(kvm_cpuid); |
1560 | |
1561 | int kvm_emulate_cpuid(struct kvm_vcpu *vcpu) |
1562 | { |
1563 | u32 eax, ebx, ecx, edx; |
1564 | |
	if (cpuid_fault_enabled(vcpu) && !kvm_require_cpl(vcpu, 0))
1566 | return 1; |
1567 | |
1568 | eax = kvm_rax_read(vcpu); |
1569 | ecx = kvm_rcx_read(vcpu); |
1570 | kvm_cpuid(vcpu, &eax, &ebx, &ecx, &edx, false); |
	kvm_rax_write(vcpu, eax);
	kvm_rbx_write(vcpu, ebx);
	kvm_rcx_write(vcpu, ecx);
	kvm_rdx_write(vcpu, edx);
1575 | return kvm_skip_emulated_instruction(vcpu); |
1576 | } |
1577 | EXPORT_SYMBOL_GPL(kvm_emulate_cpuid); |
1578 | |