/*
 * Kernel-based Virtual Machine driver for Linux
 * cpuid support routines
 *
 * derived from arch/x86/kvm/x86.c
 *
 * Copyright 2011 Red Hat, Inc. and/or its affiliates.
 * Copyright IBM Corporation, 2008
 *
 * This work is licensed under the terms of the GNU GPL, version 2. See
 * the COPYING file in the top-level directory.
 *
 */

#include <linux/kvm_host.h>
#include <linux/export.h>
#include <linux/vmalloc.h>
#include <linux/uaccess.h>
#include <linux/sched/stat.h>

#include <asm/processor.h>
#include <asm/user.h>
#include <asm/fpu/xstate.h>
#include "cpuid.h"
#include "lapic.h"
#include "mmu.h"
#include "trace.h"
#include "pmu.h"

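/*
 * Compute the size in bytes of the XSAVE area needed to hold the extended
 * state components named in @xstate_bv.  Each component above the legacy
 * FP/SSE region is sized via CPUID leaf 0xD: EAX returns the component's
 * size and EBX its offset in the standard format.  In the compacted format
 * the components are packed back to back, so the running total is used as
 * the offset instead.
 */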
static u32 xstate_required_size(u64 xstate_bv, bool compacted)
{
	int feature_bit = 0;
	u32 ret = XSAVE_HDR_SIZE + XSAVE_HDR_OFFSET;

	xstate_bv &= XFEATURE_MASK_EXTEND;
	while (xstate_bv) {
		if (xstate_bv & 0x1) {
			u32 eax, ebx, ecx, edx, offset;
			cpuid_count(0xD, feature_bit, &eax, &ebx, &ecx, &edx);
			offset = compacted ? ret : ebx;
			ret = max(ret, offset + eax);
		}

		xstate_bv >>= 1;
		feature_bit++;
	}

	return ret;
}

bool kvm_mpx_supported(void)
{
	return ((host_xcr0 & (XFEATURE_MASK_BNDREGS | XFEATURE_MASK_BNDCSR))
		&& kvm_x86_ops->mpx_supported());
}
EXPORT_SYMBOL_GPL(kvm_mpx_supported);

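/*
 * Return the XCR0 bits KVM is able and willing to expose to a guest: the
 * intersection of KVM_SUPPORTED_XCR0 and the host's XCR0, minus the MPX
 * bits when MPX cannot be virtualized.
 */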
u64 kvm_supported_xcr0(void)
{
	u64 xcr0 = KVM_SUPPORTED_XCR0 & host_xcr0;

	if (!kvm_mpx_supported())
		xcr0 &= ~(XFEATURE_MASK_BNDREGS | XFEATURE_MASK_BNDCSR);

	return xcr0;
}

#define F(x) bit(X86_FEATURE_##x)

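/*
 * Called after userspace has (re)written the vCPU's CPUID entries.  Refresh
 * the dynamic bits (OSXSAVE, APIC, OSPKE), re-derive the cached xstate and
 * physical-address-width state, and propagate the result to the MMU and the
 * PMU.  Returns -EINVAL if the advertised virtual address width is one KVM
 * cannot handle.
 */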
int kvm_update_cpuid(struct kvm_vcpu *vcpu)
{
	struct kvm_cpuid_entry2 *best;
	struct kvm_lapic *apic = vcpu->arch.apic;

	best = kvm_find_cpuid_entry(vcpu, 1, 0);
	if (!best)
		return 0;

	/* Update OSXSAVE bit */
	if (boot_cpu_has(X86_FEATURE_XSAVE) && best->function == 0x1) {
		best->ecx &= ~F(OSXSAVE);
		if (kvm_read_cr4_bits(vcpu, X86_CR4_OSXSAVE))
			best->ecx |= F(OSXSAVE);
	}

	best->edx &= ~F(APIC);
	if (vcpu->arch.apic_base & MSR_IA32_APICBASE_ENABLE)
		best->edx |= F(APIC);

	if (apic) {
		if (best->ecx & F(TSC_DEADLINE_TIMER))
			apic->lapic_timer.timer_mode_mask = 3 << 17;
		else
			apic->lapic_timer.timer_mode_mask = 1 << 17;
	}

	best = kvm_find_cpuid_entry(vcpu, 7, 0);
	if (best) {
		/* Update OSPKE bit */
		if (boot_cpu_has(X86_FEATURE_PKU) && best->function == 0x7) {
			best->ecx &= ~F(OSPKE);
			if (kvm_read_cr4_bits(vcpu, X86_CR4_PKE))
				best->ecx |= F(OSPKE);
		}
	}

	best = kvm_find_cpuid_entry(vcpu, 0xD, 0);
	if (!best) {
		vcpu->arch.guest_supported_xcr0 = 0;
		vcpu->arch.guest_xstate_size = XSAVE_HDR_SIZE + XSAVE_HDR_OFFSET;
	} else {
		vcpu->arch.guest_supported_xcr0 =
			(best->eax | ((u64)best->edx << 32)) &
			kvm_supported_xcr0();
		vcpu->arch.guest_xstate_size = best->ebx =
			xstate_required_size(vcpu->arch.xcr0, false);
	}

	best = kvm_find_cpuid_entry(vcpu, 0xD, 1);
	if (best && (best->eax & (F(XSAVES) | F(XSAVEC))))
		best->ebx = xstate_required_size(vcpu->arch.xcr0, true);

	/*
	 * The canonical-address checks in KVM assume a 48-bit or 57-bit
	 * virtual address width; reject any other non-zero value here so
	 * that assumption is never silently violated.
	 */
	best = kvm_find_cpuid_entry(vcpu, 0x80000008, 0);
	if (best) {
		int vaddr_bits = (best->eax & 0xff00) >> 8;

		if (vaddr_bits != 48 && vaddr_bits != 57 && vaddr_bits != 0)
			return -EINVAL;
	}

	best = kvm_find_cpuid_entry(vcpu, KVM_CPUID_FEATURES, 0);
	if (kvm_hlt_in_guest(vcpu->kvm) && best &&
		(best->eax & (1 << KVM_FEATURE_PV_UNHALT)))
		best->eax &= ~(1 << KVM_FEATURE_PV_UNHALT);

	/* Update physical-address width */
	vcpu->arch.maxphyaddr = cpuid_query_maxphyaddr(vcpu);
	kvm_mmu_reset_context(vcpu);

	kvm_pmu_refresh(vcpu);
	return 0;
}

static int is_efer_nx(void)
{
	unsigned long long efer = 0;

	rdmsrl_safe(MSR_EFER, &efer);
	return efer & EFER_NX;
}

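/*
 * If the guest CPUID advertises NX but the host has NX disabled in EFER,
 * strip the NX bit so the guest does not rely on a feature KVM cannot
 * provide on this host.
 */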
static void cpuid_fix_nx_cap(struct kvm_vcpu *vcpu)
{
	int i;
	struct kvm_cpuid_entry2 *e, *entry;

	entry = NULL;
	for (i = 0; i < vcpu->arch.cpuid_nent; ++i) {
		e = &vcpu->arch.cpuid_entries[i];
		if (e->function == 0x80000001) {
			entry = e;
			break;
		}
	}
	if (entry && (entry->edx & F(NX)) && !is_efer_nx()) {
		entry->edx &= ~F(NX);
		printk(KERN_INFO "kvm: guest NX capability removed\n");
	}
}

int cpuid_query_maxphyaddr(struct kvm_vcpu *vcpu)
{
	struct kvm_cpuid_entry2 *best;

	best = kvm_find_cpuid_entry(vcpu, 0x80000000, 0);
	if (!best || best->eax < 0x80000008)
		goto not_found;
	best = kvm_find_cpuid_entry(vcpu, 0x80000008, 0);
	if (best)
		return best->eax & 0xff;
not_found:
	return 36;
}
EXPORT_SYMBOL_GPL(cpuid_query_maxphyaddr);

/* Legacy KVM_SET_CPUID: an older userspace fills CPUID state in the newer in-kernel format. */
int kvm_vcpu_ioctl_set_cpuid(struct kvm_vcpu *vcpu,
			     struct kvm_cpuid *cpuid,
			     struct kvm_cpuid_entry __user *entries)
{
	int r, i;
	struct kvm_cpuid_entry *cpuid_entries = NULL;

	r = -E2BIG;
	if (cpuid->nent > KVM_MAX_CPUID_ENTRIES)
		goto out;
	r = -ENOMEM;
	if (cpuid->nent) {
		cpuid_entries =
			vmalloc(array_size(sizeof(struct kvm_cpuid_entry),
					   cpuid->nent));
		if (!cpuid_entries)
			goto out;
		r = -EFAULT;
		if (copy_from_user(cpuid_entries, entries,
				   cpuid->nent * sizeof(struct kvm_cpuid_entry)))
			goto out;
	}
	for (i = 0; i < cpuid->nent; i++) {
		vcpu->arch.cpuid_entries[i].function = cpuid_entries[i].function;
		vcpu->arch.cpuid_entries[i].eax = cpuid_entries[i].eax;
		vcpu->arch.cpuid_entries[i].ebx = cpuid_entries[i].ebx;
		vcpu->arch.cpuid_entries[i].ecx = cpuid_entries[i].ecx;
		vcpu->arch.cpuid_entries[i].edx = cpuid_entries[i].edx;
		vcpu->arch.cpuid_entries[i].index = 0;
		vcpu->arch.cpuid_entries[i].flags = 0;
		vcpu->arch.cpuid_entries[i].padding[0] = 0;
		vcpu->arch.cpuid_entries[i].padding[1] = 0;
		vcpu->arch.cpuid_entries[i].padding[2] = 0;
	}
	vcpu->arch.cpuid_nent = cpuid->nent;
	cpuid_fix_nx_cap(vcpu);
	kvm_apic_set_version(vcpu);
	kvm_x86_ops->cpuid_update(vcpu);
	r = kvm_update_cpuid(vcpu);

out:
	vfree(cpuid_entries);
	return r;
}

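/*
 * KVM_SET_CPUID2: userspace already provides kvm_cpuid_entry2 entries, so
 * they can be copied into the vCPU verbatim.
 */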
int kvm_vcpu_ioctl_set_cpuid2(struct kvm_vcpu *vcpu,
			      struct kvm_cpuid2 *cpuid,
			      struct kvm_cpuid_entry2 __user *entries)
{
	int r;

	r = -E2BIG;
	if (cpuid->nent > KVM_MAX_CPUID_ENTRIES)
		goto out;
	r = -EFAULT;
	if (copy_from_user(&vcpu->arch.cpuid_entries, entries,
			   cpuid->nent * sizeof(struct kvm_cpuid_entry2)))
		goto out;
	vcpu->arch.cpuid_nent = cpuid->nent;
	kvm_apic_set_version(vcpu);
	kvm_x86_ops->cpuid_update(vcpu);
	r = kvm_update_cpuid(vcpu);
out:
	return r;
}

int kvm_vcpu_ioctl_get_cpuid2(struct kvm_vcpu *vcpu,
			      struct kvm_cpuid2 *cpuid,
			      struct kvm_cpuid_entry2 __user *entries)
{
	int r;

	r = -E2BIG;
	if (cpuid->nent < vcpu->arch.cpuid_nent)
		goto out;
	r = -EFAULT;
	if (copy_to_user(entries, &vcpu->arch.cpuid_entries,
			 vcpu->arch.cpuid_nent * sizeof(struct kvm_cpuid_entry2)))
		goto out;
	return 0;

out:
	cpuid->nent = vcpu->arch.cpuid_nent;
	return r;
}

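/* Clear any feature bits that the host CPU itself does not advertise. */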
static void cpuid_mask(u32 *word, int wordnum)
{
	*word &= boot_cpu_data.x86_capability[wordnum];
}

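/*
 * Fill one CPUID entry by executing CPUID on the host for the given
 * function/index; the caller then masks the result down to what KVM
 * supports.
 */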
static void do_cpuid_1_ent(struct kvm_cpuid_entry2 *entry, u32 function,
			   u32 index)
{
	entry->function = function;
	entry->index = index;
	cpuid_count(entry->function, entry->index,
		    &entry->eax, &entry->ebx, &entry->ecx, &entry->edx);
	entry->flags = 0;
}

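/*
 * Build a KVM_GET_EMULATED_CPUID entry: these leaves advertise only the
 * features KVM emulates in software (e.g. MOVBE and RDPID) rather than
 * what the host CPU supports.
 */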
static int __do_cpuid_ent_emulated(struct kvm_cpuid_entry2 *entry,
				   u32 func, u32 index, int *nent, int maxnent)
{
	switch (func) {
	case 0:
		entry->eax = 7;
		++*nent;
		break;
	case 1:
		entry->ecx = F(MOVBE);
		++*nent;
		break;
	case 7:
		entry->flags |= KVM_CPUID_FLAG_SIGNIFCANT_INDEX;
		if (index == 0)
			entry->ecx = F(RDPID);
		++*nent;
		/* fall through */
	default:
		break;
	}

	entry->function = func;
	entry->index = index;

	return 0;
}

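/*
 * Build the KVM_GET_SUPPORTED_CPUID entry (or entries, for leaves with
 * sub-leaves) for @function: query the host with do_cpuid_1_ent() and then
 * mask each register against the feature bits KVM knows how to virtualize.
 */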
static inline int __do_cpuid_ent(struct kvm_cpuid_entry2 *entry, u32 function,
				 u32 index, int *nent, int maxnent)
{
	int r;
	unsigned f_nx = is_efer_nx() ? F(NX) : 0;
#ifdef CONFIG_X86_64
	unsigned f_gbpages = (kvm_x86_ops->get_lpage_level() == PT_PDPE_LEVEL)
				? F(GBPAGES) : 0;
	unsigned f_lm = F(LM);
#else
	unsigned f_gbpages = 0;
	unsigned f_lm = 0;
#endif
	unsigned f_rdtscp = kvm_x86_ops->rdtscp_supported() ? F(RDTSCP) : 0;
	unsigned f_invpcid = kvm_x86_ops->invpcid_supported() ? F(INVPCID) : 0;
	unsigned f_mpx = kvm_mpx_supported() ? F(MPX) : 0;
	unsigned f_xsaves = kvm_x86_ops->xsaves_supported() ? F(XSAVES) : 0;
	unsigned f_umip = kvm_x86_ops->umip_emulated() ? F(UMIP) : 0;
	unsigned f_intel_pt = kvm_x86_ops->pt_supported() ? F(INTEL_PT) : 0;
	unsigned f_la57 = 0;

	/* cpuid 1.edx */
	const u32 kvm_cpuid_1_edx_x86_features =
		F(FPU) | F(VME) | F(DE) | F(PSE) |
		F(TSC) | F(MSR) | F(PAE) | F(MCE) |
		F(CX8) | F(APIC) | 0 /* Reserved */ | F(SEP) |
		F(MTRR) | F(PGE) | F(MCA) | F(CMOV) |
		F(PAT) | F(PSE36) | 0 /* PSN */ | F(CLFLUSH) |
		0 /* Reserved, DS, ACPI */ | F(MMX) |
		F(FXSR) | F(XMM) | F(XMM2) | F(SELFSNOOP) |
		0 /* HTT, TM, Reserved, PBE */;
	/* cpuid 0x80000001.edx */
	const u32 kvm_cpuid_8000_0001_edx_x86_features =
		F(FPU) | F(VME) | F(DE) | F(PSE) |
		F(TSC) | F(MSR) | F(PAE) | F(MCE) |
		F(CX8) | F(APIC) | 0 /* Reserved */ | F(SYSCALL) |
		F(MTRR) | F(PGE) | F(MCA) | F(CMOV) |
		F(PAT) | F(PSE36) | 0 /* Reserved */ |
		f_nx | 0 /* Reserved */ | F(MMXEXT) | F(MMX) |
		F(FXSR) | F(FXSR_OPT) | f_gbpages | f_rdtscp |
		0 /* Reserved */ | f_lm | F(3DNOWEXT) | F(3DNOW);
	/* cpuid 1.ecx */
	const u32 kvm_cpuid_1_ecx_x86_features =
		/* NOTE: MONITOR (and MWAIT) are emulated as NOP,
		 * but *not* advertised to guests via CPUID! */
		F(XMM3) | F(PCLMULQDQ) | 0 /* DTES64, MONITOR */ |
		0 /* DS-CPL, VMX, SMX, EST */ |
		0 /* TM2 */ | F(SSSE3) | 0 /* CNXT-ID */ | 0 /* Reserved */ |
		F(FMA) | F(CX16) | 0 /* xTPR Update, PDCM */ |
		F(PCID) | 0 /* Reserved, DCA */ | F(XMM4_1) |
		F(XMM4_2) | F(X2APIC) | F(MOVBE) | F(POPCNT) |
		0 /* Reserved */ | F(AES) | F(XSAVE) | 0 /* OSXSAVE */ | F(AVX) |
		F(F16C) | F(RDRAND);
	/* cpuid 0x80000001.ecx */
	const u32 kvm_cpuid_8000_0001_ecx_x86_features =
		F(LAHF_LM) | F(CMP_LEGACY) | 0 /*SVM*/ | 0 /* ExtApicSpace */ |
		F(CR8_LEGACY) | F(ABM) | F(SSE4A) | F(MISALIGNSSE) |
		F(3DNOWPREFETCH) | F(OSVW) | 0 /* IBS */ | F(XOP) |
		0 /* SKINIT, WDT, LWP */ | F(FMA4) | F(TBM) |
		F(TOPOEXT) | F(PERFCTR_CORE);

	/* cpuid 0x80000008.ebx */
	const u32 kvm_cpuid_8000_0008_ebx_x86_features =
		F(WBNOINVD) | F(AMD_IBPB) | F(AMD_IBRS) | F(AMD_SSBD) | F(VIRT_SSBD) |
		F(AMD_SSB_NO) | F(AMD_STIBP);

	/* cpuid 0xC0000001.edx */
	const u32 kvm_cpuid_C000_0001_edx_x86_features =
		F(XSTORE) | F(XSTORE_EN) | F(XCRYPT) | F(XCRYPT_EN) |
		F(ACE2) | F(ACE2_EN) | F(PHE) | F(PHE_EN) |
		F(PMM) | F(PMM_EN);

	/* cpuid 7.0.ebx */
	const u32 kvm_cpuid_7_0_ebx_x86_features =
		F(FSGSBASE) | F(BMI1) | F(HLE) | F(AVX2) | F(SMEP) |
		F(BMI2) | F(ERMS) | f_invpcid | F(RTM) | f_mpx | F(RDSEED) |
		F(ADX) | F(SMAP) | F(AVX512IFMA) | F(AVX512F) | F(AVX512PF) |
		F(AVX512ER) | F(AVX512CD) | F(CLFLUSHOPT) | F(CLWB) | F(AVX512DQ) |
		F(SHA_NI) | F(AVX512BW) | F(AVX512VL) | f_intel_pt;

	/* cpuid 0xD.1.eax */
	const u32 kvm_cpuid_D_1_eax_x86_features =
		F(XSAVEOPT) | F(XSAVEC) | F(XGETBV1) | f_xsaves;

	/* cpuid 7.0.ecx */
	const u32 kvm_cpuid_7_0_ecx_x86_features =
		F(AVX512VBMI) | F(LA57) | F(PKU) | 0 /*OSPKE*/ |
		F(AVX512_VPOPCNTDQ) | F(UMIP) | F(AVX512_VBMI2) | F(GFNI) |
		F(VAES) | F(VPCLMULQDQ) | F(AVX512_VNNI) | F(AVX512_BITALG) |
		F(CLDEMOTE) | F(MOVDIRI) | F(MOVDIR64B);

	/* cpuid 7.0.edx */
	const u32 kvm_cpuid_7_0_edx_x86_features =
		F(AVX512_4VNNIW) | F(AVX512_4FMAPS) | F(SPEC_CTRL) |
		F(SPEC_CTRL_SSBD) | F(ARCH_CAPABILITIES) | F(INTEL_STIBP);

	/* all calls to cpuid_count() should be made on the same cpu */
	get_cpu();

	r = -E2BIG;

	if (*nent >= maxnent)
		goto out;

	do_cpuid_1_ent(entry, function, index);
	++*nent;

	switch (function) {
	case 0:
		entry->eax = min(entry->eax, (u32)(f_intel_pt ? 0x14 : 0xd));
		break;
	case 1:
		entry->edx &= kvm_cpuid_1_edx_x86_features;
		cpuid_mask(&entry->edx, CPUID_1_EDX);
		entry->ecx &= kvm_cpuid_1_ecx_x86_features;
		cpuid_mask(&entry->ecx, CPUID_1_ECX);
		/*
		 * KVM emulates x2APIC in software, so advertise it to the
		 * guest even when the host CPU does not support it.
		 */
		entry->ecx |= F(X2APIC);
		break;
	/* function 2 entries are STATEFUL. That is, repeated cpuid commands
	 * may return different values. This forces us to get_cpu() before
	 * issuing the first command, and also to emulate this annoying behavior
	 * in kvm_emulate_cpuid() using KVM_CPUID_FLAG_STATE_READ_NEXT */
	case 2: {
		int t, times = entry->eax & 0xff;

		entry->flags |= KVM_CPUID_FLAG_STATEFUL_FUNC;
		entry->flags |= KVM_CPUID_FLAG_STATE_READ_NEXT;
		for (t = 1; t < times; ++t) {
			if (*nent >= maxnent)
				goto out;

			do_cpuid_1_ent(&entry[t], function, 0);
			entry[t].flags |= KVM_CPUID_FLAG_STATEFUL_FUNC;
			++*nent;
		}
		break;
	}
	/* function 4 has additional index. */
	case 4: {
		int i, cache_type;

		entry->flags |= KVM_CPUID_FLAG_SIGNIFCANT_INDEX;
		/* read more entries until cache_type is zero */
		for (i = 1; ; ++i) {
			if (*nent >= maxnent)
				goto out;

			cache_type = entry[i - 1].eax & 0x1f;
			if (!cache_type)
				break;
			do_cpuid_1_ent(&entry[i], function, i);
			entry[i].flags |=
			       KVM_CPUID_FLAG_SIGNIFCANT_INDEX;
			++*nent;
		}
		break;
	}
	case 6: /* Thermal management */
		entry->eax = 0x4; /* allow ARAT */
		entry->ebx = 0;
		entry->ecx = 0;
		entry->edx = 0;
		break;
	case 7: {
		entry->flags |= KVM_CPUID_FLAG_SIGNIFCANT_INDEX;
		/* Mask ebx against host capability word 9 */
		if (index == 0) {
			entry->ebx &= kvm_cpuid_7_0_ebx_x86_features;
			cpuid_mask(&entry->ebx, CPUID_7_0_EBX);
			/* TSC_ADJUST is emulated */
			entry->ebx |= F(TSC_ADJUST);
			entry->ecx &= kvm_cpuid_7_0_ecx_x86_features;
			f_la57 = entry->ecx & F(LA57);
			cpuid_mask(&entry->ecx, CPUID_7_ECX);
			/* Set LA57 based on hardware capability. */
			entry->ecx |= f_la57;
			entry->ecx |= f_umip;
			/* PKU is not yet implemented for shadow paging. */
			if (!tdp_enabled || !boot_cpu_has(X86_FEATURE_OSPKE))
				entry->ecx &= ~F(PKU);
			entry->edx &= kvm_cpuid_7_0_edx_x86_features;
			cpuid_mask(&entry->edx, CPUID_7_EDX);
			/*
			 * We emulate ARCH_CAPABILITIES in software even
			 * if the host doesn't support it.
			 */
			entry->edx |= F(ARCH_CAPABILITIES);
		} else {
			entry->ebx = 0;
			entry->ecx = 0;
			entry->edx = 0;
		}
		entry->eax = 0;
		break;
	}
	case 9:
		break;
	case 0xa: { /* Architectural Performance Monitoring */
		struct x86_pmu_capability cap;
		union cpuid10_eax eax;
		union cpuid10_edx edx;

		perf_get_x86_pmu_capability(&cap);

		/*
		 * Only support guest architectural pmu on a host
		 * with architectural pmu.
		 */
		if (!cap.version)
			memset(&cap, 0, sizeof(cap));

		eax.split.version_id = min(cap.version, 2);
		eax.split.num_counters = cap.num_counters_gp;
		eax.split.bit_width = cap.bit_width_gp;
		eax.split.mask_length = cap.events_mask_len;

		edx.split.num_counters_fixed = cap.num_counters_fixed;
		edx.split.bit_width_fixed = cap.bit_width_fixed;
		edx.split.reserved = 0;

		entry->eax = eax.full;
		entry->ebx = cap.events_mask;
		entry->ecx = 0;
		entry->edx = edx.full;
		break;
	}
	/* function 0xb has additional index. */
	case 0xb: {
		int i, level_type;

		entry->flags |= KVM_CPUID_FLAG_SIGNIFCANT_INDEX;
		/* read more entries until level_type is zero */
		for (i = 1; ; ++i) {
			if (*nent >= maxnent)
				goto out;

			level_type = entry[i - 1].ecx & 0xff00;
			if (!level_type)
				break;
			do_cpuid_1_ent(&entry[i], function, i);
			entry[i].flags |=
			       KVM_CPUID_FLAG_SIGNIFCANT_INDEX;
			++*nent;
		}
		break;
	}
	case 0xd: {
		int idx, i;
		u64 supported = kvm_supported_xcr0();

		entry->eax &= supported;
		entry->ebx = xstate_required_size(supported, false);
		entry->ecx = entry->ebx;
		entry->edx &= supported >> 32;
		entry->flags |= KVM_CPUID_FLAG_SIGNIFCANT_INDEX;
		if (!supported)
			break;

		for (idx = 1, i = 1; idx < 64; ++idx) {
			u64 mask = ((u64)1 << idx);
			if (*nent >= maxnent)
				goto out;

			do_cpuid_1_ent(&entry[i], function, idx);
			if (idx == 1) {
				entry[i].eax &= kvm_cpuid_D_1_eax_x86_features;
				cpuid_mask(&entry[i].eax, CPUID_D_1_EAX);
				entry[i].ebx = 0;
				if (entry[i].eax & (F(XSAVES)|F(XSAVEC)))
					entry[i].ebx =
						xstate_required_size(supported,
								     true);
			} else {
				if (entry[i].eax == 0 || !(supported & mask))
					continue;
				if (WARN_ON_ONCE(entry[i].ecx & 1))
					continue;
			}
			entry[i].ecx = 0;
			entry[i].edx = 0;
			entry[i].flags |=
				KVM_CPUID_FLAG_SIGNIFCANT_INDEX;
			++*nent;
			++i;
		}
		break;
	}
	/* Intel PT */
	case 0x14: {
		int t, times = entry->eax;

		if (!f_intel_pt)
			break;

		entry->flags |= KVM_CPUID_FLAG_SIGNIFCANT_INDEX;
		for (t = 1; t <= times; ++t) {
			if (*nent >= maxnent)
				goto out;
			do_cpuid_1_ent(&entry[t], function, t);
			entry[t].flags |= KVM_CPUID_FLAG_SIGNIFCANT_INDEX;
			++*nent;
		}
		break;
	}
	case KVM_CPUID_SIGNATURE: {
		static const char signature[12] = "KVMKVMKVM\0\0";
		const u32 *sigptr = (const u32 *)signature;
		entry->eax = KVM_CPUID_FEATURES;
		entry->ebx = sigptr[0];
		entry->ecx = sigptr[1];
		entry->edx = sigptr[2];
		break;
	}
	case KVM_CPUID_FEATURES:
		entry->eax = (1 << KVM_FEATURE_CLOCKSOURCE) |
			     (1 << KVM_FEATURE_NOP_IO_DELAY) |
			     (1 << KVM_FEATURE_CLOCKSOURCE2) |
			     (1 << KVM_FEATURE_ASYNC_PF) |
			     (1 << KVM_FEATURE_PV_EOI) |
			     (1 << KVM_FEATURE_CLOCKSOURCE_STABLE_BIT) |
			     (1 << KVM_FEATURE_PV_UNHALT) |
			     (1 << KVM_FEATURE_PV_TLB_FLUSH) |
			     (1 << KVM_FEATURE_ASYNC_PF_VMEXIT) |
			     (1 << KVM_FEATURE_PV_SEND_IPI);

		if (sched_info_on())
			entry->eax |= (1 << KVM_FEATURE_STEAL_TIME);

		entry->ebx = 0;
		entry->ecx = 0;
		entry->edx = 0;
		break;
	case 0x80000000:
		entry->eax = min(entry->eax, 0x8000001f);
		break;
	case 0x80000001:
		entry->edx &= kvm_cpuid_8000_0001_edx_x86_features;
		cpuid_mask(&entry->edx, CPUID_8000_0001_EDX);
		entry->ecx &= kvm_cpuid_8000_0001_ecx_x86_features;
		cpuid_mask(&entry->ecx, CPUID_8000_0001_ECX);
		break;
	case 0x80000007: /* Advanced power management */
		/* invariant TSC is CPUID.80000007H:EDX[8] */
		entry->edx &= (1 << 8);
		/* mask against host */
		entry->edx &= boot_cpu_data.x86_power;
		entry->eax = entry->ebx = entry->ecx = 0;
		break;
	case 0x80000008: {
		unsigned g_phys_as = (entry->eax >> 16) & 0xff;
		unsigned virt_as = max((entry->eax >> 8) & 0xff, 48U);
		unsigned phys_as = entry->eax & 0xff;

		if (!g_phys_as)
			g_phys_as = phys_as;
		entry->eax = g_phys_as | (virt_as << 8);
		entry->edx = 0;
		/*
		 * IBRS, IBPB and VIRT_SSBD aren't necessarily present in
		 * hardware cpuid
		 */
		if (boot_cpu_has(X86_FEATURE_AMD_IBPB))
			entry->ebx |= F(AMD_IBPB);
		if (boot_cpu_has(X86_FEATURE_AMD_IBRS))
			entry->ebx |= F(AMD_IBRS);
		if (boot_cpu_has(X86_FEATURE_VIRT_SSBD))
			entry->ebx |= F(VIRT_SSBD);
		entry->ebx &= kvm_cpuid_8000_0008_ebx_x86_features;
		cpuid_mask(&entry->ebx, CPUID_8000_0008_EBX);
		/*
		 * The preference is to use SPEC CTRL MSR instead of the
		 * VIRT_SPEC MSR.
		 */
		if (boot_cpu_has(X86_FEATURE_LS_CFG_SSBD) &&
		    !boot_cpu_has(X86_FEATURE_AMD_SSBD))
			entry->ebx |= F(VIRT_SSBD);
		break;
	}
	case 0x80000019:
		entry->ecx = entry->edx = 0;
		break;
	case 0x8000001a:
		break;
	case 0x8000001d:
		break;
	/* Add support for Centaur's CPUID instruction. */
	case 0xC0000000:
		/* Just support up to 0xC0000004 now. */
		entry->eax = min(entry->eax, 0xC0000004);
		break;
	case 0xC0000001:
		entry->edx &= kvm_cpuid_C000_0001_edx_x86_features;
		cpuid_mask(&entry->edx, CPUID_C000_0001_EDX);
		break;
	case 3: /* Processor serial number */
	case 5: /* MONITOR/MWAIT */
	case 0xC0000002:
	case 0xC0000003:
	case 0xC0000004:
	default:
		entry->eax = entry->ebx = entry->ecx = entry->edx = 0;
		break;
	}

	kvm_x86_ops->set_supported_cpuid(function, entry);

	r = 0;

out:
	put_cpu();

	return r;
}

static int do_cpuid_ent(struct kvm_cpuid_entry2 *entry, u32 func,
			u32 idx, int *nent, int maxnent, unsigned int type)
{
	if (type == KVM_GET_EMULATED_CPUID)
		return __do_cpuid_ent_emulated(entry, func, idx, nent, maxnent);

	return __do_cpuid_ent(entry, func, idx, nent, maxnent);
}

#undef F

struct kvm_cpuid_param {
	u32 func;
	u32 idx;
	bool has_leaf_count;
	bool (*qualifier)(const struct kvm_cpuid_param *param);
};

static bool is_centaur_cpu(const struct kvm_cpuid_param *param)
{
	return boot_cpu_data.x86_vendor == X86_VENDOR_CENTAUR;
}

static bool sanity_check_entries(struct kvm_cpuid_entry2 __user *entries,
				 __u32 num_entries, unsigned int ioctl_type)
{
	int i;
	__u32 pad[3];

	if (ioctl_type != KVM_GET_EMULATED_CPUID)
		return false;

	/*
	 * We want to make sure that ->padding is being passed clean from
	 * userspace in case we want to use it for something in the future.
	 *
	 * Sadly, this wasn't enforced for KVM_GET_SUPPORTED_CPUID, so we
	 * have to settle for checking only the emulated side. /me sheds
	 * a tear.
	 */
	for (i = 0; i < num_entries; i++) {
		if (copy_from_user(pad, entries[i].padding, sizeof(pad)))
			return true;

		if (pad[0] || pad[1] || pad[2])
			return true;
	}
	return false;
}

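/*
 * KVM_GET_SUPPORTED_CPUID / KVM_GET_EMULATED_CPUID: enumerate the basic,
 * extended, Centaur and KVM paravirt leaves listed in param[], expanding
 * each range up to the limit reported by its leaf-count entry, and copy
 * the result back to userspace.
 */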
int kvm_dev_ioctl_get_cpuid(struct kvm_cpuid2 *cpuid,
			    struct kvm_cpuid_entry2 __user *entries,
			    unsigned int type)
{
	struct kvm_cpuid_entry2 *cpuid_entries;
	int limit, nent = 0, r = -E2BIG, i;
	u32 func;
	static const struct kvm_cpuid_param param[] = {
		{ .func = 0, .has_leaf_count = true },
		{ .func = 0x80000000, .has_leaf_count = true },
		{ .func = 0xC0000000, .qualifier = is_centaur_cpu, .has_leaf_count = true },
		{ .func = KVM_CPUID_SIGNATURE },
		{ .func = KVM_CPUID_FEATURES },
	};

	if (cpuid->nent < 1)
		goto out;
	if (cpuid->nent > KVM_MAX_CPUID_ENTRIES)
		cpuid->nent = KVM_MAX_CPUID_ENTRIES;

	if (sanity_check_entries(entries, cpuid->nent, type))
		return -EINVAL;

	r = -ENOMEM;
	cpuid_entries = vzalloc(array_size(sizeof(struct kvm_cpuid_entry2),
					   cpuid->nent));
	if (!cpuid_entries)
		goto out;

	r = 0;
	for (i = 0; i < ARRAY_SIZE(param); i++) {
		const struct kvm_cpuid_param *ent = &param[i];

		if (ent->qualifier && !ent->qualifier(ent))
			continue;

		r = do_cpuid_ent(&cpuid_entries[nent], ent->func, ent->idx,
				&nent, cpuid->nent, type);

		if (r)
			goto out_free;

		if (!ent->has_leaf_count)
			continue;

		limit = cpuid_entries[nent - 1].eax;
		for (func = ent->func + 1; func <= limit && nent < cpuid->nent && r == 0; ++func)
			r = do_cpuid_ent(&cpuid_entries[nent], func, ent->idx,
					&nent, cpuid->nent, type);

		if (r)
			goto out_free;
	}

	r = -EFAULT;
	if (copy_to_user(entries, cpuid_entries,
			 nent * sizeof(struct kvm_cpuid_entry2)))
		goto out_free;
	cpuid->nent = nent;
	r = 0;

out_free:
	vfree(cpuid_entries);
out:
	return r;
}

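/*
 * Stateful CPUID leaves (currently only function 2) return a different
 * value on each invocation.  Rotate the READ_NEXT flag to the next cached
 * entry with the same function so that successive guest CPUID executions
 * walk through the entries in order.
 */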
static int move_to_next_stateful_cpuid_entry(struct kvm_vcpu *vcpu, int i)
{
	struct kvm_cpuid_entry2 *e = &vcpu->arch.cpuid_entries[i];
	struct kvm_cpuid_entry2 *ej;
	int j = i;
	int nent = vcpu->arch.cpuid_nent;

	e->flags &= ~KVM_CPUID_FLAG_STATE_READ_NEXT;
	/* when no next entry is found, the current entry[i] is reselected */
	do {
		j = (j + 1) % nent;
		ej = &vcpu->arch.cpuid_entries[j];
	} while (ej->function != e->function);

	ej->flags |= KVM_CPUID_FLAG_STATE_READ_NEXT;

	return j;
}

/* find an entry with matching function, matching index (if needed), and that
 * should be read next (if it's stateful) */
static int is_matching_cpuid_entry(struct kvm_cpuid_entry2 *e,
				   u32 function, u32 index)
{
	if (e->function != function)
		return 0;
	if ((e->flags & KVM_CPUID_FLAG_SIGNIFCANT_INDEX) && e->index != index)
		return 0;
	if ((e->flags & KVM_CPUID_FLAG_STATEFUL_FUNC) &&
	    !(e->flags & KVM_CPUID_FLAG_STATE_READ_NEXT))
		return 0;
	return 1;
}

struct kvm_cpuid_entry2 *kvm_find_cpuid_entry(struct kvm_vcpu *vcpu,
					      u32 function, u32 index)
{
	int i;
	struct kvm_cpuid_entry2 *best = NULL;

	for (i = 0; i < vcpu->arch.cpuid_nent; ++i) {
		struct kvm_cpuid_entry2 *e;

		e = &vcpu->arch.cpuid_entries[i];
		if (is_matching_cpuid_entry(e, function, index)) {
			if (e->flags & KVM_CPUID_FLAG_STATEFUL_FUNC)
				move_to_next_stateful_cpuid_entry(vcpu, i);
			best = e;
			break;
		}
	}
	return best;
}
EXPORT_SYMBOL_GPL(kvm_find_cpuid_entry);

/*
 * If no match is found, check whether we exceed the vCPU's limit
 * and return the content of the highest valid _standard_ leaf instead.
 * This is to satisfy the CPUID specification.
 */
static struct kvm_cpuid_entry2 *check_cpuid_limit(struct kvm_vcpu *vcpu,
						  u32 function, u32 index)
{
	struct kvm_cpuid_entry2 *maxlevel;

	maxlevel = kvm_find_cpuid_entry(vcpu, function & 0x80000000, 0);
	if (!maxlevel || maxlevel->eax >= function)
		return NULL;
	if (function & 0x80000000) {
		maxlevel = kvm_find_cpuid_entry(vcpu, 0, 0);
		if (!maxlevel)
			return NULL;
	}
	return kvm_find_cpuid_entry(vcpu, maxlevel->eax, index);
}

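/*
 * Resolve a guest CPUID query from the cached entries.  If no entry matches
 * and @check_limit is set, fall back to the highest valid standard leaf, as
 * required by the CPUID specification.
 */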
bool kvm_cpuid(struct kvm_vcpu *vcpu, u32 *eax, u32 *ebx,
	       u32 *ecx, u32 *edx, bool check_limit)
{
	u32 function = *eax, index = *ecx;
	struct kvm_cpuid_entry2 *best;
	bool entry_found = true;

	best = kvm_find_cpuid_entry(vcpu, function, index);

	if (!best) {
		entry_found = false;
		if (!check_limit)
			goto out;

		best = check_cpuid_limit(vcpu, function, index);
	}

out:
	if (best) {
		*eax = best->eax;
		*ebx = best->ebx;
		*ecx = best->ecx;
		*edx = best->edx;
	} else
		*eax = *ebx = *ecx = *edx = 0;
	trace_kvm_cpuid(function, *eax, *ebx, *ecx, *edx, entry_found);
	return entry_found;
}
EXPORT_SYMBOL_GPL(kvm_cpuid);

int kvm_emulate_cpuid(struct kvm_vcpu *vcpu)
{
	u32 eax, ebx, ecx, edx;

	if (cpuid_fault_enabled(vcpu) && !kvm_require_cpl(vcpu, 0))
		return 1;

	eax = kvm_register_read(vcpu, VCPU_REGS_RAX);
	ecx = kvm_register_read(vcpu, VCPU_REGS_RCX);
	kvm_cpuid(vcpu, &eax, &ebx, &ecx, &edx, true);
	kvm_register_write(vcpu, VCPU_REGS_RAX, eax);
	kvm_register_write(vcpu, VCPU_REGS_RBX, ebx);
	kvm_register_write(vcpu, VCPU_REGS_RCX, ecx);
	kvm_register_write(vcpu, VCPU_REGS_RDX, edx);
	return kvm_skip_emulated_instruction(vcpu);
}
EXPORT_SYMBOL_GPL(kvm_emulate_cpuid);