/* SPDX-License-Identifier: GPL-2.0 */
#ifndef ARCH_X86_KVM_CPUID_H
#define ARCH_X86_KVM_CPUID_H

#include "x86.h"
#include "reverse_cpuid.h"
#include <asm/cpu.h>
#include <asm/processor.h>
#include <uapi/asm/kvm_para.h>

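/*
 * KVM's capability mask: one u32 of feature bits per reverse-CPUID leaf,
 * mirroring the layout of boot_cpu_data.x86_capability for the kernel-defined
 * leaves.  It holds the features KVM supports and can expose to guests, and
 * is populated by kvm_set_cpu_caps() during vendor module setup.
 */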
extern u32 kvm_cpu_caps[NR_KVM_CPU_CAPS] __read_mostly;
void kvm_set_cpu_caps(void);

void kvm_update_cpuid_runtime(struct kvm_vcpu *vcpu);
void kvm_update_pv_runtime(struct kvm_vcpu *vcpu);
struct kvm_cpuid_entry2 *kvm_find_cpuid_entry_index(struct kvm_vcpu *vcpu,
						    u32 function, u32 index);
struct kvm_cpuid_entry2 *kvm_find_cpuid_entry(struct kvm_vcpu *vcpu,
					      u32 function);
int kvm_dev_ioctl_get_cpuid(struct kvm_cpuid2 *cpuid,
			    struct kvm_cpuid_entry2 __user *entries,
			    unsigned int type);
int kvm_vcpu_ioctl_set_cpuid(struct kvm_vcpu *vcpu,
			     struct kvm_cpuid *cpuid,
			     struct kvm_cpuid_entry __user *entries);
int kvm_vcpu_ioctl_set_cpuid2(struct kvm_vcpu *vcpu,
			      struct kvm_cpuid2 *cpuid,
			      struct kvm_cpuid_entry2 __user *entries);
int kvm_vcpu_ioctl_get_cpuid2(struct kvm_vcpu *vcpu,
			      struct kvm_cpuid2 *cpuid,
			      struct kvm_cpuid_entry2 __user *entries);
bool kvm_cpuid(struct kvm_vcpu *vcpu, u32 *eax, u32 *ebx,
	       u32 *ecx, u32 *edx, bool exact_only);

u32 xstate_required_size(u64 xstate_bv, bool compacted);

int cpuid_query_maxphyaddr(struct kvm_vcpu *vcpu);
u64 kvm_vcpu_reserved_gpa_bits_raw(struct kvm_vcpu *vcpu);

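/*
 * vcpu->arch.maxphyaddr caches the guest's MAXPHYADDR, as computed by
 * cpuid_query_maxphyaddr() from the guest's CPUID entries, so that hot paths
 * don't have to re-walk the CPUID array on every check.
 */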
static inline int cpuid_maxphyaddr(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.maxphyaddr;
}

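/*
 * A GPA is legal iff it sets none of the address bits at or above the guest's
 * MAXPHYADDR; reserved_gpa_bits is the precomputed mask of those bits.
 * Illustrative example: with a 36-bit MAXPHYADDR the mask covers bits 63:36,
 * so GPA 0x1_0000_0000 is legal while 0x10_0000_0000 (bit 36 set) is not.
 */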
static inline bool kvm_vcpu_is_legal_gpa(struct kvm_vcpu *vcpu, gpa_t gpa)
{
	return !(gpa & vcpu->arch.reserved_gpa_bits);
}

static inline bool kvm_vcpu_is_illegal_gpa(struct kvm_vcpu *vcpu, gpa_t gpa)
{
	return !kvm_vcpu_is_legal_gpa(vcpu, gpa);
}

static inline bool kvm_vcpu_is_legal_aligned_gpa(struct kvm_vcpu *vcpu,
						 gpa_t gpa, gpa_t alignment)
{
	return IS_ALIGNED(gpa, alignment) && kvm_vcpu_is_legal_gpa(vcpu, gpa);
}

static inline bool page_address_valid(struct kvm_vcpu *vcpu, gpa_t gpa)
{
	return kvm_vcpu_is_legal_aligned_gpa(vcpu, gpa, PAGE_SIZE);
}

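/*
 * Replace one output register of @entry with KVM's capabilities for that
 * leaf.  The expression @leaf * 32 converts the reverse-CPUID leaf index into
 * the feature-bit namespace that cpuid_entry_get_reg() expects, since bit N
 * of leaf L is feature number L * 32 + N.
 */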
static __always_inline void cpuid_entry_override(struct kvm_cpuid_entry2 *entry,
						 unsigned int leaf)
{
	u32 *reg = cpuid_entry_get_reg(entry, leaf * 32);

	BUILD_BUG_ON(leaf >= ARRAY_SIZE(kvm_cpu_caps));
	*reg = kvm_cpu_caps[leaf];
}

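/*
 * Translate an X86_FEATURE_* constant to its (function, index, register)
 * location via the reverse-CPUID table, then look up the matching entry in
 * this vCPU's CPUID.  Returns NULL if userspace never defined that leaf for
 * the guest.
 */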
static __always_inline u32 *guest_cpuid_get_register(struct kvm_vcpu *vcpu,
						     unsigned int x86_feature)
{
	const struct cpuid_reg cpuid = x86_feature_cpuid(x86_feature);
	struct kvm_cpuid_entry2 *entry;

	entry = kvm_find_cpuid_entry_index(vcpu, cpuid.function, cpuid.index);
	if (!entry)
		return NULL;

	return __cpuid_entry_get_reg(entry, cpuid.reg);
}

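/*
 * Check whether the guest's CPUID, as configured by userspace, advertises a
 * feature; a missing leaf reads as "not supported".  Illustrative use by a
 * hypothetical caller (not part of this header):
 *
 *	if (!guest_cpuid_has(vcpu, X86_FEATURE_XSAVE))
 *		return 1;	/* e.g. fail the emulated MSR access *\/
 */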
static __always_inline bool guest_cpuid_has(struct kvm_vcpu *vcpu,
					    unsigned int x86_feature)
{
	u32 *reg;

	reg = guest_cpuid_get_register(vcpu, x86_feature);
	if (!reg)
		return false;

	return *reg & __feature_bit(x86_feature);
}

static __always_inline void guest_cpuid_clear(struct kvm_vcpu *vcpu,
					      unsigned int x86_feature)
{
	u32 *reg;

	reg = guest_cpuid_get_register(vcpu, x86_feature);
	if (reg)
		*reg &= ~__feature_bit(x86_feature);
}

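/*
 * The vendor checks below parse the EBX/ECX/EDX vendor string from the
 * guest's CPUID leaf 0 ("AuthenticAMD", "HygonGenuine", "GenuineIntel"),
 * i.e. they reflect what userspace told the guest, not the host CPU.
 */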
static inline bool guest_cpuid_is_amd_or_hygon(struct kvm_vcpu *vcpu)
{
	struct kvm_cpuid_entry2 *best;

	best = kvm_find_cpuid_entry(vcpu, 0);
	return best &&
	       (is_guest_vendor_amd(best->ebx, best->ecx, best->edx) ||
		is_guest_vendor_hygon(best->ebx, best->ecx, best->edx));
}

static inline bool guest_cpuid_is_intel(struct kvm_vcpu *vcpu)
{
	struct kvm_cpuid_entry2 *best;

	best = kvm_find_cpuid_entry(vcpu, 0);
	return best && is_guest_vendor_intel(best->ebx, best->ecx, best->edx);
}

static inline int guest_cpuid_family(struct kvm_vcpu *vcpu)
{
	struct kvm_cpuid_entry2 *best;

	best = kvm_find_cpuid_entry(vcpu, 0x1);
	if (!best)
		return -1;

	return x86_family(best->eax);
}

static inline int guest_cpuid_model(struct kvm_vcpu *vcpu)
{
	struct kvm_cpuid_entry2 *best;

	best = kvm_find_cpuid_entry(vcpu, 0x1);
	if (!best)
		return -1;

	return x86_model(best->eax);
}

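/*
 * True when the guest's CPUID leaf 0x1 reports the same model number as the
 * host CPU; callers can use this to gate features whose semantics depend on
 * the exact host model (Intel PT is an example of such a feature).
 */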
static inline bool cpuid_model_is_consistent(struct kvm_vcpu *vcpu)
{
	return boot_cpu_data.x86_model == guest_cpuid_model(vcpu);
}

static inline int guest_cpuid_stepping(struct kvm_vcpu *vcpu)
{
	struct kvm_cpuid_entry2 *best;

	best = kvm_find_cpuid_entry(vcpu, 0x1);
	if (!best)
		return -1;

	return x86_stepping(best->eax);
}

static inline bool guest_has_spec_ctrl_msr(struct kvm_vcpu *vcpu)
{
	return (guest_cpuid_has(vcpu, X86_FEATURE_SPEC_CTRL) ||
		guest_cpuid_has(vcpu, X86_FEATURE_AMD_STIBP) ||
		guest_cpuid_has(vcpu, X86_FEATURE_AMD_IBRS) ||
		guest_cpuid_has(vcpu, X86_FEATURE_AMD_SSBD));
}

static inline bool guest_has_pred_cmd_msr(struct kvm_vcpu *vcpu)
{
	return (guest_cpuid_has(vcpu, X86_FEATURE_SPEC_CTRL) ||
		guest_cpuid_has(vcpu, X86_FEATURE_AMD_IBPB) ||
		guest_cpuid_has(vcpu, X86_FEATURE_SBPB));
}

static inline bool supports_cpuid_fault(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.msr_platform_info & MSR_PLATFORM_INFO_CPUID_FAULT;
}

static inline bool cpuid_fault_enabled(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.msr_misc_features_enables &
		MSR_MISC_FEATURES_ENABLES_CPUID_FAULT;
}

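/*
 * The kvm_cpu_cap_*() helpers operate on KVM's own capability mask, not on
 * any vCPU's CPUID.  __feature_leaf() maps an X86_FEATURE_* constant to its
 * u32 word in kvm_cpu_caps, and reverse_cpuid_check() fails the build if that
 * word isn't backed by the reverse-CPUID table; this requires the feature to
 * be a compile-time constant, hence __always_inline.
 */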
static __always_inline void kvm_cpu_cap_clear(unsigned int x86_feature)
{
	unsigned int x86_leaf = __feature_leaf(x86_feature);

	reverse_cpuid_check(x86_leaf);
	kvm_cpu_caps[x86_leaf] &= ~__feature_bit(x86_feature);
}

static __always_inline void kvm_cpu_cap_set(unsigned int x86_feature)
{
	unsigned int x86_leaf = __feature_leaf(x86_feature);

	reverse_cpuid_check(x86_leaf);
	kvm_cpu_caps[x86_leaf] |= __feature_bit(x86_feature);
}

static __always_inline u32 kvm_cpu_cap_get(unsigned int x86_feature)
{
	unsigned int x86_leaf = __feature_leaf(x86_feature);

	reverse_cpuid_check(x86_leaf);
	return kvm_cpu_caps[x86_leaf] & __feature_bit(x86_feature);
}

static __always_inline bool kvm_cpu_cap_has(unsigned int x86_feature)
{
	return !!kvm_cpu_cap_get(x86_feature);
}

static __always_inline void kvm_cpu_cap_check_and_set(unsigned int x86_feature)
{
	if (boot_cpu_has(x86_feature))
		kvm_cpu_cap_set(x86_feature);
}

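/*
 * Paravirt feature check: unless userspace opted in to enforcement (via
 * KVM_CAP_ENFORCE_PV_FEATURE_CPUID), KVM treats every KVM_FEATURE_* bit as
 * available so that guests with incomplete CPUID keep working.
 */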
static __always_inline bool guest_pv_has(struct kvm_vcpu *vcpu,
					 unsigned int kvm_feature)
{
	if (!vcpu->arch.pv_cpuid.enforce)
		return true;

	return vcpu->arch.pv_cpuid.features & (1u << kvm_feature);
}

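/*
 * "Governed" features are tracked per-vCPU in a bitmap so that hot paths can
 * answer "does KVM support it AND does guest CPUID have it?" with a single
 * test_bit().  The enum is generated via X-macro: each KVM_GOVERNED_FEATURE()
 * entry in governed_features.h expands to an enumerator here and to a case
 * label in kvm_governed_feature_index() below.  Sketch of what an entry in
 * governed_features.h looks like (the authoritative list lives in that file):
 *
 *	KVM_GOVERNED_X86_FEATURE(GTSE)
 */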
enum kvm_governed_features {
#define KVM_GOVERNED_FEATURE(x) KVM_GOVERNED_##x,
#include "governed_features.h"
	KVM_NR_GOVERNED_FEATURES
};

static __always_inline int kvm_governed_feature_index(unsigned int x86_feature)
{
	switch (x86_feature) {
#define KVM_GOVERNED_FEATURE(x) case x: return KVM_GOVERNED_##x;
#include "governed_features.h"
	default:
		return -1;
	}
}

static __always_inline bool kvm_is_governed_feature(unsigned int x86_feature)
{
	return kvm_governed_feature_index(x86_feature) >= 0;
}

static __always_inline void kvm_governed_feature_set(struct kvm_vcpu *vcpu,
						     unsigned int x86_feature)
{
	BUILD_BUG_ON(!kvm_is_governed_feature(x86_feature));

	__set_bit(kvm_governed_feature_index(x86_feature),
		  vcpu->arch.governed_features.enabled);
}

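/*
 * Enable a governed feature only if KVM supports it and the guest's CPUID
 * advertises it; vendor code invokes this while updating a vCPU's CPUID.
 * Illustrative use by a hypothetical caller:
 *
 *	kvm_governed_feature_check_and_set(vcpu, X86_FEATURE_VMX);
 */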
static __always_inline void kvm_governed_feature_check_and_set(struct kvm_vcpu *vcpu,
							       unsigned int x86_feature)
{
	if (kvm_cpu_cap_has(x86_feature) && guest_cpuid_has(vcpu, x86_feature))
		kvm_governed_feature_set(vcpu, x86_feature);
}

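/*
 * Fast query for the above: a single bit test, with a build-time guarantee
 * that the feature is actually governed.
 */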
static __always_inline bool guest_can_use(struct kvm_vcpu *vcpu,
					  unsigned int x86_feature)
{
	BUILD_BUG_ON(!kvm_is_governed_feature(x86_feature));

	return test_bit(kvm_governed_feature_index(x86_feature),
			vcpu->arch.governed_features.enabled);
}

#endif