/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __KVM_X86_MMU_H
#define __KVM_X86_MMU_H

#include <linux/kvm_host.h>
#include "kvm_cache_regs.h"
#include "cpuid.h"

extern bool __read_mostly enable_mmio_caching;

#define PT_WRITABLE_SHIFT 1
#define PT_USER_SHIFT 2

#define PT_PRESENT_MASK (1ULL << 0)
#define PT_WRITABLE_MASK (1ULL << PT_WRITABLE_SHIFT)
#define PT_USER_MASK (1ULL << PT_USER_SHIFT)
#define PT_PWT_MASK (1ULL << 3)
#define PT_PCD_MASK (1ULL << 4)
#define PT_ACCESSED_SHIFT 5
#define PT_ACCESSED_MASK (1ULL << PT_ACCESSED_SHIFT)
#define PT_DIRTY_SHIFT 6
#define PT_DIRTY_MASK (1ULL << PT_DIRTY_SHIFT)
#define PT_PAGE_SIZE_SHIFT 7
#define PT_PAGE_SIZE_MASK (1ULL << PT_PAGE_SIZE_SHIFT)
#define PT_PAT_MASK (1ULL << 7)
#define PT_GLOBAL_MASK (1ULL << 8)
#define PT64_NX_SHIFT 63
#define PT64_NX_MASK (1ULL << PT64_NX_SHIFT)

#define PT_PAT_SHIFT 7
#define PT_DIR_PAT_SHIFT 12
#define PT_DIR_PAT_MASK (1ULL << PT_DIR_PAT_SHIFT)

#define PT64_ROOT_5LEVEL 5
#define PT64_ROOT_4LEVEL 4
#define PT32_ROOT_LEVEL 2
#define PT32E_ROOT_LEVEL 3

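/*
 * CR0, CR4 and EFER bits that feed into the MMU role and therefore require
 * the MMU to be reconfigured when the guest toggles them.
 */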
#define KVM_MMU_CR4_ROLE_BITS (X86_CR4_PSE | X86_CR4_PAE | X86_CR4_LA57 | \
                               X86_CR4_SMEP | X86_CR4_SMAP | X86_CR4_PKE)

#define KVM_MMU_CR0_ROLE_BITS (X86_CR0_PG | X86_CR0_WP)
#define KVM_MMU_EFER_ROLE_BITS (EFER_LME | EFER_NX)

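/*
 * Build a mask with bits [s, e] (inclusive) set, e.g. rsvd_bits(3, 5) yields
 * 0x38.  Returns 0 if 'e' < 's' at runtime; constant arguments with 'e' < 's',
 * or a constant 'e' > 63, are rejected at build time.
 */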
static __always_inline u64 rsvd_bits(int s, int e)
{
        BUILD_BUG_ON(__builtin_constant_p(e) && __builtin_constant_p(s) && e < s);

        if (__builtin_constant_p(e))
                BUILD_BUG_ON(e > 63);
        else
                e &= 63;

        if (e < s)
                return 0;

        return ((2ULL << (e - s)) - 1) << s;
}

/*
 * The number of non-reserved physical address bits irrespective of features
 * that repurpose legal bits, e.g. MKTME.
 */
extern u8 __read_mostly shadow_phys_bits;

static inline gfn_t kvm_mmu_max_gfn(void)
{
        /*
         * Note that this uses the host MAXPHYADDR, not the guest's.
         * EPT/NPT cannot support GPAs that would exceed host.MAXPHYADDR;
         * assuming KVM is running on bare metal, guest accesses beyond
         * host.MAXPHYADDR will hit a #PF(RSVD) and never cause a vmexit
         * (either EPT Violation/Misconfig or #NPF), and so KVM will never
         * install a SPTE for such addresses.  If KVM is running as a VM
         * itself, on the other hand, it might see a MAXPHYADDR that is less
         * than hardware's real MAXPHYADDR.  Using the host MAXPHYADDR
         * disallows such SPTEs entirely and simplifies the TDP MMU.
         */
        int max_gpa_bits = likely(tdp_enabled) ? shadow_phys_bits : 52;

        return (1ULL << (max_gpa_bits - PAGE_SHIFT)) - 1;
}

static inline u8 kvm_get_shadow_phys_bits(void)
{
        /*
         * boot_cpu_data.x86_phys_bits is reduced when MKTME or SME is
         * detected in the CPU detection code, but the processor treats those
         * reduced bits as 'keyID', so they are not reserved bits.  Therefore
         * KVM needs to look at the physical address bits reported by CPUID.
         */
        if (likely(boot_cpu_data.extended_cpuid_level >= 0x80000008))
                return cpuid_eax(0x80000008) & 0xff;

        /*
         * Quite weird to have VMX or SVM but not MAXPHYADDR; probably a VM
         * with custom CPUID.  Proceed with whatever the kernel found since
         * these features aren't virtualizable (SME/SEV also require CPUIDs
         * higher than 0x80000008).
         */
        return boot_cpu_data.x86_phys_bits;
}

void kvm_mmu_set_mmio_spte_mask(u64 mmio_value, u64 mmio_mask, u64 access_mask);
void kvm_mmu_set_me_spte_mask(u64 me_value, u64 me_mask);
void kvm_mmu_set_ept_masks(bool has_ad_bits, bool has_exec_only);

void kvm_init_mmu(struct kvm_vcpu *vcpu);
void kvm_init_shadow_npt_mmu(struct kvm_vcpu *vcpu, unsigned long cr0,
                             unsigned long cr4, u64 efer, gpa_t nested_cr3);
void kvm_init_shadow_ept_mmu(struct kvm_vcpu *vcpu, bool execonly,
                             int huge_page_level, bool accessed_dirty,
                             gpa_t new_eptp);
bool kvm_can_do_async_pf(struct kvm_vcpu *vcpu);
int kvm_handle_page_fault(struct kvm_vcpu *vcpu, u64 error_code,
                          u64 fault_address, char *insn, int insn_len);
void __kvm_mmu_refresh_passthrough_bits(struct kvm_vcpu *vcpu,
                                        struct kvm_mmu *mmu);

int kvm_mmu_load(struct kvm_vcpu *vcpu);
void kvm_mmu_unload(struct kvm_vcpu *vcpu);
void kvm_mmu_free_obsolete_roots(struct kvm_vcpu *vcpu);
void kvm_mmu_sync_roots(struct kvm_vcpu *vcpu);
void kvm_mmu_sync_prev_roots(struct kvm_vcpu *vcpu);
void kvm_mmu_track_write(struct kvm_vcpu *vcpu, gpa_t gpa, const u8 *new,
                         int bytes);

static inline int kvm_mmu_reload(struct kvm_vcpu *vcpu)
{
        if (likely(vcpu->arch.mmu->root.hpa != INVALID_PAGE))
                return 0;

        return kvm_mmu_load(vcpu);
}

static inline unsigned long kvm_get_pcid(struct kvm_vcpu *vcpu, gpa_t cr3)
{
        BUILD_BUG_ON((X86_CR3_PCID_MASK & PAGE_MASK) != 0);

        return kvm_is_cr4_bit_set(vcpu, X86_CR4_PCIDE)
               ? cr3 & X86_CR3_PCID_MASK
               : 0;
}

static inline unsigned long kvm_get_active_pcid(struct kvm_vcpu *vcpu)
{
        return kvm_get_pcid(vcpu, kvm_read_cr3(vcpu));
}

static inline void kvm_mmu_load_pgd(struct kvm_vcpu *vcpu)
{
        u64 root_hpa = vcpu->arch.mmu->root.hpa;

        if (!VALID_PAGE(root_hpa))
                return;

        static_call(kvm_x86_load_mmu_pgd)(vcpu, root_hpa,
                                          vcpu->arch.mmu->root_role.level);
}

static inline void kvm_mmu_refresh_passthrough_bits(struct kvm_vcpu *vcpu,
                                                    struct kvm_mmu *mmu)
{
        /*
         * When EPT is enabled, KVM may passthrough CR0.WP to the guest, i.e.
         * @mmu's snapshot of CR0.WP and thus all related paging metadata may
         * be stale.  Refresh CR0.WP and the metadata on-demand when checking
         * for permission faults.  Exempt nested MMUs, i.e. MMUs for shadowing
         * nEPT and nNPT, as CR0.WP is ignored in both cases.  Note, KVM does
         * need to refresh nested_mmu, a.k.a. the walker used to translate L2
         * GVAs to GPAs, as that "MMU" needs to honor L2's CR0.WP.
         */
        if (!tdp_enabled || mmu == &vcpu->arch.guest_mmu)
                return;

        __kvm_mmu_refresh_passthrough_bits(vcpu, mmu);
}

/*
 * Check if a given access (described through the I/D, W/R and U/S bits of a
 * page fault error code pfec) causes a permission fault with the given PTE
 * access rights (in ACC_* format).
 *
 * Return zero if the access does not fault; return the page fault error code
 * if the access faults.
 */
static inline u8 permission_fault(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
                                  unsigned pte_access, unsigned pte_pkey,
                                  u64 access)
{
        /* strip nested paging fault error codes */
        unsigned int pfec = access;
        unsigned long rflags = static_call(kvm_x86_get_rflags)(vcpu);

        /*
         * For explicit supervisor accesses, SMAP is disabled if EFLAGS.AC = 1.
         * For implicit supervisor accesses, SMAP cannot be overridden.
         *
         * SMAP applies only to supervisor accesses; for user accesses,
         * not_smap may be set or clear and has no bearing on the result.
         *
         * We put the SMAP checking bit in place of the PFERR_RSVD_MASK bit;
         * this bit will always be zero in pfec, but it will be one in index
         * if SMAP checks are being disabled.
         */
        u64 implicit_access = access & PFERR_IMPLICIT_ACCESS;
        bool not_smap = ((rflags & X86_EFLAGS_AC) | implicit_access) == X86_EFLAGS_AC;
        int index = (pfec + (not_smap << PFERR_RSVD_BIT)) >> 1;
        u32 errcode = PFERR_PRESENT_MASK;
        bool fault;

        kvm_mmu_refresh_passthrough_bits(vcpu, mmu);

        fault = (mmu->permissions[index] >> pte_access) & 1;

        WARN_ON(pfec & (PFERR_PK_MASK | PFERR_RSVD_MASK));
        if (unlikely(mmu->pkru_mask)) {
                u32 pkru_bits, offset;

                /*
                 * PKRU defines 32 bits: there are 16 domains and 2 attribute
                 * bits per domain in pkru.  pte_pkey is the index of the
                 * protection domain, so pte_pkey * 2 is the index of the
                 * first bit for the domain.
                 */
                pkru_bits = (vcpu->arch.pkru >> (pte_pkey * 2)) & 3;

                /* clear present bit, replace PFEC.RSVD with ACC_USER_MASK. */
                offset = (pfec & ~1) +
                         ((pte_access & PT_USER_MASK) << (PFERR_RSVD_BIT - PT_USER_SHIFT));

                pkru_bits &= mmu->pkru_mask >> offset;
                errcode |= -pkru_bits & PFERR_PK_MASK;
                fault |= (pkru_bits != 0);
        }

        return -(u32)fault & errcode;
}

bool __kvm_mmu_honors_guest_mtrrs(bool vm_has_noncoherent_dma);

static inline bool kvm_mmu_honors_guest_mtrrs(struct kvm *kvm)
{
        return __kvm_mmu_honors_guest_mtrrs(kvm_arch_has_noncoherent_dma(kvm));
}

void kvm_zap_gfn_range(struct kvm *kvm, gfn_t gfn_start, gfn_t gfn_end);

int kvm_arch_write_log_dirty(struct kvm_vcpu *vcpu);

int kvm_mmu_post_init_vm(struct kvm *kvm);
void kvm_mmu_pre_destroy_vm(struct kvm *kvm);

static inline bool kvm_shadow_root_allocated(struct kvm *kvm)
{
        /*
         * Read shadow_root_allocated before related pointers.  Hence, threads
         * reading shadow_root_allocated in any lock context are guaranteed to
         * see the pointers.  Pairs with smp_store_release in
         * mmu_first_shadow_root_alloc.
         */
        return smp_load_acquire(&kvm->arch.shadow_root_allocated);
}

#ifdef CONFIG_X86_64
extern bool tdp_mmu_enabled;
#else
#define tdp_mmu_enabled false
#endif

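/*
 * Memslot rmaps are used by the shadow MMU; when the TDP MMU is in use they
 * are allocated on demand, the first time a shadow root is needed.
 */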
static inline bool kvm_memslots_have_rmaps(struct kvm *kvm)
{
        return !tdp_mmu_enabled || kvm_shadow_root_allocated(kvm);
}

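/*
 * Return the distance between @gfn and @base_gfn in units of level-sized
 * pages, i.e. the difference of their huge page frame numbers at @level.
 */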
static inline gfn_t gfn_to_index(gfn_t gfn, gfn_t base_gfn, int level)
{
        /* KVM_HPAGE_GFN_SHIFT(PG_LEVEL_4K) must be 0. */
        return (gfn >> KVM_HPAGE_GFN_SHIFT(level)) -
               (base_gfn >> KVM_HPAGE_GFN_SHIFT(level));
}

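/*
 * Number of level-sized pages needed to span the first @npages pages of
 * @slot, starting at slot->base_gfn.
 */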
static inline unsigned long
__kvm_mmu_slot_lpages(struct kvm_memory_slot *slot, unsigned long npages,
                      int level)
{
        return gfn_to_index(slot->base_gfn + npages - 1,
                            slot->base_gfn, level) + 1;
}

static inline unsigned long
kvm_mmu_slot_lpages(struct kvm_memory_slot *slot, int level)
{
        return __kvm_mmu_slot_lpages(slot, slot->npages, level);
}

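/* Adjust the per-level count of mapped pages; @count may be negative. */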
static inline void kvm_update_page_stats(struct kvm *kvm, int level, int count)
{
        atomic64_add(count, &kvm->stat.pages[level - 1]);
}

gpa_t translate_nested_gpa(struct kvm_vcpu *vcpu, gpa_t gpa, u64 access,
                           struct x86_exception *exception);

static inline gpa_t kvm_translate_gpa(struct kvm_vcpu *vcpu,
                                      struct kvm_mmu *mmu,
                                      gpa_t gpa, u64 access,
                                      struct x86_exception *exception)
{
        if (mmu != &vcpu->arch.nested_mmu)
                return gpa;
        return translate_nested_gpa(vcpu, gpa, access, exception);
}
#endif