1 | // SPDX-License-Identifier: GPL-2.0 |
2 | #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt |
3 | |
4 | #include "mmu.h" |
5 | #include "mmu_internal.h" |
6 | #include "mmutrace.h" |
7 | #include "tdp_iter.h" |
8 | #include "tdp_mmu.h" |
9 | #include "spte.h" |
10 | |
11 | #include <asm/cmpxchg.h> |
12 | #include <trace/events/kvm.h> |
13 | |
14 | /* Initializes the TDP MMU for the VM, if enabled. */ |
15 | void kvm_mmu_init_tdp_mmu(struct kvm *kvm) |
16 | { |
	INIT_LIST_HEAD(&kvm->arch.tdp_mmu_roots);
18 | spin_lock_init(&kvm->arch.tdp_mmu_pages_lock); |
19 | } |
20 | |
21 | /* Arbitrarily returns true so that this may be used in if statements. */ |
22 | static __always_inline bool kvm_lockdep_assert_mmu_lock_held(struct kvm *kvm, |
23 | bool shared) |
24 | { |
25 | if (shared) |
26 | lockdep_assert_held_read(&kvm->mmu_lock); |
27 | else |
28 | lockdep_assert_held_write(&kvm->mmu_lock); |
29 | |
30 | return true; |
31 | } |
32 | |
33 | void kvm_mmu_uninit_tdp_mmu(struct kvm *kvm) |
34 | { |
35 | /* |
	 * Invalidate all roots, which, besides the obvious, schedules all roots
37 | * for zapping and thus puts the TDP MMU's reference to each root, i.e. |
38 | * ultimately frees all roots. |
39 | */ |
40 | kvm_tdp_mmu_invalidate_all_roots(kvm); |
41 | kvm_tdp_mmu_zap_invalidated_roots(kvm); |
42 | |
43 | WARN_ON(atomic64_read(&kvm->arch.tdp_mmu_pages)); |
44 | WARN_ON(!list_empty(&kvm->arch.tdp_mmu_roots)); |
45 | |
46 | /* |
47 | * Ensure that all the outstanding RCU callbacks to free shadow pages |
48 | * can run before the VM is torn down. Putting the last reference to |
49 | * zapped roots will create new callbacks. |
50 | */ |
51 | rcu_barrier(); |
52 | } |
53 | |
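/* Free the page table page and the kvm_mmu_page header backing a TDP MMU SP. */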
54 | static void tdp_mmu_free_sp(struct kvm_mmu_page *sp) |
55 | { |
56 | free_page((unsigned long)sp->spt); |
	kmem_cache_free(mmu_page_header_cache, sp);
58 | } |
59 | |
60 | /* |
61 | * This is called through call_rcu in order to free TDP page table memory |
62 | * safely with respect to other kernel threads that may be operating on |
63 | * the memory. |
64 | * By only accessing TDP MMU page table memory in an RCU read critical |
65 | * section, and freeing it after a grace period, lockless access to that |
66 | * memory won't use it after it is freed. |
67 | */ |
68 | static void tdp_mmu_free_sp_rcu_callback(struct rcu_head *head) |
69 | { |
70 | struct kvm_mmu_page *sp = container_of(head, struct kvm_mmu_page, |
71 | rcu_head); |
72 | |
73 | tdp_mmu_free_sp(sp); |
74 | } |
75 | |
76 | void kvm_tdp_mmu_put_root(struct kvm *kvm, struct kvm_mmu_page *root) |
77 | { |
	if (!refcount_dec_and_test(&root->tdp_mmu_root_count))
79 | return; |
80 | |
81 | /* |
82 | * The TDP MMU itself holds a reference to each root until the root is |
	 * explicitly invalidated, i.e. the final reference should never be
	 * put for a valid root.
	 */
	KVM_BUG_ON(!is_tdp_mmu_page(root) || !root->role.invalid, kvm);

	spin_lock(&kvm->arch.tdp_mmu_pages_lock);
	list_del_rcu(&root->link);
	spin_unlock(&kvm->arch.tdp_mmu_pages_lock);
	call_rcu(&root->rcu_head, tdp_mmu_free_sp_rcu_callback);
92 | } |
93 | |
94 | /* |
95 | * Returns the next root after @prev_root (or the first root if @prev_root is |
96 | * NULL). A reference to the returned root is acquired, and the reference to |
97 | * @prev_root is released (the caller obviously must hold a reference to |
98 | * @prev_root if it's non-NULL). |
99 | * |
100 | * If @only_valid is true, invalid roots are skipped. |
101 | * |
102 | * Returns NULL if the end of tdp_mmu_roots was reached. |
103 | */ |
104 | static struct kvm_mmu_page *tdp_mmu_next_root(struct kvm *kvm, |
105 | struct kvm_mmu_page *prev_root, |
106 | bool only_valid) |
107 | { |
108 | struct kvm_mmu_page *next_root; |
109 | |
110 | /* |
111 | * While the roots themselves are RCU-protected, fields such as |
112 | * role.invalid are protected by mmu_lock. |
113 | */ |
114 | lockdep_assert_held(&kvm->mmu_lock); |
115 | |
116 | rcu_read_lock(); |
117 | |
118 | if (prev_root) |
119 | next_root = list_next_or_null_rcu(&kvm->arch.tdp_mmu_roots, |
120 | &prev_root->link, |
121 | typeof(*prev_root), link); |
122 | else |
123 | next_root = list_first_or_null_rcu(&kvm->arch.tdp_mmu_roots, |
124 | typeof(*next_root), link); |
125 | |
126 | while (next_root) { |
127 | if ((!only_valid || !next_root->role.invalid) && |
		    kvm_tdp_mmu_get_root(next_root))
129 | break; |
130 | |
131 | next_root = list_next_or_null_rcu(&kvm->arch.tdp_mmu_roots, |
132 | &next_root->link, typeof(*next_root), link); |
133 | } |
134 | |
135 | rcu_read_unlock(); |
136 | |
137 | if (prev_root) |
		kvm_tdp_mmu_put_root(kvm, prev_root);
139 | |
140 | return next_root; |
141 | } |
142 | |
143 | /* |
144 | * Note: this iterator gets and puts references to the roots it iterates over. |
145 | * This makes it safe to release the MMU lock and yield within the loop, but |
146 | * if exiting the loop early, the caller must drop the reference to the most |
147 | * recent root. (Unless keeping a live reference is desirable.) |
148 | * |
149 | * If shared is set, this function is operating under the MMU lock in read |
150 | * mode. |
151 | */ |
152 | #define __for_each_tdp_mmu_root_yield_safe(_kvm, _root, _as_id, _only_valid) \ |
153 | for (_root = tdp_mmu_next_root(_kvm, NULL, _only_valid); \ |
154 | ({ lockdep_assert_held(&(_kvm)->mmu_lock); }), _root; \ |
155 | _root = tdp_mmu_next_root(_kvm, _root, _only_valid)) \ |
156 | if (_as_id >= 0 && kvm_mmu_page_as_id(_root) != _as_id) { \ |
157 | } else |
158 | |
159 | #define for_each_valid_tdp_mmu_root_yield_safe(_kvm, _root, _as_id) \ |
160 | __for_each_tdp_mmu_root_yield_safe(_kvm, _root, _as_id, true) |
161 | |
162 | #define for_each_tdp_mmu_root_yield_safe(_kvm, _root) \ |
163 | for (_root = tdp_mmu_next_root(_kvm, NULL, false); \ |
164 | ({ lockdep_assert_held(&(_kvm)->mmu_lock); }), _root; \ |
165 | _root = tdp_mmu_next_root(_kvm, _root, false)) |
166 | |
167 | /* |
168 | * Iterate over all TDP MMU roots. Requires that mmu_lock be held for write, |
169 | * the implication being that any flow that holds mmu_lock for read is |
170 | * inherently yield-friendly and should use the yield-safe variant above. |
171 | * Holding mmu_lock for write obviates the need for RCU protection as the list |
172 | * is guaranteed to be stable. |
173 | */ |
174 | #define __for_each_tdp_mmu_root(_kvm, _root, _as_id, _only_valid) \ |
175 | list_for_each_entry(_root, &_kvm->arch.tdp_mmu_roots, link) \ |
176 | if (kvm_lockdep_assert_mmu_lock_held(_kvm, false) && \ |
177 | ((_as_id >= 0 && kvm_mmu_page_as_id(_root) != _as_id) || \ |
178 | ((_only_valid) && (_root)->role.invalid))) { \ |
179 | } else |
180 | |
181 | #define for_each_tdp_mmu_root(_kvm, _root, _as_id) \ |
182 | __for_each_tdp_mmu_root(_kvm, _root, _as_id, false) |
183 | |
184 | #define for_each_valid_tdp_mmu_root(_kvm, _root, _as_id) \ |
185 | __for_each_tdp_mmu_root(_kvm, _root, _as_id, true) |
186 | |
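/* Allocate a shadow page (header + page table page) from the vCPU's caches. */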
187 | static struct kvm_mmu_page *tdp_mmu_alloc_sp(struct kvm_vcpu *vcpu) |
188 | { |
189 | struct kvm_mmu_page *sp; |
190 | |
	sp = kvm_mmu_memory_cache_alloc(&vcpu->arch.mmu_page_header_cache);
	sp->spt = kvm_mmu_memory_cache_alloc(&vcpu->arch.mmu_shadow_page_cache);
193 | |
194 | return sp; |
195 | } |
196 | |
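/* Initialize a newly allocated TDP MMU shadow page and fire the tracepoint. */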
197 | static void tdp_mmu_init_sp(struct kvm_mmu_page *sp, tdp_ptep_t sptep, |
198 | gfn_t gfn, union kvm_mmu_page_role role) |
199 | { |
	INIT_LIST_HEAD(&sp->possible_nx_huge_page_link);

	set_page_private(virt_to_page(sp->spt), (unsigned long)sp);

	sp->role = role;
	sp->gfn = gfn;
	sp->ptep = sptep;
	sp->tdp_mmu_page = true;

	trace_kvm_mmu_get_page(sp, true);
210 | } |
211 | |
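/*
 * Initialize @child_sp as a child of the page table entry @iter points at,
 * i.e. with the parent's role but one level lower.
 */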
212 | static void tdp_mmu_init_child_sp(struct kvm_mmu_page *child_sp, |
213 | struct tdp_iter *iter) |
214 | { |
215 | struct kvm_mmu_page *parent_sp; |
216 | union kvm_mmu_page_role role; |
217 | |
218 | parent_sp = sptep_to_sp(rcu_dereference(iter->sptep)); |
219 | |
220 | role = parent_sp->role; |
221 | role.level--; |
222 | |
	tdp_mmu_init_sp(child_sp, iter->sptep, iter->gfn, role);
224 | } |
225 | |
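/* Get, or create and link, a TDP MMU root for the vCPU's current MMU role. */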
226 | int kvm_tdp_mmu_alloc_root(struct kvm_vcpu *vcpu) |
227 | { |
228 | struct kvm_mmu *mmu = vcpu->arch.mmu; |
229 | union kvm_mmu_page_role role = mmu->root_role; |
230 | int as_id = kvm_mmu_role_as_id(role); |
231 | struct kvm *kvm = vcpu->kvm; |
232 | struct kvm_mmu_page *root; |
233 | |
234 | /* |
235 | * Check for an existing root before acquiring the pages lock to avoid |
236 | * unnecessary serialization if multiple vCPUs are loading a new root. |
237 | * E.g. when bringing up secondary vCPUs, KVM will already have created |
238 | * a valid root on behalf of the primary vCPU. |
239 | */ |
240 | read_lock(&kvm->mmu_lock); |
241 | |
242 | for_each_valid_tdp_mmu_root_yield_safe(kvm, root, as_id) { |
243 | if (root->role.word == role.word) |
244 | goto out_read_unlock; |
245 | } |
246 | |
	spin_lock(&kvm->arch.tdp_mmu_pages_lock);
248 | |
249 | /* |
250 | * Recheck for an existing root after acquiring the pages lock, another |
251 | * vCPU may have raced ahead and created a new usable root. Manually |
252 | * walk the list of roots as the standard macros assume that the pages |
253 | * lock is *not* held. WARN if grabbing a reference to a usable root |
254 | * fails, as the last reference to a root can only be put *after* the |
255 | * root has been invalidated, which requires holding mmu_lock for write. |
256 | */ |
257 | list_for_each_entry(root, &kvm->arch.tdp_mmu_roots, link) { |
258 | if (root->role.word == role.word && |
259 | !WARN_ON_ONCE(!kvm_tdp_mmu_get_root(root))) |
260 | goto out_spin_unlock; |
261 | } |
262 | |
263 | root = tdp_mmu_alloc_sp(vcpu); |
	tdp_mmu_init_sp(root, NULL, 0, role);
265 | |
266 | /* |
267 | * TDP MMU roots are kept until they are explicitly invalidated, either |
268 | * by a memslot update or by the destruction of the VM. Initialize the |
269 | * refcount to two; one reference for the vCPU, and one reference for |
270 | * the TDP MMU itself, which is held until the root is invalidated and |
271 | * is ultimately put by kvm_tdp_mmu_zap_invalidated_roots(). |
272 | */ |
	refcount_set(&root->tdp_mmu_root_count, 2);
	list_add_rcu(&root->link, &kvm->arch.tdp_mmu_roots);

out_spin_unlock:
	spin_unlock(&kvm->arch.tdp_mmu_pages_lock);
278 | out_read_unlock: |
279 | read_unlock(&kvm->mmu_lock); |
280 | /* |
281 | * Note, KVM_REQ_MMU_FREE_OBSOLETE_ROOTS will prevent entering the guest |
282 | * and actually consuming the root if it's invalidated after dropping |
283 | * mmu_lock, and the root can't be freed as this vCPU holds a reference. |
284 | */ |
285 | mmu->root.hpa = __pa(root->spt); |
286 | mmu->root.pgd = 0; |
287 | return 0; |
288 | } |
289 | |
290 | static void handle_changed_spte(struct kvm *kvm, int as_id, gfn_t gfn, |
291 | u64 old_spte, u64 new_spte, int level, |
292 | bool shared); |
293 | |
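/* Update KVM's page table accounting when a TDP MMU shadow page is (un)linked. */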
294 | static void tdp_account_mmu_page(struct kvm *kvm, struct kvm_mmu_page *sp) |
295 | { |
	kvm_account_pgtable_pages((void *)sp->spt, +1);
	atomic64_inc(&kvm->arch.tdp_mmu_pages);
}

static void tdp_unaccount_mmu_page(struct kvm *kvm, struct kvm_mmu_page *sp)
{
	kvm_account_pgtable_pages((void *)sp->spt, -1);
	atomic64_dec(&kvm->arch.tdp_mmu_pages);
304 | } |
305 | |
306 | /** |
307 | * tdp_mmu_unlink_sp() - Remove a shadow page from the list of used pages |
308 | * |
309 | * @kvm: kvm instance |
310 | * @sp: the page to be removed |
311 | */ |
312 | static void tdp_mmu_unlink_sp(struct kvm *kvm, struct kvm_mmu_page *sp) |
313 | { |
314 | tdp_unaccount_mmu_page(kvm, sp); |
315 | |
316 | if (!sp->nx_huge_page_disallowed) |
317 | return; |
318 | |
	spin_lock(&kvm->arch.tdp_mmu_pages_lock);
	sp->nx_huge_page_disallowed = false;
	untrack_possible_nx_huge_page(kvm, sp);
	spin_unlock(&kvm->arch.tdp_mmu_pages_lock);
323 | } |
324 | |
325 | /** |
326 | * handle_removed_pt() - handle a page table removed from the TDP structure |
327 | * |
328 | * @kvm: kvm instance |
329 | * @pt: the page removed from the paging structure |
330 | * @shared: This operation may not be running under the exclusive use |
331 | * of the MMU lock and the operation must synchronize with other |
332 | * threads that might be modifying SPTEs. |
333 | * |
334 | * Given a page table that has been removed from the TDP paging structure, |
335 | * iterates through the page table to clear SPTEs and free child page tables. |
336 | * |
337 | * Note that pt is passed in as a tdp_ptep_t, but it does not need RCU |
338 | * protection. Since this thread removed it from the paging structure, |
339 | * this thread will be responsible for ensuring the page is freed. Hence the |
340 | * early rcu_dereferences in the function. |
341 | */ |
342 | static void handle_removed_pt(struct kvm *kvm, tdp_ptep_t pt, bool shared) |
343 | { |
344 | struct kvm_mmu_page *sp = sptep_to_sp(rcu_dereference(pt)); |
345 | int level = sp->role.level; |
346 | gfn_t base_gfn = sp->gfn; |
347 | int i; |
348 | |
349 | trace_kvm_mmu_prepare_zap_page(sp); |
350 | |
351 | tdp_mmu_unlink_sp(kvm, sp); |
352 | |
353 | for (i = 0; i < SPTE_ENT_PER_PAGE; i++) { |
354 | tdp_ptep_t sptep = pt + i; |
355 | gfn_t gfn = base_gfn + i * KVM_PAGES_PER_HPAGE(level); |
356 | u64 old_spte; |
357 | |
358 | if (shared) { |
359 | /* |
360 | * Set the SPTE to a nonpresent value that other |
361 | * threads will not overwrite. If the SPTE was |
362 | * already marked as removed then another thread |
363 | * handling a page fault could overwrite it, so |
364 | * set the SPTE until it is set from some other |
365 | * value to the removed SPTE value. |
366 | */ |
367 | for (;;) { |
368 | old_spte = kvm_tdp_mmu_write_spte_atomic(sptep, REMOVED_SPTE); |
				if (!is_removed_spte(old_spte))
370 | break; |
371 | cpu_relax(); |
372 | } |
373 | } else { |
374 | /* |
375 | * If the SPTE is not MMU-present, there is no backing |
376 | * page associated with the SPTE and so no side effects |
377 | * that need to be recorded, and exclusive ownership of |
378 | * mmu_lock ensures the SPTE can't be made present. |
379 | * Note, zapping MMIO SPTEs is also unnecessary as they |
380 | * are guarded by the memslots generation, not by being |
381 | * unreachable. |
382 | */ |
383 | old_spte = kvm_tdp_mmu_read_spte(sptep); |
			if (!is_shadow_present_pte(old_spte))
385 | continue; |
386 | |
387 | /* |
388 | * Use the common helper instead of a raw WRITE_ONCE as |
389 | * the SPTE needs to be updated atomically if it can be |
390 | * modified by a different vCPU outside of mmu_lock. |
391 | * Even though the parent SPTE is !PRESENT, the TLB |
392 | * hasn't yet been flushed, and both Intel and AMD |
393 | * document that A/D assists can use upper-level PxE |
394 | * entries that are cached in the TLB, i.e. the CPU can |
395 | * still access the page and mark it dirty. |
396 | * |
397 | * No retry is needed in the atomic update path as the |
398 | * sole concern is dropping a Dirty bit, i.e. no other |
399 | * task can zap/remove the SPTE as mmu_lock is held for |
400 | * write. Marking the SPTE as a removed SPTE is not |
401 | * strictly necessary for the same reason, but using |
402 | * the remove SPTE value keeps the shared/exclusive |
403 | * paths consistent and allows the handle_changed_spte() |
404 | * call below to hardcode the new value to REMOVED_SPTE. |
405 | * |
406 | * Note, even though dropping a Dirty bit is the only |
407 | * scenario where a non-atomic update could result in a |
408 | * functional bug, simply checking the Dirty bit isn't |
409 | * sufficient as a fast page fault could read the upper |
410 | * level SPTE before it is zapped, and then make this |
411 | * target SPTE writable, resume the guest, and set the |
412 | * Dirty bit between reading the SPTE above and writing |
413 | * it here. |
414 | */ |
415 | old_spte = kvm_tdp_mmu_write_spte(sptep, old_spte, |
416 | REMOVED_SPTE, level); |
417 | } |
		handle_changed_spte(kvm, kvm_mmu_page_as_id(sp), gfn,
				    old_spte, REMOVED_SPTE, level, shared);
420 | } |
421 | |
	call_rcu(&sp->rcu_head, tdp_mmu_free_sp_rcu_callback);
423 | } |
424 | |
425 | /** |
426 | * handle_changed_spte - handle bookkeeping associated with an SPTE change |
427 | * @kvm: kvm instance |
428 | * @as_id: the address space of the paging structure the SPTE was a part of |
429 | * @gfn: the base GFN that was mapped by the SPTE |
430 | * @old_spte: The value of the SPTE before the change |
431 | * @new_spte: The value of the SPTE after the change |
432 | * @level: the level of the PT the SPTE is part of in the paging structure |
433 | * @shared: This operation may not be running under the exclusive use of |
434 | * the MMU lock and the operation must synchronize with other |
435 | * threads that might be modifying SPTEs. |
436 | * |
437 | * Handle bookkeeping that might result from the modification of a SPTE. Note, |
438 | * dirty logging updates are handled in common code, not here (see make_spte() |
439 | * and fast_pf_fix_direct_spte()). |
440 | */ |
441 | static void handle_changed_spte(struct kvm *kvm, int as_id, gfn_t gfn, |
442 | u64 old_spte, u64 new_spte, int level, |
443 | bool shared) |
444 | { |
	bool was_present = is_shadow_present_pte(old_spte);
	bool is_present = is_shadow_present_pte(new_spte);
	bool was_leaf = was_present && is_last_spte(old_spte, level);
	bool is_leaf = is_present && is_last_spte(new_spte, level);
	bool pfn_changed = spte_to_pfn(old_spte) != spte_to_pfn(new_spte);
450 | |
451 | WARN_ON_ONCE(level > PT64_ROOT_MAX_LEVEL); |
452 | WARN_ON_ONCE(level < PG_LEVEL_4K); |
453 | WARN_ON_ONCE(gfn & (KVM_PAGES_PER_HPAGE(level) - 1)); |
454 | |
455 | /* |
456 | * If this warning were to trigger it would indicate that there was a |
457 | * missing MMU notifier or a race with some notifier handler. |
458 | * A present, leaf SPTE should never be directly replaced with another |
459 | * present leaf SPTE pointing to a different PFN. A notifier handler |
460 | * should be zapping the SPTE before the main MM's page table is |
461 | * changed, or the SPTE should be zeroed, and the TLBs flushed by the |
462 | * thread before replacement. |
463 | */ |
464 | if (was_leaf && is_leaf && pfn_changed) { |
465 | pr_err("Invalid SPTE change: cannot replace a present leaf\n" |
466 | "SPTE with another present leaf SPTE mapping a\n" |
467 | "different PFN!\n" |
468 | "as_id: %d gfn: %llx old_spte: %llx new_spte: %llx level: %d" , |
469 | as_id, gfn, old_spte, new_spte, level); |
470 | |
471 | /* |
472 | * Crash the host to prevent error propagation and guest data |
473 | * corruption. |
474 | */ |
475 | BUG(); |
476 | } |
477 | |
478 | if (old_spte == new_spte) |
479 | return; |
480 | |
481 | trace_kvm_tdp_mmu_spte_changed(as_id, gfn, level, old_spte, new_spte); |
482 | |
483 | if (is_leaf) |
		check_spte_writable_invariants(new_spte);
485 | |
486 | /* |
487 | * The only times a SPTE should be changed from a non-present to |
488 | * non-present state is when an MMIO entry is installed/modified/ |
489 | * removed. In that case, there is nothing to do here. |
490 | */ |
491 | if (!was_present && !is_present) { |
492 | /* |
493 | * If this change does not involve a MMIO SPTE or removed SPTE, |
494 | * it is unexpected. Log the change, though it should not |
495 | * impact the guest since both the former and current SPTEs |
496 | * are nonpresent. |
497 | */ |
498 | if (WARN_ON_ONCE(!is_mmio_spte(old_spte) && |
499 | !is_mmio_spte(new_spte) && |
500 | !is_removed_spte(new_spte))) |
501 | pr_err("Unexpected SPTE change! Nonpresent SPTEs\n" |
502 | "should not be replaced with another,\n" |
503 | "different nonpresent SPTE, unless one or both\n" |
504 | "are MMIO SPTEs, or the new SPTE is\n" |
505 | "a temporary removed SPTE.\n" |
506 | "as_id: %d gfn: %llx old_spte: %llx new_spte: %llx level: %d" , |
507 | as_id, gfn, old_spte, new_spte, level); |
508 | return; |
509 | } |
510 | |
511 | if (is_leaf != was_leaf) |
512 | kvm_update_page_stats(kvm, level, is_leaf ? 1 : -1); |
513 | |
	if (was_leaf && is_dirty_spte(old_spte) &&
	    (!is_present || !is_dirty_spte(new_spte) || pfn_changed))
		kvm_set_pfn_dirty(spte_to_pfn(old_spte));

	/*
	 * Recursively handle child PTs if the change removed a subtree from
	 * the paging structure. Note the WARN on the PFN changing without the
	 * SPTE being converted to a hugepage (leaf) or being zapped. Shadow
	 * pages are kernel allocations and should never be migrated.
	 */
	if (was_present && !was_leaf &&
	    (is_leaf || !is_present || WARN_ON_ONCE(pfn_changed)))
		handle_removed_pt(kvm, spte_to_child_pt(old_spte, level), shared);

	if (was_leaf && is_accessed_spte(old_spte) &&
	    (!is_present || !is_accessed_spte(new_spte) || pfn_changed))
		kvm_set_pfn_accessed(spte_to_pfn(old_spte));
531 | } |
532 | |
533 | /* |
534 | * tdp_mmu_set_spte_atomic - Set a TDP MMU SPTE atomically |
535 | * and handle the associated bookkeeping. Do not mark the page dirty |
536 | * in KVM's dirty bitmaps. |
537 | * |
538 | * If setting the SPTE fails because it has changed, iter->old_spte will be |
539 | * refreshed to the current value of the spte. |
540 | * |
541 | * @kvm: kvm instance |
542 | * @iter: a tdp_iter instance currently on the SPTE that should be set |
543 | * @new_spte: The value the SPTE should be set to |
544 | * Return: |
545 | * * 0 - If the SPTE was set. |
546 | * * -EBUSY - If the SPTE cannot be set. In this case this function will have |
547 | * no side-effects other than setting iter->old_spte to the last |
548 | * known value of the spte. |
549 | */ |
550 | static inline int tdp_mmu_set_spte_atomic(struct kvm *kvm, |
551 | struct tdp_iter *iter, |
552 | u64 new_spte) |
553 | { |
554 | u64 *sptep = rcu_dereference(iter->sptep); |
555 | |
556 | /* |
557 | * The caller is responsible for ensuring the old SPTE is not a REMOVED |
558 | * SPTE. KVM should never attempt to zap or manipulate a REMOVED SPTE, |
559 | * and pre-checking before inserting a new SPTE is advantageous as it |
560 | * avoids unnecessary work. |
561 | */ |
562 | WARN_ON_ONCE(iter->yielded || is_removed_spte(iter->old_spte)); |
563 | |
564 | lockdep_assert_held_read(&kvm->mmu_lock); |
565 | |
566 | /* |
567 | * Note, fast_pf_fix_direct_spte() can also modify TDP MMU SPTEs and |
568 | * does not hold the mmu_lock. On failure, i.e. if a different logical |
569 | * CPU modified the SPTE, try_cmpxchg64() updates iter->old_spte with |
570 | * the current value, so the caller operates on fresh data, e.g. if it |
571 | * retries tdp_mmu_set_spte_atomic() |
572 | */ |
573 | if (!try_cmpxchg64(sptep, &iter->old_spte, new_spte)) |
574 | return -EBUSY; |
575 | |
	handle_changed_spte(kvm, iter->as_id, iter->gfn, iter->old_spte,
			    new_spte, iter->level, true);
578 | |
579 | return 0; |
580 | } |
581 | |
582 | static inline int tdp_mmu_zap_spte_atomic(struct kvm *kvm, |
583 | struct tdp_iter *iter) |
584 | { |
585 | int ret; |
586 | |
587 | /* |
588 | * Freeze the SPTE by setting it to a special, |
589 | * non-present value. This will stop other threads from |
590 | * immediately installing a present entry in its place |
591 | * before the TLBs are flushed. |
592 | */ |
593 | ret = tdp_mmu_set_spte_atomic(kvm, iter, REMOVED_SPTE); |
594 | if (ret) |
595 | return ret; |
596 | |
	kvm_flush_remote_tlbs_gfn(kvm, iter->gfn, iter->level);
598 | |
599 | /* |
600 | * No other thread can overwrite the removed SPTE as they must either |
601 | * wait on the MMU lock or use tdp_mmu_set_spte_atomic() which will not |
602 | * overwrite the special removed SPTE value. No bookkeeping is needed |
603 | * here since the SPTE is going from non-present to non-present. Use |
604 | * the raw write helper to avoid an unnecessary check on volatile bits. |
605 | */ |
	__kvm_tdp_mmu_write_spte(iter->sptep, 0);
607 | |
608 | return 0; |
609 | } |
610 | |
611 | |
612 | /* |
613 | * tdp_mmu_set_spte - Set a TDP MMU SPTE and handle the associated bookkeeping |
614 | * @kvm: KVM instance |
615 | * @as_id: Address space ID, i.e. regular vs. SMM |
616 | * @sptep: Pointer to the SPTE |
617 | * @old_spte: The current value of the SPTE |
618 | * @new_spte: The new value that will be set for the SPTE |
619 | * @gfn: The base GFN that was (or will be) mapped by the SPTE |
620 | * @level: The level _containing_ the SPTE (its parent PT's level) |
621 | * |
622 | * Returns the old SPTE value, which _may_ be different than @old_spte if the |
 * SPTE had volatile bits.
624 | */ |
625 | static u64 tdp_mmu_set_spte(struct kvm *kvm, int as_id, tdp_ptep_t sptep, |
626 | u64 old_spte, u64 new_spte, gfn_t gfn, int level) |
627 | { |
628 | lockdep_assert_held_write(&kvm->mmu_lock); |
629 | |
630 | /* |
631 | * No thread should be using this function to set SPTEs to or from the |
632 | * temporary removed SPTE value. |
633 | * If operating under the MMU lock in read mode, tdp_mmu_set_spte_atomic |
634 | * should be used. If operating under the MMU lock in write mode, the |
635 | * use of the removed SPTE should not be necessary. |
636 | */ |
637 | WARN_ON_ONCE(is_removed_spte(old_spte) || is_removed_spte(new_spte)); |
638 | |
639 | old_spte = kvm_tdp_mmu_write_spte(sptep, old_spte, new_spte, level); |
640 | |
	handle_changed_spte(kvm, as_id, gfn, old_spte, new_spte, level, false);
642 | return old_spte; |
643 | } |
644 | |
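/*
 * Convenience wrapper around tdp_mmu_set_spte() that operates on a tdp_iter;
 * requires mmu_lock to be held for write.
 */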
645 | static inline void tdp_mmu_iter_set_spte(struct kvm *kvm, struct tdp_iter *iter, |
646 | u64 new_spte) |
647 | { |
648 | WARN_ON_ONCE(iter->yielded); |
	iter->old_spte = tdp_mmu_set_spte(kvm, iter->as_id, iter->sptep,
					  iter->old_spte, new_spte,
					  iter->gfn, iter->level);
652 | } |
653 | |
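/* Iterate over every SPTE, or only leaf SPTEs, of @_root in [_start, _end). */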
654 | #define tdp_root_for_each_pte(_iter, _root, _start, _end) \ |
655 | for_each_tdp_pte(_iter, _root, _start, _end) |
656 | |
657 | #define tdp_root_for_each_leaf_pte(_iter, _root, _start, _end) \ |
658 | tdp_root_for_each_pte(_iter, _root, _start, _end) \ |
659 | if (!is_shadow_present_pte(_iter.old_spte) || \ |
660 | !is_last_spte(_iter.old_spte, _iter.level)) \ |
661 | continue; \ |
662 | else |
663 | |
664 | #define tdp_mmu_for_each_pte(_iter, _mmu, _start, _end) \ |
665 | for_each_tdp_pte(_iter, root_to_sp(_mmu->root.hpa), _start, _end) |
666 | |
667 | /* |
668 | * Yield if the MMU lock is contended or this thread needs to return control |
669 | * to the scheduler. |
670 | * |
671 | * If this function should yield and flush is set, it will perform a remote |
672 | * TLB flush before yielding. |
673 | * |
674 | * If this function yields, iter->yielded is set and the caller must skip to |
675 | * the next iteration, where tdp_iter_next() will reset the tdp_iter's walk |
676 | * over the paging structures to allow the iterator to continue its traversal |
677 | * from the paging structure root. |
678 | * |
679 | * Returns true if this function yielded. |
680 | */ |
681 | static inline bool __must_check tdp_mmu_iter_cond_resched(struct kvm *kvm, |
682 | struct tdp_iter *iter, |
683 | bool flush, bool shared) |
684 | { |
685 | WARN_ON_ONCE(iter->yielded); |
686 | |
687 | /* Ensure forward progress has been made before yielding. */ |
688 | if (iter->next_last_level_gfn == iter->yielded_gfn) |
689 | return false; |
690 | |
	if (need_resched() || rwlock_needbreak(&kvm->mmu_lock)) {
692 | if (flush) |
693 | kvm_flush_remote_tlbs(kvm); |
694 | |
695 | rcu_read_unlock(); |
696 | |
697 | if (shared) |
698 | cond_resched_rwlock_read(&kvm->mmu_lock); |
699 | else |
700 | cond_resched_rwlock_write(&kvm->mmu_lock); |
701 | |
702 | rcu_read_lock(); |
703 | |
704 | WARN_ON_ONCE(iter->gfn > iter->next_last_level_gfn); |
705 | |
706 | iter->yielded = true; |
707 | } |
708 | |
709 | return iter->yielded; |
710 | } |
711 | |
712 | static inline gfn_t tdp_mmu_max_gfn_exclusive(void) |
713 | { |
714 | /* |
715 | * Bound TDP MMU walks at host.MAXPHYADDR. KVM disallows memslots with |
716 | * a gpa range that would exceed the max gfn, and KVM does not create |
717 | * MMIO SPTEs for "impossible" gfns, instead sending such accesses down |
718 | * the slow emulation path every time. |
719 | */ |
720 | return kvm_mmu_max_gfn() + 1; |
721 | } |
722 | |
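/* Zap all present SPTEs at exactly @zap_level in @root, yielding as needed. */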
723 | static void __tdp_mmu_zap_root(struct kvm *kvm, struct kvm_mmu_page *root, |
724 | bool shared, int zap_level) |
725 | { |
726 | struct tdp_iter iter; |
727 | |
728 | gfn_t end = tdp_mmu_max_gfn_exclusive(); |
729 | gfn_t start = 0; |
730 | |
731 | for_each_tdp_pte_min_level(iter, root, zap_level, start, end) { |
732 | retry: |
		if (tdp_mmu_iter_cond_resched(kvm, &iter, false, shared))
			continue;

		if (!is_shadow_present_pte(iter.old_spte))
			continue;

		if (iter.level > zap_level)
			continue;

		if (!shared)
			tdp_mmu_iter_set_spte(kvm, &iter, 0);
		else if (tdp_mmu_set_spte_atomic(kvm, &iter, 0))
745 | goto retry; |
746 | } |
747 | } |
748 | |
749 | static void tdp_mmu_zap_root(struct kvm *kvm, struct kvm_mmu_page *root, |
750 | bool shared) |
751 | { |
752 | |
753 | /* |
754 | * The root must have an elevated refcount so that it's reachable via |
755 | * mmu_notifier callbacks, which allows this path to yield and drop |
756 | * mmu_lock. When handling an unmap/release mmu_notifier command, KVM |
757 | * must drop all references to relevant pages prior to completing the |
758 | * callback. Dropping mmu_lock with an unreachable root would result |
759 | * in zapping SPTEs after a relevant mmu_notifier callback completes |
760 | * and lead to use-after-free as zapping a SPTE triggers "writeback" of |
761 | * dirty accessed bits to the SPTE's associated struct page. |
762 | */ |
763 | WARN_ON_ONCE(!refcount_read(&root->tdp_mmu_root_count)); |
764 | |
765 | kvm_lockdep_assert_mmu_lock_held(kvm, shared); |
766 | |
767 | rcu_read_lock(); |
768 | |
769 | /* |
770 | * Zap roots in multiple passes of decreasing granularity, i.e. zap at |
771 | * 4KiB=>2MiB=>1GiB=>root, in order to better honor need_resched() (all |
772 | * preempt models) or mmu_lock contention (full or real-time models). |
773 | * Zapping at finer granularity marginally increases the total time of |
774 | * the zap, but in most cases the zap itself isn't latency sensitive. |
775 | * |
776 | * If KVM is configured to prove the MMU, skip the 4KiB and 2MiB zaps |
777 | * in order to mimic the page fault path, which can replace a 1GiB page |
778 | * table with an equivalent 1GiB hugepage, i.e. can get saddled with |
779 | * zapping a 1GiB region that's fully populated with 4KiB SPTEs. This |
780 | * allows verifying that KVM can safely zap 1GiB regions, e.g. without |
781 | * inducing RCU stalls, without relying on a relatively rare event |
782 | * (zapping roots is orders of magnitude more common). Note, because |
783 | * zapping a SP recurses on its children, stepping down to PG_LEVEL_4K |
784 | * in the iterator itself is unnecessary. |
785 | */ |
786 | if (!IS_ENABLED(CONFIG_KVM_PROVE_MMU)) { |
		__tdp_mmu_zap_root(kvm, root, shared, PG_LEVEL_4K);
		__tdp_mmu_zap_root(kvm, root, shared, PG_LEVEL_2M);
	}
	__tdp_mmu_zap_root(kvm, root, shared, PG_LEVEL_1G);
	__tdp_mmu_zap_root(kvm, root, shared, root->role.level);
792 | |
793 | rcu_read_unlock(); |
794 | } |
795 | |
796 | bool kvm_tdp_mmu_zap_sp(struct kvm *kvm, struct kvm_mmu_page *sp) |
797 | { |
798 | u64 old_spte; |
799 | |
800 | /* |
801 | * This helper intentionally doesn't allow zapping a root shadow page, |
802 | * which doesn't have a parent page table and thus no associated entry. |
803 | */ |
804 | if (WARN_ON_ONCE(!sp->ptep)) |
805 | return false; |
806 | |
	old_spte = kvm_tdp_mmu_read_spte(sp->ptep);
808 | if (WARN_ON_ONCE(!is_shadow_present_pte(old_spte))) |
809 | return false; |
810 | |
	tdp_mmu_set_spte(kvm, kvm_mmu_page_as_id(sp), sp->ptep, old_spte, 0,
			 sp->gfn, sp->role.level + 1);
813 | |
814 | return true; |
815 | } |
816 | |
817 | /* |
818 | * If can_yield is true, will release the MMU lock and reschedule if the |
819 | * scheduler needs the CPU or there is contention on the MMU lock. If this |
820 | * function cannot yield, it will not release the MMU lock or reschedule and |
821 | * the caller must ensure it does not supply too large a GFN range, or the |
822 | * operation can cause a soft lockup. |
823 | */ |
824 | static bool tdp_mmu_zap_leafs(struct kvm *kvm, struct kvm_mmu_page *root, |
825 | gfn_t start, gfn_t end, bool can_yield, bool flush) |
826 | { |
827 | struct tdp_iter iter; |
828 | |
829 | end = min(end, tdp_mmu_max_gfn_exclusive()); |
830 | |
831 | lockdep_assert_held_write(&kvm->mmu_lock); |
832 | |
833 | rcu_read_lock(); |
834 | |
835 | for_each_tdp_pte_min_level(iter, root, PG_LEVEL_4K, start, end) { |
		if (can_yield &&
		    tdp_mmu_iter_cond_resched(kvm, &iter, flush, false)) {
			flush = false;
			continue;
		}

		if (!is_shadow_present_pte(iter.old_spte) ||
		    !is_last_spte(iter.old_spte, iter.level))
			continue;

		tdp_mmu_iter_set_spte(kvm, &iter, 0);
847 | |
848 | /* |
		 * Zapping SPTEs in invalid roots doesn't require a TLB flush,
850 | * see kvm_tdp_mmu_zap_invalidated_roots() for details. |
851 | */ |
852 | if (!root->role.invalid) |
853 | flush = true; |
854 | } |
855 | |
856 | rcu_read_unlock(); |
857 | |
858 | /* |
859 | * Because this flow zaps _only_ leaf SPTEs, the caller doesn't need |
860 | * to provide RCU protection as no 'struct kvm_mmu_page' will be freed. |
861 | */ |
862 | return flush; |
863 | } |
864 | |
865 | /* |
 * Zap leaf SPTEs for the range of gfns, [start, end), for all *valid* roots.
867 | * Returns true if a TLB flush is needed before releasing the MMU lock, i.e. if |
868 | * one or more SPTEs were zapped since the MMU lock was last acquired. |
869 | */ |
870 | bool kvm_tdp_mmu_zap_leafs(struct kvm *kvm, gfn_t start, gfn_t end, bool flush) |
871 | { |
872 | struct kvm_mmu_page *root; |
873 | |
874 | lockdep_assert_held_write(&kvm->mmu_lock); |
875 | for_each_valid_tdp_mmu_root_yield_safe(kvm, root, -1) |
		flush = tdp_mmu_zap_leafs(kvm, root, start, end, true, flush);
877 | |
878 | return flush; |
879 | } |
880 | |
881 | void kvm_tdp_mmu_zap_all(struct kvm *kvm) |
882 | { |
883 | struct kvm_mmu_page *root; |
884 | |
885 | /* |
886 | * Zap all roots, including invalid roots, as all SPTEs must be dropped |
887 | * before returning to the caller. Zap directly even if the root is |
888 | * also being zapped by a worker. Walking zapped top-level SPTEs isn't |
889 | * all that expensive and mmu_lock is already held, which means the |
890 | * worker has yielded, i.e. flushing the work instead of zapping here |
891 | * isn't guaranteed to be any faster. |
892 | * |
	 * A TLB flush is unnecessary, KVM zaps everything if and only if the VM
894 | * is being destroyed or the userspace VMM has exited. In both cases, |
895 | * KVM_RUN is unreachable, i.e. no vCPUs will ever service the request. |
896 | */ |
897 | lockdep_assert_held_write(&kvm->mmu_lock); |
898 | for_each_tdp_mmu_root_yield_safe(kvm, root) |
		tdp_mmu_zap_root(kvm, root, false);
900 | } |
901 | |
902 | /* |
903 | * Zap all invalidated roots to ensure all SPTEs are dropped before the "fast |
904 | * zap" completes. |
905 | */ |
906 | void kvm_tdp_mmu_zap_invalidated_roots(struct kvm *kvm) |
907 | { |
908 | struct kvm_mmu_page *root; |
909 | |
910 | read_lock(&kvm->mmu_lock); |
911 | |
912 | for_each_tdp_mmu_root_yield_safe(kvm, root) { |
913 | if (!root->tdp_mmu_scheduled_root_to_zap) |
914 | continue; |
915 | |
916 | root->tdp_mmu_scheduled_root_to_zap = false; |
917 | KVM_BUG_ON(!root->role.invalid, kvm); |
918 | |
919 | /* |
920 | * A TLB flush is not necessary as KVM performs a local TLB |
921 | * flush when allocating a new root (see kvm_mmu_load()), and |
922 | * when migrating a vCPU to a different pCPU. Note, the local |
923 | * TLB flush on reuse also invalidates paging-structure-cache |
924 | * entries, i.e. TLB entries for intermediate paging structures, |
925 | * that may be zapped, as such entries are associated with the |
926 | * ASID on both VMX and SVM. |
927 | */ |
		tdp_mmu_zap_root(kvm, root, true);

		/*
		 * The reference needs to be put *after* zapping the root, as
		 * the root must be reachable by mmu_notifiers while it's being
		 * zapped.
		 */
935 | kvm_tdp_mmu_put_root(kvm, root); |
936 | } |
937 | |
938 | read_unlock(&kvm->mmu_lock); |
939 | } |
940 | |
941 | /* |
942 | * Mark each TDP MMU root as invalid to prevent vCPUs from reusing a root that |
943 | * is about to be zapped, e.g. in response to a memslots update. The actual |
 * zapping is done separately so that it happens with mmu_lock held for read,
945 | * whereas invalidating roots must be done with mmu_lock held for write (unless |
946 | * the VM is being destroyed). |
947 | * |
948 | * Note, kvm_tdp_mmu_zap_invalidated_roots() is gifted the TDP MMU's reference. |
949 | * See kvm_tdp_mmu_alloc_root(). |
950 | */ |
951 | void kvm_tdp_mmu_invalidate_all_roots(struct kvm *kvm) |
952 | { |
953 | struct kvm_mmu_page *root; |
954 | |
955 | /* |
956 | * mmu_lock must be held for write to ensure that a root doesn't become |
957 | * invalid while there are active readers (invalidating a root while |
958 | * there are active readers may or may not be problematic in practice, |
959 | * but it's uncharted territory and not supported). |
960 | * |
961 | * Waive the assertion if there are no users of @kvm, i.e. the VM is |
962 | * being destroyed after all references have been put, or if no vCPUs |
963 | * have been created (which means there are no roots), i.e. the VM is |
964 | * being destroyed in an error path of KVM_CREATE_VM. |
965 | */ |
966 | if (IS_ENABLED(CONFIG_PROVE_LOCKING) && |
	    refcount_read(&kvm->users_count) && kvm->created_vcpus)
968 | lockdep_assert_held_write(&kvm->mmu_lock); |
969 | |
970 | /* |
971 | * As above, mmu_lock isn't held when destroying the VM! There can't |
972 | * be other references to @kvm, i.e. nothing else can invalidate roots |
973 | * or get/put references to roots. |
974 | */ |
975 | list_for_each_entry(root, &kvm->arch.tdp_mmu_roots, link) { |
976 | /* |
977 | * Note, invalid roots can outlive a memslot update! Invalid |
978 | * roots must be *zapped* before the memslot update completes, |
979 | * but a different task can acquire a reference and keep the |
		 * root alive after it's been zapped.
981 | */ |
982 | if (!root->role.invalid) { |
983 | root->tdp_mmu_scheduled_root_to_zap = true; |
984 | root->role.invalid = true; |
985 | } |
986 | } |
987 | } |
988 | |
989 | /* |
990 | * Installs a last-level SPTE to handle a TDP page fault. |
991 | * (NPT/EPT violation/misconfiguration) |
992 | */ |
993 | static int tdp_mmu_map_handle_target_level(struct kvm_vcpu *vcpu, |
994 | struct kvm_page_fault *fault, |
995 | struct tdp_iter *iter) |
996 | { |
997 | struct kvm_mmu_page *sp = sptep_to_sp(rcu_dereference(iter->sptep)); |
998 | u64 new_spte; |
999 | int ret = RET_PF_FIXED; |
1000 | bool wrprot = false; |
1001 | |
1002 | if (WARN_ON_ONCE(sp->role.level != fault->goal_level)) |
1003 | return RET_PF_RETRY; |
1004 | |
1005 | if (unlikely(!fault->slot)) |
1006 | new_spte = make_mmio_spte(vcpu, iter->gfn, ACC_ALL); |
1007 | else |
1008 | wrprot = make_spte(vcpu, sp, fault->slot, ACC_ALL, iter->gfn, |
1009 | fault->pfn, iter->old_spte, fault->prefetch, true, |
1010 | fault->map_writable, &new_spte); |
1011 | |
1012 | if (new_spte == iter->old_spte) |
1013 | ret = RET_PF_SPURIOUS; |
	else if (tdp_mmu_set_spte_atomic(vcpu->kvm, iter, new_spte))
		return RET_PF_RETRY;
	else if (is_shadow_present_pte(iter->old_spte) &&
		 !is_last_spte(iter->old_spte, iter->level))
		kvm_flush_remote_tlbs_gfn(vcpu->kvm, iter->gfn, iter->level);
1019 | |
1020 | /* |
1021 | * If the page fault was caused by a write but the page is write |
1022 | * protected, emulation is needed. If the emulation was skipped, |
1023 | * the vCPU would have the same fault again. |
1024 | */ |
1025 | if (wrprot) { |
1026 | if (fault->write) |
1027 | ret = RET_PF_EMULATE; |
1028 | } |
1029 | |
1030 | /* If a MMIO SPTE is installed, the MMIO will need to be emulated. */ |
1031 | if (unlikely(is_mmio_spte(new_spte))) { |
1032 | vcpu->stat.pf_mmio_spte_created++; |
		trace_mark_mmio_spte(rcu_dereference(iter->sptep), iter->gfn,
				     new_spte);
		ret = RET_PF_EMULATE;
	} else {
		trace_kvm_mmu_set_spte(iter->level, iter->gfn,
1038 | rcu_dereference(iter->sptep)); |
1039 | } |
1040 | |
1041 | return ret; |
1042 | } |
1043 | |
1044 | /* |
1045 | * tdp_mmu_link_sp - Replace the given spte with an spte pointing to the |
1046 | * provided page table. |
1047 | * |
1048 | * @kvm: kvm instance |
1049 | * @iter: a tdp_iter instance currently on the SPTE that should be set |
1050 | * @sp: The new TDP page table to install. |
1051 | * @shared: This operation is running under the MMU lock in read mode. |
1052 | * |
1053 | * Returns: 0 if the new page table was installed. Non-0 if the page table |
1054 | * could not be installed (e.g. the atomic compare-exchange failed). |
1055 | */ |
1056 | static int tdp_mmu_link_sp(struct kvm *kvm, struct tdp_iter *iter, |
1057 | struct kvm_mmu_page *sp, bool shared) |
1058 | { |
	u64 spte = make_nonleaf_spte(sp->spt, !kvm_ad_enabled());
	int ret = 0;

	if (shared) {
		ret = tdp_mmu_set_spte_atomic(kvm, iter, spte);
		if (ret)
			return ret;
	} else {
		tdp_mmu_iter_set_spte(kvm, iter, spte);
1068 | } |
1069 | |
1070 | tdp_account_mmu_page(kvm, sp); |
1071 | |
1072 | return 0; |
1073 | } |
1074 | |
1075 | static int tdp_mmu_split_huge_page(struct kvm *kvm, struct tdp_iter *iter, |
1076 | struct kvm_mmu_page *sp, bool shared); |
1077 | |
1078 | /* |
1079 | * Handle a TDP page fault (NPT/EPT violation/misconfiguration) by installing |
1080 | * page tables and SPTEs to translate the faulting guest physical address. |
1081 | */ |
1082 | int kvm_tdp_mmu_map(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault) |
1083 | { |
1084 | struct kvm_mmu *mmu = vcpu->arch.mmu; |
1085 | struct kvm *kvm = vcpu->kvm; |
1086 | struct tdp_iter iter; |
1087 | struct kvm_mmu_page *sp; |
1088 | int ret = RET_PF_RETRY; |
1089 | |
1090 | kvm_mmu_hugepage_adjust(vcpu, fault); |
1091 | |
1092 | trace_kvm_mmu_spte_requested(fault); |
1093 | |
1094 | rcu_read_lock(); |
1095 | |
1096 | tdp_mmu_for_each_pte(iter, mmu, fault->gfn, fault->gfn + 1) { |
1097 | int r; |
1098 | |
1099 | if (fault->nx_huge_page_workaround_enabled) |
			disallowed_hugepage_adjust(fault, iter.old_spte, iter.level);
1101 | |
1102 | /* |
1103 | * If SPTE has been frozen by another thread, just give up and |
1104 | * retry, avoiding unnecessary page table allocation and free. |
1105 | */ |
		if (is_removed_spte(iter.old_spte))
1107 | goto retry; |
1108 | |
1109 | if (iter.level == fault->goal_level) |
1110 | goto map_target_level; |
1111 | |
1112 | /* Step down into the lower level page table if it exists. */ |
		if (is_shadow_present_pte(iter.old_spte) &&
		    !is_large_pte(iter.old_spte))
1115 | continue; |
1116 | |
1117 | /* |
1118 | * The SPTE is either non-present or points to a huge page that |
1119 | * needs to be split. |
1120 | */ |
1121 | sp = tdp_mmu_alloc_sp(vcpu); |
		tdp_mmu_init_child_sp(sp, &iter);

		sp->nx_huge_page_disallowed = fault->huge_page_disallowed;

		if (is_shadow_present_pte(iter.old_spte))
			r = tdp_mmu_split_huge_page(kvm, &iter, sp, true);
		else
			r = tdp_mmu_link_sp(kvm, &iter, sp, true);
1130 | |
1131 | /* |
1132 | * Force the guest to retry if installing an upper level SPTE |
1133 | * failed, e.g. because a different task modified the SPTE. |
1134 | */ |
1135 | if (r) { |
1136 | tdp_mmu_free_sp(sp); |
1137 | goto retry; |
1138 | } |
1139 | |
1140 | if (fault->huge_page_disallowed && |
1141 | fault->req_level >= iter.level) { |
			spin_lock(&kvm->arch.tdp_mmu_pages_lock);
			if (sp->nx_huge_page_disallowed)
				track_possible_nx_huge_page(kvm, sp);
			spin_unlock(&kvm->arch.tdp_mmu_pages_lock);
1146 | } |
1147 | } |
1148 | |
1149 | /* |
1150 | * The walk aborted before reaching the target level, e.g. because the |
1151 | * iterator detected an upper level SPTE was frozen during traversal. |
1152 | */ |
1153 | WARN_ON_ONCE(iter.level == fault->goal_level); |
1154 | goto retry; |
1155 | |
1156 | map_target_level: |
	ret = tdp_mmu_map_handle_target_level(vcpu, fault, &iter);
1158 | |
1159 | retry: |
1160 | rcu_read_unlock(); |
1161 | return ret; |
1162 | } |
1163 | |
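/*
 * Zap leaf SPTEs in the mmu_notifier range for all roots, valid and invalid,
 * in the affected address space.
 */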
1164 | bool kvm_tdp_mmu_unmap_gfn_range(struct kvm *kvm, struct kvm_gfn_range *range, |
1165 | bool flush) |
1166 | { |
1167 | struct kvm_mmu_page *root; |
1168 | |
1169 | __for_each_tdp_mmu_root_yield_safe(kvm, root, range->slot->as_id, false) |
		flush = tdp_mmu_zap_leafs(kvm, root, range->start, range->end,
					  range->may_block, flush);
1172 | |
1173 | return flush; |
1174 | } |
1175 | |
1176 | typedef bool (*tdp_handler_t)(struct kvm *kvm, struct tdp_iter *iter, |
1177 | struct kvm_gfn_range *range); |
1178 | |
1179 | static __always_inline bool kvm_tdp_mmu_handle_gfn(struct kvm *kvm, |
1180 | struct kvm_gfn_range *range, |
1181 | tdp_handler_t handler) |
1182 | { |
1183 | struct kvm_mmu_page *root; |
1184 | struct tdp_iter iter; |
1185 | bool ret = false; |
1186 | |
1187 | /* |
1188 | * Don't support rescheduling, none of the MMU notifiers that funnel |
1189 | * into this helper allow blocking; it'd be dead, wasteful code. |
1190 | */ |
1191 | for_each_tdp_mmu_root(kvm, root, range->slot->as_id) { |
1192 | rcu_read_lock(); |
1193 | |
1194 | tdp_root_for_each_leaf_pte(iter, root, range->start, range->end) |
1195 | ret |= handler(kvm, &iter, range); |
1196 | |
1197 | rcu_read_unlock(); |
1198 | } |
1199 | |
1200 | return ret; |
1201 | } |
1202 | |
1203 | /* |
1204 | * Mark the SPTEs range of GFNs [start, end) unaccessed and return non-zero |
1205 | * if any of the GFNs in the range have been accessed. |
1206 | * |
1207 | * No need to mark the corresponding PFN as accessed as this call is coming |
1208 | * from the clear_young() or clear_flush_young() notifier, which uses the |
1209 | * return value to determine if the page has been accessed. |
1210 | */ |
1211 | static bool age_gfn_range(struct kvm *kvm, struct tdp_iter *iter, |
1212 | struct kvm_gfn_range *range) |
1213 | { |
1214 | u64 new_spte; |
1215 | |
1216 | /* If we have a non-accessed entry we don't need to change the pte. */ |
	if (!is_accessed_spte(iter->old_spte))
1218 | return false; |
1219 | |
	if (spte_ad_enabled(iter->old_spte)) {
		iter->old_spte = tdp_mmu_clear_spte_bits(iter->sptep,
							 iter->old_spte,
							 shadow_accessed_mask,
							 iter->level);
		new_spte = iter->old_spte & ~shadow_accessed_mask;
	} else {
		/*
		 * Capture the dirty status of the page, so that it doesn't get
		 * lost when the SPTE is marked for access tracking.
		 */
		if (is_writable_pte(iter->old_spte))
			kvm_set_pfn_dirty(spte_to_pfn(iter->old_spte));

		new_spte = mark_spte_for_access_track(iter->old_spte);
		iter->old_spte = kvm_tdp_mmu_write_spte(iter->sptep,
							iter->old_spte, new_spte,
							iter->level);
	}

	trace_kvm_tdp_mmu_spte_changed(iter->as_id, iter->gfn, iter->level,
				       iter->old_spte, new_spte);
1242 | return true; |
1243 | } |
1244 | |
1245 | bool kvm_tdp_mmu_age_gfn_range(struct kvm *kvm, struct kvm_gfn_range *range) |
1246 | { |
	return kvm_tdp_mmu_handle_gfn(kvm, range, age_gfn_range);
1248 | } |
1249 | |
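/* Check whether the SPTE is young (accessed) without clearing the bit. */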
1250 | static bool test_age_gfn(struct kvm *kvm, struct tdp_iter *iter, |
1251 | struct kvm_gfn_range *range) |
1252 | { |
	return is_accessed_spte(iter->old_spte);
1254 | } |
1255 | |
1256 | bool kvm_tdp_mmu_test_age_gfn(struct kvm *kvm, struct kvm_gfn_range *range) |
1257 | { |
	return kvm_tdp_mmu_handle_gfn(kvm, range, test_age_gfn);
1259 | } |
1260 | |
1261 | static bool set_spte_gfn(struct kvm *kvm, struct tdp_iter *iter, |
1262 | struct kvm_gfn_range *range) |
1263 | { |
1264 | u64 new_spte; |
1265 | |
1266 | /* Huge pages aren't expected to be modified without first being zapped. */ |
1267 | WARN_ON_ONCE(pte_huge(range->arg.pte) || range->start + 1 != range->end); |
1268 | |
1269 | if (iter->level != PG_LEVEL_4K || |
	    !is_shadow_present_pte(iter->old_spte))
1271 | return false; |
1272 | |
1273 | /* |
1274 | * Note, when changing a read-only SPTE, it's not strictly necessary to |
1275 | * zero the SPTE before setting the new PFN, but doing so preserves the |
	 * invariant that the PFN of a present leaf SPTE can never change.
	 * See handle_changed_spte().
	 */
	tdp_mmu_iter_set_spte(kvm, iter, 0);

	if (!pte_write(range->arg.pte)) {
		new_spte = kvm_mmu_changed_pte_notifier_make_spte(iter->old_spte,
								  pte_pfn(range->arg.pte));

		tdp_mmu_iter_set_spte(kvm, iter, new_spte);
1286 | } |
1287 | |
1288 | return true; |
1289 | } |
1290 | |
1291 | /* |
1292 | * Handle the changed_pte MMU notifier for the TDP MMU. |
1293 | * data is a pointer to the new pte_t mapping the HVA specified by the MMU |
1294 | * notifier. |
1295 | * Returns non-zero if a flush is needed before releasing the MMU lock. |
1296 | */ |
1297 | bool kvm_tdp_mmu_set_spte_gfn(struct kvm *kvm, struct kvm_gfn_range *range) |
1298 | { |
1299 | /* |
1300 | * No need to handle the remote TLB flush under RCU protection, the |
1301 | * target SPTE _must_ be a leaf SPTE, i.e. cannot result in freeing a |
1302 | * shadow page. See the WARN on pfn_changed in handle_changed_spte(). |
1303 | */ |
	return kvm_tdp_mmu_handle_gfn(kvm, range, set_spte_gfn);
1305 | } |
1306 | |
1307 | /* |
1308 | * Remove write access from all SPTEs at or above min_level that map GFNs |
1309 | * [start, end). Returns true if an SPTE has been changed and the TLBs need to |
1310 | * be flushed. |
1311 | */ |
1312 | static bool wrprot_gfn_range(struct kvm *kvm, struct kvm_mmu_page *root, |
1313 | gfn_t start, gfn_t end, int min_level) |
1314 | { |
1315 | struct tdp_iter iter; |
1316 | u64 new_spte; |
1317 | bool spte_set = false; |
1318 | |
1319 | rcu_read_lock(); |
1320 | |
1321 | BUG_ON(min_level > KVM_MAX_HUGEPAGE_LEVEL); |
1322 | |
1323 | for_each_tdp_pte_min_level(iter, root, min_level, start, end) { |
1324 | retry: |
		if (tdp_mmu_iter_cond_resched(kvm, &iter, false, true))
1326 | continue; |
1327 | |
1328 | if (!is_shadow_present_pte(iter.old_spte) || |
1329 | !is_last_spte(iter.old_spte, iter.level) || |
1330 | !(iter.old_spte & PT_WRITABLE_MASK)) |
1331 | continue; |
1332 | |
1333 | new_spte = iter.old_spte & ~PT_WRITABLE_MASK; |
1334 | |
		if (tdp_mmu_set_spte_atomic(kvm, &iter, new_spte))
1336 | goto retry; |
1337 | |
1338 | spte_set = true; |
1339 | } |
1340 | |
1341 | rcu_read_unlock(); |
1342 | return spte_set; |
1343 | } |
1344 | |
1345 | /* |
1346 | * Remove write access from all the SPTEs mapping GFNs in the memslot. Will |
1347 | * only affect leaf SPTEs down to min_level. |
1348 | * Returns true if an SPTE has been changed and the TLBs need to be flushed. |
1349 | */ |
1350 | bool kvm_tdp_mmu_wrprot_slot(struct kvm *kvm, |
1351 | const struct kvm_memory_slot *slot, int min_level) |
1352 | { |
1353 | struct kvm_mmu_page *root; |
1354 | bool spte_set = false; |
1355 | |
1356 | lockdep_assert_held_read(&kvm->mmu_lock); |
1357 | |
1358 | for_each_valid_tdp_mmu_root_yield_safe(kvm, root, slot->as_id) |
		spte_set |= wrprot_gfn_range(kvm, root, slot->base_gfn,
			     slot->base_gfn + slot->npages, min_level);
1361 | |
1362 | return spte_set; |
1363 | } |
1364 | |
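/*
 * Allocate a shadow page for huge page splitting directly with @gfp, i.e.
 * without going through the per-vCPU memory caches.
 */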
1365 | static struct kvm_mmu_page *__tdp_mmu_alloc_sp_for_split(gfp_t gfp) |
1366 | { |
1367 | struct kvm_mmu_page *sp; |
1368 | |
1369 | gfp |= __GFP_ZERO; |
1370 | |
	sp = kmem_cache_alloc(mmu_page_header_cache, gfp);
	if (!sp)
		return NULL;

	sp->spt = (void *)__get_free_page(gfp);
	if (!sp->spt) {
		kmem_cache_free(mmu_page_header_cache, sp);
1378 | return NULL; |
1379 | } |
1380 | |
1381 | return sp; |
1382 | } |
1383 | |
1384 | static struct kvm_mmu_page *tdp_mmu_alloc_sp_for_split(struct kvm *kvm, |
1385 | struct tdp_iter *iter, |
1386 | bool shared) |
1387 | { |
1388 | struct kvm_mmu_page *sp; |
1389 | |
1390 | kvm_lockdep_assert_mmu_lock_held(kvm, shared); |
1391 | |
1392 | /* |
1393 | * Since we are allocating while under the MMU lock we have to be |
1394 | * careful about GFP flags. Use GFP_NOWAIT to avoid blocking on direct |
1395 | * reclaim and to avoid making any filesystem callbacks (which can end |
1396 | * up invoking KVM MMU notifiers, resulting in a deadlock). |
1397 | * |
1398 | * If this allocation fails we drop the lock and retry with reclaim |
1399 | * allowed. |
1400 | */ |
1401 | sp = __tdp_mmu_alloc_sp_for_split(GFP_NOWAIT | __GFP_ACCOUNT); |
1402 | if (sp) |
1403 | return sp; |
1404 | |
1405 | rcu_read_unlock(); |
1406 | |
1407 | if (shared) |
1408 | read_unlock(&kvm->mmu_lock); |
1409 | else |
1410 | write_unlock(&kvm->mmu_lock); |
1411 | |
1412 | iter->yielded = true; |
1413 | sp = __tdp_mmu_alloc_sp_for_split(GFP_KERNEL_ACCOUNT); |
1414 | |
1415 | if (shared) |
1416 | read_lock(&kvm->mmu_lock); |
1417 | else |
1418 | write_lock(&kvm->mmu_lock); |
1419 | |
1420 | rcu_read_lock(); |
1421 | |
1422 | return sp; |
1423 | } |
1424 | |
1425 | /* Note, the caller is responsible for initializing @sp. */ |
1426 | static int tdp_mmu_split_huge_page(struct kvm *kvm, struct tdp_iter *iter, |
1427 | struct kvm_mmu_page *sp, bool shared) |
1428 | { |
1429 | const u64 huge_spte = iter->old_spte; |
1430 | const int level = iter->level; |
1431 | int ret, i; |
1432 | |
1433 | /* |
1434 | * No need for atomics when writing to sp->spt since the page table has |
1435 | * not been linked in yet and thus is not reachable from any other CPU. |
1436 | */ |
1437 | for (i = 0; i < SPTE_ENT_PER_PAGE; i++) |
		sp->spt[i] = make_huge_page_split_spte(kvm, huge_spte, sp->role, i);
1439 | |
1440 | /* |
1441 | * Replace the huge spte with a pointer to the populated lower level |
1442 | * page table. Since we are making this change without a TLB flush vCPUs |
1443 | * will see a mix of the split mappings and the original huge mapping, |
1444 | * depending on what's currently in their TLB. This is fine from a |
1445 | * correctness standpoint since the translation will be the same either |
1446 | * way. |
1447 | */ |
1448 | ret = tdp_mmu_link_sp(kvm, iter, sp, shared); |
1449 | if (ret) |
1450 | goto out; |
1451 | |
1452 | /* |
	 * tdp_mmu_link_sp() will handle subtracting the huge page we
1454 | * are overwriting from the page stats. But we have to manually update |
1455 | * the page stats with the new present child pages. |
1456 | */ |
1457 | kvm_update_page_stats(kvm, level - 1, SPTE_ENT_PER_PAGE); |
1458 | |
1459 | out: |
	trace_kvm_mmu_split_huge_page(iter->gfn, huge_spte, level, ret);
1461 | return ret; |
1462 | } |

static int tdp_mmu_split_huge_pages_root(struct kvm *kvm,
					 struct kvm_mmu_page *root,
					 gfn_t start, gfn_t end,
					 int target_level, bool shared)
{
	struct kvm_mmu_page *sp = NULL;
	struct tdp_iter iter;
	int ret = 0;

	rcu_read_lock();

	/*
	 * Traverse the page table splitting all huge pages above the target
	 * level into one lower level. For example, if we encounter a 1GB page
	 * we split it into 512 2MB pages.
	 *
	 * Since the TDP iterator uses a pre-order traversal, we are guaranteed
	 * to visit an SPTE before ever visiting its children, which means we
	 * will correctly recursively split huge pages that are more than one
	 * level above the target level (e.g. splitting a 1GB to 512 2MB pages,
	 * and then splitting each of those to 512 4KB pages).
	 */
	for_each_tdp_pte_min_level(iter, root, target_level + 1, start, end) {
retry:
		if (tdp_mmu_iter_cond_resched(kvm, &iter, false, shared))
			continue;

		if (!is_shadow_present_pte(iter.old_spte) || !is_large_pte(iter.old_spte))
			continue;

		if (!sp) {
			sp = tdp_mmu_alloc_sp_for_split(kvm, &iter, shared);
			if (!sp) {
				ret = -ENOMEM;
				trace_kvm_mmu_split_huge_page(iter.gfn,
							      iter.old_spte,
							      iter.level, ret);
				break;
			}

			if (iter.yielded)
				continue;
		}

		tdp_mmu_init_child_sp(sp, &iter);

		if (tdp_mmu_split_huge_page(kvm, &iter, sp, shared))
			goto retry;

		sp = NULL;
	}

	rcu_read_unlock();

	/*
	 * It's possible to exit the loop having never used the last sp if, for
	 * example, a vCPU doing HugePage NX splitting wins the race and
	 * installs its own sp in place of the last sp we tried to split.
	 */
	if (sp)
		tdp_mmu_free_sp(sp);

	return ret;
}

/*
 * Try to split all huge pages mapped by the TDP MMU down to the target level.
 */
void kvm_tdp_mmu_try_split_huge_pages(struct kvm *kvm,
				      const struct kvm_memory_slot *slot,
				      gfn_t start, gfn_t end,
				      int target_level, bool shared)
{
	struct kvm_mmu_page *root;
	int r = 0;

	kvm_lockdep_assert_mmu_lock_held(kvm, shared);
	for_each_valid_tdp_mmu_root_yield_safe(kvm, root, slot->as_id) {
		r = tdp_mmu_split_huge_pages_root(kvm, root, start, end, target_level, shared);
		if (r) {
			kvm_tdp_mmu_put_root(kvm, root);
			break;
		}
	}
}
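
/*
 * Typical use (illustrative sketch, not a verbatim call site): eager page
 * splitting when dirty logging is enabled for a memslot, so that dirty
 * tracking faults are taken at 4KB granularity from the start:
 *
 *	kvm_tdp_mmu_try_split_huge_pages(kvm, slot, slot->base_gfn,
 *					 slot->base_gfn + slot->npages,
 *					 PG_LEVEL_4K, true);
 *
 * Failures are deliberately not propagated to the caller; splitting is an
 * optimization, and any huge page left unsplit is simply split later on
 * demand when a vCPU write-faults on it.
 */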

static bool tdp_mmu_need_write_protect(struct kvm_mmu_page *sp)
{
	/*
	 * All TDP MMU shadow pages share the same role as their root, aside
	 * from level, so it is valid to key off any shadow page to determine if
	 * write protection is needed for an entire tree.
	 */
	return kvm_mmu_page_ad_need_write_protect(sp) || !kvm_ad_enabled();
}

static bool clear_dirty_gfn_range(struct kvm *kvm, struct kvm_mmu_page *root,
				  gfn_t start, gfn_t end)
{
	const u64 dbit = tdp_mmu_need_write_protect(root) ? PT_WRITABLE_MASK :
							    shadow_dirty_mask;
	struct tdp_iter iter;
	bool spte_set = false;

	rcu_read_lock();

	tdp_root_for_each_pte(iter, root, start, end) {
retry:
		if (!is_shadow_present_pte(iter.old_spte) ||
		    !is_last_spte(iter.old_spte, iter.level))
			continue;

		if (tdp_mmu_iter_cond_resched(kvm, &iter, false, true))
			continue;

		KVM_MMU_WARN_ON(dbit == shadow_dirty_mask &&
				spte_ad_need_write_protect(iter.old_spte));

		if (!(iter.old_spte & dbit))
			continue;

		if (tdp_mmu_set_spte_atomic(kvm, &iter, iter.old_spte & ~dbit))
			goto retry;

		spte_set = true;
	}

	rcu_read_unlock();
	return spte_set;
}
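
/*
 * The two dirty-tracking modes behave differently after the clear (sketch,
 * assuming a leaf SPTE that was writable and dirty):
 *
 *	A/D bits in use (dbit == shadow_dirty_mask):
 *		the SPTE stays writable; the next guest write sets the D-bit in
 *		hardware with no VM-exit and is picked up on the next harvest.
 *
 *	Write protection (dbit == PT_WRITABLE_MASK):
 *		the SPTE loses write access; the next guest write faults, and
 *		the fault handler marks the page dirty before restoring the
 *		writable bit.
 */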

/*
 * Clear the dirty status (D-bit or W-bit) of all the SPTEs mapping GFNs in the
 * memslot. Returns true if an SPTE has been changed and the TLBs need to be
 * flushed.
 */
bool kvm_tdp_mmu_clear_dirty_slot(struct kvm *kvm,
				  const struct kvm_memory_slot *slot)
{
	struct kvm_mmu_page *root;
	bool spte_set = false;

	lockdep_assert_held_read(&kvm->mmu_lock);
	for_each_valid_tdp_mmu_root_yield_safe(kvm, root, slot->as_id)
		spte_set |= clear_dirty_gfn_range(kvm, root, slot->base_gfn,
						  slot->base_gfn + slot->npages);

	return spte_set;
}

static void clear_dirty_pt_masked(struct kvm *kvm, struct kvm_mmu_page *root,
				  gfn_t gfn, unsigned long mask, bool wrprot)
{
	const u64 dbit = (wrprot || tdp_mmu_need_write_protect(root)) ? PT_WRITABLE_MASK :
									shadow_dirty_mask;
	struct tdp_iter iter;

	lockdep_assert_held_write(&kvm->mmu_lock);

	rcu_read_lock();

	tdp_root_for_each_leaf_pte(iter, root, gfn + __ffs(mask),
				   gfn + BITS_PER_LONG) {
		if (!mask)
			break;

		KVM_MMU_WARN_ON(dbit == shadow_dirty_mask &&
				spte_ad_need_write_protect(iter.old_spte));

		if (iter.level > PG_LEVEL_4K ||
		    !(mask & (1UL << (iter.gfn - gfn))))
			continue;

		mask &= ~(1UL << (iter.gfn - gfn));

		if (!(iter.old_spte & dbit))
			continue;

		iter.old_spte = tdp_mmu_clear_spte_bits(iter.sptep,
							iter.old_spte, dbit,
							iter.level);

		trace_kvm_tdp_mmu_spte_changed(iter.as_id, iter.gfn, iter.level,
					       iter.old_spte,
					       iter.old_spte & ~dbit);
		kvm_set_pfn_dirty(spte_to_pfn(iter.old_spte));
	}

	rcu_read_unlock();
}
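
/*
 * Worked example (hypothetical values): with gfn = 0x10000 and mask = 0b0101,
 * the walk starts at gfn + __ffs(mask) = 0x10000 and covers at most
 * BITS_PER_LONG GFNs. Only the 4KB SPTEs for 0x10000 (bit 0) and 0x10002
 * (bit 2) have their dirty state cleared; each handled GFN clears its bit in
 * mask, and the loop terminates early once mask reaches zero.
 */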

/*
 * Clear the dirty status (D-bit or W-bit) of all the 4k SPTEs mapping GFNs for
 * which a bit is set in mask, starting at gfn. The given memslot is expected to
 * contain all the GFNs represented by set bits in the mask.
 */
void kvm_tdp_mmu_clear_dirty_pt_masked(struct kvm *kvm,
				       struct kvm_memory_slot *slot,
				       gfn_t gfn, unsigned long mask,
				       bool wrprot)
{
	struct kvm_mmu_page *root;

	for_each_valid_tdp_mmu_root(kvm, root, slot->as_id)
		clear_dirty_pt_masked(kvm, root, gfn, mask, wrprot);
}

static void zap_collapsible_spte_range(struct kvm *kvm,
				       struct kvm_mmu_page *root,
				       const struct kvm_memory_slot *slot)
{
	gfn_t start = slot->base_gfn;
	gfn_t end = start + slot->npages;
	struct tdp_iter iter;
	int max_mapping_level;

	rcu_read_lock();

	for_each_tdp_pte_min_level(iter, root, PG_LEVEL_2M, start, end) {
retry:
		if (tdp_mmu_iter_cond_resched(kvm, &iter, false, true))
			continue;

		if (iter.level > KVM_MAX_HUGEPAGE_LEVEL ||
		    !is_shadow_present_pte(iter.old_spte))
			continue;
		/*
		 * Don't zap leaf SPTEs; if a leaf SPTE could be replaced with
		 * a large page size, then its parent would have been zapped
		 * instead of stepping down.
		 */
		if (is_last_spte(iter.old_spte, iter.level))
			continue;

		/*
		 * If iter.gfn resides outside of the slot, i.e. the page for
		 * the current level overlaps but is not contained by the slot,
		 * then the SPTE can't be made huge. More importantly, trying
		 * to query that info from slot->arch.lpage_info will cause an
		 * out-of-bounds access.
		 */
		if (iter.gfn < start || iter.gfn >= end)
			continue;

		max_mapping_level = kvm_mmu_max_mapping_level(kvm, slot,
							      iter.gfn, PG_LEVEL_NUM);
		if (max_mapping_level < iter.level)
			continue;

		/* Note, a successful atomic zap also does a remote TLB flush. */
		if (tdp_mmu_zap_spte_atomic(kvm, &iter))
			goto retry;
	}

	rcu_read_unlock();
}

/*
 * Zap non-leaf SPTEs (and free their associated page tables) which could
 * be replaced by huge pages, for GFNs within the slot.
 */
void kvm_tdp_mmu_zap_collapsible_sptes(struct kvm *kvm,
				       const struct kvm_memory_slot *slot)
{
	struct kvm_mmu_page *root;

	lockdep_assert_held_read(&kvm->mmu_lock);
	for_each_valid_tdp_mmu_root_yield_safe(kvm, root, slot->as_id)
		zap_collapsible_spte_range(kvm, root, slot);
}
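
/*
 * Illustrative scenario (not a verbatim call path): after dirty logging is
 * disabled on a slot, the slot may still be mapped with 4KB SPTEs that were
 * installed while logging was active. Zapping the non-leaf SPTE that holds
 * those 4KB entries means the next guest access faults, and the fault handler
 * is then free to reinstall a huge mapping:
 *
 *	before:	level-2 SPTE -> page table of 512 4KB SPTEs
 *	after:	level-2 SPTE zapped; the next fault maps the range with one
 *		2MB (or larger) SPTE
 */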

/*
 * Removes write access on the last level SPTE mapping this GFN and unsets the
 * MMU-writable bit to ensure future writes continue to be intercepted.
 * Returns true if an SPTE was set and a TLB flush is needed.
 */
static bool write_protect_gfn(struct kvm *kvm, struct kvm_mmu_page *root,
			      gfn_t gfn, int min_level)
{
	struct tdp_iter iter;
	u64 new_spte;
	bool spte_set = false;

	BUG_ON(min_level > KVM_MAX_HUGEPAGE_LEVEL);

	rcu_read_lock();

	for_each_tdp_pte_min_level(iter, root, min_level, gfn, gfn + 1) {
		if (!is_shadow_present_pte(iter.old_spte) ||
		    !is_last_spte(iter.old_spte, iter.level))
			continue;

		new_spte = iter.old_spte &
			~(PT_WRITABLE_MASK | shadow_mmu_writable_mask);

		if (new_spte == iter.old_spte)
			break;

		tdp_mmu_iter_set_spte(kvm, &iter, new_spte);
		spte_set = true;
	}

	rcu_read_unlock();

	return spte_set;
}

/*
 * Removes write access on the last level SPTE mapping this GFN and unsets the
 * MMU-writable bit to ensure future writes continue to be intercepted.
 * Returns true if an SPTE was set and a TLB flush is needed.
 */
bool kvm_tdp_mmu_write_protect_gfn(struct kvm *kvm,
				   struct kvm_memory_slot *slot, gfn_t gfn,
				   int min_level)
{
	struct kvm_mmu_page *root;
	bool spte_set = false;

	lockdep_assert_held_write(&kvm->mmu_lock);
	for_each_valid_tdp_mmu_root(kvm, root, slot->as_id)
		spte_set |= write_protect_gfn(kvm, root, gfn, min_level);

	return spte_set;
}

/*
 * Return the level of the lowest level SPTE added to sptes.
 * That SPTE may be non-present.
 *
 * Must be called between kvm_tdp_mmu_walk_lockless_{begin,end}.
 */
int kvm_tdp_mmu_get_walk(struct kvm_vcpu *vcpu, u64 addr, u64 *sptes,
			 int *root_level)
{
	struct tdp_iter iter;
	struct kvm_mmu *mmu = vcpu->arch.mmu;
	gfn_t gfn = addr >> PAGE_SHIFT;
	int leaf = -1;

	*root_level = vcpu->arch.mmu->root_role.level;

	tdp_mmu_for_each_pte(iter, mmu, gfn, gfn + 1) {
		leaf = iter.level;
		sptes[leaf] = iter.old_spte;
	}

	return leaf;
}
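
/*
 * How a caller consumes the output (sketch; the MMIO SPTE lookup in mmu.c is
 * the typical consumer): sptes[] is indexed by level, so after the walk the
 * valid entries are sptes[leaf] .. sptes[*root_level], from the lowest SPTE
 * reached up to the root. For a 4-level walk that stops at a non-present
 * level-2 entry (values illustrative):
 *
 *	leaf = 2, *root_level = 4
 *	sptes[4] = the root-level entry, sptes[3] = the level-3 entry,
 *	sptes[2] = 0 (non-present); sptes[1] is never written and must not
 *	be read.
 */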

/*
 * Returns the last level spte pointer of the shadow page walk for the given
 * gpa, and sets *spte to the spte value. This spte may be non-present. If no
 * walk could be performed, returns NULL and *spte does not contain valid data.
 *
 * Contract:
 *  - Must be called between kvm_tdp_mmu_walk_lockless_{begin,end}.
 *  - The returned sptep must not be used after kvm_tdp_mmu_walk_lockless_end.
 *
 * WARNING: This function is only intended to be called during fast_page_fault.
 */
u64 *kvm_tdp_mmu_fast_pf_get_last_sptep(struct kvm_vcpu *vcpu, u64 addr,
					u64 *spte)
{
	struct tdp_iter iter;
	struct kvm_mmu *mmu = vcpu->arch.mmu;
	gfn_t gfn = addr >> PAGE_SHIFT;
	tdp_ptep_t sptep = NULL;

	tdp_mmu_for_each_pte(iter, mmu, gfn, gfn + 1) {
		*spte = iter.old_spte;
		sptep = iter.sptep;
	}

	/*
	 * Perform the rcu_dereference to get the raw spte pointer value since
	 * we are passing it up to fast_page_fault, which is shared with the
	 * legacy MMU and thus does not retain the TDP MMU-specific __rcu
	 * annotation.
	 *
	 * This is safe since fast_page_fault obeys the contracts of this
	 * function as well as all TDP MMU contracts around modifying SPTEs
	 * outside of mmu_lock.
	 */
	return rcu_dereference(sptep);
}