// SPDX-License-Identifier: GPL-2.0-only
/*
 * kvm asynchronous fault support
 *
 * Copyright 2010 Red Hat, Inc.
 *
 * Author:
 *      Gleb Natapov <gleb@redhat.com>
 */

#include <linux/kvm_host.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/mmu_context.h>
#include <linux/sched/mm.h>

#include "async_pf.h"
#include <trace/events/kvm.h>

static struct kmem_cache *async_pf_cache;

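/*
 * Create the slab cache used to allocate struct kvm_async_pf instances.
 * Called once at module load time (from kvm_init()); pairs with
 * kvm_async_pf_deinit() below.
 */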
int kvm_async_pf_init(void)
{
	async_pf_cache = KMEM_CACHE(kvm_async_pf, 0);

	if (!async_pf_cache)
		return -ENOMEM;

	return 0;
}

void kvm_async_pf_deinit(void)
{
	kmem_cache_destroy(async_pf_cache);
	async_pf_cache = NULL;
}

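/*
 * Initialize the per-vCPU async #PF state: the "queue" of in-flight work
 * items, the "done" list of completed items, and the lock protecting them.
 */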
void kvm_async_pf_vcpu_init(struct kvm_vcpu *vcpu)
{
	INIT_LIST_HEAD(&vcpu->async_pf.done);
	INIT_LIST_HEAD(&vcpu->async_pf.queue);
	spin_lock_init(&vcpu->async_pf.lock);
}

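/*
 * Work item handler, runs in workqueue context after being scheduled by
 * kvm_setup_async_pf().  Faults the page in on behalf of the guest, moves
 * the item to the vCPU's "done" list, and wakes the vCPU so that it can
 * deliver a "page ready" event.
 */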
static void async_pf_execute(struct work_struct *work)
{
	struct kvm_async_pf *apf =
		container_of(work, struct kvm_async_pf, work);
	struct kvm_vcpu *vcpu = apf->vcpu;
	struct mm_struct *mm = vcpu->kvm->mm;
	unsigned long addr = apf->addr;
	gpa_t cr2_or_gpa = apf->cr2_or_gpa;
	int locked = 1;
	bool first;

	might_sleep();

	/*
	 * Attempt to pin the VM's host address space, and simply skip gup() if
	 * acquiring a pin fails, i.e. if the process is exiting.  Note, KVM
	 * holds a reference to its associated mm_struct until the very end of
	 * kvm_destroy_vm(), i.e. the struct itself won't be freed before this
	 * work item is fully processed.
	 */
	if (mmget_not_zero(mm)) {
		mmap_read_lock(mm);
		get_user_pages_remote(mm, addr, 1, FOLL_WRITE, NULL, &locked);
		if (locked)
			mmap_read_unlock(mm);
		mmput(mm);
	}

	/*
	 * Notify and kick the vCPU even if faulting in the page failed, e.g.
	 * so that the vCPU can retry the fault synchronously.
	 */
	if (IS_ENABLED(CONFIG_KVM_ASYNC_PF_SYNC))
		kvm_arch_async_page_present(vcpu, apf);

	spin_lock(&vcpu->async_pf.lock);
	first = list_empty(&vcpu->async_pf.done);
	list_add_tail(&apf->link, &vcpu->async_pf.done);
	apf->vcpu = NULL;
	spin_unlock(&vcpu->async_pf.lock);

	/*
	 * The apf struct may be freed by kvm_check_async_pf_completion() as
	 * soon as the lock is dropped.  Nullify it to prevent improper usage.
	 */
	apf = NULL;

	if (!IS_ENABLED(CONFIG_KVM_ASYNC_PF_SYNC) && first)
		kvm_arch_async_page_present_queued(vcpu);

	trace_kvm_async_pf_completed(addr, cr2_or_gpa);

	__kvm_vcpu_wake_up(vcpu);
}
99 | |
static void kvm_flush_and_free_async_pf_work(struct kvm_async_pf *work)
{
	/*
	 * The async #PF is "done", but KVM must wait for the work item itself,
	 * i.e. async_pf_execute(), to run to completion.  If KVM is built as a
	 * module, KVM must ensure *no* code owned by KVM (the module) can be
	 * run after the last call to module_put().  Note, flushing the work
	 * item is always required when the item is taken off the completion
	 * queue.  E.g. even if the vCPU handles the item in the "normal" path,
	 * the VM could be terminated before async_pf_execute() completes.
	 *
	 * Wake-all events skip the queue and go straight to the done list,
	 * i.e. don't need to be flushed (but sanity check that the work wasn't
	 * queued).
	 */
	if (work->wakeup_all)
		WARN_ON_ONCE(work->work.func);
	else
		flush_work(&work->work);
	kmem_cache_free(async_pf_cache, work);
}

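/*
 * Cancel all outstanding async #PF work for @vcpu and free any completed
 * items.  Called when the async #PF state is being torn down, e.g. on VM
 * destruction or when the guest disables the feature.
 */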
void kvm_clear_async_pf_completion_queue(struct kvm_vcpu *vcpu)
{
	spin_lock(&vcpu->async_pf.lock);

	/* cancel outstanding work queue item */
	while (!list_empty(&vcpu->async_pf.queue)) {
		struct kvm_async_pf *work =
			list_first_entry(&vcpu->async_pf.queue,
					 typeof(*work), queue);
		list_del(&work->queue);

		/*
		 * The work item has already completed: async_pf_execute()
		 * cleared work->vcpu when it moved the item to
		 * vcpu->async_pf.done, so there is nothing to cancel here.
		 */
		if (!work->vcpu)
			continue;

138 | |
139 | spin_unlock(lock: &vcpu->async_pf.lock); |
140 | #ifdef CONFIG_KVM_ASYNC_PF_SYNC |
141 | flush_work(&work->work); |
142 | #else |
143 | if (cancel_work_sync(work: &work->work)) |
144 | kmem_cache_free(s: async_pf_cache, objp: work); |
145 | #endif |
146 | spin_lock(lock: &vcpu->async_pf.lock); |
147 | } |
148 | |
149 | while (!list_empty(head: &vcpu->async_pf.done)) { |
150 | struct kvm_async_pf *work = |
151 | list_first_entry(&vcpu->async_pf.done, |
152 | typeof(*work), link); |
153 | list_del(entry: &work->link); |
154 | |
155 | spin_unlock(lock: &vcpu->async_pf.lock); |
156 | kvm_flush_and_free_async_pf_work(work); |
157 | spin_lock(lock: &vcpu->async_pf.lock); |
158 | } |
159 | spin_unlock(lock: &vcpu->async_pf.lock); |
160 | |
161 | vcpu->async_pf.queued = 0; |
162 | } |
163 | |
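/*
 * Deliver completed async #PFs to the guest.  Called from the vCPU run
 * loop; drains the "done" list for as long as the arch code allows a
 * "page ready" event to be dequeued, then frees each work item.
 */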
void kvm_check_async_pf_completion(struct kvm_vcpu *vcpu)
{
	struct kvm_async_pf *work;

	while (!list_empty_careful(&vcpu->async_pf.done) &&
	       kvm_arch_can_dequeue_async_page_present(vcpu)) {
		spin_lock(&vcpu->async_pf.lock);
		work = list_first_entry(&vcpu->async_pf.done, typeof(*work),
					link);
		list_del(&work->link);
		spin_unlock(&vcpu->async_pf.lock);

		kvm_arch_async_page_ready(vcpu, work);
		if (!IS_ENABLED(CONFIG_KVM_ASYNC_PF_SYNC))
			kvm_arch_async_page_present(vcpu, work);

		list_del(&work->queue);
		vcpu->async_pf.queued--;
		kvm_flush_and_free_async_pf_work(work);
	}
}

/*
 * Try to schedule a job to handle the page fault asynchronously.  Returns
 * 'true' on success, 'false' on failure (in which case the fault has to be
 * handled synchronously).
 */
bool kvm_setup_async_pf(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa,
			unsigned long hva, struct kvm_arch_async_pf *arch)
{
	struct kvm_async_pf *work;

	if (vcpu->async_pf.queued >= ASYNC_PF_PER_VCPU)
		return false;

	/* Arch-specific code should not do async PF in this case. */
	if (unlikely(kvm_is_error_hva(hva)))
		return false;

	/*
	 * Allocate with GFP_NOWAIT: if the allocation would have to sleep,
	 * the vCPU may as well sleep faulting in the page synchronously
	 * instead.
	 */
	work = kmem_cache_zalloc(async_pf_cache, GFP_NOWAIT | __GFP_NOWARN);
	if (!work)
		return false;

	work->wakeup_all = false;
	work->vcpu = vcpu;
	work->cr2_or_gpa = cr2_or_gpa;
	work->addr = hva;
	work->arch = *arch;

	INIT_WORK(&work->work, async_pf_execute);

	list_add_tail(&work->queue, &vcpu->async_pf.queue);
	vcpu->async_pf.queued++;
	work->notpresent_injected = kvm_arch_async_page_not_present(vcpu, work);

	schedule_work(&work->work);

	return true;
}

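/*
 * Queue a "wakeup all" event: a special work item that bypasses the work
 * queue and goes straight to the done list, telling the guest to wake all
 * tasks waiting on outstanding async #PF tokens, e.g. after the completion
 * queue has been cleared.
 */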
int kvm_async_pf_wakeup_all(struct kvm_vcpu *vcpu)
{
	struct kvm_async_pf *work;
	bool first;

	if (!list_empty_careful(&vcpu->async_pf.done))
		return 0;

	work = kmem_cache_zalloc(async_pf_cache, GFP_ATOMIC);
	if (!work)
		return -ENOMEM;

	work->wakeup_all = true;
	INIT_LIST_HEAD(&work->queue); /* for list_del to work */

	spin_lock(&vcpu->async_pf.lock);
	first = list_empty(&vcpu->async_pf.done);
	list_add_tail(&work->link, &vcpu->async_pf.done);
	spin_unlock(&vcpu->async_pf.lock);

	if (!IS_ENABLED(CONFIG_KVM_ASYNC_PF_SYNC) && first)
		kvm_arch_async_page_present_queued(vcpu);

	vcpu->async_pf.queued++;
	return 0;
}