1 | /* SPDX-License-Identifier: GPL-2.0-only */ |
2 | #ifndef __KVM_HOST_H |
3 | #define __KVM_HOST_H |
4 | |
5 | |
6 | #include <linux/types.h> |
7 | #include <linux/hardirq.h> |
8 | #include <linux/list.h> |
9 | #include <linux/mutex.h> |
10 | #include <linux/spinlock.h> |
11 | #include <linux/signal.h> |
12 | #include <linux/sched.h> |
13 | #include <linux/sched/stat.h> |
14 | #include <linux/bug.h> |
15 | #include <linux/minmax.h> |
16 | #include <linux/mm.h> |
17 | #include <linux/mmu_notifier.h> |
18 | #include <linux/preempt.h> |
19 | #include <linux/msi.h> |
20 | #include <linux/slab.h> |
21 | #include <linux/vmalloc.h> |
22 | #include <linux/rcupdate.h> |
23 | #include <linux/ratelimit.h> |
24 | #include <linux/err.h> |
25 | #include <linux/irqflags.h> |
26 | #include <linux/context_tracking.h> |
27 | #include <linux/irqbypass.h> |
28 | #include <linux/rcuwait.h> |
29 | #include <linux/refcount.h> |
30 | #include <linux/nospec.h> |
31 | #include <linux/notifier.h> |
32 | #include <linux/ftrace.h> |
33 | #include <linux/hashtable.h> |
34 | #include <linux/instrumentation.h> |
35 | #include <linux/interval_tree.h> |
36 | #include <linux/rbtree.h> |
37 | #include <linux/xarray.h> |
38 | #include <asm/signal.h> |
39 | |
40 | #include <linux/kvm.h> |
41 | #include <linux/kvm_para.h> |
42 | |
43 | #include <linux/kvm_types.h> |
44 | |
45 | #include <asm/kvm_host.h> |
46 | #include <linux/kvm_dirty_ring.h> |
47 | |
48 | #ifndef KVM_MAX_VCPU_IDS |
49 | #define KVM_MAX_VCPU_IDS KVM_MAX_VCPUS |
50 | #endif |
51 | |
52 | /* |
 * Bits 16-31 of kvm_userspace_memory_region::flags are used internally by
 * KVM; the remaining bits are visible to userspace and are defined in
 * include/uapi/linux/kvm.h.
56 | */ |
57 | #define KVM_MEMSLOT_INVALID (1UL << 16) |
58 | |
59 | /* |
60 | * Bit 63 of the memslot generation number is an "update in-progress flag", |
61 | * e.g. is temporarily set for the duration of kvm_swap_active_memslots(). |
62 | * This flag effectively creates a unique generation number that is used to |
63 | * mark cached memslot data, e.g. MMIO accesses, as potentially being stale, |
64 | * i.e. may (or may not) have come from the previous memslots generation. |
65 | * |
66 | * This is necessary because the actual memslots update is not atomic with |
67 | * respect to the generation number update. Updating the generation number |
68 | * first would allow a vCPU to cache a spte from the old memslots using the |
69 | * new generation number, and updating the generation number after switching |
70 | * to the new memslots would allow cache hits using the old generation number |
71 | * to reference the defunct memslots. |
72 | * |
73 | * This mechanism is used to prevent getting hits in KVM's caches while a |
74 | * memslot update is in-progress, and to prevent cache hits *after* updating |
75 | * the actual generation number against accesses that were inserted into the |
76 | * cache *before* the memslots were updated. |
77 | */ |
78 | #define KVM_MEMSLOT_GEN_UPDATE_IN_PROGRESS BIT_ULL(63) |
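
/*
 * Illustrative sketch (not KVM's actual MMIO cache code; the cache structure
 * and field names below are made up) of how a cached generation can be
 * validated against this flag:
 *
 *	u64 gen = slots->generation;
 *
 *	if (gen & KVM_MEMSLOT_GEN_UPDATE_IN_PROGRESS)
 *		return false;			// update in-progress, treat as stale
 *	return cache->generation == gen;	// stale if the generations differ
 */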
79 | |
80 | /* Two fragments for cross MMIO pages. */ |
81 | #define KVM_MAX_MMIO_FRAGMENTS 2 |
82 | |
83 | #ifndef KVM_MAX_NR_ADDRESS_SPACES |
84 | #define KVM_MAX_NR_ADDRESS_SPACES 1 |
85 | #endif |
86 | |
87 | /* |
 * For a normal pfn, the highest 12 bits should be zero, so we can use
 * bits 52-62 to indicate an error pfn and bit 63 to indicate a noslot
 * pfn.
91 | */ |
92 | #define KVM_PFN_ERR_MASK (0x7ffULL << 52) |
93 | #define KVM_PFN_ERR_NOSLOT_MASK (0xfffULL << 52) |
94 | #define KVM_PFN_NOSLOT (0x1ULL << 63) |
95 | |
96 | #define KVM_PFN_ERR_FAULT (KVM_PFN_ERR_MASK) |
97 | #define KVM_PFN_ERR_HWPOISON (KVM_PFN_ERR_MASK + 1) |
98 | #define KVM_PFN_ERR_RO_FAULT (KVM_PFN_ERR_MASK + 2) |
99 | #define KVM_PFN_ERR_SIGPENDING (KVM_PFN_ERR_MASK + 3) |
100 | |
101 | /* |
 * Error pfns indicate that the gfn is in a slot but could not be
 * translated to a pfn on the host.
104 | */ |
105 | static inline bool is_error_pfn(kvm_pfn_t pfn) |
106 | { |
107 | return !!(pfn & KVM_PFN_ERR_MASK); |
108 | } |
109 | |
110 | /* |
111 | * KVM_PFN_ERR_SIGPENDING indicates that fetching the PFN was interrupted |
112 | * by a pending signal. Note, the signal may or may not be fatal. |
113 | */ |
114 | static inline bool is_sigpending_pfn(kvm_pfn_t pfn) |
115 | { |
116 | return pfn == KVM_PFN_ERR_SIGPENDING; |
117 | } |
118 | |
119 | /* |
 * Error_noslot pfns indicate that the gfn cannot be translated to a pfn:
 * either it is not in any slot, or the translation to a pfn failed.
123 | */ |
124 | static inline bool is_error_noslot_pfn(kvm_pfn_t pfn) |
125 | { |
126 | return !!(pfn & KVM_PFN_ERR_NOSLOT_MASK); |
127 | } |
128 | |
129 | /* noslot pfn indicates that the gfn is not in slot. */ |
130 | static inline bool is_noslot_pfn(kvm_pfn_t pfn) |
131 | { |
132 | return pfn == KVM_PFN_NOSLOT; |
133 | } |
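
/*
 * Typical caller pattern for the predicates above (illustrative only, error
 * handling trimmed; the errno mapping shown is an example, not a contract):
 *
 *	kvm_pfn_t pfn = gfn_to_pfn(kvm, gfn);
 *
 *	if (pfn == KVM_PFN_ERR_SIGPENDING)
 *		return -EINTR;
 *	if (is_error_noslot_pfn(pfn))
 *		return -EFAULT;		// no slot, or translation failed
 */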
134 | |
135 | /* |
 * Architectures whose KVM_HVA_ERR_BAD is something other than PAGE_OFFSET
 * (e.g. s390) provide their own defines and kvm_is_error_hva().
138 | */ |
139 | #ifndef KVM_HVA_ERR_BAD |
140 | |
141 | #define KVM_HVA_ERR_BAD (PAGE_OFFSET) |
142 | #define KVM_HVA_ERR_RO_BAD (PAGE_OFFSET + PAGE_SIZE) |
143 | |
144 | static inline bool kvm_is_error_hva(unsigned long addr) |
145 | { |
146 | return addr >= PAGE_OFFSET; |
147 | } |
148 | |
149 | #endif |
150 | |
151 | static inline bool kvm_is_error_gpa(gpa_t gpa) |
152 | { |
153 | return gpa == INVALID_GPA; |
154 | } |
155 | |
156 | #define KVM_ERR_PTR_BAD_PAGE (ERR_PTR(-ENOENT)) |
157 | |
158 | static inline bool is_error_page(struct page *page) |
159 | { |
	return IS_ERR(page);
161 | } |
162 | |
163 | #define KVM_REQUEST_MASK GENMASK(7,0) |
164 | #define KVM_REQUEST_NO_WAKEUP BIT(8) |
165 | #define KVM_REQUEST_WAIT BIT(9) |
166 | #define KVM_REQUEST_NO_ACTION BIT(10) |
167 | /* |
168 | * Architecture-independent vcpu->requests bit members |
 * Bits 4-7 are reserved for more arch-independent bits.
170 | */ |
171 | #define KVM_REQ_TLB_FLUSH (0 | KVM_REQUEST_WAIT | KVM_REQUEST_NO_WAKEUP) |
172 | #define KVM_REQ_VM_DEAD (1 | KVM_REQUEST_WAIT | KVM_REQUEST_NO_WAKEUP) |
173 | #define KVM_REQ_UNBLOCK 2 |
174 | #define KVM_REQ_DIRTY_RING_SOFT_FULL 3 |
175 | #define KVM_REQUEST_ARCH_BASE 8 |
176 | |
177 | /* |
 * KVM_REQ_OUTSIDE_GUEST_MODE exists purely as a way to force the vCPU to
179 | * OUTSIDE_GUEST_MODE. KVM_REQ_OUTSIDE_GUEST_MODE differs from a vCPU "kick" |
180 | * in that it ensures the vCPU has reached OUTSIDE_GUEST_MODE before continuing |
181 | * on. A kick only guarantees that the vCPU is on its way out, e.g. a previous |
182 | * kick may have set vcpu->mode to EXITING_GUEST_MODE, and so there's no |
183 | * guarantee the vCPU received an IPI and has actually exited guest mode. |
184 | */ |
185 | #define KVM_REQ_OUTSIDE_GUEST_MODE (KVM_REQUEST_NO_ACTION | KVM_REQUEST_WAIT | KVM_REQUEST_NO_WAKEUP) |
186 | |
187 | #define KVM_ARCH_REQ_FLAGS(nr, flags) ({ \ |
188 | BUILD_BUG_ON((unsigned)(nr) >= (sizeof_field(struct kvm_vcpu, requests) * 8) - KVM_REQUEST_ARCH_BASE); \ |
189 | (unsigned)(((nr) + KVM_REQUEST_ARCH_BASE) | (flags)); \ |
190 | }) |
191 | #define KVM_ARCH_REQ(nr) KVM_ARCH_REQ_FLAGS(nr, 0) |
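
/*
 * Illustrative (hypothetical) arch-side usage of the helpers above, i.e. how
 * an architecture would mint its own request numbers on top of
 * KVM_REQUEST_ARCH_BASE (the request names below are made up):
 *
 *	#define KVM_REQ_EXAMPLE		KVM_ARCH_REQ(0)
 *	#define KVM_REQ_EXAMPLE_SYNC	KVM_ARCH_REQ_FLAGS(1, KVM_REQUEST_WAIT)
 *
 * The BUILD_BUG_ON() in KVM_ARCH_REQ_FLAGS() rejects request numbers that
 * would not fit in vcpu->requests.
 */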
192 | |
193 | bool kvm_make_vcpus_request_mask(struct kvm *kvm, unsigned int req, |
194 | unsigned long *vcpu_bitmap); |
195 | bool kvm_make_all_cpus_request(struct kvm *kvm, unsigned int req); |
196 | bool kvm_make_all_cpus_request_except(struct kvm *kvm, unsigned int req, |
197 | struct kvm_vcpu *except); |
198 | |
199 | #define KVM_USERSPACE_IRQ_SOURCE_ID 0 |
200 | #define KVM_IRQFD_RESAMPLE_IRQ_SOURCE_ID 1 |
201 | |
202 | extern struct mutex kvm_lock; |
203 | extern struct list_head vm_list; |
204 | |
205 | struct kvm_io_range { |
206 | gpa_t addr; |
207 | int len; |
208 | struct kvm_io_device *dev; |
209 | }; |
210 | |
211 | #define NR_IOBUS_DEVS 1000 |
212 | |
213 | struct kvm_io_bus { |
214 | int dev_count; |
215 | int ioeventfd_count; |
216 | struct kvm_io_range range[]; |
217 | }; |
218 | |
219 | enum kvm_bus { |
220 | KVM_MMIO_BUS, |
221 | KVM_PIO_BUS, |
222 | KVM_VIRTIO_CCW_NOTIFY_BUS, |
223 | KVM_FAST_MMIO_BUS, |
224 | KVM_NR_BUSES |
225 | }; |
226 | |
227 | int kvm_io_bus_write(struct kvm_vcpu *vcpu, enum kvm_bus bus_idx, gpa_t addr, |
228 | int len, const void *val); |
229 | int kvm_io_bus_write_cookie(struct kvm_vcpu *vcpu, enum kvm_bus bus_idx, |
230 | gpa_t addr, int len, const void *val, long cookie); |
231 | int kvm_io_bus_read(struct kvm_vcpu *vcpu, enum kvm_bus bus_idx, gpa_t addr, |
232 | int len, void *val); |
233 | int kvm_io_bus_register_dev(struct kvm *kvm, enum kvm_bus bus_idx, gpa_t addr, |
234 | int len, struct kvm_io_device *dev); |
235 | int kvm_io_bus_unregister_dev(struct kvm *kvm, enum kvm_bus bus_idx, |
236 | struct kvm_io_device *dev); |
237 | struct kvm_io_device *kvm_io_bus_get_dev(struct kvm *kvm, enum kvm_bus bus_idx, |
238 | gpa_t addr); |
239 | |
240 | #ifdef CONFIG_KVM_ASYNC_PF |
241 | struct kvm_async_pf { |
242 | struct work_struct work; |
243 | struct list_head link; |
244 | struct list_head queue; |
245 | struct kvm_vcpu *vcpu; |
246 | gpa_t cr2_or_gpa; |
247 | unsigned long addr; |
248 | struct kvm_arch_async_pf arch; |
249 | bool wakeup_all; |
250 | bool notpresent_injected; |
251 | }; |
252 | |
253 | void kvm_clear_async_pf_completion_queue(struct kvm_vcpu *vcpu); |
254 | void kvm_check_async_pf_completion(struct kvm_vcpu *vcpu); |
255 | bool kvm_setup_async_pf(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa, |
256 | unsigned long hva, struct kvm_arch_async_pf *arch); |
257 | int kvm_async_pf_wakeup_all(struct kvm_vcpu *vcpu); |
258 | #endif |
259 | |
260 | #ifdef CONFIG_KVM_GENERIC_MMU_NOTIFIER |
261 | union kvm_mmu_notifier_arg { |
262 | pte_t pte; |
263 | unsigned long attributes; |
264 | }; |
265 | |
266 | struct kvm_gfn_range { |
267 | struct kvm_memory_slot *slot; |
268 | gfn_t start; |
269 | gfn_t end; |
270 | union kvm_mmu_notifier_arg arg; |
271 | bool may_block; |
272 | }; |
273 | bool kvm_unmap_gfn_range(struct kvm *kvm, struct kvm_gfn_range *range); |
274 | bool kvm_age_gfn(struct kvm *kvm, struct kvm_gfn_range *range); |
275 | bool kvm_test_age_gfn(struct kvm *kvm, struct kvm_gfn_range *range); |
276 | bool kvm_set_spte_gfn(struct kvm *kvm, struct kvm_gfn_range *range); |
277 | #endif |
278 | |
279 | enum { |
280 | OUTSIDE_GUEST_MODE, |
281 | IN_GUEST_MODE, |
282 | EXITING_GUEST_MODE, |
283 | READING_SHADOW_PAGE_TABLES, |
284 | }; |
285 | |
286 | #define KVM_UNMAPPED_PAGE ((void *) 0x500 + POISON_POINTER_DELTA) |
287 | |
288 | struct kvm_host_map { |
289 | /* |
	 * Only valid if the 'pfn' is managed by the host kernel (i.e. there is
	 * a 'struct page' for it).  When using the mem= kernel parameter, some
	 * memory can be used as guest memory without being managed by the
	 * host kernel.
	 * If 'pfn' is not managed by the host kernel, this field is
	 * initialized to KVM_UNMAPPED_PAGE.
296 | */ |
297 | struct page *page; |
298 | void *hva; |
299 | kvm_pfn_t pfn; |
300 | kvm_pfn_t gfn; |
301 | }; |
302 | |
303 | /* |
304 | * Used to check if the mapping is valid or not. Never use 'kvm_host_map' |
305 | * directly to check for that. |
306 | */ |
307 | static inline bool kvm_vcpu_mapped(struct kvm_host_map *map) |
308 | { |
309 | return !!map->hva; |
310 | } |
311 | |
312 | static inline bool kvm_vcpu_can_poll(ktime_t cur, ktime_t stop) |
313 | { |
	return single_task_running() && !need_resched() && ktime_before(cur, stop);
315 | } |
316 | |
317 | /* |
318 | * Sometimes a large or cross-page mmio needs to be broken up into separate |
319 | * exits for userspace servicing. |
320 | */ |
321 | struct kvm_mmio_fragment { |
322 | gpa_t gpa; |
323 | void *data; |
324 | unsigned len; |
325 | }; |
326 | |
327 | struct kvm_vcpu { |
328 | struct kvm *kvm; |
329 | #ifdef CONFIG_PREEMPT_NOTIFIERS |
330 | struct preempt_notifier preempt_notifier; |
331 | #endif |
332 | int cpu; |
333 | int vcpu_id; /* id given by userspace at creation */ |
334 | int vcpu_idx; /* index into kvm->vcpu_array */ |
335 | int ____srcu_idx; /* Don't use this directly. You've been warned. */ |
336 | #ifdef CONFIG_PROVE_RCU |
337 | int srcu_depth; |
338 | #endif |
339 | int mode; |
340 | u64 requests; |
341 | unsigned long guest_debug; |
342 | |
343 | struct mutex mutex; |
344 | struct kvm_run *run; |
345 | |
346 | #ifndef __KVM_HAVE_ARCH_WQP |
347 | struct rcuwait wait; |
348 | #endif |
349 | struct pid __rcu *pid; |
350 | int sigset_active; |
351 | sigset_t sigset; |
352 | unsigned int halt_poll_ns; |
353 | bool valid_wakeup; |
354 | |
355 | #ifdef CONFIG_HAS_IOMEM |
356 | int mmio_needed; |
357 | int mmio_read_completed; |
358 | int mmio_is_write; |
359 | int mmio_cur_fragment; |
360 | int mmio_nr_fragments; |
361 | struct kvm_mmio_fragment mmio_fragments[KVM_MAX_MMIO_FRAGMENTS]; |
362 | #endif |
363 | |
364 | #ifdef CONFIG_KVM_ASYNC_PF |
365 | struct { |
366 | u32 queued; |
367 | struct list_head queue; |
368 | struct list_head done; |
369 | spinlock_t lock; |
370 | } async_pf; |
371 | #endif |
372 | |
373 | #ifdef CONFIG_HAVE_KVM_CPU_RELAX_INTERCEPT |
374 | /* |
	 * CPU relax intercept or pause loop exit optimization
	 * in_spin_loop: set when a vcpu does a pause loop exit
	 *	or a cpu relax intercept.
378 | * dy_eligible: indicates whether vcpu is eligible for directed yield. |
379 | */ |
380 | struct { |
381 | bool in_spin_loop; |
382 | bool dy_eligible; |
383 | } spin_loop; |
384 | #endif |
385 | bool preempted; |
386 | bool ready; |
387 | struct kvm_vcpu_arch arch; |
388 | struct kvm_vcpu_stat stat; |
389 | char stats_id[KVM_STATS_NAME_SIZE]; |
390 | struct kvm_dirty_ring dirty_ring; |
391 | |
392 | /* |
393 | * The most recently used memslot by this vCPU and the slots generation |
394 | * for which it is valid. |
395 | * No wraparound protection is needed since generations won't overflow in |
396 | * thousands of years, even assuming 1M memslot operations per second. |
397 | */ |
398 | struct kvm_memory_slot *last_used_slot; |
399 | u64 last_used_slot_gen; |
400 | }; |
401 | |
402 | /* |
403 | * Start accounting time towards a guest. |
404 | * Must be called before entering guest context. |
405 | */ |
406 | static __always_inline void guest_timing_enter_irqoff(void) |
407 | { |
408 | /* |
	 * This is running in ioctl context so it's safe to assume that it's the
	 * stime pending cputime to flush.
411 | */ |
412 | instrumentation_begin(); |
413 | vtime_account_guest_enter(); |
414 | instrumentation_end(); |
415 | } |
416 | |
417 | /* |
418 | * Enter guest context and enter an RCU extended quiescent state. |
419 | * |
420 | * Between guest_context_enter_irqoff() and guest_context_exit_irqoff() it is |
421 | * unsafe to use any code which may directly or indirectly use RCU, tracing |
422 | * (including IRQ flag tracing), or lockdep. All code in this period must be |
423 | * non-instrumentable. |
424 | */ |
425 | static __always_inline void guest_context_enter_irqoff(void) |
426 | { |
427 | /* |
	 * KVM does not hold any references to RCU-protected data when it
	 * switches the CPU into guest mode.  In fact, switching to guest mode
	 * is very similar to exiting to userspace from an RCU point of view.
	 * In addition, the CPU may stay in guest mode for quite a long time
	 * (up to one time slice).  Let's treat guest mode as a quiescent
	 * state, just like we do with user-mode execution.
434 | */ |
435 | if (!context_tracking_guest_enter()) { |
436 | instrumentation_begin(); |
437 | rcu_virt_note_context_switch(); |
438 | instrumentation_end(); |
439 | } |
440 | } |
441 | |
442 | /* |
443 | * Deprecated. Architectures should move to guest_timing_enter_irqoff() and |
444 | * guest_state_enter_irqoff(). |
445 | */ |
446 | static __always_inline void guest_enter_irqoff(void) |
447 | { |
448 | guest_timing_enter_irqoff(); |
449 | guest_context_enter_irqoff(); |
450 | } |
451 | |
452 | /** |
453 | * guest_state_enter_irqoff - Fixup state when entering a guest |
454 | * |
455 | * Entry to a guest will enable interrupts, but the kernel state is interrupts |
456 | * disabled when this is invoked. Also tell RCU about it. |
457 | * |
458 | * 1) Trace interrupts on state |
459 | * 2) Invoke context tracking if enabled to adjust RCU state |
460 | * 3) Tell lockdep that interrupts are enabled |
461 | * |
462 | * Invoked from architecture specific code before entering a guest. |
463 | * Must be called with interrupts disabled and the caller must be |
464 | * non-instrumentable. |
465 | * The caller has to invoke guest_timing_enter_irqoff() before this. |
466 | * |
467 | * Note: this is analogous to exit_to_user_mode(). |
468 | */ |
469 | static __always_inline void guest_state_enter_irqoff(void) |
470 | { |
471 | instrumentation_begin(); |
472 | trace_hardirqs_on_prepare(); |
473 | lockdep_hardirqs_on_prepare(); |
474 | instrumentation_end(); |
475 | |
476 | guest_context_enter_irqoff(); |
477 | lockdep_hardirqs_on(CALLER_ADDR0); |
478 | } |
479 | |
480 | /* |
481 | * Exit guest context and exit an RCU extended quiescent state. |
482 | * |
483 | * Between guest_context_enter_irqoff() and guest_context_exit_irqoff() it is |
484 | * unsafe to use any code which may directly or indirectly use RCU, tracing |
485 | * (including IRQ flag tracing), or lockdep. All code in this period must be |
486 | * non-instrumentable. |
487 | */ |
488 | static __always_inline void guest_context_exit_irqoff(void) |
489 | { |
490 | context_tracking_guest_exit(); |
491 | } |
492 | |
493 | /* |
494 | * Stop accounting time towards a guest. |
495 | * Must be called after exiting guest context. |
496 | */ |
497 | static __always_inline void guest_timing_exit_irqoff(void) |
498 | { |
499 | instrumentation_begin(); |
500 | /* Flush the guest cputime we spent on the guest */ |
501 | vtime_account_guest_exit(); |
502 | instrumentation_end(); |
503 | } |
504 | |
505 | /* |
506 | * Deprecated. Architectures should move to guest_state_exit_irqoff() and |
507 | * guest_timing_exit_irqoff(). |
508 | */ |
509 | static __always_inline void guest_exit_irqoff(void) |
510 | { |
511 | guest_context_exit_irqoff(); |
512 | guest_timing_exit_irqoff(); |
513 | } |
514 | |
515 | static inline void guest_exit(void) |
516 | { |
517 | unsigned long flags; |
518 | |
519 | local_irq_save(flags); |
520 | guest_exit_irqoff(); |
521 | local_irq_restore(flags); |
522 | } |
523 | |
524 | /** |
525 | * guest_state_exit_irqoff - Establish state when returning from guest mode |
526 | * |
527 | * Entry from a guest disables interrupts, but guest mode is traced as |
528 | * interrupts enabled. Also with NO_HZ_FULL RCU might be idle. |
529 | * |
530 | * 1) Tell lockdep that interrupts are disabled |
531 | * 2) Invoke context tracking if enabled to reactivate RCU |
532 | * 3) Trace interrupts off state |
533 | * |
534 | * Invoked from architecture specific code after exiting a guest. |
535 | * Must be invoked with interrupts disabled and the caller must be |
536 | * non-instrumentable. |
537 | * The caller has to invoke guest_timing_exit_irqoff() after this. |
538 | * |
539 | * Note: this is analogous to enter_from_user_mode(). |
540 | */ |
541 | static __always_inline void guest_state_exit_irqoff(void) |
542 | { |
543 | lockdep_hardirqs_off(CALLER_ADDR0); |
544 | guest_context_exit_irqoff(); |
545 | |
546 | instrumentation_begin(); |
547 | trace_hardirqs_off_finish(); |
548 | instrumentation_end(); |
549 | } |
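
/*
 * Illustrative ordering of the enter/exit helpers above in an arch's
 * vcpu-run path (arch_enter_guest() is a made-up stand-in for the real
 * low-level entry code):
 *
 *	local_irq_disable();
 *	guest_timing_enter_irqoff();
 *	guest_state_enter_irqoff();
 *	arch_enter_guest(vcpu);
 *	guest_state_exit_irqoff();
 *	guest_timing_exit_irqoff();
 *	local_irq_enable();
 */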
550 | |
551 | static inline int kvm_vcpu_exiting_guest_mode(struct kvm_vcpu *vcpu) |
552 | { |
553 | /* |
554 | * The memory barrier ensures a previous write to vcpu->requests cannot |
555 | * be reordered with the read of vcpu->mode. It pairs with the general |
556 | * memory barrier following the write of vcpu->mode in VCPU RUN. |
557 | */ |
558 | smp_mb__before_atomic(); |
559 | return cmpxchg(&vcpu->mode, IN_GUEST_MODE, EXITING_GUEST_MODE); |
560 | } |
561 | |
562 | /* |
 * Some of the bitops functions do not support overly long bitmaps.
 * This number must be chosen so that those limits are not exceeded.
565 | */ |
566 | #define KVM_MEM_MAX_NR_PAGES ((1UL << 31) - 1) |
567 | |
568 | /* |
569 | * Since at idle each memslot belongs to two memslot sets it has to contain |
570 | * two embedded nodes for each data structure that it forms a part of. |
571 | * |
572 | * Two memslot sets (one active and one inactive) are necessary so the VM |
573 | * continues to run on one memslot set while the other is being modified. |
574 | * |
575 | * These two memslot sets normally point to the same set of memslots. |
576 | * They can, however, be desynchronized when performing a memslot management |
577 | * operation by replacing the memslot to be modified by its copy. |
578 | * After the operation is complete, both memslot sets once again point to |
579 | * the same, common set of memslot data. |
580 | * |
581 | * The memslots themselves are independent of each other so they can be |
582 | * individually added or deleted. |
583 | */ |
584 | struct kvm_memory_slot { |
585 | struct hlist_node id_node[2]; |
586 | struct interval_tree_node hva_node[2]; |
587 | struct rb_node gfn_node[2]; |
588 | gfn_t base_gfn; |
589 | unsigned long npages; |
590 | unsigned long *dirty_bitmap; |
591 | struct kvm_arch_memory_slot arch; |
592 | unsigned long userspace_addr; |
593 | u32 flags; |
594 | short id; |
595 | u16 as_id; |
596 | |
597 | #ifdef CONFIG_KVM_PRIVATE_MEM |
598 | struct { |
599 | struct file __rcu *file; |
600 | pgoff_t pgoff; |
601 | } gmem; |
602 | #endif |
603 | }; |
604 | |
605 | static inline bool kvm_slot_can_be_private(const struct kvm_memory_slot *slot) |
606 | { |
607 | return slot && (slot->flags & KVM_MEM_GUEST_MEMFD); |
608 | } |
609 | |
610 | static inline bool kvm_slot_dirty_track_enabled(const struct kvm_memory_slot *slot) |
611 | { |
612 | return slot->flags & KVM_MEM_LOG_DIRTY_PAGES; |
613 | } |
614 | |
615 | static inline unsigned long kvm_dirty_bitmap_bytes(struct kvm_memory_slot *memslot) |
616 | { |
617 | return ALIGN(memslot->npages, BITS_PER_LONG) / 8; |
618 | } |
619 | |
620 | static inline unsigned long *kvm_second_dirty_bitmap(struct kvm_memory_slot *memslot) |
621 | { |
622 | unsigned long len = kvm_dirty_bitmap_bytes(memslot); |
623 | |
624 | return memslot->dirty_bitmap + len / sizeof(*memslot->dirty_bitmap); |
625 | } |
626 | |
627 | #ifndef KVM_DIRTY_LOG_MANUAL_CAPS |
628 | #define KVM_DIRTY_LOG_MANUAL_CAPS KVM_DIRTY_LOG_MANUAL_PROTECT_ENABLE |
629 | #endif |
630 | |
631 | struct kvm_s390_adapter_int { |
632 | u64 ind_addr; |
633 | u64 summary_addr; |
634 | u64 ind_offset; |
635 | u32 summary_offset; |
636 | u32 adapter_id; |
637 | }; |
638 | |
639 | struct kvm_hv_sint { |
640 | u32 vcpu; |
641 | u32 sint; |
642 | }; |
643 | |
644 | struct kvm_xen_evtchn { |
645 | u32 port; |
646 | u32 vcpu_id; |
647 | int vcpu_idx; |
648 | u32 priority; |
649 | }; |
650 | |
651 | struct kvm_kernel_irq_routing_entry { |
652 | u32 gsi; |
653 | u32 type; |
654 | int (*set)(struct kvm_kernel_irq_routing_entry *e, |
655 | struct kvm *kvm, int irq_source_id, int level, |
656 | bool line_status); |
657 | union { |
658 | struct { |
659 | unsigned irqchip; |
660 | unsigned pin; |
661 | } irqchip; |
662 | struct { |
663 | u32 address_lo; |
664 | u32 address_hi; |
665 | u32 data; |
666 | u32 flags; |
667 | u32 devid; |
668 | } msi; |
669 | struct kvm_s390_adapter_int adapter; |
670 | struct kvm_hv_sint hv_sint; |
671 | struct kvm_xen_evtchn xen_evtchn; |
672 | }; |
673 | struct hlist_node link; |
674 | }; |
675 | |
676 | #ifdef CONFIG_HAVE_KVM_IRQ_ROUTING |
677 | struct kvm_irq_routing_table { |
678 | int chip[KVM_NR_IRQCHIPS][KVM_IRQCHIP_NUM_PINS]; |
679 | u32 nr_rt_entries; |
680 | /* |
	 * Array indexed by gsi.  Each entry contains a list of irq chips
	 * the gsi is connected to.
683 | */ |
684 | struct hlist_head map[] __counted_by(nr_rt_entries); |
685 | }; |
686 | #endif |
687 | |
688 | bool kvm_arch_irqchip_in_kernel(struct kvm *kvm); |
689 | |
690 | #ifndef KVM_INTERNAL_MEM_SLOTS |
691 | #define KVM_INTERNAL_MEM_SLOTS 0 |
692 | #endif |
693 | |
694 | #define KVM_MEM_SLOTS_NUM SHRT_MAX |
695 | #define KVM_USER_MEM_SLOTS (KVM_MEM_SLOTS_NUM - KVM_INTERNAL_MEM_SLOTS) |
696 | |
697 | #if KVM_MAX_NR_ADDRESS_SPACES == 1 |
698 | static inline int kvm_arch_nr_memslot_as_ids(struct kvm *kvm) |
699 | { |
700 | return KVM_MAX_NR_ADDRESS_SPACES; |
701 | } |
702 | |
703 | static inline int kvm_arch_vcpu_memslots_id(struct kvm_vcpu *vcpu) |
704 | { |
705 | return 0; |
706 | } |
707 | #endif |
708 | |
709 | /* |
710 | * Arch code must define kvm_arch_has_private_mem if support for private memory |
711 | * is enabled. |
712 | */ |
713 | #if !defined(kvm_arch_has_private_mem) && !IS_ENABLED(CONFIG_KVM_PRIVATE_MEM) |
714 | static inline bool kvm_arch_has_private_mem(struct kvm *kvm) |
715 | { |
716 | return false; |
717 | } |
718 | #endif |
719 | |
720 | struct kvm_memslots { |
721 | u64 generation; |
722 | atomic_long_t last_used_slot; |
723 | struct rb_root_cached hva_tree; |
724 | struct rb_root gfn_tree; |
725 | /* |
726 | * The mapping table from slot id to memslot. |
727 | * |
728 | * 7-bit bucket count matches the size of the old id to index array for |
729 | * 512 slots, while giving good performance with this slot count. |
730 | * Higher bucket counts bring only small performance improvements but |
731 | * always result in higher memory usage (even for lower memslot counts). |
732 | */ |
733 | DECLARE_HASHTABLE(id_hash, 7); |
734 | int node_idx; |
735 | }; |
736 | |
737 | struct kvm { |
738 | #ifdef KVM_HAVE_MMU_RWLOCK |
739 | rwlock_t mmu_lock; |
740 | #else |
741 | spinlock_t mmu_lock; |
742 | #endif /* KVM_HAVE_MMU_RWLOCK */ |
743 | |
744 | struct mutex slots_lock; |
745 | |
746 | /* |
747 | * Protects the arch-specific fields of struct kvm_memory_slots in |
748 | * use by the VM. To be used under the slots_lock (above) or in a |
749 | * kvm->srcu critical section where acquiring the slots_lock would |
750 | * lead to deadlock with the synchronize_srcu in |
751 | * kvm_swap_active_memslots(). |
752 | */ |
753 | struct mutex slots_arch_lock; |
754 | struct mm_struct *mm; /* userspace tied to this vm */ |
755 | unsigned long nr_memslot_pages; |
756 | /* The two memslot sets - active and inactive (per address space) */ |
757 | struct kvm_memslots __memslots[KVM_MAX_NR_ADDRESS_SPACES][2]; |
758 | /* The current active memslot set for each address space */ |
759 | struct kvm_memslots __rcu *memslots[KVM_MAX_NR_ADDRESS_SPACES]; |
760 | struct xarray vcpu_array; |
761 | /* |
762 | * Protected by slots_lock, but can be read outside if an |
763 | * incorrect answer is acceptable. |
764 | */ |
765 | atomic_t nr_memslots_dirty_logging; |
766 | |
767 | /* Used to wait for completion of MMU notifiers. */ |
768 | spinlock_t mn_invalidate_lock; |
769 | unsigned long mn_active_invalidate_count; |
770 | struct rcuwait mn_memslots_update_rcuwait; |
771 | |
772 | /* For management / invalidation of gfn_to_pfn_caches */ |
773 | spinlock_t gpc_lock; |
774 | struct list_head gpc_list; |
775 | |
776 | /* |
777 | * created_vcpus is protected by kvm->lock, and is incremented |
778 | * at the beginning of KVM_CREATE_VCPU. online_vcpus is only |
779 | * incremented after storing the kvm_vcpu pointer in vcpus, |
780 | * and is accessed atomically. |
781 | */ |
782 | atomic_t online_vcpus; |
783 | int max_vcpus; |
784 | int created_vcpus; |
785 | int last_boosted_vcpu; |
786 | struct list_head vm_list; |
787 | struct mutex lock; |
788 | struct kvm_io_bus __rcu *buses[KVM_NR_BUSES]; |
789 | #ifdef CONFIG_HAVE_KVM_IRQCHIP |
790 | struct { |
791 | spinlock_t lock; |
792 | struct list_head items; |
793 | /* resampler_list update side is protected by resampler_lock. */ |
794 | struct list_head resampler_list; |
795 | struct mutex resampler_lock; |
796 | } irqfds; |
797 | #endif |
798 | struct list_head ioeventfds; |
799 | struct kvm_vm_stat stat; |
800 | struct kvm_arch arch; |
801 | refcount_t users_count; |
802 | #ifdef CONFIG_KVM_MMIO |
803 | struct kvm_coalesced_mmio_ring *coalesced_mmio_ring; |
804 | spinlock_t ring_lock; |
805 | struct list_head coalesced_zones; |
806 | #endif |
807 | |
808 | struct mutex irq_lock; |
809 | #ifdef CONFIG_HAVE_KVM_IRQCHIP |
810 | /* |
811 | * Update side is protected by irq_lock. |
812 | */ |
813 | struct kvm_irq_routing_table __rcu *irq_routing; |
814 | |
815 | struct hlist_head irq_ack_notifier_list; |
816 | #endif |
817 | |
818 | #ifdef CONFIG_KVM_GENERIC_MMU_NOTIFIER |
819 | struct mmu_notifier mmu_notifier; |
820 | unsigned long mmu_invalidate_seq; |
821 | long mmu_invalidate_in_progress; |
822 | gfn_t mmu_invalidate_range_start; |
823 | gfn_t mmu_invalidate_range_end; |
824 | #endif |
825 | struct list_head devices; |
826 | u64 manual_dirty_log_protect; |
827 | struct dentry *debugfs_dentry; |
828 | struct kvm_stat_data **debugfs_stat_data; |
829 | struct srcu_struct srcu; |
830 | struct srcu_struct irq_srcu; |
831 | pid_t userspace_pid; |
832 | bool override_halt_poll_ns; |
833 | unsigned int max_halt_poll_ns; |
834 | u32 dirty_ring_size; |
835 | bool dirty_ring_with_bitmap; |
836 | bool vm_bugged; |
837 | bool vm_dead; |
838 | |
839 | #ifdef CONFIG_HAVE_KVM_PM_NOTIFIER |
840 | struct notifier_block pm_notifier; |
841 | #endif |
842 | #ifdef CONFIG_KVM_GENERIC_MEMORY_ATTRIBUTES |
	/* Protected by slots_lock (for writes) and RCU (for reads) */
844 | struct xarray mem_attr_array; |
845 | #endif |
846 | char stats_id[KVM_STATS_NAME_SIZE]; |
847 | }; |
848 | |
849 | #define kvm_err(fmt, ...) \ |
850 | pr_err("kvm [%i]: " fmt, task_pid_nr(current), ## __VA_ARGS__) |
851 | #define kvm_info(fmt, ...) \ |
852 | pr_info("kvm [%i]: " fmt, task_pid_nr(current), ## __VA_ARGS__) |
853 | #define kvm_debug(fmt, ...) \ |
854 | pr_debug("kvm [%i]: " fmt, task_pid_nr(current), ## __VA_ARGS__) |
855 | #define kvm_debug_ratelimited(fmt, ...) \ |
856 | pr_debug_ratelimited("kvm [%i]: " fmt, task_pid_nr(current), \ |
857 | ## __VA_ARGS__) |
858 | #define kvm_pr_unimpl(fmt, ...) \ |
859 | pr_err_ratelimited("kvm [%i]: " fmt, \ |
860 | task_tgid_nr(current), ## __VA_ARGS__) |
861 | |
862 | /* The guest did something we don't support. */ |
863 | #define vcpu_unimpl(vcpu, fmt, ...) \ |
864 | kvm_pr_unimpl("vcpu%i, guest rIP: 0x%lx " fmt, \ |
865 | (vcpu)->vcpu_id, kvm_rip_read(vcpu), ## __VA_ARGS__) |
866 | |
867 | #define vcpu_debug(vcpu, fmt, ...) \ |
868 | kvm_debug("vcpu%i " fmt, (vcpu)->vcpu_id, ## __VA_ARGS__) |
869 | #define vcpu_debug_ratelimited(vcpu, fmt, ...) \ |
870 | kvm_debug_ratelimited("vcpu%i " fmt, (vcpu)->vcpu_id, \ |
871 | ## __VA_ARGS__) |
872 | #define vcpu_err(vcpu, fmt, ...) \ |
873 | kvm_err("vcpu%i " fmt, (vcpu)->vcpu_id, ## __VA_ARGS__) |
874 | |
875 | static inline void kvm_vm_dead(struct kvm *kvm) |
876 | { |
877 | kvm->vm_dead = true; |
878 | kvm_make_all_cpus_request(kvm, KVM_REQ_VM_DEAD); |
879 | } |
880 | |
881 | static inline void kvm_vm_bugged(struct kvm *kvm) |
882 | { |
883 | kvm->vm_bugged = true; |
884 | kvm_vm_dead(kvm); |
885 | } |
886 | |
887 | |
888 | #define KVM_BUG(cond, kvm, fmt...) \ |
889 | ({ \ |
890 | bool __ret = !!(cond); \ |
891 | \ |
892 | if (WARN_ONCE(__ret && !(kvm)->vm_bugged, fmt)) \ |
893 | kvm_vm_bugged(kvm); \ |
894 | unlikely(__ret); \ |
895 | }) |
896 | |
897 | #define KVM_BUG_ON(cond, kvm) \ |
898 | ({ \ |
899 | bool __ret = !!(cond); \ |
900 | \ |
901 | if (WARN_ON_ONCE(__ret && !(kvm)->vm_bugged)) \ |
902 | kvm_vm_bugged(kvm); \ |
903 | unlikely(__ret); \ |
904 | }) |
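
/*
 * Illustrative usage of the macros above: bail out and mark the VM as bugged
 * when an "impossible" condition is observed (the condition shown is made up):
 *
 *	if (KVM_BUG_ON(!slot, kvm))
 *		return -EIO;
 */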
905 | |
906 | /* |
907 | * Note, "data corruption" refers to corruption of host kernel data structures, |
 * not guest data.  Guest data corruption, suspected or confirmed, that is tied
 * to and contained within a single VM should *never* BUG() and potentially
 * panic the host, i.e. use this variant of KVM_BUG() if and only if a KVM data
 * structure is corrupted and that corruption can have a cascading effect on
 * other parts of the host and/or on other VMs.
913 | */ |
914 | #define KVM_BUG_ON_DATA_CORRUPTION(cond, kvm) \ |
915 | ({ \ |
916 | bool __ret = !!(cond); \ |
917 | \ |
918 | if (IS_ENABLED(CONFIG_BUG_ON_DATA_CORRUPTION)) \ |
919 | BUG_ON(__ret); \ |
920 | else if (WARN_ON_ONCE(__ret && !(kvm)->vm_bugged)) \ |
921 | kvm_vm_bugged(kvm); \ |
922 | unlikely(__ret); \ |
923 | }) |
924 | |
925 | static inline void kvm_vcpu_srcu_read_lock(struct kvm_vcpu *vcpu) |
926 | { |
927 | #ifdef CONFIG_PROVE_RCU |
	WARN_ONCE(vcpu->srcu_depth++,
		  "KVM: Illegal vCPU srcu_idx LOCK, depth=%d", vcpu->srcu_depth - 1);
#endif
	vcpu->____srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
932 | } |
933 | |
934 | static inline void kvm_vcpu_srcu_read_unlock(struct kvm_vcpu *vcpu) |
935 | { |
	srcu_read_unlock(&vcpu->kvm->srcu, vcpu->____srcu_idx);

#ifdef CONFIG_PROVE_RCU
	WARN_ONCE(--vcpu->srcu_depth,
		  "KVM: Illegal vCPU srcu_idx UNLOCK, depth=%d", vcpu->srcu_depth);
941 | #endif |
942 | } |
943 | |
944 | static inline bool kvm_dirty_log_manual_protect_and_init_set(struct kvm *kvm) |
945 | { |
946 | return !!(kvm->manual_dirty_log_protect & KVM_DIRTY_LOG_INITIALLY_SET); |
947 | } |
948 | |
949 | static inline struct kvm_io_bus *kvm_get_bus(struct kvm *kvm, enum kvm_bus idx) |
950 | { |
951 | return srcu_dereference_check(kvm->buses[idx], &kvm->srcu, |
952 | lockdep_is_held(&kvm->slots_lock) || |
953 | !refcount_read(&kvm->users_count)); |
954 | } |
955 | |
956 | static inline struct kvm_vcpu *kvm_get_vcpu(struct kvm *kvm, int i) |
957 | { |
	int num_vcpus = atomic_read(&kvm->online_vcpus);
	i = array_index_nospec(i, num_vcpus);

	/* Pairs with smp_wmb() in kvm_vm_ioctl_create_vcpu. */
	smp_rmb();
	return xa_load(&kvm->vcpu_array, i);
964 | } |
965 | |
966 | #define kvm_for_each_vcpu(idx, vcpup, kvm) \ |
967 | xa_for_each_range(&kvm->vcpu_array, idx, vcpup, 0, \ |
968 | (atomic_read(&kvm->online_vcpus) - 1)) |
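
/*
 * Illustrative iteration over all online vCPUs, e.g. kicking each one out of
 * guest mode:
 *
 *	unsigned long i;
 *	struct kvm_vcpu *vcpu;
 *
 *	kvm_for_each_vcpu(i, vcpu, kvm)
 *		kvm_vcpu_kick(vcpu);
 */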
969 | |
970 | static inline struct kvm_vcpu *kvm_get_vcpu_by_id(struct kvm *kvm, int id) |
971 | { |
972 | struct kvm_vcpu *vcpu = NULL; |
973 | unsigned long i; |
974 | |
975 | if (id < 0) |
976 | return NULL; |
977 | if (id < KVM_MAX_VCPUS) |
		vcpu = kvm_get_vcpu(kvm, id);
979 | if (vcpu && vcpu->vcpu_id == id) |
980 | return vcpu; |
981 | kvm_for_each_vcpu(i, vcpu, kvm) |
982 | if (vcpu->vcpu_id == id) |
983 | return vcpu; |
984 | return NULL; |
985 | } |
986 | |
987 | void kvm_destroy_vcpus(struct kvm *kvm); |
988 | |
989 | void vcpu_load(struct kvm_vcpu *vcpu); |
990 | void vcpu_put(struct kvm_vcpu *vcpu); |
991 | |
992 | #ifdef __KVM_HAVE_IOAPIC |
993 | void kvm_arch_post_irq_ack_notifier_list_update(struct kvm *kvm); |
994 | void kvm_arch_post_irq_routing_update(struct kvm *kvm); |
995 | #else |
996 | static inline void kvm_arch_post_irq_ack_notifier_list_update(struct kvm *kvm) |
997 | { |
998 | } |
999 | static inline void kvm_arch_post_irq_routing_update(struct kvm *kvm) |
1000 | { |
1001 | } |
1002 | #endif |
1003 | |
1004 | #ifdef CONFIG_HAVE_KVM_IRQCHIP |
1005 | int kvm_irqfd_init(void); |
1006 | void kvm_irqfd_exit(void); |
1007 | #else |
1008 | static inline int kvm_irqfd_init(void) |
1009 | { |
1010 | return 0; |
1011 | } |
1012 | |
1013 | static inline void kvm_irqfd_exit(void) |
1014 | { |
1015 | } |
1016 | #endif |
1017 | int kvm_init(unsigned vcpu_size, unsigned vcpu_align, struct module *module); |
1018 | void kvm_exit(void); |
1019 | |
1020 | void kvm_get_kvm(struct kvm *kvm); |
1021 | bool kvm_get_kvm_safe(struct kvm *kvm); |
1022 | void kvm_put_kvm(struct kvm *kvm); |
1023 | bool file_is_kvm(struct file *file); |
1024 | void kvm_put_kvm_no_destroy(struct kvm *kvm); |
1025 | |
1026 | static inline struct kvm_memslots *__kvm_memslots(struct kvm *kvm, int as_id) |
1027 | { |
1028 | as_id = array_index_nospec(as_id, KVM_MAX_NR_ADDRESS_SPACES); |
1029 | return srcu_dereference_check(kvm->memslots[as_id], &kvm->srcu, |
1030 | lockdep_is_held(&kvm->slots_lock) || |
1031 | !refcount_read(&kvm->users_count)); |
1032 | } |
1033 | |
1034 | static inline struct kvm_memslots *kvm_memslots(struct kvm *kvm) |
1035 | { |
	return __kvm_memslots(kvm, 0);
1037 | } |
1038 | |
1039 | static inline struct kvm_memslots *kvm_vcpu_memslots(struct kvm_vcpu *vcpu) |
1040 | { |
1041 | int as_id = kvm_arch_vcpu_memslots_id(vcpu); |
1042 | |
	return __kvm_memslots(vcpu->kvm, as_id);
1044 | } |
1045 | |
1046 | static inline bool kvm_memslots_empty(struct kvm_memslots *slots) |
1047 | { |
1048 | return RB_EMPTY_ROOT(&slots->gfn_tree); |
1049 | } |
1050 | |
1051 | bool kvm_are_all_memslots_empty(struct kvm *kvm); |
1052 | |
1053 | #define kvm_for_each_memslot(memslot, bkt, slots) \ |
1054 | hash_for_each(slots->id_hash, bkt, memslot, id_node[slots->node_idx]) \ |
1055 | if (WARN_ON_ONCE(!memslot->npages)) { \ |
1056 | } else |
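
/*
 * Illustrative walk over every memslot in a set (the WARN in the macro above
 * skips zero-sized slots):
 *
 *	struct kvm_memory_slot *memslot;
 *	int bkt;
 *
 *	kvm_for_each_memslot(memslot, bkt, slots)
 *		pr_info("slot %d: gfn 0x%llx, %lu pages\n",
 *			memslot->id, memslot->base_gfn, memslot->npages);
 */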
1057 | |
1058 | static inline |
1059 | struct kvm_memory_slot *id_to_memslot(struct kvm_memslots *slots, int id) |
1060 | { |
1061 | struct kvm_memory_slot *slot; |
1062 | int idx = slots->node_idx; |
1063 | |
1064 | hash_for_each_possible(slots->id_hash, slot, id_node[idx], id) { |
1065 | if (slot->id == id) |
1066 | return slot; |
1067 | } |
1068 | |
1069 | return NULL; |
1070 | } |
1071 | |
1072 | /* Iterator used for walking memslots that overlap a gfn range. */ |
1073 | struct kvm_memslot_iter { |
1074 | struct kvm_memslots *slots; |
1075 | struct rb_node *node; |
1076 | struct kvm_memory_slot *slot; |
1077 | }; |
1078 | |
1079 | static inline void kvm_memslot_iter_next(struct kvm_memslot_iter *iter) |
1080 | { |
1081 | iter->node = rb_next(iter->node); |
1082 | if (!iter->node) |
1083 | return; |
1084 | |
1085 | iter->slot = container_of(iter->node, struct kvm_memory_slot, gfn_node[iter->slots->node_idx]); |
1086 | } |
1087 | |
1088 | static inline void kvm_memslot_iter_start(struct kvm_memslot_iter *iter, |
1089 | struct kvm_memslots *slots, |
1090 | gfn_t start) |
1091 | { |
1092 | int idx = slots->node_idx; |
1093 | struct rb_node *tmp; |
1094 | struct kvm_memory_slot *slot; |
1095 | |
1096 | iter->slots = slots; |
1097 | |
1098 | /* |
	 * Find the so-called "upper bound" of a key - the first node that has
	 * its key strictly greater than the searched one (the start gfn in our case).
1101 | */ |
1102 | iter->node = NULL; |
1103 | for (tmp = slots->gfn_tree.rb_node; tmp; ) { |
1104 | slot = container_of(tmp, struct kvm_memory_slot, gfn_node[idx]); |
1105 | if (start < slot->base_gfn) { |
1106 | iter->node = tmp; |
1107 | tmp = tmp->rb_left; |
1108 | } else { |
1109 | tmp = tmp->rb_right; |
1110 | } |
1111 | } |
1112 | |
1113 | /* |
1114 | * Find the slot with the lowest gfn that can possibly intersect with |
1115 | * the range, so we'll ideally have slot start <= range start |
1116 | */ |
1117 | if (iter->node) { |
1118 | /* |
1119 | * A NULL previous node means that the very first slot |
1120 | * already has a higher start gfn. |
1121 | * In this case slot start > range start. |
1122 | */ |
1123 | tmp = rb_prev(iter->node); |
1124 | if (tmp) |
1125 | iter->node = tmp; |
1126 | } else { |
1127 | /* a NULL node below means no slots */ |
1128 | iter->node = rb_last(&slots->gfn_tree); |
1129 | } |
1130 | |
1131 | if (iter->node) { |
1132 | iter->slot = container_of(iter->node, struct kvm_memory_slot, gfn_node[idx]); |
1133 | |
1134 | /* |
1135 | * It is possible in the slot start < range start case that the |
1136 | * found slot ends before or at range start (slot end <= range start) |
1137 | * and so it does not overlap the requested range. |
1138 | * |
1139 | * In such non-overlapping case the next slot (if it exists) will |
1140 | * already have slot start > range start, otherwise the logic above |
1141 | * would have found it instead of the current slot. |
1142 | */ |
1143 | if (iter->slot->base_gfn + iter->slot->npages <= start) |
1144 | kvm_memslot_iter_next(iter); |
1145 | } |
1146 | } |
1147 | |
1148 | static inline bool kvm_memslot_iter_is_valid(struct kvm_memslot_iter *iter, gfn_t end) |
1149 | { |
1150 | if (!iter->node) |
1151 | return false; |
1152 | |
1153 | /* |
	 * If this slot starts beyond or at the end of the range, so does
	 * every next one
1156 | */ |
1157 | return iter->slot->base_gfn < end; |
1158 | } |
1159 | |
1160 | /* Iterate over each memslot at least partially intersecting [start, end) range */ |
1161 | #define kvm_for_each_memslot_in_gfn_range(iter, slots, start, end) \ |
1162 | for (kvm_memslot_iter_start(iter, slots, start); \ |
1163 | kvm_memslot_iter_is_valid(iter, end); \ |
1164 | kvm_memslot_iter_next(iter)) |
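
/*
 * Illustrative use of the iterator above, e.g. counting the memslots that
 * overlap a gfn range:
 *
 *	struct kvm_memslot_iter iter;
 *	int nr_overlapping = 0;
 *
 *	kvm_for_each_memslot_in_gfn_range(&iter, slots, start, end)
 *		nr_overlapping++;
 */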
1165 | |
1166 | /* |
1167 | * KVM_SET_USER_MEMORY_REGION ioctl allows the following operations: |
1168 | * - create a new memory slot |
1169 | * - delete an existing memory slot |
1170 | * - modify an existing memory slot |
1171 | * -- move it in the guest physical memory space |
1172 | * -- just change its flags |
1173 | * |
1174 | * Since flags can be changed by some of these operations, the following |
1175 | * differentiation is the best we can do for __kvm_set_memory_region(): |
1176 | */ |
1177 | enum kvm_mr_change { |
1178 | KVM_MR_CREATE, |
1179 | KVM_MR_DELETE, |
1180 | KVM_MR_MOVE, |
1181 | KVM_MR_FLAGS_ONLY, |
1182 | }; |
1183 | |
1184 | int kvm_set_memory_region(struct kvm *kvm, |
1185 | const struct kvm_userspace_memory_region2 *mem); |
1186 | int __kvm_set_memory_region(struct kvm *kvm, |
1187 | const struct kvm_userspace_memory_region2 *mem); |
1188 | void kvm_arch_free_memslot(struct kvm *kvm, struct kvm_memory_slot *slot); |
1189 | void kvm_arch_memslots_updated(struct kvm *kvm, u64 gen); |
1190 | int kvm_arch_prepare_memory_region(struct kvm *kvm, |
1191 | const struct kvm_memory_slot *old, |
1192 | struct kvm_memory_slot *new, |
1193 | enum kvm_mr_change change); |
1194 | void kvm_arch_commit_memory_region(struct kvm *kvm, |
1195 | struct kvm_memory_slot *old, |
1196 | const struct kvm_memory_slot *new, |
1197 | enum kvm_mr_change change); |
1198 | /* flush all memory translations */ |
1199 | void kvm_arch_flush_shadow_all(struct kvm *kvm); |
1200 | /* flush memory translations pointing to 'slot' */ |
1201 | void kvm_arch_flush_shadow_memslot(struct kvm *kvm, |
1202 | struct kvm_memory_slot *slot); |
1203 | |
1204 | int gfn_to_page_many_atomic(struct kvm_memory_slot *slot, gfn_t gfn, |
1205 | struct page **pages, int nr_pages); |
1206 | |
1207 | struct page *gfn_to_page(struct kvm *kvm, gfn_t gfn); |
1208 | unsigned long gfn_to_hva(struct kvm *kvm, gfn_t gfn); |
1209 | unsigned long gfn_to_hva_prot(struct kvm *kvm, gfn_t gfn, bool *writable); |
1210 | unsigned long gfn_to_hva_memslot(struct kvm_memory_slot *slot, gfn_t gfn); |
1211 | unsigned long gfn_to_hva_memslot_prot(struct kvm_memory_slot *slot, gfn_t gfn, |
1212 | bool *writable); |
1213 | void kvm_release_page_clean(struct page *page); |
1214 | void kvm_release_page_dirty(struct page *page); |
1215 | |
1216 | kvm_pfn_t gfn_to_pfn(struct kvm *kvm, gfn_t gfn); |
1217 | kvm_pfn_t gfn_to_pfn_prot(struct kvm *kvm, gfn_t gfn, bool write_fault, |
1218 | bool *writable); |
1219 | kvm_pfn_t gfn_to_pfn_memslot(const struct kvm_memory_slot *slot, gfn_t gfn); |
1220 | kvm_pfn_t gfn_to_pfn_memslot_atomic(const struct kvm_memory_slot *slot, gfn_t gfn); |
1221 | kvm_pfn_t __gfn_to_pfn_memslot(const struct kvm_memory_slot *slot, gfn_t gfn, |
1222 | bool atomic, bool interruptible, bool *async, |
1223 | bool write_fault, bool *writable, hva_t *hva); |
1224 | |
1225 | void kvm_release_pfn_clean(kvm_pfn_t pfn); |
1226 | void kvm_release_pfn_dirty(kvm_pfn_t pfn); |
1227 | void kvm_set_pfn_dirty(kvm_pfn_t pfn); |
1228 | void kvm_set_pfn_accessed(kvm_pfn_t pfn); |
1229 | |
1230 | void kvm_release_pfn(kvm_pfn_t pfn, bool dirty); |
1231 | int kvm_read_guest_page(struct kvm *kvm, gfn_t gfn, void *data, int offset, |
1232 | int len); |
1233 | int kvm_read_guest(struct kvm *kvm, gpa_t gpa, void *data, unsigned long len); |
1234 | int kvm_read_guest_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc, |
1235 | void *data, unsigned long len); |
1236 | int kvm_read_guest_offset_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc, |
1237 | void *data, unsigned int offset, |
1238 | unsigned long len); |
1239 | int kvm_write_guest_page(struct kvm *kvm, gfn_t gfn, const void *data, |
1240 | int offset, int len); |
1241 | int kvm_write_guest(struct kvm *kvm, gpa_t gpa, const void *data, |
1242 | unsigned long len); |
1243 | int kvm_write_guest_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc, |
1244 | void *data, unsigned long len); |
1245 | int kvm_write_guest_offset_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc, |
1246 | void *data, unsigned int offset, |
1247 | unsigned long len); |
1248 | int kvm_gfn_to_hva_cache_init(struct kvm *kvm, struct gfn_to_hva_cache *ghc, |
1249 | gpa_t gpa, unsigned long len); |
1250 | |
1251 | #define __kvm_get_guest(kvm, gfn, offset, v) \ |
1252 | ({ \ |
1253 | unsigned long __addr = gfn_to_hva(kvm, gfn); \ |
1254 | typeof(v) __user *__uaddr = (typeof(__uaddr))(__addr + offset); \ |
1255 | int __ret = -EFAULT; \ |
1256 | \ |
1257 | if (!kvm_is_error_hva(__addr)) \ |
1258 | __ret = get_user(v, __uaddr); \ |
1259 | __ret; \ |
1260 | }) |
1261 | |
1262 | #define kvm_get_guest(kvm, gpa, v) \ |
1263 | ({ \ |
1264 | gpa_t __gpa = gpa; \ |
1265 | struct kvm *__kvm = kvm; \ |
1266 | \ |
1267 | __kvm_get_guest(__kvm, __gpa >> PAGE_SHIFT, \ |
1268 | offset_in_page(__gpa), v); \ |
1269 | }) |
1270 | |
1271 | #define __kvm_put_guest(kvm, gfn, offset, v) \ |
1272 | ({ \ |
1273 | unsigned long __addr = gfn_to_hva(kvm, gfn); \ |
1274 | typeof(v) __user *__uaddr = (typeof(__uaddr))(__addr + offset); \ |
1275 | int __ret = -EFAULT; \ |
1276 | \ |
1277 | if (!kvm_is_error_hva(__addr)) \ |
1278 | __ret = put_user(v, __uaddr); \ |
1279 | if (!__ret) \ |
1280 | mark_page_dirty(kvm, gfn); \ |
1281 | __ret; \ |
1282 | }) |
1283 | |
1284 | #define kvm_put_guest(kvm, gpa, v) \ |
1285 | ({ \ |
1286 | gpa_t __gpa = gpa; \ |
1287 | struct kvm *__kvm = kvm; \ |
1288 | \ |
1289 | __kvm_put_guest(__kvm, __gpa >> PAGE_SHIFT, \ |
1290 | offset_in_page(__gpa), v); \ |
1291 | }) |
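
/*
 * Illustrative use of the accessors above: read-modify-write a u32 at a
 * guest physical address (the gpa and the bit being set are arbitrary):
 *
 *	u32 val;
 *
 *	if (kvm_get_guest(kvm, gpa, val))
 *		return -EFAULT;
 *	val |= BIT(0);
 *	if (kvm_put_guest(kvm, gpa, val))
 *		return -EFAULT;
 */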
1292 | |
1293 | int kvm_clear_guest(struct kvm *kvm, gpa_t gpa, unsigned long len); |
1294 | struct kvm_memory_slot *gfn_to_memslot(struct kvm *kvm, gfn_t gfn); |
1295 | bool kvm_is_visible_gfn(struct kvm *kvm, gfn_t gfn); |
1296 | bool kvm_vcpu_is_visible_gfn(struct kvm_vcpu *vcpu, gfn_t gfn); |
1297 | unsigned long kvm_host_page_size(struct kvm_vcpu *vcpu, gfn_t gfn); |
1298 | void mark_page_dirty_in_slot(struct kvm *kvm, const struct kvm_memory_slot *memslot, gfn_t gfn); |
1299 | void mark_page_dirty(struct kvm *kvm, gfn_t gfn); |
1300 | |
1301 | struct kvm_memslots *kvm_vcpu_memslots(struct kvm_vcpu *vcpu); |
1302 | struct kvm_memory_slot *kvm_vcpu_gfn_to_memslot(struct kvm_vcpu *vcpu, gfn_t gfn); |
1303 | kvm_pfn_t kvm_vcpu_gfn_to_pfn_atomic(struct kvm_vcpu *vcpu, gfn_t gfn); |
1304 | kvm_pfn_t kvm_vcpu_gfn_to_pfn(struct kvm_vcpu *vcpu, gfn_t gfn); |
1305 | int kvm_vcpu_map(struct kvm_vcpu *vcpu, gpa_t gpa, struct kvm_host_map *map); |
1306 | void kvm_vcpu_unmap(struct kvm_vcpu *vcpu, struct kvm_host_map *map, bool dirty); |
1307 | unsigned long kvm_vcpu_gfn_to_hva(struct kvm_vcpu *vcpu, gfn_t gfn); |
1308 | unsigned long kvm_vcpu_gfn_to_hva_prot(struct kvm_vcpu *vcpu, gfn_t gfn, bool *writable); |
1309 | int kvm_vcpu_read_guest_page(struct kvm_vcpu *vcpu, gfn_t gfn, void *data, int offset, |
1310 | int len); |
1311 | int kvm_vcpu_read_guest_atomic(struct kvm_vcpu *vcpu, gpa_t gpa, void *data, |
1312 | unsigned long len); |
1313 | int kvm_vcpu_read_guest(struct kvm_vcpu *vcpu, gpa_t gpa, void *data, |
1314 | unsigned long len); |
1315 | int kvm_vcpu_write_guest_page(struct kvm_vcpu *vcpu, gfn_t gfn, const void *data, |
1316 | int offset, int len); |
1317 | int kvm_vcpu_write_guest(struct kvm_vcpu *vcpu, gpa_t gpa, const void *data, |
1318 | unsigned long len); |
1319 | void kvm_vcpu_mark_page_dirty(struct kvm_vcpu *vcpu, gfn_t gfn); |
1320 | |
1321 | /** |
1322 | * kvm_gpc_init - initialize gfn_to_pfn_cache. |
1323 | * |
1324 | * @gpc: struct gfn_to_pfn_cache object. |
1325 | * @kvm: pointer to kvm instance. |
1326 | * |
1327 | * This sets up a gfn_to_pfn_cache by initializing locks and assigning the |
1328 | * immutable attributes. Note, the cache must be zero-allocated (or zeroed by |
1329 | * the caller before init). |
1330 | */ |
1331 | void kvm_gpc_init(struct gfn_to_pfn_cache *gpc, struct kvm *kvm); |
1332 | |
1333 | /** |
1334 | * kvm_gpc_activate - prepare a cached kernel mapping and HPA for a given guest |
1335 | * physical address. |
1336 | * |
1337 | * @gpc: struct gfn_to_pfn_cache object. |
1338 | * @gpa: guest physical address to map. |
 * @len:	   sanity check; the range being accessed must fit within a single page.
1340 | * |
1341 | * @return: 0 for success. |
1342 | * -EINVAL for a mapping which would cross a page boundary. |
1343 | * -EFAULT for an untranslatable guest physical address. |
1344 | * |
1345 | * This primes a gfn_to_pfn_cache and links it into the @gpc->kvm's list for |
1346 | * invalidations to be processed. Callers are required to use kvm_gpc_check() |
1347 | * to ensure that the cache is valid before accessing the target page. |
1348 | */ |
1349 | int kvm_gpc_activate(struct gfn_to_pfn_cache *gpc, gpa_t gpa, unsigned long len); |
1350 | |
1351 | /** |
1352 | * kvm_gpc_activate_hva - prepare a cached kernel mapping and HPA for a given HVA. |
1353 | * |
1354 | * @gpc: struct gfn_to_pfn_cache object. |
1355 | * @hva: userspace virtual address to map. |
 * @len:	   sanity check; the range being accessed must fit within a single page.
1357 | * |
1358 | * @return: 0 for success. |
1359 | * -EINVAL for a mapping which would cross a page boundary. |
 *		   -EFAULT for an untranslatable host virtual address.
1361 | * |
1362 | * The semantics of this function are the same as those of kvm_gpc_activate(). It |
1363 | * merely bypasses a layer of address translation. |
1364 | */ |
1365 | int kvm_gpc_activate_hva(struct gfn_to_pfn_cache *gpc, unsigned long hva, unsigned long len); |
1366 | |
1367 | /** |
1368 | * kvm_gpc_check - check validity of a gfn_to_pfn_cache. |
1369 | * |
1370 | * @gpc: struct gfn_to_pfn_cache object. |
 * @len:	   sanity check; the range being accessed must fit within a single page.
1372 | * |
1373 | * @return: %true if the cache is still valid and the address matches. |
1374 | * %false if the cache is not valid. |
1375 | * |
1376 | * Callers outside IN_GUEST_MODE context should hold a read lock on @gpc->lock |
1377 | * while calling this function, and then continue to hold the lock until the |
1378 | * access is complete. |
1379 | * |
1380 | * Callers in IN_GUEST_MODE may do so without locking, although they should |
 * still hold a read lock on kvm->srcu for the memslot checks.
1382 | */ |
1383 | bool kvm_gpc_check(struct gfn_to_pfn_cache *gpc, unsigned long len); |
1384 | |
1385 | /** |
1386 | * kvm_gpc_refresh - update a previously initialized cache. |
1387 | * |
1388 | * @gpc: struct gfn_to_pfn_cache object. |
 * @len:	   sanity check; the range being accessed must fit within a single page.
1390 | * |
1391 | * @return: 0 for success. |
1392 | * -EINVAL for a mapping which would cross a page boundary. |
1393 | * -EFAULT for an untranslatable guest physical address. |
1394 | * |
1395 | * This will attempt to refresh a gfn_to_pfn_cache. Note that a successful |
1396 | * return from this function does not mean the page can be immediately |
1397 | * accessed because it may have raced with an invalidation. Callers must |
1398 | * still lock and check the cache status, as this function does not return |
1399 | * with the lock still held to permit access. |
1400 | */ |
1401 | int kvm_gpc_refresh(struct gfn_to_pfn_cache *gpc, unsigned long len); |
1402 | |
1403 | /** |
1404 | * kvm_gpc_deactivate - deactivate and unlink a gfn_to_pfn_cache. |
1405 | * |
1406 | * @gpc: struct gfn_to_pfn_cache object. |
1407 | * |
1408 | * This removes a cache from the VM's list to be processed on MMU notifier |
1409 | * invocation. |
1410 | */ |
1411 | void kvm_gpc_deactivate(struct gfn_to_pfn_cache *gpc); |
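
/*
 * Illustrative gfn_to_pfn_cache lifecycle, following the locking rules in the
 * comments above (error handling trimmed, the mapped length is arbitrary;
 * gpc.khva and gpc.lock are the fields of struct gfn_to_pfn_cache declared in
 * linux/kvm_types.h):
 *
 *	struct gfn_to_pfn_cache gpc = {};	// must start zeroed
 *
 *	kvm_gpc_init(&gpc, kvm);
 *	if (kvm_gpc_activate(&gpc, gpa, sizeof(u64)))
 *		return -EFAULT;
 *
 *	read_lock(&gpc.lock);
 *	while (!kvm_gpc_check(&gpc, sizeof(u64))) {
 *		read_unlock(&gpc.lock);
 *		if (kvm_gpc_refresh(&gpc, sizeof(u64)))
 *			goto out;
 *		read_lock(&gpc.lock);
 *	}
 *	// ... access the mapping via gpc.khva ...
 *	read_unlock(&gpc.lock);
 * out:
 *	kvm_gpc_deactivate(&gpc);
 */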
1412 | |
1413 | static inline bool kvm_gpc_is_gpa_active(struct gfn_to_pfn_cache *gpc) |
1414 | { |
	return gpc->active && !kvm_is_error_gpa(gpc->gpa);
1416 | } |
1417 | |
1418 | static inline bool kvm_gpc_is_hva_active(struct gfn_to_pfn_cache *gpc) |
1419 | { |
	return gpc->active && kvm_is_error_gpa(gpc->gpa);
1421 | } |
1422 | |
1423 | void kvm_sigset_activate(struct kvm_vcpu *vcpu); |
1424 | void kvm_sigset_deactivate(struct kvm_vcpu *vcpu); |
1425 | |
1426 | void kvm_vcpu_halt(struct kvm_vcpu *vcpu); |
1427 | bool kvm_vcpu_block(struct kvm_vcpu *vcpu); |
1428 | void kvm_arch_vcpu_blocking(struct kvm_vcpu *vcpu); |
1429 | void kvm_arch_vcpu_unblocking(struct kvm_vcpu *vcpu); |
1430 | bool kvm_vcpu_wake_up(struct kvm_vcpu *vcpu); |
1431 | void kvm_vcpu_kick(struct kvm_vcpu *vcpu); |
1432 | int kvm_vcpu_yield_to(struct kvm_vcpu *target); |
1433 | void kvm_vcpu_on_spin(struct kvm_vcpu *vcpu, bool yield_to_kernel_mode); |
1434 | |
1435 | void kvm_flush_remote_tlbs(struct kvm *kvm); |
1436 | void kvm_flush_remote_tlbs_range(struct kvm *kvm, gfn_t gfn, u64 nr_pages); |
1437 | void kvm_flush_remote_tlbs_memslot(struct kvm *kvm, |
1438 | const struct kvm_memory_slot *memslot); |
1439 | |
1440 | #ifdef KVM_ARCH_NR_OBJS_PER_MEMORY_CACHE |
1441 | int kvm_mmu_topup_memory_cache(struct kvm_mmu_memory_cache *mc, int min); |
1442 | int __kvm_mmu_topup_memory_cache(struct kvm_mmu_memory_cache *mc, int capacity, int min); |
1443 | int kvm_mmu_memory_cache_nr_free_objects(struct kvm_mmu_memory_cache *mc); |
1444 | void kvm_mmu_free_memory_cache(struct kvm_mmu_memory_cache *mc); |
1445 | void *kvm_mmu_memory_cache_alloc(struct kvm_mmu_memory_cache *mc); |
1446 | #endif |
1447 | |
1448 | void kvm_mmu_invalidate_begin(struct kvm *kvm); |
1449 | void kvm_mmu_invalidate_range_add(struct kvm *kvm, gfn_t start, gfn_t end); |
1450 | void kvm_mmu_invalidate_end(struct kvm *kvm); |
1451 | bool kvm_mmu_unmap_gfn_range(struct kvm *kvm, struct kvm_gfn_range *range); |
1452 | |
1453 | long kvm_arch_dev_ioctl(struct file *filp, |
1454 | unsigned int ioctl, unsigned long arg); |
1455 | long kvm_arch_vcpu_ioctl(struct file *filp, |
1456 | unsigned int ioctl, unsigned long arg); |
1457 | vm_fault_t kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf); |
1458 | |
1459 | int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext); |
1460 | |
1461 | void kvm_arch_mmu_enable_log_dirty_pt_masked(struct kvm *kvm, |
1462 | struct kvm_memory_slot *slot, |
1463 | gfn_t gfn_offset, |
1464 | unsigned long mask); |
1465 | void kvm_arch_sync_dirty_log(struct kvm *kvm, struct kvm_memory_slot *memslot); |
1466 | |
1467 | #ifndef CONFIG_KVM_GENERIC_DIRTYLOG_READ_PROTECT |
1468 | int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm, struct kvm_dirty_log *log); |
1469 | int kvm_get_dirty_log(struct kvm *kvm, struct kvm_dirty_log *log, |
1470 | int *is_dirty, struct kvm_memory_slot **memslot); |
1471 | #endif |
1472 | |
1473 | int kvm_vm_ioctl_irq_line(struct kvm *kvm, struct kvm_irq_level *irq_level, |
1474 | bool line_status); |
1475 | int kvm_vm_ioctl_enable_cap(struct kvm *kvm, |
1476 | struct kvm_enable_cap *cap); |
1477 | int kvm_arch_vm_ioctl(struct file *filp, unsigned int ioctl, unsigned long arg); |
1478 | long kvm_arch_vm_compat_ioctl(struct file *filp, unsigned int ioctl, |
1479 | unsigned long arg); |
1480 | |
1481 | int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu); |
1482 | int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu); |
1483 | |
1484 | int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu, |
1485 | struct kvm_translation *tr); |
1486 | |
1487 | int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs); |
1488 | int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs); |
1489 | int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu, |
1490 | struct kvm_sregs *sregs); |
1491 | int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu, |
1492 | struct kvm_sregs *sregs); |
1493 | int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu, |
1494 | struct kvm_mp_state *mp_state); |
1495 | int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu, |
1496 | struct kvm_mp_state *mp_state); |
1497 | int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu, |
1498 | struct kvm_guest_debug *dbg); |
1499 | int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu); |
1500 | |
1501 | void kvm_arch_sched_in(struct kvm_vcpu *vcpu, int cpu); |
1502 | |
1503 | void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu); |
1504 | void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu); |
1505 | int kvm_arch_vcpu_precreate(struct kvm *kvm, unsigned int id); |
1506 | int kvm_arch_vcpu_create(struct kvm_vcpu *vcpu); |
1507 | void kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu); |
1508 | void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu); |
1509 | |
1510 | #ifdef CONFIG_HAVE_KVM_PM_NOTIFIER |
1511 | int kvm_arch_pm_notifier(struct kvm *kvm, unsigned long state); |
1512 | #endif |
1513 | |
1514 | #ifdef __KVM_HAVE_ARCH_VCPU_DEBUGFS |
1515 | void kvm_arch_create_vcpu_debugfs(struct kvm_vcpu *vcpu, struct dentry *debugfs_dentry); |
1516 | #else |
1517 | static inline void kvm_create_vcpu_debugfs(struct kvm_vcpu *vcpu) {} |
1518 | #endif |
1519 | |
1520 | #ifdef CONFIG_KVM_GENERIC_HARDWARE_ENABLING |
1521 | int kvm_arch_hardware_enable(void); |
1522 | void kvm_arch_hardware_disable(void); |
1523 | #endif |
1524 | int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu); |
1525 | bool kvm_arch_vcpu_in_kernel(struct kvm_vcpu *vcpu); |
1526 | int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu); |
1527 | bool kvm_arch_dy_runnable(struct kvm_vcpu *vcpu); |
1528 | bool kvm_arch_dy_has_pending_interrupt(struct kvm_vcpu *vcpu); |
1529 | bool kvm_arch_vcpu_preempted_in_kernel(struct kvm_vcpu *vcpu); |
1530 | int kvm_arch_post_init_vm(struct kvm *kvm); |
1531 | void kvm_arch_pre_destroy_vm(struct kvm *kvm); |
1532 | void kvm_arch_create_vm_debugfs(struct kvm *kvm); |
1533 | |
1534 | #ifndef __KVM_HAVE_ARCH_VM_ALLOC |
1535 | /* |
1536 | * All architectures that want to use vzalloc currently also |
1537 | * need their own kvm_arch_alloc_vm implementation. |
1538 | */ |
1539 | static inline struct kvm *kvm_arch_alloc_vm(void) |
1540 | { |
1541 | return kzalloc(sizeof(struct kvm), GFP_KERNEL_ACCOUNT); |
1542 | } |
1543 | #endif |
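
/*
 * Illustrative sketch only (not part of this header): an architecture whose
 * struct kvm is too large for kzalloc() would define __KVM_HAVE_ARCH_VM_ALLOC
 * in its asm/kvm_host.h and provide its own allocator, e.g. using vzalloc()
 * as the comment above notes:
 *
 *	static inline struct kvm *kvm_arch_alloc_vm(void)
 *	{
 *		return vzalloc(sizeof(struct kvm));
 *	}
 */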
1544 | |
1545 | static inline void __kvm_arch_free_vm(struct kvm *kvm) |
1546 | { |
	kvfree(kvm);
1548 | } |
1549 | |
1550 | #ifndef __KVM_HAVE_ARCH_VM_FREE |
1551 | static inline void kvm_arch_free_vm(struct kvm *kvm) |
1552 | { |
1553 | __kvm_arch_free_vm(kvm); |
1554 | } |
1555 | #endif |
1556 | |
1557 | #ifndef __KVM_HAVE_ARCH_FLUSH_REMOTE_TLBS |
1558 | static inline int kvm_arch_flush_remote_tlbs(struct kvm *kvm) |
1559 | { |
1560 | return -ENOTSUPP; |
1561 | } |
1562 | #else |
1563 | int kvm_arch_flush_remote_tlbs(struct kvm *kvm); |
1564 | #endif |
1565 | |
1566 | #ifndef __KVM_HAVE_ARCH_FLUSH_REMOTE_TLBS_RANGE |
1567 | static inline int kvm_arch_flush_remote_tlbs_range(struct kvm *kvm, |
1568 | gfn_t gfn, u64 nr_pages) |
1569 | { |
1570 | return -EOPNOTSUPP; |
1571 | } |
1572 | #else |
1573 | int kvm_arch_flush_remote_tlbs_range(struct kvm *kvm, gfn_t gfn, u64 nr_pages); |
1574 | #endif |
1575 | |
1576 | #ifdef __KVM_HAVE_ARCH_NONCOHERENT_DMA |
1577 | void kvm_arch_register_noncoherent_dma(struct kvm *kvm); |
1578 | void kvm_arch_unregister_noncoherent_dma(struct kvm *kvm); |
1579 | bool kvm_arch_has_noncoherent_dma(struct kvm *kvm); |
1580 | #else |
1581 | static inline void kvm_arch_register_noncoherent_dma(struct kvm *kvm) |
1582 | { |
1583 | } |
1584 | |
1585 | static inline void kvm_arch_unregister_noncoherent_dma(struct kvm *kvm) |
1586 | { |
1587 | } |
1588 | |
1589 | static inline bool kvm_arch_has_noncoherent_dma(struct kvm *kvm) |
1590 | { |
1591 | return false; |
1592 | } |
1593 | #endif |
1594 | #ifdef __KVM_HAVE_ARCH_ASSIGNED_DEVICE |
1595 | void kvm_arch_start_assignment(struct kvm *kvm); |
1596 | void kvm_arch_end_assignment(struct kvm *kvm); |
1597 | bool kvm_arch_has_assigned_device(struct kvm *kvm); |
1598 | #else |
1599 | static inline void kvm_arch_start_assignment(struct kvm *kvm) |
1600 | { |
1601 | } |
1602 | |
1603 | static inline void kvm_arch_end_assignment(struct kvm *kvm) |
1604 | { |
1605 | } |
1606 | |
1607 | static __always_inline bool kvm_arch_has_assigned_device(struct kvm *kvm) |
1608 | { |
1609 | return false; |
1610 | } |
1611 | #endif |
1612 | |
1613 | static inline struct rcuwait *kvm_arch_vcpu_get_wait(struct kvm_vcpu *vcpu) |
1614 | { |
1615 | #ifdef __KVM_HAVE_ARCH_WQP |
1616 | return vcpu->arch.waitp; |
1617 | #else |
1618 | return &vcpu->wait; |
1619 | #endif |
1620 | } |
1621 | |
1622 | /* |
1623 | * Wake a vCPU if necessary, but don't do any stats/metadata updates. Returns |
1624 | * true if the vCPU was blocking and was awakened, false otherwise. |
1625 | */ |
1626 | static inline bool __kvm_vcpu_wake_up(struct kvm_vcpu *vcpu) |
1627 | { |
	return !!rcuwait_wake_up(kvm_arch_vcpu_get_wait(vcpu));
1629 | } |
1630 | |
1631 | static inline bool kvm_vcpu_is_blocking(struct kvm_vcpu *vcpu) |
1632 | { |
	return rcuwait_active(kvm_arch_vcpu_get_wait(vcpu));
1634 | } |
1635 | |
1636 | #ifdef __KVM_HAVE_ARCH_INTC_INITIALIZED |
1637 | /* |
 * Returns true if the virtual interrupt controller is initialized and
 * ready to accept virtual IRQs. On some architectures the virtual interrupt
 * controller is dynamically instantiated and this is not always true.
1641 | */ |
1642 | bool kvm_arch_intc_initialized(struct kvm *kvm); |
1643 | #else |
1644 | static inline bool kvm_arch_intc_initialized(struct kvm *kvm) |
1645 | { |
1646 | return true; |
1647 | } |
1648 | #endif |
1649 | |
1650 | #ifdef CONFIG_GUEST_PERF_EVENTS |
1651 | unsigned long kvm_arch_vcpu_get_ip(struct kvm_vcpu *vcpu); |
1652 | |
1653 | void kvm_register_perf_callbacks(unsigned int (*pt_intr_handler)(void)); |
1654 | void kvm_unregister_perf_callbacks(void); |
1655 | #else |
1656 | static inline void kvm_register_perf_callbacks(void *ign) {} |
1657 | static inline void kvm_unregister_perf_callbacks(void) {} |
1658 | #endif /* CONFIG_GUEST_PERF_EVENTS */ |
1659 | |
1660 | int kvm_arch_init_vm(struct kvm *kvm, unsigned long type); |
1661 | void kvm_arch_destroy_vm(struct kvm *kvm); |
1662 | void kvm_arch_sync_events(struct kvm *kvm); |
1663 | |
1664 | int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu); |
1665 | |
1666 | struct page *kvm_pfn_to_refcounted_page(kvm_pfn_t pfn); |
1667 | bool kvm_is_zone_device_page(struct page *page); |
1668 | |
1669 | struct kvm_irq_ack_notifier { |
1670 | struct hlist_node link; |
1671 | unsigned gsi; |
1672 | void (*irq_acked)(struct kvm_irq_ack_notifier *kian); |
1673 | }; |
1674 | |
1675 | int kvm_irq_map_gsi(struct kvm *kvm, |
1676 | struct kvm_kernel_irq_routing_entry *entries, int gsi); |
1677 | int kvm_irq_map_chip_pin(struct kvm *kvm, unsigned irqchip, unsigned pin); |
1678 | |
1679 | int kvm_set_irq(struct kvm *kvm, int irq_source_id, u32 irq, int level, |
1680 | bool line_status); |
1681 | int kvm_set_msi(struct kvm_kernel_irq_routing_entry *irq_entry, struct kvm *kvm, |
1682 | int irq_source_id, int level, bool line_status); |
1683 | int kvm_arch_set_irq_inatomic(struct kvm_kernel_irq_routing_entry *e, |
1684 | struct kvm *kvm, int irq_source_id, |
1685 | int level, bool line_status); |
1686 | bool kvm_irq_has_notifier(struct kvm *kvm, unsigned irqchip, unsigned pin); |
1687 | void kvm_notify_acked_gsi(struct kvm *kvm, int gsi); |
1688 | void kvm_notify_acked_irq(struct kvm *kvm, unsigned irqchip, unsigned pin); |
1689 | void kvm_register_irq_ack_notifier(struct kvm *kvm, |
1690 | struct kvm_irq_ack_notifier *kian); |
1691 | void kvm_unregister_irq_ack_notifier(struct kvm *kvm, |
1692 | struct kvm_irq_ack_notifier *kian); |
1693 | int kvm_request_irq_source_id(struct kvm *kvm); |
1694 | void kvm_free_irq_source_id(struct kvm *kvm, int irq_source_id); |
1695 | bool kvm_arch_irqfd_allowed(struct kvm *kvm, struct kvm_irqfd *args); |
1696 | |
1697 | /* |
1698 | * Returns a pointer to the memslot if it contains gfn. |
1699 | * Otherwise returns NULL. |
1700 | */ |
1701 | static inline struct kvm_memory_slot * |
1702 | try_get_memslot(struct kvm_memory_slot *slot, gfn_t gfn) |
1703 | { |
1704 | if (!slot) |
1705 | return NULL; |
1706 | |
1707 | if (gfn >= slot->base_gfn && gfn < slot->base_gfn + slot->npages) |
1708 | return slot; |
1709 | else |
1710 | return NULL; |
1711 | } |
1712 | |
1713 | /* |
1714 | * Returns a pointer to the memslot that contains gfn. Otherwise returns NULL. |
1715 | * |
1716 | * With "approx" set returns the memslot also when the address falls |
1717 | * in a hole. In that case one of the memslots bordering the hole is |
1718 | * returned. |
1719 | */ |
1720 | static inline struct kvm_memory_slot * |
1721 | search_memslots(struct kvm_memslots *slots, gfn_t gfn, bool approx) |
1722 | { |
1723 | struct kvm_memory_slot *slot; |
1724 | struct rb_node *node; |
1725 | int idx = slots->node_idx; |
1726 | |
1727 | slot = NULL; |
1728 | for (node = slots->gfn_tree.rb_node; node; ) { |
1729 | slot = container_of(node, struct kvm_memory_slot, gfn_node[idx]); |
1730 | if (gfn >= slot->base_gfn) { |
1731 | if (gfn < slot->base_gfn + slot->npages) |
1732 | return slot; |
1733 | node = node->rb_right; |
1734 | } else |
1735 | node = node->rb_left; |
1736 | } |
1737 | |
1738 | return approx ? slot : NULL; |
1739 | } |
1740 | |
1741 | static inline struct kvm_memory_slot * |
1742 | ____gfn_to_memslot(struct kvm_memslots *slots, gfn_t gfn, bool approx) |
1743 | { |
1744 | struct kvm_memory_slot *slot; |
1745 | |
	slot = (struct kvm_memory_slot *)atomic_long_read(&slots->last_used_slot);
1747 | slot = try_get_memslot(slot, gfn); |
1748 | if (slot) |
1749 | return slot; |
1750 | |
1751 | slot = search_memslots(slots, gfn, approx); |
1752 | if (slot) { |
		atomic_long_set(&slots->last_used_slot, (unsigned long)slot);
1754 | return slot; |
1755 | } |
1756 | |
1757 | return NULL; |
1758 | } |
1759 | |
1760 | /* |
1761 | * __gfn_to_memslot() and its descendants are here to allow arch code to inline |
1762 | * the lookups in hot paths. gfn_to_memslot() itself isn't here as an inline |
1763 | * because that would bloat other code too much. |
1764 | */ |
1765 | static inline struct kvm_memory_slot * |
1766 | __gfn_to_memslot(struct kvm_memslots *slots, gfn_t gfn) |
1767 | { |
	return ____gfn_to_memslot(slots, gfn, false);
1769 | } |
1770 | |
1771 | static inline unsigned long |
1772 | __gfn_to_hva_memslot(const struct kvm_memory_slot *slot, gfn_t gfn) |
1773 | { |
1774 | /* |
1775 | * The index was checked originally in search_memslots. To avoid |
1776 | * that a malicious guest builds a Spectre gadget out of e.g. page |
1777 | * table walks, do not let the processor speculate loads outside |
1778 | * the guest's registered memslots. |
1779 | */ |
1780 | unsigned long offset = gfn - slot->base_gfn; |
1781 | offset = array_index_nospec(offset, slot->npages); |
1782 | return slot->userspace_addr + offset * PAGE_SIZE; |
1783 | } |
1784 | |
1785 | static inline int memslot_id(struct kvm *kvm, gfn_t gfn) |
1786 | { |
1787 | return gfn_to_memslot(kvm, gfn)->id; |
1788 | } |
1789 | |
1790 | static inline gfn_t |
1791 | hva_to_gfn_memslot(unsigned long hva, struct kvm_memory_slot *slot) |
1792 | { |
1793 | gfn_t gfn_offset = (hva - slot->userspace_addr) >> PAGE_SHIFT; |
1794 | |
1795 | return slot->base_gfn + gfn_offset; |
1796 | } |
1797 | |
1798 | static inline gpa_t gfn_to_gpa(gfn_t gfn) |
1799 | { |
1800 | return (gpa_t)gfn << PAGE_SHIFT; |
1801 | } |
1802 | |
1803 | static inline gfn_t gpa_to_gfn(gpa_t gpa) |
1804 | { |
1805 | return (gfn_t)(gpa >> PAGE_SHIFT); |
1806 | } |
1807 | |
1808 | static inline hpa_t pfn_to_hpa(kvm_pfn_t pfn) |
1809 | { |
1810 | return (hpa_t)pfn << PAGE_SHIFT; |
1811 | } |
1812 | |
1813 | static inline bool kvm_is_gpa_in_memslot(struct kvm *kvm, gpa_t gpa) |
1814 | { |
	unsigned long hva = gfn_to_hva(kvm, gpa_to_gfn(gpa));
1816 | |
	return !kvm_is_error_hva(hva);
1818 | } |
1819 | |
1820 | static inline void kvm_gpc_mark_dirty_in_slot(struct gfn_to_pfn_cache *gpc) |
1821 | { |
1822 | lockdep_assert_held(&gpc->lock); |
1823 | |
1824 | if (!gpc->memslot) |
1825 | return; |
1826 | |
	mark_page_dirty_in_slot(gpc->kvm, gpc->memslot, gpa_to_gfn(gpc->gpa));
1828 | } |
1829 | |
1830 | enum kvm_stat_kind { |
1831 | KVM_STAT_VM, |
1832 | KVM_STAT_VCPU, |
1833 | }; |
1834 | |
1835 | struct kvm_stat_data { |
1836 | struct kvm *kvm; |
1837 | const struct _kvm_stats_desc *desc; |
1838 | enum kvm_stat_kind kind; |
1839 | }; |
1840 | |
1841 | struct _kvm_stats_desc { |
1842 | struct kvm_stats_desc desc; |
1843 | char name[KVM_STATS_NAME_SIZE]; |
1844 | }; |
1845 | |
1846 | #define STATS_DESC_COMMON(type, unit, base, exp, sz, bsz) \ |
1847 | .flags = type | unit | base | \ |
1848 | BUILD_BUG_ON_ZERO(type & ~KVM_STATS_TYPE_MASK) | \ |
1849 | BUILD_BUG_ON_ZERO(unit & ~KVM_STATS_UNIT_MASK) | \ |
1850 | BUILD_BUG_ON_ZERO(base & ~KVM_STATS_BASE_MASK), \ |
1851 | .exponent = exp, \ |
1852 | .size = sz, \ |
1853 | .bucket_size = bsz |
1854 | |
1855 | #define VM_GENERIC_STATS_DESC(stat, type, unit, base, exp, sz, bsz) \ |
1856 | { \ |
1857 | { \ |
1858 | STATS_DESC_COMMON(type, unit, base, exp, sz, bsz), \ |
1859 | .offset = offsetof(struct kvm_vm_stat, generic.stat) \ |
1860 | }, \ |
1861 | .name = #stat, \ |
1862 | } |
1863 | #define VCPU_GENERIC_STATS_DESC(stat, type, unit, base, exp, sz, bsz) \ |
1864 | { \ |
1865 | { \ |
1866 | STATS_DESC_COMMON(type, unit, base, exp, sz, bsz), \ |
1867 | .offset = offsetof(struct kvm_vcpu_stat, generic.stat) \ |
1868 | }, \ |
1869 | .name = #stat, \ |
1870 | } |
1871 | #define VM_STATS_DESC(stat, type, unit, base, exp, sz, bsz) \ |
1872 | { \ |
1873 | { \ |
1874 | STATS_DESC_COMMON(type, unit, base, exp, sz, bsz), \ |
1875 | .offset = offsetof(struct kvm_vm_stat, stat) \ |
1876 | }, \ |
1877 | .name = #stat, \ |
1878 | } |
1879 | #define VCPU_STATS_DESC(stat, type, unit, base, exp, sz, bsz) \ |
1880 | { \ |
1881 | { \ |
1882 | STATS_DESC_COMMON(type, unit, base, exp, sz, bsz), \ |
1883 | .offset = offsetof(struct kvm_vcpu_stat, stat) \ |
1884 | }, \ |
1885 | .name = #stat, \ |
1886 | } |
1887 | /* SCOPE: VM, VM_GENERIC, VCPU, VCPU_GENERIC */ |
1888 | #define STATS_DESC(SCOPE, stat, type, unit, base, exp, sz, bsz) \ |
1889 | SCOPE##_STATS_DESC(stat, type, unit, base, exp, sz, bsz) |
1890 | |
1891 | #define STATS_DESC_CUMULATIVE(SCOPE, name, unit, base, exponent) \ |
1892 | STATS_DESC(SCOPE, name, KVM_STATS_TYPE_CUMULATIVE, \ |
1893 | unit, base, exponent, 1, 0) |
1894 | #define STATS_DESC_INSTANT(SCOPE, name, unit, base, exponent) \ |
1895 | STATS_DESC(SCOPE, name, KVM_STATS_TYPE_INSTANT, \ |
1896 | unit, base, exponent, 1, 0) |
1897 | #define STATS_DESC_PEAK(SCOPE, name, unit, base, exponent) \ |
1898 | STATS_DESC(SCOPE, name, KVM_STATS_TYPE_PEAK, \ |
1899 | unit, base, exponent, 1, 0) |
1900 | #define STATS_DESC_LINEAR_HIST(SCOPE, name, unit, base, exponent, sz, bsz) \ |
1901 | STATS_DESC(SCOPE, name, KVM_STATS_TYPE_LINEAR_HIST, \ |
1902 | unit, base, exponent, sz, bsz) |
1903 | #define STATS_DESC_LOG_HIST(SCOPE, name, unit, base, exponent, sz) \ |
1904 | STATS_DESC(SCOPE, name, KVM_STATS_TYPE_LOG_HIST, \ |
1905 | unit, base, exponent, sz, 0) |
1906 | |
1907 | /* Cumulative counter, read/write */ |
1908 | #define STATS_DESC_COUNTER(SCOPE, name) \ |
1909 | STATS_DESC_CUMULATIVE(SCOPE, name, KVM_STATS_UNIT_NONE, \ |
1910 | KVM_STATS_BASE_POW10, 0) |
1911 | /* Instantaneous counter, read only */ |
1912 | #define STATS_DESC_ICOUNTER(SCOPE, name) \ |
1913 | STATS_DESC_INSTANT(SCOPE, name, KVM_STATS_UNIT_NONE, \ |
1914 | KVM_STATS_BASE_POW10, 0) |
1915 | /* Peak counter, read/write */ |
1916 | #define STATS_DESC_PCOUNTER(SCOPE, name) \ |
1917 | STATS_DESC_PEAK(SCOPE, name, KVM_STATS_UNIT_NONE, \ |
1918 | KVM_STATS_BASE_POW10, 0) |
1919 | |
1920 | /* Instantaneous boolean value, read only */ |
1921 | #define STATS_DESC_IBOOLEAN(SCOPE, name) \ |
1922 | STATS_DESC_INSTANT(SCOPE, name, KVM_STATS_UNIT_BOOLEAN, \ |
1923 | KVM_STATS_BASE_POW10, 0) |
1924 | /* Peak (sticky) boolean value, read/write */ |
1925 | #define STATS_DESC_PBOOLEAN(SCOPE, name) \ |
1926 | STATS_DESC_PEAK(SCOPE, name, KVM_STATS_UNIT_BOOLEAN, \ |
1927 | KVM_STATS_BASE_POW10, 0) |
1928 | |
1929 | /* Cumulative time in nanosecond */ |
1930 | #define STATS_DESC_TIME_NSEC(SCOPE, name) \ |
1931 | STATS_DESC_CUMULATIVE(SCOPE, name, KVM_STATS_UNIT_SECONDS, \ |
1932 | KVM_STATS_BASE_POW10, -9) |
1933 | /* Linear histogram for time in nanosecond */ |
1934 | #define STATS_DESC_LINHIST_TIME_NSEC(SCOPE, name, sz, bsz) \ |
1935 | STATS_DESC_LINEAR_HIST(SCOPE, name, KVM_STATS_UNIT_SECONDS, \ |
1936 | KVM_STATS_BASE_POW10, -9, sz, bsz) |
1937 | /* Logarithmic histogram for time in nanosecond */ |
1938 | #define STATS_DESC_LOGHIST_TIME_NSEC(SCOPE, name, sz) \ |
1939 | STATS_DESC_LOG_HIST(SCOPE, name, KVM_STATS_UNIT_SECONDS, \ |
1940 | KVM_STATS_BASE_POW10, -9, sz) |
1941 | |
1942 | #define KVM_GENERIC_VM_STATS() \ |
1943 | STATS_DESC_COUNTER(VM_GENERIC, remote_tlb_flush), \ |
1944 | STATS_DESC_COUNTER(VM_GENERIC, remote_tlb_flush_requests) |
1945 | |
1946 | #define KVM_GENERIC_VCPU_STATS() \ |
1947 | STATS_DESC_COUNTER(VCPU_GENERIC, halt_successful_poll), \ |
1948 | STATS_DESC_COUNTER(VCPU_GENERIC, halt_attempted_poll), \ |
1949 | STATS_DESC_COUNTER(VCPU_GENERIC, halt_poll_invalid), \ |
1950 | STATS_DESC_COUNTER(VCPU_GENERIC, halt_wakeup), \ |
1951 | STATS_DESC_TIME_NSEC(VCPU_GENERIC, halt_poll_success_ns), \ |
1952 | STATS_DESC_TIME_NSEC(VCPU_GENERIC, halt_poll_fail_ns), \ |
1953 | STATS_DESC_TIME_NSEC(VCPU_GENERIC, halt_wait_ns), \ |
1954 | STATS_DESC_LOGHIST_TIME_NSEC(VCPU_GENERIC, halt_poll_success_hist, \ |
1955 | HALT_POLL_HIST_COUNT), \ |
1956 | STATS_DESC_LOGHIST_TIME_NSEC(VCPU_GENERIC, halt_poll_fail_hist, \ |
1957 | HALT_POLL_HIST_COUNT), \ |
1958 | STATS_DESC_LOGHIST_TIME_NSEC(VCPU_GENERIC, halt_wait_hist, \ |
1959 | HALT_POLL_HIST_COUNT), \ |
1960 | STATS_DESC_IBOOLEAN(VCPU_GENERIC, blocking) |
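
/*
 * Sketch of how an architecture typically instantiates its stats descriptor
 * arrays with the macros above; the arch-specific stat names shown here are
 * hypothetical and must be members of the arch's kvm_vm_stat/kvm_vcpu_stat:
 *
 *	const struct _kvm_stats_desc kvm_vm_stats_desc[] = {
 *		KVM_GENERIC_VM_STATS(),
 *		STATS_DESC_COUNTER(VM, arch_specific_counter),
 *	};
 *
 *	const struct _kvm_stats_desc kvm_vcpu_stats_desc[] = {
 *		KVM_GENERIC_VCPU_STATS(),
 *		STATS_DESC_ICOUNTER(VCPU, arch_specific_gauge),
 *	};
 */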
1961 | |
1962 | extern struct dentry *kvm_debugfs_dir; |
1963 | |
ssize_t kvm_stats_read(char *id, const struct kvm_stats_header *header,
1965 | const struct _kvm_stats_desc *desc, |
1966 | void *stats, size_t size_stats, |
1967 | char __user *user_buffer, size_t size, loff_t *offset); |
1968 | |
1969 | /** |
1970 | * kvm_stats_linear_hist_update() - Update bucket value for linear histogram |
1971 | * statistics data. |
1972 | * |
1973 | * @data: start address of the stats data |
 * @size: the number of buckets in the stats data
1975 | * @value: the new value used to update the linear histogram's bucket |
1976 | * @bucket_size: the size (width) of a bucket |
1977 | */ |
1978 | static inline void kvm_stats_linear_hist_update(u64 *data, size_t size, |
1979 | u64 value, size_t bucket_size) |
1980 | { |
	size_t index = div64_u64(value, bucket_size);
1982 | |
1983 | index = min(index, size - 1); |
1984 | ++data[index]; |
1985 | } |
1986 | |
1987 | /** |
1988 | * kvm_stats_log_hist_update() - Update bucket value for logarithmic histogram |
1989 | * statistics data. |
1990 | * |
1991 | * @data: start address of the stats data |
 * @size: the number of buckets in the stats data
1993 | * @value: the new value used to update the logarithmic histogram's bucket |
1994 | */ |
1995 | static inline void kvm_stats_log_hist_update(u64 *data, size_t size, u64 value) |
1996 | { |
	size_t index = fls64(value);
1998 | |
1999 | index = min(index, size - 1); |
2000 | ++data[index]; |
2001 | } |
2002 | |
2003 | #define KVM_STATS_LINEAR_HIST_UPDATE(array, value, bsize) \ |
2004 | kvm_stats_linear_hist_update(array, ARRAY_SIZE(array), value, bsize) |
2005 | #define KVM_STATS_LOG_HIST_UPDATE(array, value) \ |
2006 | kvm_stats_log_hist_update(array, ARRAY_SIZE(array), value) |
2007 | |
2008 | |
extern const struct kvm_stats_header kvm_vm_stats_header;
2010 | extern const struct _kvm_stats_desc kvm_vm_stats_desc[]; |
extern const struct kvm_stats_header kvm_vcpu_stats_header;
2012 | extern const struct _kvm_stats_desc kvm_vcpu_stats_desc[]; |
2013 | |
2014 | #ifdef CONFIG_KVM_GENERIC_MMU_NOTIFIER |
2015 | static inline int mmu_invalidate_retry(struct kvm *kvm, unsigned long mmu_seq) |
2016 | { |
2017 | if (unlikely(kvm->mmu_invalidate_in_progress)) |
2018 | return 1; |
2019 | /* |
2020 | * Ensure the read of mmu_invalidate_in_progress happens before |
2021 | * the read of mmu_invalidate_seq. This interacts with the |
2022 | * smp_wmb() in mmu_notifier_invalidate_range_end to make sure |
2023 | * that the caller either sees the old (non-zero) value of |
2024 | * mmu_invalidate_in_progress or the new (incremented) value of |
2025 | * mmu_invalidate_seq. |
2026 | * |
2027 | * PowerPC Book3s HV KVM calls this under a per-page lock rather |
2028 | * than under kvm->mmu_lock, for scalability, so can't rely on |
2029 | * kvm->mmu_lock to keep things ordered. |
2030 | */ |
2031 | smp_rmb(); |
2032 | if (kvm->mmu_invalidate_seq != mmu_seq) |
2033 | return 1; |
2034 | return 0; |
2035 | } |
2036 | |
2037 | static inline int mmu_invalidate_retry_gfn(struct kvm *kvm, |
2038 | unsigned long mmu_seq, |
2039 | gfn_t gfn) |
2040 | { |
2041 | lockdep_assert_held(&kvm->mmu_lock); |
2042 | /* |
2043 | * If mmu_invalidate_in_progress is non-zero, then the range maintained |
2044 | * by kvm_mmu_notifier_invalidate_range_start contains all addresses |
2045 | * that might be being invalidated. Note that it may include some false |
 * positives, due to shortcuts when handling concurrent invalidations.
2047 | */ |
2048 | if (unlikely(kvm->mmu_invalidate_in_progress)) { |
2049 | /* |
2050 | * Dropping mmu_lock after bumping mmu_invalidate_in_progress |
2051 | * but before updating the range is a KVM bug. |
2052 | */ |
2053 | if (WARN_ON_ONCE(kvm->mmu_invalidate_range_start == INVALID_GPA || |
2054 | kvm->mmu_invalidate_range_end == INVALID_GPA)) |
2055 | return 1; |
2056 | |
2057 | if (gfn >= kvm->mmu_invalidate_range_start && |
2058 | gfn < kvm->mmu_invalidate_range_end) |
2059 | return 1; |
2060 | } |
2061 | |
2062 | if (kvm->mmu_invalidate_seq != mmu_seq) |
2063 | return 1; |
2064 | return 0; |
2065 | } |
2066 | |
2067 | /* |
2068 | * This lockless version of the range-based retry check *must* be paired with a |
2069 | * call to the locked version after acquiring mmu_lock, i.e. this is safe to |
2070 | * use only as a pre-check to avoid contending mmu_lock. This version *will* |
2071 | * get false negatives and false positives. |
2072 | */ |
2073 | static inline bool mmu_invalidate_retry_gfn_unsafe(struct kvm *kvm, |
2074 | unsigned long mmu_seq, |
2075 | gfn_t gfn) |
2076 | { |
2077 | /* |
2078 | * Use READ_ONCE() to ensure the in-progress flag and sequence counter |
2079 | * are always read from memory, e.g. so that checking for retry in a |
2080 | * loop won't result in an infinite retry loop. Don't force loads for |
2081 | * start+end, as the key to avoiding infinite retry loops is observing |
2082 | * the 1=>0 transition of in-progress, i.e. getting false negatives |
2083 | * due to stale start+end values is acceptable. |
2084 | */ |
2085 | if (unlikely(READ_ONCE(kvm->mmu_invalidate_in_progress)) && |
2086 | gfn >= kvm->mmu_invalidate_range_start && |
2087 | gfn < kvm->mmu_invalidate_range_end) |
2088 | return true; |
2089 | |
2090 | return READ_ONCE(kvm->mmu_invalidate_seq) != mmu_seq; |
2091 | } |
2092 | #endif |
2093 | |
2094 | #ifdef CONFIG_HAVE_KVM_IRQ_ROUTING |
2095 | |
2096 | #define KVM_MAX_IRQ_ROUTES 4096 /* might need extension/rework in the future */ |
2097 | |
2098 | bool kvm_arch_can_set_irq_routing(struct kvm *kvm); |
2099 | int kvm_set_irq_routing(struct kvm *kvm, |
2100 | const struct kvm_irq_routing_entry *entries, |
2101 | unsigned nr, |
2102 | unsigned flags); |
2103 | int kvm_set_routing_entry(struct kvm *kvm, |
2104 | struct kvm_kernel_irq_routing_entry *e, |
2105 | const struct kvm_irq_routing_entry *ue); |
2106 | void kvm_free_irq_routing(struct kvm *kvm); |
2107 | |
2108 | #else |
2109 | |
2110 | static inline void kvm_free_irq_routing(struct kvm *kvm) {} |
2111 | |
2112 | #endif |
2113 | |
2114 | int kvm_send_userspace_msi(struct kvm *kvm, struct kvm_msi *msi); |
2115 | |
2116 | void kvm_eventfd_init(struct kvm *kvm); |
2117 | int kvm_ioeventfd(struct kvm *kvm, struct kvm_ioeventfd *args); |
2118 | |
2119 | #ifdef CONFIG_HAVE_KVM_IRQCHIP |
2120 | int kvm_irqfd(struct kvm *kvm, struct kvm_irqfd *args); |
2121 | void kvm_irqfd_release(struct kvm *kvm); |
2122 | bool kvm_notify_irqfd_resampler(struct kvm *kvm, |
2123 | unsigned int irqchip, |
2124 | unsigned int pin); |
2125 | void kvm_irq_routing_update(struct kvm *); |
2126 | #else |
2127 | static inline int kvm_irqfd(struct kvm *kvm, struct kvm_irqfd *args) |
2128 | { |
2129 | return -EINVAL; |
2130 | } |
2131 | |
2132 | static inline void kvm_irqfd_release(struct kvm *kvm) {} |
2133 | |
2134 | static inline bool kvm_notify_irqfd_resampler(struct kvm *kvm, |
2135 | unsigned int irqchip, |
2136 | unsigned int pin) |
2137 | { |
2138 | return false; |
2139 | } |
2140 | #endif /* CONFIG_HAVE_KVM_IRQCHIP */ |
2141 | |
2142 | void kvm_arch_irq_routing_update(struct kvm *kvm); |
2143 | |
2144 | static inline void __kvm_make_request(int req, struct kvm_vcpu *vcpu) |
2145 | { |
2146 | /* |
2147 | * Ensure the rest of the request is published to kvm_check_request's |
2148 | * caller. Paired with the smp_mb__after_atomic in kvm_check_request. |
2149 | */ |
2150 | smp_wmb(); |
	set_bit(req & KVM_REQUEST_MASK, (void *)&vcpu->requests);
2152 | } |
2153 | |
2154 | static __always_inline void kvm_make_request(int req, struct kvm_vcpu *vcpu) |
2155 | { |
2156 | /* |
	 * Requests that don't require vCPU action should never be logged in
2158 | * vcpu->requests. The vCPU won't clear the request, so it will stay |
2159 | * logged indefinitely and prevent the vCPU from entering the guest. |
2160 | */ |
2161 | BUILD_BUG_ON(!__builtin_constant_p(req) || |
2162 | (req & KVM_REQUEST_NO_ACTION)); |
2163 | |
2164 | __kvm_make_request(req, vcpu); |
2165 | } |
2166 | |
2167 | static inline bool kvm_request_pending(struct kvm_vcpu *vcpu) |
2168 | { |
2169 | return READ_ONCE(vcpu->requests); |
2170 | } |
2171 | |
2172 | static inline bool kvm_test_request(int req, struct kvm_vcpu *vcpu) |
2173 | { |
2174 | return test_bit(req & KVM_REQUEST_MASK, (void *)&vcpu->requests); |
2175 | } |
2176 | |
2177 | static inline void kvm_clear_request(int req, struct kvm_vcpu *vcpu) |
2178 | { |
	clear_bit(req & KVM_REQUEST_MASK, (void *)&vcpu->requests);
2180 | } |
2181 | |
2182 | static inline bool kvm_check_request(int req, struct kvm_vcpu *vcpu) |
2183 | { |
2184 | if (kvm_test_request(req, vcpu)) { |
2185 | kvm_clear_request(req, vcpu); |
2186 | |
2187 | /* |
2188 | * Ensure the rest of the request is visible to kvm_check_request's |
2189 | * caller. Paired with the smp_wmb in kvm_make_request. |
2190 | */ |
2191 | smp_mb__after_atomic(); |
2192 | return true; |
2193 | } else { |
2194 | return false; |
2195 | } |
2196 | } |
2197 | |
2198 | #ifdef CONFIG_KVM_GENERIC_HARDWARE_ENABLING |
2199 | extern bool kvm_rebooting; |
2200 | #endif |
2201 | |
2202 | extern unsigned int halt_poll_ns; |
2203 | extern unsigned int halt_poll_ns_grow; |
2204 | extern unsigned int halt_poll_ns_grow_start; |
2205 | extern unsigned int halt_poll_ns_shrink; |
2206 | |
2207 | struct kvm_device { |
2208 | const struct kvm_device_ops *ops; |
2209 | struct kvm *kvm; |
2210 | void *private; |
2211 | struct list_head vm_node; |
2212 | }; |
2213 | |
2214 | /* create, destroy, and name are mandatory */ |
2215 | struct kvm_device_ops { |
2216 | const char *name; |
2217 | |
2218 | /* |
2219 | * create is called holding kvm->lock and any operations not suitable |
2220 | * to do while holding the lock should be deferred to init (see |
2221 | * below). |
2222 | */ |
2223 | int (*create)(struct kvm_device *dev, u32 type); |
2224 | |
2225 | /* |
	 * init is called after a successful create, and is called without
	 * holding kvm->lock.
2228 | */ |
2229 | void (*init)(struct kvm_device *dev); |
2230 | |
2231 | /* |
2232 | * Destroy is responsible for freeing dev. |
2233 | * |
2234 | * Destroy may be called before or after destructors are called |
2235 | * on emulated I/O regions, depending on whether a reference is |
2236 | * held by a vcpu or other kvm component that gets destroyed |
2237 | * after the emulated I/O. |
2238 | */ |
2239 | void (*destroy)(struct kvm_device *dev); |
2240 | |
2241 | /* |
2242 | * Release is an alternative method to free the device. It is |
2243 | * called when the device file descriptor is closed. Once |
2244 | * release is called, the destroy method will not be called |
2245 | * anymore as the device is removed from the device list of |
2246 | * the VM. kvm->lock is held. |
2247 | */ |
2248 | void (*release)(struct kvm_device *dev); |
2249 | |
2250 | int (*set_attr)(struct kvm_device *dev, struct kvm_device_attr *attr); |
2251 | int (*get_attr)(struct kvm_device *dev, struct kvm_device_attr *attr); |
2252 | int (*has_attr)(struct kvm_device *dev, struct kvm_device_attr *attr); |
2253 | long (*ioctl)(struct kvm_device *dev, unsigned int ioctl, |
2254 | unsigned long arg); |
2255 | int (*mmap)(struct kvm_device *dev, struct vm_area_struct *vma); |
2256 | }; |
2257 | |
2258 | struct kvm_device *kvm_device_from_filp(struct file *filp); |
2259 | int kvm_register_device_ops(const struct kvm_device_ops *ops, u32 type); |
2260 | void kvm_unregister_device_ops(u32 type); |
2261 | |
2262 | extern struct kvm_device_ops kvm_mpic_ops; |
2263 | extern struct kvm_device_ops kvm_arm_vgic_v2_ops; |
2264 | extern struct kvm_device_ops kvm_arm_vgic_v3_ops; |
2265 | |
2266 | #ifdef CONFIG_HAVE_KVM_CPU_RELAX_INTERCEPT |
2267 | |
2268 | static inline void kvm_vcpu_set_in_spin_loop(struct kvm_vcpu *vcpu, bool val) |
2269 | { |
2270 | vcpu->spin_loop.in_spin_loop = val; |
2271 | } |
2272 | static inline void kvm_vcpu_set_dy_eligible(struct kvm_vcpu *vcpu, bool val) |
2273 | { |
2274 | vcpu->spin_loop.dy_eligible = val; |
2275 | } |
2276 | |
2277 | #else /* !CONFIG_HAVE_KVM_CPU_RELAX_INTERCEPT */ |
2278 | |
2279 | static inline void kvm_vcpu_set_in_spin_loop(struct kvm_vcpu *vcpu, bool val) |
2280 | { |
2281 | } |
2282 | |
2283 | static inline void kvm_vcpu_set_dy_eligible(struct kvm_vcpu *vcpu, bool val) |
2284 | { |
2285 | } |
2286 | #endif /* CONFIG_HAVE_KVM_CPU_RELAX_INTERCEPT */ |
2287 | |
2288 | static inline bool kvm_is_visible_memslot(struct kvm_memory_slot *memslot) |
2289 | { |
2290 | return (memslot && memslot->id < KVM_USER_MEM_SLOTS && |
2291 | !(memslot->flags & KVM_MEMSLOT_INVALID)); |
2292 | } |
2293 | |
2294 | struct kvm_vcpu *kvm_get_running_vcpu(void); |
2295 | struct kvm_vcpu * __percpu *kvm_get_running_vcpus(void); |
2296 | |
2297 | #ifdef CONFIG_HAVE_KVM_IRQ_BYPASS |
2298 | bool kvm_arch_has_irq_bypass(void); |
2299 | int kvm_arch_irq_bypass_add_producer(struct irq_bypass_consumer *, |
2300 | struct irq_bypass_producer *); |
2301 | void kvm_arch_irq_bypass_del_producer(struct irq_bypass_consumer *, |
2302 | struct irq_bypass_producer *); |
2303 | void kvm_arch_irq_bypass_stop(struct irq_bypass_consumer *); |
2304 | void kvm_arch_irq_bypass_start(struct irq_bypass_consumer *); |
2305 | int kvm_arch_update_irqfd_routing(struct kvm *kvm, unsigned int host_irq, |
2306 | uint32_t guest_irq, bool set); |
2307 | bool kvm_arch_irqfd_route_changed(struct kvm_kernel_irq_routing_entry *, |
2308 | struct kvm_kernel_irq_routing_entry *); |
2309 | #endif /* CONFIG_HAVE_KVM_IRQ_BYPASS */ |
2310 | |
2311 | #ifdef CONFIG_HAVE_KVM_INVALID_WAKEUPS |
/* If we wake up during the poll time, was it a successful poll? */
2313 | static inline bool vcpu_valid_wakeup(struct kvm_vcpu *vcpu) |
2314 | { |
2315 | return vcpu->valid_wakeup; |
2316 | } |
2317 | |
2318 | #else |
2319 | static inline bool vcpu_valid_wakeup(struct kvm_vcpu *vcpu) |
2320 | { |
2321 | return true; |
2322 | } |
2323 | #endif /* CONFIG_HAVE_KVM_INVALID_WAKEUPS */ |
2324 | |
2325 | #ifdef CONFIG_HAVE_KVM_NO_POLL |
2326 | /* Callback that tells if we must not poll */ |
2327 | bool kvm_arch_no_poll(struct kvm_vcpu *vcpu); |
2328 | #else |
2329 | static inline bool kvm_arch_no_poll(struct kvm_vcpu *vcpu) |
2330 | { |
2331 | return false; |
2332 | } |
2333 | #endif /* CONFIG_HAVE_KVM_NO_POLL */ |
2334 | |
2335 | #ifdef CONFIG_HAVE_KVM_VCPU_ASYNC_IOCTL |
2336 | long kvm_arch_vcpu_async_ioctl(struct file *filp, |
2337 | unsigned int ioctl, unsigned long arg); |
2338 | #else |
2339 | static inline long kvm_arch_vcpu_async_ioctl(struct file *filp, |
2340 | unsigned int ioctl, |
2341 | unsigned long arg) |
2342 | { |
2343 | return -ENOIOCTLCMD; |
2344 | } |
2345 | #endif /* CONFIG_HAVE_KVM_VCPU_ASYNC_IOCTL */ |
2346 | |
2347 | void kvm_arch_guest_memory_reclaimed(struct kvm *kvm); |
2348 | |
2349 | #ifdef CONFIG_HAVE_KVM_VCPU_RUN_PID_CHANGE |
2350 | int kvm_arch_vcpu_run_pid_change(struct kvm_vcpu *vcpu); |
2351 | #else |
2352 | static inline int kvm_arch_vcpu_run_pid_change(struct kvm_vcpu *vcpu) |
2353 | { |
2354 | return 0; |
2355 | } |
2356 | #endif /* CONFIG_HAVE_KVM_VCPU_RUN_PID_CHANGE */ |
2357 | |
2358 | typedef int (*kvm_vm_thread_fn_t)(struct kvm *kvm, uintptr_t data); |
2359 | |
2360 | int kvm_vm_create_worker_thread(struct kvm *kvm, kvm_vm_thread_fn_t thread_fn, |
2361 | uintptr_t data, const char *name, |
2362 | struct task_struct **thread_ptr); |
2363 | |
2364 | #ifdef CONFIG_KVM_XFER_TO_GUEST_WORK |
2365 | static inline void kvm_handle_signal_exit(struct kvm_vcpu *vcpu) |
2366 | { |
2367 | vcpu->run->exit_reason = KVM_EXIT_INTR; |
2368 | vcpu->stat.signal_exits++; |
2369 | } |
2370 | #endif /* CONFIG_KVM_XFER_TO_GUEST_WORK */ |
2371 | |
2372 | /* |
2373 | * If more than one page is being (un)accounted, @virt must be the address of |
 * the first page of a block of pages that were allocated together (i.e.
2375 | * accounted together). |
2376 | * |
2377 | * kvm_account_pgtable_pages() is thread-safe because mod_lruvec_page_state() |
2378 | * is thread-safe. |
2379 | */ |
2380 | static inline void kvm_account_pgtable_pages(void *virt, int nr) |
2381 | { |
	mod_lruvec_page_state(virt_to_page(virt), NR_SECONDARY_PAGETABLE, nr);
2383 | } |
2384 | |
2385 | /* |
2386 | * This defines how many reserved entries we want to keep before we |
2387 | * kick the vcpu to the userspace to avoid dirty ring full. This |
2388 | * value can be tuned to higher if e.g. PML is enabled on the host. |
2389 | */ |
2390 | #define KVM_DIRTY_RING_RSVD_ENTRIES 64 |
2391 | |
2392 | /* Max number of entries allowed for each kvm dirty ring */ |
2393 | #define KVM_DIRTY_RING_MAX_ENTRIES 65536 |
2394 | |
2395 | static inline void kvm_prepare_memory_fault_exit(struct kvm_vcpu *vcpu, |
2396 | gpa_t gpa, gpa_t size, |
2397 | bool is_write, bool is_exec, |
2398 | bool is_private) |
2399 | { |
2400 | vcpu->run->exit_reason = KVM_EXIT_MEMORY_FAULT; |
2401 | vcpu->run->memory_fault.gpa = gpa; |
2402 | vcpu->run->memory_fault.size = size; |
2403 | |
2404 | /* RWX flags are not (yet) defined or communicated to userspace. */ |
2405 | vcpu->run->memory_fault.flags = 0; |
2406 | if (is_private) |
2407 | vcpu->run->memory_fault.flags |= KVM_MEMORY_EXIT_FLAG_PRIVATE; |
2408 | } |
2409 | |
2410 | #ifdef CONFIG_KVM_GENERIC_MEMORY_ATTRIBUTES |
2411 | static inline unsigned long kvm_get_memory_attributes(struct kvm *kvm, gfn_t gfn) |
2412 | { |
	return xa_to_value(xa_load(&kvm->mem_attr_array, gfn));
2414 | } |
2415 | |
2416 | bool kvm_range_has_memory_attributes(struct kvm *kvm, gfn_t start, gfn_t end, |
2417 | unsigned long attrs); |
2418 | bool kvm_arch_pre_set_memory_attributes(struct kvm *kvm, |
2419 | struct kvm_gfn_range *range); |
2420 | bool kvm_arch_post_set_memory_attributes(struct kvm *kvm, |
2421 | struct kvm_gfn_range *range); |
2422 | |
2423 | static inline bool kvm_mem_is_private(struct kvm *kvm, gfn_t gfn) |
2424 | { |
2425 | return IS_ENABLED(CONFIG_KVM_PRIVATE_MEM) && |
2426 | kvm_get_memory_attributes(kvm, gfn) & KVM_MEMORY_ATTRIBUTE_PRIVATE; |
2427 | } |
2428 | #else |
2429 | static inline bool kvm_mem_is_private(struct kvm *kvm, gfn_t gfn) |
2430 | { |
2431 | return false; |
2432 | } |
2433 | #endif /* CONFIG_KVM_GENERIC_MEMORY_ATTRIBUTES */ |
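
/*
 * Sketch of how a fault path might consult the attribute: if the fault's
 * view of the gfn disagrees with the tracked attribute, exit to userspace
 * so it can convert the memory (fault_is_private is hypothetical here):
 *
 *	if (fault_is_private != kvm_mem_is_private(kvm, gfn)) {
 *		kvm_prepare_memory_fault_exit(vcpu, gpa, PAGE_SIZE,
 *					      is_write, is_exec,
 *					      fault_is_private);
 *		return -EFAULT;
 *	}
 */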
2434 | |
2435 | #ifdef CONFIG_KVM_PRIVATE_MEM |
2436 | int kvm_gmem_get_pfn(struct kvm *kvm, struct kvm_memory_slot *slot, |
2437 | gfn_t gfn, kvm_pfn_t *pfn, int *max_order); |
2438 | #else |
2439 | static inline int kvm_gmem_get_pfn(struct kvm *kvm, |
2440 | struct kvm_memory_slot *slot, gfn_t gfn, |
2441 | kvm_pfn_t *pfn, int *max_order) |
2442 | { |
2443 | KVM_BUG_ON(1, kvm); |
2444 | return -EIO; |
2445 | } |
2446 | #endif /* CONFIG_KVM_PRIVATE_MEM */ |
2447 | |
2448 | #endif |
2449 | |