1 | #ifndef KVM_DIRTY_RING_H |
2 | #define KVM_DIRTY_RING_H |
3 | |
4 | #include <linux/kvm.h> |
5 | |
6 | /** |
7 | * kvm_dirty_ring: KVM internal dirty ring structure |
8 | * |
9 | * @dirty_index: free running counter that points to the next slot in |
10 | * dirty_ring->dirty_gfns, where a new dirty page should go |
11 | * @reset_index: free running counter that points to the next dirty page |
12 | * in dirty_ring->dirty_gfns for which dirty trap needs to |
13 | * be reenabled |
14 | * @size: size of the compact list, dirty_ring->dirty_gfns |
15 | * @soft_limit: when the number of dirty pages in the list reaches this |
16 | * limit, vcpu that owns this ring should exit to userspace |
17 | * to allow userspace to harvest all the dirty pages |
18 | * @dirty_gfns: the array to keep the dirty gfns |
19 | * @index: index of this dirty ring |
20 | */ |
21 | struct kvm_dirty_ring { |
22 | u32 dirty_index; |
23 | u32 reset_index; |
24 | u32 size; |
25 | u32 soft_limit; |
26 | struct kvm_dirty_gfn *dirty_gfns; |
27 | int index; |
28 | }; |
29 | |
30 | #ifndef CONFIG_HAVE_KVM_DIRTY_RING |
31 | /* |
32 | * If CONFIG_HAVE_HVM_DIRTY_RING not defined, kvm_dirty_ring.o should |
33 | * not be included as well, so define these nop functions for the arch. |
34 | */ |
/* Stub when the dirty ring is not compiled in: no reserved entries needed. */
static inline u32 kvm_dirty_ring_get_rsvd_entries(void)
{
	return 0;
}
39 | |
/*
 * Without the dirty ring, the dirty bitmap is the only dirty-logging
 * backend, so always report the bitmap as in use.
 */
static inline bool kvm_use_dirty_bitmap(struct kvm *kvm)
{
	return true;
}
44 | |
/* Stub: nothing to allocate; report success so callers need no #ifdef. */
static inline int kvm_dirty_ring_alloc(struct kvm_dirty_ring *ring,
				       int index, u32 size)
{
	return 0;
}
50 | |
/* Stub: no ring to reset; report zero pages processed. */
static inline int kvm_dirty_ring_reset(struct kvm *kvm,
				       struct kvm_dirty_ring *ring)
{
	return 0;
}
56 | |
/* Stub: no ring to push to; the dirty (slot, offset) pair is dropped. */
static inline void kvm_dirty_ring_push(struct kvm_vcpu *vcpu,
				       u32 slot, u64 offset)
{
}
61 | |
/* Stub: no ring pages exist to hand out for mmap; always return NULL. */
static inline struct page *kvm_dirty_ring_get_page(struct kvm_dirty_ring *ring,
						   u32 offset)
{
	return NULL;
}
67 | |
/* Stub: nothing was allocated, so nothing to free. */
static inline void kvm_dirty_ring_free(struct kvm_dirty_ring *ring)
{
}
71 | |
72 | #else /* CONFIG_HAVE_KVM_DIRTY_RING */ |
73 | |
int kvm_cpu_dirty_log_size(void);
bool kvm_use_dirty_bitmap(struct kvm *kvm);
bool kvm_arch_allow_write_without_running_vcpu(struct kvm *kvm);
u32 kvm_dirty_ring_get_rsvd_entries(void);
int kvm_dirty_ring_alloc(struct kvm_dirty_ring *ring, int index, u32 size);

/*
 * called with kvm->slots_lock held, returns the number of
 * processed pages.
 */
int kvm_dirty_ring_reset(struct kvm *kvm, struct kvm_dirty_ring *ring);

/*
 * Push the dirty (slot, offset) pair into the vcpu's ring.  Returns
 * nothing: a full/soft-full ring is signalled via a vcpu request (see
 * kvm_dirty_ring_check_request()) rather than an error return.
 */
void kvm_dirty_ring_push(struct kvm_vcpu *vcpu, u32 slot, u64 offset);

/*
 * Returns true if the vcpu has a pending dirty-ring request (e.g. the
 * ring reached its soft limit and userspace should harvest it) —
 * NOTE(review): exact request semantics live in the .c file; confirm there.
 */
bool kvm_dirty_ring_check_request(struct kvm_vcpu *vcpu);

/* for use in vm_operations_struct */
struct page *kvm_dirty_ring_get_page(struct kvm_dirty_ring *ring, u32 offset);

void kvm_dirty_ring_free(struct kvm_dirty_ring *ring);
98 | |
99 | #endif /* CONFIG_HAVE_KVM_DIRTY_RING */ |
100 | |
101 | #endif /* KVM_DIRTY_RING_H */ |
102 | |