1 | // SPDX-License-Identifier: GPL-2.0 |
2 | /* |
3 | * Copyright © 2019 Oracle and/or its affiliates. All rights reserved. |
4 | * Copyright © 2020 Amazon.com, Inc. or its affiliates. All Rights Reserved. |
5 | * |
6 | * KVM Xen emulation |
7 | */ |
8 | |
9 | #ifndef __ARCH_X86_KVM_XEN_H__ |
10 | #define __ARCH_X86_KVM_XEN_H__ |
11 | |
12 | #include <asm/xen/hypervisor.h> |
13 | |
14 | #ifdef CONFIG_KVM_XEN |
15 | #include <linux/jump_label_ratelimit.h> |
16 | |
17 | extern struct static_key_false_deferred kvm_xen_enabled; |
18 | |
19 | int __kvm_xen_has_interrupt(struct kvm_vcpu *vcpu); |
20 | void kvm_xen_inject_pending_events(struct kvm_vcpu *vcpu); |
21 | void kvm_xen_inject_vcpu_vector(struct kvm_vcpu *vcpu); |
22 | int kvm_xen_vcpu_set_attr(struct kvm_vcpu *vcpu, struct kvm_xen_vcpu_attr *data); |
23 | int kvm_xen_vcpu_get_attr(struct kvm_vcpu *vcpu, struct kvm_xen_vcpu_attr *data); |
24 | int kvm_xen_hvm_set_attr(struct kvm *kvm, struct kvm_xen_hvm_attr *data); |
25 | int kvm_xen_hvm_get_attr(struct kvm *kvm, struct kvm_xen_hvm_attr *data); |
26 | int kvm_xen_hvm_evtchn_send(struct kvm *kvm, struct kvm_irq_routing_xen_evtchn *evt); |
27 | int kvm_xen_write_hypercall_page(struct kvm_vcpu *vcpu, u64 data); |
28 | int kvm_xen_hvm_config(struct kvm *kvm, struct kvm_xen_hvm_config *xhc); |
29 | void kvm_xen_init_vm(struct kvm *kvm); |
30 | void kvm_xen_destroy_vm(struct kvm *kvm); |
31 | void kvm_xen_init_vcpu(struct kvm_vcpu *vcpu); |
32 | void kvm_xen_destroy_vcpu(struct kvm_vcpu *vcpu); |
33 | int kvm_xen_set_evtchn_fast(struct kvm_xen_evtchn *xe, |
34 | struct kvm *kvm); |
35 | int kvm_xen_setup_evtchn(struct kvm *kvm, |
36 | struct kvm_kernel_irq_routing_entry *e, |
37 | const struct kvm_irq_routing_entry *ue); |
38 | void kvm_xen_update_tsc_info(struct kvm_vcpu *vcpu); |
39 | |
40 | static inline void kvm_xen_sw_enable_lapic(struct kvm_vcpu *vcpu) |
41 | { |
42 | /* |
43 | * The local APIC is being enabled. If the per-vCPU upcall vector is |
44 | * set and the vCPU's evtchn_upcall_pending flag is set, inject the |
45 | * interrupt. |
46 | */ |
47 | if (static_branch_unlikely(&kvm_xen_enabled.key) && |
48 | vcpu->arch.xen.vcpu_info_cache.active && |
49 | vcpu->arch.xen.upcall_vector && __kvm_xen_has_interrupt(vcpu)) |
50 | kvm_xen_inject_vcpu_vector(vcpu); |
51 | } |
52 | |
53 | static inline bool kvm_xen_msr_enabled(struct kvm *kvm) |
54 | { |
55 | return static_branch_unlikely(&kvm_xen_enabled.key) && |
56 | kvm->arch.xen_hvm_config.msr; |
57 | } |
58 | |
59 | static inline bool kvm_xen_hypercall_enabled(struct kvm *kvm) |
60 | { |
61 | return static_branch_unlikely(&kvm_xen_enabled.key) && |
62 | (kvm->arch.xen_hvm_config.flags & |
63 | KVM_XEN_HVM_CONFIG_INTERCEPT_HCALL); |
64 | } |
65 | |
66 | static inline int kvm_xen_has_interrupt(struct kvm_vcpu *vcpu) |
67 | { |
68 | if (static_branch_unlikely(&kvm_xen_enabled.key) && |
69 | vcpu->arch.xen.vcpu_info_cache.active && |
70 | vcpu->kvm->arch.xen.upcall_vector) |
71 | return __kvm_xen_has_interrupt(vcpu); |
72 | |
73 | return 0; |
74 | } |
75 | |
76 | static inline bool kvm_xen_has_pending_events(struct kvm_vcpu *vcpu) |
77 | { |
78 | return static_branch_unlikely(&kvm_xen_enabled.key) && |
79 | vcpu->arch.xen.evtchn_pending_sel; |
80 | } |
81 | |
82 | static inline bool kvm_xen_timer_enabled(struct kvm_vcpu *vcpu) |
83 | { |
84 | return !!vcpu->arch.xen.timer_virq; |
85 | } |
86 | |
87 | static inline int kvm_xen_has_pending_timer(struct kvm_vcpu *vcpu) |
88 | { |
89 | if (kvm_xen_hypercall_enabled(kvm: vcpu->kvm) && kvm_xen_timer_enabled(vcpu)) |
90 | return atomic_read(v: &vcpu->arch.xen.timer_pending); |
91 | |
92 | return 0; |
93 | } |
94 | |
95 | void kvm_xen_inject_timer_irqs(struct kvm_vcpu *vcpu); |
96 | #else |
/*
 * Stubs for !CONFIG_KVM_XEN: Xen emulation is compiled out, so every
 * query reports "disabled" / "nothing pending" and every hook is a no-op.
 */
static inline int kvm_xen_write_hypercall_page(struct kvm_vcpu *vcpu, u64 data)
{
	/* Nonzero: the hypercall-page MSR write is never handled here. */
	return 1;
}

static inline void kvm_xen_init_vm(struct kvm *kvm)
{
}

static inline void kvm_xen_destroy_vm(struct kvm *kvm)
{
}

static inline void kvm_xen_init_vcpu(struct kvm_vcpu *vcpu)
{
}

static inline void kvm_xen_destroy_vcpu(struct kvm_vcpu *vcpu)
{
}

static inline void kvm_xen_sw_enable_lapic(struct kvm_vcpu *vcpu)
{
}

static inline bool kvm_xen_msr_enabled(struct kvm *kvm)
{
	return false;
}

static inline bool kvm_xen_hypercall_enabled(struct kvm *kvm)
{
	return false;
}

static inline int kvm_xen_has_interrupt(struct kvm_vcpu *vcpu)
{
	return 0;
}

static inline void kvm_xen_inject_pending_events(struct kvm_vcpu *vcpu)
{
}

static inline bool kvm_xen_has_pending_events(struct kvm_vcpu *vcpu)
{
	return false;
}

static inline int kvm_xen_has_pending_timer(struct kvm_vcpu *vcpu)
{
	return 0;
}

static inline void kvm_xen_inject_timer_irqs(struct kvm_vcpu *vcpu)
{
}

static inline bool kvm_xen_timer_enabled(struct kvm_vcpu *vcpu)
{
	return false;
}

static inline void kvm_xen_update_tsc_info(struct kvm_vcpu *vcpu)
{
}
163 | #endif |
164 | |
165 | int kvm_xen_hypercall(struct kvm_vcpu *vcpu); |
166 | |
167 | #include <asm/pvclock-abi.h> |
168 | #include <asm/xen/interface.h> |
169 | #include <xen/interface/vcpu.h> |
170 | |
171 | void kvm_xen_update_runstate(struct kvm_vcpu *vcpu, int state); |
172 | |
/* Account the vCPU as RUNSTATE_running in the guest-visible runstate area. */
static inline void kvm_xen_runstate_set_running(struct kvm_vcpu *vcpu)
{
	kvm_xen_update_runstate(vcpu, RUNSTATE_running);
}
177 | |
/* Account the vCPU as RUNSTATE_runnable when it was actually preempted. */
static inline void kvm_xen_runstate_set_preempted(struct kvm_vcpu *vcpu)
{
	/*
	 * If the vCPU wasn't preempted but took a normal exit for
	 * some reason (hypercalls, I/O, etc.), that is accounted as
	 * still RUNSTATE_running, as the VMM is still operating on
	 * behalf of the vCPU. Only if the VMM does actually block
	 * does it need to enter RUNSTATE_blocked.
	 */
	if (WARN_ON_ONCE(!vcpu->preempted))
		return;

	kvm_xen_update_runstate(vcpu, RUNSTATE_runnable);
}
192 | |
193 | /* 32-bit compatibility definitions, also used natively in 32-bit build */ |
/* 32-bit layout of Xen's arch_vcpu_info; ABI — do not reorder or resize. */
struct compat_arch_vcpu_info {
	unsigned int cr2;	/* guest CR2 at last fault (32-bit) */
	unsigned int pad[5];
};
198 | |
/* 32-bit layout of Xen's vcpu_info; ABI — layout must match the guest. */
struct compat_vcpu_info {
	uint8_t evtchn_upcall_pending;	/* event upcall pending flag */
	uint8_t evtchn_upcall_mask;	/* event upcall masked flag */
	uint16_t pad;
	uint32_t evtchn_pending_sel;	/* 2-level selector (32-bit words) */
	struct compat_arch_vcpu_info arch;
	struct pvclock_vcpu_time_info time;
}; /* 64 bytes (x86) */
207 | |
/* 32-bit layout of Xen's arch_shared_info; ABI — do not reorder. */
struct compat_arch_shared_info {
	unsigned int max_pfn;
	unsigned int pfn_to_mfn_frame_list_list;
	unsigned int nmi_reason;
	unsigned int p2m_cr3;
	unsigned int p2m_vaddr;
	unsigned int p2m_generation;
	uint32_t wc_sec_hi;	/* high word of 64-bit wallclock seconds */
};
217 | |
/* 32-bit layout of the Xen shared_info page; ABI — layout is fixed. */
struct compat_shared_info {
	struct compat_vcpu_info vcpu_info[MAX_VIRT_CPUS];
	uint32_t evtchn_pending[32];	/* 2-level event bitmap, 1024 bits */
	uint32_t evtchn_mask[32];
	struct pvclock_wall_clock wc;
	struct compat_arch_shared_info arch;
};

/* Event channels addressable by the 32-bit 2-level ABI: 8 * 128 = 1024. */
#define COMPAT_EVTCHN_2L_NR_CHANNELS (8 *				\
				      sizeof_field(struct compat_shared_info, \
						   evtchn_pending))
/*
 * 32-bit layout of Xen's vcpu_runstate_info. Packed: the 32-bit ABI has
 * no padding after 'state', unlike the naturally-aligned 64-bit layout.
 */
struct compat_vcpu_runstate_info {
	int state;			/* current RUNSTATE_* value */
	uint64_t state_entry_time;	/* time the current state was entered */
	uint64_t time[4];		/* cumulative ns per runstate */
} __attribute__((packed));
234 | |
/* 32-bit layout of Xen's SCHEDOP_poll argument. */
struct compat_sched_poll {
	/* This is actually a guest virtual address which points to ports. */
	uint32_t ports;
	unsigned int nr_ports;	/* number of event-channel ports at 'ports' */
	uint64_t timeout;	/* absolute timeout in ns */
};
241 | |
242 | #endif /* __ARCH_X86_KVM_XEN_H__ */ |
243 | |