// SPDX-License-Identifier: GPL-2.0-only

/*
 * Local APIC virtualization
 *
 * Copyright (C) 2006 Qumranet, Inc.
 * Copyright (C) 2007 Novell
 * Copyright (C) 2007 Intel
 * Copyright 2009 Red Hat, Inc. and/or its affiliates.
 *
 * Authors:
 *   Dor Laor <dor.laor@qumranet.com>
 *   Gregory Haskins <ghaskins@novell.com>
 *   Yaozu (Eddie) Dong <eddie.dong@intel.com>
 *
 * Based on Xen 3.1 code, Copyright (c) 2004, Intel Corporation.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/kvm_host.h>
#include <linux/kvm.h>
#include <linux/mm.h>
#include <linux/highmem.h>
#include <linux/smp.h>
#include <linux/hrtimer.h>
#include <linux/io.h>
#include <linux/export.h>
#include <linux/math64.h>
#include <linux/slab.h>
#include <asm/processor.h>
#include <asm/mce.h>
#include <asm/msr.h>
#include <asm/page.h>
#include <asm/current.h>
#include <asm/apicdef.h>
#include <asm/delay.h>
#include <linux/atomic.h>
#include <linux/jump_label.h>
#include "kvm_cache_regs.h"
#include "irq.h"
#include "ioapic.h"
#include "trace.h"
#include "x86.h"
#include "xen.h"
#include "cpuid.h"
#include "hyperv.h"
#include "smm.h"

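/*
 * On 32-bit kernels a 64-bit '%' would be lowered to a libgcc helper that
 * the kernel doesn't provide, so derive the remainder from div64_u64()
 * instead; 64-bit kernels can use '%' directly.
 */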
#ifndef CONFIG_X86_64
#define mod_64(x, y) ((x) - (y) * div64_u64(x, y))
#else
#define mod_64(x, y) ((x) % (y))
#endif

/* 14 is the version for Xeon and Pentium 8.4.8 */
#define APIC_VERSION 0x14UL
#define LAPIC_MMIO_LENGTH (1 << 12)
/* the following define is not in apicdef.h */
#define MAX_APIC_VECTOR 256
#define APIC_VECTORS_PER_REG 32

static bool lapic_timer_advance_dynamic __read_mostly;
#define LAPIC_TIMER_ADVANCE_ADJUST_MIN 100 /* clock cycles */
#define LAPIC_TIMER_ADVANCE_ADJUST_MAX 10000 /* clock cycles */
#define LAPIC_TIMER_ADVANCE_NS_INIT 1000
#define LAPIC_TIMER_ADVANCE_NS_MAX 5000
/* step-by-step approximation to mitigate fluctuation */
#define LAPIC_TIMER_ADVANCE_ADJUST_STEP 8
static int kvm_lapic_msr_read(struct kvm_lapic *apic, u32 reg, u64 *data);
static int kvm_lapic_msr_write(struct kvm_lapic *apic, u32 reg, u64 data);

static inline void __kvm_lapic_set_reg(char *regs, int reg_off, u32 val)
{
	*((u32 *) (regs + reg_off)) = val;
}

static inline void kvm_lapic_set_reg(struct kvm_lapic *apic, int reg_off, u32 val)
{
	__kvm_lapic_set_reg(apic->regs, reg_off, val);
}

static __always_inline u64 __kvm_lapic_get_reg64(char *regs, int reg)
{
	BUILD_BUG_ON(reg != APIC_ICR);
	return *((u64 *) (regs + reg));
}

static __always_inline u64 kvm_lapic_get_reg64(struct kvm_lapic *apic, int reg)
{
	return __kvm_lapic_get_reg64(apic->regs, reg);
}

static __always_inline void __kvm_lapic_set_reg64(char *regs, int reg, u64 val)
{
	BUILD_BUG_ON(reg != APIC_ICR);
	*((u64 *) (regs + reg)) = val;
}

static __always_inline void kvm_lapic_set_reg64(struct kvm_lapic *apic,
						int reg, u64 val)
{
	__kvm_lapic_set_reg64(apic->regs, reg, val);
}

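/*
 * The IRR, ISR and TMR are 256-bit registers stored as eight 32-bit chunks
 * spaced 16 bytes apart in the register page: VEC_POS() is the bit index
 * within a chunk (vec & 31) and REG_POS() the byte offset of the chunk
 * ((vec >> 5) << 4).
 */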
static inline int apic_test_vector(int vec, void *bitmap)
{
	return test_bit(VEC_POS(vec), (bitmap) + REG_POS(vec));
}

bool kvm_apic_pending_eoi(struct kvm_vcpu *vcpu, int vector)
{
	struct kvm_lapic *apic = vcpu->arch.apic;

	return apic_test_vector(vector, apic->regs + APIC_ISR) ||
		apic_test_vector(vector, apic->regs + APIC_IRR);
}

static inline int __apic_test_and_set_vector(int vec, void *bitmap)
{
	return __test_and_set_bit(VEC_POS(vec), (bitmap) + REG_POS(vec));
}

static inline int __apic_test_and_clear_vector(int vec, void *bitmap)
{
	return __test_and_clear_bit(VEC_POS(vec), (bitmap) + REG_POS(vec));
}

__read_mostly DEFINE_STATIC_KEY_FALSE(kvm_has_noapic_vcpu);
EXPORT_SYMBOL_GPL(kvm_has_noapic_vcpu);

__read_mostly DEFINE_STATIC_KEY_DEFERRED_FALSE(apic_hw_disabled, HZ);
__read_mostly DEFINE_STATIC_KEY_DEFERRED_FALSE(apic_sw_disabled, HZ);

static inline int apic_enabled(struct kvm_lapic *apic)
{
	return kvm_apic_sw_enabled(apic) && kvm_apic_hw_enabled(apic);
}

#define LVT_MASK	\
	(APIC_LVT_MASKED | APIC_SEND_PENDING | APIC_VECTOR_MASK)

#define LINT_MASK	\
	(LVT_MASK | APIC_MODE_MASK | APIC_INPUT_POLARITY | \
	 APIC_LVT_REMOTE_IRR | APIC_LVT_LEVEL_TRIGGER)

static inline u32 kvm_x2apic_id(struct kvm_lapic *apic)
{
	return apic->vcpu->vcpu_id;
}

static bool kvm_can_post_timer_interrupt(struct kvm_vcpu *vcpu)
{
	return pi_inject_timer && kvm_vcpu_apicv_active(vcpu) &&
		(kvm_mwait_in_guest(vcpu->kvm) || kvm_hlt_in_guest(vcpu->kvm));
}

bool kvm_can_use_hv_timer(struct kvm_vcpu *vcpu)
{
	return kvm_x86_ops.set_hv_timer
	       && !(kvm_mwait_in_guest(vcpu->kvm) ||
		    kvm_can_post_timer_interrupt(vcpu));
}

static bool kvm_use_posted_timer_interrupt(struct kvm_vcpu *vcpu)
{
	return kvm_can_post_timer_interrupt(vcpu) && vcpu->mode == IN_GUEST_MODE;
}

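/*
 * The x2APIC LDR is derived from the x2APIC ID: bits 31:16 hold the cluster
 * (ID / 16) and bits 15:0 a one-hot position within the cluster
 * (1 << (ID % 16)), e.g. x2APIC ID 0x25 yields LDR 0x00020020.
 */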
static inline u32 kvm_apic_calc_x2apic_ldr(u32 id)
{
	return ((id >> 4) << 16) | (1 << (id & 0xf));
}

static inline bool kvm_apic_map_get_logical_dest(struct kvm_apic_map *map,
		u32 dest_id, struct kvm_lapic ***cluster, u16 *mask) {
	switch (map->logical_mode) {
	case KVM_APIC_MODE_SW_DISABLED:
		/* Arbitrarily use the flat map so that @cluster isn't NULL. */
		*cluster = map->xapic_flat_map;
		*mask = 0;
		return true;
	case KVM_APIC_MODE_X2APIC: {
		u32 offset = (dest_id >> 16) * 16;
		u32 max_apic_id = map->max_apic_id;

		if (offset <= max_apic_id) {
			u8 cluster_size = min(max_apic_id - offset + 1, 16U);

			offset = array_index_nospec(offset, map->max_apic_id + 1);
			*cluster = &map->phys_map[offset];
			*mask = dest_id & (0xffff >> (16 - cluster_size));
		} else {
			*mask = 0;
		}

		return true;
		}
	case KVM_APIC_MODE_XAPIC_FLAT:
		*cluster = map->xapic_flat_map;
		*mask = dest_id & 0xff;
		return true;
	case KVM_APIC_MODE_XAPIC_CLUSTER:
		*cluster = map->xapic_cluster_map[(dest_id >> 4) & 0xf];
		*mask = dest_id & 0xf;
		return true;
	case KVM_APIC_MODE_MAP_DISABLED:
		return false;
	default:
		WARN_ON_ONCE(1);
		return false;
	}
}

static void kvm_apic_map_free(struct rcu_head *rcu)
{
	struct kvm_apic_map *map = container_of(rcu, struct kvm_apic_map, rcu);

	kvfree(map);
}

static int kvm_recalculate_phys_map(struct kvm_apic_map *new,
				    struct kvm_vcpu *vcpu,
				    bool *xapic_id_mismatch)
{
	struct kvm_lapic *apic = vcpu->arch.apic;
	u32 x2apic_id = kvm_x2apic_id(apic);
	u32 xapic_id = kvm_xapic_id(apic);
	u32 physical_id;

	/*
	 * For simplicity, KVM always allocates enough space for all possible
	 * xAPIC IDs. Yell, but don't kill the VM, as KVM can continue on
	 * without the optimized map.
	 */
	if (WARN_ON_ONCE(xapic_id > new->max_apic_id))
		return -EINVAL;

	/*
	 * Bail if a vCPU was added and/or enabled its APIC between allocating
	 * the map and doing the actual calculations for the map. Note, KVM
	 * hardcodes the x2APIC ID to vcpu_id, i.e. there's no TOCTOU bug if
	 * the compiler decides to reload x2apic_id after this check.
	 */
	if (x2apic_id > new->max_apic_id)
		return -E2BIG;

	/*
	 * Deliberately truncate the vCPU ID when detecting a mismatched APIC
	 * ID to avoid false positives if the vCPU ID, i.e. x2APIC ID, is a
	 * 32-bit value. Any unwanted aliasing due to truncation results will
	 * be detected below.
	 */
	if (!apic_x2apic_mode(apic) && xapic_id != (u8)vcpu->vcpu_id)
		*xapic_id_mismatch = true;

	/*
	 * Apply KVM's hotplug hack if userspace has enabled 32-bit APIC IDs.
	 * Allow sending events to vCPUs by their x2APIC ID even if the target
	 * vCPU is in legacy xAPIC mode, and silently ignore aliased xAPIC IDs
	 * (the x2APIC ID is truncated to 8 bits, causing IDs > 0xff to wrap
	 * and collide).
	 *
	 * Honor the architectural (and KVM's non-optimized) behavior if
	 * userspace has not enabled 32-bit x2APIC IDs. Each APIC is supposed
	 * to process messages independently. If multiple vCPUs have the same
	 * effective APIC ID, e.g. due to the x2APIC wrap or because the guest
	 * manually modified its xAPIC IDs, events targeting that ID are
	 * supposed to be recognized by all vCPUs with said ID.
	 */
	if (vcpu->kvm->arch.x2apic_format) {
		/* See also kvm_apic_match_physical_addr(). */
		if (apic_x2apic_mode(apic) || x2apic_id > 0xff)
			new->phys_map[x2apic_id] = apic;

		if (!apic_x2apic_mode(apic) && !new->phys_map[xapic_id])
			new->phys_map[xapic_id] = apic;
	} else {
		/*
		 * Disable the optimized map if the physical APIC ID is already
		 * mapped, i.e. is aliased to multiple vCPUs. The optimized
		 * map requires a strict 1:1 mapping between IDs and vCPUs.
		 */
		if (apic_x2apic_mode(apic))
			physical_id = x2apic_id;
		else
			physical_id = xapic_id;

		if (new->phys_map[physical_id])
			return -EINVAL;

		new->phys_map[physical_id] = apic;
	}

	return 0;
}

static void kvm_recalculate_logical_map(struct kvm_apic_map *new,
					struct kvm_vcpu *vcpu)
{
	struct kvm_lapic *apic = vcpu->arch.apic;
	enum kvm_apic_logical_mode logical_mode;
	struct kvm_lapic **cluster;
	u16 mask;
	u32 ldr;

	if (new->logical_mode == KVM_APIC_MODE_MAP_DISABLED)
		return;

	if (!kvm_apic_sw_enabled(apic))
		return;

	ldr = kvm_lapic_get_reg(apic, APIC_LDR);
	if (!ldr)
		return;

	if (apic_x2apic_mode(apic)) {
		logical_mode = KVM_APIC_MODE_X2APIC;
	} else {
		ldr = GET_APIC_LOGICAL_ID(ldr);
		if (kvm_lapic_get_reg(apic, APIC_DFR) == APIC_DFR_FLAT)
			logical_mode = KVM_APIC_MODE_XAPIC_FLAT;
		else
			logical_mode = KVM_APIC_MODE_XAPIC_CLUSTER;
	}

	/*
	 * To optimize logical mode delivery, all software-enabled APICs must
	 * be configured for the same mode.
	 */
	if (new->logical_mode == KVM_APIC_MODE_SW_DISABLED) {
		new->logical_mode = logical_mode;
	} else if (new->logical_mode != logical_mode) {
		new->logical_mode = KVM_APIC_MODE_MAP_DISABLED;
		return;
	}

	/*
	 * In x2APIC mode, the LDR is read-only and derived directly from the
	 * x2APIC ID, thus is guaranteed to be addressable. KVM reuses
	 * kvm_apic_map.phys_map to optimize logical mode x2APIC interrupts by
	 * reversing the LDR calculation to get cluster of APICs, i.e. no
	 * additional work is required.
	 */
	if (apic_x2apic_mode(apic)) {
		WARN_ON_ONCE(ldr != kvm_apic_calc_x2apic_ldr(kvm_x2apic_id(apic)));
		return;
	}

	if (WARN_ON_ONCE(!kvm_apic_map_get_logical_dest(new, ldr,
							&cluster, &mask))) {
		new->logical_mode = KVM_APIC_MODE_MAP_DISABLED;
		return;
	}

	if (!mask)
		return;

	ldr = ffs(mask) - 1;
	if (!is_power_of_2(mask) || cluster[ldr])
		new->logical_mode = KVM_APIC_MODE_MAP_DISABLED;
	else
		cluster[ldr] = apic;
}

/*
 * CLEAN -> DIRTY and UPDATE_IN_PROGRESS -> DIRTY changes happen without a lock.
 *
 * DIRTY -> UPDATE_IN_PROGRESS and UPDATE_IN_PROGRESS -> CLEAN happen with
 * apic_map_lock_held.
 */
enum {
	CLEAN,
	UPDATE_IN_PROGRESS,
	DIRTY
};

void kvm_recalculate_apic_map(struct kvm *kvm)
{
	struct kvm_apic_map *new, *old = NULL;
	struct kvm_vcpu *vcpu;
	unsigned long i;
	u32 max_id = 255; /* enough space for any xAPIC ID */
	bool xapic_id_mismatch;
	int r;

	/* Read kvm->arch.apic_map_dirty before kvm->arch.apic_map. */
	if (atomic_read_acquire(&kvm->arch.apic_map_dirty) == CLEAN)
		return;

	WARN_ONCE(!irqchip_in_kernel(kvm),
		  "Dirty APIC map without an in-kernel local APIC");

	mutex_lock(&kvm->arch.apic_map_lock);

retry:
	/*
	 * Read kvm->arch.apic_map_dirty before kvm->arch.apic_map (if clean)
	 * or the APIC registers (if dirty). Note, on retry the map may have
	 * not yet been marked dirty by whatever task changed a vCPU's x2APIC
	 * ID, i.e. the map may still show up as in-progress. In that case
	 * this task still needs to retry and complete its calculation.
	 */
	if (atomic_cmpxchg_acquire(&kvm->arch.apic_map_dirty,
				   DIRTY, UPDATE_IN_PROGRESS) == CLEAN) {
		/* Someone else has updated the map. */
		mutex_unlock(&kvm->arch.apic_map_lock);
		return;
	}

	/*
	 * Reset the mismatch flag between attempts so that KVM does the right
	 * thing if a vCPU changes its xAPIC ID, but do NOT reset max_id, i.e.
	 * keep max_id strictly increasing. Disallowing max_id from shrinking
	 * ensures KVM won't get stuck in an infinite loop, e.g. if the vCPU
	 * with the highest x2APIC ID is toggling its APIC on and off.
	 */
	xapic_id_mismatch = false;

	kvm_for_each_vcpu(i, vcpu, kvm)
		if (kvm_apic_present(vcpu))
			max_id = max(max_id, kvm_x2apic_id(vcpu->arch.apic));

	new = kvzalloc(sizeof(struct kvm_apic_map) +
		       sizeof(struct kvm_lapic *) * ((u64)max_id + 1),
		       GFP_KERNEL_ACCOUNT);

	if (!new)
		goto out;

	new->max_apic_id = max_id;
	new->logical_mode = KVM_APIC_MODE_SW_DISABLED;

	kvm_for_each_vcpu(i, vcpu, kvm) {
		if (!kvm_apic_present(vcpu))
			continue;

		r = kvm_recalculate_phys_map(new, vcpu, &xapic_id_mismatch);
		if (r) {
			kvfree(new);
			new = NULL;
			if (r == -E2BIG) {
				cond_resched();
				goto retry;
			}

			goto out;
		}

		kvm_recalculate_logical_map(new, vcpu);
	}
out:
	/*
	 * The optimized map is effectively KVM's internal version of APICv,
	 * and all unwanted aliasing that results in disabling the optimized
	 * map also applies to APICv.
	 */
	if (!new)
		kvm_set_apicv_inhibit(kvm, APICV_INHIBIT_REASON_PHYSICAL_ID_ALIASED);
	else
		kvm_clear_apicv_inhibit(kvm, APICV_INHIBIT_REASON_PHYSICAL_ID_ALIASED);

	if (!new || new->logical_mode == KVM_APIC_MODE_MAP_DISABLED)
		kvm_set_apicv_inhibit(kvm, APICV_INHIBIT_REASON_LOGICAL_ID_ALIASED);
	else
		kvm_clear_apicv_inhibit(kvm, APICV_INHIBIT_REASON_LOGICAL_ID_ALIASED);

	if (xapic_id_mismatch)
		kvm_set_apicv_inhibit(kvm, APICV_INHIBIT_REASON_APIC_ID_MODIFIED);
	else
		kvm_clear_apicv_inhibit(kvm, APICV_INHIBIT_REASON_APIC_ID_MODIFIED);

	old = rcu_dereference_protected(kvm->arch.apic_map,
			lockdep_is_held(&kvm->arch.apic_map_lock));
	rcu_assign_pointer(kvm->arch.apic_map, new);
	/*
	 * Write kvm->arch.apic_map before clearing apic->apic_map_dirty.
	 * If another update has come in, leave it DIRTY.
	 */
	atomic_cmpxchg_release(&kvm->arch.apic_map_dirty,
			       UPDATE_IN_PROGRESS, CLEAN);
	mutex_unlock(&kvm->arch.apic_map_lock);

	if (old)
		call_rcu(&old->rcu, kvm_apic_map_free);

	kvm_make_scan_ioapic_request(kvm);
}

static inline void apic_set_spiv(struct kvm_lapic *apic, u32 val)
{
	bool enabled = val & APIC_SPIV_APIC_ENABLED;

	kvm_lapic_set_reg(apic, APIC_SPIV, val);

	if (enabled != apic->sw_enabled) {
		apic->sw_enabled = enabled;
		if (enabled)
			static_branch_slow_dec_deferred(&apic_sw_disabled);
		else
			static_branch_inc(&apic_sw_disabled.key);

		atomic_set_release(&apic->vcpu->kvm->arch.apic_map_dirty, DIRTY);
	}

	/* Check if there are APF page ready requests pending */
	if (enabled) {
		kvm_make_request(KVM_REQ_APF_READY, apic->vcpu);
		kvm_xen_sw_enable_lapic(apic->vcpu);
	}
}

static inline void kvm_apic_set_xapic_id(struct kvm_lapic *apic, u8 id)
{
	kvm_lapic_set_reg(apic, APIC_ID, id << 24);
	atomic_set_release(&apic->vcpu->kvm->arch.apic_map_dirty, DIRTY);
}

static inline void kvm_apic_set_ldr(struct kvm_lapic *apic, u32 id)
{
	kvm_lapic_set_reg(apic, APIC_LDR, id);
	atomic_set_release(&apic->vcpu->kvm->arch.apic_map_dirty, DIRTY);
}

static inline void kvm_apic_set_dfr(struct kvm_lapic *apic, u32 val)
{
	kvm_lapic_set_reg(apic, APIC_DFR, val);
	atomic_set_release(&apic->vcpu->kvm->arch.apic_map_dirty, DIRTY);
}

static inline void kvm_apic_set_x2apic_id(struct kvm_lapic *apic, u32 id)
{
	u32 ldr = kvm_apic_calc_x2apic_ldr(id);

	WARN_ON_ONCE(id != apic->vcpu->vcpu_id);

	kvm_lapic_set_reg(apic, APIC_ID, id);
	kvm_lapic_set_reg(apic, APIC_LDR, ldr);
	atomic_set_release(&apic->vcpu->kvm->arch.apic_map_dirty, DIRTY);
}

static inline int apic_lvt_enabled(struct kvm_lapic *apic, int lvt_type)
{
	return !(kvm_lapic_get_reg(apic, lvt_type) & APIC_LVT_MASKED);
}

static inline int apic_lvtt_oneshot(struct kvm_lapic *apic)
{
	return apic->lapic_timer.timer_mode == APIC_LVT_TIMER_ONESHOT;
}

static inline int apic_lvtt_period(struct kvm_lapic *apic)
{
	return apic->lapic_timer.timer_mode == APIC_LVT_TIMER_PERIODIC;
}

static inline int apic_lvtt_tscdeadline(struct kvm_lapic *apic)
{
	return apic->lapic_timer.timer_mode == APIC_LVT_TIMER_TSCDEADLINE;
}

static inline int apic_lvt_nmi_mode(u32 lvt_val)
{
	return (lvt_val & (APIC_MODE_MASK | APIC_LVT_MASKED)) == APIC_DM_NMI;
}

static inline bool kvm_lapic_lvt_supported(struct kvm_lapic *apic, int lvt_index)
{
	return apic->nr_lvt_entries > lvt_index;
}

static inline int kvm_apic_calc_nr_lvt_entries(struct kvm_vcpu *vcpu)
{
	return KVM_APIC_MAX_NR_LVT_ENTRIES - !(vcpu->arch.mcg_cap & MCG_CMCI_P);
}

void kvm_apic_set_version(struct kvm_vcpu *vcpu)
{
	struct kvm_lapic *apic = vcpu->arch.apic;
	u32 v = 0;

	if (!lapic_in_kernel(vcpu))
		return;

	v = APIC_VERSION | ((apic->nr_lvt_entries - 1) << 16);

	/*
	 * KVM emulates 82093AA datasheet (with in-kernel IOAPIC implementation)
	 * which doesn't have EOI register; Some buggy OSes (e.g. Windows with
	 * Hyper-V role) disable EOI broadcast in lapic not checking for IOAPIC
	 * version first and level-triggered interrupts never get EOIed in
	 * IOAPIC.
	 */
	if (guest_cpuid_has(vcpu, X86_FEATURE_X2APIC) &&
	    !ioapic_in_kernel(vcpu->kvm))
		v |= APIC_LVR_DIRECTED_EOI;
	kvm_lapic_set_reg(apic, APIC_LVR, v);
}

void kvm_apic_after_set_mcg_cap(struct kvm_vcpu *vcpu)
{
	int nr_lvt_entries = kvm_apic_calc_nr_lvt_entries(vcpu);
	struct kvm_lapic *apic = vcpu->arch.apic;
	int i;

	if (!lapic_in_kernel(vcpu) || nr_lvt_entries == apic->nr_lvt_entries)
		return;

	/* Initialize/mask any "new" LVT entries. */
	for (i = apic->nr_lvt_entries; i < nr_lvt_entries; i++)
		kvm_lapic_set_reg(apic, APIC_LVTx(i), APIC_LVT_MASKED);

	apic->nr_lvt_entries = nr_lvt_entries;

	/* The number of LVT entries is reflected in the version register. */
	kvm_apic_set_version(vcpu);
}

static const unsigned int apic_lvt_mask[KVM_APIC_MAX_NR_LVT_ENTRIES] = {
	[LVT_TIMER] = LVT_MASK,	/* timer mode mask added at runtime */
	[LVT_THERMAL_MONITOR] = LVT_MASK | APIC_MODE_MASK,
	[LVT_PERFORMANCE_COUNTER] = LVT_MASK | APIC_MODE_MASK,
	[LVT_LINT0] = LINT_MASK,
	[LVT_LINT1] = LINT_MASK,
	[LVT_ERROR] = LVT_MASK,
	[LVT_CMCI] = LVT_MASK | APIC_MODE_MASK
};

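/*
 * Scan the 256-bit vector bitmap from the top down; the first non-zero
 * 32-bit chunk holds the highest pending vector.
 */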
static int find_highest_vector(void *bitmap)
{
	int vec;
	u32 *reg;

	for (vec = MAX_APIC_VECTOR - APIC_VECTORS_PER_REG;
	     vec >= 0; vec -= APIC_VECTORS_PER_REG) {
		reg = bitmap + REG_POS(vec);
		if (*reg)
			return __fls(*reg) + vec;
	}

	return -1;
}

static u8 count_vectors(void *bitmap)
{
	int vec;
	u32 *reg;
	u8 count = 0;

	for (vec = 0; vec < MAX_APIC_VECTOR; vec += APIC_VECTORS_PER_REG) {
		reg = bitmap + REG_POS(vec);
		count += hweight32(*reg);
	}

	return count;
}

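/*
 * Transfer pending posted interrupts from the PIR into the vIRR: each
 * non-zero PIR chunk is claimed with xchg() and merged into the matching
 * IRR chunk in a cmpxchg loop, as the CPU may modify the vIRR while APICv
 * is active. Returns true iff the highest pending IRR vector is one that
 * was just moved over from the PIR.
 */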
bool __kvm_apic_update_irr(u32 *pir, void *regs, int *max_irr)
{
	u32 i, vec;
	u32 pir_val, irr_val, prev_irr_val;
	int max_updated_irr;

	max_updated_irr = -1;
	*max_irr = -1;

	for (i = vec = 0; i <= 7; i++, vec += 32) {
		u32 *p_irr = (u32 *)(regs + APIC_IRR + i * 0x10);

		irr_val = *p_irr;
		pir_val = READ_ONCE(pir[i]);

		if (pir_val) {
			pir_val = xchg(&pir[i], 0);

			prev_irr_val = irr_val;
			do {
				irr_val = prev_irr_val | pir_val;
			} while (prev_irr_val != irr_val &&
				 !try_cmpxchg(p_irr, &prev_irr_val, irr_val));

			if (prev_irr_val != irr_val)
				max_updated_irr = __fls(irr_val ^ prev_irr_val) + vec;
		}
		if (irr_val)
			*max_irr = __fls(irr_val) + vec;
	}

	return ((max_updated_irr != -1) &&
		(max_updated_irr == *max_irr));
}
EXPORT_SYMBOL_GPL(__kvm_apic_update_irr);

bool kvm_apic_update_irr(struct kvm_vcpu *vcpu, u32 *pir, int *max_irr)
{
	struct kvm_lapic *apic = vcpu->arch.apic;
	bool irr_updated = __kvm_apic_update_irr(pir, apic->regs, max_irr);

	if (unlikely(!apic->apicv_active && irr_updated))
		apic->irr_pending = true;
	return irr_updated;
}
EXPORT_SYMBOL_GPL(kvm_apic_update_irr);

static inline int apic_search_irr(struct kvm_lapic *apic)
{
	return find_highest_vector(apic->regs + APIC_IRR);
}

static inline int apic_find_highest_irr(struct kvm_lapic *apic)
{
	int result;

	/*
	 * Note that irr_pending is just a hint. It will always be true
	 * with virtual interrupt delivery enabled.
	 */
	if (!apic->irr_pending)
		return -1;

	result = apic_search_irr(apic);
	ASSERT(result == -1 || result >= 16);

	return result;
}

static inline void apic_clear_irr(int vec, struct kvm_lapic *apic)
{
	if (unlikely(apic->apicv_active)) {
		/* need to update RVI */
		kvm_lapic_clear_vector(vec, apic->regs + APIC_IRR);
		static_call_cond(kvm_x86_hwapic_irr_update)(apic->vcpu,
							    apic_find_highest_irr(apic));
	} else {
		apic->irr_pending = false;
		kvm_lapic_clear_vector(vec, apic->regs + APIC_IRR);
		if (apic_search_irr(apic) != -1)
			apic->irr_pending = true;
	}
}

void kvm_apic_clear_irr(struct kvm_vcpu *vcpu, int vec)
{
	apic_clear_irr(vec, vcpu->arch.apic);
}
EXPORT_SYMBOL_GPL(kvm_apic_clear_irr);

static inline void apic_set_isr(int vec, struct kvm_lapic *apic)
{
	if (__apic_test_and_set_vector(vec, apic->regs + APIC_ISR))
		return;

	/*
	 * With APIC virtualization enabled, all caching is disabled
	 * because the processor can modify ISR under the hood. Instead
	 * just set SVI.
	 */
	if (unlikely(apic->apicv_active))
		static_call_cond(kvm_x86_hwapic_isr_update)(vec);
	else {
		++apic->isr_count;
		BUG_ON(apic->isr_count > MAX_APIC_VECTOR);
		/*
		 * ISR (in service register) bit is set when injecting an interrupt.
		 * The highest vector is injected. Thus the latest bit set matches
		 * the highest bit in ISR.
		 */
		apic->highest_isr_cache = vec;
	}
}

static inline int apic_find_highest_isr(struct kvm_lapic *apic)
{
	int result;

	/*
	 * Note that isr_count is always 1, and highest_isr_cache
	 * is always -1, with APIC virtualization enabled.
	 */
	if (!apic->isr_count)
		return -1;
	if (likely(apic->highest_isr_cache != -1))
		return apic->highest_isr_cache;

	result = find_highest_vector(apic->regs + APIC_ISR);
	ASSERT(result == -1 || result >= 16);

	return result;
}

static inline void apic_clear_isr(int vec, struct kvm_lapic *apic)
{
	if (!__apic_test_and_clear_vector(vec, apic->regs + APIC_ISR))
		return;

	/*
	 * We do get here for APIC virtualization enabled if the guest
	 * uses the Hyper-V APIC enlightenment. In this case we may need
	 * to trigger a new interrupt delivery by writing the SVI field;
	 * on the other hand isr_count and highest_isr_cache are unused
	 * and must be left alone.
	 */
	if (unlikely(apic->apicv_active))
		static_call_cond(kvm_x86_hwapic_isr_update)(apic_find_highest_isr(apic));
	else {
		--apic->isr_count;
		BUG_ON(apic->isr_count < 0);
		apic->highest_isr_cache = -1;
	}
}

int kvm_lapic_find_highest_irr(struct kvm_vcpu *vcpu)
{
	/* This may race with setting of irr in __apic_accept_irq() and
	 * value returned may be wrong, but kvm_vcpu_kick() in __apic_accept_irq
	 * will cause vmexit immediately and the value will be recalculated
	 * on the next vmentry.
	 */
	return apic_find_highest_irr(vcpu->arch.apic);
}
EXPORT_SYMBOL_GPL(kvm_lapic_find_highest_irr);

static int __apic_accept_irq(struct kvm_lapic *apic, int delivery_mode,
			     int vector, int level, int trig_mode,
			     struct dest_map *dest_map);

int kvm_apic_set_irq(struct kvm_vcpu *vcpu, struct kvm_lapic_irq *irq,
		     struct dest_map *dest_map)
{
	struct kvm_lapic *apic = vcpu->arch.apic;

	return __apic_accept_irq(apic, irq->delivery_mode, irq->vector,
				 irq->level, irq->trig_mode, dest_map);
}

static int __pv_send_ipi(unsigned long *ipi_bitmap, struct kvm_apic_map *map,
			 struct kvm_lapic_irq *irq, u32 min)
{
	int i, count = 0;
	struct kvm_vcpu *vcpu;

	if (min > map->max_apic_id)
		return 0;

	for_each_set_bit(i, ipi_bitmap,
		min((u32)BITS_PER_LONG, (map->max_apic_id - min + 1))) {
		if (map->phys_map[min + i]) {
			vcpu = map->phys_map[min + i]->vcpu;
			count += kvm_apic_set_irq(vcpu, irq, NULL);
		}
	}

	return count;
}

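/*
 * KVM_HC_SEND_IPI: each set bit i in the low (high) bitmap targets the
 * vCPU with APIC ID min + i (min + cluster_size + i), where cluster_size
 * is 64 for 64-bit guests and 32 for 32-bit guests.
 */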
int kvm_pv_send_ipi(struct kvm *kvm, unsigned long ipi_bitmap_low,
		    unsigned long ipi_bitmap_high, u32 min,
		    unsigned long icr, int op_64_bit)
{
	struct kvm_apic_map *map;
	struct kvm_lapic_irq irq = {0};
	int cluster_size = op_64_bit ? 64 : 32;
	int count;

	if (icr & (APIC_DEST_MASK | APIC_SHORT_MASK))
		return -KVM_EINVAL;

	irq.vector = icr & APIC_VECTOR_MASK;
	irq.delivery_mode = icr & APIC_MODE_MASK;
	irq.level = (icr & APIC_INT_ASSERT) != 0;
	irq.trig_mode = icr & APIC_INT_LEVELTRIG;

	rcu_read_lock();
	map = rcu_dereference(kvm->arch.apic_map);

	count = -EOPNOTSUPP;
	if (likely(map)) {
		count = __pv_send_ipi(&ipi_bitmap_low, map, &irq, min);
		min += cluster_size;
		count += __pv_send_ipi(&ipi_bitmap_high, map, &irq, min);
	}

	rcu_read_unlock();
	return count;
}

static int pv_eoi_put_user(struct kvm_vcpu *vcpu, u8 val)
{
	return kvm_write_guest_cached(vcpu->kvm, &vcpu->arch.pv_eoi.data, &val,
				      sizeof(val));
}

static int pv_eoi_get_user(struct kvm_vcpu *vcpu, u8 *val)
{
	return kvm_read_guest_cached(vcpu->kvm, &vcpu->arch.pv_eoi.data, val,
				     sizeof(*val));
}

static inline bool pv_eoi_enabled(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.pv_eoi.msr_val & KVM_MSR_ENABLED;
}

static void pv_eoi_set_pending(struct kvm_vcpu *vcpu)
{
	if (pv_eoi_put_user(vcpu, KVM_PV_EOI_ENABLED) < 0)
		return;

	__set_bit(KVM_APIC_PV_EOI_PENDING, &vcpu->arch.apic_attention);
}

static bool pv_eoi_test_and_clr_pending(struct kvm_vcpu *vcpu)
{
	u8 val;

	if (pv_eoi_get_user(vcpu, &val) < 0)
		return false;

	val &= KVM_PV_EOI_ENABLED;

	if (val && pv_eoi_put_user(vcpu, KVM_PV_EOI_DISABLED) < 0)
		return false;

	/*
	 * Clear pending bit in any case: it will be set again on vmentry.
	 * While this might not be ideal from performance point of view,
	 * this makes sure pv eoi is only enabled when we know it's safe.
	 */
	__clear_bit(KVM_APIC_PV_EOI_PENDING, &vcpu->arch.apic_attention);

	return val;
}

static int apic_has_interrupt_for_ppr(struct kvm_lapic *apic, u32 ppr)
{
	int highest_irr;

	if (kvm_x86_ops.sync_pir_to_irr)
		highest_irr = static_call(kvm_x86_sync_pir_to_irr)(apic->vcpu);
	else
		highest_irr = apic_find_highest_irr(apic);
	if (highest_irr == -1 || (highest_irr & 0xF0) <= ppr)
		return -1;
	return highest_irr;
}

static bool __apic_update_ppr(struct kvm_lapic *apic, u32 *new_ppr)
{
	u32 tpr, isrv, ppr, old_ppr;
	int isr;

	old_ppr = kvm_lapic_get_reg(apic, APIC_PROCPRI);
	tpr = kvm_lapic_get_reg(apic, APIC_TASKPRI);
	isr = apic_find_highest_isr(apic);
	isrv = (isr != -1) ? isr : 0;

	if ((tpr & 0xf0) >= (isrv & 0xf0))
		ppr = tpr & 0xff;
	else
		ppr = isrv & 0xf0;

	*new_ppr = ppr;
	if (old_ppr != ppr)
		kvm_lapic_set_reg(apic, APIC_PROCPRI, ppr);

	return ppr < old_ppr;
}

static void apic_update_ppr(struct kvm_lapic *apic)
{
	u32 ppr;

	if (__apic_update_ppr(apic, &ppr) &&
	    apic_has_interrupt_for_ppr(apic, ppr) != -1)
		kvm_make_request(KVM_REQ_EVENT, apic->vcpu);
}

void kvm_apic_update_ppr(struct kvm_vcpu *vcpu)
{
	apic_update_ppr(vcpu->arch.apic);
}
EXPORT_SYMBOL_GPL(kvm_apic_update_ppr);

static void apic_set_tpr(struct kvm_lapic *apic, u32 tpr)
{
	kvm_lapic_set_reg(apic, APIC_TASKPRI, tpr);
	apic_update_ppr(apic);
}

static bool kvm_apic_broadcast(struct kvm_lapic *apic, u32 mda)
{
	return mda == (apic_x2apic_mode(apic) ?
			X2APIC_BROADCAST : APIC_BROADCAST);
}

static bool kvm_apic_match_physical_addr(struct kvm_lapic *apic, u32 mda)
{
	if (kvm_apic_broadcast(apic, mda))
		return true;

	/*
	 * Hotplug hack: Accept interrupts for vCPUs in xAPIC mode as if they
	 * were in x2APIC mode if the target APIC ID can't be encoded as an
	 * xAPIC ID. This allows unique addressing of hotplugged vCPUs (which
	 * start in xAPIC mode) with an APIC ID that is unaddressable in xAPIC
	 * mode. Match the x2APIC ID if and only if the target APIC ID can't
	 * be encoded in xAPIC to avoid spurious matches against a vCPU that
	 * changed its (addressable) xAPIC ID (which is writable).
	 */
	if (apic_x2apic_mode(apic) || mda > 0xff)
		return mda == kvm_x2apic_id(apic);

	return mda == kvm_xapic_id(apic);
}

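/*
 * In x2APIC mode the MDA is a 16-bit cluster ID plus a 16-bit in-cluster
 * bitmask; in xAPIC mode the 8-bit MDA is either a flat bitmask (flat DFR)
 * or a 4-bit cluster ID plus a 4-bit in-cluster bitmask (cluster DFR).
 */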
static bool kvm_apic_match_logical_addr(struct kvm_lapic *apic, u32 mda)
{
	u32 logical_id;

	if (kvm_apic_broadcast(apic, mda))
		return true;

	logical_id = kvm_lapic_get_reg(apic, APIC_LDR);

	if (apic_x2apic_mode(apic))
		return ((logical_id >> 16) == (mda >> 16))
		       && (logical_id & mda & 0xffff) != 0;

	logical_id = GET_APIC_LOGICAL_ID(logical_id);

	switch (kvm_lapic_get_reg(apic, APIC_DFR)) {
	case APIC_DFR_FLAT:
		return (logical_id & mda) != 0;
	case APIC_DFR_CLUSTER:
		return ((logical_id >> 4) == (mda >> 4))
		       && (logical_id & mda & 0xf) != 0;
	default:
		return false;
	}
}

/* The KVM local APIC implementation has two quirks:
 *
 *  - Real hardware delivers interrupts destined to x2APIC ID > 0xff to LAPICs
 *    in xAPIC mode if the "destination & 0xff" matches its xAPIC ID.
 *    KVM doesn't do that aliasing.
 *
 *  - in-kernel IOAPIC messages have to be delivered directly to
 *    x2APIC, because the kernel does not support interrupt remapping.
 *    In order to support broadcast without interrupt remapping, x2APIC
 *    rewrites the destination of non-IPI messages from APIC_BROADCAST
 *    to X2APIC_BROADCAST.
 *
 * The broadcast quirk can be disabled with KVM_CAP_X2APIC_API. This is
 * important when userspace wants to use x2APIC-format MSIs, because
 * APIC_BROADCAST (0xff) is a legal route for "cluster 0, CPUs 0-7".
 */
static u32 kvm_apic_mda(struct kvm_vcpu *vcpu, unsigned int dest_id,
		struct kvm_lapic *source, struct kvm_lapic *target)
{
	bool ipi = source != NULL;

	if (!vcpu->kvm->arch.x2apic_broadcast_quirk_disabled &&
	    !ipi && dest_id == APIC_BROADCAST && apic_x2apic_mode(target))
		return X2APIC_BROADCAST;

	return dest_id;
}

bool kvm_apic_match_dest(struct kvm_vcpu *vcpu, struct kvm_lapic *source,
			 int shorthand, unsigned int dest, int dest_mode)
{
	struct kvm_lapic *target = vcpu->arch.apic;
	u32 mda = kvm_apic_mda(vcpu, dest, source, target);

	ASSERT(target);
	switch (shorthand) {
	case APIC_DEST_NOSHORT:
		if (dest_mode == APIC_DEST_PHYSICAL)
			return kvm_apic_match_physical_addr(target, mda);
		else
			return kvm_apic_match_logical_addr(target, mda);
	case APIC_DEST_SELF:
		return target == source;
	case APIC_DEST_ALLINC:
		return true;
	case APIC_DEST_ALLBUT:
		return target != source;
	default:
		return false;
	}
}
EXPORT_SYMBOL_GPL(kvm_apic_match_dest);

int kvm_vector_to_index(u32 vector, u32 dest_vcpus,
			const unsigned long *bitmap, u32 bitmap_size)
{
	u32 mod;
	int i, idx = -1;

	mod = vector % dest_vcpus;

	for (i = 0; i <= mod; i++) {
		idx = find_next_bit(bitmap, bitmap_size, idx + 1);
		BUG_ON(idx == bitmap_size);
	}

	return idx;
}

static void kvm_apic_disabled_lapic_found(struct kvm *kvm)
{
	if (!kvm->arch.disabled_lapic_found) {
		kvm->arch.disabled_lapic_found = true;
		pr_info("Disabled LAPIC found during irq injection\n");
	}
}

static bool kvm_apic_is_broadcast_dest(struct kvm *kvm, struct kvm_lapic **src,
		struct kvm_lapic_irq *irq, struct kvm_apic_map *map)
{
	if (kvm->arch.x2apic_broadcast_quirk_disabled) {
		if ((irq->dest_id == APIC_BROADCAST &&
		     map->logical_mode != KVM_APIC_MODE_X2APIC))
			return true;
		if (irq->dest_id == X2APIC_BROADCAST)
			return true;
	} else {
		bool x2apic_ipi = src && *src && apic_x2apic_mode(*src);
		if (irq->dest_id == (x2apic_ipi ?
		                     X2APIC_BROADCAST : APIC_BROADCAST))
			return true;
	}

	return false;
}

/* Return true if the interrupt can be handled by using *bitmap as index mask
 * for valid destinations in *dst array.
 * Return false if kvm_apic_map_get_dest_lapic did nothing useful.
 * Note: we may have zero kvm_lapic destinations when we return true, which
 * means that the interrupt should be dropped. In this case, *bitmap would be
 * zero and *dst undefined.
 */
static inline bool kvm_apic_map_get_dest_lapic(struct kvm *kvm,
		struct kvm_lapic **src, struct kvm_lapic_irq *irq,
		struct kvm_apic_map *map, struct kvm_lapic ***dst,
		unsigned long *bitmap)
{
	int i, lowest;

	if (irq->shorthand == APIC_DEST_SELF && src) {
		*dst = src;
		*bitmap = 1;
		return true;
	} else if (irq->shorthand)
		return false;

	if (!map || kvm_apic_is_broadcast_dest(kvm, src, irq, map))
		return false;

	if (irq->dest_mode == APIC_DEST_PHYSICAL) {
		if (irq->dest_id > map->max_apic_id) {
			*bitmap = 0;
		} else {
			u32 dest_id = array_index_nospec(irq->dest_id, map->max_apic_id + 1);
			*dst = &map->phys_map[dest_id];
			*bitmap = 1;
		}
		return true;
	}

	*bitmap = 0;
	if (!kvm_apic_map_get_logical_dest(map, irq->dest_id, dst,
				(u16 *)bitmap))
		return false;

	if (!kvm_lowest_prio_delivery(irq))
		return true;

	if (!kvm_vector_hashing_enabled()) {
		lowest = -1;
		for_each_set_bit(i, bitmap, 16) {
			if (!(*dst)[i])
				continue;
			if (lowest < 0)
				lowest = i;
			else if (kvm_apic_compare_prio((*dst)[i]->vcpu,
						       (*dst)[lowest]->vcpu) < 0)
				lowest = i;
		}
	} else {
		if (!*bitmap)
			return true;

		lowest = kvm_vector_to_index(irq->vector, hweight16(*bitmap),
					     bitmap, 16);

		if (!(*dst)[lowest]) {
			kvm_apic_disabled_lapic_found(kvm);
			*bitmap = 0;
			return true;
		}
	}

	*bitmap = (lowest >= 0) ? 1 << lowest : 0;

	return true;
}

bool kvm_irq_delivery_to_apic_fast(struct kvm *kvm, struct kvm_lapic *src,
		struct kvm_lapic_irq *irq, int *r, struct dest_map *dest_map)
{
	struct kvm_apic_map *map;
	unsigned long bitmap;
	struct kvm_lapic **dst = NULL;
	int i;
	bool ret;

	*r = -1;

	if (irq->shorthand == APIC_DEST_SELF) {
		if (KVM_BUG_ON(!src, kvm)) {
			*r = 0;
			return true;
		}
		*r = kvm_apic_set_irq(src->vcpu, irq, dest_map);
		return true;
	}

	rcu_read_lock();
	map = rcu_dereference(kvm->arch.apic_map);

	ret = kvm_apic_map_get_dest_lapic(kvm, &src, irq, map, &dst, &bitmap);
	if (ret) {
		*r = 0;
		for_each_set_bit(i, &bitmap, 16) {
			if (!dst[i])
				continue;
			*r += kvm_apic_set_irq(dst[i]->vcpu, irq, dest_map);
		}
	}

	rcu_read_unlock();
	return ret;
}

/*
 * This routine tries to handle interrupts in posted mode, here is how
 * it deals with different cases:
 * - For single-destination interrupts, handle it in posted mode
 * - Else if vector hashing is enabled and it is a lowest-priority
 *   interrupt, handle it in posted mode and use the following mechanism
 *   to find the destination vCPU.
 *	1. For lowest-priority interrupts, store all the possible
 *	   destination vCPUs in an array.
 *	2. Use "guest vector % max number of destination vCPUs" to find
 *	   the right destination vCPU in the array for the lowest-priority
 *	   interrupt.
 * - Otherwise, use remapped mode to inject the interrupt.
 */
bool kvm_intr_is_single_vcpu_fast(struct kvm *kvm, struct kvm_lapic_irq *irq,
				  struct kvm_vcpu **dest_vcpu)
{
	struct kvm_apic_map *map;
	unsigned long bitmap;
	struct kvm_lapic **dst = NULL;
	bool ret = false;

	if (irq->shorthand)
		return false;

	rcu_read_lock();
	map = rcu_dereference(kvm->arch.apic_map);

	if (kvm_apic_map_get_dest_lapic(kvm, NULL, irq, map, &dst, &bitmap) &&
	    hweight16(bitmap) == 1) {
		unsigned long i = find_first_bit(&bitmap, 16);

		if (dst[i]) {
			*dest_vcpu = dst[i]->vcpu;
			ret = true;
		}
	}

	rcu_read_unlock();
	return ret;
}

/*
 * Add a pending IRQ into lapic.
 * Return 1 if successfully added and 0 if discarded.
 */
static int __apic_accept_irq(struct kvm_lapic *apic, int delivery_mode,
			     int vector, int level, int trig_mode,
			     struct dest_map *dest_map)
{
	int result = 0;
	struct kvm_vcpu *vcpu = apic->vcpu;

	trace_kvm_apic_accept_irq(vcpu->vcpu_id, delivery_mode,
				  trig_mode, vector);
	switch (delivery_mode) {
	case APIC_DM_LOWEST:
		vcpu->arch.apic_arb_prio++;
		fallthrough;
	case APIC_DM_FIXED:
		if (unlikely(trig_mode && !level))
			break;

		/* FIXME add logic for vcpu on reset */
		if (unlikely(!apic_enabled(apic)))
			break;

		result = 1;

		if (dest_map) {
			__set_bit(vcpu->vcpu_id, dest_map->map);
			dest_map->vectors[vcpu->vcpu_id] = vector;
		}

		if (apic_test_vector(vector, apic->regs + APIC_TMR) != !!trig_mode) {
			if (trig_mode)
				kvm_lapic_set_vector(vector,
						     apic->regs + APIC_TMR);
			else
				kvm_lapic_clear_vector(vector,
						       apic->regs + APIC_TMR);
		}

		static_call(kvm_x86_deliver_interrupt)(apic, delivery_mode,
						       trig_mode, vector);
		break;

	case APIC_DM_REMRD:
		result = 1;
		vcpu->arch.pv.pv_unhalted = 1;
		kvm_make_request(KVM_REQ_EVENT, vcpu);
		kvm_vcpu_kick(vcpu);
		break;

	case APIC_DM_SMI:
		if (!kvm_inject_smi(vcpu)) {
			kvm_vcpu_kick(vcpu);
			result = 1;
		}
		break;

	case APIC_DM_NMI:
		result = 1;
		kvm_inject_nmi(vcpu);
		kvm_vcpu_kick(vcpu);
		break;

	case APIC_DM_INIT:
		if (!trig_mode || level) {
			result = 1;
			/* assumes that there are only KVM_APIC_INIT/SIPI */
			apic->pending_events = (1UL << KVM_APIC_INIT);
			kvm_make_request(KVM_REQ_EVENT, vcpu);
			kvm_vcpu_kick(vcpu);
		}
		break;

	case APIC_DM_STARTUP:
		result = 1;
		apic->sipi_vector = vector;
		/* make sure sipi_vector is visible for the receiver */
		smp_wmb();
		set_bit(KVM_APIC_SIPI, &apic->pending_events);
		kvm_make_request(KVM_REQ_EVENT, vcpu);
		kvm_vcpu_kick(vcpu);
		break;

	case APIC_DM_EXTINT:
		/*
		 * Should only be called by kvm_apic_local_deliver() with LVT0,
		 * before NMI watchdog was enabled. Already handled by
		 * kvm_apic_accept_pic_intr().
		 */
		break;

	default:
		printk(KERN_ERR "TODO: unsupported delivery mode %x\n",
		       delivery_mode);
		break;
	}
	return result;
}

/*
 * This routine identifies the destination vcpus mask meant to receive the
 * IOAPIC interrupts. It either uses kvm_apic_map_get_dest_lapic() to find
 * out the destination vcpus array and set the bitmap or it traverses to
 * each available vcpu to identify the same.
 */
void kvm_bitmap_or_dest_vcpus(struct kvm *kvm, struct kvm_lapic_irq *irq,
			      unsigned long *vcpu_bitmap)
{
	struct kvm_lapic **dest_vcpu = NULL;
	struct kvm_lapic *src = NULL;
	struct kvm_apic_map *map;
	struct kvm_vcpu *vcpu;
	unsigned long bitmap, i;
	int vcpu_idx;
	bool ret;

	rcu_read_lock();
	map = rcu_dereference(kvm->arch.apic_map);

	ret = kvm_apic_map_get_dest_lapic(kvm, &src, irq, map, &dest_vcpu,
					  &bitmap);
	if (ret) {
		for_each_set_bit(i, &bitmap, 16) {
			if (!dest_vcpu[i])
				continue;
			vcpu_idx = dest_vcpu[i]->vcpu->vcpu_idx;
			__set_bit(vcpu_idx, vcpu_bitmap);
		}
	} else {
		kvm_for_each_vcpu(i, vcpu, kvm) {
			if (!kvm_apic_present(vcpu))
				continue;
			if (!kvm_apic_match_dest(vcpu, NULL,
						 irq->shorthand,
						 irq->dest_id,
						 irq->dest_mode))
				continue;
			__set_bit(i, vcpu_bitmap);
		}
	}
	rcu_read_unlock();
}

int kvm_apic_compare_prio(struct kvm_vcpu *vcpu1, struct kvm_vcpu *vcpu2)
{
	return vcpu1->arch.apic_arb_prio - vcpu2->arch.apic_arb_prio;
}

static bool kvm_ioapic_handles_vector(struct kvm_lapic *apic, int vector)
{
	return test_bit(vector, apic->vcpu->arch.ioapic_handled_vectors);
}

static void kvm_ioapic_send_eoi(struct kvm_lapic *apic, int vector)
{
	int trigger_mode;

	/* Eoi the ioapic only if the ioapic doesn't own the vector. */
	if (!kvm_ioapic_handles_vector(apic, vector))
		return;

	/* Request a KVM exit to inform the userspace IOAPIC. */
	if (irqchip_split(apic->vcpu->kvm)) {
		apic->vcpu->arch.pending_ioapic_eoi = vector;
		kvm_make_request(KVM_REQ_IOAPIC_EOI_EXIT, apic->vcpu);
		return;
	}

	if (apic_test_vector(vector, apic->regs + APIC_TMR))
		trigger_mode = IOAPIC_LEVEL_TRIG;
	else
		trigger_mode = IOAPIC_EDGE_TRIG;

	kvm_ioapic_update_eoi(apic->vcpu, vector, trigger_mode);
}

static int apic_set_eoi(struct kvm_lapic *apic)
{
	int vector = apic_find_highest_isr(apic);

	trace_kvm_eoi(apic, vector);

	/*
	 * Not every EOI write has a corresponding ISR bit set; one example
	 * is when the kernel checks the timer in setup_IO_APIC.
	 */
	if (vector == -1)
		return vector;

	apic_clear_isr(vector, apic);
	apic_update_ppr(apic);

	if (kvm_hv_synic_has_vector(apic->vcpu, vector))
		kvm_hv_synic_send_eoi(apic->vcpu, vector);

	kvm_ioapic_send_eoi(apic, vector);
	kvm_make_request(KVM_REQ_EVENT, apic->vcpu);
	return vector;
}

/*
 * this interface assumes a trap-like exit, which has already finished
 * desired side effect including vISR and vPPR update.
 */
void kvm_apic_set_eoi_accelerated(struct kvm_vcpu *vcpu, int vector)
{
	struct kvm_lapic *apic = vcpu->arch.apic;

	trace_kvm_eoi(apic, vector);

	kvm_ioapic_send_eoi(apic, vector);
	kvm_make_request(KVM_REQ_EVENT, apic->vcpu);
}
EXPORT_SYMBOL_GPL(kvm_apic_set_eoi_accelerated);

void kvm_apic_send_ipi(struct kvm_lapic *apic, u32 icr_low, u32 icr_high)
{
	struct kvm_lapic_irq irq;

	/* KVM has no delay and should always clear the BUSY/PENDING flag. */
	WARN_ON_ONCE(icr_low & APIC_ICR_BUSY);

	irq.vector = icr_low & APIC_VECTOR_MASK;
	irq.delivery_mode = icr_low & APIC_MODE_MASK;
	irq.dest_mode = icr_low & APIC_DEST_MASK;
	irq.level = (icr_low & APIC_INT_ASSERT) != 0;
	irq.trig_mode = icr_low & APIC_INT_LEVELTRIG;
	irq.shorthand = icr_low & APIC_SHORT_MASK;
	irq.msi_redir_hint = false;
	if (apic_x2apic_mode(apic))
		irq.dest_id = icr_high;
	else
		irq.dest_id = GET_XAPIC_DEST_FIELD(icr_high);

	trace_kvm_apic_ipi(icr_low, irq.dest_id);

	kvm_irq_delivery_to_apic(apic->vcpu->kvm, apic, &irq, NULL);
}
EXPORT_SYMBOL_GPL(kvm_apic_send_ipi);

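/*
 * The current count isn't tracked directly; derive it from the time left
 * until target_expiration, modulo the period so that periodic timers that
 * already expired wrap correctly, at one count per
 * APIC_BUS_CYCLE_NS * divide_count nanoseconds.
 */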
static u32 apic_get_tmcct(struct kvm_lapic *apic)
{
	ktime_t remaining, now;
	s64 ns;

	ASSERT(apic != NULL);

	/* if initial count is 0, current count should also be 0 */
	if (kvm_lapic_get_reg(apic, APIC_TMICT) == 0 ||
		apic->lapic_timer.period == 0)
		return 0;

	now = ktime_get();
	remaining = ktime_sub(apic->lapic_timer.target_expiration, now);
	if (ktime_to_ns(remaining) < 0)
		remaining = 0;

	ns = mod_64(ktime_to_ns(remaining), apic->lapic_timer.period);
	return div64_u64(ns, (APIC_BUS_CYCLE_NS * apic->divide_count));
}

static void __report_tpr_access(struct kvm_lapic *apic, bool write)
{
	struct kvm_vcpu *vcpu = apic->vcpu;
	struct kvm_run *run = vcpu->run;

	kvm_make_request(KVM_REQ_REPORT_TPR_ACCESS, vcpu);
	run->tpr_access.rip = kvm_rip_read(vcpu);
	run->tpr_access.is_write = write;
}

static inline void report_tpr_access(struct kvm_lapic *apic, bool write)
{
	if (apic->vcpu->arch.tpr_access_reporting)
		__report_tpr_access(apic, write);
}

static u32 __apic_read(struct kvm_lapic *apic, unsigned int offset)
{
	u32 val = 0;

	if (offset >= LAPIC_MMIO_LENGTH)
		return 0;

	switch (offset) {
	case APIC_ARBPRI:
		break;

	case APIC_TMCCT:	/* Timer CCR */
		if (apic_lvtt_tscdeadline(apic))
			return 0;

		val = apic_get_tmcct(apic);
		break;
	case APIC_PROCPRI:
		apic_update_ppr(apic);
		val = kvm_lapic_get_reg(apic, offset);
		break;
	case APIC_TASKPRI:
		report_tpr_access(apic, false);
		fallthrough;
	default:
		val = kvm_lapic_get_reg(apic, offset);
		break;
	}

	return val;
}

static inline struct kvm_lapic *to_lapic(struct kvm_io_device *dev)
{
	return container_of(dev, struct kvm_lapic, dev);
}

#define APIC_REG_MASK(reg)	(1ull << ((reg) >> 4))
#define APIC_REGS_MASK(first, count) \
	(APIC_REG_MASK(first) * ((1ull << (count)) - 1))

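/*
 * E.g. APIC_REG_MASK(APIC_SPIV) is BIT(0xf0 >> 4) = BIT(15), and
 * APIC_REGS_MASK(APIC_ISR, APIC_ISR_NR) sets one bit per 16-byte
 * register in the 256-bit ISR range.
 */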
1610 | u64 kvm_lapic_readable_reg_mask(struct kvm_lapic *apic) |
1611 | { |
1612 | /* Leave bits '0' for reserved and write-only registers. */ |
1613 | u64 valid_reg_mask = |
1614 | APIC_REG_MASK(APIC_ID) | |
1615 | APIC_REG_MASK(APIC_LVR) | |
1616 | APIC_REG_MASK(APIC_TASKPRI) | |
1617 | APIC_REG_MASK(APIC_PROCPRI) | |
1618 | APIC_REG_MASK(APIC_LDR) | |
1619 | APIC_REG_MASK(APIC_SPIV) | |
1620 | APIC_REGS_MASK(APIC_ISR, APIC_ISR_NR) | |
1621 | APIC_REGS_MASK(APIC_TMR, APIC_ISR_NR) | |
1622 | APIC_REGS_MASK(APIC_IRR, APIC_ISR_NR) | |
1623 | APIC_REG_MASK(APIC_ESR) | |
1624 | APIC_REG_MASK(APIC_ICR) | |
1625 | APIC_REG_MASK(APIC_LVTT) | |
1626 | APIC_REG_MASK(APIC_LVTTHMR) | |
1627 | APIC_REG_MASK(APIC_LVTPC) | |
1628 | APIC_REG_MASK(APIC_LVT0) | |
1629 | APIC_REG_MASK(APIC_LVT1) | |
1630 | APIC_REG_MASK(APIC_LVTERR) | |
1631 | APIC_REG_MASK(APIC_TMICT) | |
1632 | APIC_REG_MASK(APIC_TMCCT) | |
1633 | APIC_REG_MASK(APIC_TDCR); |
1634 | |
1635 | if (kvm_lapic_lvt_supported(apic, lvt_index: LVT_CMCI)) |
1636 | valid_reg_mask |= APIC_REG_MASK(APIC_LVTCMCI); |
1637 | |
1638 | /* ARBPRI, DFR, and ICR2 are not valid in x2APIC mode. */ |
1639 | if (!apic_x2apic_mode(apic)) |
1640 | valid_reg_mask |= APIC_REG_MASK(APIC_ARBPRI) | |
1641 | APIC_REG_MASK(APIC_DFR) | |
1642 | APIC_REG_MASK(APIC_ICR2); |
1643 | |
1644 | return valid_reg_mask; |
1645 | } |
1646 | EXPORT_SYMBOL_GPL(kvm_lapic_readable_reg_mask); |
1647 | |
1648 | static int kvm_lapic_reg_read(struct kvm_lapic *apic, u32 offset, int len, |
1649 | void *data) |
1650 | { |
1651 | unsigned char alignment = offset & 0xf; |
1652 | u32 result; |
1653 | |
1654 | /* |
1655 | * WARN if KVM reads ICR in x2APIC mode, as it's an 8-byte register in |
1656 | * x2APIC and needs to be manually handled by the caller. |
1657 | */ |
1658 | WARN_ON_ONCE(apic_x2apic_mode(apic) && offset == APIC_ICR); |
1659 | |
1660 | if (alignment + len > 4) |
1661 | return 1; |
1662 | |
1663 | if (offset > 0x3f0 || |
1664 | !(kvm_lapic_readable_reg_mask(apic) & APIC_REG_MASK(offset))) |
1665 | return 1; |
1666 | |
1667 | result = __apic_read(apic, offset: offset & ~0xf); |
1668 | |
1669 | trace_kvm_apic_read(offset, result); |
1670 | |
1671 | switch (len) { |
1672 | case 1: |
1673 | case 2: |
1674 | case 4: |
1675 | memcpy(data, (char *)&result + alignment, len); |
1676 | break; |
1677 | default: |
		printk(KERN_ERR "Local APIC read with len = %x, "
		       "should be 1,2, or 4 instead\n", len);
1680 | break; |
1681 | } |
1682 | return 0; |
1683 | } |
1684 | |
1685 | static int apic_mmio_in_range(struct kvm_lapic *apic, gpa_t addr) |
1686 | { |
1687 | return addr >= apic->base_address && |
1688 | addr < apic->base_address + LAPIC_MMIO_LENGTH; |
1689 | } |
1690 | |
1691 | static int apic_mmio_read(struct kvm_vcpu *vcpu, struct kvm_io_device *this, |
1692 | gpa_t address, int len, void *data) |
1693 | { |
	struct kvm_lapic *apic = to_lapic(this);
	u32 offset = address - apic->base_address;

	if (!apic_mmio_in_range(apic, address))
		return -EOPNOTSUPP;

	if (!kvm_apic_hw_enabled(apic) || apic_x2apic_mode(apic)) {
		if (!kvm_check_has_quirk(vcpu->kvm,
					 KVM_X86_QUIRK_LAPIC_MMIO_HOLE))
1703 | return -EOPNOTSUPP; |
1704 | |
1705 | memset(data, 0xff, len); |
1706 | return 0; |
1707 | } |
1708 | |
1709 | kvm_lapic_reg_read(apic, offset, len, data); |
1710 | |
1711 | return 0; |
1712 | } |
1713 | |
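/*
 * The timer divide configuration is encoded in TDCR bits 0, 1 and 3.
 * Folding bit 3 down to bit 2 yields a 3-bit value N for which the
 * divisor is 2^(N+1), with the all-ones encoding wrapping (via "& 0x7")
 * to divide-by-1.
 */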
1714 | static void update_divide_count(struct kvm_lapic *apic) |
1715 | { |
1716 | u32 tmp1, tmp2, tdcr; |
1717 | |
1718 | tdcr = kvm_lapic_get_reg(apic, APIC_TDCR); |
1719 | tmp1 = tdcr & 0xf; |
1720 | tmp2 = ((tmp1 & 0x3) | ((tmp1 & 0x8) >> 1)) + 1; |
1721 | apic->divide_count = 0x1 << (tmp2 & 0x7); |
1722 | } |
1723 | |
1724 | static void limit_periodic_timer_frequency(struct kvm_lapic *apic) |
1725 | { |
1726 | /* |
1727 | * Do not allow the guest to program periodic timers with small |
1728 | * interval, since the hrtimers are not throttled by the host |
1729 | * scheduler. |
1730 | */ |
1731 | if (apic_lvtt_period(apic) && apic->lapic_timer.period) { |
1732 | s64 min_period = min_timer_period_us * 1000LL; |
1733 | |
1734 | if (apic->lapic_timer.period < min_period) { |
1735 | pr_info_ratelimited( |
1736 | "vcpu %i: requested %lld ns " |
1737 | "lapic timer period limited to %lld ns\n" , |
1738 | apic->vcpu->vcpu_id, |
1739 | apic->lapic_timer.period, min_period); |
1740 | apic->lapic_timer.period = min_period; |
1741 | } |
1742 | } |
1743 | } |
1744 | |
1745 | static void cancel_hv_timer(struct kvm_lapic *apic); |
1746 | |
1747 | static void cancel_apic_timer(struct kvm_lapic *apic) |
1748 | { |
	hrtimer_cancel(&apic->lapic_timer.timer);
	preempt_disable();
	if (apic->lapic_timer.hv_timer_in_use)
		cancel_hv_timer(apic);
	preempt_enable();
	atomic_set(&apic->lapic_timer.pending, 0);
1755 | } |
1756 | |
1757 | static void apic_update_lvtt(struct kvm_lapic *apic) |
1758 | { |
1759 | u32 timer_mode = kvm_lapic_get_reg(apic, APIC_LVTT) & |
1760 | apic->lapic_timer.timer_mode_mask; |
1761 | |
1762 | if (apic->lapic_timer.timer_mode != timer_mode) { |
1763 | if (apic_lvtt_tscdeadline(apic) != (timer_mode == |
1764 | APIC_LVT_TIMER_TSCDEADLINE)) { |
1765 | cancel_apic_timer(apic); |
			kvm_lapic_set_reg(apic, APIC_TMICT, 0);
1767 | apic->lapic_timer.period = 0; |
1768 | apic->lapic_timer.tscdeadline = 0; |
1769 | } |
1770 | apic->lapic_timer.timer_mode = timer_mode; |
1771 | limit_periodic_timer_frequency(apic); |
1772 | } |
1773 | } |
1774 | |
1775 | /* |
1776 | * On APICv, this test will cause a busy wait |
1777 | * during a higher-priority task. |
1778 | */ |
1779 | |
1780 | static bool lapic_timer_int_injected(struct kvm_vcpu *vcpu) |
1781 | { |
1782 | struct kvm_lapic *apic = vcpu->arch.apic; |
1783 | u32 reg = kvm_lapic_get_reg(apic, APIC_LVTT); |
1784 | |
1785 | if (kvm_apic_hw_enabled(apic)) { |
1786 | int vec = reg & APIC_VECTOR_MASK; |
1787 | void *bitmap = apic->regs + APIC_ISR; |
1788 | |
1789 | if (apic->apicv_active) |
1790 | bitmap = apic->regs + APIC_IRR; |
1791 | |
1792 | if (apic_test_vector(vec, bitmap)) |
1793 | return true; |
1794 | } |
1795 | return false; |
1796 | } |
1797 | |
1798 | static inline void __wait_lapic_expire(struct kvm_vcpu *vcpu, u64 guest_cycles) |
1799 | { |
1800 | u64 timer_advance_ns = vcpu->arch.apic->lapic_timer.timer_advance_ns; |
1801 | |
1802 | /* |
1803 | * If the guest TSC is running at a different ratio than the host, then |
1804 | * convert the delay to nanoseconds to achieve an accurate delay. Note |
1805 | * that __delay() uses delay_tsc whenever the hardware has TSC, thus |
1806 | * always for VMX enabled hardware. |
1807 | */ |
1808 | if (vcpu->arch.tsc_scaling_ratio == kvm_caps.default_tsc_scaling_ratio) { |
1809 | __delay(min(guest_cycles, |
1810 | nsec_to_cycles(vcpu, timer_advance_ns))); |
1811 | } else { |
1812 | u64 delay_ns = guest_cycles * 1000000ULL; |
1813 | do_div(delay_ns, vcpu->arch.virtual_tsc_khz); |
1814 | ndelay(min_t(u32, delay_ns, timer_advance_ns)); |
1815 | } |
1816 | } |
1817 | |
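/*
 * Tune the timer advance based on the measured error: convert the delta
 * from guest TSC cycles to nanoseconds (cycles * 10^6 / tsc_khz) and move
 * only 1/LAPIC_TIMER_ADVANCE_ADJUST_STEP of the error per sample to damp
 * out jitter.
 */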
1818 | static inline void adjust_lapic_timer_advance(struct kvm_vcpu *vcpu, |
1819 | s64 advance_expire_delta) |
1820 | { |
1821 | struct kvm_lapic *apic = vcpu->arch.apic; |
1822 | u32 timer_advance_ns = apic->lapic_timer.timer_advance_ns; |
1823 | u64 ns; |
1824 | |
1825 | /* Do not adjust for tiny fluctuations or large random spikes. */ |
1826 | if (abs(advance_expire_delta) > LAPIC_TIMER_ADVANCE_ADJUST_MAX || |
1827 | abs(advance_expire_delta) < LAPIC_TIMER_ADVANCE_ADJUST_MIN) |
1828 | return; |
1829 | |
1830 | /* too early */ |
1831 | if (advance_expire_delta < 0) { |
1832 | ns = -advance_expire_delta * 1000000ULL; |
1833 | do_div(ns, vcpu->arch.virtual_tsc_khz); |
1834 | timer_advance_ns -= ns/LAPIC_TIMER_ADVANCE_ADJUST_STEP; |
1835 | } else { |
1836 | /* too late */ |
1837 | ns = advance_expire_delta * 1000000ULL; |
1838 | do_div(ns, vcpu->arch.virtual_tsc_khz); |
1839 | timer_advance_ns += ns/LAPIC_TIMER_ADVANCE_ADJUST_STEP; |
1840 | } |
1841 | |
1842 | if (unlikely(timer_advance_ns > LAPIC_TIMER_ADVANCE_NS_MAX)) |
1843 | timer_advance_ns = LAPIC_TIMER_ADVANCE_NS_INIT; |
1844 | apic->lapic_timer.timer_advance_ns = timer_advance_ns; |
1845 | } |
1846 | |
1847 | static void __kvm_wait_lapic_expire(struct kvm_vcpu *vcpu) |
1848 | { |
1849 | struct kvm_lapic *apic = vcpu->arch.apic; |
1850 | u64 guest_tsc, tsc_deadline; |
1851 | |
1852 | tsc_deadline = apic->lapic_timer.expired_tscdeadline; |
1853 | apic->lapic_timer.expired_tscdeadline = 0; |
	guest_tsc = kvm_read_l1_tsc(vcpu, rdtsc());
	trace_kvm_wait_lapic_expire(vcpu->vcpu_id, guest_tsc - tsc_deadline);

	if (lapic_timer_advance_dynamic) {
		adjust_lapic_timer_advance(vcpu, guest_tsc - tsc_deadline);
1859 | /* |
1860 | * If the timer fired early, reread the TSC to account for the |
1861 | * overhead of the above adjustment to avoid waiting longer |
1862 | * than is necessary. |
1863 | */ |
1864 | if (guest_tsc < tsc_deadline) |
			guest_tsc = kvm_read_l1_tsc(vcpu, rdtsc());
1866 | } |
1867 | |
1868 | if (guest_tsc < tsc_deadline) |
		__wait_lapic_expire(vcpu, tsc_deadline - guest_tsc);
1870 | } |
1871 | |
1872 | void kvm_wait_lapic_expire(struct kvm_vcpu *vcpu) |
1873 | { |
1874 | if (lapic_in_kernel(vcpu) && |
1875 | vcpu->arch.apic->lapic_timer.expired_tscdeadline && |
1876 | vcpu->arch.apic->lapic_timer.timer_advance_ns && |
1877 | lapic_timer_int_injected(vcpu)) |
1878 | __kvm_wait_lapic_expire(vcpu); |
1879 | } |
1880 | EXPORT_SYMBOL_GPL(kvm_wait_lapic_expire); |
1881 | |
1882 | static void kvm_apic_inject_pending_timer_irqs(struct kvm_lapic *apic) |
1883 | { |
1884 | struct kvm_timer *ktimer = &apic->lapic_timer; |
1885 | |
1886 | kvm_apic_local_deliver(apic, APIC_LVTT); |
1887 | if (apic_lvtt_tscdeadline(apic)) { |
1888 | ktimer->tscdeadline = 0; |
1889 | } else if (apic_lvtt_oneshot(apic)) { |
1890 | ktimer->tscdeadline = 0; |
1891 | ktimer->target_expiration = 0; |
1892 | } |
1893 | } |
1894 | |
1895 | static void apic_timer_expired(struct kvm_lapic *apic, bool from_timer_fn) |
1896 | { |
1897 | struct kvm_vcpu *vcpu = apic->vcpu; |
1898 | struct kvm_timer *ktimer = &apic->lapic_timer; |
1899 | |
	if (atomic_read(&apic->lapic_timer.pending))
1901 | return; |
1902 | |
1903 | if (apic_lvtt_tscdeadline(apic) || ktimer->hv_timer_in_use) |
1904 | ktimer->expired_tscdeadline = ktimer->tscdeadline; |
1905 | |
1906 | if (!from_timer_fn && apic->apicv_active) { |
1907 | WARN_ON(kvm_get_running_vcpu() != vcpu); |
1908 | kvm_apic_inject_pending_timer_irqs(apic); |
1909 | return; |
1910 | } |
1911 | |
	if (kvm_use_posted_timer_interrupt(apic->vcpu)) {
1913 | /* |
1914 | * Ensure the guest's timer has truly expired before posting an |
1915 | * interrupt. Open code the relevant checks to avoid querying |
1916 | * lapic_timer_int_injected(), which will be false since the |
1917 | * interrupt isn't yet injected. Waiting until after injecting |
1918 | * is not an option since that won't help a posted interrupt. |
1919 | */ |
1920 | if (vcpu->arch.apic->lapic_timer.expired_tscdeadline && |
1921 | vcpu->arch.apic->lapic_timer.timer_advance_ns) |
1922 | __kvm_wait_lapic_expire(vcpu); |
1923 | kvm_apic_inject_pending_timer_irqs(apic); |
1924 | return; |
1925 | } |
1926 | |
	atomic_inc(&apic->lapic_timer.pending);
1928 | kvm_make_request(KVM_REQ_UNBLOCK, vcpu); |
1929 | if (from_timer_fn) |
1930 | kvm_vcpu_kick(vcpu); |
1931 | } |
1932 | |
1933 | static void start_sw_tscdeadline(struct kvm_lapic *apic) |
1934 | { |
1935 | struct kvm_timer *ktimer = &apic->lapic_timer; |
1936 | u64 guest_tsc, tscdeadline = ktimer->tscdeadline; |
1937 | u64 ns = 0; |
1938 | ktime_t expire; |
1939 | struct kvm_vcpu *vcpu = apic->vcpu; |
1940 | unsigned long this_tsc_khz = vcpu->arch.virtual_tsc_khz; |
1941 | unsigned long flags; |
1942 | ktime_t now; |
1943 | |
1944 | if (unlikely(!tscdeadline || !this_tsc_khz)) |
1945 | return; |
1946 | |
1947 | local_irq_save(flags); |
1948 | |
1949 | now = ktime_get(); |
	guest_tsc = kvm_read_l1_tsc(vcpu, rdtsc());
1951 | |
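	/* Convert the remaining guest TSC cycles to ns: cycles * 10^6 / khz. */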
1952 | ns = (tscdeadline - guest_tsc) * 1000000ULL; |
1953 | do_div(ns, this_tsc_khz); |
1954 | |
1955 | if (likely(tscdeadline > guest_tsc) && |
1956 | likely(ns > apic->lapic_timer.timer_advance_ns)) { |
1957 | expire = ktime_add_ns(now, ns); |
1958 | expire = ktime_sub_ns(expire, ktimer->timer_advance_ns); |
		hrtimer_start(&ktimer->timer, expire, HRTIMER_MODE_ABS_HARD);
	} else
		apic_timer_expired(apic, false);
1962 | |
1963 | local_irq_restore(flags); |
1964 | } |
1965 | |
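/*
 * TMICT ticks at the (divided) APIC bus clock: one tick every
 * APIC_BUS_CYCLE_NS * divide_count nanoseconds.
 */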
1966 | static inline u64 tmict_to_ns(struct kvm_lapic *apic, u32 tmict) |
1967 | { |
1968 | return (u64)tmict * APIC_BUS_CYCLE_NS * (u64)apic->divide_count; |
1969 | } |
1970 | |
1971 | static void update_target_expiration(struct kvm_lapic *apic, uint32_t old_divisor) |
1972 | { |
1973 | ktime_t now, remaining; |
1974 | u64 ns_remaining_old, ns_remaining_new; |
1975 | |
1976 | apic->lapic_timer.period = |
		tmict_to_ns(apic, kvm_lapic_get_reg(apic, APIC_TMICT));
1978 | limit_periodic_timer_frequency(apic); |
1979 | |
1980 | now = ktime_get(); |
1981 | remaining = ktime_sub(apic->lapic_timer.target_expiration, now); |
	if (ktime_to_ns(remaining) < 0)
1983 | remaining = 0; |
1984 | |
	ns_remaining_old = ktime_to_ns(remaining);
	ns_remaining_new = mul_u64_u32_div(ns_remaining_old,
					   apic->divide_count, old_divisor);

	apic->lapic_timer.tscdeadline +=
		nsec_to_cycles(apic->vcpu, ns_remaining_new) -
		nsec_to_cycles(apic->vcpu, ns_remaining_old);
1992 | apic->lapic_timer.target_expiration = ktime_add_ns(now, ns_remaining_new); |
1993 | } |
1994 | |
1995 | static bool set_target_expiration(struct kvm_lapic *apic, u32 count_reg) |
1996 | { |
1997 | ktime_t now; |
1998 | u64 tscl = rdtsc(); |
1999 | s64 deadline; |
2000 | |
2001 | now = ktime_get(); |
2002 | apic->lapic_timer.period = |
		tmict_to_ns(apic, kvm_lapic_get_reg(apic, APIC_TMICT));
2004 | |
2005 | if (!apic->lapic_timer.period) { |
2006 | apic->lapic_timer.tscdeadline = 0; |
2007 | return false; |
2008 | } |
2009 | |
2010 | limit_periodic_timer_frequency(apic); |
2011 | deadline = apic->lapic_timer.period; |
2012 | |
2013 | if (apic_lvtt_period(apic) || apic_lvtt_oneshot(apic)) { |
2014 | if (unlikely(count_reg != APIC_TMICT)) { |
			deadline = tmict_to_ns(apic,
					kvm_lapic_get_reg(apic, count_reg));
2017 | if (unlikely(deadline <= 0)) { |
2018 | if (apic_lvtt_period(apic)) |
2019 | deadline = apic->lapic_timer.period; |
2020 | else |
2021 | deadline = 0; |
2022 | } |
2023 | else if (unlikely(deadline > apic->lapic_timer.period)) { |
2024 | pr_info_ratelimited( |
2025 | "vcpu %i: requested lapic timer restore with " |
2026 | "starting count register %#x=%u (%lld ns) > initial count (%lld ns). " |
2027 | "Using initial count to start timer.\n" , |
2028 | apic->vcpu->vcpu_id, |
2029 | count_reg, |
2030 | kvm_lapic_get_reg(apic, count_reg), |
2031 | deadline, apic->lapic_timer.period); |
2032 | kvm_lapic_set_reg(apic, reg_off: count_reg, val: 0); |
2033 | deadline = apic->lapic_timer.period; |
2034 | } |
2035 | } |
2036 | } |
2037 | |
	apic->lapic_timer.tscdeadline = kvm_read_l1_tsc(apic->vcpu, tscl) +
		nsec_to_cycles(apic->vcpu, deadline);
2040 | apic->lapic_timer.target_expiration = ktime_add_ns(now, deadline); |
2041 | |
2042 | return true; |
2043 | } |
2044 | |
2045 | static void advance_periodic_target_expiration(struct kvm_lapic *apic) |
2046 | { |
2047 | ktime_t now = ktime_get(); |
2048 | u64 tscl = rdtsc(); |
2049 | ktime_t delta; |
2050 | |
2051 | /* |
2052 | * Synchronize both deadlines to the same time source or |
2053 | * differences in the periods (caused by differences in the |
2054 | * underlying clocks or numerical approximation errors) will |
2055 | * cause the two to drift apart over time as the errors |
2056 | * accumulate. |
2057 | */ |
2058 | apic->lapic_timer.target_expiration = |
2059 | ktime_add_ns(apic->lapic_timer.target_expiration, |
2060 | apic->lapic_timer.period); |
2061 | delta = ktime_sub(apic->lapic_timer.target_expiration, now); |
	apic->lapic_timer.tscdeadline = kvm_read_l1_tsc(apic->vcpu, tscl) +
		nsec_to_cycles(apic->vcpu, delta);
2064 | } |
2065 | |
2066 | static void start_sw_period(struct kvm_lapic *apic) |
2067 | { |
2068 | if (!apic->lapic_timer.period) |
2069 | return; |
2070 | |
	if (ktime_after(ktime_get(),
			apic->lapic_timer.target_expiration)) {
		apic_timer_expired(apic, false);
2074 | |
2075 | if (apic_lvtt_oneshot(apic)) |
2076 | return; |
2077 | |
2078 | advance_periodic_target_expiration(apic); |
2079 | } |
2080 | |
	hrtimer_start(&apic->lapic_timer.timer,
		      apic->lapic_timer.target_expiration,
		      HRTIMER_MODE_ABS_HARD);
2084 | } |
2085 | |
2086 | bool kvm_lapic_hv_timer_in_use(struct kvm_vcpu *vcpu) |
2087 | { |
2088 | if (!lapic_in_kernel(vcpu)) |
2089 | return false; |
2090 | |
2091 | return vcpu->arch.apic->lapic_timer.hv_timer_in_use; |
2092 | } |
2093 | |
2094 | static void cancel_hv_timer(struct kvm_lapic *apic) |
2095 | { |
2096 | WARN_ON(preemptible()); |
2097 | WARN_ON(!apic->lapic_timer.hv_timer_in_use); |
2098 | static_call(kvm_x86_cancel_hv_timer)(apic->vcpu); |
2099 | apic->lapic_timer.hv_timer_in_use = false; |
2100 | } |
2101 | |
2102 | static bool start_hv_timer(struct kvm_lapic *apic) |
2103 | { |
2104 | struct kvm_timer *ktimer = &apic->lapic_timer; |
2105 | struct kvm_vcpu *vcpu = apic->vcpu; |
2106 | bool expired; |
2107 | |
2108 | WARN_ON(preemptible()); |
2109 | if (!kvm_can_use_hv_timer(vcpu)) |
2110 | return false; |
2111 | |
2112 | if (!ktimer->tscdeadline) |
2113 | return false; |
2114 | |
2115 | if (static_call(kvm_x86_set_hv_timer)(vcpu, ktimer->tscdeadline, &expired)) |
2116 | return false; |
2117 | |
2118 | ktimer->hv_timer_in_use = true; |
	hrtimer_cancel(&ktimer->timer);
2120 | |
2121 | /* |
2122 | * To simplify handling the periodic timer, leave the hv timer running |
2123 | * even if the deadline timer has expired, i.e. rely on the resulting |
2124 | * VM-Exit to recompute the periodic timer's target expiration. |
2125 | */ |
2126 | if (!apic_lvtt_period(apic)) { |
2127 | /* |
2128 | * Cancel the hv timer if the sw timer fired while the hv timer |
2129 | * was being programmed, or if the hv timer itself expired. |
2130 | */ |
		if (atomic_read(&ktimer->pending)) {
			cancel_hv_timer(apic);
		} else if (expired) {
			apic_timer_expired(apic, false);
			cancel_hv_timer(apic);
2136 | } |
2137 | } |
2138 | |
	trace_kvm_hv_timer_state(vcpu->vcpu_id, ktimer->hv_timer_in_use);
2140 | |
2141 | return true; |
2142 | } |
2143 | |
2144 | static void start_sw_timer(struct kvm_lapic *apic) |
2145 | { |
2146 | struct kvm_timer *ktimer = &apic->lapic_timer; |
2147 | |
2148 | WARN_ON(preemptible()); |
2149 | if (apic->lapic_timer.hv_timer_in_use) |
2150 | cancel_hv_timer(apic); |
	if (!apic_lvtt_period(apic) && atomic_read(&ktimer->pending))
2152 | return; |
2153 | |
2154 | if (apic_lvtt_period(apic) || apic_lvtt_oneshot(apic)) |
2155 | start_sw_period(apic); |
2156 | else if (apic_lvtt_tscdeadline(apic)) |
2157 | start_sw_tscdeadline(apic); |
	trace_kvm_hv_timer_state(apic->vcpu->vcpu_id, false);
2159 | } |
2160 | |
2161 | static void restart_apic_timer(struct kvm_lapic *apic) |
2162 | { |
2163 | preempt_disable(); |
2164 | |
	if (!apic_lvtt_period(apic) && atomic_read(&apic->lapic_timer.pending))
2166 | goto out; |
2167 | |
2168 | if (!start_hv_timer(apic)) |
2169 | start_sw_timer(apic); |
2170 | out: |
2171 | preempt_enable(); |
2172 | } |
2173 | |
2174 | void kvm_lapic_expired_hv_timer(struct kvm_vcpu *vcpu) |
2175 | { |
2176 | struct kvm_lapic *apic = vcpu->arch.apic; |
2177 | |
2178 | preempt_disable(); |
2179 | /* If the preempt notifier has already run, it also called apic_timer_expired */ |
2180 | if (!apic->lapic_timer.hv_timer_in_use) |
2181 | goto out; |
2182 | WARN_ON(kvm_vcpu_is_blocking(vcpu)); |
	apic_timer_expired(apic, false);
2184 | cancel_hv_timer(apic); |
2185 | |
2186 | if (apic_lvtt_period(apic) && apic->lapic_timer.period) { |
2187 | advance_periodic_target_expiration(apic); |
2188 | restart_apic_timer(apic); |
2189 | } |
2190 | out: |
2191 | preempt_enable(); |
2192 | } |
2193 | EXPORT_SYMBOL_GPL(kvm_lapic_expired_hv_timer); |
2194 | |
2195 | void kvm_lapic_switch_to_hv_timer(struct kvm_vcpu *vcpu) |
2196 | { |
	restart_apic_timer(vcpu->arch.apic);
2198 | } |
2199 | |
2200 | void kvm_lapic_switch_to_sw_timer(struct kvm_vcpu *vcpu) |
2201 | { |
2202 | struct kvm_lapic *apic = vcpu->arch.apic; |
2203 | |
2204 | preempt_disable(); |
2205 | /* Possibly the TSC deadline timer is not enabled yet */ |
2206 | if (apic->lapic_timer.hv_timer_in_use) |
2207 | start_sw_timer(apic); |
2208 | preempt_enable(); |
2209 | } |
2210 | |
2211 | void kvm_lapic_restart_hv_timer(struct kvm_vcpu *vcpu) |
2212 | { |
2213 | struct kvm_lapic *apic = vcpu->arch.apic; |
2214 | |
2215 | WARN_ON(!apic->lapic_timer.hv_timer_in_use); |
2216 | restart_apic_timer(apic); |
2217 | } |
2218 | |
2219 | static void __start_apic_timer(struct kvm_lapic *apic, u32 count_reg) |
2220 | { |
	atomic_set(&apic->lapic_timer.pending, 0);
2222 | |
2223 | if ((apic_lvtt_period(apic) || apic_lvtt_oneshot(apic)) |
2224 | && !set_target_expiration(apic, count_reg)) |
2225 | return; |
2226 | |
2227 | restart_apic_timer(apic); |
2228 | } |
2229 | |
2230 | static void start_apic_timer(struct kvm_lapic *apic) |
2231 | { |
2232 | __start_apic_timer(apic, APIC_TMICT); |
2233 | } |
2234 | |
2235 | static void apic_manage_nmi_watchdog(struct kvm_lapic *apic, u32 lvt0_val) |
2236 | { |
	bool lvt0_in_nmi_mode = apic_lvt_nmi_mode(lvt0_val);

	if (apic->lvt0_in_nmi_mode != lvt0_in_nmi_mode) {
		apic->lvt0_in_nmi_mode = lvt0_in_nmi_mode;
		if (lvt0_in_nmi_mode)
			atomic_inc(&apic->vcpu->kvm->arch.vapics_in_nmi_mode);
		else
			atomic_dec(&apic->vcpu->kvm->arch.vapics_in_nmi_mode);
	}
2246 | } |
2247 | |
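/*
 * The LVT registers from LVTT through LVTERR are contiguous at 16-byte
 * strides, so "(reg - APIC_LVTT) >> 4" maps a register offset to its
 * index in apic_lvt_mask[].  LVTCMCI sits outside that range and is
 * special-cased.
 */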
2248 | static int get_lvt_index(u32 reg) |
2249 | { |
2250 | if (reg == APIC_LVTCMCI) |
2251 | return LVT_CMCI; |
2252 | if (reg < APIC_LVTT || reg > APIC_LVTERR) |
2253 | return -1; |
2254 | return array_index_nospec( |
2255 | (reg - APIC_LVTT) >> 4, KVM_APIC_MAX_NR_LVT_ENTRIES); |
2256 | } |
2257 | |
2258 | static int kvm_lapic_reg_write(struct kvm_lapic *apic, u32 reg, u32 val) |
2259 | { |
2260 | int ret = 0; |
2261 | |
2262 | trace_kvm_apic_write(reg, val); |
2263 | |
2264 | switch (reg) { |
2265 | case APIC_ID: /* Local APIC ID */ |
2266 | if (!apic_x2apic_mode(apic)) { |
			kvm_apic_set_xapic_id(apic, val >> 24);
2268 | } else { |
2269 | ret = 1; |
2270 | } |
2271 | break; |
2272 | |
2273 | case APIC_TASKPRI: |
		report_tpr_access(apic, true);
		apic_set_tpr(apic, val & 0xff);
2276 | break; |
2277 | |
2278 | case APIC_EOI: |
2279 | apic_set_eoi(apic); |
2280 | break; |
2281 | |
2282 | case APIC_LDR: |
2283 | if (!apic_x2apic_mode(apic)) |
			kvm_apic_set_ldr(apic, val & APIC_LDR_MASK);
2285 | else |
2286 | ret = 1; |
2287 | break; |
2288 | |
2289 | case APIC_DFR: |
2290 | if (!apic_x2apic_mode(apic)) |
			kvm_apic_set_dfr(apic, val | 0x0FFFFFFF);
2292 | else |
2293 | ret = 1; |
2294 | break; |
2295 | |
2296 | case APIC_SPIV: { |
2297 | u32 mask = 0x3ff; |
2298 | if (kvm_lapic_get_reg(apic, APIC_LVR) & APIC_LVR_DIRECTED_EOI) |
2299 | mask |= APIC_SPIV_DIRECTED_EOI; |
		apic_set_spiv(apic, val & mask);
2301 | if (!(val & APIC_SPIV_APIC_ENABLED)) { |
2302 | int i; |
2303 | |
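			/* SW-disabling the APIC forces all LVT entries to be masked. */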
2304 | for (i = 0; i < apic->nr_lvt_entries; i++) { |
				kvm_lapic_set_reg(apic, APIC_LVTx(i),
					kvm_lapic_get_reg(apic, APIC_LVTx(i)) | APIC_LVT_MASKED);
			}
			apic_update_lvtt(apic);
			atomic_set(&apic->lapic_timer.pending, 0);
		}
2312 | break; |
2313 | } |
2314 | case APIC_ICR: |
2315 | WARN_ON_ONCE(apic_x2apic_mode(apic)); |
2316 | |
2317 | /* No delay here, so we always clear the pending bit */ |
2318 | val &= ~APIC_ICR_BUSY; |
2319 | kvm_apic_send_ipi(apic, val, kvm_lapic_get_reg(apic, APIC_ICR2)); |
2320 | kvm_lapic_set_reg(apic, APIC_ICR, val); |
2321 | break; |
2322 | case APIC_ICR2: |
2323 | if (apic_x2apic_mode(apic)) |
2324 | ret = 1; |
2325 | else |
			kvm_lapic_set_reg(apic, APIC_ICR2, val & 0xff000000);
2327 | break; |
2328 | |
2329 | case APIC_LVT0: |
		apic_manage_nmi_watchdog(apic, val);
2331 | fallthrough; |
2332 | case APIC_LVTTHMR: |
2333 | case APIC_LVTPC: |
2334 | case APIC_LVT1: |
2335 | case APIC_LVTERR: |
2336 | case APIC_LVTCMCI: { |
		u32 index = get_lvt_index(reg);

		if (!kvm_lapic_lvt_supported(apic, index)) {
			ret = 1;
			break;
		}
		if (!kvm_apic_sw_enabled(apic))
			val |= APIC_LVT_MASKED;
		val &= apic_lvt_mask[index];
		kvm_lapic_set_reg(apic, reg, val);
2346 | break; |
2347 | } |
2348 | |
2349 | case APIC_LVTT: |
2350 | if (!kvm_apic_sw_enabled(apic)) |
2351 | val |= APIC_LVT_MASKED; |
2352 | val &= (apic_lvt_mask[0] | apic->lapic_timer.timer_mode_mask); |
2353 | kvm_lapic_set_reg(apic, APIC_LVTT, val); |
2354 | apic_update_lvtt(apic); |
2355 | break; |
2356 | |
2357 | case APIC_TMICT: |
2358 | if (apic_lvtt_tscdeadline(apic)) |
2359 | break; |
2360 | |
2361 | cancel_apic_timer(apic); |
2362 | kvm_lapic_set_reg(apic, APIC_TMICT, val); |
2363 | start_apic_timer(apic); |
2364 | break; |
2365 | |
2366 | case APIC_TDCR: { |
2367 | uint32_t old_divisor = apic->divide_count; |
2368 | |
		kvm_lapic_set_reg(apic, APIC_TDCR, val & 0xb);
		update_divide_count(apic);
		if (apic->divide_count != old_divisor &&
				apic->lapic_timer.period) {
			hrtimer_cancel(&apic->lapic_timer.timer);
2374 | update_target_expiration(apic, old_divisor); |
2375 | restart_apic_timer(apic); |
2376 | } |
2377 | break; |
2378 | } |
2379 | case APIC_ESR: |
2380 | if (apic_x2apic_mode(apic) && val != 0) |
2381 | ret = 1; |
2382 | break; |
2383 | |
2384 | case APIC_SELF_IPI: |
2385 | /* |
2386 | * Self-IPI exists only when x2APIC is enabled. Bits 7:0 hold |
2387 | * the vector, everything else is reserved. |
2388 | */ |
2389 | if (!apic_x2apic_mode(apic) || (val & ~APIC_VECTOR_MASK)) |
2390 | ret = 1; |
2391 | else |
2392 | kvm_apic_send_ipi(apic, APIC_DEST_SELF | val, 0); |
2393 | break; |
2394 | default: |
2395 | ret = 1; |
2396 | break; |
2397 | } |
2398 | |
2399 | /* |
2400 | * Recalculate APIC maps if necessary, e.g. if the software enable bit |
2401 | * was toggled, the APIC ID changed, etc... The maps are marked dirty |
2402 | * on relevant changes, i.e. this is a nop for most writes. |
2403 | */ |
	kvm_recalculate_apic_map(apic->vcpu->kvm);
2405 | |
2406 | return ret; |
2407 | } |
2408 | |
2409 | static int apic_mmio_write(struct kvm_vcpu *vcpu, struct kvm_io_device *this, |
2410 | gpa_t address, int len, const void *data) |
2411 | { |
	struct kvm_lapic *apic = to_lapic(this);
	unsigned int offset = address - apic->base_address;
	u32 val;

	if (!apic_mmio_in_range(apic, address))
		return -EOPNOTSUPP;

	if (!kvm_apic_hw_enabled(apic) || apic_x2apic_mode(apic)) {
		if (!kvm_check_has_quirk(vcpu->kvm,
					 KVM_X86_QUIRK_LAPIC_MMIO_HOLE))
2422 | return -EOPNOTSUPP; |
2423 | |
2424 | return 0; |
2425 | } |
2426 | |
2427 | /* |
2428 | * APIC register must be aligned on 128-bits boundary. |
2429 | * 32/64/128 bits registers must be accessed thru 32 bits. |
2430 | * Refer SDM 8.4.1 |
2431 | */ |
2432 | if (len != 4 || (offset & 0xf)) |
2433 | return 0; |
2434 | |
2435 | val = *(u32*)data; |
2436 | |
	kvm_lapic_reg_write(apic, offset & 0xff0, val);
2438 | |
2439 | return 0; |
2440 | } |
2441 | |
2442 | void kvm_lapic_set_eoi(struct kvm_vcpu *vcpu) |
2443 | { |
	kvm_lapic_reg_write(vcpu->arch.apic, APIC_EOI, 0);
2445 | } |
2446 | EXPORT_SYMBOL_GPL(kvm_lapic_set_eoi); |
2447 | |
2448 | /* emulate APIC access in a trap manner */ |
2449 | void kvm_apic_write_nodecode(struct kvm_vcpu *vcpu, u32 offset) |
2450 | { |
2451 | struct kvm_lapic *apic = vcpu->arch.apic; |
2452 | |
2453 | /* |
	 * ICR is a single 64-bit register when x2APIC is enabled; all other
	 * registers hold 32-bit values. For legacy xAPIC, ICR writes need to
2456 | * go down the common path to get the upper half from ICR2. |
2457 | * |
2458 | * Note, using the write helpers may incur an unnecessary write to the |
2459 | * virtual APIC state, but KVM needs to conditionally modify the value |
2460 | * in certain cases, e.g. to clear the ICR busy bit. The cost of extra |
2461 | * conditional branches is likely a wash relative to the cost of the |
	 * maybe-unnecessary write, and both are in the noise anyways.
2463 | */ |
2464 | if (apic_x2apic_mode(apic) && offset == APIC_ICR) |
		kvm_x2apic_icr_write(apic, kvm_lapic_get_reg64(apic, APIC_ICR));
	else
		kvm_lapic_reg_write(apic, offset, kvm_lapic_get_reg(apic, offset));
2468 | } |
2469 | EXPORT_SYMBOL_GPL(kvm_apic_write_nodecode); |
2470 | |
2471 | void kvm_free_lapic(struct kvm_vcpu *vcpu) |
2472 | { |
2473 | struct kvm_lapic *apic = vcpu->arch.apic; |
2474 | |
2475 | if (!vcpu->arch.apic) { |
2476 | static_branch_dec(&kvm_has_noapic_vcpu); |
2477 | return; |
2478 | } |
2479 | |
	hrtimer_cancel(&apic->lapic_timer.timer);
2481 | |
2482 | if (!(vcpu->arch.apic_base & MSR_IA32_APICBASE_ENABLE)) |
2483 | static_branch_slow_dec_deferred(&apic_hw_disabled); |
2484 | |
2485 | if (!apic->sw_enabled) |
2486 | static_branch_slow_dec_deferred(&apic_sw_disabled); |
2487 | |
2488 | if (apic->regs) |
2489 | free_page((unsigned long)apic->regs); |
2490 | |
	kfree(apic);
2492 | } |
2493 | |
2494 | /* |
2495 | *---------------------------------------------------------------------- |
2496 | * LAPIC interface |
2497 | *---------------------------------------------------------------------- |
2498 | */ |
2499 | u64 kvm_get_lapic_tscdeadline_msr(struct kvm_vcpu *vcpu) |
2500 | { |
2501 | struct kvm_lapic *apic = vcpu->arch.apic; |
2502 | |
2503 | if (!kvm_apic_present(vcpu) || !apic_lvtt_tscdeadline(apic)) |
2504 | return 0; |
2505 | |
2506 | return apic->lapic_timer.tscdeadline; |
2507 | } |
2508 | |
2509 | void kvm_set_lapic_tscdeadline_msr(struct kvm_vcpu *vcpu, u64 data) |
2510 | { |
2511 | struct kvm_lapic *apic = vcpu->arch.apic; |
2512 | |
2513 | if (!kvm_apic_present(vcpu) || !apic_lvtt_tscdeadline(apic)) |
2514 | return; |
2515 | |
	hrtimer_cancel(&apic->lapic_timer.timer);
2517 | apic->lapic_timer.tscdeadline = data; |
2518 | start_apic_timer(apic); |
2519 | } |
2520 | |
2521 | void kvm_lapic_set_tpr(struct kvm_vcpu *vcpu, unsigned long cr8) |
2522 | { |
	apic_set_tpr(vcpu->arch.apic, (cr8 & 0x0f) << 4);
2524 | } |
2525 | |
2526 | u64 kvm_lapic_get_cr8(struct kvm_vcpu *vcpu) |
2527 | { |
2528 | u64 tpr; |
2529 | |
	tpr = (u64) kvm_lapic_get_reg(vcpu->arch.apic, APIC_TASKPRI);
2531 | |
2532 | return (tpr & 0xf0) >> 4; |
2533 | } |
2534 | |
2535 | void kvm_lapic_set_base(struct kvm_vcpu *vcpu, u64 value) |
2536 | { |
2537 | u64 old_value = vcpu->arch.apic_base; |
2538 | struct kvm_lapic *apic = vcpu->arch.apic; |
2539 | |
2540 | vcpu->arch.apic_base = value; |
2541 | |
2542 | if ((old_value ^ value) & MSR_IA32_APICBASE_ENABLE) |
2543 | kvm_update_cpuid_runtime(vcpu); |
2544 | |
2545 | if (!apic) |
2546 | return; |
2547 | |
2548 | /* update jump label if enable bit changes */ |
2549 | if ((old_value ^ value) & MSR_IA32_APICBASE_ENABLE) { |
2550 | if (value & MSR_IA32_APICBASE_ENABLE) { |
			kvm_apic_set_xapic_id(apic, vcpu->vcpu_id);
2552 | static_branch_slow_dec_deferred(&apic_hw_disabled); |
2553 | /* Check if there are APF page ready requests pending */ |
2554 | kvm_make_request(KVM_REQ_APF_READY, vcpu); |
2555 | } else { |
2556 | static_branch_inc(&apic_hw_disabled.key); |
			atomic_set_release(&apic->vcpu->kvm->arch.apic_map_dirty, DIRTY);
2558 | } |
2559 | } |
2560 | |
2561 | if ((old_value ^ value) & X2APIC_ENABLE) { |
2562 | if (value & X2APIC_ENABLE) |
			kvm_apic_set_x2apic_id(apic, vcpu->vcpu_id);
		else if (value & MSR_IA32_APICBASE_ENABLE)
			kvm_apic_set_xapic_id(apic, vcpu->vcpu_id);
2566 | } |
2567 | |
2568 | if ((old_value ^ value) & (MSR_IA32_APICBASE_ENABLE | X2APIC_ENABLE)) { |
2569 | kvm_make_request(KVM_REQ_APICV_UPDATE, vcpu); |
2570 | static_call_cond(kvm_x86_set_virtual_apic_mode)(vcpu); |
2571 | } |
2572 | |
2573 | apic->base_address = apic->vcpu->arch.apic_base & |
2574 | MSR_IA32_APICBASE_BASE; |
2575 | |
2576 | if ((value & MSR_IA32_APICBASE_ENABLE) && |
2577 | apic->base_address != APIC_DEFAULT_PHYS_BASE) { |
		kvm_set_apicv_inhibit(apic->vcpu->kvm,
				      APICV_INHIBIT_REASON_APIC_BASE_MODIFIED);
2580 | } |
2581 | } |
2582 | |
2583 | void kvm_apic_update_apicv(struct kvm_vcpu *vcpu) |
2584 | { |
2585 | struct kvm_lapic *apic = vcpu->arch.apic; |
2586 | |
2587 | if (apic->apicv_active) { |
2588 | /* irr_pending is always true when apicv is activated. */ |
2589 | apic->irr_pending = true; |
2590 | apic->isr_count = 1; |
2591 | } else { |
2592 | /* |
2593 | * Don't clear irr_pending, searching the IRR can race with |
2594 | * updates from the CPU as APICv is still active from hardware's |
2595 | * perspective. The flag will be cleared as appropriate when |
2596 | * KVM injects the interrupt. |
2597 | */ |
		apic->isr_count = count_vectors(apic->regs + APIC_ISR);
2599 | } |
2600 | apic->highest_isr_cache = -1; |
2601 | } |
2602 | |
2603 | int kvm_alloc_apic_access_page(struct kvm *kvm) |
2604 | { |
2605 | struct page *page; |
2606 | void __user *hva; |
2607 | int ret = 0; |
2608 | |
2609 | mutex_lock(&kvm->slots_lock); |
2610 | if (kvm->arch.apic_access_memslot_enabled || |
2611 | kvm->arch.apic_access_memslot_inhibited) |
2612 | goto out; |
2613 | |
2614 | hva = __x86_set_memory_region(kvm, APIC_ACCESS_PAGE_PRIVATE_MEMSLOT, |
2615 | APIC_DEFAULT_PHYS_BASE, PAGE_SIZE); |
	if (IS_ERR(hva)) {
		ret = PTR_ERR(hva);
2618 | goto out; |
2619 | } |
2620 | |
2621 | page = gfn_to_page(kvm, APIC_DEFAULT_PHYS_BASE >> PAGE_SHIFT); |
2622 | if (is_error_page(page)) { |
2623 | ret = -EFAULT; |
2624 | goto out; |
2625 | } |
2626 | |
2627 | /* |
2628 | * Do not pin the page in memory, so that memory hot-unplug |
2629 | * is able to migrate it. |
2630 | */ |
2631 | put_page(page); |
2632 | kvm->arch.apic_access_memslot_enabled = true; |
2633 | out: |
	mutex_unlock(&kvm->slots_lock);
2635 | return ret; |
2636 | } |
2637 | EXPORT_SYMBOL_GPL(kvm_alloc_apic_access_page); |
2638 | |
2639 | void kvm_inhibit_apic_access_page(struct kvm_vcpu *vcpu) |
2640 | { |
2641 | struct kvm *kvm = vcpu->kvm; |
2642 | |
2643 | if (!kvm->arch.apic_access_memslot_enabled) |
2644 | return; |
2645 | |
2646 | kvm_vcpu_srcu_read_unlock(vcpu); |
2647 | |
2648 | mutex_lock(&kvm->slots_lock); |
2649 | |
2650 | if (kvm->arch.apic_access_memslot_enabled) { |
		__x86_set_memory_region(kvm, APIC_ACCESS_PAGE_PRIVATE_MEMSLOT, 0, 0);
2652 | /* |
2653 | * Clear "enabled" after the memslot is deleted so that a |
2654 | * different vCPU doesn't get a false negative when checking |
2655 | * the flag out of slots_lock. No additional memory barrier is |
2656 | * needed as modifying memslots requires waiting other vCPUs to |
2657 | * drop SRCU (see above), and false positives are ok as the |
2658 | * flag is rechecked after acquiring slots_lock. |
2659 | */ |
2660 | kvm->arch.apic_access_memslot_enabled = false; |
2661 | |
2662 | /* |
2663 | * Mark the memslot as inhibited to prevent reallocating the |
2664 | * memslot during vCPU creation, e.g. if a vCPU is hotplugged. |
2665 | */ |
2666 | kvm->arch.apic_access_memslot_inhibited = true; |
2667 | } |
2668 | |
	mutex_unlock(&kvm->slots_lock);
2670 | |
2671 | kvm_vcpu_srcu_read_lock(vcpu); |
2672 | } |
2673 | |
2674 | void kvm_lapic_reset(struct kvm_vcpu *vcpu, bool init_event) |
2675 | { |
2676 | struct kvm_lapic *apic = vcpu->arch.apic; |
2677 | u64 msr_val; |
2678 | int i; |
2679 | |
2680 | static_call_cond(kvm_x86_apicv_pre_state_restore)(vcpu); |
2681 | |
2682 | if (!init_event) { |
2683 | msr_val = APIC_DEFAULT_PHYS_BASE | MSR_IA32_APICBASE_ENABLE; |
2684 | if (kvm_vcpu_is_reset_bsp(vcpu)) |
2685 | msr_val |= MSR_IA32_APICBASE_BSP; |
		kvm_lapic_set_base(vcpu, msr_val);
2687 | } |
2688 | |
2689 | if (!apic) |
2690 | return; |
2691 | |
2692 | /* Stop the timer in case it's a reset to an active apic */ |
	hrtimer_cancel(&apic->lapic_timer.timer);
2694 | |
2695 | /* The xAPIC ID is set at RESET even if the APIC was already enabled. */ |
2696 | if (!init_event) |
		kvm_apic_set_xapic_id(apic, vcpu->vcpu_id);
	kvm_apic_set_version(apic->vcpu);
2699 | |
2700 | for (i = 0; i < apic->nr_lvt_entries; i++) |
2701 | kvm_lapic_set_reg(apic, APIC_LVTx(i), APIC_LVT_MASKED); |
2702 | apic_update_lvtt(apic); |
2703 | if (kvm_vcpu_is_reset_bsp(vcpu) && |
	    kvm_check_has_quirk(vcpu->kvm, KVM_X86_QUIRK_LINT0_REENABLED))
2705 | kvm_lapic_set_reg(apic, APIC_LVT0, |
2706 | SET_APIC_DELIVERY_MODE(0, APIC_MODE_EXTINT)); |
	apic_manage_nmi_watchdog(apic, kvm_lapic_get_reg(apic, APIC_LVT0));

	kvm_apic_set_dfr(apic, 0xffffffffU);
	apic_set_spiv(apic, 0xff);
	kvm_lapic_set_reg(apic, APIC_TASKPRI, 0);
	if (!apic_x2apic_mode(apic))
		kvm_apic_set_ldr(apic, 0);
	kvm_lapic_set_reg(apic, APIC_ESR, 0);
	if (!apic_x2apic_mode(apic)) {
		kvm_lapic_set_reg(apic, APIC_ICR, 0);
		kvm_lapic_set_reg(apic, APIC_ICR2, 0);
	} else {
		kvm_lapic_set_reg64(apic, APIC_ICR, 0);
	}
	kvm_lapic_set_reg(apic, APIC_TDCR, 0);
	kvm_lapic_set_reg(apic, APIC_TMICT, 0);
	for (i = 0; i < 8; i++) {
		kvm_lapic_set_reg(apic, APIC_IRR + 0x10 * i, 0);
		kvm_lapic_set_reg(apic, APIC_ISR + 0x10 * i, 0);
		kvm_lapic_set_reg(apic, APIC_TMR + 0x10 * i, 0);
	}
	kvm_apic_update_apicv(vcpu);
	update_divide_count(apic);
	atomic_set(&apic->lapic_timer.pending, 0);
2731 | |
2732 | vcpu->arch.pv_eoi.msr_val = 0; |
2733 | apic_update_ppr(apic); |
2734 | if (apic->apicv_active) { |
2735 | static_call_cond(kvm_x86_apicv_post_state_restore)(vcpu); |
2736 | static_call_cond(kvm_x86_hwapic_irr_update)(vcpu, -1); |
2737 | static_call_cond(kvm_x86_hwapic_isr_update)(-1); |
2738 | } |
2739 | |
2740 | vcpu->arch.apic_arb_prio = 0; |
2741 | vcpu->arch.apic_attention = 0; |
2742 | |
	kvm_recalculate_apic_map(vcpu->kvm);
2744 | } |
2745 | |
2746 | /* |
2747 | *---------------------------------------------------------------------- |
2748 | * timer interface |
2749 | *---------------------------------------------------------------------- |
2750 | */ |
2751 | |
2752 | static bool lapic_is_periodic(struct kvm_lapic *apic) |
2753 | { |
2754 | return apic_lvtt_period(apic); |
2755 | } |
2756 | |
2757 | int apic_has_pending_timer(struct kvm_vcpu *vcpu) |
2758 | { |
2759 | struct kvm_lapic *apic = vcpu->arch.apic; |
2760 | |
2761 | if (apic_enabled(apic) && apic_lvt_enabled(apic, APIC_LVTT)) |
		return atomic_read(&apic->lapic_timer.pending);
2763 | |
2764 | return 0; |
2765 | } |
2766 | |
2767 | int kvm_apic_local_deliver(struct kvm_lapic *apic, int lvt_type) |
2768 | { |
	u32 reg = kvm_lapic_get_reg(apic, lvt_type);
2770 | int vector, mode, trig_mode; |
2771 | int r; |
2772 | |
2773 | if (kvm_apic_hw_enabled(apic) && !(reg & APIC_LVT_MASKED)) { |
2774 | vector = reg & APIC_VECTOR_MASK; |
2775 | mode = reg & APIC_MODE_MASK; |
2776 | trig_mode = reg & APIC_LVT_LEVEL_TRIGGER; |
2777 | |
		r = __apic_accept_irq(apic, mode, vector, 1, trig_mode, NULL);
		if (r && lvt_type == APIC_LVTPC &&
		    guest_cpuid_is_intel_compatible(apic->vcpu))
			kvm_lapic_set_reg(apic, APIC_LVTPC, reg | APIC_LVT_MASKED);
2782 | return r; |
2783 | } |
2784 | return 0; |
2785 | } |
2786 | |
2787 | void kvm_apic_nmi_wd_deliver(struct kvm_vcpu *vcpu) |
2788 | { |
2789 | struct kvm_lapic *apic = vcpu->arch.apic; |
2790 | |
2791 | if (apic) |
2792 | kvm_apic_local_deliver(apic, APIC_LVT0); |
2793 | } |
2794 | |
2795 | static const struct kvm_io_device_ops apic_mmio_ops = { |
2796 | .read = apic_mmio_read, |
2797 | .write = apic_mmio_write, |
2798 | }; |
2799 | |
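/*
 * hrtimer callback for the emulated LAPIC timer: flag/deliver the expiry,
 * and for periodic mode re-arm by advancing the expiry one period and
 * returning HRTIMER_RESTART.
 */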
2800 | static enum hrtimer_restart apic_timer_fn(struct hrtimer *data) |
2801 | { |
2802 | struct kvm_timer *ktimer = container_of(data, struct kvm_timer, timer); |
2803 | struct kvm_lapic *apic = container_of(ktimer, struct kvm_lapic, lapic_timer); |
2804 | |
	apic_timer_expired(apic, true);
2806 | |
2807 | if (lapic_is_periodic(apic)) { |
2808 | advance_periodic_target_expiration(apic); |
		hrtimer_add_expires_ns(&ktimer->timer, ktimer->period);
2810 | return HRTIMER_RESTART; |
2811 | } else |
2812 | return HRTIMER_NORESTART; |
2813 | } |
2814 | |
2815 | int kvm_create_lapic(struct kvm_vcpu *vcpu, int timer_advance_ns) |
2816 | { |
2817 | struct kvm_lapic *apic; |
2818 | |
2819 | ASSERT(vcpu != NULL); |
2820 | |
	if (!irqchip_in_kernel(vcpu->kvm)) {
2822 | static_branch_inc(&kvm_has_noapic_vcpu); |
2823 | return 0; |
2824 | } |
2825 | |
	apic = kzalloc(sizeof(*apic), GFP_KERNEL_ACCOUNT);
2827 | if (!apic) |
2828 | goto nomem; |
2829 | |
2830 | vcpu->arch.apic = apic; |
2831 | |
2832 | if (kvm_x86_ops.alloc_apic_backing_page) |
2833 | apic->regs = static_call(kvm_x86_alloc_apic_backing_page)(vcpu); |
2834 | else |
2835 | apic->regs = (void *)get_zeroed_page(GFP_KERNEL_ACCOUNT); |
2836 | if (!apic->regs) { |
		printk(KERN_ERR "malloc apic regs error for vcpu %x\n",
		       vcpu->vcpu_id);
2839 | goto nomem_free_apic; |
2840 | } |
2841 | apic->vcpu = vcpu; |
2842 | |
2843 | apic->nr_lvt_entries = kvm_apic_calc_nr_lvt_entries(vcpu); |
2844 | |
	hrtimer_init(&apic->lapic_timer.timer, CLOCK_MONOTONIC,
		     HRTIMER_MODE_ABS_HARD);
2847 | apic->lapic_timer.timer.function = apic_timer_fn; |
2848 | if (timer_advance_ns == -1) { |
2849 | apic->lapic_timer.timer_advance_ns = LAPIC_TIMER_ADVANCE_NS_INIT; |
2850 | lapic_timer_advance_dynamic = true; |
2851 | } else { |
2852 | apic->lapic_timer.timer_advance_ns = timer_advance_ns; |
2853 | lapic_timer_advance_dynamic = false; |
2854 | } |
2855 | |
2856 | /* |
2857 | * Stuff the APIC ENABLE bit in lieu of temporarily incrementing |
2858 | * apic_hw_disabled; the full RESET value is set by kvm_lapic_reset(). |
2859 | */ |
2860 | vcpu->arch.apic_base = MSR_IA32_APICBASE_ENABLE; |
2861 | static_branch_inc(&apic_sw_disabled.key); /* sw disabled at reset */ |
	kvm_iodevice_init(&apic->dev, &apic_mmio_ops);
2863 | |
2864 | /* |
2865 | * Defer evaluating inhibits until the vCPU is first run, as this vCPU |
2866 | * will not get notified of any changes until this vCPU is visible to |
2867 | * other vCPUs (marked online and added to the set of vCPUs). |
2868 | * |
	 * Opportunistically mark APICv active as VMX in particular is highly
2870 | * unlikely to have inhibits. Ignore the current per-VM APICv state so |
2871 | * that vCPU creation is guaranteed to run with a deterministic value, |
2872 | * the request will ensure the vCPU gets the correct state before VM-Entry. |
2873 | */ |
2874 | if (enable_apicv) { |
2875 | apic->apicv_active = true; |
2876 | kvm_make_request(KVM_REQ_APICV_UPDATE, vcpu); |
2877 | } |
2878 | |
2879 | return 0; |
2880 | nomem_free_apic: |
2881 | kfree(objp: apic); |
2882 | vcpu->arch.apic = NULL; |
2883 | nomem: |
2884 | return -ENOMEM; |
2885 | } |
2886 | |
2887 | int kvm_apic_has_interrupt(struct kvm_vcpu *vcpu) |
2888 | { |
2889 | struct kvm_lapic *apic = vcpu->arch.apic; |
2890 | u32 ppr; |
2891 | |
2892 | if (!kvm_apic_present(vcpu)) |
2893 | return -1; |
2894 | |
	__apic_update_ppr(apic, &ppr);
2896 | return apic_has_interrupt_for_ppr(apic, ppr); |
2897 | } |
2898 | EXPORT_SYMBOL_GPL(kvm_apic_has_interrupt); |
2899 | |
2900 | int kvm_apic_accept_pic_intr(struct kvm_vcpu *vcpu) |
2901 | { |
	u32 lvt0 = kvm_lapic_get_reg(vcpu->arch.apic, APIC_LVT0);

	if (!kvm_apic_hw_enabled(vcpu->arch.apic))
2905 | return 1; |
2906 | if ((lvt0 & APIC_LVT_MASKED) == 0 && |
2907 | GET_APIC_DELIVERY_MODE(lvt0) == APIC_MODE_EXTINT) |
2908 | return 1; |
2909 | return 0; |
2910 | } |
2911 | |
2912 | void kvm_inject_apic_timer_irqs(struct kvm_vcpu *vcpu) |
2913 | { |
2914 | struct kvm_lapic *apic = vcpu->arch.apic; |
2915 | |
	if (atomic_read(&apic->lapic_timer.pending) > 0) {
		kvm_apic_inject_pending_timer_irqs(apic);
		atomic_set(&apic->lapic_timer.pending, 0);
2919 | } |
2920 | } |
2921 | |
2922 | int kvm_get_apic_interrupt(struct kvm_vcpu *vcpu) |
2923 | { |
2924 | int vector = kvm_apic_has_interrupt(vcpu); |
2925 | struct kvm_lapic *apic = vcpu->arch.apic; |
2926 | u32 ppr; |
2927 | |
2928 | if (vector == -1) |
2929 | return -1; |
2930 | |
2931 | /* |
2932 | * We get here even with APIC virtualization enabled, if doing |
2933 | * nested virtualization and L1 runs with the "acknowledge interrupt |
2934 | * on exit" mode. Then we cannot inject the interrupt via RVI, |
2935 | * because the process would deliver it through the IDT. |
2936 | */ |
2937 | |
	apic_clear_irr(vector, apic);
2939 | if (kvm_hv_synic_auto_eoi_set(vcpu, vector)) { |
2940 | /* |
2941 | * For auto-EOI interrupts, there might be another pending |
2942 | * interrupt above PPR, so check whether to raise another |
2943 | * KVM_REQ_EVENT. |
2944 | */ |
2945 | apic_update_ppr(apic); |
2946 | } else { |
2947 | /* |
2948 | * For normal interrupts, PPR has been raised and there cannot |
2949 | * be a higher-priority pending interrupt---except if there was |
2950 | * a concurrent interrupt injection, but that would have |
2951 | * triggered KVM_REQ_EVENT already. |
2952 | */ |
		apic_set_isr(vector, apic);
		__apic_update_ppr(apic, &ppr);
2955 | } |
2956 | |
2957 | return vector; |
2958 | } |
2959 | |
2960 | static int kvm_apic_state_fixup(struct kvm_vcpu *vcpu, |
2961 | struct kvm_lapic_state *s, bool set) |
2962 | { |
	if (apic_x2apic_mode(vcpu->arch.apic)) {
2964 | u32 *id = (u32 *)(s->regs + APIC_ID); |
2965 | u32 *ldr = (u32 *)(s->regs + APIC_LDR); |
2966 | u64 icr; |
2967 | |
2968 | if (vcpu->kvm->arch.x2apic_format) { |
2969 | if (*id != vcpu->vcpu_id) |
2970 | return -EINVAL; |
2971 | } else { |
2972 | if (set) |
2973 | *id >>= 24; |
2974 | else |
2975 | *id <<= 24; |
2976 | } |
2977 | |
2978 | /* |
2979 | * In x2APIC mode, the LDR is fixed and based on the id. And |
2980 | * ICR is internally a single 64-bit register, but needs to be |
2981 | * split to ICR+ICR2 in userspace for backwards compatibility. |
2982 | */ |
2983 | if (set) { |
			*ldr = kvm_apic_calc_x2apic_ldr(*id);

			icr = __kvm_lapic_get_reg(s->regs, APIC_ICR) |
			      (u64)__kvm_lapic_get_reg(s->regs, APIC_ICR2) << 32;
			__kvm_lapic_set_reg64(s->regs, APIC_ICR, icr);
		} else {
			icr = __kvm_lapic_get_reg64(s->regs, APIC_ICR);
			__kvm_lapic_set_reg(s->regs, APIC_ICR2, icr >> 32);
2992 | } |
2993 | } |
2994 | |
2995 | return 0; |
2996 | } |
2997 | |
2998 | int kvm_apic_get_state(struct kvm_vcpu *vcpu, struct kvm_lapic_state *s) |
2999 | { |
3000 | memcpy(s->regs, vcpu->arch.apic->regs, sizeof(*s)); |
3001 | |
3002 | /* |
3003 | * Get calculated timer current count for remaining timer period (if |
3004 | * any) and store it in the returned register set. |
3005 | */ |
	__kvm_lapic_set_reg(s->regs, APIC_TMCCT,
			    __apic_read(vcpu->arch.apic, APIC_TMCCT));

	return kvm_apic_state_fixup(vcpu, s, false);
3010 | } |
3011 | |
3012 | int kvm_apic_set_state(struct kvm_vcpu *vcpu, struct kvm_lapic_state *s) |
3013 | { |
3014 | struct kvm_lapic *apic = vcpu->arch.apic; |
3015 | int r; |
3016 | |
3017 | static_call_cond(kvm_x86_apicv_pre_state_restore)(vcpu); |
3018 | |
	kvm_lapic_set_base(vcpu, vcpu->arch.apic_base);
	/* set SPIV separately to get count of SW disabled APICs right */
	apic_set_spiv(apic, *((u32 *)(s->regs + APIC_SPIV)));

	r = kvm_apic_state_fixup(vcpu, s, true);
	if (r) {
		kvm_recalculate_apic_map(vcpu->kvm);
3026 | return r; |
3027 | } |
3028 | memcpy(vcpu->arch.apic->regs, s->regs, sizeof(*s)); |
3029 | |
	atomic_set_release(&apic->vcpu->kvm->arch.apic_map_dirty, DIRTY);
	kvm_recalculate_apic_map(vcpu->kvm);
3032 | kvm_apic_set_version(vcpu); |
3033 | |
3034 | apic_update_ppr(apic); |
3035 | cancel_apic_timer(apic); |
3036 | apic->lapic_timer.expired_tscdeadline = 0; |
3037 | apic_update_lvtt(apic); |
	apic_manage_nmi_watchdog(apic, kvm_lapic_get_reg(apic, APIC_LVT0));
3039 | update_divide_count(apic); |
3040 | __start_apic_timer(apic, APIC_TMCCT); |
	kvm_lapic_set_reg(apic, APIC_TMCCT, 0);
3042 | kvm_apic_update_apicv(vcpu); |
3043 | if (apic->apicv_active) { |
3044 | static_call_cond(kvm_x86_apicv_post_state_restore)(vcpu); |
3045 | static_call_cond(kvm_x86_hwapic_irr_update)(vcpu, apic_find_highest_irr(apic)); |
3046 | static_call_cond(kvm_x86_hwapic_isr_update)(apic_find_highest_isr(apic)); |
3047 | } |
3048 | kvm_make_request(KVM_REQ_EVENT, vcpu); |
	if (ioapic_in_kernel(vcpu->kvm))
3050 | kvm_rtc_eoi_tracking_restore_one(vcpu); |
3051 | |
3052 | vcpu->arch.apic_arb_prio = 0; |
3053 | |
3054 | return 0; |
3055 | } |
3056 | |
3057 | void __kvm_migrate_apic_timer(struct kvm_vcpu *vcpu) |
3058 | { |
3059 | struct hrtimer *timer; |
3060 | |
3061 | if (!lapic_in_kernel(vcpu) || |
3062 | kvm_can_post_timer_interrupt(vcpu)) |
3063 | return; |
3064 | |
3065 | timer = &vcpu->arch.apic->lapic_timer.timer; |
3066 | if (hrtimer_cancel(timer)) |
		hrtimer_start_expires(timer, HRTIMER_MODE_ABS_HARD);
3068 | } |
3069 | |
3070 | /* |
3071 | * apic_sync_pv_eoi_from_guest - called on vmexit or cancel interrupt |
3072 | * |
3073 | * Detect whether guest triggered PV EOI since the |
 * last entry. If yes, set EOI on guest's behalf.
3075 | * Clear PV EOI in guest memory in any case. |
3076 | */ |
3077 | static void apic_sync_pv_eoi_from_guest(struct kvm_vcpu *vcpu, |
3078 | struct kvm_lapic *apic) |
3079 | { |
3080 | int vector; |
3081 | /* |
3082 | * PV EOI state is derived from KVM_APIC_PV_EOI_PENDING in host |
3083 | * and KVM_PV_EOI_ENABLED in guest memory as follows: |
3084 | * |
3085 | * KVM_APIC_PV_EOI_PENDING is unset: |
3086 | * -> host disabled PV EOI. |
3087 | * KVM_APIC_PV_EOI_PENDING is set, KVM_PV_EOI_ENABLED is set: |
3088 | * -> host enabled PV EOI, guest did not execute EOI yet. |
3089 | * KVM_APIC_PV_EOI_PENDING is set, KVM_PV_EOI_ENABLED is unset: |
3090 | * -> host enabled PV EOI, guest executed EOI. |
3091 | */ |
3092 | BUG_ON(!pv_eoi_enabled(vcpu)); |
3093 | |
3094 | if (pv_eoi_test_and_clr_pending(vcpu)) |
3095 | return; |
3096 | vector = apic_set_eoi(apic); |
3097 | trace_kvm_pv_eoi(apic, vector); |
3098 | } |
3099 | |
3100 | void kvm_lapic_sync_from_vapic(struct kvm_vcpu *vcpu) |
3101 | { |
3102 | u32 data; |
3103 | |
3104 | if (test_bit(KVM_APIC_PV_EOI_PENDING, &vcpu->arch.apic_attention)) |
		apic_sync_pv_eoi_from_guest(vcpu, vcpu->arch.apic);
3106 | |
3107 | if (!test_bit(KVM_APIC_CHECK_VAPIC, &vcpu->arch.apic_attention)) |
3108 | return; |
3109 | |
	if (kvm_read_guest_cached(vcpu->kvm, &vcpu->arch.apic->vapic_cache, &data,
				  sizeof(u32)))
3112 | return; |
3113 | |
	apic_set_tpr(vcpu->arch.apic, data & 0xff);
3115 | } |
3116 | |
3117 | /* |
3118 | * apic_sync_pv_eoi_to_guest - called before vmentry |
3119 | * |
3120 | * Detect whether it's safe to enable PV EOI and |
3121 | * if yes do so. |
3122 | */ |
3123 | static void apic_sync_pv_eoi_to_guest(struct kvm_vcpu *vcpu, |
3124 | struct kvm_lapic *apic) |
3125 | { |
3126 | if (!pv_eoi_enabled(vcpu) || |
3127 | /* IRR set or many bits in ISR: could be nested. */ |
3128 | apic->irr_pending || |
3129 | /* Cache not set: could be safe but we don't bother. */ |
3130 | apic->highest_isr_cache == -1 || |
3131 | /* Need EOI to update ioapic. */ |
	    kvm_ioapic_handles_vector(apic, apic->highest_isr_cache)) {
3133 | /* |
3134 | * PV EOI was disabled by apic_sync_pv_eoi_from_guest |
3135 | * so we need not do anything here. |
3136 | */ |
3137 | return; |
3138 | } |
3139 | |
	pv_eoi_set_pending(apic->vcpu);
3141 | } |
3142 | |
3143 | void kvm_lapic_sync_to_vapic(struct kvm_vcpu *vcpu) |
3144 | { |
3145 | u32 data, tpr; |
3146 | int max_irr, max_isr; |
3147 | struct kvm_lapic *apic = vcpu->arch.apic; |
3148 | |
3149 | apic_sync_pv_eoi_to_guest(vcpu, apic); |
3150 | |
3151 | if (!test_bit(KVM_APIC_CHECK_VAPIC, &vcpu->arch.apic_attention)) |
3152 | return; |
3153 | |
3154 | tpr = kvm_lapic_get_reg(apic, APIC_TASKPRI) & 0xff; |
3155 | max_irr = apic_find_highest_irr(apic); |
3156 | if (max_irr < 0) |
3157 | max_irr = 0; |
3158 | max_isr = apic_find_highest_isr(apic); |
3159 | if (max_isr < 0) |
3160 | max_isr = 0; |
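	/* vAPIC scratch layout: byte 0 = TPR, byte 1 = ISR & 0xf0, byte 3 = highest IRR. */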
3161 | data = (tpr & 0xff) | ((max_isr & 0xf0) << 8) | (max_irr << 24); |
3162 | |
	kvm_write_guest_cached(vcpu->kvm, &vcpu->arch.apic->vapic_cache, &data,
			       sizeof(u32));
3165 | } |
3166 | |
3167 | int kvm_lapic_set_vapic_addr(struct kvm_vcpu *vcpu, gpa_t vapic_addr) |
3168 | { |
3169 | if (vapic_addr) { |
		if (kvm_gfn_to_hva_cache_init(vcpu->kvm,
					      &vcpu->arch.apic->vapic_cache,
					      vapic_addr, sizeof(u32)))
3173 | return -EINVAL; |
3174 | __set_bit(KVM_APIC_CHECK_VAPIC, &vcpu->arch.apic_attention); |
3175 | } else { |
3176 | __clear_bit(KVM_APIC_CHECK_VAPIC, &vcpu->arch.apic_attention); |
3177 | } |
3178 | |
3179 | vcpu->arch.apic->vapic_addr = vapic_addr; |
3180 | return 0; |
3181 | } |
3182 | |
3183 | int kvm_x2apic_icr_write(struct kvm_lapic *apic, u64 data) |
3184 | { |
3185 | data &= ~APIC_ICR_BUSY; |
3186 | |
3187 | kvm_apic_send_ipi(apic, (u32)data, (u32)(data >> 32)); |
	kvm_lapic_set_reg64(apic, APIC_ICR, data);
3189 | trace_kvm_apic_write(APIC_ICR, data); |
3190 | return 0; |
3191 | } |
3192 | |
3193 | static int kvm_lapic_msr_read(struct kvm_lapic *apic, u32 reg, u64 *data) |
3194 | { |
3195 | u32 low; |
3196 | |
3197 | if (reg == APIC_ICR) { |
3198 | *data = kvm_lapic_get_reg64(apic, APIC_ICR); |
3199 | return 0; |
3200 | } |
3201 | |
	if (kvm_lapic_reg_read(apic, reg, 4, &low))
3203 | return 1; |
3204 | |
3205 | *data = low; |
3206 | |
3207 | return 0; |
3208 | } |
3209 | |
3210 | static int kvm_lapic_msr_write(struct kvm_lapic *apic, u32 reg, u64 data) |
3211 | { |
3212 | /* |
3213 | * ICR is a 64-bit register in x2APIC mode (and Hyper-V PV vAPIC) and |
3214 | * can be written as such, all other registers remain accessible only |
3215 | * through 32-bit reads/writes. |
3216 | */ |
3217 | if (reg == APIC_ICR) |
3218 | return kvm_x2apic_icr_write(apic, data); |
3219 | |
3220 | /* Bits 63:32 are reserved in all other registers. */ |
3221 | if (data >> 32) |
3222 | return 1; |
3223 | |
	return kvm_lapic_reg_write(apic, reg, (u32)data);
3225 | } |
3226 | |
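/*
 * x2APIC MSRs 0x800 + N map 1:1 onto the xAPIC register file: the offset
 * of the corresponding register is simply N * 16.
 */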
3227 | int kvm_x2apic_msr_write(struct kvm_vcpu *vcpu, u32 msr, u64 data) |
3228 | { |
3229 | struct kvm_lapic *apic = vcpu->arch.apic; |
3230 | u32 reg = (msr - APIC_BASE_MSR) << 4; |
3231 | |
3232 | if (!lapic_in_kernel(vcpu) || !apic_x2apic_mode(apic)) |
3233 | return 1; |
3234 | |
3235 | return kvm_lapic_msr_write(apic, reg, data); |
3236 | } |
3237 | |
3238 | int kvm_x2apic_msr_read(struct kvm_vcpu *vcpu, u32 msr, u64 *data) |
3239 | { |
3240 | struct kvm_lapic *apic = vcpu->arch.apic; |
3241 | u32 reg = (msr - APIC_BASE_MSR) << 4; |
3242 | |
3243 | if (!lapic_in_kernel(vcpu) || !apic_x2apic_mode(apic)) |
3244 | return 1; |
3245 | |
3246 | return kvm_lapic_msr_read(apic, reg, data); |
3247 | } |
3248 | |
3249 | int kvm_hv_vapic_msr_write(struct kvm_vcpu *vcpu, u32 reg, u64 data) |
3250 | { |
3251 | if (!lapic_in_kernel(vcpu)) |
3252 | return 1; |
3253 | |
	return kvm_lapic_msr_write(vcpu->arch.apic, reg, data);
3255 | } |
3256 | |
3257 | int kvm_hv_vapic_msr_read(struct kvm_vcpu *vcpu, u32 reg, u64 *data) |
3258 | { |
3259 | if (!lapic_in_kernel(vcpu)) |
3260 | return 1; |
3261 | |
	return kvm_lapic_msr_read(vcpu->arch.apic, reg, data);
3263 | } |
3264 | |
3265 | int kvm_lapic_set_pv_eoi(struct kvm_vcpu *vcpu, u64 data, unsigned long len) |
3266 | { |
3267 | u64 addr = data & ~KVM_MSR_ENABLED; |
3268 | struct gfn_to_hva_cache *ghc = &vcpu->arch.pv_eoi.data; |
3269 | unsigned long new_len; |
3270 | int ret; |
3271 | |
3272 | if (!IS_ALIGNED(addr, 4)) |
3273 | return 1; |
3274 | |
3275 | if (data & KVM_MSR_ENABLED) { |
3276 | if (addr == ghc->gpa && len <= ghc->len) |
3277 | new_len = ghc->len; |
3278 | else |
3279 | new_len = len; |
3280 | |
		ret = kvm_gfn_to_hva_cache_init(vcpu->kvm, ghc, addr, new_len);
3282 | if (ret) |
3283 | return ret; |
3284 | } |
3285 | |
3286 | vcpu->arch.pv_eoi.msr_val = data; |
3287 | |
3288 | return 0; |
3289 | } |
3290 | |
3291 | int kvm_apic_accept_events(struct kvm_vcpu *vcpu) |
3292 | { |
3293 | struct kvm_lapic *apic = vcpu->arch.apic; |
3294 | u8 sipi_vector; |
3295 | int r; |
3296 | |
3297 | if (!kvm_apic_has_pending_init_or_sipi(vcpu)) |
3298 | return 0; |
3299 | |
3300 | if (is_guest_mode(vcpu)) { |
3301 | r = kvm_check_nested_events(vcpu); |
3302 | if (r < 0) |
3303 | return r == -EBUSY ? 0 : r; |
3304 | /* |
3305 | * Continue processing INIT/SIPI even if a nested VM-Exit |
3306 | * occurred, e.g. pending SIPIs should be dropped if INIT+SIPI |
3307 | * are blocked as a result of transitioning to VMX root mode. |
3308 | */ |
3309 | } |
3310 | |
3311 | /* |
3312 | * INITs are blocked while CPU is in specific states (SMM, VMX root |
3313 | * mode, SVM with GIF=0), while SIPIs are dropped if the CPU isn't in |
3314 | * wait-for-SIPI (WFS). |
3315 | */ |
3316 | if (!kvm_apic_init_sipi_allowed(vcpu)) { |
3317 | WARN_ON_ONCE(vcpu->arch.mp_state == KVM_MP_STATE_INIT_RECEIVED); |
		clear_bit(KVM_APIC_SIPI, &apic->pending_events);
3319 | return 0; |
3320 | } |
3321 | |
	if (test_and_clear_bit(KVM_APIC_INIT, &apic->pending_events)) {
		kvm_vcpu_reset(vcpu, true);
		if (kvm_vcpu_is_bsp(apic->vcpu))
3325 | vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE; |
3326 | else |
3327 | vcpu->arch.mp_state = KVM_MP_STATE_INIT_RECEIVED; |
3328 | } |
	if (test_and_clear_bit(KVM_APIC_SIPI, &apic->pending_events)) {
3330 | if (vcpu->arch.mp_state == KVM_MP_STATE_INIT_RECEIVED) { |
3331 | /* evaluate pending_events before reading the vector */ |
3332 | smp_rmb(); |
3333 | sipi_vector = apic->sipi_vector; |
3334 | static_call(kvm_x86_vcpu_deliver_sipi_vector)(vcpu, sipi_vector); |
3335 | vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE; |
3336 | } |
3337 | } |
3338 | return 0; |
3339 | } |
3340 | |
3341 | void kvm_lapic_exit(void) |
3342 | { |
3343 | static_key_deferred_flush(&apic_hw_disabled); |
3344 | WARN_ON(static_branch_unlikely(&apic_hw_disabled.key)); |
3345 | static_key_deferred_flush(&apic_sw_disabled); |
3346 | WARN_ON(static_branch_unlikely(&apic_sw_disabled.key)); |
3347 | } |
3348 | |