// SPDX-License-Identifier: GPL-2.0-or-later
/*  KVM paravirtual clock driver. A clocksource implementation
    Copyright (C) 2008 Glauber de Oliveira Costa, Red Hat Inc.
*/

#include <linux/clocksource.h>
#include <linux/kvm_para.h>
#include <asm/pvclock.h>
#include <asm/msr.h>
#include <asm/apic.h>
#include <linux/percpu.h>
#include <linux/hardirq.h>
#include <linux/cpuhotplug.h>
#include <linux/sched.h>
#include <linux/sched/clock.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/set_memory.h>

#include <asm/hypervisor.h>
#include <asm/mem_encrypt.h>
#include <asm/x86_init.h>
#include <asm/reboot.h>
#include <asm/kvmclock.h>

static int kvmclock __initdata = 1;
static int kvmclock_vsyscall __initdata = 1;
static int msr_kvm_system_time __ro_after_init = MSR_KVM_SYSTEM_TIME;
static int msr_kvm_wall_clock __ro_after_init = MSR_KVM_WALL_CLOCK;
static u64 kvm_sched_clock_offset __ro_after_init;

static int __init parse_no_kvmclock(char *arg)
{
	kvmclock = 0;
	return 0;
}
early_param("no-kvmclock", parse_no_kvmclock);

static int __init parse_no_kvmclock_vsyscall(char *arg)
{
	kvmclock_vsyscall = 0;
	return 0;
}
early_param("no-kvmclock-vsyscall", parse_no_kvmclock_vsyscall);

/* Aligned to page sizes to match what's mapped via vsyscalls to userspace */
#define HV_CLOCK_SIZE	(sizeof(struct pvclock_vsyscall_time_info) * NR_CPUS)
#define HVC_BOOT_ARRAY_SIZE \
	(PAGE_SIZE / sizeof(struct pvclock_vsyscall_time_info))

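/*
 * The hypervisor writes these structures, so they must live in memory the
 * host can access: __bss_decrypted keeps the boot-time copies mapped
 * decrypted when memory encryption (e.g. SEV) is active.
 */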
static struct pvclock_vsyscall_time_info
			hv_clock_boot[HVC_BOOT_ARRAY_SIZE] __bss_decrypted __aligned(PAGE_SIZE);
static struct pvclock_wall_clock wall_clock __bss_decrypted;
static DEFINE_PER_CPU(struct pvclock_vsyscall_time_info *, hv_clock_per_cpu);
static struct pvclock_vsyscall_time_info *hvclock_mem;

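/*
 * Helpers returning this CPU's pvclock data; the per-CPU pointers are set
 * up at boot for CPU0 and via the CPU hotplug callback for the others.
 */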
static inline struct pvclock_vcpu_time_info *this_cpu_pvti(void)
{
	return &this_cpu_read(hv_clock_per_cpu)->pvti;
}

static inline struct pvclock_vsyscall_time_info *this_cpu_hvclock(void)
{
	return this_cpu_read(hv_clock_per_cpu);
}

/*
 * The wallclock is the time of day when we booted. Some time may have
 * elapsed since the hypervisor wrote the data, so we account for that
 * with system time.
 */
static void kvm_get_wallclock(struct timespec64 *now)
{
	wrmsrl(msr_kvm_wall_clock, slow_virt_to_phys(&wall_clock));
	preempt_disable();
	pvclock_read_wallclock(&wall_clock, this_cpu_pvti(), now);
	preempt_enable();
}

static int kvm_set_wallclock(const struct timespec64 *now)
{
	return -ENODEV;
}

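/*
 * Read the current kvmclock value from this CPU's pvclock data. Preemption
 * is disabled (with the notrace variants, since this sits under
 * sched_clock()) so we don't migrate between CPUs mid-read;
 * pvclock_clocksource_read() handles the version-based retry against
 * concurrent hypervisor updates.
 */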
static u64 kvm_clock_read(void)
{
	u64 ret;

	preempt_disable_notrace();
	ret = pvclock_clocksource_read(this_cpu_pvti());
	preempt_enable_notrace();
	return ret;
}

static u64 kvm_clock_get_cycles(struct clocksource *cs)
{
	return kvm_clock_read();
}

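/*
 * sched_clock() wants a value that starts near zero at boot, so subtract
 * the kvmclock reading captured in kvm_sched_clock_init().
 */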
static u64 kvm_sched_clock_read(void)
{
	return kvm_clock_read() - kvm_sched_clock_offset;
}

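/*
 * Hook kvmclock up as sched_clock. If PVCLOCK_TSC_STABLE_BIT is not set,
 * readings are not guaranteed to be consistent across vCPUs, so mark
 * sched_clock unstable. The current kvmclock value becomes the offset
 * that makes sched_clock start at roughly zero.
 */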
static inline void kvm_sched_clock_init(bool stable)
{
	if (!stable)
		clear_sched_clock_stable();
	kvm_sched_clock_offset = kvm_clock_read();
	pv_ops.time.sched_clock = kvm_sched_clock_read;

	pr_info("kvm-clock: using sched offset of %llu cycles\n",
		kvm_sched_clock_offset);

	BUILD_BUG_ON(sizeof(kvm_sched_clock_offset) >
		     sizeof(((struct pvclock_vcpu_time_info *)NULL)->system_time));
}

/*
 * If we don't preset lpj here, the guest may calibrate it under heavy
 * load - thus getting a lower lpj - and later execute its delay loops
 * without load. That is wrong, because the delays would then finish
 * before the requested time has elapsed. Any heuristic is bound to fail,
 * because ultimately a large pool of guests can be running and disturbing
 * each other. So we preset lpj here.
 */
static unsigned long kvm_get_tsc_khz(void)
{
	setup_force_cpu_cap(X86_FEATURE_TSC_KNOWN_FREQ);
	return pvclock_tsc_khz(this_cpu_pvti());
}

static void __init kvm_get_preset_lpj(void)
{
	unsigned long khz;
	u64 lpj;

	khz = kvm_get_tsc_khz();

	lpj = ((u64)khz * 1000);
	do_div(lpj, HZ);
	preset_lpj = lpj;
}

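/*
 * The host sets PVCLOCK_GUEST_STOPPED after the guest has been paused,
 * e.g. across a host-initiated pause or migration. Watchdogs call this to
 * check for, and acknowledge, such a pause so they don't report false
 * positives for time that passed while the guest was stopped.
 */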
bool kvm_check_and_clear_guest_paused(void)
{
	struct pvclock_vsyscall_time_info *src = this_cpu_hvclock();
	bool ret = false;

	if (!src)
		return ret;

	if ((src->pvti.flags & PVCLOCK_GUEST_STOPPED) != 0) {
		src->pvti.flags &= ~PVCLOCK_GUEST_STOPPED;
		pvclock_touch_watchdogs();
		ret = true;
	}
	return ret;
}

struct clocksource kvm_clock = {
	.name	= "kvm-clock",
	.read	= kvm_clock_get_cycles,
	.rating	= 400,
	.mask	= CLOCKSOURCE_MASK(64),
	.flags	= CLOCK_SOURCE_IS_CONTINUOUS,
};
EXPORT_SYMBOL_GPL(kvm_clock);

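/*
 * Tell the hypervisor where this CPU's pvclock structure lives: write its
 * guest-physical address, with the enable bit (bit 0) set, into the
 * system-time MSR. The host then keeps the structure updated.
 */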
static void kvm_register_clock(char *txt)
{
	struct pvclock_vsyscall_time_info *src = this_cpu_hvclock();
	u64 pa;

	if (!src)
		return;

	pa = slow_virt_to_phys(&src->pvti) | 0x01ULL;
	wrmsrl(msr_kvm_system_time, pa);
	pr_info("kvm-clock: cpu %d, msr %llx, %s\n", smp_processor_id(), pa, txt);
}

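/*
 * Suspend/resume hooks: there is no sched clock state to save, but on
 * resume the boot CPU re-registers its clock page so the hypervisor
 * resumes updating it.
 */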
static void kvm_save_sched_clock_state(void)
{
}

static void kvm_restore_sched_clock_state(void)
{
	kvm_register_clock("primary cpu clock, resume");
}

#ifdef CONFIG_X86_LOCAL_APIC
static void kvm_setup_secondary_clock(void)
{
	kvm_register_clock("secondary cpu clock");
}
#endif

/*
 * After the clock is registered, the host will keep writing to the
 * registered memory location. If the guest happens to shut down, this
 * memory won't be valid. In cases like kexec, in which a new kernel is
 * installed, the host would keep writing to a now-random memory location.
 * So before any kind of shutdown on our side, we unregister the clock by
 * writing to the MSR any value that does not have the 'enable' bit set.
 */
#ifdef CONFIG_KEXEC_CORE
static void kvm_crash_shutdown(struct pt_regs *regs)
{
	native_write_msr(msr_kvm_system_time, 0, 0);
	kvm_disable_steal_time();
	native_machine_crash_shutdown(regs);
}
#endif

static void kvm_shutdown(void)
{
	native_write_msr(msr_kvm_system_time, 0, 0);
	kvm_disable_steal_time();
	native_machine_shutdown();
}

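/*
 * The static hv_clock_boot[] array only covers the CPUs that fit in one
 * page. If there are more possible CPUs, allocate additional pages for
 * their pvclock structures here, and map them decrypted under SEV since
 * the hypervisor has to write them.
 */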
static void __init kvmclock_init_mem(void)
{
	unsigned long ncpus;
	unsigned int order;
	struct page *p;
	int r;

	if (HVC_BOOT_ARRAY_SIZE >= num_possible_cpus())
		return;

	ncpus = num_possible_cpus() - HVC_BOOT_ARRAY_SIZE;
	order = get_order(ncpus * sizeof(*hvclock_mem));

	p = alloc_pages(GFP_KERNEL, order);
	if (!p) {
		pr_warn("%s: failed to alloc %d pages\n", __func__, (1U << order));
		return;
	}

	hvclock_mem = page_address(p);

	/*
	 * hvclock is shared between the guest and the hypervisor, so it
	 * must be mapped decrypted.
	 */
	if (sev_active()) {
		r = set_memory_decrypted((unsigned long) hvclock_mem,
					 1UL << order);
		if (r) {
			__free_pages(p, order);
			hvclock_mem = NULL;
			pr_warn("kvmclock: set_memory_decrypted() failed. Disabling\n");
			return;
		}
	}

	memset(hvclock_mem, 0, PAGE_SIZE << order);
}

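/*
 * Expose kvmclock to userspace via the vDSO. The vDSO reads CPU0's copy
 * of the time info from any CPU, so this is only safe when the host
 * guarantees consistent readings across vCPUs (PVCLOCK_TSC_STABLE_BIT).
 */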
static int __init kvm_setup_vsyscall_timeinfo(void)
{
#ifdef CONFIG_X86_64
	u8 flags;

	if (!per_cpu(hv_clock_per_cpu, 0) || !kvmclock_vsyscall)
		return 0;

	flags = pvclock_read_flags(&hv_clock_boot[0].pvti);
	if (!(flags & PVCLOCK_TSC_STABLE_BIT))
		return 0;

	kvm_clock.archdata.vclock_mode = VCLOCK_PVCLOCK;
#endif

	kvmclock_init_mem();

	return 0;
}
early_initcall(kvm_setup_vsyscall_timeinfo);

static int kvmclock_setup_percpu(unsigned int cpu)
{
	struct pvclock_vsyscall_time_info *p = per_cpu(hv_clock_per_cpu, cpu);

	/*
	 * The per-CPU area setup replicates CPU0's data to all CPUs'
	 * pointers, so a secondary CPU's pointer may just be a copy of
	 * CPU0's. Check carefully; CPU0 itself has already been set up
	 * in kvmclock_init().
	 */
	if (!cpu || (p && p != per_cpu(hv_clock_per_cpu, 0)))
		return 0;

	/* Use the static page for the first CPUs, allocate otherwise */
	if (cpu < HVC_BOOT_ARRAY_SIZE)
		p = &hv_clock_boot[cpu];
	else if (hvclock_mem)
		p = hvclock_mem + cpu - HVC_BOOT_ARRAY_SIZE;
	else
		return -ENOMEM;

	per_cpu(hv_clock_per_cpu, cpu) = p;
	return p ? 0 : -ENOMEM;
}

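/*
 * Detect kvmclock, pick the MSR pair to use (the "NEW" pair when
 * KVM_FEATURE_CLOCKSOURCE2 is advertised), register the boot CPU's clock
 * page, and wire kvmclock into sched_clock, the wallclock ops, TSC
 * calibration and the shutdown paths.
 */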
void __init kvmclock_init(void)
{
	u8 flags;

	if (!kvm_para_available() || !kvmclock)
		return;

	if (kvm_para_has_feature(KVM_FEATURE_CLOCKSOURCE2)) {
		msr_kvm_system_time = MSR_KVM_SYSTEM_TIME_NEW;
		msr_kvm_wall_clock = MSR_KVM_WALL_CLOCK_NEW;
	} else if (!kvm_para_has_feature(KVM_FEATURE_CLOCKSOURCE)) {
		return;
	}

	if (cpuhp_setup_state(CPUHP_BP_PREPARE_DYN, "kvmclock:setup_percpu",
			      kvmclock_setup_percpu, NULL) < 0) {
		return;
	}

	pr_info("kvm-clock: Using msrs %x and %x\n",
		msr_kvm_system_time, msr_kvm_wall_clock);

	this_cpu_write(hv_clock_per_cpu, &hv_clock_boot[0]);
	kvm_register_clock("primary cpu clock");
	pvclock_set_pvti_cpu0_va(hv_clock_boot);

	if (kvm_para_has_feature(KVM_FEATURE_CLOCKSOURCE_STABLE_BIT))
		pvclock_set_flags(PVCLOCK_TSC_STABLE_BIT);

	flags = pvclock_read_flags(&hv_clock_boot[0].pvti);
	kvm_sched_clock_init(flags & PVCLOCK_TSC_STABLE_BIT);

	x86_platform.calibrate_tsc = kvm_get_tsc_khz;
	x86_platform.calibrate_cpu = kvm_get_tsc_khz;
	x86_platform.get_wallclock = kvm_get_wallclock;
	x86_platform.set_wallclock = kvm_set_wallclock;
#ifdef CONFIG_X86_LOCAL_APIC
	x86_cpuinit.early_percpu_clock_init = kvm_setup_secondary_clock;
#endif
	x86_platform.save_sched_clock_state = kvm_save_sched_clock_state;
	x86_platform.restore_sched_clock_state = kvm_restore_sched_clock_state;
	machine_ops.shutdown = kvm_shutdown;
#ifdef CONFIG_KEXEC_CORE
	machine_ops.crash_shutdown = kvm_crash_shutdown;
#endif
	kvm_get_preset_lpj();

	/*
	 * X86_FEATURE_NONSTOP_TSC means the TSC runs at a constant rate
	 * across P/T-state transitions and does not stop in deep C-states.
	 *
	 * An invariant TSC exposed by the host means kvmclock is not
	 * necessary: the TSC can be used as the clocksource instead, so
	 * lower kvm_clock's rating below that of the TSC clocksource.
	 */
	if (boot_cpu_has(X86_FEATURE_CONSTANT_TSC) &&
	    boot_cpu_has(X86_FEATURE_NONSTOP_TSC) &&
	    !check_tsc_unstable())
		kvm_clock.rating = 299;

	clocksource_register_hz(&kvm_clock, NSEC_PER_SEC);
	pv_info.name = "KVM";
}