1 | // SPDX-License-Identifier: GPL-2.0-only |
2 | /* |
3 | * Copyright (C) 2012 - Virtual Open Systems and Columbia University |
4 | * Author: Christoffer Dall <c.dall@virtualopensystems.com> |
5 | */ |
6 | |
7 | #include <linux/bug.h> |
8 | #include <linux/cpu_pm.h> |
9 | #include <linux/entry-kvm.h> |
10 | #include <linux/errno.h> |
11 | #include <linux/err.h> |
12 | #include <linux/kvm_host.h> |
13 | #include <linux/list.h> |
14 | #include <linux/module.h> |
15 | #include <linux/vmalloc.h> |
16 | #include <linux/fs.h> |
17 | #include <linux/mman.h> |
18 | #include <linux/sched.h> |
19 | #include <linux/kvm.h> |
20 | #include <linux/kvm_irqfd.h> |
21 | #include <linux/irqbypass.h> |
22 | #include <linux/sched/stat.h> |
23 | #include <linux/psci.h> |
24 | #include <trace/events/kvm.h> |
25 | |
26 | #define CREATE_TRACE_POINTS |
27 | #include "trace_arm.h" |
28 | |
29 | #include <linux/uaccess.h> |
30 | #include <asm/ptrace.h> |
31 | #include <asm/mman.h> |
32 | #include <asm/tlbflush.h> |
33 | #include <asm/cacheflush.h> |
34 | #include <asm/cpufeature.h> |
35 | #include <asm/virt.h> |
36 | #include <asm/kvm_arm.h> |
37 | #include <asm/kvm_asm.h> |
38 | #include <asm/kvm_mmu.h> |
39 | #include <asm/kvm_nested.h> |
40 | #include <asm/kvm_pkvm.h> |
41 | #include <asm/kvm_emulate.h> |
42 | #include <asm/sections.h> |
43 | |
44 | #include <kvm/arm_hypercalls.h> |
45 | #include <kvm/arm_pmu.h> |
46 | #include <kvm/arm_psci.h> |
47 | |
48 | static enum kvm_mode kvm_mode = KVM_MODE_DEFAULT; |
49 | |
50 | DECLARE_KVM_HYP_PER_CPU(unsigned long, kvm_hyp_vector); |
51 | |
52 | DEFINE_PER_CPU(unsigned long, kvm_arm_hyp_stack_page); |
53 | DECLARE_KVM_NVHE_PER_CPU(struct kvm_nvhe_init_params, kvm_init_params); |
54 | |
55 | DECLARE_KVM_NVHE_PER_CPU(struct kvm_cpu_context, kvm_hyp_ctxt); |
56 | |
57 | static bool vgic_present, kvm_arm_initialised; |
58 | |
59 | static DEFINE_PER_CPU(unsigned char, kvm_hyp_initialized); |
60 | DEFINE_STATIC_KEY_FALSE(userspace_irqchip_in_use); |
61 | |
62 | bool is_kvm_arm_initialised(void) |
63 | { |
64 | return kvm_arm_initialised; |
65 | } |
66 | |
67 | int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu) |
68 | { |
69 | return kvm_vcpu_exiting_guest_mode(vcpu) == IN_GUEST_MODE; |
70 | } |
71 | |
72 | int kvm_vm_ioctl_enable_cap(struct kvm *kvm, |
73 | struct kvm_enable_cap *cap) |
74 | { |
75 | int r; |
76 | u64 new_cap; |
77 | |
78 | if (cap->flags) |
79 | return -EINVAL; |
80 | |
81 | switch (cap->cap) { |
82 | case KVM_CAP_ARM_NISV_TO_USER: |
83 | r = 0; |
84 | set_bit(KVM_ARCH_FLAG_RETURN_NISV_IO_ABORT_TO_USER, |
85 | &kvm->arch.flags); |
86 | break; |
87 | case KVM_CAP_ARM_MTE: |
88 | mutex_lock(&kvm->lock); |
89 | if (!system_supports_mte() || kvm->created_vcpus) { |
90 | r = -EINVAL; |
91 | } else { |
92 | r = 0; |
93 | set_bit(KVM_ARCH_FLAG_MTE_ENABLED, &kvm->arch.flags); |
94 | } |
95 | mutex_unlock(&kvm->lock); |
96 | break; |
97 | case KVM_CAP_ARM_SYSTEM_SUSPEND: |
98 | r = 0; |
99 | set_bit(KVM_ARCH_FLAG_SYSTEM_SUSPEND_ENABLED, &kvm->arch.flags); |
100 | break; |
101 | case KVM_CAP_ARM_EAGER_SPLIT_CHUNK_SIZE: |
102 | new_cap = cap->args[0]; |
103 | |
104 | mutex_lock(&kvm->slots_lock); |
105 | /* |
106 | * To keep things simple, allow changing the chunk |
107 | * size only when no memory slots have been created. |
108 | */ |
109 | if (!kvm_are_all_memslots_empty(kvm)) { |
110 | r = -EINVAL; |
111 | } else if (new_cap && !kvm_is_block_size_supported(new_cap)) { |
112 | r = -EINVAL; |
113 | } else { |
114 | r = 0; |
115 | kvm->arch.mmu.split_page_chunk_size = new_cap; |
116 | } |
117 | mutex_unlock(&kvm->slots_lock); |
118 | break; |
119 | default: |
120 | r = -EINVAL; |
121 | break; |
122 | } |
123 | |
124 | return r; |
125 | } |
126 | |
127 | static int kvm_arm_default_max_vcpus(void) |
128 | { |
129 | return vgic_present ? kvm_vgic_get_max_vcpus() : KVM_MAX_VCPUS; |
130 | } |
131 | |
132 | /** |
133 | * kvm_arch_init_vm - initializes a VM data structure |
134 | * @kvm: pointer to the KVM struct |
135 | */ |
136 | int kvm_arch_init_vm(struct kvm *kvm, unsigned long type) |
137 | { |
138 | int ret; |
139 | |
140 | mutex_init(&kvm->arch.config_lock); |
141 | |
142 | #ifdef CONFIG_LOCKDEP |
143 | /* Clue in lockdep that the config_lock must be taken inside kvm->lock */ |
144 | mutex_lock(&kvm->lock); |
145 | mutex_lock(&kvm->arch.config_lock); |
146 | mutex_unlock(&kvm->arch.config_lock); |
147 | mutex_unlock(&kvm->lock); |
148 | #endif |
149 | |
150 | ret = kvm_share_hyp(kvm, kvm + 1); |
151 | if (ret) |
152 | return ret; |
153 | |
154 | ret = pkvm_init_host_vm(kvm); |
155 | if (ret) |
156 | goto err_unshare_kvm; |
157 | |
158 | if (!zalloc_cpumask_var(&kvm->arch.supported_cpus, GFP_KERNEL_ACCOUNT)) { |
159 | ret = -ENOMEM; |
160 | goto err_unshare_kvm; |
161 | } |
162 | cpumask_copy(kvm->arch.supported_cpus, cpu_possible_mask); |
163 | |
164 | ret = kvm_init_stage2_mmu(kvm, &kvm->arch.mmu, type); |
165 | if (ret) |
166 | goto err_free_cpumask; |
167 | |
168 | kvm_vgic_early_init(kvm); |
169 | |
170 | kvm_timer_init_vm(kvm); |
171 | |
172 | /* The maximum number of VCPUs is limited by the host's GIC model */ |
173 | kvm->max_vcpus = kvm_arm_default_max_vcpus(); |
174 | |
175 | kvm_arm_init_hypercalls(kvm); |
176 | |
177 | bitmap_zero(kvm->arch.vcpu_features, KVM_VCPU_MAX_FEATURES); |
178 | |
179 | return 0; |
180 | |
181 | err_free_cpumask: |
182 | free_cpumask_var(kvm->arch.supported_cpus); |
183 | err_unshare_kvm: |
184 | kvm_unshare_hyp(kvm, kvm + 1); |
185 | return ret; |
186 | } |
187 | |
188 | vm_fault_t kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf) |
189 | { |
190 | return VM_FAULT_SIGBUS; |
191 | } |
192 | |
193 | void kvm_arch_create_vm_debugfs(struct kvm *kvm) |
194 | { |
195 | kvm_sys_regs_create_debugfs(kvm); |
196 | } |
197 | |
198 | /** |
199 | * kvm_arch_destroy_vm - destroy the VM data structure |
200 | * @kvm: pointer to the KVM struct |
201 | */ |
202 | void kvm_arch_destroy_vm(struct kvm *kvm) |
203 | { |
204 | bitmap_free(kvm->arch.pmu_filter); |
205 | free_cpumask_var(kvm->arch.supported_cpus); |
206 | |
207 | kvm_vgic_destroy(kvm); |
208 | |
209 | if (is_protected_kvm_enabled()) |
210 | pkvm_destroy_hyp_vm(kvm); |
211 | |
212 | kfree(kvm->arch.mpidr_data); |
213 | kfree(kvm->arch.sysreg_masks); |
214 | kvm_destroy_vcpus(kvm); |
215 | |
216 | kvm_unshare_hyp(kvm, kvm + 1); |
217 | |
218 | kvm_arm_teardown_hypercalls(kvm); |
219 | } |
220 | |
221 | int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext) |
222 | { |
223 | int r; |
224 | switch (ext) { |
225 | case KVM_CAP_IRQCHIP: |
226 | r = vgic_present; |
227 | break; |
228 | case KVM_CAP_IOEVENTFD: |
229 | case KVM_CAP_USER_MEMORY: |
230 | case KVM_CAP_SYNC_MMU: |
231 | case KVM_CAP_DESTROY_MEMORY_REGION_WORKS: |
232 | case KVM_CAP_ONE_REG: |
233 | case KVM_CAP_ARM_PSCI: |
234 | case KVM_CAP_ARM_PSCI_0_2: |
235 | case KVM_CAP_READONLY_MEM: |
236 | case KVM_CAP_MP_STATE: |
237 | case KVM_CAP_IMMEDIATE_EXIT: |
238 | case KVM_CAP_VCPU_EVENTS: |
239 | case KVM_CAP_ARM_IRQ_LINE_LAYOUT_2: |
240 | case KVM_CAP_ARM_NISV_TO_USER: |
241 | case KVM_CAP_ARM_INJECT_EXT_DABT: |
242 | case KVM_CAP_SET_GUEST_DEBUG: |
243 | case KVM_CAP_VCPU_ATTRIBUTES: |
244 | case KVM_CAP_PTP_KVM: |
245 | case KVM_CAP_ARM_SYSTEM_SUSPEND: |
246 | case KVM_CAP_IRQFD_RESAMPLE: |
247 | case KVM_CAP_COUNTER_OFFSET: |
248 | r = 1; |
249 | break; |
250 | case KVM_CAP_SET_GUEST_DEBUG2: |
251 | return KVM_GUESTDBG_VALID_MASK; |
252 | case KVM_CAP_ARM_SET_DEVICE_ADDR: |
253 | r = 1; |
254 | break; |
255 | case KVM_CAP_NR_VCPUS: |
256 | /* |
257 | * ARM64 treats KVM_CAP_NR_VCPUS differently from all other |
258 | * architectures, as it does not always bound it to |
259 | * KVM_CAP_MAX_VCPUS. It should not matter much because |
260 | * this is just an advisory value. |
261 | */ |
262 | r = min_t(unsigned int, num_online_cpus(), |
263 | kvm_arm_default_max_vcpus()); |
264 | break; |
265 | case KVM_CAP_MAX_VCPUS: |
266 | case KVM_CAP_MAX_VCPU_ID: |
267 | if (kvm) |
268 | r = kvm->max_vcpus; |
269 | else |
270 | r = kvm_arm_default_max_vcpus(); |
271 | break; |
272 | case KVM_CAP_MSI_DEVID: |
273 | if (!kvm) |
274 | r = -EINVAL; |
275 | else |
276 | r = kvm->arch.vgic.msis_require_devid; |
277 | break; |
278 | case KVM_CAP_ARM_USER_IRQ: |
279 | /* |
280 | * 1: EL1_VTIMER, EL1_PTIMER, and PMU. |
281 | * (bump this number if adding more devices) |
282 | */ |
283 | r = 1; |
284 | break; |
285 | case KVM_CAP_ARM_MTE: |
286 | r = system_supports_mte(); |
287 | break; |
288 | case KVM_CAP_STEAL_TIME: |
289 | r = kvm_arm_pvtime_supported(); |
290 | break; |
291 | case KVM_CAP_ARM_EL1_32BIT: |
292 | r = cpus_have_final_cap(ARM64_HAS_32BIT_EL1); |
293 | break; |
294 | case KVM_CAP_GUEST_DEBUG_HW_BPS: |
295 | r = get_num_brps(); |
296 | break; |
297 | case KVM_CAP_GUEST_DEBUG_HW_WPS: |
298 | r = get_num_wrps(); |
299 | break; |
300 | case KVM_CAP_ARM_PMU_V3: |
301 | r = kvm_arm_support_pmu_v3(); |
302 | break; |
303 | case KVM_CAP_ARM_INJECT_SERROR_ESR: |
304 | r = cpus_have_final_cap(ARM64_HAS_RAS_EXTN); |
305 | break; |
306 | case KVM_CAP_ARM_VM_IPA_SIZE: |
307 | r = get_kvm_ipa_limit(); |
308 | break; |
309 | case KVM_CAP_ARM_SVE: |
310 | r = system_supports_sve(); |
311 | break; |
312 | case KVM_CAP_ARM_PTRAUTH_ADDRESS: |
313 | case KVM_CAP_ARM_PTRAUTH_GENERIC: |
314 | r = system_has_full_ptr_auth(); |
315 | break; |
316 | case KVM_CAP_ARM_EAGER_SPLIT_CHUNK_SIZE: |
317 | if (kvm) |
318 | r = kvm->arch.mmu.split_page_chunk_size; |
319 | else |
320 | r = KVM_ARM_EAGER_SPLIT_CHUNK_SIZE_DEFAULT; |
321 | break; |
322 | case KVM_CAP_ARM_SUPPORTED_BLOCK_SIZES: |
323 | r = kvm_supported_block_sizes(); |
324 | break; |
325 | case KVM_CAP_ARM_SUPPORTED_REG_MASK_RANGES: |
326 | r = BIT(0); |
327 | break; |
328 | default: |
329 | r = 0; |
330 | } |
331 | |
332 | return r; |
333 | } |
334 | |
335 | long kvm_arch_dev_ioctl(struct file *filp, |
336 | unsigned int ioctl, unsigned long arg) |
337 | { |
338 | return -EINVAL; |
339 | } |
340 | |
341 | struct kvm *kvm_arch_alloc_vm(void) |
342 | { |
343 | size_t sz = sizeof(struct kvm); |
344 | |
345 | if (!has_vhe()) |
346 | return kzalloc(sz, GFP_KERNEL_ACCOUNT); |
347 | |
348 | return __vmalloc(sz, GFP_KERNEL_ACCOUNT | __GFP_HIGHMEM | __GFP_ZERO); |
349 | } |
350 | |
351 | int kvm_arch_vcpu_precreate(struct kvm *kvm, unsigned int id) |
352 | { |
353 | if (irqchip_in_kernel(kvm) && vgic_initialized(kvm)) |
354 | return -EBUSY; |
355 | |
356 | if (id >= kvm->max_vcpus) |
357 | return -EINVAL; |
358 | |
359 | return 0; |
360 | } |
361 | |
362 | int kvm_arch_vcpu_create(struct kvm_vcpu *vcpu) |
363 | { |
364 | int err; |
365 | |
366 | spin_lock_init(&vcpu->arch.mp_state_lock); |
367 | |
368 | #ifdef CONFIG_LOCKDEP |
369 | /* Inform lockdep that the config_lock is acquired after vcpu->mutex */ |
370 | mutex_lock(&vcpu->mutex); |
371 | mutex_lock(&vcpu->kvm->arch.config_lock); |
372 | mutex_unlock(&vcpu->kvm->arch.config_lock); |
373 | mutex_unlock(&vcpu->mutex); |
374 | #endif |
375 | |
376 | /* Force users to call KVM_ARM_VCPU_INIT */ |
377 | vcpu_clear_flag(vcpu, VCPU_INITIALIZED); |
378 | |
379 | vcpu->arch.mmu_page_cache.gfp_zero = __GFP_ZERO; |
380 | |
381 | /* |
382 | * Default value for the FP state, will be overloaded at load |
383 | * time if we support FP (pretty likely) |
384 | */ |
385 | vcpu->arch.fp_state = FP_STATE_FREE; |
386 | |
387 | /* Set up the timer */ |
388 | kvm_timer_vcpu_init(vcpu); |
389 | |
390 | kvm_pmu_vcpu_init(vcpu); |
391 | |
392 | kvm_arm_reset_debug_ptr(vcpu); |
393 | |
394 | kvm_arm_pvtime_vcpu_init(&vcpu->arch); |
395 | |
396 | vcpu->arch.hw_mmu = &vcpu->kvm->arch.mmu; |
397 | |
398 | err = kvm_vgic_vcpu_init(vcpu); |
399 | if (err) |
400 | return err; |
401 | |
402 | return kvm_share_hyp(vcpu, vcpu + 1); |
403 | } |
404 | |
405 | void kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu) |
406 | { |
407 | } |
408 | |
409 | void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu) |
410 | { |
411 | if (vcpu_has_run_once(vcpu) && unlikely(!irqchip_in_kernel(vcpu->kvm))) |
412 | static_branch_dec(&userspace_irqchip_in_use); |
413 | |
414 | kvm_mmu_free_memory_cache(&vcpu->arch.mmu_page_cache); |
415 | kvm_timer_vcpu_terminate(vcpu); |
416 | kvm_pmu_vcpu_destroy(vcpu); |
417 | kvm_vgic_vcpu_destroy(vcpu); |
418 | kvm_arm_vcpu_destroy(vcpu); |
419 | } |
420 | |
421 | void kvm_arch_vcpu_blocking(struct kvm_vcpu *vcpu) |
422 | { |
423 | |
424 | } |
425 | |
426 | void kvm_arch_vcpu_unblocking(struct kvm_vcpu *vcpu) |
427 | { |
428 | |
429 | } |
430 | |
431 | void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu) |
432 | { |
433 | struct kvm_s2_mmu *mmu; |
434 | int *last_ran; |
435 | |
436 | mmu = vcpu->arch.hw_mmu; |
437 | last_ran = this_cpu_ptr(mmu->last_vcpu_ran); |
438 | |
439 | /* |
440 | * We guarantee that both TLBs and I-cache are private to each |
441 | * vcpu. If we detect that a vcpu from the same VM has |
442 | * previously run on the same physical CPU, call into the |
443 | * hypervisor code to nuke the relevant contexts. |
444 | * |
445 | * We might get preempted before the vCPU actually runs, but |
446 | * over-invalidation doesn't affect correctness. |
447 | */ |
448 | if (*last_ran != vcpu->vcpu_idx) { |
449 | kvm_call_hyp(__kvm_flush_cpu_context, mmu); |
450 | *last_ran = vcpu->vcpu_idx; |
451 | } |
452 | |
453 | vcpu->cpu = cpu; |
454 | |
455 | kvm_vgic_load(vcpu); |
456 | kvm_timer_vcpu_load(vcpu); |
457 | if (has_vhe()) |
458 | kvm_vcpu_load_vhe(vcpu); |
459 | kvm_arch_vcpu_load_fp(vcpu); |
460 | kvm_vcpu_pmu_restore_guest(vcpu); |
461 | if (kvm_arm_is_pvtime_enabled(&vcpu->arch)) |
462 | kvm_make_request(KVM_REQ_RECORD_STEAL, vcpu); |
463 | |
464 | if (single_task_running()) |
465 | vcpu_clear_wfx_traps(vcpu); |
466 | else |
467 | vcpu_set_wfx_traps(vcpu); |
468 | |
469 | if (vcpu_has_ptrauth(vcpu)) |
470 | vcpu_ptrauth_disable(vcpu); |
471 | kvm_arch_vcpu_load_debug_state_flags(vcpu); |
472 | |
473 | if (!cpumask_test_cpu(cpu, vcpu->kvm->arch.supported_cpus)) |
474 | vcpu_set_on_unsupported_cpu(vcpu); |
475 | } |
476 | |
477 | void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu) |
478 | { |
479 | kvm_arch_vcpu_put_debug_state_flags(vcpu); |
480 | kvm_arch_vcpu_put_fp(vcpu); |
481 | if (has_vhe()) |
482 | kvm_vcpu_put_vhe(vcpu); |
483 | kvm_timer_vcpu_put(vcpu); |
484 | kvm_vgic_put(vcpu); |
485 | kvm_vcpu_pmu_restore_host(vcpu); |
486 | kvm_arm_vmid_clear_active(); |
487 | |
488 | vcpu_clear_on_unsupported_cpu(vcpu); |
489 | vcpu->cpu = -1; |
490 | } |
491 | |
492 | static void __kvm_arm_vcpu_power_off(struct kvm_vcpu *vcpu) |
493 | { |
494 | WRITE_ONCE(vcpu->arch.mp_state.mp_state, KVM_MP_STATE_STOPPED); |
495 | kvm_make_request(KVM_REQ_SLEEP, vcpu); |
496 | kvm_vcpu_kick(vcpu); |
497 | } |
498 | |
499 | void kvm_arm_vcpu_power_off(struct kvm_vcpu *vcpu) |
500 | { |
501 | spin_lock(&vcpu->arch.mp_state_lock); |
502 | __kvm_arm_vcpu_power_off(vcpu); |
503 | spin_unlock(&vcpu->arch.mp_state_lock); |
504 | } |
505 | |
506 | bool kvm_arm_vcpu_stopped(struct kvm_vcpu *vcpu) |
507 | { |
508 | return READ_ONCE(vcpu->arch.mp_state.mp_state) == KVM_MP_STATE_STOPPED; |
509 | } |
510 | |
511 | static void kvm_arm_vcpu_suspend(struct kvm_vcpu *vcpu) |
512 | { |
513 | WRITE_ONCE(vcpu->arch.mp_state.mp_state, KVM_MP_STATE_SUSPENDED); |
514 | kvm_make_request(KVM_REQ_SUSPEND, vcpu); |
515 | kvm_vcpu_kick(vcpu); |
516 | } |
517 | |
518 | static bool kvm_arm_vcpu_suspended(struct kvm_vcpu *vcpu) |
519 | { |
520 | return READ_ONCE(vcpu->arch.mp_state.mp_state) == KVM_MP_STATE_SUSPENDED; |
521 | } |
522 | |
523 | int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu, |
524 | struct kvm_mp_state *mp_state) |
525 | { |
526 | *mp_state = READ_ONCE(vcpu->arch.mp_state); |
527 | |
528 | return 0; |
529 | } |
530 | |
531 | int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu, |
532 | struct kvm_mp_state *mp_state) |
533 | { |
534 | int ret = 0; |
535 | |
536 | spin_lock(&vcpu->arch.mp_state_lock); |
537 | |
538 | switch (mp_state->mp_state) { |
539 | case KVM_MP_STATE_RUNNABLE: |
540 | WRITE_ONCE(vcpu->arch.mp_state, *mp_state); |
541 | break; |
542 | case KVM_MP_STATE_STOPPED: |
543 | __kvm_arm_vcpu_power_off(vcpu); |
544 | break; |
545 | case KVM_MP_STATE_SUSPENDED: |
546 | kvm_arm_vcpu_suspend(vcpu); |
547 | break; |
548 | default: |
549 | ret = -EINVAL; |
550 | } |
551 | |
552 | spin_unlock(&vcpu->arch.mp_state_lock); |
553 | |
554 | return ret; |
555 | } |
556 | |
557 | /** |
558 | * kvm_arch_vcpu_runnable - determine if the vcpu can be scheduled |
559 | * @v: The VCPU pointer |
560 | * |
561 | * If the guest CPU is not waiting for interrupts or an interrupt line is |
562 | * asserted, the CPU is by definition runnable. |
563 | */ |
564 | int kvm_arch_vcpu_runnable(struct kvm_vcpu *v) |
565 | { |
566 | bool irq_lines = *vcpu_hcr(v) & (HCR_VI | HCR_VF); |
567 | return ((irq_lines || kvm_vgic_vcpu_pending_irq(v)) |
568 | && !kvm_arm_vcpu_stopped(v) && !v->arch.pause); |
569 | } |
570 | |
571 | bool kvm_arch_vcpu_in_kernel(struct kvm_vcpu *vcpu) |
572 | { |
573 | return vcpu_mode_priv(vcpu); |
574 | } |
575 | |
576 | #ifdef CONFIG_GUEST_PERF_EVENTS |
577 | unsigned long kvm_arch_vcpu_get_ip(struct kvm_vcpu *vcpu) |
578 | { |
579 | return *vcpu_pc(vcpu); |
580 | } |
581 | #endif |
582 | |
583 | static int kvm_vcpu_initialized(struct kvm_vcpu *vcpu) |
584 | { |
585 | return vcpu_get_flag(vcpu, VCPU_INITIALIZED); |
586 | } |
587 | |
588 | static void kvm_init_mpidr_data(struct kvm *kvm) |
589 | { |
590 | struct kvm_mpidr_data *data = NULL; |
591 | unsigned long c, mask, nr_entries; |
592 | u64 aff_set = 0, aff_clr = ~0UL; |
593 | struct kvm_vcpu *vcpu; |
594 | |
595 | mutex_lock(&kvm->arch.config_lock); |
596 | |
597 | if (kvm->arch.mpidr_data || atomic_read(&kvm->online_vcpus) == 1) |
598 | goto out; |
599 | |
600 | kvm_for_each_vcpu(c, vcpu, kvm) { |
601 | u64 aff = kvm_vcpu_get_mpidr_aff(vcpu); |
602 | aff_set |= aff; |
603 | aff_clr &= aff; |
604 | } |
605 | |
606 | /* |
607 | * A significant bit can be either 0 or 1, and will only appear in |
608 | * aff_set. Use aff_clr to weed out the useless stuff. |
609 | */ |
610 | mask = aff_set ^ aff_clr; |
611 | nr_entries = BIT_ULL(hweight_long(mask)); |
612 | |
613 | /* |
614 | * Don't let userspace fool us. If we need more than a single page |
615 | * to describe the compressed MPIDR array, just fall back to the |
616 | * iterative method. Single vcpu VMs do not need this either. |
617 | */ |
618 | if (struct_size(data, cmpidr_to_idx, nr_entries) <= PAGE_SIZE) |
619 | data = kzalloc(struct_size(data, cmpidr_to_idx, nr_entries), |
620 | GFP_KERNEL_ACCOUNT); |
621 | |
622 | if (!data) |
623 | goto out; |
624 | |
625 | data->mpidr_mask = mask; |
626 | |
627 | kvm_for_each_vcpu(c, vcpu, kvm) { |
628 | u64 aff = kvm_vcpu_get_mpidr_aff(vcpu); |
629 | u16 index = kvm_mpidr_index(data, aff); |
630 | |
631 | data->cmpidr_to_idx[index] = c; |
632 | } |
633 | |
634 | kvm->arch.mpidr_data = data; |
635 | out: |
636 | mutex_unlock(&kvm->arch.config_lock); |
637 | } |
638 | |
639 | /* |
640 | * Handle both the initialisation that is being done when the vcpu is |
641 | * run for the first time, as well as the updates that must be |
642 | * performed each time we get a new thread dealing with this vcpu. |
643 | */ |
644 | int kvm_arch_vcpu_run_pid_change(struct kvm_vcpu *vcpu) |
645 | { |
646 | struct kvm *kvm = vcpu->kvm; |
647 | int ret; |
648 | |
649 | if (!kvm_vcpu_initialized(vcpu)) |
650 | return -ENOEXEC; |
651 | |
652 | if (!kvm_arm_vcpu_is_finalized(vcpu)) |
653 | return -EPERM; |
654 | |
655 | ret = kvm_arch_vcpu_run_map_fp(vcpu); |
656 | if (ret) |
657 | return ret; |
658 | |
659 | if (likely(vcpu_has_run_once(vcpu))) |
660 | return 0; |
661 | |
662 | kvm_init_mpidr_data(kvm); |
663 | |
664 | kvm_arm_vcpu_init_debug(vcpu); |
665 | |
666 | if (likely(irqchip_in_kernel(kvm))) { |
667 | /* |
668 | * Map the VGIC hardware resources before running a vcpu the |
669 | * first time on this VM. |
670 | */ |
671 | ret = kvm_vgic_map_resources(kvm); |
672 | if (ret) |
673 | return ret; |
674 | } |
675 | |
676 | if (vcpu_has_nv(vcpu)) { |
677 | ret = kvm_init_nv_sysregs(vcpu->kvm); |
678 | if (ret) |
679 | return ret; |
680 | } |
681 | |
682 | /* |
683 | * This needs to happen after NV has imposed its own restrictions on |
684 | * the feature set |
685 | */ |
686 | kvm_init_sysreg(vcpu); |
687 | |
688 | ret = kvm_timer_enable(vcpu); |
689 | if (ret) |
690 | return ret; |
691 | |
692 | ret = kvm_arm_pmu_v3_enable(vcpu); |
693 | if (ret) |
694 | return ret; |
695 | |
696 | if (is_protected_kvm_enabled()) { |
697 | ret = pkvm_create_hyp_vm(kvm); |
698 | if (ret) |
699 | return ret; |
700 | } |
701 | |
702 | if (!irqchip_in_kernel(kvm)) { |
703 | /* |
704 | * Tell the rest of the code that there are userspace irqchip |
705 | * VMs in the wild. |
706 | */ |
707 | static_branch_inc(&userspace_irqchip_in_use); |
708 | } |
709 | |
710 | /* |
711 | * Initialize traps for protected VMs. |
712 | * NOTE: Move to run in EL2 directly, rather than via a hypercall, once |
713 | * the code is in place for first run initialization at EL2. |
714 | */ |
715 | if (kvm_vm_is_protected(kvm)) |
716 | kvm_call_hyp_nvhe(__pkvm_vcpu_init_traps, vcpu); |
717 | |
718 | mutex_lock(&kvm->arch.config_lock); |
719 | set_bit(KVM_ARCH_FLAG_HAS_RAN_ONCE, &kvm->arch.flags); |
720 | mutex_unlock(&kvm->arch.config_lock); |
721 | |
722 | return ret; |
723 | } |
724 | |
725 | bool kvm_arch_intc_initialized(struct kvm *kvm) |
726 | { |
727 | return vgic_initialized(kvm); |
728 | } |
729 | |
730 | void kvm_arm_halt_guest(struct kvm *kvm) |
731 | { |
732 | unsigned long i; |
733 | struct kvm_vcpu *vcpu; |
734 | |
735 | kvm_for_each_vcpu(i, vcpu, kvm) |
736 | vcpu->arch.pause = true; |
737 | kvm_make_all_cpus_request(kvm, KVM_REQ_SLEEP); |
738 | } |
739 | |
740 | void kvm_arm_resume_guest(struct kvm *kvm) |
741 | { |
742 | unsigned long i; |
743 | struct kvm_vcpu *vcpu; |
744 | |
745 | kvm_for_each_vcpu(i, vcpu, kvm) { |
746 | vcpu->arch.pause = false; |
747 | __kvm_vcpu_wake_up(vcpu); |
748 | } |
749 | } |
750 | |
751 | static void kvm_vcpu_sleep(struct kvm_vcpu *vcpu) |
752 | { |
753 | struct rcuwait *wait = kvm_arch_vcpu_get_wait(vcpu); |
754 | |
755 | rcuwait_wait_event(wait, |
756 | (!kvm_arm_vcpu_stopped(vcpu)) && (!vcpu->arch.pause), |
757 | TASK_INTERRUPTIBLE); |
758 | |
759 | if (kvm_arm_vcpu_stopped(vcpu) || vcpu->arch.pause) { |
760 | /* Awaken to handle a signal, request we sleep again later. */ |
761 | kvm_make_request(KVM_REQ_SLEEP, vcpu); |
762 | } |
763 | |
764 | /* |
765 | * Make sure we will observe a potential reset request if we've |
766 | * observed a change to the power state. Pairs with the smp_wmb() in |
767 | * kvm_psci_vcpu_on(). |
768 | */ |
769 | smp_rmb(); |
770 | } |
771 | |
772 | /** |
773 | * kvm_vcpu_wfi - emulate Wait-For-Interrupt behavior |
774 | * @vcpu: The VCPU pointer |
775 | * |
776 | * Suspend execution of a vCPU until a valid wake event is detected, i.e. until |
777 | * the vCPU is runnable. The vCPU may or may not be scheduled out, depending |
778 | * on when a wake event arrives, e.g. there may already be a pending wake event. |
779 | */ |
780 | void kvm_vcpu_wfi(struct kvm_vcpu *vcpu) |
781 | { |
782 | /* |
783 | * Sync back the state of the GIC CPU interface so that we have |
784 | * the latest PMR and group enables. This ensures that |
785 | * kvm_arch_vcpu_runnable has up-to-date data to decide whether |
786 | * we have pending interrupts, e.g. when determining if the |
787 | * vCPU should block. |
788 | * |
789 | * For the same reason, we want to tell GICv4 that we need |
790 | * doorbells to be signalled, should an interrupt become pending. |
791 | */ |
792 | preempt_disable(); |
793 | kvm_vgic_vmcr_sync(vcpu); |
794 | vcpu_set_flag(vcpu, IN_WFI); |
795 | vgic_v4_put(vcpu); |
796 | preempt_enable(); |
797 | |
798 | kvm_vcpu_halt(vcpu); |
799 | vcpu_clear_flag(vcpu, IN_WFIT); |
800 | |
801 | preempt_disable(); |
802 | vcpu_clear_flag(vcpu, IN_WFI); |
803 | vgic_v4_load(vcpu); |
804 | preempt_enable(); |
805 | } |
806 | |
807 | static int kvm_vcpu_suspend(struct kvm_vcpu *vcpu) |
808 | { |
809 | if (!kvm_arm_vcpu_suspended(vcpu)) |
810 | return 1; |
811 | |
812 | kvm_vcpu_wfi(vcpu); |
813 | |
814 | /* |
815 | * The suspend state is sticky; we do not leave it until userspace |
816 | * explicitly marks the vCPU as runnable. Request that we suspend again |
817 | * later. |
818 | */ |
819 | kvm_make_request(KVM_REQ_SUSPEND, vcpu); |
820 | |
821 | /* |
822 | * Check to make sure the vCPU is actually runnable. If so, exit to |
823 | * userspace informing it of the wakeup condition. |
824 | */ |
825 | if (kvm_arch_vcpu_runnable(vcpu)) { |
826 | memset(&vcpu->run->system_event, 0, sizeof(vcpu->run->system_event)); |
827 | vcpu->run->system_event.type = KVM_SYSTEM_EVENT_WAKEUP; |
828 | vcpu->run->exit_reason = KVM_EXIT_SYSTEM_EVENT; |
829 | return 0; |
830 | } |
831 | |
832 | /* |
833 | * Otherwise, we were unblocked to process a different event, such as a |
834 | * pending signal. Return 1 and allow kvm_arch_vcpu_ioctl_run() to |
835 | * process the event. |
836 | */ |
837 | return 1; |
838 | } |
839 | |
840 | /** |
841 | * check_vcpu_requests - check and handle pending vCPU requests |
842 | * @vcpu: the VCPU pointer |
843 | * |
844 | * Return: 1 if we should enter the guest |
845 | * 0 if we should exit to userspace |
846 | * < 0 if we should exit to userspace, where the return value indicates |
847 | * an error |
848 | */ |
849 | static int check_vcpu_requests(struct kvm_vcpu *vcpu) |
850 | { |
851 | if (kvm_request_pending(vcpu)) { |
852 | if (kvm_check_request(KVM_REQ_SLEEP, vcpu)) |
853 | kvm_vcpu_sleep(vcpu); |
854 | |
855 | if (kvm_check_request(KVM_REQ_VCPU_RESET, vcpu)) |
856 | kvm_reset_vcpu(vcpu); |
857 | |
858 | /* |
859 | * Clear IRQ_PENDING requests that were made to guarantee |
860 | * that a VCPU sees new virtual interrupts. |
861 | */ |
862 | kvm_check_request(KVM_REQ_IRQ_PENDING, vcpu); |
863 | |
864 | if (kvm_check_request(KVM_REQ_RECORD_STEAL, vcpu)) |
865 | kvm_update_stolen_time(vcpu); |
866 | |
867 | if (kvm_check_request(KVM_REQ_RELOAD_GICv4, vcpu)) { |
868 | /* The distributor enable bits were changed */ |
869 | preempt_disable(); |
870 | vgic_v4_put(vcpu); |
871 | vgic_v4_load(vcpu); |
872 | preempt_enable(); |
873 | } |
874 | |
875 | if (kvm_check_request(KVM_REQ_RELOAD_PMU, vcpu)) |
876 | kvm_vcpu_reload_pmu(vcpu); |
877 | |
878 | if (kvm_check_request(KVM_REQ_RESYNC_PMU_EL0, vcpu)) |
879 | kvm_vcpu_pmu_restore_guest(vcpu); |
880 | |
881 | if (kvm_check_request(KVM_REQ_SUSPEND, vcpu)) |
882 | return kvm_vcpu_suspend(vcpu); |
883 | |
884 | if (kvm_dirty_ring_check_request(vcpu)) |
885 | return 0; |
886 | } |
887 | |
888 | return 1; |
889 | } |
890 | |
891 | static bool vcpu_mode_is_bad_32bit(struct kvm_vcpu *vcpu) |
892 | { |
893 | if (likely(!vcpu_mode_is_32bit(vcpu))) |
894 | return false; |
895 | |
896 | if (vcpu_has_nv(vcpu)) |
897 | return true; |
898 | |
899 | return !kvm_supports_32bit_el0(); |
900 | } |
901 | |
902 | /** |
903 | * kvm_vcpu_exit_request - returns true if the VCPU should *not* enter the guest |
904 | * @vcpu: The VCPU pointer |
905 | * @ret: Pointer to write optional return code |
906 | * |
907 | * Returns: true if the VCPU needs to return to a preemptible + interruptible |
908 | * kernel context and skip guest entry. |
909 | * |
910 | * This function disambiguates between two different types of exits: exits to a |
911 | * preemptible + interruptible kernel context and exits to userspace. For an |
912 | * exit to userspace, this function will write the return code to ret and return |
913 | * true. For an exit to preemptible + interruptible kernel context (i.e. check |
914 | * for pending work and re-enter), return true without writing to ret. |
915 | */ |
916 | static bool kvm_vcpu_exit_request(struct kvm_vcpu *vcpu, int *ret) |
917 | { |
918 | struct kvm_run *run = vcpu->run; |
919 | |
920 | /* |
921 | * If we're using a userspace irqchip, then check if we need |
922 | * to tell a userspace irqchip about timer or PMU level |
923 | * changes and if so, exit to userspace (the actual level |
924 | * state gets updated in kvm_timer_update_run and |
925 | * kvm_pmu_update_run below). |
926 | */ |
927 | if (static_branch_unlikely(&userspace_irqchip_in_use)) { |
928 | if (kvm_timer_should_notify_user(vcpu) || |
929 | kvm_pmu_should_notify_user(vcpu)) { |
930 | *ret = -EINTR; |
931 | run->exit_reason = KVM_EXIT_INTR; |
932 | return true; |
933 | } |
934 | } |
935 | |
936 | if (unlikely(vcpu_on_unsupported_cpu(vcpu))) { |
937 | run->exit_reason = KVM_EXIT_FAIL_ENTRY; |
938 | run->fail_entry.hardware_entry_failure_reason = KVM_EXIT_FAIL_ENTRY_CPU_UNSUPPORTED; |
939 | run->fail_entry.cpu = smp_processor_id(); |
940 | *ret = 0; |
941 | return true; |
942 | } |
943 | |
944 | return kvm_request_pending(vcpu) || |
945 | xfer_to_guest_mode_work_pending(); |
946 | } |
947 | |
948 | /* |
949 | * Actually run the vCPU, entering an RCU extended quiescent state (EQS) while |
950 | * the vCPU is running. |
951 | * |
952 | * This must be noinstr as instrumentation may make use of RCU, and this is not |
953 | * safe during the EQS. |
954 | */ |
955 | static int noinstr kvm_arm_vcpu_enter_exit(struct kvm_vcpu *vcpu) |
956 | { |
957 | int ret; |
958 | |
959 | guest_state_enter_irqoff(); |
960 | ret = kvm_call_hyp_ret(__kvm_vcpu_run, vcpu); |
961 | guest_state_exit_irqoff(); |
962 | |
963 | return ret; |
964 | } |
965 | |
966 | /** |
967 | * kvm_arch_vcpu_ioctl_run - the main VCPU run function to execute guest code |
968 | * @vcpu: The VCPU pointer |
969 | * |
970 | * This function is called through the VCPU_RUN ioctl called from user space. It |
971 | * will execute VM code in a loop until the time slice for the process is used |
972 | * or some emulation is needed from user space in which case the function will |
973 | * return with return value 0 and with the kvm_run structure filled in with the |
974 | * required data for the requested emulation. |
975 | */ |
976 | int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu) |
977 | { |
978 | struct kvm_run *run = vcpu->run; |
979 | int ret; |
980 | |
981 | if (run->exit_reason == KVM_EXIT_MMIO) { |
982 | ret = kvm_handle_mmio_return(vcpu); |
983 | if (ret) |
984 | return ret; |
985 | } |
986 | |
987 | vcpu_load(vcpu); |
988 | |
989 | if (run->immediate_exit) { |
990 | ret = -EINTR; |
991 | goto out; |
992 | } |
993 | |
994 | kvm_sigset_activate(vcpu); |
995 | |
996 | ret = 1; |
997 | run->exit_reason = KVM_EXIT_UNKNOWN; |
998 | run->flags = 0; |
999 | while (ret > 0) { |
1000 | /* |
1001 | * Check conditions before entering the guest |
1002 | */ |
1003 | ret = xfer_to_guest_mode_handle_work(vcpu); |
1004 | if (!ret) |
1005 | ret = 1; |
1006 | |
1007 | if (ret > 0) |
1008 | ret = check_vcpu_requests(vcpu); |
1009 | |
1010 | /* |
1011 | * Preparing the interrupts to be injected also |
1012 | * involves poking the GIC, which must be done in a |
1013 | * non-preemptible context. |
1014 | */ |
1015 | preempt_disable(); |
1016 | |
1017 | /* |
1018 | * The VMID allocator only tracks active VMIDs per |
1019 | * physical CPU, and therefore the VMID allocated may not be |
1020 | * preserved on VMID roll-over if the task was preempted, |
1021 | * making a thread's VMID inactive. So we need to call |
1022 | * kvm_arm_vmid_update() in non-preemptible context. |
1023 | */ |
1024 | if (kvm_arm_vmid_update(&vcpu->arch.hw_mmu->vmid) && |
1025 | has_vhe()) |
1026 | __load_stage2(vcpu->arch.hw_mmu, |
1027 | vcpu->arch.hw_mmu->arch); |
1028 | |
1029 | kvm_pmu_flush_hwstate(vcpu); |
1030 | |
1031 | local_irq_disable(); |
1032 | |
1033 | kvm_vgic_flush_hwstate(vcpu); |
1034 | |
1035 | kvm_pmu_update_vcpu_events(vcpu); |
1036 | |
1037 | /* |
1038 | * Ensure we set mode to IN_GUEST_MODE after we disable |
1039 | * interrupts and before the final VCPU requests check. |
1040 | * See the comment in kvm_vcpu_exiting_guest_mode() and |
1041 | * Documentation/virt/kvm/vcpu-requests.rst |
1042 | */ |
1043 | smp_store_mb(vcpu->mode, IN_GUEST_MODE); |
1044 | |
1045 | if (ret <= 0 || kvm_vcpu_exit_request(vcpu, &ret)) { |
1046 | vcpu->mode = OUTSIDE_GUEST_MODE; |
1047 | isb(); /* Ensure work in x_flush_hwstate is committed */ |
1048 | kvm_pmu_sync_hwstate(vcpu); |
1049 | if (static_branch_unlikely(&userspace_irqchip_in_use)) |
1050 | kvm_timer_sync_user(vcpu); |
1051 | kvm_vgic_sync_hwstate(vcpu); |
1052 | local_irq_enable(); |
1053 | preempt_enable(); |
1054 | continue; |
1055 | } |
1056 | |
1057 | kvm_arm_setup_debug(vcpu); |
1058 | kvm_arch_vcpu_ctxflush_fp(vcpu); |
1059 | |
1060 | /************************************************************** |
1061 | * Enter the guest |
1062 | */ |
1063 | trace_kvm_entry(*vcpu_pc(vcpu)); |
1064 | guest_timing_enter_irqoff(); |
1065 | |
1066 | ret = kvm_arm_vcpu_enter_exit(vcpu); |
1067 | |
1068 | vcpu->mode = OUTSIDE_GUEST_MODE; |
1069 | vcpu->stat.exits++; |
1070 | /* |
1071 | * Back from guest |
1072 | *************************************************************/ |
1073 | |
1074 | kvm_arm_clear_debug(vcpu); |
1075 | |
1076 | /* |
1077 | * We must sync the PMU state before the vgic state so |
1078 | * that the vgic can properly sample the updated state of the |
1079 | * interrupt line. |
1080 | */ |
1081 | kvm_pmu_sync_hwstate(vcpu); |
1082 | |
1083 | /* |
1084 | * Sync the vgic state before syncing the timer state because |
1085 | * the timer code needs to know if the virtual timer |
1086 | * interrupts are active. |
1087 | */ |
1088 | kvm_vgic_sync_hwstate(vcpu); |
1089 | |
1090 | /* |
1091 | * Sync the timer hardware state before enabling interrupts as |
1092 | * we don't want vtimer interrupts to race with syncing the |
1093 | * timer virtual interrupt state. |
1094 | */ |
1095 | if (static_branch_unlikely(&userspace_irqchip_in_use)) |
1096 | kvm_timer_sync_user(vcpu); |
1097 | |
1098 | kvm_arch_vcpu_ctxsync_fp(vcpu); |
1099 | |
1100 | /* |
1101 | * We must ensure that any pending interrupts are taken before |
1102 | * we exit guest timing so that timer ticks are accounted as |
1103 | * guest time. Transiently unmask interrupts so that any |
1104 | * pending interrupts are taken. |
1105 | * |
1106 | * Per ARM DDI 0487G.b section D1.13.4, an ISB (or other |
1107 | * context synchronization event) is necessary to ensure that |
1108 | * pending interrupts are taken. |
1109 | */ |
1110 | if (ARM_EXCEPTION_CODE(ret) == ARM_EXCEPTION_IRQ) { |
1111 | local_irq_enable(); |
1112 | isb(); |
1113 | local_irq_disable(); |
1114 | } |
1115 | |
1116 | guest_timing_exit_irqoff(); |
1117 | |
1118 | local_irq_enable(); |
1119 | |
1120 | trace_kvm_exit(ret, kvm_vcpu_trap_get_class(vcpu), *vcpu_pc(vcpu)); |
1121 | |
1122 | /* Exit types that need handling before we can be preempted */ |
1123 | handle_exit_early(vcpu, ret); |
1124 | |
1125 | preempt_enable(); |
1126 | |
1127 | /* |
1128 | * The ARMv8 architecture doesn't give the hypervisor |
1129 | * a mechanism to prevent a guest from dropping to AArch32 EL0 |
1130 | * if implemented by the CPU. If we spot the guest in such a |
1131 | * state and decide it wasn't supposed to do so (as with the |
1132 | * asymmetric AArch32 case), return to userspace with |
1133 | * a fatal error. |
1134 | */ |
1135 | if (vcpu_mode_is_bad_32bit(vcpu)) { |
1136 | /* |
1137 | * As we have caught the guest red-handed, decide that |
1138 | * it isn't fit for purpose anymore by making the vcpu |
1139 | * invalid. The VMM can try and fix it by issuing a |
1140 | * KVM_ARM_VCPU_INIT if it really wants to. |
1141 | */ |
1142 | vcpu_clear_flag(vcpu, VCPU_INITIALIZED); |
1143 | ret = ARM_EXCEPTION_IL; |
1144 | } |
1145 | |
1146 | ret = handle_exit(vcpu, ret); |
1147 | } |
1148 | |
1149 | /* Tell userspace about in-kernel device output levels */ |
1150 | if (unlikely(!irqchip_in_kernel(vcpu->kvm))) { |
1151 | kvm_timer_update_run(vcpu); |
1152 | kvm_pmu_update_run(vcpu); |
1153 | } |
1154 | |
1155 | kvm_sigset_deactivate(vcpu); |
1156 | |
1157 | out: |
1158 | /* |
1159 | * In the unlikely event that we are returning to userspace |
1160 | * with pending exceptions or PC adjustment, commit these |
1161 | * adjustments in order to give userspace a consistent view of |
1162 | * the vcpu state. Note that this relies on __kvm_adjust_pc() |
1163 | * being preempt-safe on VHE. |
1164 | */ |
1165 | if (unlikely(vcpu_get_flag(vcpu, PENDING_EXCEPTION) || |
1166 | vcpu_get_flag(vcpu, INCREMENT_PC))) |
1167 | kvm_call_hyp(__kvm_adjust_pc, vcpu); |
1168 | |
1169 | vcpu_put(vcpu); |
1170 | return ret; |
1171 | } |
1172 | |
1173 | static int vcpu_interrupt_line(struct kvm_vcpu *vcpu, int number, bool level) |
1174 | { |
1175 | int bit_index; |
1176 | bool set; |
1177 | unsigned long *hcr; |
1178 | |
1179 | if (number == KVM_ARM_IRQ_CPU_IRQ) |
1180 | bit_index = __ffs(HCR_VI); |
1181 | else /* KVM_ARM_IRQ_CPU_FIQ */ |
1182 | bit_index = __ffs(HCR_VF); |
1183 | |
1184 | hcr = vcpu_hcr(vcpu); |
1185 | if (level) |
1186 | set = test_and_set_bit(bit_index, hcr); |
1187 | else |
1188 | set = test_and_clear_bit(bit_index, hcr); |
1189 | |
1190 | /* |
1191 | * If we didn't change anything, no need to wake up or kick other CPUs |
1192 | */ |
1193 | if (set == level) |
1194 | return 0; |
1195 | |
1196 | /* |
1197 | * The vcpu irq_lines field was updated, wake up sleeping VCPUs and |
1198 | * trigger a world-switch round on the running physical CPU to set the |
1199 | * virtual IRQ/FIQ fields in the HCR appropriately. |
1200 | */ |
1201 | kvm_make_request(KVM_REQ_IRQ_PENDING, vcpu); |
1202 | kvm_vcpu_kick(vcpu); |
1203 | |
1204 | return 0; |
1205 | } |
1206 | |
1207 | int kvm_vm_ioctl_irq_line(struct kvm *kvm, struct kvm_irq_level *irq_level, |
1208 | bool line_status) |
1209 | { |
1210 | u32 irq = irq_level->irq; |
1211 | unsigned int irq_type, vcpu_id, irq_num; |
1212 | struct kvm_vcpu *vcpu = NULL; |
1213 | bool level = irq_level->level; |
1214 | |
1215 | irq_type = (irq >> KVM_ARM_IRQ_TYPE_SHIFT) & KVM_ARM_IRQ_TYPE_MASK; |
1216 | vcpu_id = (irq >> KVM_ARM_IRQ_VCPU_SHIFT) & KVM_ARM_IRQ_VCPU_MASK; |
1217 | vcpu_id += ((irq >> KVM_ARM_IRQ_VCPU2_SHIFT) & KVM_ARM_IRQ_VCPU2_MASK) * (KVM_ARM_IRQ_VCPU_MASK + 1); |
1218 | irq_num = (irq >> KVM_ARM_IRQ_NUM_SHIFT) & KVM_ARM_IRQ_NUM_MASK; |
1219 | |
1220 | trace_kvm_irq_line(irq_type, vcpu_id, irq_num, irq_level->level); |
1221 | |
1222 | switch (irq_type) { |
1223 | case KVM_ARM_IRQ_TYPE_CPU: |
1224 | if (irqchip_in_kernel(kvm)) |
1225 | return -ENXIO; |
1226 | |
1227 | vcpu = kvm_get_vcpu_by_id(kvm, vcpu_id); |
1228 | if (!vcpu) |
1229 | return -EINVAL; |
1230 | |
1231 | if (irq_num > KVM_ARM_IRQ_CPU_FIQ) |
1232 | return -EINVAL; |
1233 | |
1234 | return vcpu_interrupt_line(vcpu, irq_num, level); |
1235 | case KVM_ARM_IRQ_TYPE_PPI: |
1236 | if (!irqchip_in_kernel(kvm)) |
1237 | return -ENXIO; |
1238 | |
1239 | vcpu = kvm_get_vcpu_by_id(kvm, vcpu_id); |
1240 | if (!vcpu) |
1241 | return -EINVAL; |
1242 | |
1243 | if (irq_num < VGIC_NR_SGIS || irq_num >= VGIC_NR_PRIVATE_IRQS) |
1244 | return -EINVAL; |
1245 | |
1246 | return kvm_vgic_inject_irq(kvm, vcpu, irq_num, level, NULL); |
1247 | case KVM_ARM_IRQ_TYPE_SPI: |
1248 | if (!irqchip_in_kernel(kvm)) |
1249 | return -ENXIO; |
1250 | |
1251 | if (irq_num < VGIC_NR_PRIVATE_IRQS) |
1252 | return -EINVAL; |
1253 | |
1254 | return kvm_vgic_inject_irq(kvm, NULL, irq_num, level, NULL); |
1255 | } |
1256 | |
1257 | return -EINVAL; |
1258 | } |
1259 | |
1260 | static unsigned long system_supported_vcpu_features(void) |
1261 | { |
1262 | unsigned long features = KVM_VCPU_VALID_FEATURES; |
1263 | |
1264 | if (!cpus_have_final_cap(ARM64_HAS_32BIT_EL1)) |
1265 | clear_bit(KVM_ARM_VCPU_EL1_32BIT, &features); |
1266 | |
1267 | if (!kvm_arm_support_pmu_v3()) |
1268 | clear_bit(KVM_ARM_VCPU_PMU_V3, &features); |
1269 | |
1270 | if (!system_supports_sve()) |
1271 | clear_bit(KVM_ARM_VCPU_SVE, &features); |
1272 | |
1273 | if (!system_has_full_ptr_auth()) { |
1274 | clear_bit(KVM_ARM_VCPU_PTRAUTH_ADDRESS, &features); |
1275 | clear_bit(KVM_ARM_VCPU_PTRAUTH_GENERIC, &features); |
1276 | } |
1277 | |
1278 | if (!cpus_have_final_cap(ARM64_HAS_NESTED_VIRT)) |
1279 | clear_bit(KVM_ARM_VCPU_HAS_EL2, &features); |
1280 | |
1281 | return features; |
1282 | } |
1283 | |
1284 | static int kvm_vcpu_init_check_features(struct kvm_vcpu *vcpu, |
1285 | const struct kvm_vcpu_init *init) |
1286 | { |
1287 | unsigned long features = init->features[0]; |
1288 | int i; |
1289 | |
1290 | if (features & ~KVM_VCPU_VALID_FEATURES) |
1291 | return -ENOENT; |
1292 | |
1293 | for (i = 1; i < ARRAY_SIZE(init->features); i++) { |
1294 | if (init->features[i]) |
1295 | return -ENOENT; |
1296 | } |
1297 | |
1298 | if (features & ~system_supported_vcpu_features()) |
1299 | return -EINVAL; |
1300 | |
1301 | /* |
1302 | * For now make sure that both address/generic pointer authentication |
1303 | * features are requested by the userspace together. |
1304 | */ |
1305 | if (test_bit(KVM_ARM_VCPU_PTRAUTH_ADDRESS, &features) != |
1306 | test_bit(KVM_ARM_VCPU_PTRAUTH_GENERIC, &features)) |
1307 | return -EINVAL; |
1308 | |
1309 | /* Disallow NV+SVE for the time being */ |
1310 | if (test_bit(KVM_ARM_VCPU_HAS_EL2, &features) && |
1311 | test_bit(KVM_ARM_VCPU_SVE, &features)) |
1312 | return -EINVAL; |
1313 | |
1314 | if (!test_bit(KVM_ARM_VCPU_EL1_32BIT, &features)) |
1315 | return 0; |
1316 | |
1317 | /* MTE is incompatible with AArch32 */ |
1318 | if (kvm_has_mte(vcpu->kvm)) |
1319 | return -EINVAL; |
1320 | |
1321 | /* NV is incompatible with AArch32 */ |
1322 | if (test_bit(KVM_ARM_VCPU_HAS_EL2, &features)) |
1323 | return -EINVAL; |
1324 | |
1325 | return 0; |
1326 | } |
1327 | |
1328 | static bool kvm_vcpu_init_changed(struct kvm_vcpu *vcpu, |
1329 | const struct kvm_vcpu_init *init) |
1330 | { |
1331 | unsigned long features = init->features[0]; |
1332 | |
1333 | return !bitmap_equal(vcpu->kvm->arch.vcpu_features, &features, |
1334 | KVM_VCPU_MAX_FEATURES); |
1335 | } |
1336 | |
1337 | static int kvm_setup_vcpu(struct kvm_vcpu *vcpu) |
1338 | { |
1339 | struct kvm *kvm = vcpu->kvm; |
1340 | int ret = 0; |
1341 | |
1342 | /* |
1343 | * When the vCPU has a PMU, but no PMU is set for the guest |
1344 | * yet, set the default one. |
1345 | */ |
1346 | if (kvm_vcpu_has_pmu(vcpu) && !kvm->arch.arm_pmu) |
1347 | ret = kvm_arm_set_default_pmu(kvm); |
1348 | |
1349 | return ret; |
1350 | } |
1351 | |
1352 | static int __kvm_vcpu_set_target(struct kvm_vcpu *vcpu, |
1353 | const struct kvm_vcpu_init *init) |
1354 | { |
1355 | unsigned long features = init->features[0]; |
1356 | struct kvm *kvm = vcpu->kvm; |
1357 | int ret = -EINVAL; |
1358 | |
1359 | mutex_lock(&kvm->arch.config_lock); |
1360 | |
1361 | if (test_bit(KVM_ARCH_FLAG_VCPU_FEATURES_CONFIGURED, &kvm->arch.flags) && |
1362 | kvm_vcpu_init_changed(vcpu, init)) |
1363 | goto out_unlock; |
1364 | |
1365 | bitmap_copy(kvm->arch.vcpu_features, &features, KVM_VCPU_MAX_FEATURES); |
1366 | |
1367 | ret = kvm_setup_vcpu(vcpu); |
1368 | if (ret) |
1369 | goto out_unlock; |
1370 | |
1371 | /* Now we know what it is, we can reset it. */ |
1372 | kvm_reset_vcpu(vcpu); |
1373 | |
1374 | set_bit(KVM_ARCH_FLAG_VCPU_FEATURES_CONFIGURED, &kvm->arch.flags); |
1375 | vcpu_set_flag(vcpu, VCPU_INITIALIZED); |
1376 | ret = 0; |
1377 | out_unlock: |
1378 | mutex_unlock(&kvm->arch.config_lock); |
1379 | return ret; |
1380 | } |
1381 | |
1382 | static int kvm_vcpu_set_target(struct kvm_vcpu *vcpu, |
1383 | const struct kvm_vcpu_init *init) |
1384 | { |
1385 | int ret; |
1386 | |
1387 | if (init->target != KVM_ARM_TARGET_GENERIC_V8 && |
1388 | init->target != kvm_target_cpu()) |
1389 | return -EINVAL; |
1390 | |
1391 | ret = kvm_vcpu_init_check_features(vcpu, init); |
1392 | if (ret) |
1393 | return ret; |
1394 | |
1395 | if (!kvm_vcpu_initialized(vcpu)) |
1396 | return __kvm_vcpu_set_target(vcpu, init); |
1397 | |
1398 | if (kvm_vcpu_init_changed(vcpu, init)) |
1399 | return -EINVAL; |
1400 | |
1401 | kvm_reset_vcpu(vcpu); |
1402 | return 0; |
1403 | } |
1404 | |
1405 | static int kvm_arch_vcpu_ioctl_vcpu_init(struct kvm_vcpu *vcpu, |
1406 | struct kvm_vcpu_init *init) |
1407 | { |
1408 | bool power_off = false; |
1409 | int ret; |
1410 | |
1411 | /* |
1412 | * Treat the power-off vCPU feature as ephemeral. Clear the bit to avoid |
1413 | * reflecting it in the finalized feature set, thus limiting its scope |
1414 | * to a single KVM_ARM_VCPU_INIT call. |
1415 | */ |
1416 | if (init->features[0] & BIT(KVM_ARM_VCPU_POWER_OFF)) { |
1417 | init->features[0] &= ~BIT(KVM_ARM_VCPU_POWER_OFF); |
1418 | power_off = true; |
1419 | } |
1420 | |
1421 | ret = kvm_vcpu_set_target(vcpu, init); |
1422 | if (ret) |
1423 | return ret; |
1424 | |
1425 | /* |
1426 | * Ensure a rebooted VM will fault in RAM pages and detect if the |
1427 | * guest MMU is turned off and flush the caches as needed. |
1428 | * |
1429 | * S2FWB enforces all memory accesses to RAM being cacheable, |
1430 | * ensuring that the data side is always coherent. We still |
1431 | * need to invalidate the I-cache though, as FWB does *not* |
1432 | * imply CTR_EL0.DIC. |
1433 | */ |
1434 | if (vcpu_has_run_once(vcpu)) { |
1435 | if (!cpus_have_final_cap(ARM64_HAS_STAGE2_FWB)) |
1436 | stage2_unmap_vm(vcpu->kvm); |
1437 | else |
1438 | icache_inval_all_pou(); |
1439 | } |
1440 | |
1441 | vcpu_reset_hcr(vcpu); |
1442 | vcpu->arch.cptr_el2 = kvm_get_reset_cptr_el2(vcpu); |
1443 | |
1444 | /* |
1445 | * Handle the "start in power-off" case. |
1446 | */ |
1447 | spin_lock(&vcpu->arch.mp_state_lock); |
1448 | |
1449 | if (power_off) |
1450 | __kvm_arm_vcpu_power_off(vcpu); |
1451 | else |
1452 | WRITE_ONCE(vcpu->arch.mp_state.mp_state, KVM_MP_STATE_RUNNABLE); |
1453 | |
1454 | spin_unlock(&vcpu->arch.mp_state_lock); |
1455 | |
1456 | return 0; |
1457 | } |
1458 | |
1459 | static int kvm_arm_vcpu_set_attr(struct kvm_vcpu *vcpu, |
1460 | struct kvm_device_attr *attr) |
1461 | { |
1462 | int ret = -ENXIO; |
1463 | |
1464 | switch (attr->group) { |
1465 | default: |
1466 | ret = kvm_arm_vcpu_arch_set_attr(vcpu, attr); |
1467 | break; |
1468 | } |
1469 | |
1470 | return ret; |
1471 | } |
1472 | |
1473 | static int kvm_arm_vcpu_get_attr(struct kvm_vcpu *vcpu, |
1474 | struct kvm_device_attr *attr) |
1475 | { |
1476 | int ret = -ENXIO; |
1477 | |
1478 | switch (attr->group) { |
1479 | default: |
1480 | ret = kvm_arm_vcpu_arch_get_attr(vcpu, attr); |
1481 | break; |
1482 | } |
1483 | |
1484 | return ret; |
1485 | } |
1486 | |
1487 | static int kvm_arm_vcpu_has_attr(struct kvm_vcpu *vcpu, |
1488 | struct kvm_device_attr *attr) |
1489 | { |
1490 | int ret = -ENXIO; |
1491 | |
1492 | switch (attr->group) { |
1493 | default: |
1494 | ret = kvm_arm_vcpu_arch_has_attr(vcpu, attr); |
1495 | break; |
1496 | } |
1497 | |
1498 | return ret; |
1499 | } |
1500 | |
1501 | static int kvm_arm_vcpu_get_events(struct kvm_vcpu *vcpu, |
1502 | struct kvm_vcpu_events *events) |
1503 | { |
1504 | memset(events, 0, sizeof(*events)); |
1505 | |
1506 | return __kvm_arm_vcpu_get_events(vcpu, events); |
1507 | } |
1508 | |
1509 | static int kvm_arm_vcpu_set_events(struct kvm_vcpu *vcpu, |
1510 | struct kvm_vcpu_events *events) |
1511 | { |
1512 | int i; |
1513 | |
1514 | /* check whether the reserved field is zero */ |
1515 | for (i = 0; i < ARRAY_SIZE(events->reserved); i++) |
1516 | if (events->reserved[i]) |
1517 | return -EINVAL; |
1518 | |
1519 | /* check whether the pad field is zero */ |
1520 | for (i = 0; i < ARRAY_SIZE(events->exception.pad); i++) |
1521 | if (events->exception.pad[i]) |
1522 | return -EINVAL; |
1523 | |
1524 | return __kvm_arm_vcpu_set_events(vcpu, events); |
1525 | } |
1526 | |
1527 | long kvm_arch_vcpu_ioctl(struct file *filp, |
1528 | unsigned int ioctl, unsigned long arg) |
1529 | { |
1530 | struct kvm_vcpu *vcpu = filp->private_data; |
1531 | void __user *argp = (void __user *)arg; |
1532 | struct kvm_device_attr attr; |
1533 | long r; |
1534 | |
1535 | switch (ioctl) { |
1536 | case KVM_ARM_VCPU_INIT: { |
1537 | struct kvm_vcpu_init init; |
1538 | |
1539 | r = -EFAULT; |
1540 | if (copy_from_user(&init, argp, sizeof(init))) |
1541 | break; |
1542 | |
1543 | r = kvm_arch_vcpu_ioctl_vcpu_init(vcpu, &init); |
1544 | break; |
1545 | } |
1546 | case KVM_SET_ONE_REG: |
1547 | case KVM_GET_ONE_REG: { |
1548 | struct kvm_one_reg reg; |
1549 | |
1550 | r = -ENOEXEC; |
1551 | if (unlikely(!kvm_vcpu_initialized(vcpu))) |
1552 | break; |
1553 | |
1554 | r = -EFAULT; |
1555 | if (copy_from_user(&reg, argp, sizeof(reg))) |
1556 | break; |
1557 | |
1558 | /* |
1559 | * We could owe a reset due to PSCI. Handle the pending reset |
1560 | * here to ensure userspace register accesses are ordered after |
1561 | * the reset. |
1562 | */ |
1563 | if (kvm_check_request(KVM_REQ_VCPU_RESET, vcpu)) |
1564 | kvm_reset_vcpu(vcpu); |
1565 | |
1566 | if (ioctl == KVM_SET_ONE_REG) |
1567 | r = kvm_arm_set_reg(vcpu, ®); |
1568 | else |
1569 | r = kvm_arm_get_reg(vcpu, ®); |
1570 | break; |
1571 | } |
1572 | case KVM_GET_REG_LIST: { |
1573 | struct kvm_reg_list __user *user_list = argp; |
1574 | struct kvm_reg_list reg_list; |
1575 | unsigned n; |
1576 | |
1577 | r = -ENOEXEC; |
1578 | if (unlikely(!kvm_vcpu_initialized(vcpu))) |
1579 | break; |
1580 | |
1581 | r = -EPERM; |
1582 | if (!kvm_arm_vcpu_is_finalized(vcpu)) |
1583 | break; |
1584 | |
1585 | r = -EFAULT; |
1586 | if (copy_from_user(&reg_list, user_list, sizeof(reg_list))) |
1587 | break; |
1588 | n = reg_list.n; |
1589 | reg_list.n = kvm_arm_num_regs(vcpu); |
1590 | if (copy_to_user(user_list, &reg_list, sizeof(reg_list))) |
1591 | break; |
1592 | r = -E2BIG; |
1593 | if (n < reg_list.n) |
1594 | break; |
1595 | r = kvm_arm_copy_reg_indices(vcpu, user_list->reg); |
1596 | break; |
1597 | } |
1598 | case KVM_SET_DEVICE_ATTR: { |
1599 | r = -EFAULT; |
1600 | if (copy_from_user(&attr, argp, sizeof(attr))) |
1601 | break; |
1602 | r = kvm_arm_vcpu_set_attr(vcpu, &attr); |
1603 | break; |
1604 | } |
1605 | case KVM_GET_DEVICE_ATTR: { |
1606 | r = -EFAULT; |
1607 | if (copy_from_user(&attr, argp, sizeof(attr))) |
1608 | break; |
1609 | r = kvm_arm_vcpu_get_attr(vcpu, &attr); |
1610 | break; |
1611 | } |
1612 | case KVM_HAS_DEVICE_ATTR: { |
1613 | r = -EFAULT; |
1614 | if (copy_from_user(&attr, argp, sizeof(attr))) |
1615 | break; |
1616 | r = kvm_arm_vcpu_has_attr(vcpu, &attr); |
1617 | break; |
1618 | } |
1619 | case KVM_GET_VCPU_EVENTS: { |
1620 | struct kvm_vcpu_events events; |
1621 | |
1622 | if (kvm_arm_vcpu_get_events(vcpu, &events)) |
1623 | return -EINVAL; |
1624 | |
1625 | if (copy_to_user(argp, &events, sizeof(events))) |
1626 | return -EFAULT; |
1627 | |
1628 | return 0; |
1629 | } |
1630 | case KVM_SET_VCPU_EVENTS: { |
1631 | struct kvm_vcpu_events events; |
1632 | |
1633 | if (copy_from_user(&events, argp, sizeof(events))) |
1634 | return -EFAULT; |
1635 | |
1636 | return kvm_arm_vcpu_set_events(vcpu, &events); |
1637 | } |
1638 | case KVM_ARM_VCPU_FINALIZE: { |
1639 | int what; |
1640 | |
1641 | if (!kvm_vcpu_initialized(vcpu)) |
1642 | return -ENOEXEC; |
1643 | |
1644 | if (get_user(what, (const int __user *)argp)) |
1645 | return -EFAULT; |
1646 | |
1647 | return kvm_arm_vcpu_finalize(vcpu, what); |
1648 | } |
1649 | default: |
1650 | r = -EINVAL; |
1651 | } |
1652 | |
1653 | return r; |
1654 | } |
1655 | |
1656 | void kvm_arch_sync_dirty_log(struct kvm *kvm, struct kvm_memory_slot *memslot) |
1657 | { |
1658 | |
1659 | } |
1660 | |
1661 | static int kvm_vm_ioctl_set_device_addr(struct kvm *kvm, |
1662 | struct kvm_arm_device_addr *dev_addr) |
1663 | { |
1664 | switch (FIELD_GET(KVM_ARM_DEVICE_ID_MASK, dev_addr->id)) { |
1665 | case KVM_ARM_DEVICE_VGIC_V2: |
1666 | if (!vgic_present) |
1667 | return -ENXIO; |
1668 | return kvm_set_legacy_vgic_v2_addr(kvm, dev_addr); |
1669 | default: |
1670 | return -ENODEV; |
1671 | } |
1672 | } |
1673 | |
1674 | static int kvm_vm_has_attr(struct kvm *kvm, struct kvm_device_attr *attr) |
1675 | { |
1676 | switch (attr->group) { |
1677 | case KVM_ARM_VM_SMCCC_CTRL: |
1678 | return kvm_vm_smccc_has_attr(kvm, attr); |
1679 | default: |
1680 | return -ENXIO; |
1681 | } |
1682 | } |
1683 | |
1684 | static int kvm_vm_set_attr(struct kvm *kvm, struct kvm_device_attr *attr) |
1685 | { |
1686 | switch (attr->group) { |
1687 | case KVM_ARM_VM_SMCCC_CTRL: |
1688 | return kvm_vm_smccc_set_attr(kvm, attr); |
1689 | default: |
1690 | return -ENXIO; |
1691 | } |
1692 | } |
1693 | |
1694 | int kvm_arch_vm_ioctl(struct file *filp, unsigned int ioctl, unsigned long arg) |
1695 | { |
1696 | struct kvm *kvm = filp->private_data; |
1697 | void __user *argp = (void __user *)arg; |
1698 | struct kvm_device_attr attr; |
1699 | |
1700 | switch (ioctl) { |
1701 | case KVM_CREATE_IRQCHIP: { |
1702 | int ret; |
1703 | if (!vgic_present) |
1704 | return -ENXIO; |
1705 | mutex_lock(&kvm->lock); |
1706 | ret = kvm_vgic_create(kvm, KVM_DEV_TYPE_ARM_VGIC_V2); |
1707 | mutex_unlock(&kvm->lock); |
1708 | return ret; |
1709 | } |
1710 | case KVM_ARM_SET_DEVICE_ADDR: { |
1711 | struct kvm_arm_device_addr dev_addr; |
1712 | |
1713 | if (copy_from_user(&dev_addr, argp, sizeof(dev_addr))) |
1714 | return -EFAULT; |
1715 | return kvm_vm_ioctl_set_device_addr(kvm, &dev_addr); |
1716 | } |
1717 | case KVM_ARM_PREFERRED_TARGET: { |
1718 | struct kvm_vcpu_init init = { |
1719 | .target = KVM_ARM_TARGET_GENERIC_V8, |
1720 | }; |
1721 | |
1722 | if (copy_to_user(argp, &init, sizeof(init))) |
1723 | return -EFAULT; |
1724 | |
1725 | return 0; |
1726 | } |
1727 | case KVM_ARM_MTE_COPY_TAGS: { |
1728 | struct kvm_arm_copy_mte_tags copy_tags; |
1729 | |
1730 | if (copy_from_user(&copy_tags, argp, sizeof(copy_tags))) |
1731 | return -EFAULT; |
1732 | return kvm_vm_ioctl_mte_copy_tags(kvm, ©_tags); |
1733 | } |
1734 | case KVM_ARM_SET_COUNTER_OFFSET: { |
1735 | struct kvm_arm_counter_offset offset; |
1736 | |
1737 | if (copy_from_user(&offset, argp, sizeof(offset))) |
1738 | return -EFAULT; |
1739 | return kvm_vm_ioctl_set_counter_offset(kvm, &offset); |
1740 | } |
1741 | case KVM_HAS_DEVICE_ATTR: { |
1742 | if (copy_from_user(&attr, argp, sizeof(attr))) |
1743 | return -EFAULT; |
1744 | |
1745 | return kvm_vm_has_attr(kvm, &attr); |
1746 | } |
1747 | case KVM_SET_DEVICE_ATTR: { |
1748 | if (copy_from_user(&attr, argp, sizeof(attr))) |
1749 | return -EFAULT; |
1750 | |
1751 | return kvm_vm_set_attr(kvm, &attr); |
1752 | } |
1753 | case KVM_ARM_GET_REG_WRITABLE_MASKS: { |
1754 | struct reg_mask_range range; |
1755 | |
1756 | if (copy_from_user(&range, argp, sizeof(range))) |
1757 | return -EFAULT; |
1758 | return kvm_vm_ioctl_get_reg_writable_masks(kvm, &range); |
1759 | } |
1760 | default: |
1761 | return -EINVAL; |
1762 | } |
1763 | } |
1764 | |
1765 | /* unlocks vcpus from @vcpu_lock_idx and smaller */ |
1766 | static void unlock_vcpus(struct kvm *kvm, int vcpu_lock_idx) |
1767 | { |
1768 | struct kvm_vcpu *tmp_vcpu; |
1769 | |
1770 | for (; vcpu_lock_idx >= 0; vcpu_lock_idx--) { |
1771 | tmp_vcpu = kvm_get_vcpu(kvm, vcpu_lock_idx); |
1772 | mutex_unlock(&tmp_vcpu->mutex); |
1773 | } |
1774 | } |
1775 | |
1776 | void unlock_all_vcpus(struct kvm *kvm) |
1777 | { |
1778 | lockdep_assert_held(&kvm->lock); |
1779 | |
1780 | unlock_vcpus(kvm, atomic_read(&kvm->online_vcpus) - 1); |
1781 | } |
1782 | |
1783 | /* Returns true if all vcpus were locked, false otherwise */ |
1784 | bool lock_all_vcpus(struct kvm *kvm) |
1785 | { |
1786 | struct kvm_vcpu *tmp_vcpu; |
1787 | unsigned long c; |
1788 | |
1789 | lockdep_assert_held(&kvm->lock); |
1790 | |
1791 | /* |
1792 | * Any time a vcpu is in an ioctl (including running), the |
1793 | * core KVM code tries to grab the vcpu->mutex. |
1794 | * |
1795 | * By grabbing the vcpu->mutex of all VCPUs we ensure that no |
1796 | * other VCPUs can fiddle with the state while we access it. |
1797 | */ |
1798 | kvm_for_each_vcpu(c, tmp_vcpu, kvm) { |
1799 | if (!mutex_trylock(&tmp_vcpu->mutex)) { |
1800 | unlock_vcpus(kvm, c - 1); |
1801 | return false; |
1802 | } |
1803 | } |
1804 | |
1805 | return true; |
1806 | } |
1807 | |
1808 | static unsigned long nvhe_percpu_size(void) |
1809 | { |
1810 | return (unsigned long)CHOOSE_NVHE_SYM(__per_cpu_end) - |
1811 | (unsigned long)CHOOSE_NVHE_SYM(__per_cpu_start); |
1812 | } |
1813 | |
1814 | static unsigned long nvhe_percpu_order(void) |
1815 | { |
1816 | unsigned long size = nvhe_percpu_size(); |
1817 | |
1818 | return size ? get_order(size) : 0; |
1819 | } |
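
/*
 * Worked example (illustrative numbers only): with 4KiB pages, a 12KiB nVHE
 * per-CPU section needs three pages, and get_order() rounds that up to
 * order 2, i.e. a contiguous four-page allocation per CPU.
 */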
1820 | |
1821 | /* A lookup table holding the hypervisor VA for each vector slot */ |
1822 | static void *hyp_spectre_vector_selector[BP_HARDEN_EL2_SLOTS]; |
1823 | |
1824 | static void kvm_init_vector_slot(void *base, enum arm64_hyp_spectre_vector slot) |
1825 | { |
1826 | hyp_spectre_vector_selector[slot] = __kvm_vector_slot2addr(base, slot); |
1827 | } |
1828 | |
1829 | static int kvm_init_vector_slots(void) |
1830 | { |
1831 | int err; |
1832 | void *base; |
1833 | |
1834 | base = kern_hyp_va(kvm_ksym_ref(__kvm_hyp_vector)); |
1835 | kvm_init_vector_slot(base, HYP_VECTOR_DIRECT); |
1836 | |
1837 | base = kern_hyp_va(kvm_ksym_ref(__bp_harden_hyp_vecs)); |
1838 | kvm_init_vector_slot(base, HYP_VECTOR_SPECTRE_DIRECT); |
1839 | |
1840 | if (kvm_system_needs_idmapped_vectors() && |
1841 | !is_protected_kvm_enabled()) { |
1842 | err = create_hyp_exec_mappings(__pa_symbol(__bp_harden_hyp_vecs), |
1843 | __BP_HARDEN_HYP_VECS_SZ, &base); |
1844 | if (err) |
1845 | return err; |
1846 | } |
1847 | |
1848 | kvm_init_vector_slot(base, HYP_VECTOR_INDIRECT); |
1849 | kvm_init_vector_slot(base, HYP_VECTOR_SPECTRE_INDIRECT); |
1850 | return 0; |
1851 | } |
1852 | |
1853 | static void __init cpu_prepare_hyp_mode(int cpu, u32 hyp_va_bits) |
1854 | { |
1855 | struct kvm_nvhe_init_params *params = per_cpu_ptr_nvhe_sym(kvm_init_params, cpu); |
1856 | u64 mmfr0 = read_sanitised_ftr_reg(SYS_ID_AA64MMFR0_EL1); |
1857 | unsigned long tcr; |
1858 | |
1859 | /* |
1860 | * Calculate the raw per-cpu offset without a translation from the |
1861 | * kernel's mapping to the linear mapping, and store it in tpidr_el2 |
1862 | * so that we can use adr_l to access per-cpu variables in EL2. |
1863 | * Also drop the KASAN tag which gets in the way... |
1864 | */ |
1865 | 	params->tpidr_el2 = (unsigned long)kasan_reset_tag(per_cpu_ptr_nvhe_sym(__per_cpu_start, cpu)) - |
1866 | (unsigned long)kvm_ksym_ref(CHOOSE_NVHE_SYM(__per_cpu_start)); |
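
	/*
	 * Sketch of how EL2 code then reaches its per-cpu data (illustrative
	 * pseudo-assembly, not literal code from the hyp object):
	 *
	 *	adr_l	x0, <some_nvhe_percpu_symbol>	// PC-relative base
	 *	mrs	x1, tpidr_el2			// offset computed above
	 *	add	x0, x0, x1			// this CPU's copy
	 */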
1867 | |
1868 | params->mair_el2 = read_sysreg(mair_el1); |
1869 | |
1870 | tcr = read_sysreg(tcr_el1); |
1871 | if (cpus_have_final_cap(ARM64_KVM_HVHE)) { |
1872 | tcr |= TCR_EPD1_MASK; |
1873 | } else { |
1874 | tcr &= TCR_EL2_MASK; |
1875 | tcr |= TCR_EL2_RES1; |
1876 | } |
1877 | tcr &= ~TCR_T0SZ_MASK; |
1878 | tcr |= TCR_T0SZ(hyp_va_bits); |
1879 | tcr &= ~TCR_EL2_PS_MASK; |
1880 | tcr |= FIELD_PREP(TCR_EL2_PS_MASK, kvm_get_parange(mmfr0)); |
1881 | if (kvm_lpa2_is_enabled()) |
1882 | tcr |= TCR_EL2_DS; |
1883 | params->tcr_el2 = tcr; |
1884 | |
1885 | params->pgd_pa = kvm_mmu_get_httbr(); |
1886 | if (is_protected_kvm_enabled()) |
1887 | params->hcr_el2 = HCR_HOST_NVHE_PROTECTED_FLAGS; |
1888 | else |
1889 | params->hcr_el2 = HCR_HOST_NVHE_FLAGS; |
1890 | if (cpus_have_final_cap(ARM64_KVM_HVHE)) |
1891 | params->hcr_el2 |= HCR_E2H; |
1892 | params->vttbr = params->vtcr = 0; |
1893 | |
1894 | /* |
1895 | * Flush the init params from the data cache because the struct will |
1896 | * be read while the MMU is off. |
1897 | */ |
1898 | kvm_flush_dcache_to_poc(params, sizeof(*params)); |
1899 | } |
1900 | |
1901 | static void hyp_install_host_vector(void) |
1902 | { |
1903 | struct kvm_nvhe_init_params *params; |
1904 | struct arm_smccc_res res; |
1905 | |
1906 | /* Switch from the HYP stub to our own HYP init vector */ |
1907 | __hyp_set_vectors(kvm_get_idmap_vector()); |
1908 | |
1909 | /* |
1910 | * Call initialization code, and switch to the full blown HYP code. |
1911 | * If the cpucaps haven't been finalized yet, something has gone very |
1912 | * wrong, and hyp will crash and burn when it uses any |
1913 | * cpus_have_*_cap() wrapper. |
1914 | */ |
1915 | BUG_ON(!system_capabilities_finalized()); |
1916 | params = this_cpu_ptr_nvhe_sym(kvm_init_params); |
1917 | arm_smccc_1_1_hvc(KVM_HOST_SMCCC_FUNC(__kvm_hyp_init), virt_to_phys(params), &res); |
1918 | WARN_ON(res.a0 != SMCCC_RET_SUCCESS); |
1919 | } |
1920 | |
1921 | static void cpu_init_hyp_mode(void) |
1922 | { |
1923 | hyp_install_host_vector(); |
1924 | |
1925 | /* |
1926 | * Disabling SSBD on a non-VHE system requires us to enable SSBS |
1927 | * at EL2. |
1928 | */ |
1929 | if (this_cpu_has_cap(ARM64_SSBS) && |
1930 | arm64_get_spectre_v4_state() == SPECTRE_VULNERABLE) { |
1931 | kvm_call_hyp_nvhe(__kvm_enable_ssbs); |
1932 | } |
1933 | } |
1934 | |
1935 | static void cpu_hyp_reset(void) |
1936 | { |
1937 | if (!is_kernel_in_hyp_mode()) |
1938 | __hyp_reset_vectors(); |
1939 | } |
1940 | |
1941 | /* |
1942 | * EL2 vectors can be mapped and rerouted in a number of ways, |
1943 | * depending on the kernel configuration and CPU present: |
1944 | * |
1945 | * - If the CPU is affected by Spectre-v2, the hardening sequence is |
1946 | * placed in one of the vector slots, which is executed before jumping |
1947 | * to the real vectors. |
1948 | * |
1949 | * - If the CPU also has the ARM64_SPECTRE_V3A cap, the slot |
1950 | * containing the hardening sequence is mapped next to the idmap page, |
1951 | * and executed before jumping to the real vectors. |
1952 | * |
1953 | * - If the CPU only has the ARM64_SPECTRE_V3A cap, then an |
1954 | * empty slot is selected, mapped next to the idmap page, and |
1955 | * executed before jumping to the real vectors. |
1956 | * |
1957 | * Note that ARM64_SPECTRE_V3A is somewhat incompatible with |
1958 | * VHE, as we don't have hypervisor-specific mappings. If the system |
1959 | * is VHE and yet selects this capability, it will be ignored. |
1960 | */ |
1961 | static void cpu_set_hyp_vector(void) |
1962 | { |
1963 | struct bp_hardening_data *data = this_cpu_ptr(&bp_hardening_data); |
1964 | void *vector = hyp_spectre_vector_selector[data->slot]; |
1965 | |
1966 | if (!is_protected_kvm_enabled()) |
1967 | *this_cpu_ptr_hyp_sym(kvm_hyp_vector) = (unsigned long)vector; |
1968 | else |
1969 | kvm_call_hyp_nvhe(__pkvm_cpu_set_vector, data->slot); |
1970 | } |
1971 | |
1972 | static void cpu_hyp_init_context(void) |
1973 | { |
1974 | kvm_init_host_cpu_context(&this_cpu_ptr_hyp_sym(kvm_host_data)->host_ctxt); |
1975 | |
1976 | if (!is_kernel_in_hyp_mode()) |
1977 | cpu_init_hyp_mode(); |
1978 | } |
1979 | |
1980 | static void cpu_hyp_init_features(void) |
1981 | { |
1982 | cpu_set_hyp_vector(); |
1983 | kvm_arm_init_debug(); |
1984 | |
1985 | if (is_kernel_in_hyp_mode()) |
1986 | kvm_timer_init_vhe(); |
1987 | |
1988 | if (vgic_present) |
1989 | kvm_vgic_init_cpu_hardware(); |
1990 | } |
1991 | |
1992 | static void cpu_hyp_reinit(void) |
1993 | { |
1994 | cpu_hyp_reset(); |
1995 | cpu_hyp_init_context(); |
1996 | cpu_hyp_init_features(); |
1997 | } |
1998 | |
1999 | static void cpu_hyp_init(void *discard) |
2000 | { |
2001 | if (!__this_cpu_read(kvm_hyp_initialized)) { |
2002 | cpu_hyp_reinit(); |
2003 | __this_cpu_write(kvm_hyp_initialized, 1); |
2004 | } |
2005 | } |
2006 | |
2007 | static void cpu_hyp_uninit(void *discard) |
2008 | { |
2009 | if (__this_cpu_read(kvm_hyp_initialized)) { |
2010 | cpu_hyp_reset(); |
2011 | __this_cpu_write(kvm_hyp_initialized, 0); |
2012 | } |
2013 | } |
2014 | |
2015 | int kvm_arch_hardware_enable(void) |
2016 | { |
2017 | /* |
2018 | * Most calls to this function are made with migration |
2019 | * disabled, but not with preemption disabled. The former is |
2020 | * enough to ensure correctness, but most of the helpers |
2021 | 	 * expect the latter and will throw a tantrum otherwise. |
2022 | */ |
2023 | preempt_disable(); |
2024 | |
2025 | cpu_hyp_init(NULL); |
2026 | |
2027 | kvm_vgic_cpu_up(); |
2028 | kvm_timer_cpu_up(); |
2029 | |
2030 | preempt_enable(); |
2031 | |
2032 | return 0; |
2033 | } |
2034 | |
2035 | void kvm_arch_hardware_disable(void) |
2036 | { |
2037 | kvm_timer_cpu_down(); |
2038 | kvm_vgic_cpu_down(); |
2039 | |
2040 | if (!is_protected_kvm_enabled()) |
2041 | cpu_hyp_uninit(NULL); |
2042 | } |
2043 | |
2044 | #ifdef CONFIG_CPU_PM |
2045 | static int hyp_init_cpu_pm_notifier(struct notifier_block *self, |
2046 | unsigned long cmd, |
2047 | void *v) |
2048 | { |
2049 | /* |
2050 | * kvm_hyp_initialized is left with its old value over |
2051 | * PM_ENTER->PM_EXIT. It is used to indicate PM_EXIT should |
2052 | * re-enable hyp. |
2053 | */ |
2054 | switch (cmd) { |
2055 | case CPU_PM_ENTER: |
2056 | if (__this_cpu_read(kvm_hyp_initialized)) |
2057 | /* |
2058 | * don't update kvm_hyp_initialized here |
2059 | * so that the hyp will be re-enabled |
2060 | * when we resume. See below. |
2061 | */ |
2062 | cpu_hyp_reset(); |
2063 | |
2064 | return NOTIFY_OK; |
2065 | case CPU_PM_ENTER_FAILED: |
2066 | case CPU_PM_EXIT: |
2067 | if (__this_cpu_read(kvm_hyp_initialized)) |
2068 | /* The hyp was enabled before suspend. */ |
2069 | cpu_hyp_reinit(); |
2070 | |
2071 | return NOTIFY_OK; |
2072 | |
2073 | default: |
2074 | return NOTIFY_DONE; |
2075 | } |
2076 | } |
2077 | |
2078 | static struct notifier_block hyp_init_cpu_pm_nb = { |
2079 | .notifier_call = hyp_init_cpu_pm_notifier, |
2080 | }; |
2081 | |
2082 | static void __init hyp_cpu_pm_init(void) |
2083 | { |
2084 | if (!is_protected_kvm_enabled()) |
2085 | cpu_pm_register_notifier(&hyp_init_cpu_pm_nb); |
2086 | } |
2087 | static void __init hyp_cpu_pm_exit(void) |
2088 | { |
2089 | if (!is_protected_kvm_enabled()) |
2090 | cpu_pm_unregister_notifier(&hyp_init_cpu_pm_nb); |
2091 | } |
2092 | #else |
2093 | static inline void __init hyp_cpu_pm_init(void) |
2094 | { |
2095 | } |
2096 | static inline void __init hyp_cpu_pm_exit(void) |
2097 | { |
2098 | } |
2099 | #endif |
2100 | |
2101 | static void __init init_cpu_logical_map(void) |
2102 | { |
2103 | unsigned int cpu; |
2104 | |
2105 | /* |
2106 | * Copy the MPIDR <-> logical CPU ID mapping to hyp. |
2107 | * Only copy the set of online CPUs whose features have been checked |
2108 | * against the finalized system capabilities. The hypervisor will not |
2109 | * allow any other CPUs from the `possible` set to boot. |
2110 | */ |
2111 | for_each_online_cpu(cpu) |
2112 | hyp_cpu_logical_map[cpu] = cpu_logical_map(cpu); |
2113 | } |
2114 | |
2115 | #define init_psci_0_1_impl_state(config, what) \ |
2116 | config.psci_0_1_ ## what ## _implemented = psci_ops.what |
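
/*
 * For example (a straightforward expansion of the macro above),
 * init_psci_0_1_impl_state(kvm_host_psci_config, cpu_on) becomes:
 *
 *	kvm_host_psci_config.psci_0_1_cpu_on_implemented = psci_ops.cpu_on
 *
 * i.e. it records whether the firmware actually provides the PSCI 0.1
 * CPU_ON call.
 */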
2117 | |
2118 | static bool __init init_psci_relay(void) |
2119 | { |
2120 | /* |
2121 | * If PSCI has not been initialized, protected KVM cannot install |
2122 | * itself on newly booted CPUs. |
2123 | */ |
2124 | if (!psci_ops.get_version) { |
2125 | 		kvm_err("Cannot initialize protected mode without PSCI\n"); |
2126 | return false; |
2127 | } |
2128 | |
2129 | kvm_host_psci_config.version = psci_ops.get_version(); |
2130 | kvm_host_psci_config.smccc_version = arm_smccc_get_version(); |
2131 | |
2132 | if (kvm_host_psci_config.version == PSCI_VERSION(0, 1)) { |
2133 | kvm_host_psci_config.function_ids_0_1 = get_psci_0_1_function_ids(); |
2134 | init_psci_0_1_impl_state(kvm_host_psci_config, cpu_suspend); |
2135 | init_psci_0_1_impl_state(kvm_host_psci_config, cpu_on); |
2136 | init_psci_0_1_impl_state(kvm_host_psci_config, cpu_off); |
2137 | init_psci_0_1_impl_state(kvm_host_psci_config, migrate); |
2138 | } |
2139 | return true; |
2140 | } |
2141 | |
2142 | static int __init init_subsystems(void) |
2143 | { |
2144 | int err = 0; |
2145 | |
2146 | /* |
2147 | * Enable hardware so that subsystem initialisation can access EL2. |
2148 | */ |
2149 | 	on_each_cpu(cpu_hyp_init, NULL, 1); |
2150 | |
2151 | /* |
2152 | 	 * Register CPU low-power notifier |
2153 | */ |
2154 | hyp_cpu_pm_init(); |
2155 | |
2156 | /* |
2157 | * Init HYP view of VGIC |
2158 | */ |
2159 | err = kvm_vgic_hyp_init(); |
2160 | switch (err) { |
2161 | case 0: |
2162 | vgic_present = true; |
2163 | break; |
2164 | case -ENODEV: |
2165 | case -ENXIO: |
2166 | vgic_present = false; |
2167 | err = 0; |
2168 | break; |
2169 | default: |
2170 | goto out; |
2171 | } |
2172 | |
2173 | /* |
2174 | * Init HYP architected timer support |
2175 | */ |
2176 | 	err = kvm_timer_hyp_init(vgic_present); |
2177 | if (err) |
2178 | goto out; |
2179 | |
2180 | kvm_register_perf_callbacks(NULL); |
2181 | |
2182 | out: |
2183 | if (err) |
2184 | hyp_cpu_pm_exit(); |
2185 | |
2186 | if (err || !is_protected_kvm_enabled()) |
2187 | 		on_each_cpu(cpu_hyp_uninit, NULL, 1); |
2188 | |
2189 | return err; |
2190 | } |
2191 | |
2192 | static void __init teardown_subsystems(void) |
2193 | { |
2194 | kvm_unregister_perf_callbacks(); |
2195 | hyp_cpu_pm_exit(); |
2196 | } |
2197 | |
2198 | static void __init teardown_hyp_mode(void) |
2199 | { |
2200 | int cpu; |
2201 | |
2202 | free_hyp_pgds(); |
2203 | for_each_possible_cpu(cpu) { |
2204 | free_page(per_cpu(kvm_arm_hyp_stack_page, cpu)); |
2205 | free_pages(kvm_nvhe_sym(kvm_arm_hyp_percpu_base)[cpu], nvhe_percpu_order()); |
2206 | } |
2207 | } |
2208 | |
2209 | static int __init do_pkvm_init(u32 hyp_va_bits) |
2210 | { |
2211 | void *per_cpu_base = kvm_ksym_ref(kvm_nvhe_sym(kvm_arm_hyp_percpu_base)); |
2212 | int ret; |
2213 | |
2214 | preempt_disable(); |
2215 | cpu_hyp_init_context(); |
2216 | ret = kvm_call_hyp_nvhe(__pkvm_init, hyp_mem_base, hyp_mem_size, |
2217 | num_possible_cpus(), kern_hyp_va(per_cpu_base), |
2218 | hyp_va_bits); |
2219 | cpu_hyp_init_features(); |
2220 | |
2221 | /* |
2222 | * The stub hypercalls are now disabled, so set our local flag to |
2223 | * prevent a later re-init attempt in kvm_arch_hardware_enable(). |
2224 | */ |
2225 | __this_cpu_write(kvm_hyp_initialized, 1); |
2226 | preempt_enable(); |
2227 | |
2228 | return ret; |
2229 | } |
2230 | |
2231 | static u64 get_hyp_id_aa64pfr0_el1(void) |
2232 | { |
2233 | /* |
2234 | * Track whether the system isn't affected by spectre/meltdown in the |
2235 | * hypervisor's view of id_aa64pfr0_el1, used for protected VMs. |
2236 | * Although this is per-CPU, we make it global for simplicity, e.g., not |
2237 | * to have to worry about vcpu migration. |
2238 | * |
2239 | * Unlike for non-protected VMs, userspace cannot override this for |
2240 | * protected VMs. |
2241 | */ |
2242 | u64 val = read_sanitised_ftr_reg(SYS_ID_AA64PFR0_EL1); |
2243 | |
2244 | val &= ~(ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_CSV2) | |
2245 | ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_CSV3)); |
2246 | |
2247 | val |= FIELD_PREP(ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_CSV2), |
2248 | arm64_get_spectre_v2_state() == SPECTRE_UNAFFECTED); |
2249 | val |= FIELD_PREP(ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_CSV3), |
2250 | arm64_get_meltdown_state() == SPECTRE_UNAFFECTED); |
2251 | |
2252 | return val; |
2253 | } |
2254 | |
2255 | static void kvm_hyp_init_symbols(void) |
2256 | { |
2257 | kvm_nvhe_sym(id_aa64pfr0_el1_sys_val) = get_hyp_id_aa64pfr0_el1(); |
2258 | kvm_nvhe_sym(id_aa64pfr1_el1_sys_val) = read_sanitised_ftr_reg(SYS_ID_AA64PFR1_EL1); |
2259 | kvm_nvhe_sym(id_aa64isar0_el1_sys_val) = read_sanitised_ftr_reg(SYS_ID_AA64ISAR0_EL1); |
2260 | kvm_nvhe_sym(id_aa64isar1_el1_sys_val) = read_sanitised_ftr_reg(SYS_ID_AA64ISAR1_EL1); |
2261 | kvm_nvhe_sym(id_aa64isar2_el1_sys_val) = read_sanitised_ftr_reg(SYS_ID_AA64ISAR2_EL1); |
2262 | kvm_nvhe_sym(id_aa64mmfr0_el1_sys_val) = read_sanitised_ftr_reg(SYS_ID_AA64MMFR0_EL1); |
2263 | kvm_nvhe_sym(id_aa64mmfr1_el1_sys_val) = read_sanitised_ftr_reg(SYS_ID_AA64MMFR1_EL1); |
2264 | kvm_nvhe_sym(id_aa64mmfr2_el1_sys_val) = read_sanitised_ftr_reg(SYS_ID_AA64MMFR2_EL1); |
2265 | kvm_nvhe_sym(id_aa64smfr0_el1_sys_val) = read_sanitised_ftr_reg(SYS_ID_AA64SMFR0_EL1); |
2266 | kvm_nvhe_sym(__icache_flags) = __icache_flags; |
2267 | kvm_nvhe_sym(kvm_arm_vmid_bits) = kvm_arm_vmid_bits; |
2268 | } |
2269 | |
2270 | static int __init kvm_hyp_init_protection(u32 hyp_va_bits) |
2271 | { |
2272 | void *addr = phys_to_virt(hyp_mem_base); |
2273 | int ret; |
2274 | |
2275 | ret = create_hyp_mappings(addr, addr + hyp_mem_size, PAGE_HYP); |
2276 | if (ret) |
2277 | return ret; |
2278 | |
2279 | ret = do_pkvm_init(hyp_va_bits); |
2280 | if (ret) |
2281 | return ret; |
2282 | |
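	/*
	 * The hyp instance now owns and manages its own stage-1 page tables,
	 * so the kernel-side hyp page tables built earlier can be released.
	 */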
2283 | free_hyp_pgds(); |
2284 | |
2285 | return 0; |
2286 | } |
2287 | |
2288 | static void pkvm_hyp_init_ptrauth(void) |
2289 | { |
2290 | struct kvm_cpu_context *hyp_ctxt; |
2291 | int cpu; |
2292 | |
2293 | for_each_possible_cpu(cpu) { |
2294 | hyp_ctxt = per_cpu_ptr_nvhe_sym(kvm_hyp_ctxt, cpu); |
2295 | hyp_ctxt->sys_regs[APIAKEYLO_EL1] = get_random_long(); |
2296 | hyp_ctxt->sys_regs[APIAKEYHI_EL1] = get_random_long(); |
2297 | hyp_ctxt->sys_regs[APIBKEYLO_EL1] = get_random_long(); |
2298 | hyp_ctxt->sys_regs[APIBKEYHI_EL1] = get_random_long(); |
2299 | hyp_ctxt->sys_regs[APDAKEYLO_EL1] = get_random_long(); |
2300 | hyp_ctxt->sys_regs[APDAKEYHI_EL1] = get_random_long(); |
2301 | hyp_ctxt->sys_regs[APDBKEYLO_EL1] = get_random_long(); |
2302 | hyp_ctxt->sys_regs[APDBKEYHI_EL1] = get_random_long(); |
2303 | hyp_ctxt->sys_regs[APGAKEYLO_EL1] = get_random_long(); |
2304 | hyp_ctxt->sys_regs[APGAKEYHI_EL1] = get_random_long(); |
2305 | } |
2306 | } |
2307 | |
2308 | /* Inits Hyp-mode on all online CPUs */ |
2309 | static int __init init_hyp_mode(void) |
2310 | { |
2311 | u32 hyp_va_bits; |
2312 | int cpu; |
2313 | int err = -ENOMEM; |
2314 | |
2315 | /* |
2316 | * The protected Hyp-mode cannot be initialized if the memory pool |
2317 | * allocation has failed. |
2318 | */ |
2319 | if (is_protected_kvm_enabled() && !hyp_mem_base) |
2320 | goto out_err; |
2321 | |
2322 | /* |
2323 | * Allocate Hyp PGD and setup Hyp identity mapping |
2324 | */ |
2325 | err = kvm_mmu_init(&hyp_va_bits); |
2326 | if (err) |
2327 | goto out_err; |
2328 | |
2329 | /* |
2330 | * Allocate stack pages for Hypervisor-mode |
2331 | */ |
2332 | for_each_possible_cpu(cpu) { |
2333 | unsigned long stack_page; |
2334 | |
2335 | stack_page = __get_free_page(GFP_KERNEL); |
2336 | if (!stack_page) { |
2337 | err = -ENOMEM; |
2338 | goto out_err; |
2339 | } |
2340 | |
2341 | per_cpu(kvm_arm_hyp_stack_page, cpu) = stack_page; |
2342 | } |
2343 | |
2344 | /* |
2345 | * Allocate and initialize pages for Hypervisor-mode percpu regions. |
2346 | */ |
2347 | for_each_possible_cpu(cpu) { |
2348 | struct page *page; |
2349 | void *page_addr; |
2350 | |
2351 | 		page = alloc_pages(GFP_KERNEL, nvhe_percpu_order()); |
2352 | if (!page) { |
2353 | err = -ENOMEM; |
2354 | goto out_err; |
2355 | } |
2356 | |
2357 | page_addr = page_address(page); |
2358 | memcpy(page_addr, CHOOSE_NVHE_SYM(__per_cpu_start), nvhe_percpu_size()); |
2359 | kvm_nvhe_sym(kvm_arm_hyp_percpu_base)[cpu] = (unsigned long)page_addr; |
2360 | } |
2361 | |
2362 | /* |
2363 | * Map the Hyp-code called directly from the host |
2364 | */ |
2365 | err = create_hyp_mappings(kvm_ksym_ref(__hyp_text_start), |
2366 | kvm_ksym_ref(__hyp_text_end), PAGE_HYP_EXEC); |
2367 | if (err) { |
2368 | 		kvm_err("Cannot map world-switch code\n"); |
2369 | goto out_err; |
2370 | } |
2371 | |
2372 | err = create_hyp_mappings(kvm_ksym_ref(__hyp_rodata_start), |
2373 | kvm_ksym_ref(__hyp_rodata_end), PAGE_HYP_RO); |
2374 | if (err) { |
2375 | 		kvm_err("Cannot map .hyp.rodata section\n"); |
2376 | goto out_err; |
2377 | } |
2378 | |
2379 | err = create_hyp_mappings(kvm_ksym_ref(__start_rodata), |
2380 | kvm_ksym_ref(__end_rodata), PAGE_HYP_RO); |
2381 | if (err) { |
2382 | 		kvm_err("Cannot map rodata section\n"); |
2383 | goto out_err; |
2384 | } |
2385 | |
2386 | /* |
2387 | * .hyp.bss is guaranteed to be placed at the beginning of the .bss |
2388 | * section thanks to an assertion in the linker script. Map it RW and |
2389 | * the rest of .bss RO. |
2390 | */ |
2391 | err = create_hyp_mappings(kvm_ksym_ref(__hyp_bss_start), |
2392 | kvm_ksym_ref(__hyp_bss_end), PAGE_HYP); |
2393 | if (err) { |
2394 | 		kvm_err("Cannot map hyp bss section: %d\n", err); |
2395 | goto out_err; |
2396 | } |
2397 | |
2398 | err = create_hyp_mappings(kvm_ksym_ref(__hyp_bss_end), |
2399 | kvm_ksym_ref(__bss_stop), PAGE_HYP_RO); |
2400 | if (err) { |
2401 | 		kvm_err("Cannot map bss section\n"); |
2402 | goto out_err; |
2403 | } |
2404 | |
2405 | /* |
2406 | * Map the Hyp stack pages |
2407 | */ |
2408 | for_each_possible_cpu(cpu) { |
2409 | struct kvm_nvhe_init_params *params = per_cpu_ptr_nvhe_sym(kvm_init_params, cpu); |
2410 | char *stack_page = (char *)per_cpu(kvm_arm_hyp_stack_page, cpu); |
2411 | |
2412 | 		err = create_hyp_stack(__pa(stack_page), &params->stack_hyp_va); |
2413 | if (err) { |
2414 | 			kvm_err("Cannot map hyp stack\n"); |
2415 | goto out_err; |
2416 | } |
2417 | |
2418 | /* |
2419 | * Save the stack PA in nvhe_init_params. This will be needed |
2420 | * to recreate the stack mapping in protected nVHE mode. |
2421 | * __hyp_pa() won't do the right thing there, since the stack |
2422 | * has been mapped in the flexible private VA space. |
2423 | */ |
2424 | params->stack_pa = __pa(stack_page); |
2425 | } |
2426 | |
2427 | for_each_possible_cpu(cpu) { |
2428 | char *percpu_begin = (char *)kvm_nvhe_sym(kvm_arm_hyp_percpu_base)[cpu]; |
2429 | char *percpu_end = percpu_begin + nvhe_percpu_size(); |
2430 | |
2431 | /* Map Hyp percpu pages */ |
2432 | err = create_hyp_mappings(percpu_begin, percpu_end, PAGE_HYP); |
2433 | if (err) { |
2434 | 			kvm_err("Cannot map hyp percpu region\n"); |
2435 | goto out_err; |
2436 | } |
2437 | |
2438 | /* Prepare the CPU initialization parameters */ |
2439 | cpu_prepare_hyp_mode(cpu, hyp_va_bits); |
2440 | } |
2441 | |
2442 | kvm_hyp_init_symbols(); |
2443 | |
2444 | if (is_protected_kvm_enabled()) { |
2445 | if (IS_ENABLED(CONFIG_ARM64_PTR_AUTH_KERNEL) && |
2446 | cpus_have_final_cap(ARM64_HAS_ADDRESS_AUTH)) |
2447 | pkvm_hyp_init_ptrauth(); |
2448 | |
2449 | init_cpu_logical_map(); |
2450 | |
2451 | if (!init_psci_relay()) { |
2452 | err = -ENODEV; |
2453 | goto out_err; |
2454 | } |
2455 | |
2456 | err = kvm_hyp_init_protection(hyp_va_bits); |
2457 | if (err) { |
2458 | 			kvm_err("Failed to init hyp memory protection\n"); |
2459 | goto out_err; |
2460 | } |
2461 | } |
2462 | |
2463 | return 0; |
2464 | |
2465 | out_err: |
2466 | teardown_hyp_mode(); |
2467 | 	kvm_err("error initializing Hyp mode: %d\n", err); |
2468 | return err; |
2469 | } |
2470 | |
2471 | struct kvm_vcpu *kvm_mpidr_to_vcpu(struct kvm *kvm, unsigned long mpidr) |
2472 | { |
2473 | struct kvm_vcpu *vcpu; |
2474 | unsigned long i; |
2475 | |
2476 | mpidr &= MPIDR_HWID_BITMASK; |
2477 | |
2478 | if (kvm->arch.mpidr_data) { |
2479 | u16 idx = kvm_mpidr_index(kvm->arch.mpidr_data, mpidr); |
2480 | |
2481 | vcpu = kvm_get_vcpu(kvm, |
2482 | 				    kvm->arch.mpidr_data->cmpidr_to_idx[idx]); |
2483 | if (mpidr != kvm_vcpu_get_mpidr_aff(vcpu)) |
2484 | vcpu = NULL; |
2485 | |
2486 | return vcpu; |
2487 | } |
2488 | |
2489 | kvm_for_each_vcpu(i, vcpu, kvm) { |
2490 | if (mpidr == kvm_vcpu_get_mpidr_aff(vcpu)) |
2491 | return vcpu; |
2492 | } |
2493 | return NULL; |
2494 | } |
2495 | |
2496 | bool kvm_arch_irqchip_in_kernel(struct kvm *kvm) |
2497 | { |
2498 | return irqchip_in_kernel(kvm); |
2499 | } |
2500 | |
2501 | bool kvm_arch_has_irq_bypass(void) |
2502 | { |
2503 | return true; |
2504 | } |
2505 | |
2506 | int kvm_arch_irq_bypass_add_producer(struct irq_bypass_consumer *cons, |
2507 | struct irq_bypass_producer *prod) |
2508 | { |
2509 | struct kvm_kernel_irqfd *irqfd = |
2510 | container_of(cons, struct kvm_kernel_irqfd, consumer); |
2511 | |
2512 | return kvm_vgic_v4_set_forwarding(irqfd->kvm, prod->irq, |
2513 | &irqfd->irq_entry); |
2514 | } |
2515 | void kvm_arch_irq_bypass_del_producer(struct irq_bypass_consumer *cons, |
2516 | struct irq_bypass_producer *prod) |
2517 | { |
2518 | struct kvm_kernel_irqfd *irqfd = |
2519 | container_of(cons, struct kvm_kernel_irqfd, consumer); |
2520 | |
2521 | kvm_vgic_v4_unset_forwarding(irqfd->kvm, prod->irq, |
2522 | &irqfd->irq_entry); |
2523 | } |
2524 | |
2525 | void kvm_arch_irq_bypass_stop(struct irq_bypass_consumer *cons) |
2526 | { |
2527 | struct kvm_kernel_irqfd *irqfd = |
2528 | container_of(cons, struct kvm_kernel_irqfd, consumer); |
2529 | |
2530 | 	kvm_arm_halt_guest(irqfd->kvm); |
2531 | } |
2532 | |
2533 | void kvm_arch_irq_bypass_start(struct irq_bypass_consumer *cons) |
2534 | { |
2535 | struct kvm_kernel_irqfd *irqfd = |
2536 | container_of(cons, struct kvm_kernel_irqfd, consumer); |
2537 | |
2538 | 	kvm_arm_resume_guest(irqfd->kvm); |
2539 | } |
2540 | |
2541 | /* Initialize Hyp-mode and memory mappings on all CPUs */ |
2542 | static __init int kvm_arm_init(void) |
2543 | { |
2544 | int err; |
2545 | bool in_hyp_mode; |
2546 | |
2547 | if (!is_hyp_mode_available()) { |
2548 | 		kvm_info("HYP mode not available\n"); |
2549 | return -ENODEV; |
2550 | } |
2551 | |
2552 | if (kvm_get_mode() == KVM_MODE_NONE) { |
2553 | 		kvm_info("KVM disabled from command line\n"); |
2554 | return -ENODEV; |
2555 | } |
2556 | |
2557 | err = kvm_sys_reg_table_init(); |
2558 | if (err) { |
2559 | 		kvm_info("Error initializing system register tables\n"); |
2560 | return err; |
2561 | } |
2562 | |
2563 | in_hyp_mode = is_kernel_in_hyp_mode(); |
2564 | |
2565 | if (cpus_have_final_cap(ARM64_WORKAROUND_DEVICE_LOAD_ACQUIRE) || |
2566 | cpus_have_final_cap(ARM64_WORKAROUND_1508412)) |
2567 | kvm_info("Guests without required CPU erratum workarounds can deadlock system!\n" \ |
2568 | 			 "Only trusted guests should be used on this system.\n"); |
2569 | |
2570 | err = kvm_set_ipa_limit(); |
2571 | if (err) |
2572 | return err; |
2573 | |
2574 | err = kvm_arm_init_sve(); |
2575 | if (err) |
2576 | return err; |
2577 | |
2578 | err = kvm_arm_vmid_alloc_init(); |
2579 | if (err) { |
2580 | 		kvm_err("Failed to initialize VMID allocator.\n"); |
2581 | return err; |
2582 | } |
2583 | |
2584 | if (!in_hyp_mode) { |
2585 | err = init_hyp_mode(); |
2586 | if (err) |
2587 | goto out_err; |
2588 | } |
2589 | |
2590 | err = kvm_init_vector_slots(); |
2591 | if (err) { |
2592 | 		kvm_err("Cannot initialise vector slots\n"); |
2593 | goto out_hyp; |
2594 | } |
2595 | |
2596 | err = init_subsystems(); |
2597 | if (err) |
2598 | goto out_hyp; |
2599 | |
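	/*
	 * Spelled out, the message below reads "VHE", "Hyp nVHE", "Hyp hVHE",
	 * "Protected nVHE" or "Protected hVHE", followed by "mode initialized
	 * successfully", depending on the two ternaries.
	 */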
2600 | 	kvm_info("%s%sVHE mode initialized successfully\n", |
2601 | 		 in_hyp_mode ? "" : (is_protected_kvm_enabled() ? |
2602 | 				     "Protected " : "Hyp "), |
2603 | 		 in_hyp_mode ? "" : (cpus_have_final_cap(ARM64_KVM_HVHE) ? |
2604 | 				     "h" : "n")); |
2605 | |
2606 | /* |
2607 | * FIXME: Do something reasonable if kvm_init() fails after pKVM |
2608 | * hypervisor protection is finalized. |
2609 | */ |
2610 | 	err = kvm_init(sizeof(struct kvm_vcpu), 0, THIS_MODULE); |
2611 | if (err) |
2612 | goto out_subs; |
2613 | |
2614 | kvm_arm_initialised = true; |
2615 | |
2616 | return 0; |
2617 | |
2618 | out_subs: |
2619 | teardown_subsystems(); |
2620 | out_hyp: |
2621 | if (!in_hyp_mode) |
2622 | teardown_hyp_mode(); |
2623 | out_err: |
2624 | kvm_arm_vmid_alloc_free(); |
2625 | return err; |
2626 | } |
2627 | |
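/*
 * Parse the kvm-arm.mode= kernel command line parameter. The accepted values,
 * per the checks below, are "none", "protected", "nvhe" and "nested". For
 * instance, booting with
 *
 *	kvm-arm.mode=protected
 *
 * selects protected KVM on a system where the kernel does not run at EL2.
 */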
2628 | static int __init early_kvm_mode_cfg(char *arg) |
2629 | { |
2630 | if (!arg) |
2631 | return -EINVAL; |
2632 | |
2633 | 	if (strcmp(arg, "none") == 0) { |
2634 | kvm_mode = KVM_MODE_NONE; |
2635 | return 0; |
2636 | } |
2637 | |
2638 | if (!is_hyp_mode_available()) { |
2639 | 		pr_warn_once("KVM is not available. Ignoring kvm-arm.mode\n"); |
2640 | return 0; |
2641 | } |
2642 | |
2643 | 	if (strcmp(arg, "protected") == 0) { |
2644 | if (!is_kernel_in_hyp_mode()) |
2645 | kvm_mode = KVM_MODE_PROTECTED; |
2646 | else |
2647 | 			pr_warn_once("Protected KVM not available with VHE\n"); |
2648 | |
2649 | return 0; |
2650 | } |
2651 | |
2652 | 	if (strcmp(arg, "nvhe") == 0 && !WARN_ON(is_kernel_in_hyp_mode())) { |
2653 | kvm_mode = KVM_MODE_DEFAULT; |
2654 | return 0; |
2655 | } |
2656 | |
2657 | 	if (strcmp(arg, "nested") == 0 && !WARN_ON(!is_kernel_in_hyp_mode())) { |
2658 | kvm_mode = KVM_MODE_NV; |
2659 | return 0; |
2660 | } |
2661 | |
2662 | return -EINVAL; |
2663 | } |
2664 | early_param("kvm-arm.mode", early_kvm_mode_cfg); |
2665 | |
2666 | enum kvm_mode kvm_get_mode(void) |
2667 | { |
2668 | return kvm_mode; |
2669 | } |
2670 | |
2671 | module_init(kvm_arm_init); |
2672 | |