// SPDX-License-Identifier: GPL-2.0-only
/*
 *
 * Copyright IBM Corp. 2007
 *
 * Authors: Hollis Blanchard <hollisb@us.ibm.com>
 *          Christian Ehrhardt <ehrhardt@linux.vnet.ibm.com>
 */

#include <linux/errno.h>
#include <linux/err.h>
#include <linux/kvm_host.h>
#include <linux/vmalloc.h>
#include <linux/hrtimer.h>
#include <linux/sched/signal.h>
#include <linux/fs.h>
#include <linux/slab.h>
#include <linux/file.h>
#include <linux/module.h>
#include <linux/irqbypass.h>
#include <linux/kvm_irqfd.h>
#include <linux/of.h>
#include <asm/cputable.h>
#include <linux/uaccess.h>
#include <asm/kvm_ppc.h>
#include <asm/cputhreads.h>
#include <asm/irqflags.h>
#include <asm/iommu.h>
#include <asm/switch_to.h>
#include <asm/xive.h>
#ifdef CONFIG_PPC_PSERIES
#include <asm/hvcall.h>
#include <asm/plpar_wrappers.h>
#endif
#include <asm/ultravisor.h>
#include <asm/setup.h>

#include "timing.h"
#include "../mm/mmu_decl.h"

#define CREATE_TRACE_POINTS
#include "trace.h"

struct kvmppc_ops *kvmppc_hv_ops;
EXPORT_SYMBOL_GPL(kvmppc_hv_ops);
struct kvmppc_ops *kvmppc_pr_ops;
EXPORT_SYMBOL_GPL(kvmppc_pr_ops);


int kvm_arch_vcpu_runnable(struct kvm_vcpu *v)
{
	return !!(v->arch.pending_exceptions) || kvm_request_pending(v);
}

bool kvm_arch_dy_runnable(struct kvm_vcpu *vcpu)
{
	return kvm_arch_vcpu_runnable(vcpu);
}

bool kvm_arch_vcpu_in_kernel(struct kvm_vcpu *vcpu)
{
	return false;
}

int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu)
{
	return 1;
}
/*
 * Common checks before entering the guest world. Call with interrupts
 * enabled: on success (return 1) interrupts are left hard-disabled for
 * the guest entry, on failure (return <= 0) they are re-enabled.
 *
 * returns:
 *
 * == 1 if we're ready to go into guest state
 * <= 0 if we need to go back to the host with return value
 */
int kvmppc_prepare_to_enter(struct kvm_vcpu *vcpu)
{
	int r;

	WARN_ON(irqs_disabled());
	hard_irq_disable();

	while (true) {
		if (need_resched()) {
			local_irq_enable();
			cond_resched();
			hard_irq_disable();
			continue;
		}

		if (signal_pending(current)) {
			kvmppc_account_exit(vcpu, SIGNAL_EXITS);
			vcpu->run->exit_reason = KVM_EXIT_INTR;
			r = -EINTR;
			break;
		}

		vcpu->mode = IN_GUEST_MODE;

		/*
		 * Reading vcpu->requests must happen after setting vcpu->mode,
		 * so we don't miss a request because the requester sees
		 * OUTSIDE_GUEST_MODE and assumes we'll be checking requests
		 * before next entering the guest (and thus doesn't IPI).
		 * This also orders the write to mode from any reads
		 * to the page tables done while the VCPU is running.
		 * Please see the comment in kvm_flush_remote_tlbs.
		 */
		smp_mb();

		if (kvm_request_pending(vcpu)) {
			/* Make sure we process requests with preemption enabled */
			local_irq_enable();
			trace_kvm_check_requests(vcpu);
			r = kvmppc_core_check_requests(vcpu);
			hard_irq_disable();
			if (r > 0)
				continue;
			break;
		}

		if (kvmppc_core_prepare_to_enter(vcpu)) {
			/*
			 * interrupts got enabled in between, so we
			 * are back at square 1
			 */
			continue;
		}

		guest_enter_irqoff();
		return 1;
	}

	/* return to host */
	local_irq_enable();
	return r;
}
EXPORT_SYMBOL_GPL(kvmppc_prepare_to_enter);

#if defined(CONFIG_PPC_BOOK3S_64) && defined(CONFIG_KVM_BOOK3S_PR_POSSIBLE)
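/*
 * The magic-page area shared with the guest is kept in guest byte
 * order; byte-swap every field when the guest changes endianness.
 */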
static void kvmppc_swab_shared(struct kvm_vcpu *vcpu)
{
	struct kvm_vcpu_arch_shared *shared = vcpu->arch.shared;
	int i;

	shared->sprg0 = swab64(shared->sprg0);
	shared->sprg1 = swab64(shared->sprg1);
	shared->sprg2 = swab64(shared->sprg2);
	shared->sprg3 = swab64(shared->sprg3);
	shared->srr0 = swab64(shared->srr0);
	shared->srr1 = swab64(shared->srr1);
	shared->dar = swab64(shared->dar);
	shared->msr = swab64(shared->msr);
	shared->dsisr = swab32(shared->dsisr);
	shared->int_pending = swab32(shared->int_pending);
	for (i = 0; i < ARRAY_SIZE(shared->sr); i++)
		shared->sr[i] = swab32(shared->sr[i]);
}
#endif

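/*
 * Handle an ePAPR/KVM paravirtual hypercall from the guest: the
 * hypercall number arrives in GPR11 and the arguments in GPR3-GPR6.
 * The second return value is passed back in GPR4; the first is this
 * function's return value, which the caller places in GPR3.
 */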
int kvmppc_kvm_pv(struct kvm_vcpu *vcpu)
{
	int nr = kvmppc_get_gpr(vcpu, 11);
	int r;
	unsigned long __maybe_unused param1 = kvmppc_get_gpr(vcpu, 3);
	unsigned long __maybe_unused param2 = kvmppc_get_gpr(vcpu, 4);
	unsigned long __maybe_unused param3 = kvmppc_get_gpr(vcpu, 5);
	unsigned long __maybe_unused param4 = kvmppc_get_gpr(vcpu, 6);
	unsigned long r2 = 0;

	if (!(kvmppc_get_msr(vcpu) & MSR_SF)) {
		/* 32 bit mode */
		param1 &= 0xffffffff;
		param2 &= 0xffffffff;
		param3 &= 0xffffffff;
		param4 &= 0xffffffff;
	}

	switch (nr) {
	case KVM_HCALL_TOKEN(KVM_HC_PPC_MAP_MAGIC_PAGE):
	{
#if defined(CONFIG_PPC_BOOK3S_64) && defined(CONFIG_KVM_BOOK3S_PR_POSSIBLE)
		/* Book3S can be little endian, find it out here */
		int shared_big_endian = true;
		if (vcpu->arch.intr_msr & MSR_LE)
			shared_big_endian = false;
		if (shared_big_endian != vcpu->arch.shared_big_endian)
			kvmppc_swab_shared(vcpu);
		vcpu->arch.shared_big_endian = shared_big_endian;
#endif

		if (!(param2 & MAGIC_PAGE_FLAG_NOT_MAPPED_NX)) {
			/*
			 * Older versions of the Linux magic page code had
			 * a bug where they would map their trampoline code
			 * NX. If that's the case, remove !PR NX capability.
			 */
			vcpu->arch.disable_kernel_nx = true;
			kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
		}

		vcpu->arch.magic_page_pa = param1 & ~0xfffULL;
		vcpu->arch.magic_page_ea = param2 & ~0xfffULL;

#ifdef CONFIG_PPC_64K_PAGES
		/*
		 * Make sure our 4k magic page is in the same window of a 64k
		 * page within the guest and within the host's page.
		 */
		if ((vcpu->arch.magic_page_pa & 0xf000) !=
		    ((ulong)vcpu->arch.shared & 0xf000)) {
			void *old_shared = vcpu->arch.shared;
			ulong shared = (ulong)vcpu->arch.shared;
			void *new_shared;

			shared &= PAGE_MASK;
			shared |= vcpu->arch.magic_page_pa & 0xf000;
			new_shared = (void *)shared;
			memcpy(new_shared, old_shared, 0x1000);
			vcpu->arch.shared = new_shared;
		}
#endif

		r2 = KVM_MAGIC_FEAT_SR | KVM_MAGIC_FEAT_MAS0_TO_SPRG7;

		r = EV_SUCCESS;
		break;
	}
	case KVM_HCALL_TOKEN(KVM_HC_FEATURES):
		r = EV_SUCCESS;
#if defined(CONFIG_PPC_BOOK3S) || defined(CONFIG_KVM_E500V2)
		r2 |= (1 << KVM_FEATURE_MAGIC_PAGE);
#endif

		/* Second return value is in r4 */
		break;
	case EV_HCALL_TOKEN(EV_IDLE):
		r = EV_SUCCESS;
		kvm_vcpu_halt(vcpu);
		break;
	default:
		r = EV_UNIMPLEMENTED;
		break;
	}

	kvmppc_set_gpr(vcpu, 4, r2);

	return r;
}
EXPORT_SYMBOL_GPL(kvmppc_kvm_pv);

int kvmppc_sanity_check(struct kvm_vcpu *vcpu)
{
	int r = false;

	/* We have to know what CPU to virtualize */
	if (!vcpu->arch.pvr)
		goto out;

	/* PAPR only works with book3s_64 */
	if ((vcpu->arch.cpu_type != KVM_CPU_3S_64) && vcpu->arch.papr_enabled)
		goto out;

	/* HV KVM can only do PAPR mode for now */
	if (!vcpu->arch.papr_enabled && is_kvmppc_hv_enabled(vcpu->kvm))
		goto out;

#ifdef CONFIG_KVM_BOOKE_HV
	if (!cpu_has_feature(CPU_FTR_EMB_HV))
		goto out;
#endif

	r = true;

out:
	vcpu->arch.sane = r;
	return r ? 0 : -EINVAL;
}
EXPORT_SYMBOL_GPL(kvmppc_sanity_check);

int kvmppc_emulate_mmio(struct kvm_vcpu *vcpu)
{
	enum emulation_result er;
	int r;

	er = kvmppc_emulate_loadstore(vcpu);
	switch (er) {
	case EMULATE_DONE:
		/* Future optimization: only reload non-volatiles if they were
		 * actually modified. */
		r = RESUME_GUEST_NV;
		break;
	case EMULATE_AGAIN:
		r = RESUME_GUEST;
		break;
	case EMULATE_DO_MMIO:
		vcpu->run->exit_reason = KVM_EXIT_MMIO;
		/* We must reload nonvolatiles because "update" load/store
		 * instructions modify register state. */
		/* Future optimization: only reload non-volatiles if they were
		 * actually modified. */
		r = RESUME_HOST_NV;
		break;
	case EMULATE_FAIL:
	{
		ppc_inst_t last_inst;

		kvmppc_get_last_inst(vcpu, INST_GENERIC, &last_inst);
		kvm_debug_ratelimited("Guest access to device memory using unsupported instruction (opcode: %#08x)\n",
				      ppc_inst_val(last_inst));

		/*
		 * Injecting a Data Storage here is a bit more
		 * accurate since the instruction that caused the
		 * access could still be a valid one.
		 */
		if (!IS_ENABLED(CONFIG_BOOKE)) {
			ulong dsisr = DSISR_BADACCESS;

			if (vcpu->mmio_is_write)
				dsisr |= DSISR_ISSTORE;

			kvmppc_core_queue_data_storage(vcpu,
					kvmppc_get_msr(vcpu) & SRR1_PREFIXED,
					vcpu->arch.vaddr_accessed, dsisr);
		} else {
			/*
			 * BookE does not send a SIGBUS on a bad
			 * fault, so use a Program interrupt instead
			 * to avoid a fault loop.
			 */
			kvmppc_core_queue_program(vcpu, 0);
		}

		r = RESUME_GUEST;
		break;
	}
	default:
		WARN_ON(1);
		r = RESUME_GUEST;
	}

	return r;
}
EXPORT_SYMBOL_GPL(kvmppc_emulate_mmio);

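/*
 * Store 'size' bytes from 'ptr' at guest effective address *eaddr,
 * translating through the guest MMU and honouring the magic page
 * override; *eaddr is updated to the translated real address.
 */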
int kvmppc_st(struct kvm_vcpu *vcpu, ulong *eaddr, int size, void *ptr,
	      bool data)
{
	ulong mp_pa = vcpu->arch.magic_page_pa & KVM_PAM & PAGE_MASK;
	struct kvmppc_pte pte;
	int r = -EINVAL;

	vcpu->stat.st++;

	if (vcpu->kvm->arch.kvm_ops && vcpu->kvm->arch.kvm_ops->store_to_eaddr)
		r = vcpu->kvm->arch.kvm_ops->store_to_eaddr(vcpu, eaddr, ptr,
							    size);

	if ((!r) || (r == -EAGAIN))
		return r;

	r = kvmppc_xlate(vcpu, *eaddr, data ? XLATE_DATA : XLATE_INST,
			 XLATE_WRITE, &pte);
	if (r < 0)
		return r;

	*eaddr = pte.raddr;

	if (!pte.may_write)
		return -EPERM;

	/* Magic page override */
	if (kvmppc_supports_magic_page(vcpu) && mp_pa &&
	    ((pte.raddr & KVM_PAM & PAGE_MASK) == mp_pa) &&
	    !(kvmppc_get_msr(vcpu) & MSR_PR)) {
		void *magic = vcpu->arch.shared;
		magic += pte.eaddr & 0xfff;
		memcpy(magic, ptr, size);
		return EMULATE_DONE;
	}

	if (kvm_write_guest(vcpu->kvm, pte.raddr, ptr, size))
		return EMULATE_DO_MMIO;

	return EMULATE_DONE;
}
EXPORT_SYMBOL_GPL(kvmppc_st);

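/*
 * Load 'size' bytes into 'ptr' from guest effective address *eaddr;
 * the mirror image of kvmppc_st() above, with an additional execute
 * permission check for instruction fetches.
 */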
int kvmppc_ld(struct kvm_vcpu *vcpu, ulong *eaddr, int size, void *ptr,
	      bool data)
{
	ulong mp_pa = vcpu->arch.magic_page_pa & KVM_PAM & PAGE_MASK;
	struct kvmppc_pte pte;
	int rc = -EINVAL;

	vcpu->stat.ld++;

	if (vcpu->kvm->arch.kvm_ops && vcpu->kvm->arch.kvm_ops->load_from_eaddr)
		rc = vcpu->kvm->arch.kvm_ops->load_from_eaddr(vcpu, eaddr, ptr,
							      size);

	if ((!rc) || (rc == -EAGAIN))
		return rc;

	rc = kvmppc_xlate(vcpu, *eaddr, data ? XLATE_DATA : XLATE_INST,
			  XLATE_READ, &pte);
	if (rc)
		return rc;

	*eaddr = pte.raddr;

	if (!pte.may_read)
		return -EPERM;

	if (!data && !pte.may_execute)
		return -ENOEXEC;

	/* Magic page override */
	if (kvmppc_supports_magic_page(vcpu) && mp_pa &&
	    ((pte.raddr & KVM_PAM & PAGE_MASK) == mp_pa) &&
	    !(kvmppc_get_msr(vcpu) & MSR_PR)) {
		void *magic = vcpu->arch.shared;
		magic += pte.eaddr & 0xfff;
		memcpy(ptr, magic, size);
		return EMULATE_DONE;
	}

	kvm_vcpu_srcu_read_lock(vcpu);
	rc = kvm_read_guest(vcpu->kvm, pte.raddr, ptr, size);
	kvm_vcpu_srcu_read_unlock(vcpu);
	if (rc)
		return EMULATE_DO_MMIO;

	return EMULATE_DONE;
}
EXPORT_SYMBOL_GPL(kvmppc_ld);

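/*
 * Pick the HV or PR implementation for a new VM based on the requested
 * type and pin the implementing module for the VM's lifetime.
 */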
int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
{
	struct kvmppc_ops *kvm_ops = NULL;
	int r;

	/*
	 * if we have both HV and PR enabled, default is HV
	 */
	if (type == 0) {
		if (kvmppc_hv_ops)
			kvm_ops = kvmppc_hv_ops;
		else
			kvm_ops = kvmppc_pr_ops;
		if (!kvm_ops)
			goto err_out;
	} else if (type == KVM_VM_PPC_HV) {
		if (!kvmppc_hv_ops)
			goto err_out;
		kvm_ops = kvmppc_hv_ops;
	} else if (type == KVM_VM_PPC_PR) {
		if (!kvmppc_pr_ops)
			goto err_out;
		kvm_ops = kvmppc_pr_ops;
	} else
		goto err_out;

	if (!try_module_get(kvm_ops->owner))
		return -ENOENT;

	kvm->arch.kvm_ops = kvm_ops;
	r = kvmppc_core_init_vm(kvm);
	if (r)
		module_put(kvm_ops->owner);
	return r;
err_out:
	return -EINVAL;
}

void kvm_arch_destroy_vm(struct kvm *kvm)
{
#ifdef CONFIG_KVM_XICS
	/*
	 * We call kick_all_cpus_sync() to ensure that all
	 * CPUs have executed any pending IPIs before we
	 * continue and free VCPUs structures below.
	 */
	if (is_kvmppc_hv_enabled(kvm))
		kick_all_cpus_sync();
#endif

	kvm_destroy_vcpus(kvm);

	mutex_lock(&kvm->lock);

	kvmppc_core_destroy_vm(kvm);

	mutex_unlock(&kvm->lock);

	/* drop the module reference */
	module_put(kvm->arch.kvm_ops->owner);
}

int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
{
	int r;
	/* Assume we're using HV mode when the HV module is loaded */
	int hv_enabled = kvmppc_hv_ops ? 1 : 0;

	if (kvm) {
		/*
		 * Hooray - we know which VM type we're running on. Depend on
		 * that rather than the guess above.
		 */
		hv_enabled = is_kvmppc_hv_enabled(kvm);
	}

	switch (ext) {
#ifdef CONFIG_BOOKE
	case KVM_CAP_PPC_BOOKE_SREGS:
	case KVM_CAP_PPC_BOOKE_WATCHDOG:
	case KVM_CAP_PPC_EPR:
#else
	case KVM_CAP_PPC_SEGSTATE:
	case KVM_CAP_PPC_HIOR:
	case KVM_CAP_PPC_PAPR:
#endif
	case KVM_CAP_PPC_UNSET_IRQ:
	case KVM_CAP_PPC_IRQ_LEVEL:
	case KVM_CAP_ENABLE_CAP:
	case KVM_CAP_ONE_REG:
	case KVM_CAP_IOEVENTFD:
	case KVM_CAP_IMMEDIATE_EXIT:
	case KVM_CAP_SET_GUEST_DEBUG:
		r = 1;
		break;
	case KVM_CAP_PPC_GUEST_DEBUG_SSTEP:
	case KVM_CAP_PPC_PAIRED_SINGLES:
	case KVM_CAP_PPC_OSI:
	case KVM_CAP_PPC_GET_PVINFO:
#if defined(CONFIG_KVM_E500V2) || defined(CONFIG_KVM_E500MC)
	case KVM_CAP_SW_TLB:
#endif
		/* We support this only for PR */
		r = !hv_enabled;
		break;
#ifdef CONFIG_KVM_MPIC
	case KVM_CAP_IRQ_MPIC:
		r = 1;
		break;
#endif

#ifdef CONFIG_PPC_BOOK3S_64
	case KVM_CAP_SPAPR_TCE:
	case KVM_CAP_SPAPR_TCE_64:
		r = 1;
		break;
	case KVM_CAP_SPAPR_TCE_VFIO:
		r = !!cpu_has_feature(CPU_FTR_HVMODE);
		break;
	case KVM_CAP_PPC_RTAS:
	case KVM_CAP_PPC_FIXUP_HCALL:
	case KVM_CAP_PPC_ENABLE_HCALL:
#ifdef CONFIG_KVM_XICS
	case KVM_CAP_IRQ_XICS:
#endif
	case KVM_CAP_PPC_GET_CPU_CHAR:
		r = 1;
		break;
#ifdef CONFIG_KVM_XIVE
	case KVM_CAP_PPC_IRQ_XIVE:
		/*
		 * We need XIVE to be enabled on the platform (implies
		 * a POWER9 processor) and the PowerNV platform, as
		 * nested is not yet supported.
		 */
		r = xive_enabled() && !!cpu_has_feature(CPU_FTR_HVMODE) &&
			kvmppc_xive_native_supported();
		break;
#endif

#ifdef CONFIG_HAVE_KVM_IRQCHIP
	case KVM_CAP_IRQFD_RESAMPLE:
		r = !xive_enabled();
		break;
#endif

	case KVM_CAP_PPC_ALLOC_HTAB:
		r = hv_enabled;
		break;
#endif /* CONFIG_PPC_BOOK3S_64 */
#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
	case KVM_CAP_PPC_SMT:
		r = 0;
		if (kvm) {
			if (kvm->arch.emul_smt_mode > 1)
				r = kvm->arch.emul_smt_mode;
			else
				r = kvm->arch.smt_mode;
		} else if (hv_enabled) {
			if (cpu_has_feature(CPU_FTR_ARCH_300))
				r = 1;
			else
				r = threads_per_subcore;
		}
		break;
	case KVM_CAP_PPC_SMT_POSSIBLE:
		r = 1;
		if (hv_enabled) {
			if (!cpu_has_feature(CPU_FTR_ARCH_300))
				r = ((threads_per_subcore << 1) - 1);
			else
				/* P9 can emulate dbells, so allow any mode */
				r = 8 | 4 | 2 | 1;
		}
		break;
	case KVM_CAP_PPC_RMA:
		r = 0;
		break;
	case KVM_CAP_PPC_HWRNG:
		r = kvmppc_hwrng_present();
		break;
	case KVM_CAP_PPC_MMU_RADIX:
		r = !!(hv_enabled && radix_enabled());
		break;
	case KVM_CAP_PPC_MMU_HASH_V3:
		r = !!(hv_enabled && kvmppc_hv_ops->hash_v3_possible &&
		       kvmppc_hv_ops->hash_v3_possible());
		break;
	case KVM_CAP_PPC_NESTED_HV:
		r = !!(hv_enabled && kvmppc_hv_ops->enable_nested &&
		       !kvmppc_hv_ops->enable_nested(NULL));
		break;
#endif
	case KVM_CAP_SYNC_MMU:
		BUILD_BUG_ON(!IS_ENABLED(CONFIG_KVM_GENERIC_MMU_NOTIFIER));
		r = 1;
		break;
#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
	case KVM_CAP_PPC_HTAB_FD:
		r = hv_enabled;
		break;
#endif
	case KVM_CAP_NR_VCPUS:
		/*
		 * Recommending a number of CPUs is somewhat arbitrary; we
		 * return the number of present CPUs for -HV (since a host
		 * will have secondary threads "offline"), and for other KVM
		 * implementations just count online CPUs.
		 */
		if (hv_enabled)
			r = min_t(unsigned int, num_present_cpus(), KVM_MAX_VCPUS);
		else
			r = min_t(unsigned int, num_online_cpus(), KVM_MAX_VCPUS);
		break;
	case KVM_CAP_MAX_VCPUS:
		r = KVM_MAX_VCPUS;
		break;
	case KVM_CAP_MAX_VCPU_ID:
		r = KVM_MAX_VCPU_IDS;
		break;
#ifdef CONFIG_PPC_BOOK3S_64
	case KVM_CAP_PPC_GET_SMMU_INFO:
		r = 1;
		break;
	case KVM_CAP_SPAPR_MULTITCE:
		r = 1;
		break;
	case KVM_CAP_SPAPR_RESIZE_HPT:
		r = !!hv_enabled;
		break;
#endif
#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
	case KVM_CAP_PPC_FWNMI:
		r = hv_enabled;
		break;
#endif
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
	case KVM_CAP_PPC_HTM:
		r = !!(cur_cpu_spec->cpu_user_features2 & PPC_FEATURE2_HTM) ||
		     (hv_enabled && cpu_has_feature(CPU_FTR_P9_TM_HV_ASSIST));
		break;
#endif
#if defined(CONFIG_KVM_BOOK3S_HV_POSSIBLE)
	case KVM_CAP_PPC_SECURE_GUEST:
		r = hv_enabled && kvmppc_hv_ops->enable_svm &&
			!kvmppc_hv_ops->enable_svm(NULL);
		break;
	case KVM_CAP_PPC_DAWR1:
		r = !!(hv_enabled && kvmppc_hv_ops->enable_dawr1 &&
		       !kvmppc_hv_ops->enable_dawr1(NULL));
		break;
	case KVM_CAP_PPC_RPT_INVALIDATE:
		r = 1;
		break;
#endif
	case KVM_CAP_PPC_AIL_MODE_3:
		r = 0;
		/*
		 * KVM PR, POWER7, and some POWER9s don't support AIL=3 mode.
		 * The POWER9s can support it if the guest runs in hash mode,
		 * but QEMU doesn't necessarily query the capability in time.
		 */
		if (hv_enabled) {
			if (kvmhv_on_pseries()) {
				if (pseries_reloc_on_exception())
					r = 1;
			} else if (cpu_has_feature(CPU_FTR_ARCH_207S) &&
				  !cpu_has_feature(CPU_FTR_P9_RADIX_PREFETCH_BUG)) {
				r = 1;
			}
		}
		break;
	default:
		r = 0;
		break;
	}
	return r;
}

long kvm_arch_dev_ioctl(struct file *filp,
			unsigned int ioctl, unsigned long arg)
{
	return -EINVAL;
}

void kvm_arch_free_memslot(struct kvm *kvm, struct kvm_memory_slot *slot)
{
	kvmppc_core_free_memslot(kvm, slot);
}

int kvm_arch_prepare_memory_region(struct kvm *kvm,
				   const struct kvm_memory_slot *old,
				   struct kvm_memory_slot *new,
				   enum kvm_mr_change change)
{
	return kvmppc_core_prepare_memory_region(kvm, old, new, change);
}

void kvm_arch_commit_memory_region(struct kvm *kvm,
				   struct kvm_memory_slot *old,
				   const struct kvm_memory_slot *new,
				   enum kvm_mr_change change)
{
	kvmppc_core_commit_memory_region(kvm, old, new, change);
}

void kvm_arch_flush_shadow_memslot(struct kvm *kvm,
				   struct kvm_memory_slot *slot)
{
	kvmppc_core_flush_memslot(kvm, slot);
}

int kvm_arch_vcpu_precreate(struct kvm *kvm, unsigned int id)
{
	return 0;
}

static enum hrtimer_restart kvmppc_decrementer_wakeup(struct hrtimer *timer)
{
	struct kvm_vcpu *vcpu;

	vcpu = container_of(timer, struct kvm_vcpu, arch.dec_timer);
	kvmppc_decrementer_func(vcpu);

	return HRTIMER_NORESTART;
}

int kvm_arch_vcpu_create(struct kvm_vcpu *vcpu)
{
	int err;

	hrtimer_init(&vcpu->arch.dec_timer, CLOCK_REALTIME, HRTIMER_MODE_ABS);
	vcpu->arch.dec_timer.function = kvmppc_decrementer_wakeup;

#ifdef CONFIG_KVM_EXIT_TIMING
	mutex_init(&vcpu->arch.exit_timing_lock);
#endif
	err = kvmppc_subarch_vcpu_init(vcpu);
	if (err)
		return err;

	err = kvmppc_core_vcpu_create(vcpu);
	if (err)
		goto out_vcpu_uninit;

	rcuwait_init(&vcpu->arch.wait);
	vcpu->arch.waitp = &vcpu->arch.wait;
	return 0;

out_vcpu_uninit:
	kvmppc_subarch_vcpu_uninit(vcpu);
	return err;
}

void kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu)
{
}

void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
{
	/* Make sure we're not using the vcpu anymore */
	hrtimer_cancel(&vcpu->arch.dec_timer);

	switch (vcpu->arch.irq_type) {
	case KVMPPC_IRQ_MPIC:
		kvmppc_mpic_disconnect_vcpu(vcpu->arch.mpic, vcpu);
		break;
	case KVMPPC_IRQ_XICS:
		if (xics_on_xive())
			kvmppc_xive_cleanup_vcpu(vcpu);
		else
			kvmppc_xics_free_icp(vcpu);
		break;
	case KVMPPC_IRQ_XIVE:
		kvmppc_xive_native_cleanup_vcpu(vcpu);
		break;
	}

	kvmppc_core_vcpu_free(vcpu);

	kvmppc_subarch_vcpu_uninit(vcpu);
}

int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu)
{
	return kvmppc_core_pending_dec(vcpu);
}

void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
#ifdef CONFIG_BOOKE
	/*
	 * vrsave (formerly usprg0) isn't used by Linux, but may
	 * be used by the guest.
	 *
	 * On non-booke this is associated with Altivec and
	 * is handled by code in book3s.c.
	 */
	mtspr(SPRN_VRSAVE, vcpu->arch.vrsave);
#endif
	kvmppc_core_vcpu_load(vcpu, cpu);
}

void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
{
	kvmppc_core_vcpu_put(vcpu);
#ifdef CONFIG_BOOKE
	vcpu->arch.vrsave = mfspr(SPRN_VRSAVE);
#endif
}

/*
 * irq_bypass_add_producer and irq_bypass_del_producer are only
 * useful if the architecture supports PCI passthrough.
 * irq_bypass_stop and irq_bypass_start are not needed and so
 * kvm_ops are not defined for them.
 */
bool kvm_arch_has_irq_bypass(void)
{
	return ((kvmppc_hv_ops && kvmppc_hv_ops->irq_bypass_add_producer) ||
		(kvmppc_pr_ops && kvmppc_pr_ops->irq_bypass_add_producer));
}

int kvm_arch_irq_bypass_add_producer(struct irq_bypass_consumer *cons,
				     struct irq_bypass_producer *prod)
{
	struct kvm_kernel_irqfd *irqfd =
		container_of(cons, struct kvm_kernel_irqfd, consumer);
	struct kvm *kvm = irqfd->kvm;

	if (kvm->arch.kvm_ops->irq_bypass_add_producer)
		return kvm->arch.kvm_ops->irq_bypass_add_producer(cons, prod);

	return 0;
}

void kvm_arch_irq_bypass_del_producer(struct irq_bypass_consumer *cons,
				      struct irq_bypass_producer *prod)
{
	struct kvm_kernel_irqfd *irqfd =
		container_of(cons, struct kvm_kernel_irqfd, consumer);
	struct kvm *kvm = irqfd->kvm;

	if (kvm->arch.kvm_ops->irq_bypass_del_producer)
		kvm->arch.kvm_ops->irq_bypass_del_producer(cons, prod);
}

#ifdef CONFIG_VSX
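/*
 * Map a VSX doubleword index onto its offset within the 128-bit
 * register image, which depends on host endianness.
 */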
static inline int kvmppc_get_vsr_dword_offset(int index)
{
	int offset;

	if ((index != 0) && (index != 1))
		return -1;

#ifdef __BIG_ENDIAN
	offset = index;
#else
	offset = 1 - index;
#endif

	return offset;
}

static inline int kvmppc_get_vsr_word_offset(int index)
{
	int offset;

	if ((index > 3) || (index < 0))
		return -1;

#ifdef __BIG_ENDIAN
	offset = index;
#else
	offset = 3 - index;
#endif
	return offset;
}

static inline void kvmppc_set_vsr_dword(struct kvm_vcpu *vcpu,
	u64 gpr)
{
	union kvmppc_one_reg val;
	int offset = kvmppc_get_vsr_dword_offset(vcpu->arch.mmio_vsx_offset);
	int index = vcpu->arch.io_gpr & KVM_MMIO_REG_MASK;

	if (offset == -1)
		return;

	if (index >= 32) {
		kvmppc_get_vsx_vr(vcpu, index - 32, &val.vval);
		val.vsxval[offset] = gpr;
		kvmppc_set_vsx_vr(vcpu, index - 32, &val.vval);
	} else {
		kvmppc_set_vsx_fpr(vcpu, index, offset, gpr);
	}
}

static inline void kvmppc_set_vsr_dword_dump(struct kvm_vcpu *vcpu,
	u64 gpr)
{
	union kvmppc_one_reg val;
	int index = vcpu->arch.io_gpr & KVM_MMIO_REG_MASK;

	if (index >= 32) {
		kvmppc_get_vsx_vr(vcpu, index - 32, &val.vval);
		val.vsxval[0] = gpr;
		val.vsxval[1] = gpr;
		kvmppc_set_vsx_vr(vcpu, index - 32, &val.vval);
	} else {
		kvmppc_set_vsx_fpr(vcpu, index, 0, gpr);
		kvmppc_set_vsx_fpr(vcpu, index, 1, gpr);
	}
}

static inline void kvmppc_set_vsr_word_dump(struct kvm_vcpu *vcpu,
	u32 gpr)
{
	union kvmppc_one_reg val;
	int index = vcpu->arch.io_gpr & KVM_MMIO_REG_MASK;

	if (index >= 32) {
		val.vsx32val[0] = gpr;
		val.vsx32val[1] = gpr;
		val.vsx32val[2] = gpr;
		val.vsx32val[3] = gpr;
		kvmppc_set_vsx_vr(vcpu, index - 32, &val.vval);
	} else {
		val.vsx32val[0] = gpr;
		val.vsx32val[1] = gpr;
		kvmppc_set_vsx_fpr(vcpu, index, 0, val.vsxval[0]);
		kvmppc_set_vsx_fpr(vcpu, index, 1, val.vsxval[0]);
	}
}

static inline void kvmppc_set_vsr_word(struct kvm_vcpu *vcpu,
	u32 gpr32)
{
	union kvmppc_one_reg val;
	int offset = kvmppc_get_vsr_word_offset(vcpu->arch.mmio_vsx_offset);
	int index = vcpu->arch.io_gpr & KVM_MMIO_REG_MASK;
	int dword_offset, word_offset;

	if (offset == -1)
		return;

	if (index >= 32) {
		kvmppc_get_vsx_vr(vcpu, index - 32, &val.vval);
		val.vsx32val[offset] = gpr32;
		kvmppc_set_vsx_vr(vcpu, index - 32, &val.vval);
	} else {
		dword_offset = offset / 2;
		word_offset = offset % 2;
		val.vsxval[0] = kvmppc_get_vsx_fpr(vcpu, index, dword_offset);
		val.vsx32val[word_offset] = gpr32;
		kvmppc_set_vsx_fpr(vcpu, index, dword_offset, val.vsxval[0]);
	}
}
#endif /* CONFIG_VSX */

#ifdef CONFIG_ALTIVEC
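/*
 * Map a VMX element index onto its offset within the vector register,
 * reversing the element order when guest and host endianness differ.
 */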
static inline int kvmppc_get_vmx_offset_generic(struct kvm_vcpu *vcpu,
		int index, int element_size)
{
	int offset;
	int elts = sizeof(vector128)/element_size;

	if ((index < 0) || (index >= elts))
		return -1;

	if (kvmppc_need_byteswap(vcpu))
		offset = elts - index - 1;
	else
		offset = index;

	return offset;
}

static inline int kvmppc_get_vmx_dword_offset(struct kvm_vcpu *vcpu,
		int index)
{
	return kvmppc_get_vmx_offset_generic(vcpu, index, 8);
}

static inline int kvmppc_get_vmx_word_offset(struct kvm_vcpu *vcpu,
		int index)
{
	return kvmppc_get_vmx_offset_generic(vcpu, index, 4);
}

static inline int kvmppc_get_vmx_hword_offset(struct kvm_vcpu *vcpu,
		int index)
{
	return kvmppc_get_vmx_offset_generic(vcpu, index, 2);
}

static inline int kvmppc_get_vmx_byte_offset(struct kvm_vcpu *vcpu,
		int index)
{
	return kvmppc_get_vmx_offset_generic(vcpu, index, 1);
}


static inline void kvmppc_set_vmx_dword(struct kvm_vcpu *vcpu,
	u64 gpr)
{
	union kvmppc_one_reg val;
	int offset = kvmppc_get_vmx_dword_offset(vcpu,
			vcpu->arch.mmio_vmx_offset);
	int index = vcpu->arch.io_gpr & KVM_MMIO_REG_MASK;

	if (offset == -1)
		return;

	kvmppc_get_vsx_vr(vcpu, index, &val.vval);
	val.vsxval[offset] = gpr;
	kvmppc_set_vsx_vr(vcpu, index, &val.vval);
}

static inline void kvmppc_set_vmx_word(struct kvm_vcpu *vcpu,
	u32 gpr32)
{
	union kvmppc_one_reg val;
	int offset = kvmppc_get_vmx_word_offset(vcpu,
			vcpu->arch.mmio_vmx_offset);
	int index = vcpu->arch.io_gpr & KVM_MMIO_REG_MASK;

	if (offset == -1)
		return;

	kvmppc_get_vsx_vr(vcpu, index, &val.vval);
	val.vsx32val[offset] = gpr32;
	kvmppc_set_vsx_vr(vcpu, index, &val.vval);
}

static inline void kvmppc_set_vmx_hword(struct kvm_vcpu *vcpu,
	u16 gpr16)
{
	union kvmppc_one_reg val;
	int offset = kvmppc_get_vmx_hword_offset(vcpu,
			vcpu->arch.mmio_vmx_offset);
	int index = vcpu->arch.io_gpr & KVM_MMIO_REG_MASK;

	if (offset == -1)
		return;

	kvmppc_get_vsx_vr(vcpu, index, &val.vval);
	val.vsx16val[offset] = gpr16;
	kvmppc_set_vsx_vr(vcpu, index, &val.vval);
}

static inline void kvmppc_set_vmx_byte(struct kvm_vcpu *vcpu,
	u8 gpr8)
{
	union kvmppc_one_reg val;
	int offset = kvmppc_get_vmx_byte_offset(vcpu,
			vcpu->arch.mmio_vmx_offset);
	int index = vcpu->arch.io_gpr & KVM_MMIO_REG_MASK;

	if (offset == -1)
		return;

	kvmppc_get_vsx_vr(vcpu, index, &val.vval);
	val.vsx8val[offset] = gpr8;
	kvmppc_set_vsx_vr(vcpu, index, &val.vval);
}
#endif /* CONFIG_ALTIVEC */

#ifdef CONFIG_PPC_FPU
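/*
 * Single <-> double precision conversions are done by the FPU itself:
 * lfs converts a single-precision value to double on load and stfs
 * converts back on store, so bounce the value through fr0.
 */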
static inline u64 sp_to_dp(u32 fprs)
{
	u64 fprd;

	preempt_disable();
	enable_kernel_fp();
	asm ("lfs%U1%X1 0,%1; stfd%U0%X0 0,%0" : "=m<>" (fprd) : "m<>" (fprs)
	     : "fr0");
	preempt_enable();
	return fprd;
}

static inline u32 dp_to_sp(u64 fprd)
{
	u32 fprs;

	preempt_disable();
	enable_kernel_fp();
	asm ("lfd%U1%X1 0,%1; stfs%U0%X0 0,%0" : "=m<>" (fprs) : "m<>" (fprd)
	     : "fr0");
	preempt_enable();
	return fprs;
}

#else
#define sp_to_dp(x)	(x)
#define dp_to_sp(x)	(x)
#endif /* CONFIG_PPC_FPU */

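/*
 * Complete an MMIO load: fold the bytes that userspace (or the
 * in-kernel io bus) returned into the destination register, applying
 * the byte-swap, FP single-to-double and sign extensions that were
 * recorded when the access was first emulated.
 */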
static void kvmppc_complete_mmio_load(struct kvm_vcpu *vcpu)
{
	struct kvm_run *run = vcpu->run;
	u64 gpr;

	if (run->mmio.len > sizeof(gpr))
		return;

	if (!vcpu->arch.mmio_host_swabbed) {
		switch (run->mmio.len) {
		case 8: gpr = *(u64 *)run->mmio.data; break;
		case 4: gpr = *(u32 *)run->mmio.data; break;
		case 2: gpr = *(u16 *)run->mmio.data; break;
		case 1: gpr = *(u8 *)run->mmio.data; break;
		}
	} else {
		switch (run->mmio.len) {
		case 8: gpr = swab64(*(u64 *)run->mmio.data); break;
		case 4: gpr = swab32(*(u32 *)run->mmio.data); break;
		case 2: gpr = swab16(*(u16 *)run->mmio.data); break;
		case 1: gpr = *(u8 *)run->mmio.data; break;
		}
	}

	/* conversion between single and double precision */
	if ((vcpu->arch.mmio_sp64_extend) && (run->mmio.len == 4))
		gpr = sp_to_dp(gpr);

	if (vcpu->arch.mmio_sign_extend) {
		switch (run->mmio.len) {
#ifdef CONFIG_PPC64
		case 4:
			gpr = (s64)(s32)gpr;
			break;
#endif
		case 2:
			gpr = (s64)(s16)gpr;
			break;
		case 1:
			gpr = (s64)(s8)gpr;
			break;
		}
	}

	switch (vcpu->arch.io_gpr & KVM_MMIO_REG_EXT_MASK) {
	case KVM_MMIO_REG_GPR:
		kvmppc_set_gpr(vcpu, vcpu->arch.io_gpr, gpr);
		break;
	case KVM_MMIO_REG_FPR:
		if (vcpu->kvm->arch.kvm_ops->giveup_ext)
			vcpu->kvm->arch.kvm_ops->giveup_ext(vcpu, MSR_FP);

		kvmppc_set_fpr(vcpu, vcpu->arch.io_gpr & KVM_MMIO_REG_MASK, gpr);
		break;
#ifdef CONFIG_PPC_BOOK3S
	case KVM_MMIO_REG_QPR:
		vcpu->arch.qpr[vcpu->arch.io_gpr & KVM_MMIO_REG_MASK] = gpr;
		break;
	case KVM_MMIO_REG_FQPR:
		kvmppc_set_fpr(vcpu, vcpu->arch.io_gpr & KVM_MMIO_REG_MASK, gpr);
		vcpu->arch.qpr[vcpu->arch.io_gpr & KVM_MMIO_REG_MASK] = gpr;
		break;
#endif
#ifdef CONFIG_VSX
	case KVM_MMIO_REG_VSX:
		if (vcpu->kvm->arch.kvm_ops->giveup_ext)
			vcpu->kvm->arch.kvm_ops->giveup_ext(vcpu, MSR_VSX);

		if (vcpu->arch.mmio_copy_type == KVMPPC_VSX_COPY_DWORD)
			kvmppc_set_vsr_dword(vcpu, gpr);
		else if (vcpu->arch.mmio_copy_type == KVMPPC_VSX_COPY_WORD)
			kvmppc_set_vsr_word(vcpu, gpr);
		else if (vcpu->arch.mmio_copy_type ==
			 KVMPPC_VSX_COPY_DWORD_LOAD_DUMP)
			kvmppc_set_vsr_dword_dump(vcpu, gpr);
		else if (vcpu->arch.mmio_copy_type ==
			 KVMPPC_VSX_COPY_WORD_LOAD_DUMP)
			kvmppc_set_vsr_word_dump(vcpu, gpr);
		break;
#endif
#ifdef CONFIG_ALTIVEC
	case KVM_MMIO_REG_VMX:
		if (vcpu->kvm->arch.kvm_ops->giveup_ext)
			vcpu->kvm->arch.kvm_ops->giveup_ext(vcpu, MSR_VEC);

		if (vcpu->arch.mmio_copy_type == KVMPPC_VMX_COPY_DWORD)
			kvmppc_set_vmx_dword(vcpu, gpr);
		else if (vcpu->arch.mmio_copy_type == KVMPPC_VMX_COPY_WORD)
			kvmppc_set_vmx_word(vcpu, gpr);
		else if (vcpu->arch.mmio_copy_type ==
			 KVMPPC_VMX_COPY_HWORD)
			kvmppc_set_vmx_hword(vcpu, gpr);
		else if (vcpu->arch.mmio_copy_type ==
			 KVMPPC_VMX_COPY_BYTE)
			kvmppc_set_vmx_byte(vcpu, gpr);
		break;
#endif
#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
	case KVM_MMIO_REG_NESTED_GPR:
		if (kvmppc_need_byteswap(vcpu))
			gpr = swab64(gpr);
		kvm_vcpu_write_guest(vcpu, vcpu->arch.nested_io_gpr, &gpr,
				     sizeof(gpr));
		break;
#endif
	default:
		BUG();
	}
}

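/*
 * Set up an MMIO load: record everything needed to complete it later,
 * try the in-kernel io bus first, and otherwise return EMULATE_DO_MMIO
 * so the caller exits to userspace with KVM_EXIT_MMIO.
 */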
static int __kvmppc_handle_load(struct kvm_vcpu *vcpu,
				unsigned int rt, unsigned int bytes,
				int is_default_endian, int sign_extend)
{
	struct kvm_run *run = vcpu->run;
	int idx, ret;
	bool host_swabbed;

	/* Pity C doesn't have a logical XOR operator */
	if (kvmppc_need_byteswap(vcpu)) {
		host_swabbed = is_default_endian;
	} else {
		host_swabbed = !is_default_endian;
	}

	if (bytes > sizeof(run->mmio.data))
		return EMULATE_FAIL;

	run->mmio.phys_addr = vcpu->arch.paddr_accessed;
	run->mmio.len = bytes;
	run->mmio.is_write = 0;

	vcpu->arch.io_gpr = rt;
	vcpu->arch.mmio_host_swabbed = host_swabbed;
	vcpu->mmio_needed = 1;
	vcpu->mmio_is_write = 0;
	vcpu->arch.mmio_sign_extend = sign_extend;

	idx = srcu_read_lock(&vcpu->kvm->srcu);

	ret = kvm_io_bus_read(vcpu, KVM_MMIO_BUS, run->mmio.phys_addr,
			      bytes, &run->mmio.data);

	srcu_read_unlock(&vcpu->kvm->srcu, idx);

	if (!ret) {
		kvmppc_complete_mmio_load(vcpu);
		vcpu->mmio_needed = 0;
		return EMULATE_DONE;
	}

	return EMULATE_DO_MMIO;
}

int kvmppc_handle_load(struct kvm_vcpu *vcpu,
		       unsigned int rt, unsigned int bytes,
		       int is_default_endian)
{
	return __kvmppc_handle_load(vcpu, rt, bytes, is_default_endian, 0);
}
EXPORT_SYMBOL_GPL(kvmppc_handle_load);

/* Same as above, but sign extends */
int kvmppc_handle_loads(struct kvm_vcpu *vcpu,
			unsigned int rt, unsigned int bytes,
			int is_default_endian)
{
	return __kvmppc_handle_load(vcpu, rt, bytes, is_default_endian, 1);
}

#ifdef CONFIG_VSX
int kvmppc_handle_vsx_load(struct kvm_vcpu *vcpu,
			   unsigned int rt, unsigned int bytes,
			   int is_default_endian, int mmio_sign_extend)
{
	enum emulation_result emulated = EMULATE_DONE;

	/* Currently, mmio_vsx_copy_nums only allowed to be 4 or less */
	if (vcpu->arch.mmio_vsx_copy_nums > 4)
		return EMULATE_FAIL;

	while (vcpu->arch.mmio_vsx_copy_nums) {
		emulated = __kvmppc_handle_load(vcpu, rt, bytes,
			is_default_endian, mmio_sign_extend);

		if (emulated != EMULATE_DONE)
			break;

		vcpu->arch.paddr_accessed += vcpu->run->mmio.len;

		vcpu->arch.mmio_vsx_copy_nums--;
		vcpu->arch.mmio_vsx_offset++;
	}
	return emulated;
}
#endif /* CONFIG_VSX */

int kvmppc_handle_store(struct kvm_vcpu *vcpu,
			u64 val, unsigned int bytes, int is_default_endian)
{
	struct kvm_run *run = vcpu->run;
	void *data = run->mmio.data;
	int idx, ret;
	bool host_swabbed;

	/* Pity C doesn't have a logical XOR operator */
	if (kvmppc_need_byteswap(vcpu)) {
		host_swabbed = is_default_endian;
	} else {
		host_swabbed = !is_default_endian;
	}

	if (bytes > sizeof(run->mmio.data))
		return EMULATE_FAIL;

	run->mmio.phys_addr = vcpu->arch.paddr_accessed;
	run->mmio.len = bytes;
	run->mmio.is_write = 1;
	vcpu->mmio_needed = 1;
	vcpu->mmio_is_write = 1;

	if ((vcpu->arch.mmio_sp64_extend) && (bytes == 4))
		val = dp_to_sp(val);

	/* Store the value at the lowest bytes in 'data'. */
	if (!host_swabbed) {
		switch (bytes) {
		case 8: *(u64 *)data = val; break;
		case 4: *(u32 *)data = val; break;
		case 2: *(u16 *)data = val; break;
		case 1: *(u8 *)data = val; break;
		}
	} else {
		switch (bytes) {
		case 8: *(u64 *)data = swab64(val); break;
		case 4: *(u32 *)data = swab32(val); break;
		case 2: *(u16 *)data = swab16(val); break;
		case 1: *(u8 *)data = val; break;
		}
	}

	idx = srcu_read_lock(&vcpu->kvm->srcu);

	ret = kvm_io_bus_write(vcpu, KVM_MMIO_BUS, run->mmio.phys_addr,
			       bytes, &run->mmio.data);

	srcu_read_unlock(&vcpu->kvm->srcu, idx);

	if (!ret) {
		vcpu->mmio_needed = 0;
		return EMULATE_DONE;
	}

	return EMULATE_DO_MMIO;
}
EXPORT_SYMBOL_GPL(kvmppc_handle_store);

#ifdef CONFIG_VSX
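/*
 * Pull the next doubleword or word to be stored out of the source VSX
 * register, based on the recorded copy type and running offset.
 */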
static inline int kvmppc_get_vsr_data(struct kvm_vcpu *vcpu, int rs, u64 *val)
{
	u32 dword_offset, word_offset;
	union kvmppc_one_reg reg;
	int vsx_offset = 0;
	int copy_type = vcpu->arch.mmio_copy_type;
	int result = 0;

	switch (copy_type) {
	case KVMPPC_VSX_COPY_DWORD:
		vsx_offset =
			kvmppc_get_vsr_dword_offset(vcpu->arch.mmio_vsx_offset);

		if (vsx_offset == -1) {
			result = -1;
			break;
		}

		if (rs < 32) {
			*val = kvmppc_get_vsx_fpr(vcpu, rs, vsx_offset);
		} else {
			kvmppc_get_vsx_vr(vcpu, rs - 32, &reg.vval);
			*val = reg.vsxval[vsx_offset];
		}
		break;

	case KVMPPC_VSX_COPY_WORD:
		vsx_offset =
			kvmppc_get_vsr_word_offset(vcpu->arch.mmio_vsx_offset);

		if (vsx_offset == -1) {
			result = -1;
			break;
		}

		if (rs < 32) {
			dword_offset = vsx_offset / 2;
			word_offset = vsx_offset % 2;
			reg.vsxval[0] = kvmppc_get_vsx_fpr(vcpu, rs, dword_offset);
			*val = reg.vsx32val[word_offset];
		} else {
			kvmppc_get_vsx_vr(vcpu, rs - 32, &reg.vval);
			*val = reg.vsx32val[vsx_offset];
		}
		break;

	default:
		result = -1;
		break;
	}

	return result;
}

int kvmppc_handle_vsx_store(struct kvm_vcpu *vcpu,
			int rs, unsigned int bytes, int is_default_endian)
{
	u64 val;
	enum emulation_result emulated = EMULATE_DONE;

	vcpu->arch.io_gpr = rs;

	/* Currently, mmio_vsx_copy_nums only allowed to be 4 or less */
	if (vcpu->arch.mmio_vsx_copy_nums > 4)
		return EMULATE_FAIL;

	while (vcpu->arch.mmio_vsx_copy_nums) {
		if (kvmppc_get_vsr_data(vcpu, rs, &val) == -1)
			return EMULATE_FAIL;

		emulated = kvmppc_handle_store(vcpu,
			 val, bytes, is_default_endian);

		if (emulated != EMULATE_DONE)
			break;

		vcpu->arch.paddr_accessed += vcpu->run->mmio.len;

		vcpu->arch.mmio_vsx_copy_nums--;
		vcpu->arch.mmio_vsx_offset++;
	}

	return emulated;
}

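/*
 * Continue a multi-element VSX MMIO access after userspace has
 * completed one element: advance the guest physical address and
 * emulate the next load or store in the sequence.
 */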
static int kvmppc_emulate_mmio_vsx_loadstore(struct kvm_vcpu *vcpu)
{
	struct kvm_run *run = vcpu->run;
	enum emulation_result emulated = EMULATE_FAIL;
	int r;

	vcpu->arch.paddr_accessed += run->mmio.len;

	if (!vcpu->mmio_is_write) {
		emulated = kvmppc_handle_vsx_load(vcpu, vcpu->arch.io_gpr,
			 run->mmio.len, 1, vcpu->arch.mmio_sign_extend);
	} else {
		emulated = kvmppc_handle_vsx_store(vcpu,
			 vcpu->arch.io_gpr, run->mmio.len, 1);
	}

	switch (emulated) {
	case EMULATE_DO_MMIO:
		run->exit_reason = KVM_EXIT_MMIO;
		r = RESUME_HOST;
		break;
	case EMULATE_FAIL:
		pr_info("KVM: MMIO emulation failed (VSX repeat)\n");
		run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
		run->internal.suberror = KVM_INTERNAL_ERROR_EMULATION;
		r = RESUME_HOST;
		break;
	default:
		r = RESUME_GUEST;
		break;
	}
	return r;
}
#endif /* CONFIG_VSX */

#ifdef CONFIG_ALTIVEC
int kvmppc_handle_vmx_load(struct kvm_vcpu *vcpu,
		unsigned int rt, unsigned int bytes, int is_default_endian)
{
	enum emulation_result emulated = EMULATE_DONE;

	if (vcpu->arch.mmio_vmx_copy_nums > 2)
		return EMULATE_FAIL;

	while (vcpu->arch.mmio_vmx_copy_nums) {
		emulated = __kvmppc_handle_load(vcpu, rt, bytes,
				is_default_endian, 0);

		if (emulated != EMULATE_DONE)
			break;

		vcpu->arch.paddr_accessed += vcpu->run->mmio.len;
		vcpu->arch.mmio_vmx_copy_nums--;
		vcpu->arch.mmio_vmx_offset++;
	}

	return emulated;
}

static int kvmppc_get_vmx_dword(struct kvm_vcpu *vcpu, int index, u64 *val)
{
	union kvmppc_one_reg reg;
	int vmx_offset = 0;
	int result = 0;

	vmx_offset =
		kvmppc_get_vmx_dword_offset(vcpu, vcpu->arch.mmio_vmx_offset);

	if (vmx_offset == -1)
		return -1;

	kvmppc_get_vsx_vr(vcpu, index, &reg.vval);
	*val = reg.vsxval[vmx_offset];

	return result;
}

static int kvmppc_get_vmx_word(struct kvm_vcpu *vcpu, int index, u64 *val)
{
	union kvmppc_one_reg reg;
	int vmx_offset = 0;
	int result = 0;

	vmx_offset =
		kvmppc_get_vmx_word_offset(vcpu, vcpu->arch.mmio_vmx_offset);

	if (vmx_offset == -1)
		return -1;

	kvmppc_get_vsx_vr(vcpu, index, &reg.vval);
	*val = reg.vsx32val[vmx_offset];

	return result;
}

static int kvmppc_get_vmx_hword(struct kvm_vcpu *vcpu, int index, u64 *val)
{
	union kvmppc_one_reg reg;
	int vmx_offset = 0;
	int result = 0;

	vmx_offset =
		kvmppc_get_vmx_hword_offset(vcpu, vcpu->arch.mmio_vmx_offset);

	if (vmx_offset == -1)
		return -1;

	kvmppc_get_vsx_vr(vcpu, index, &reg.vval);
	*val = reg.vsx16val[vmx_offset];

	return result;
}

static int kvmppc_get_vmx_byte(struct kvm_vcpu *vcpu, int index, u64 *val)
{
	union kvmppc_one_reg reg;
	int vmx_offset = 0;
	int result = 0;

	vmx_offset =
		kvmppc_get_vmx_byte_offset(vcpu, vcpu->arch.mmio_vmx_offset);

	if (vmx_offset == -1)
		return -1;

	kvmppc_get_vsx_vr(vcpu, index, &reg.vval);
	*val = reg.vsx8val[vmx_offset];

	return result;
}

int kvmppc_handle_vmx_store(struct kvm_vcpu *vcpu,
		unsigned int rs, unsigned int bytes, int is_default_endian)
{
	u64 val = 0;
	unsigned int index = rs & KVM_MMIO_REG_MASK;
	enum emulation_result emulated = EMULATE_DONE;

	if (vcpu->arch.mmio_vmx_copy_nums > 2)
		return EMULATE_FAIL;

	vcpu->arch.io_gpr = rs;

	while (vcpu->arch.mmio_vmx_copy_nums) {
		switch (vcpu->arch.mmio_copy_type) {
		case KVMPPC_VMX_COPY_DWORD:
			if (kvmppc_get_vmx_dword(vcpu, index, &val) == -1)
				return EMULATE_FAIL;

			break;
		case KVMPPC_VMX_COPY_WORD:
			if (kvmppc_get_vmx_word(vcpu, index, &val) == -1)
				return EMULATE_FAIL;
			break;
		case KVMPPC_VMX_COPY_HWORD:
			if (kvmppc_get_vmx_hword(vcpu, index, &val) == -1)
				return EMULATE_FAIL;
			break;
		case KVMPPC_VMX_COPY_BYTE:
			if (kvmppc_get_vmx_byte(vcpu, index, &val) == -1)
				return EMULATE_FAIL;
			break;
		default:
			return EMULATE_FAIL;
		}

		emulated = kvmppc_handle_store(vcpu, val, bytes,
				is_default_endian);
		if (emulated != EMULATE_DONE)
			break;

		vcpu->arch.paddr_accessed += vcpu->run->mmio.len;
		vcpu->arch.mmio_vmx_copy_nums--;
		vcpu->arch.mmio_vmx_offset++;
	}

	return emulated;
}

static int kvmppc_emulate_mmio_vmx_loadstore(struct kvm_vcpu *vcpu)
{
	struct kvm_run *run = vcpu->run;
	enum emulation_result emulated = EMULATE_FAIL;
	int r;

	vcpu->arch.paddr_accessed += run->mmio.len;

	if (!vcpu->mmio_is_write) {
		emulated = kvmppc_handle_vmx_load(vcpu,
				vcpu->arch.io_gpr, run->mmio.len, 1);
	} else {
		emulated = kvmppc_handle_vmx_store(vcpu,
				vcpu->arch.io_gpr, run->mmio.len, 1);
	}

	switch (emulated) {
	case EMULATE_DO_MMIO:
		run->exit_reason = KVM_EXIT_MMIO;
		r = RESUME_HOST;
		break;
	case EMULATE_FAIL:
		pr_info("KVM: MMIO emulation failed (VMX repeat)\n");
		run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
		run->internal.suberror = KVM_INTERNAL_ERROR_EMULATION;
		r = RESUME_HOST;
		break;
	default:
		r = RESUME_GUEST;
		break;
	}
	return r;
}
#endif /* CONFIG_ALTIVEC */

int kvm_vcpu_ioctl_get_one_reg(struct kvm_vcpu *vcpu, struct kvm_one_reg *reg)
{
	int r = 0;
	union kvmppc_one_reg val;
	int size;

	size = one_reg_size(reg->id);
	if (size > sizeof(val))
		return -EINVAL;

	r = kvmppc_get_one_reg(vcpu, reg->id, &val);
	if (r == -EINVAL) {
		r = 0;
		switch (reg->id) {
#ifdef CONFIG_ALTIVEC
		case KVM_REG_PPC_VR0 ... KVM_REG_PPC_VR31:
			if (!cpu_has_feature(CPU_FTR_ALTIVEC)) {
				r = -ENXIO;
				break;
			}
			kvmppc_get_vsx_vr(vcpu, reg->id - KVM_REG_PPC_VR0, &val.vval);
			break;
		case KVM_REG_PPC_VSCR:
			if (!cpu_has_feature(CPU_FTR_ALTIVEC)) {
				r = -ENXIO;
				break;
			}
			val = get_reg_val(reg->id, kvmppc_get_vscr(vcpu));
			break;
		case KVM_REG_PPC_VRSAVE:
			val = get_reg_val(reg->id, kvmppc_get_vrsave(vcpu));
			break;
#endif /* CONFIG_ALTIVEC */
		default:
			r = -EINVAL;
			break;
		}
	}

	if (r)
		return r;

	if (copy_to_user((char __user *)(unsigned long)reg->addr, &val, size))
		r = -EFAULT;

	return r;
}

int kvm_vcpu_ioctl_set_one_reg(struct kvm_vcpu *vcpu, struct kvm_one_reg *reg)
{
	int r;
	union kvmppc_one_reg val;
	int size;

	size = one_reg_size(reg->id);
	if (size > sizeof(val))
		return -EINVAL;

	if (copy_from_user(&val, (char __user *)(unsigned long)reg->addr, size))
		return -EFAULT;

	r = kvmppc_set_one_reg(vcpu, reg->id, &val);
	if (r == -EINVAL) {
		r = 0;
		switch (reg->id) {
#ifdef CONFIG_ALTIVEC
		case KVM_REG_PPC_VR0 ... KVM_REG_PPC_VR31:
			if (!cpu_has_feature(CPU_FTR_ALTIVEC)) {
				r = -ENXIO;
				break;
			}
			kvmppc_set_vsx_vr(vcpu, reg->id - KVM_REG_PPC_VR0, &val.vval);
			break;
		case KVM_REG_PPC_VSCR:
			if (!cpu_has_feature(CPU_FTR_ALTIVEC)) {
				r = -ENXIO;
				break;
			}
			kvmppc_set_vscr(vcpu, set_reg_val(reg->id, val));
			break;
		case KVM_REG_PPC_VRSAVE:
			if (!cpu_has_feature(CPU_FTR_ALTIVEC)) {
				r = -ENXIO;
				break;
			}
			kvmppc_set_vrsave(vcpu, set_reg_val(reg->id, val));
			break;
#endif /* CONFIG_ALTIVEC */
		default:
			r = -EINVAL;
			break;
		}
	}

	return r;
}

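/*
 * Main vcpu run entry point: finish off any MMIO, OSI, hypercall or
 * EPR transaction that userspace just completed, then run the guest
 * until the next exit that needs userspace attention.
 */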
1793 | int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu) |
1794 | { |
1795 | struct kvm_run *run = vcpu->run; |
1796 | int r; |
1797 | |
1798 | vcpu_load(vcpu); |
1799 | |
1800 | if (vcpu->mmio_needed) { |
1801 | vcpu->mmio_needed = 0; |
1802 | if (!vcpu->mmio_is_write) |
1803 | kvmppc_complete_mmio_load(vcpu); |
1804 | #ifdef CONFIG_VSX |
1805 | if (vcpu->arch.mmio_vsx_copy_nums > 0) { |
1806 | vcpu->arch.mmio_vsx_copy_nums--; |
1807 | vcpu->arch.mmio_vsx_offset++; |
1808 | } |
1809 | |
1810 | if (vcpu->arch.mmio_vsx_copy_nums > 0) { |
1811 | r = kvmppc_emulate_mmio_vsx_loadstore(vcpu); |
1812 | if (r == RESUME_HOST) { |
1813 | vcpu->mmio_needed = 1; |
1814 | goto out; |
1815 | } |
1816 | } |
1817 | #endif |
1818 | #ifdef CONFIG_ALTIVEC |
1819 | if (vcpu->arch.mmio_vmx_copy_nums > 0) { |
1820 | vcpu->arch.mmio_vmx_copy_nums--; |
1821 | vcpu->arch.mmio_vmx_offset++; |
1822 | } |
1823 | |
1824 | if (vcpu->arch.mmio_vmx_copy_nums > 0) { |
1825 | r = kvmppc_emulate_mmio_vmx_loadstore(vcpu); |
1826 | if (r == RESUME_HOST) { |
1827 | vcpu->mmio_needed = 1; |
1828 | goto out; |
1829 | } |
1830 | } |
1831 | #endif |
1832 | } else if (vcpu->arch.osi_needed) { |
1833 | u64 *gprs = run->osi.gprs; |
1834 | int i; |
1835 | |
1836 | for (i = 0; i < 32; i++) |
1837 | kvmppc_set_gpr(vcpu, i, gprs[i]); |
1838 | vcpu->arch.osi_needed = 0; |
1839 | } else if (vcpu->arch.hcall_needed) { |
1840 | int i; |
1841 | |
1842 | kvmppc_set_gpr(vcpu, 3, run->papr_hcall.ret); |
1843 | for (i = 0; i < 9; ++i) |
1844 | kvmppc_set_gpr(vcpu, 4 + i, run->papr_hcall.args[i]); |
1845 | vcpu->arch.hcall_needed = 0; |
1846 | #ifdef CONFIG_BOOKE |
1847 | } else if (vcpu->arch.epr_needed) { |
1848 | kvmppc_set_epr(vcpu, run->epr.epr); |
1849 | vcpu->arch.epr_needed = 0; |
1850 | #endif |
1851 | } |
1852 | |
1853 | kvm_sigset_activate(vcpu); |
1854 | |
1855 | if (run->immediate_exit) |
1856 | r = -EINTR; |
1857 | else |
1858 | r = kvmppc_vcpu_run(vcpu); |
1859 | |
1860 | kvm_sigset_deactivate(vcpu); |
1861 | |
1862 | #ifdef CONFIG_ALTIVEC |
1863 | out: |
1864 | #endif |
1865 | |
1866 | /* |
1867 | * We're already returning to userspace, don't pass the |
1868 | * RESUME_HOST flags along. |
1869 | */ |
1870 | if (r > 0) |
1871 | r = 0; |
1872 | |
1873 | vcpu_put(vcpu); |
1874 | return r; |
1875 | } |
1876 | |
1877 | int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu, struct kvm_interrupt *irq) |
1878 | { |
1879 | if (irq->irq == KVM_INTERRUPT_UNSET) { |
1880 | kvmppc_core_dequeue_external(vcpu); |
1881 | return 0; |
1882 | } |
1883 | |
1884 | kvmppc_core_queue_external(vcpu, irq); |
1885 | |
1886 | kvm_vcpu_kick(vcpu); |
1887 | |
1888 | return 0; |
1889 | } |
1890 | |
1891 | static int kvm_vcpu_ioctl_enable_cap(struct kvm_vcpu *vcpu, |
1892 | struct kvm_enable_cap *cap) |
1893 | { |
1894 | int r; |
1895 | |
1896 | if (cap->flags) |
1897 | return -EINVAL; |
1898 | |
1899 | switch (cap->cap) { |
1900 | case KVM_CAP_PPC_OSI: |
1901 | r = 0; |
1902 | vcpu->arch.osi_enabled = true; |
1903 | break; |
1904 | case KVM_CAP_PPC_PAPR: |
1905 | r = 0; |
1906 | vcpu->arch.papr_enabled = true; |
1907 | break; |
1908 | case KVM_CAP_PPC_EPR: |
1909 | r = 0; |
1910 | if (cap->args[0]) |
1911 | vcpu->arch.epr_flags |= KVMPPC_EPR_USER; |
1912 | else |
1913 | vcpu->arch.epr_flags &= ~KVMPPC_EPR_USER; |
1914 | break; |
1915 | #ifdef CONFIG_BOOKE |
1916 | case KVM_CAP_PPC_BOOKE_WATCHDOG: |
1917 | r = 0; |
1918 | vcpu->arch.watchdog_enabled = true; |
1919 | break; |
1920 | #endif |
1921 | #if defined(CONFIG_KVM_E500V2) || defined(CONFIG_KVM_E500MC) |
1922 | case KVM_CAP_SW_TLB: { |
1923 | struct kvm_config_tlb cfg; |
1924 | void __user *user_ptr = (void __user *)(uintptr_t)cap->args[0]; |
1925 | |
1926 | r = -EFAULT; |
1927 | if (copy_from_user(&cfg, user_ptr, sizeof(cfg))) |
1928 | break; |
1929 | |
1930 | r = kvm_vcpu_ioctl_config_tlb(vcpu, &cfg); |
1931 | break; |
1932 | } |
1933 | #endif |
1934 | #ifdef CONFIG_KVM_MPIC |
1935 | case KVM_CAP_IRQ_MPIC: { |
1936 | struct fd f; |
1937 | struct kvm_device *dev; |
1938 | |
1939 | r = -EBADF; |
1940 | f = fdget(cap->args[0]); |
1941 | if (!f.file) |
1942 | break; |
1943 | |
1944 | r = -EPERM; |
1945 | dev = kvm_device_from_filp(f.file); |
1946 | if (dev) |
1947 | r = kvmppc_mpic_connect_vcpu(dev, vcpu, cap->args[1]); |
1948 | |
1949 | fdput(f); |
1950 | break; |
1951 | } |
1952 | #endif |
1953 | #ifdef CONFIG_KVM_XICS |
1954 | case KVM_CAP_IRQ_XICS: { |
1955 | struct fd f; |
1956 | struct kvm_device *dev; |
1957 | |
1958 | r = -EBADF; |
1959 | f = fdget(cap->args[0]); |
1960 | if (!f.file) |
1961 | break; |
1962 | |
1963 | r = -EPERM; |
1964 | dev = kvm_device_from_filp(f.file); |
1965 | if (dev) { |
1966 | if (xics_on_xive()) |
1967 | r = kvmppc_xive_connect_vcpu(dev, vcpu, cap->args[1]); |
1968 | else |
1969 | r = kvmppc_xics_connect_vcpu(dev, vcpu, cap->args[1]); |
1970 | } |
1971 | |
1972 | fdput(f); |
1973 | break; |
1974 | } |
1975 | #endif /* CONFIG_KVM_XICS */ |
1976 | #ifdef CONFIG_KVM_XIVE |
1977 | case KVM_CAP_PPC_IRQ_XIVE: { |
1978 | struct fd f; |
1979 | struct kvm_device *dev; |
1980 | |
1981 | r = -EBADF; |
1982 | f = fdget(cap->args[0]); |
1983 | if (!f.file) |
1984 | break; |
1985 | |
1986 | r = -ENXIO; |
1987 | if (!xive_enabled()) |
1988 | break; |
1989 | |
1990 | r = -EPERM; |
1991 | dev = kvm_device_from_filp(f.file); |
1992 | if (dev) |
1993 | r = kvmppc_xive_native_connect_vcpu(dev, vcpu, |
1994 | cap->args[1]); |
1995 | |
1996 | fdput(f); |
1997 | break; |
1998 | } |
1999 | #endif /* CONFIG_KVM_XIVE */ |
2000 | #ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE |
2001 | case KVM_CAP_PPC_FWNMI: |
2002 | r = -EINVAL; |
2003 | if (!is_kvmppc_hv_enabled(vcpu->kvm)) |
2004 | break; |
2005 | r = 0; |
2006 | vcpu->kvm->arch.fwnmi_enabled = true; |
2007 | break; |
2008 | #endif /* CONFIG_KVM_BOOK3S_HV_POSSIBLE */ |
2009 | default: |
2010 | r = -EINVAL; |
2011 | break; |
2012 | } |
2013 | |
2014 | if (!r) |
2015 | r = kvmppc_sanity_check(vcpu); |
2016 | |
2017 | return r; |
2018 | } |
2019 | |
2020 | bool kvm_arch_intc_initialized(struct kvm *kvm) |
2021 | { |
2022 | #ifdef CONFIG_KVM_MPIC |
2023 | if (kvm->arch.mpic) |
2024 | return true; |
2025 | #endif |
2026 | #ifdef CONFIG_KVM_XICS |
2027 | if (kvm->arch.xics || kvm->arch.xive) |
2028 | return true; |
2029 | #endif |
2030 | return false; |
2031 | } |
2032 | |
2033 | int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu, |
2034 | struct kvm_mp_state *mp_state) |
2035 | { |
2036 | return -EINVAL; |
2037 | } |
2038 | |
2039 | int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu, |
2040 | struct kvm_mp_state *mp_state) |
2041 | { |
2042 | return -EINVAL; |
2043 | } |
2044 | |
2045 | long kvm_arch_vcpu_async_ioctl(struct file *filp, |
2046 | unsigned int ioctl, unsigned long arg) |
2047 | { |
2048 | struct kvm_vcpu *vcpu = filp->private_data; |
2049 | void __user *argp = (void __user *)arg; |
2050 | |
2051 | if (ioctl == KVM_INTERRUPT) { |
2052 | struct kvm_interrupt irq; |
		if (copy_from_user(&irq, argp, sizeof(irq)))
			return -EFAULT;
		return kvm_vcpu_ioctl_interrupt(vcpu, &irq);
2056 | } |
2057 | return -ENOIOCTLCMD; |
2058 | } |
2059 | |
2060 | long kvm_arch_vcpu_ioctl(struct file *filp, |
2061 | unsigned int ioctl, unsigned long arg) |
2062 | { |
2063 | struct kvm_vcpu *vcpu = filp->private_data; |
2064 | void __user *argp = (void __user *)arg; |
2065 | long r; |
2066 | |
2067 | switch (ioctl) { |
2068 | case KVM_ENABLE_CAP: |
2069 | { |
2070 | struct kvm_enable_cap cap; |
2071 | r = -EFAULT; |
		if (copy_from_user(&cap, argp, sizeof(cap)))
2073 | goto out; |
2074 | vcpu_load(vcpu); |
		r = kvm_vcpu_ioctl_enable_cap(vcpu, &cap);
2076 | vcpu_put(vcpu); |
2077 | break; |
2078 | } |
2079 | |
2080 | case KVM_SET_ONE_REG: |
2081 | case KVM_GET_ONE_REG: |
2082 | { |
2083 | struct kvm_one_reg reg; |
2084 | r = -EFAULT; |
		if (copy_from_user(&reg, argp, sizeof(reg)))
			goto out;
		if (ioctl == KVM_SET_ONE_REG)
			r = kvm_vcpu_ioctl_set_one_reg(vcpu, &reg);
		else
			r = kvm_vcpu_ioctl_get_one_reg(vcpu, &reg);
2091 | break; |
2092 | } |
2093 | |
2094 | #if defined(CONFIG_KVM_E500V2) || defined(CONFIG_KVM_E500MC) |
2095 | case KVM_DIRTY_TLB: { |
2096 | struct kvm_dirty_tlb dirty; |
2097 | r = -EFAULT; |
2098 | if (copy_from_user(&dirty, argp, sizeof(dirty))) |
2099 | goto out; |
2100 | vcpu_load(vcpu); |
2101 | r = kvm_vcpu_ioctl_dirty_tlb(vcpu, &dirty); |
2102 | vcpu_put(vcpu); |
2103 | break; |
2104 | } |
2105 | #endif |
2106 | default: |
2107 | r = -EINVAL; |
2108 | } |
2109 | |
2110 | out: |
2111 | return r; |
2112 | } |
2113 | |
2114 | vm_fault_t kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf) |
2115 | { |
2116 | return VM_FAULT_SIGBUS; |
2117 | } |
2118 | |
2119 | static int kvm_vm_ioctl_get_pvinfo(struct kvm_ppc_pvinfo *pvinfo) |
2120 | { |
2121 | u32 inst_nop = 0x60000000; |
2122 | #ifdef CONFIG_KVM_BOOKE_HV |
2123 | u32 inst_sc1 = 0x44000022; |
2124 | pvinfo->hcall[0] = cpu_to_be32(inst_sc1); |
2125 | pvinfo->hcall[1] = cpu_to_be32(inst_nop); |
2126 | pvinfo->hcall[2] = cpu_to_be32(inst_nop); |
2127 | pvinfo->hcall[3] = cpu_to_be32(inst_nop); |
2128 | #else |
2129 | u32 inst_lis = 0x3c000000; |
2130 | u32 inst_ori = 0x60000000; |
2131 | u32 inst_sc = 0x44000002; |
2132 | u32 inst_imm_mask = 0xffff; |
2133 | |
2134 | /* |
2135 | * The hypercall to get into KVM from within guest context is as |
2136 | * follows: |
2137 | * |
	 *    lis r0, KVM_SC_MAGIC_R0@h
	 *    ori r0, r0, KVM_SC_MAGIC_R0@l
2140 | * sc |
2141 | * nop |
2142 | */ |
2143 | pvinfo->hcall[0] = cpu_to_be32(inst_lis | ((KVM_SC_MAGIC_R0 >> 16) & inst_imm_mask)); |
2144 | pvinfo->hcall[1] = cpu_to_be32(inst_ori | (KVM_SC_MAGIC_R0 & inst_imm_mask)); |
2145 | pvinfo->hcall[2] = cpu_to_be32(inst_sc); |
2146 | pvinfo->hcall[3] = cpu_to_be32(inst_nop); |
2147 | #endif |
2148 | |
2149 | pvinfo->flags = KVM_PPC_PVINFO_FLAGS_EV_IDLE; |
2150 | |
2151 | return 0; |
2152 | } |
2153 | |
2154 | bool kvm_arch_irqchip_in_kernel(struct kvm *kvm) |
2155 | { |
2156 | int ret = 0; |
2157 | |
2158 | #ifdef CONFIG_KVM_MPIC |
2159 | ret = ret || (kvm->arch.mpic != NULL); |
2160 | #endif |
2161 | #ifdef CONFIG_KVM_XICS |
2162 | ret = ret || (kvm->arch.xics != NULL); |
2163 | ret = ret || (kvm->arch.xive != NULL); |
2164 | #endif |
2165 | smp_rmb(); |
2166 | return ret; |
2167 | } |
2168 | |
2169 | int kvm_vm_ioctl_irq_line(struct kvm *kvm, struct kvm_irq_level *irq_event, |
2170 | bool line_status) |
2171 | { |
2172 | if (!kvm_arch_irqchip_in_kernel(kvm)) |
2173 | return -ENXIO; |
2174 | |
	irq_event->status = kvm_set_irq(kvm, KVM_USERSPACE_IRQ_SOURCE_ID,
					irq_event->irq, irq_event->level,
					line_status);
2178 | return 0; |
2179 | } |
2180 | |
2181 | |
2182 | int kvm_vm_ioctl_enable_cap(struct kvm *kvm, |
2183 | struct kvm_enable_cap *cap) |
2184 | { |
2185 | int r; |
2186 | |
2187 | if (cap->flags) |
2188 | return -EINVAL; |
2189 | |
2190 | switch (cap->cap) { |
2191 | #ifdef CONFIG_KVM_BOOK3S_64_HANDLER |
2192 | case KVM_CAP_PPC_ENABLE_HCALL: { |
2193 | unsigned long hcall = cap->args[0]; |
2194 | |
2195 | r = -EINVAL; |
2196 | if (hcall > MAX_HCALL_OPCODE || (hcall & 3) || |
2197 | cap->args[1] > 1) |
2198 | break; |
2199 | if (!kvmppc_book3s_hcall_implemented(kvm, hcall)) |
2200 | break; |
2201 | if (cap->args[1]) |
2202 | set_bit(hcall / 4, kvm->arch.enabled_hcalls); |
2203 | else |
2204 | clear_bit(hcall / 4, kvm->arch.enabled_hcalls); |
2205 | r = 0; |
2206 | break; |
2207 | } |
2208 | case KVM_CAP_PPC_SMT: { |
2209 | unsigned long mode = cap->args[0]; |
2210 | unsigned long flags = cap->args[1]; |
2211 | |
2212 | r = -EINVAL; |
2213 | if (kvm->arch.kvm_ops->set_smt_mode) |
2214 | r = kvm->arch.kvm_ops->set_smt_mode(kvm, mode, flags); |
2215 | break; |
2216 | } |
2217 | |
2218 | case KVM_CAP_PPC_NESTED_HV: |
2219 | r = -EINVAL; |
2220 | if (!is_kvmppc_hv_enabled(kvm) || |
2221 | !kvm->arch.kvm_ops->enable_nested) |
2222 | break; |
2223 | r = kvm->arch.kvm_ops->enable_nested(kvm); |
2224 | break; |
2225 | #endif |
2226 | #if defined(CONFIG_KVM_BOOK3S_HV_POSSIBLE) |
2227 | case KVM_CAP_PPC_SECURE_GUEST: |
2228 | r = -EINVAL; |
2229 | if (!is_kvmppc_hv_enabled(kvm) || !kvm->arch.kvm_ops->enable_svm) |
2230 | break; |
2231 | r = kvm->arch.kvm_ops->enable_svm(kvm); |
2232 | break; |
2233 | case KVM_CAP_PPC_DAWR1: |
2234 | r = -EINVAL; |
2235 | if (!is_kvmppc_hv_enabled(kvm) || !kvm->arch.kvm_ops->enable_dawr1) |
2236 | break; |
2237 | r = kvm->arch.kvm_ops->enable_dawr1(kvm); |
2238 | break; |
2239 | #endif |
2240 | default: |
2241 | r = -EINVAL; |
2242 | break; |
2243 | } |
2244 | |
2245 | return r; |
2246 | } |
2247 | |
2248 | #ifdef CONFIG_PPC_BOOK3S_64 |
2249 | /* |
2250 | * These functions check whether the underlying hardware is safe |
2251 | * against attacks based on observing the effects of speculatively |
2252 | * executed instructions, and whether it supplies instructions for |
2253 | * use in workarounds. The information comes from firmware, either |
2254 | * via the device tree on powernv platforms or from an hcall on |
2255 | * pseries platforms. |
2256 | */ |
2257 | #ifdef CONFIG_PPC_PSERIES |
2258 | static int pseries_get_cpu_char(struct kvm_ppc_cpu_char *cp) |
2259 | { |
2260 | struct h_cpu_char_result c; |
2261 | unsigned long rc; |
2262 | |
2263 | if (!machine_is(pseries)) |
2264 | return -ENOTTY; |
2265 | |
2266 | rc = plpar_get_cpu_characteristics(&c); |
2267 | if (rc == H_SUCCESS) { |
2268 | cp->character = c.character; |
2269 | cp->behaviour = c.behaviour; |
2270 | cp->character_mask = KVM_PPC_CPU_CHAR_SPEC_BAR_ORI31 | |
2271 | KVM_PPC_CPU_CHAR_BCCTRL_SERIALISED | |
2272 | KVM_PPC_CPU_CHAR_L1D_FLUSH_ORI30 | |
2273 | KVM_PPC_CPU_CHAR_L1D_FLUSH_TRIG2 | |
2274 | KVM_PPC_CPU_CHAR_L1D_THREAD_PRIV | |
2275 | KVM_PPC_CPU_CHAR_BR_HINT_HONOURED | |
2276 | KVM_PPC_CPU_CHAR_MTTRIG_THR_RECONF | |
2277 | KVM_PPC_CPU_CHAR_COUNT_CACHE_DIS | |
2278 | KVM_PPC_CPU_CHAR_BCCTR_FLUSH_ASSIST; |
2279 | cp->behaviour_mask = KVM_PPC_CPU_BEHAV_FAVOUR_SECURITY | |
2280 | KVM_PPC_CPU_BEHAV_L1D_FLUSH_PR | |
2281 | KVM_PPC_CPU_BEHAV_BNDS_CHK_SPEC_BAR | |
2282 | KVM_PPC_CPU_BEHAV_FLUSH_COUNT_CACHE; |
2283 | } |
2284 | return 0; |
2285 | } |
2286 | #else |
2287 | static int pseries_get_cpu_char(struct kvm_ppc_cpu_char *cp) |
2288 | { |
2289 | return -ENOTTY; |
2290 | } |
2291 | #endif |
2292 | |
2293 | static inline bool have_fw_feat(struct device_node *fw_features, |
2294 | const char *state, const char *name) |
2295 | { |
2296 | struct device_node *np; |
2297 | bool r = false; |
2298 | |
2299 | np = of_get_child_by_name(fw_features, name); |
2300 | if (np) { |
2301 | r = of_property_read_bool(np, state); |
2302 | of_node_put(np); |
2303 | } |
2304 | return r; |
2305 | } |
2306 | |
2307 | static int kvmppc_get_cpu_char(struct kvm_ppc_cpu_char *cp) |
2308 | { |
2309 | struct device_node *np, *fw_features; |
2310 | int r; |
2311 | |
2312 | memset(cp, 0, sizeof(*cp)); |
2313 | r = pseries_get_cpu_char(cp); |
2314 | if (r != -ENOTTY) |
2315 | return r; |
2316 | |
	np = of_find_node_by_name(NULL, "ibm,opal");
	if (np) {
		fw_features = of_get_child_by_name(np, "fw-features");
		of_node_put(np);
		if (!fw_features)
			return 0;
		if (have_fw_feat(fw_features, "enabled",
				 "inst-spec-barrier-ori31,31,0"))
			cp->character |= KVM_PPC_CPU_CHAR_SPEC_BAR_ORI31;
		if (have_fw_feat(fw_features, "enabled",
				 "fw-bcctrl-serialized"))
			cp->character |= KVM_PPC_CPU_CHAR_BCCTRL_SERIALISED;
		if (have_fw_feat(fw_features, "enabled",
				 "inst-l1d-flush-ori30,30,0"))
			cp->character |= KVM_PPC_CPU_CHAR_L1D_FLUSH_ORI30;
		if (have_fw_feat(fw_features, "enabled",
				 "inst-l1d-flush-trig2"))
			cp->character |= KVM_PPC_CPU_CHAR_L1D_FLUSH_TRIG2;
		if (have_fw_feat(fw_features, "enabled",
				 "fw-l1d-thread-split"))
			cp->character |= KVM_PPC_CPU_CHAR_L1D_THREAD_PRIV;
		if (have_fw_feat(fw_features, "enabled",
				 "fw-count-cache-disabled"))
			cp->character |= KVM_PPC_CPU_CHAR_COUNT_CACHE_DIS;
		if (have_fw_feat(fw_features, "enabled",
				 "fw-count-cache-flush-bcctr2,0,0"))
			cp->character |= KVM_PPC_CPU_CHAR_BCCTR_FLUSH_ASSIST;
2344 | cp->character_mask = KVM_PPC_CPU_CHAR_SPEC_BAR_ORI31 | |
2345 | KVM_PPC_CPU_CHAR_BCCTRL_SERIALISED | |
2346 | KVM_PPC_CPU_CHAR_L1D_FLUSH_ORI30 | |
2347 | KVM_PPC_CPU_CHAR_L1D_FLUSH_TRIG2 | |
2348 | KVM_PPC_CPU_CHAR_L1D_THREAD_PRIV | |
2349 | KVM_PPC_CPU_CHAR_COUNT_CACHE_DIS | |
2350 | KVM_PPC_CPU_CHAR_BCCTR_FLUSH_ASSIST; |
2351 | |
		if (have_fw_feat(fw_features, "enabled",
				 "speculation-policy-favor-security"))
			cp->behaviour |= KVM_PPC_CPU_BEHAV_FAVOUR_SECURITY;
		if (!have_fw_feat(fw_features, "disabled",
				  "needs-l1d-flush-msr-pr-0-to-1"))
			cp->behaviour |= KVM_PPC_CPU_BEHAV_L1D_FLUSH_PR;
		if (!have_fw_feat(fw_features, "disabled",
				  "needs-spec-barrier-for-bound-checks"))
			cp->behaviour |= KVM_PPC_CPU_BEHAV_BNDS_CHK_SPEC_BAR;
		if (have_fw_feat(fw_features, "enabled",
				 "needs-count-cache-flush-on-context-switch"))
			cp->behaviour |= KVM_PPC_CPU_BEHAV_FLUSH_COUNT_CACHE;
2364 | cp->behaviour_mask = KVM_PPC_CPU_BEHAV_FAVOUR_SECURITY | |
2365 | KVM_PPC_CPU_BEHAV_L1D_FLUSH_PR | |
2366 | KVM_PPC_CPU_BEHAV_BNDS_CHK_SPEC_BAR | |
2367 | KVM_PPC_CPU_BEHAV_FLUSH_COUNT_CACHE; |
2368 | |
2369 | of_node_put(fw_features); |
2370 | } |
2371 | |
2372 | return 0; |
2373 | } |
2374 | #endif |
2375 | |
2376 | int kvm_arch_vm_ioctl(struct file *filp, unsigned int ioctl, unsigned long arg) |
2377 | { |
2378 | struct kvm *kvm __maybe_unused = filp->private_data; |
2379 | void __user *argp = (void __user *)arg; |
2380 | int r; |
2381 | |
2382 | switch (ioctl) { |
2383 | case KVM_PPC_GET_PVINFO: { |
2384 | struct kvm_ppc_pvinfo pvinfo; |
2385 | memset(&pvinfo, 0, sizeof(pvinfo)); |
		r = kvm_vm_ioctl_get_pvinfo(&pvinfo);
		if (copy_to_user(argp, &pvinfo, sizeof(pvinfo))) {
2388 | r = -EFAULT; |
2389 | goto out; |
2390 | } |
2391 | |
2392 | break; |
2393 | } |
2394 | #ifdef CONFIG_SPAPR_TCE_IOMMU |
2395 | case KVM_CREATE_SPAPR_TCE_64: { |
2396 | struct kvm_create_spapr_tce_64 create_tce_64; |
2397 | |
2398 | r = -EFAULT; |
2399 | if (copy_from_user(&create_tce_64, argp, sizeof(create_tce_64))) |
2400 | goto out; |
2401 | if (create_tce_64.flags) { |
2402 | r = -EINVAL; |
2403 | goto out; |
2404 | } |
2405 | r = kvm_vm_ioctl_create_spapr_tce(kvm, &create_tce_64); |
2406 | goto out; |
2407 | } |
2408 | case KVM_CREATE_SPAPR_TCE: { |
2409 | struct kvm_create_spapr_tce create_tce; |
2410 | struct kvm_create_spapr_tce_64 create_tce_64; |
2411 | |
2412 | r = -EFAULT; |
2413 | if (copy_from_user(&create_tce, argp, sizeof(create_tce))) |
2414 | goto out; |
2415 | |
2416 | create_tce_64.liobn = create_tce.liobn; |
2417 | create_tce_64.page_shift = IOMMU_PAGE_SHIFT_4K; |
2418 | create_tce_64.offset = 0; |
2419 | create_tce_64.size = create_tce.window_size >> |
2420 | IOMMU_PAGE_SHIFT_4K; |
2421 | create_tce_64.flags = 0; |
2422 | r = kvm_vm_ioctl_create_spapr_tce(kvm, &create_tce_64); |
2423 | goto out; |
2424 | } |
2425 | #endif |
2426 | #ifdef CONFIG_PPC_BOOK3S_64 |
2427 | case KVM_PPC_GET_SMMU_INFO: { |
2428 | struct kvm_ppc_smmu_info info; |
2429 | struct kvm *kvm = filp->private_data; |
2430 | |
2431 | memset(&info, 0, sizeof(info)); |
2432 | r = kvm->arch.kvm_ops->get_smmu_info(kvm, &info); |
2433 | if (r >= 0 && copy_to_user(argp, &info, sizeof(info))) |
2434 | r = -EFAULT; |
2435 | break; |
2436 | } |
2437 | case KVM_PPC_RTAS_DEFINE_TOKEN: { |
2438 | struct kvm *kvm = filp->private_data; |
2439 | |
2440 | r = kvm_vm_ioctl_rtas_define_token(kvm, argp); |
2441 | break; |
2442 | } |
2443 | case KVM_PPC_CONFIGURE_V3_MMU: { |
2444 | struct kvm *kvm = filp->private_data; |
2445 | struct kvm_ppc_mmuv3_cfg cfg; |
2446 | |
2447 | r = -EINVAL; |
2448 | if (!kvm->arch.kvm_ops->configure_mmu) |
2449 | goto out; |
2450 | r = -EFAULT; |
2451 | if (copy_from_user(&cfg, argp, sizeof(cfg))) |
2452 | goto out; |
2453 | r = kvm->arch.kvm_ops->configure_mmu(kvm, &cfg); |
2454 | break; |
2455 | } |
2456 | case KVM_PPC_GET_RMMU_INFO: { |
2457 | struct kvm *kvm = filp->private_data; |
2458 | struct kvm_ppc_rmmu_info info; |
2459 | |
2460 | r = -EINVAL; |
2461 | if (!kvm->arch.kvm_ops->get_rmmu_info) |
2462 | goto out; |
2463 | r = kvm->arch.kvm_ops->get_rmmu_info(kvm, &info); |
2464 | if (r >= 0 && copy_to_user(argp, &info, sizeof(info))) |
2465 | r = -EFAULT; |
2466 | break; |
2467 | } |
2468 | case KVM_PPC_GET_CPU_CHAR: { |
2469 | struct kvm_ppc_cpu_char cpuchar; |
2470 | |
2471 | r = kvmppc_get_cpu_char(&cpuchar); |
2472 | if (r >= 0 && copy_to_user(argp, &cpuchar, sizeof(cpuchar))) |
2473 | r = -EFAULT; |
2474 | break; |
2475 | } |
2476 | case KVM_PPC_SVM_OFF: { |
2477 | struct kvm *kvm = filp->private_data; |
2478 | |
2479 | r = 0; |
2480 | if (!kvm->arch.kvm_ops->svm_off) |
2481 | goto out; |
2482 | |
2483 | r = kvm->arch.kvm_ops->svm_off(kvm); |
2484 | break; |
2485 | } |
2486 | default: { |
2487 | struct kvm *kvm = filp->private_data; |
2488 | r = kvm->arch.kvm_ops->arch_vm_ioctl(filp, ioctl, arg); |
2489 | } |
2490 | #else /* CONFIG_PPC_BOOK3S_64 */ |
2491 | default: |
2492 | r = -ENOTTY; |
2493 | #endif |
2494 | } |
2495 | out: |
2496 | return r; |
2497 | } |
2498 | |
2499 | static DEFINE_IDA(lpid_inuse); |
2500 | static unsigned long nr_lpids; |
2501 | |
2502 | long kvmppc_alloc_lpid(void) |
2503 | { |
2504 | int lpid; |
2505 | |
2506 | /* The host LPID must always be 0 (allocation starts at 1) */ |
	lpid = ida_alloc_range(&lpid_inuse, 1, nr_lpids - 1, GFP_KERNEL);
2508 | if (lpid < 0) { |
2509 | if (lpid == -ENOMEM) |
2510 | pr_err("%s: Out of memory\n" , __func__); |
2511 | else |
2512 | pr_err("%s: No LPIDs free\n" , __func__); |
2513 | return -ENOMEM; |
2514 | } |
2515 | |
2516 | return lpid; |
2517 | } |
2518 | EXPORT_SYMBOL_GPL(kvmppc_alloc_lpid); |
2519 | |
2520 | void kvmppc_free_lpid(long lpid) |
2521 | { |
	ida_free(&lpid_inuse, lpid);
2523 | } |
2524 | EXPORT_SYMBOL_GPL(kvmppc_free_lpid); |
2525 | |
2526 | /* nr_lpids_param includes the host LPID */ |
2527 | void kvmppc_init_lpid(unsigned long nr_lpids_param) |
2528 | { |
2529 | nr_lpids = nr_lpids_param; |
2530 | } |
2531 | EXPORT_SYMBOL_GPL(kvmppc_init_lpid); |
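
/*
 * Typical allocator lifecycle (sketch): the HV/PR module seeds the
 * pool once with the hardware LPID count (the bound shown here is
 * illustrative), then each VM takes and returns a single LPID:
 *
 *	kvmppc_init_lpid(1ul << lpid_bits);
 *
 *	lpid = kvmppc_alloc_lpid();
 *	if (lpid < 0)
 *		return lpid;
 *	...
 *	kvmppc_free_lpid(lpid);
 */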
2532 | |
2533 | EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_ppc_instr); |
2534 | |
2535 | void kvm_arch_create_vcpu_debugfs(struct kvm_vcpu *vcpu, struct dentry *debugfs_dentry) |
2536 | { |
2537 | if (vcpu->kvm->arch.kvm_ops->create_vcpu_debugfs) |
2538 | vcpu->kvm->arch.kvm_ops->create_vcpu_debugfs(vcpu, debugfs_dentry); |
2539 | } |
2540 | |
2541 | void kvm_arch_create_vm_debugfs(struct kvm *kvm) |
2542 | { |
2543 | if (kvm->arch.kvm_ops->create_vm_debugfs) |
2544 | kvm->arch.kvm_ops->create_vm_debugfs(kvm); |
2545 | } |
2546 | |