1 | /* |
2 | * This file is subject to the terms and conditions of the GNU General Public |
3 | * License. See the file "COPYING" in the main directory of this archive |
4 | * for more details. |
5 | * |
6 | * KVM/MIPS: MIPS specific KVM APIs |
7 | * |
8 | * Copyright (C) 2012 MIPS Technologies, Inc. All rights reserved. |
9 | * Authors: Sanjay Lal <sanjayl@kymasys.com> |
10 | */ |
11 | |
12 | #include <linux/bitops.h> |
13 | #include <linux/errno.h> |
14 | #include <linux/err.h> |
15 | #include <linux/kdebug.h> |
16 | #include <linux/module.h> |
17 | #include <linux/uaccess.h> |
18 | #include <linux/vmalloc.h> |
19 | #include <linux/sched/signal.h> |
20 | #include <linux/fs.h> |
21 | #include <linux/memblock.h> |
22 | #include <linux/pgtable.h> |
23 | |
24 | #include <asm/fpu.h> |
25 | #include <asm/page.h> |
26 | #include <asm/cacheflush.h> |
27 | #include <asm/mmu_context.h> |
28 | #include <asm/pgalloc.h> |
29 | |
30 | #include <linux/kvm_host.h> |
31 | |
32 | #include "interrupt.h" |
33 | |
34 | #define CREATE_TRACE_POINTS |
35 | #include "trace.h" |
36 | |
37 | #ifndef VECTORSPACING |
38 | #define VECTORSPACING 0x100 /* for EI/VI mode */ |
39 | #endif |
40 | |
41 | const struct _kvm_stats_desc kvm_vm_stats_desc[] = { |
42 | KVM_GENERIC_VM_STATS() |
43 | }; |
44 | |
const struct kvm_stats_header kvm_vm_stats_header = {
46 | .name_size = KVM_STATS_NAME_SIZE, |
47 | .num_desc = ARRAY_SIZE(kvm_vm_stats_desc), |
48 | .id_offset = sizeof(struct kvm_stats_header), |
49 | .desc_offset = sizeof(struct kvm_stats_header) + KVM_STATS_NAME_SIZE, |
50 | .data_offset = sizeof(struct kvm_stats_header) + KVM_STATS_NAME_SIZE + |
51 | sizeof(kvm_vm_stats_desc), |
52 | }; |
53 | |
54 | const struct _kvm_stats_desc kvm_vcpu_stats_desc[] = { |
55 | KVM_GENERIC_VCPU_STATS(), |
56 | STATS_DESC_COUNTER(VCPU, wait_exits), |
57 | STATS_DESC_COUNTER(VCPU, cache_exits), |
58 | STATS_DESC_COUNTER(VCPU, signal_exits), |
59 | STATS_DESC_COUNTER(VCPU, int_exits), |
60 | STATS_DESC_COUNTER(VCPU, cop_unusable_exits), |
61 | STATS_DESC_COUNTER(VCPU, tlbmod_exits), |
62 | STATS_DESC_COUNTER(VCPU, tlbmiss_ld_exits), |
63 | STATS_DESC_COUNTER(VCPU, tlbmiss_st_exits), |
64 | STATS_DESC_COUNTER(VCPU, addrerr_st_exits), |
65 | STATS_DESC_COUNTER(VCPU, addrerr_ld_exits), |
66 | STATS_DESC_COUNTER(VCPU, syscall_exits), |
67 | STATS_DESC_COUNTER(VCPU, resvd_inst_exits), |
68 | STATS_DESC_COUNTER(VCPU, break_inst_exits), |
69 | STATS_DESC_COUNTER(VCPU, trap_inst_exits), |
70 | STATS_DESC_COUNTER(VCPU, msa_fpe_exits), |
71 | STATS_DESC_COUNTER(VCPU, fpe_exits), |
72 | STATS_DESC_COUNTER(VCPU, msa_disabled_exits), |
73 | STATS_DESC_COUNTER(VCPU, flush_dcache_exits), |
74 | STATS_DESC_COUNTER(VCPU, vz_gpsi_exits), |
75 | STATS_DESC_COUNTER(VCPU, vz_gsfc_exits), |
76 | STATS_DESC_COUNTER(VCPU, vz_hc_exits), |
77 | STATS_DESC_COUNTER(VCPU, vz_grr_exits), |
78 | STATS_DESC_COUNTER(VCPU, vz_gva_exits), |
79 | STATS_DESC_COUNTER(VCPU, vz_ghfc_exits), |
80 | STATS_DESC_COUNTER(VCPU, vz_gpa_exits), |
81 | STATS_DESC_COUNTER(VCPU, vz_resvd_exits), |
82 | #ifdef CONFIG_CPU_LOONGSON64 |
83 | STATS_DESC_COUNTER(VCPU, vz_cpucfg_exits), |
84 | #endif |
85 | }; |
86 | |
const struct kvm_stats_header kvm_vcpu_stats_header = {
88 | .name_size = KVM_STATS_NAME_SIZE, |
89 | .num_desc = ARRAY_SIZE(kvm_vcpu_stats_desc), |
90 | .id_offset = sizeof(struct kvm_stats_header), |
91 | .desc_offset = sizeof(struct kvm_stats_header) + KVM_STATS_NAME_SIZE, |
92 | .data_offset = sizeof(struct kvm_stats_header) + KVM_STATS_NAME_SIZE + |
93 | sizeof(kvm_vcpu_stats_desc), |
94 | }; |
95 | |
96 | bool kvm_trace_guest_mode_change; |
97 | |
98 | int kvm_guest_mode_change_trace_reg(void) |
99 | { |
100 | kvm_trace_guest_mode_change = true; |
101 | return 0; |
102 | } |
103 | |
104 | void kvm_guest_mode_change_trace_unreg(void) |
105 | { |
106 | kvm_trace_guest_mode_change = false; |
107 | } |
108 | |
109 | /* |
 * XXXKYMA: We are simulating a processor that has the WII bit set in
111 | * Config7, so we are "runnable" if interrupts are pending |
112 | */ |
113 | int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu) |
114 | { |
115 | return !!(vcpu->arch.pending_exceptions); |
116 | } |
117 | |
118 | bool kvm_arch_vcpu_in_kernel(struct kvm_vcpu *vcpu) |
119 | { |
120 | return false; |
121 | } |
122 | |
123 | int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu) |
124 | { |
125 | return 1; |
126 | } |
127 | |
128 | int kvm_arch_hardware_enable(void) |
129 | { |
130 | return kvm_mips_callbacks->hardware_enable(); |
131 | } |
132 | |
133 | void kvm_arch_hardware_disable(void) |
134 | { |
135 | kvm_mips_callbacks->hardware_disable(); |
136 | } |
137 | |
138 | extern void kvm_init_loongson_ipi(struct kvm *kvm); |
139 | |
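/*
 * Create a new VM: check that the requested machine type is supported, then
 * allocate the root page table used to map guest physical addresses (GPA)
 * to root physical addresses (RPA).
 */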
140 | int kvm_arch_init_vm(struct kvm *kvm, unsigned long type) |
141 | { |
142 | switch (type) { |
143 | case KVM_VM_MIPS_AUTO: |
144 | break; |
145 | case KVM_VM_MIPS_VZ: |
146 | break; |
147 | default: |
148 | /* Unsupported KVM type */ |
149 | return -EINVAL; |
150 | } |
151 | |
152 | /* Allocate page table to map GPA -> RPA */ |
153 | kvm->arch.gpa_mm.pgd = kvm_pgd_alloc(); |
154 | if (!kvm->arch.gpa_mm.pgd) |
155 | return -ENOMEM; |
156 | |
157 | #ifdef CONFIG_CPU_LOONGSON64 |
158 | kvm_init_loongson_ipi(kvm); |
159 | #endif |
160 | |
161 | return 0; |
162 | } |
163 | |
164 | static void kvm_mips_free_gpa_pt(struct kvm *kvm) |
165 | { |
166 | /* It should always be safe to remove after flushing the whole range */ |
167 | WARN_ON(!kvm_mips_flush_gpa_pt(kvm, 0, ~0)); |
	pgd_free(NULL, kvm->arch.gpa_mm.pgd);
169 | } |
170 | |
171 | void kvm_arch_destroy_vm(struct kvm *kvm) |
172 | { |
173 | kvm_destroy_vcpus(kvm); |
174 | kvm_mips_free_gpa_pt(kvm); |
175 | } |
176 | |
177 | long kvm_arch_dev_ioctl(struct file *filp, unsigned int ioctl, |
178 | unsigned long arg) |
179 | { |
180 | return -ENOIOCTLCMD; |
181 | } |
182 | |
183 | void kvm_arch_flush_shadow_all(struct kvm *kvm) |
184 | { |
185 | /* Flush whole GPA */ |
186 | kvm_mips_flush_gpa_pt(kvm, 0, ~0); |
187 | kvm_flush_remote_tlbs(kvm); |
188 | } |
189 | |
190 | void kvm_arch_flush_shadow_memslot(struct kvm *kvm, |
191 | struct kvm_memory_slot *slot) |
192 | { |
193 | /* |
194 | * The slot has been made invalid (ready for moving or deletion), so we |
195 | * need to ensure that it can no longer be accessed by any guest VCPUs. |
196 | */ |
197 | |
	spin_lock(&kvm->mmu_lock);
199 | /* Flush slot from GPA */ |
200 | kvm_mips_flush_gpa_pt(kvm, slot->base_gfn, |
201 | slot->base_gfn + slot->npages - 1); |
	kvm_flush_remote_tlbs_memslot(kvm, slot);
	spin_unlock(&kvm->mmu_lock);
204 | } |
205 | |
206 | int kvm_arch_prepare_memory_region(struct kvm *kvm, |
207 | const struct kvm_memory_slot *old, |
208 | struct kvm_memory_slot *new, |
209 | enum kvm_mr_change change) |
210 | { |
211 | return 0; |
212 | } |
213 | |
214 | void kvm_arch_commit_memory_region(struct kvm *kvm, |
215 | struct kvm_memory_slot *old, |
216 | const struct kvm_memory_slot *new, |
217 | enum kvm_mr_change change) |
218 | { |
219 | int needs_flush; |
220 | |
221 | /* |
222 | * If dirty page logging is enabled, write protect all pages in the slot |
223 | * ready for dirty logging. |
224 | * |
225 | * There is no need to do this in any of the following cases: |
226 | * CREATE: No dirty mappings will already exist. |
227 | * MOVE/DELETE: The old mappings will already have been cleaned up by |
228 | * kvm_arch_flush_shadow_memslot() |
229 | */ |
230 | if (change == KVM_MR_FLAGS_ONLY && |
231 | (!(old->flags & KVM_MEM_LOG_DIRTY_PAGES) && |
232 | new->flags & KVM_MEM_LOG_DIRTY_PAGES)) { |
		spin_lock(&kvm->mmu_lock);
234 | /* Write protect GPA page table entries */ |
235 | needs_flush = kvm_mips_mkclean_gpa_pt(kvm, new->base_gfn, |
236 | new->base_gfn + new->npages - 1); |
237 | if (needs_flush) |
			kvm_flush_remote_tlbs_memslot(kvm, new);
		spin_unlock(&kvm->mmu_lock);
240 | } |
241 | } |
242 | |
243 | static inline void dump_handler(const char *symbol, void *start, void *end) |
244 | { |
245 | u32 *p; |
246 | |
247 | pr_debug("LEAF(%s)\n" , symbol); |
248 | |
249 | pr_debug("\t.set push\n" ); |
250 | pr_debug("\t.set noreorder\n" ); |
251 | |
252 | for (p = start; p < (u32 *)end; ++p) |
253 | pr_debug("\t.word\t0x%08x\t\t# %p\n" , *p, p); |
254 | |
255 | pr_debug("\t.set\tpop\n" ); |
256 | |
257 | pr_debug("\tEND(%s)\n" , symbol); |
258 | } |
259 | |
260 | /* low level hrtimer wake routine */ |
261 | static enum hrtimer_restart kvm_mips_comparecount_wakeup(struct hrtimer *timer) |
262 | { |
263 | struct kvm_vcpu *vcpu; |
264 | |
265 | vcpu = container_of(timer, struct kvm_vcpu, arch.comparecount_timer); |
266 | |
267 | kvm_mips_callbacks->queue_timer_int(vcpu); |
268 | |
269 | vcpu->arch.wait = 0; |
	rcuwait_wake_up(&vcpu->wait);
271 | |
272 | return kvm_mips_count_timeout(vcpu); |
273 | } |
274 | |
275 | int kvm_arch_vcpu_precreate(struct kvm *kvm, unsigned int id) |
276 | { |
277 | return 0; |
278 | } |
279 | |
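/*
 * Create a vCPU: allocate an unmapped region (gebase) and dynamically build
 * its TLB refill and general exception vectors, exit handler and guest entry
 * code there, then hand off to the backend for initial guest state.
 */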
280 | int kvm_arch_vcpu_create(struct kvm_vcpu *vcpu) |
281 | { |
282 | int err, size; |
283 | void *gebase, *p, *handler, *refill_start, *refill_end; |
284 | int i; |
285 | |
286 | kvm_debug("kvm @ %p: create cpu %d at %p\n" , |
287 | vcpu->kvm, vcpu->vcpu_id, vcpu); |
288 | |
289 | err = kvm_mips_callbacks->vcpu_init(vcpu); |
290 | if (err) |
291 | return err; |
292 | |
	hrtimer_init(&vcpu->arch.comparecount_timer, CLOCK_MONOTONIC,
		     HRTIMER_MODE_REL);
295 | vcpu->arch.comparecount_timer.function = kvm_mips_comparecount_wakeup; |
296 | |
297 | /* |
298 | * Allocate space for host mode exception handlers that handle |
299 | * guest mode exits |
300 | */ |
301 | if (cpu_has_veic || cpu_has_vint) |
302 | size = 0x200 + VECTORSPACING * 64; |
303 | else |
304 | size = 0x4000; |
305 | |
306 | gebase = kzalloc(ALIGN(size, PAGE_SIZE), GFP_KERNEL); |
307 | |
308 | if (!gebase) { |
309 | err = -ENOMEM; |
310 | goto out_uninit_vcpu; |
311 | } |
312 | kvm_debug("Allocated %d bytes for KVM Exception Handlers @ %p\n" , |
313 | ALIGN(size, PAGE_SIZE), gebase); |
314 | |
315 | /* |
316 | * Check new ebase actually fits in CP0_EBase. The lack of a write gate |
317 | * limits us to the low 512MB of physical address space. If the memory |
318 | * we allocate is out of range, just give up now. |
319 | */ |
	if (!cpu_has_ebase_wg && virt_to_phys(gebase) >= 0x20000000) {
		kvm_err("CP0_EBase.WG required for guest exception base %pK\n",
322 | gebase); |
323 | err = -ENOMEM; |
324 | goto out_free_gebase; |
325 | } |
326 | |
327 | /* Save new ebase */ |
328 | vcpu->arch.guest_ebase = gebase; |
329 | |
330 | /* Build guest exception vectors dynamically in unmapped memory */ |
331 | handler = gebase + 0x2000; |
332 | |
333 | /* TLB refill (or XTLB refill on 64-bit VZ where KX=1) */ |
334 | refill_start = gebase; |
335 | if (IS_ENABLED(CONFIG_64BIT)) |
336 | refill_start += 0x080; |
337 | refill_end = kvm_mips_build_tlb_refill_exception(refill_start, handler); |
338 | |
339 | /* General Exception Entry point */ |
340 | kvm_mips_build_exception(gebase + 0x180, handler); |
341 | |
342 | /* For vectored interrupts poke the exception code @ all offsets 0-7 */ |
343 | for (i = 0; i < 8; i++) { |
344 | kvm_debug("L1 Vectored handler @ %p\n" , |
345 | gebase + 0x200 + (i * VECTORSPACING)); |
346 | kvm_mips_build_exception(gebase + 0x200 + i * VECTORSPACING, |
347 | handler); |
348 | } |
349 | |
350 | /* General exit handler */ |
351 | p = handler; |
352 | p = kvm_mips_build_exit(p); |
353 | |
354 | /* Guest entry routine */ |
355 | vcpu->arch.vcpu_run = p; |
356 | p = kvm_mips_build_vcpu_run(p); |
357 | |
358 | /* Dump the generated code */ |
359 | pr_debug("#include <asm/asm.h>\n" ); |
360 | pr_debug("#include <asm/regdef.h>\n" ); |
361 | pr_debug("\n" ); |
362 | dump_handler(symbol: "kvm_vcpu_run" , start: vcpu->arch.vcpu_run, end: p); |
363 | dump_handler(symbol: "kvm_tlb_refill" , start: refill_start, end: refill_end); |
364 | dump_handler(symbol: "kvm_gen_exc" , start: gebase + 0x180, end: gebase + 0x200); |
365 | dump_handler(symbol: "kvm_exit" , start: gebase + 0x2000, end: vcpu->arch.vcpu_run); |
366 | |
367 | /* Invalidate the icache for these ranges */ |
	flush_icache_range((unsigned long)gebase,
			   (unsigned long)gebase + ALIGN(size, PAGE_SIZE));
370 | |
371 | /* Init */ |
372 | vcpu->arch.last_sched_cpu = -1; |
373 | vcpu->arch.last_exec_cpu = -1; |
374 | |
375 | /* Initial guest state */ |
376 | err = kvm_mips_callbacks->vcpu_setup(vcpu); |
377 | if (err) |
378 | goto out_free_gebase; |
379 | |
380 | return 0; |
381 | |
382 | out_free_gebase: |
	kfree(gebase);
384 | out_uninit_vcpu: |
385 | kvm_mips_callbacks->vcpu_uninit(vcpu); |
386 | return err; |
387 | } |
388 | |
389 | void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu) |
390 | { |
	hrtimer_cancel(&vcpu->arch.comparecount_timer);
392 | |
393 | kvm_mips_dump_stats(vcpu); |
394 | |
395 | kvm_mmu_free_memory_caches(vcpu); |
	kfree(vcpu->arch.guest_ebase);
397 | |
398 | kvm_mips_callbacks->vcpu_uninit(vcpu); |
399 | } |
400 | |
401 | int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu, |
402 | struct kvm_guest_debug *dbg) |
403 | { |
404 | return -ENOIOCTLCMD; |
405 | } |
406 | |
407 | /* |
408 | * Actually run the vCPU, entering an RCU extended quiescent state (EQS) while |
409 | * the vCPU is running. |
410 | * |
411 | * This must be noinstr as instrumentation may make use of RCU, and this is not |
412 | * safe during the EQS. |
413 | */ |
414 | static int noinstr kvm_mips_vcpu_enter_exit(struct kvm_vcpu *vcpu) |
415 | { |
416 | int ret; |
417 | |
418 | guest_state_enter_irqoff(); |
419 | ret = kvm_mips_callbacks->vcpu_run(vcpu); |
420 | guest_state_exit_irqoff(); |
421 | |
422 | return ret; |
423 | } |
424 | |
425 | int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu) |
426 | { |
427 | int r = -EINTR; |
428 | |
429 | vcpu_load(vcpu); |
430 | |
431 | kvm_sigset_activate(vcpu); |
432 | |
433 | if (vcpu->mmio_needed) { |
434 | if (!vcpu->mmio_is_write) |
435 | kvm_mips_complete_mmio_load(vcpu); |
436 | vcpu->mmio_needed = 0; |
437 | } |
438 | |
439 | if (vcpu->run->immediate_exit) |
440 | goto out; |
441 | |
442 | lose_fpu(1); |
443 | |
444 | local_irq_disable(); |
445 | guest_timing_enter_irqoff(); |
446 | trace_kvm_enter(vcpu); |
447 | |
448 | /* |
449 | * Make sure the read of VCPU requests in vcpu_run() callback is not |
450 | * reordered ahead of the write to vcpu->mode, or we could miss a TLB |
451 | * flush request while the requester sees the VCPU as outside of guest |
452 | * mode and not needing an IPI. |
453 | */ |
454 | smp_store_mb(vcpu->mode, IN_GUEST_MODE); |
455 | |
456 | r = kvm_mips_vcpu_enter_exit(vcpu); |
457 | |
458 | /* |
459 | * We must ensure that any pending interrupts are taken before |
460 | * we exit guest timing so that timer ticks are accounted as |
461 | * guest time. Transiently unmask interrupts so that any |
462 | * pending interrupts are taken. |
463 | * |
464 | * TODO: is there a barrier which ensures that pending interrupts are |
465 | * recognised? Currently this just hopes that the CPU takes any pending |
466 | * interrupts between the enable and disable. |
467 | */ |
468 | local_irq_enable(); |
469 | local_irq_disable(); |
470 | |
471 | trace_kvm_out(vcpu); |
472 | guest_timing_exit_irqoff(); |
473 | local_irq_enable(); |
474 | |
475 | out: |
476 | kvm_sigset_deactivate(vcpu); |
477 | |
478 | vcpu_put(vcpu); |
479 | return r; |
480 | } |
481 | |
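/*
 * Queue (positive irq) or dequeue (negative irq) a guest interrupt for the
 * target vCPU; irq->cpu == -1 addresses the calling vCPU itself.
 */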
482 | int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu, |
483 | struct kvm_mips_interrupt *irq) |
484 | { |
485 | int intr = (int)irq->irq; |
486 | struct kvm_vcpu *dvcpu = NULL; |
487 | |
488 | if (intr == kvm_priority_to_irq[MIPS_EXC_INT_IPI_1] || |
489 | intr == kvm_priority_to_irq[MIPS_EXC_INT_IPI_2] || |
490 | intr == (-kvm_priority_to_irq[MIPS_EXC_INT_IPI_1]) || |
491 | intr == (-kvm_priority_to_irq[MIPS_EXC_INT_IPI_2])) |
492 | kvm_debug("%s: CPU: %d, INTR: %d\n" , __func__, irq->cpu, |
493 | (int)intr); |
494 | |
495 | if (irq->cpu == -1) |
496 | dvcpu = vcpu; |
497 | else |
		dvcpu = kvm_get_vcpu(vcpu->kvm, irq->cpu);
499 | |
500 | if (intr == 2 || intr == 3 || intr == 4 || intr == 6) { |
501 | kvm_mips_callbacks->queue_io_int(dvcpu, irq); |
502 | |
503 | } else if (intr == -2 || intr == -3 || intr == -4 || intr == -6) { |
504 | kvm_mips_callbacks->dequeue_io_int(dvcpu, irq); |
505 | } else { |
506 | kvm_err("%s: invalid interrupt ioctl (%d:%d)\n" , __func__, |
507 | irq->cpu, irq->irq); |
508 | return -EINVAL; |
509 | } |
510 | |
511 | dvcpu->arch.wait = 0; |
512 | |
	rcuwait_wake_up(&dvcpu->wait);
514 | |
515 | return 0; |
516 | } |
517 | |
518 | int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu, |
519 | struct kvm_mp_state *mp_state) |
520 | { |
521 | return -ENOIOCTLCMD; |
522 | } |
523 | |
524 | int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu, |
525 | struct kvm_mp_state *mp_state) |
526 | { |
527 | return -ENOIOCTLCMD; |
528 | } |
529 | |
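/* Register indices always included in the KVM_GET_REG_LIST output */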
530 | static u64 kvm_mips_get_one_regs[] = { |
531 | KVM_REG_MIPS_R0, |
532 | KVM_REG_MIPS_R1, |
533 | KVM_REG_MIPS_R2, |
534 | KVM_REG_MIPS_R3, |
535 | KVM_REG_MIPS_R4, |
536 | KVM_REG_MIPS_R5, |
537 | KVM_REG_MIPS_R6, |
538 | KVM_REG_MIPS_R7, |
539 | KVM_REG_MIPS_R8, |
540 | KVM_REG_MIPS_R9, |
541 | KVM_REG_MIPS_R10, |
542 | KVM_REG_MIPS_R11, |
543 | KVM_REG_MIPS_R12, |
544 | KVM_REG_MIPS_R13, |
545 | KVM_REG_MIPS_R14, |
546 | KVM_REG_MIPS_R15, |
547 | KVM_REG_MIPS_R16, |
548 | KVM_REG_MIPS_R17, |
549 | KVM_REG_MIPS_R18, |
550 | KVM_REG_MIPS_R19, |
551 | KVM_REG_MIPS_R20, |
552 | KVM_REG_MIPS_R21, |
553 | KVM_REG_MIPS_R22, |
554 | KVM_REG_MIPS_R23, |
555 | KVM_REG_MIPS_R24, |
556 | KVM_REG_MIPS_R25, |
557 | KVM_REG_MIPS_R26, |
558 | KVM_REG_MIPS_R27, |
559 | KVM_REG_MIPS_R28, |
560 | KVM_REG_MIPS_R29, |
561 | KVM_REG_MIPS_R30, |
562 | KVM_REG_MIPS_R31, |
563 | |
564 | #ifndef CONFIG_CPU_MIPSR6 |
565 | KVM_REG_MIPS_HI, |
566 | KVM_REG_MIPS_LO, |
567 | #endif |
568 | KVM_REG_MIPS_PC, |
569 | }; |
570 | |
571 | static u64 kvm_mips_get_one_regs_fpu[] = { |
572 | KVM_REG_MIPS_FCR_IR, |
573 | KVM_REG_MIPS_FCR_CSR, |
574 | }; |
575 | |
576 | static u64 kvm_mips_get_one_regs_msa[] = { |
577 | KVM_REG_MIPS_MSA_IR, |
578 | KVM_REG_MIPS_MSA_CSR, |
579 | }; |
580 | |
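/*
 * Total number of register indices reported by KVM_GET_REG_LIST, including
 * conditional FPU/MSA registers and any backend specific ones.
 */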
581 | static unsigned long kvm_mips_num_regs(struct kvm_vcpu *vcpu) |
582 | { |
583 | unsigned long ret; |
584 | |
585 | ret = ARRAY_SIZE(kvm_mips_get_one_regs); |
586 | if (kvm_mips_guest_can_have_fpu(&vcpu->arch)) { |
587 | ret += ARRAY_SIZE(kvm_mips_get_one_regs_fpu) + 48; |
588 | /* odd doubles */ |
589 | if (boot_cpu_data.fpu_id & MIPS_FPIR_F64) |
590 | ret += 16; |
591 | } |
592 | if (kvm_mips_guest_can_have_msa(&vcpu->arch)) |
593 | ret += ARRAY_SIZE(kvm_mips_get_one_regs_msa) + 32; |
594 | ret += kvm_mips_callbacks->num_regs(vcpu); |
595 | |
596 | return ret; |
597 | } |
598 | |
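/* Copy the full KVM_GET_REG_LIST register index list out to userspace */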
599 | static int kvm_mips_copy_reg_indices(struct kvm_vcpu *vcpu, u64 __user *indices) |
600 | { |
601 | u64 index; |
602 | unsigned int i; |
603 | |
604 | if (copy_to_user(indices, kvm_mips_get_one_regs, |
605 | sizeof(kvm_mips_get_one_regs))) |
606 | return -EFAULT; |
607 | indices += ARRAY_SIZE(kvm_mips_get_one_regs); |
608 | |
609 | if (kvm_mips_guest_can_have_fpu(&vcpu->arch)) { |
610 | if (copy_to_user(indices, kvm_mips_get_one_regs_fpu, |
611 | sizeof(kvm_mips_get_one_regs_fpu))) |
612 | return -EFAULT; |
613 | indices += ARRAY_SIZE(kvm_mips_get_one_regs_fpu); |
614 | |
615 | for (i = 0; i < 32; ++i) { |
616 | index = KVM_REG_MIPS_FPR_32(i); |
			if (copy_to_user(indices, &index, sizeof(index)))
618 | return -EFAULT; |
619 | ++indices; |
620 | |
621 | /* skip odd doubles if no F64 */ |
622 | if (i & 1 && !(boot_cpu_data.fpu_id & MIPS_FPIR_F64)) |
623 | continue; |
624 | |
625 | index = KVM_REG_MIPS_FPR_64(i); |
			if (copy_to_user(indices, &index, sizeof(index)))
627 | return -EFAULT; |
628 | ++indices; |
629 | } |
630 | } |
631 | |
632 | if (kvm_mips_guest_can_have_msa(&vcpu->arch)) { |
633 | if (copy_to_user(indices, kvm_mips_get_one_regs_msa, |
634 | sizeof(kvm_mips_get_one_regs_msa))) |
635 | return -EFAULT; |
636 | indices += ARRAY_SIZE(kvm_mips_get_one_regs_msa); |
637 | |
638 | for (i = 0; i < 32; ++i) { |
639 | index = KVM_REG_MIPS_VEC_128(i); |
			if (copy_to_user(indices, &index, sizeof(index)))
641 | return -EFAULT; |
642 | ++indices; |
643 | } |
644 | } |
645 | |
646 | return kvm_mips_callbacks->copy_reg_indices(vcpu, indices); |
647 | } |
648 | |
649 | static int kvm_mips_get_reg(struct kvm_vcpu *vcpu, |
650 | const struct kvm_one_reg *reg) |
651 | { |
652 | struct mips_coproc *cop0 = &vcpu->arch.cop0; |
653 | struct mips_fpu_struct *fpu = &vcpu->arch.fpu; |
654 | int ret; |
655 | s64 v; |
656 | s64 vs[2]; |
657 | unsigned int idx; |
658 | |
659 | switch (reg->id) { |
660 | /* General purpose registers */ |
661 | case KVM_REG_MIPS_R0 ... KVM_REG_MIPS_R31: |
662 | v = (long)vcpu->arch.gprs[reg->id - KVM_REG_MIPS_R0]; |
663 | break; |
664 | #ifndef CONFIG_CPU_MIPSR6 |
665 | case KVM_REG_MIPS_HI: |
666 | v = (long)vcpu->arch.hi; |
667 | break; |
668 | case KVM_REG_MIPS_LO: |
669 | v = (long)vcpu->arch.lo; |
670 | break; |
671 | #endif |
672 | case KVM_REG_MIPS_PC: |
673 | v = (long)vcpu->arch.pc; |
674 | break; |
675 | |
676 | /* Floating point registers */ |
677 | case KVM_REG_MIPS_FPR_32(0) ... KVM_REG_MIPS_FPR_32(31): |
678 | if (!kvm_mips_guest_has_fpu(&vcpu->arch)) |
679 | return -EINVAL; |
680 | idx = reg->id - KVM_REG_MIPS_FPR_32(0); |
681 | /* Odd singles in top of even double when FR=0 */ |
682 | if (kvm_read_c0_guest_status(cop0) & ST0_FR) |
683 | v = get_fpr32(&fpu->fpr[idx], 0); |
684 | else |
685 | v = get_fpr32(&fpu->fpr[idx & ~1], idx & 1); |
686 | break; |
687 | case KVM_REG_MIPS_FPR_64(0) ... KVM_REG_MIPS_FPR_64(31): |
688 | if (!kvm_mips_guest_has_fpu(&vcpu->arch)) |
689 | return -EINVAL; |
690 | idx = reg->id - KVM_REG_MIPS_FPR_64(0); |
691 | /* Can't access odd doubles in FR=0 mode */ |
692 | if (idx & 1 && !(kvm_read_c0_guest_status(cop0) & ST0_FR)) |
693 | return -EINVAL; |
694 | v = get_fpr64(&fpu->fpr[idx], 0); |
695 | break; |
696 | case KVM_REG_MIPS_FCR_IR: |
697 | if (!kvm_mips_guest_has_fpu(&vcpu->arch)) |
698 | return -EINVAL; |
699 | v = boot_cpu_data.fpu_id; |
700 | break; |
701 | case KVM_REG_MIPS_FCR_CSR: |
702 | if (!kvm_mips_guest_has_fpu(&vcpu->arch)) |
703 | return -EINVAL; |
704 | v = fpu->fcr31; |
705 | break; |
706 | |
707 | /* MIPS SIMD Architecture (MSA) registers */ |
708 | case KVM_REG_MIPS_VEC_128(0) ... KVM_REG_MIPS_VEC_128(31): |
709 | if (!kvm_mips_guest_has_msa(&vcpu->arch)) |
710 | return -EINVAL; |
711 | /* Can't access MSA registers in FR=0 mode */ |
712 | if (!(kvm_read_c0_guest_status(cop0) & ST0_FR)) |
713 | return -EINVAL; |
714 | idx = reg->id - KVM_REG_MIPS_VEC_128(0); |
715 | #ifdef CONFIG_CPU_LITTLE_ENDIAN |
716 | /* least significant byte first */ |
717 | vs[0] = get_fpr64(&fpu->fpr[idx], 0); |
718 | vs[1] = get_fpr64(&fpu->fpr[idx], 1); |
719 | #else |
720 | /* most significant byte first */ |
721 | vs[0] = get_fpr64(&fpu->fpr[idx], 1); |
722 | vs[1] = get_fpr64(&fpu->fpr[idx], 0); |
723 | #endif |
724 | break; |
725 | case KVM_REG_MIPS_MSA_IR: |
726 | if (!kvm_mips_guest_has_msa(&vcpu->arch)) |
727 | return -EINVAL; |
728 | v = boot_cpu_data.msa_id; |
729 | break; |
730 | case KVM_REG_MIPS_MSA_CSR: |
731 | if (!kvm_mips_guest_has_msa(&vcpu->arch)) |
732 | return -EINVAL; |
733 | v = fpu->msacsr; |
734 | break; |
735 | |
736 | /* registers to be handled specially */ |
737 | default: |
738 | ret = kvm_mips_callbacks->get_one_reg(vcpu, reg, &v); |
739 | if (ret) |
740 | return ret; |
741 | break; |
742 | } |
743 | if ((reg->id & KVM_REG_SIZE_MASK) == KVM_REG_SIZE_U64) { |
744 | u64 __user *uaddr64 = (u64 __user *)(long)reg->addr; |
745 | |
746 | return put_user(v, uaddr64); |
747 | } else if ((reg->id & KVM_REG_SIZE_MASK) == KVM_REG_SIZE_U32) { |
748 | u32 __user *uaddr32 = (u32 __user *)(long)reg->addr; |
749 | u32 v32 = (u32)v; |
750 | |
751 | return put_user(v32, uaddr32); |
752 | } else if ((reg->id & KVM_REG_SIZE_MASK) == KVM_REG_SIZE_U128) { |
753 | void __user *uaddr = (void __user *)(long)reg->addr; |
754 | |
		return copy_to_user(uaddr, vs, 16) ? -EFAULT : 0;
756 | } else { |
757 | return -EINVAL; |
758 | } |
759 | } |
760 | |
761 | static int kvm_mips_set_reg(struct kvm_vcpu *vcpu, |
762 | const struct kvm_one_reg *reg) |
763 | { |
764 | struct mips_coproc *cop0 = &vcpu->arch.cop0; |
765 | struct mips_fpu_struct *fpu = &vcpu->arch.fpu; |
766 | s64 v; |
767 | s64 vs[2]; |
768 | unsigned int idx; |
769 | |
770 | if ((reg->id & KVM_REG_SIZE_MASK) == KVM_REG_SIZE_U64) { |
771 | u64 __user *uaddr64 = (u64 __user *)(long)reg->addr; |
772 | |
773 | if (get_user(v, uaddr64) != 0) |
774 | return -EFAULT; |
775 | } else if ((reg->id & KVM_REG_SIZE_MASK) == KVM_REG_SIZE_U32) { |
776 | u32 __user *uaddr32 = (u32 __user *)(long)reg->addr; |
777 | s32 v32; |
778 | |
779 | if (get_user(v32, uaddr32) != 0) |
780 | return -EFAULT; |
781 | v = (s64)v32; |
	} else if ((reg->id & KVM_REG_SIZE_MASK) == KVM_REG_SIZE_U128) {
		void __user *uaddr = (void __user *)(long)reg->addr;

		if (copy_from_user(vs, uaddr, 16))
			return -EFAULT;
	} else {
		return -EINVAL;
	}
789 | |
790 | switch (reg->id) { |
791 | /* General purpose registers */ |
792 | case KVM_REG_MIPS_R0: |
793 | /* Silently ignore requests to set $0 */ |
794 | break; |
795 | case KVM_REG_MIPS_R1 ... KVM_REG_MIPS_R31: |
796 | vcpu->arch.gprs[reg->id - KVM_REG_MIPS_R0] = v; |
797 | break; |
798 | #ifndef CONFIG_CPU_MIPSR6 |
799 | case KVM_REG_MIPS_HI: |
800 | vcpu->arch.hi = v; |
801 | break; |
802 | case KVM_REG_MIPS_LO: |
803 | vcpu->arch.lo = v; |
804 | break; |
805 | #endif |
806 | case KVM_REG_MIPS_PC: |
807 | vcpu->arch.pc = v; |
808 | break; |
809 | |
810 | /* Floating point registers */ |
811 | case KVM_REG_MIPS_FPR_32(0) ... KVM_REG_MIPS_FPR_32(31): |
812 | if (!kvm_mips_guest_has_fpu(&vcpu->arch)) |
813 | return -EINVAL; |
814 | idx = reg->id - KVM_REG_MIPS_FPR_32(0); |
815 | /* Odd singles in top of even double when FR=0 */ |
816 | if (kvm_read_c0_guest_status(cop0) & ST0_FR) |
817 | set_fpr32(&fpu->fpr[idx], 0, v); |
818 | else |
819 | set_fpr32(&fpu->fpr[idx & ~1], idx & 1, v); |
820 | break; |
821 | case KVM_REG_MIPS_FPR_64(0) ... KVM_REG_MIPS_FPR_64(31): |
822 | if (!kvm_mips_guest_has_fpu(&vcpu->arch)) |
823 | return -EINVAL; |
824 | idx = reg->id - KVM_REG_MIPS_FPR_64(0); |
825 | /* Can't access odd doubles in FR=0 mode */ |
826 | if (idx & 1 && !(kvm_read_c0_guest_status(cop0) & ST0_FR)) |
827 | return -EINVAL; |
828 | set_fpr64(&fpu->fpr[idx], 0, v); |
829 | break; |
830 | case KVM_REG_MIPS_FCR_IR: |
831 | if (!kvm_mips_guest_has_fpu(&vcpu->arch)) |
832 | return -EINVAL; |
833 | /* Read-only */ |
834 | break; |
835 | case KVM_REG_MIPS_FCR_CSR: |
836 | if (!kvm_mips_guest_has_fpu(&vcpu->arch)) |
837 | return -EINVAL; |
838 | fpu->fcr31 = v; |
839 | break; |
840 | |
841 | /* MIPS SIMD Architecture (MSA) registers */ |
842 | case KVM_REG_MIPS_VEC_128(0) ... KVM_REG_MIPS_VEC_128(31): |
843 | if (!kvm_mips_guest_has_msa(&vcpu->arch)) |
844 | return -EINVAL; |
845 | idx = reg->id - KVM_REG_MIPS_VEC_128(0); |
846 | #ifdef CONFIG_CPU_LITTLE_ENDIAN |
847 | /* least significant byte first */ |
848 | set_fpr64(&fpu->fpr[idx], 0, vs[0]); |
849 | set_fpr64(&fpu->fpr[idx], 1, vs[1]); |
850 | #else |
851 | /* most significant byte first */ |
852 | set_fpr64(&fpu->fpr[idx], 1, vs[0]); |
853 | set_fpr64(&fpu->fpr[idx], 0, vs[1]); |
854 | #endif |
855 | break; |
856 | case KVM_REG_MIPS_MSA_IR: |
857 | if (!kvm_mips_guest_has_msa(&vcpu->arch)) |
858 | return -EINVAL; |
859 | /* Read-only */ |
860 | break; |
861 | case KVM_REG_MIPS_MSA_CSR: |
862 | if (!kvm_mips_guest_has_msa(&vcpu->arch)) |
863 | return -EINVAL; |
864 | fpu->msacsr = v; |
865 | break; |
866 | |
867 | /* registers to be handled specially */ |
868 | default: |
869 | return kvm_mips_callbacks->set_one_reg(vcpu, reg, v); |
870 | } |
871 | return 0; |
872 | } |
873 | |
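/* Enable an optional vCPU capability (currently KVM_CAP_MIPS_FPU and _MSA) */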
874 | static int kvm_vcpu_ioctl_enable_cap(struct kvm_vcpu *vcpu, |
875 | struct kvm_enable_cap *cap) |
876 | { |
877 | int r = 0; |
878 | |
	if (!kvm_vm_ioctl_check_extension(vcpu->kvm, cap->cap))
880 | return -EINVAL; |
881 | if (cap->flags) |
882 | return -EINVAL; |
883 | if (cap->args[0]) |
884 | return -EINVAL; |
885 | |
886 | switch (cap->cap) { |
887 | case KVM_CAP_MIPS_FPU: |
888 | vcpu->arch.fpu_enabled = true; |
889 | break; |
890 | case KVM_CAP_MIPS_MSA: |
891 | vcpu->arch.msa_enabled = true; |
892 | break; |
893 | default: |
894 | r = -EINVAL; |
895 | break; |
896 | } |
897 | |
898 | return r; |
899 | } |
900 | |
901 | long kvm_arch_vcpu_async_ioctl(struct file *filp, unsigned int ioctl, |
902 | unsigned long arg) |
903 | { |
904 | struct kvm_vcpu *vcpu = filp->private_data; |
905 | void __user *argp = (void __user *)arg; |
906 | |
907 | if (ioctl == KVM_INTERRUPT) { |
908 | struct kvm_mips_interrupt irq; |
909 | |
		if (copy_from_user(&irq, argp, sizeof(irq)))
			return -EFAULT;
		kvm_debug("[%d] %s: irq: %d\n", vcpu->vcpu_id, __func__,
			  irq.irq);

		return kvm_vcpu_ioctl_interrupt(vcpu, &irq);
916 | } |
917 | |
918 | return -ENOIOCTLCMD; |
919 | } |
920 | |
921 | long kvm_arch_vcpu_ioctl(struct file *filp, unsigned int ioctl, |
922 | unsigned long arg) |
923 | { |
924 | struct kvm_vcpu *vcpu = filp->private_data; |
925 | void __user *argp = (void __user *)arg; |
926 | long r; |
927 | |
928 | vcpu_load(vcpu); |
929 | |
930 | switch (ioctl) { |
931 | case KVM_SET_ONE_REG: |
932 | case KVM_GET_ONE_REG: { |
933 | struct kvm_one_reg reg; |
934 | |
935 | r = -EFAULT; |
936 | if (copy_from_user(to: ®, from: argp, n: sizeof(reg))) |
937 | break; |
938 | if (ioctl == KVM_SET_ONE_REG) |
939 | r = kvm_mips_set_reg(vcpu, reg: ®); |
940 | else |
941 | r = kvm_mips_get_reg(vcpu, reg: ®); |
942 | break; |
943 | } |
944 | case KVM_GET_REG_LIST: { |
945 | struct kvm_reg_list __user *user_list = argp; |
946 | struct kvm_reg_list reg_list; |
947 | unsigned n; |
948 | |
949 | r = -EFAULT; |
950 | if (copy_from_user(to: ®_list, from: user_list, n: sizeof(reg_list))) |
951 | break; |
952 | n = reg_list.n; |
953 | reg_list.n = kvm_mips_num_regs(vcpu); |
954 | if (copy_to_user(to: user_list, from: ®_list, n: sizeof(reg_list))) |
955 | break; |
956 | r = -E2BIG; |
957 | if (n < reg_list.n) |
958 | break; |
959 | r = kvm_mips_copy_reg_indices(vcpu, indices: user_list->reg); |
960 | break; |
961 | } |
962 | case KVM_ENABLE_CAP: { |
963 | struct kvm_enable_cap cap; |
964 | |
965 | r = -EFAULT; |
		if (copy_from_user(&cap, argp, sizeof(cap)))
			break;
		r = kvm_vcpu_ioctl_enable_cap(vcpu, &cap);
969 | break; |
970 | } |
971 | default: |
972 | r = -ENOIOCTLCMD; |
973 | } |
974 | |
975 | vcpu_put(vcpu); |
976 | return r; |
977 | } |
978 | |
979 | void kvm_arch_sync_dirty_log(struct kvm *kvm, struct kvm_memory_slot *memslot) |
980 | { |
981 | |
982 | } |
983 | |
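/*
 * Let the backend prepare for the GPA mappings being flushed; returning 1
 * leaves the actual TLB flush to the generic vCPU request mechanism.
 */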
984 | int kvm_arch_flush_remote_tlbs(struct kvm *kvm) |
985 | { |
986 | kvm_mips_callbacks->prepare_flush_shadow(kvm); |
987 | return 1; |
988 | } |
989 | |
990 | int kvm_arch_vm_ioctl(struct file *filp, unsigned int ioctl, unsigned long arg) |
991 | { |
992 | int r; |
993 | |
994 | switch (ioctl) { |
995 | default: |
996 | r = -ENOIOCTLCMD; |
997 | } |
998 | |
999 | return r; |
1000 | } |
1001 | |
1002 | int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu, |
1003 | struct kvm_sregs *sregs) |
1004 | { |
1005 | return -ENOIOCTLCMD; |
1006 | } |
1007 | |
1008 | int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu, |
1009 | struct kvm_sregs *sregs) |
1010 | { |
1011 | return -ENOIOCTLCMD; |
1012 | } |
1013 | |
1014 | void kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu) |
1015 | { |
1016 | } |
1017 | |
1018 | int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu) |
1019 | { |
1020 | return -ENOIOCTLCMD; |
1021 | } |
1022 | |
1023 | int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu) |
1024 | { |
1025 | return -ENOIOCTLCMD; |
1026 | } |
1027 | |
1028 | vm_fault_t kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf) |
1029 | { |
1030 | return VM_FAULT_SIGBUS; |
1031 | } |
1032 | |
1033 | int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext) |
1034 | { |
1035 | int r; |
1036 | |
1037 | switch (ext) { |
1038 | case KVM_CAP_ONE_REG: |
1039 | case KVM_CAP_ENABLE_CAP: |
1040 | case KVM_CAP_READONLY_MEM: |
1041 | case KVM_CAP_SYNC_MMU: |
1042 | case KVM_CAP_IMMEDIATE_EXIT: |
1043 | r = 1; |
1044 | break; |
1045 | case KVM_CAP_NR_VCPUS: |
1046 | r = min_t(unsigned int, num_online_cpus(), KVM_MAX_VCPUS); |
1047 | break; |
1048 | case KVM_CAP_MAX_VCPUS: |
1049 | r = KVM_MAX_VCPUS; |
1050 | break; |
1051 | case KVM_CAP_MAX_VCPU_ID: |
1052 | r = KVM_MAX_VCPU_IDS; |
1053 | break; |
1054 | case KVM_CAP_MIPS_FPU: |
1055 | /* We don't handle systems with inconsistent cpu_has_fpu */ |
1056 | r = !!raw_cpu_has_fpu; |
1057 | break; |
1058 | case KVM_CAP_MIPS_MSA: |
1059 | /* |
1060 | * We don't support MSA vector partitioning yet: |
1061 | * 1) It would require explicit support which can't be tested |
1062 | * yet due to lack of support in current hardware. |
1063 | * 2) It extends the state that would need to be saved/restored |
1064 | * by e.g. QEMU for migration. |
1065 | * |
1066 | * When vector partitioning hardware becomes available, support |
1067 | * could be added by requiring a flag when enabling |
1068 | * KVM_CAP_MIPS_MSA capability to indicate that userland knows |
1069 | * to save/restore the appropriate extra state. |
1070 | */ |
1071 | r = cpu_has_msa && !(boot_cpu_data.msa_id & MSA_IR_WRPF); |
1072 | break; |
1073 | default: |
1074 | r = kvm_mips_callbacks->check_extension(kvm, ext); |
1075 | break; |
1076 | } |
1077 | return r; |
1078 | } |
1079 | |
1080 | int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu) |
1081 | { |
1082 | return kvm_mips_pending_timer(vcpu) || |
1083 | kvm_read_c0_guest_cause(&vcpu->arch.cop0) & C_TI; |
1084 | } |
1085 | |
1086 | int kvm_arch_vcpu_dump_regs(struct kvm_vcpu *vcpu) |
1087 | { |
1088 | int i; |
1089 | struct mips_coproc *cop0; |
1090 | |
1091 | if (!vcpu) |
1092 | return -1; |
1093 | |
1094 | kvm_debug("VCPU Register Dump:\n" ); |
1095 | kvm_debug("\tpc = 0x%08lx\n" , vcpu->arch.pc); |
1096 | kvm_debug("\texceptions: %08lx\n" , vcpu->arch.pending_exceptions); |
1097 | |
1098 | for (i = 0; i < 32; i += 4) { |
1099 | kvm_debug("\tgpr%02d: %08lx %08lx %08lx %08lx\n" , i, |
1100 | vcpu->arch.gprs[i], |
1101 | vcpu->arch.gprs[i + 1], |
1102 | vcpu->arch.gprs[i + 2], vcpu->arch.gprs[i + 3]); |
1103 | } |
1104 | kvm_debug("\thi: 0x%08lx\n" , vcpu->arch.hi); |
1105 | kvm_debug("\tlo: 0x%08lx\n" , vcpu->arch.lo); |
1106 | |
1107 | cop0 = &vcpu->arch.cop0; |
1108 | kvm_debug("\tStatus: 0x%08x, Cause: 0x%08x\n" , |
1109 | kvm_read_c0_guest_status(cop0), |
1110 | kvm_read_c0_guest_cause(cop0)); |
1111 | |
1112 | kvm_debug("\tEPC: 0x%08lx\n" , kvm_read_c0_guest_epc(cop0)); |
1113 | |
1114 | return 0; |
1115 | } |
1116 | |
1117 | int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs) |
1118 | { |
1119 | int i; |
1120 | |
1121 | vcpu_load(vcpu); |
1122 | |
1123 | for (i = 1; i < ARRAY_SIZE(vcpu->arch.gprs); i++) |
1124 | vcpu->arch.gprs[i] = regs->gpr[i]; |
1125 | vcpu->arch.gprs[0] = 0; /* zero is special, and cannot be set. */ |
1126 | vcpu->arch.hi = regs->hi; |
1127 | vcpu->arch.lo = regs->lo; |
1128 | vcpu->arch.pc = regs->pc; |
1129 | |
1130 | vcpu_put(vcpu); |
1131 | return 0; |
1132 | } |
1133 | |
1134 | int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs) |
1135 | { |
1136 | int i; |
1137 | |
1138 | vcpu_load(vcpu); |
1139 | |
1140 | for (i = 0; i < ARRAY_SIZE(vcpu->arch.gprs); i++) |
1141 | regs->gpr[i] = vcpu->arch.gprs[i]; |
1142 | |
1143 | regs->hi = vcpu->arch.hi; |
1144 | regs->lo = vcpu->arch.lo; |
1145 | regs->pc = vcpu->arch.pc; |
1146 | |
1147 | vcpu_put(vcpu); |
1148 | return 0; |
1149 | } |
1150 | |
1151 | int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu, |
1152 | struct kvm_translation *tr) |
1153 | { |
1154 | return 0; |
1155 | } |
1156 | |
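/* Set root CP0_Status bits needed by the host, currently just ST0_MX for DSP */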
1157 | static void kvm_mips_set_c0_status(void) |
1158 | { |
1159 | u32 status = read_c0_status(); |
1160 | |
1161 | if (cpu_has_dsp) |
1162 | status |= (ST0_MX); |
1163 | |
1164 | write_c0_status(status); |
1165 | ehb(); |
1166 | } |
1167 | |
1168 | /* |
1169 | * Return value is in the form (errcode<<2 | RESUME_FLAG_HOST | RESUME_FLAG_NV) |
1170 | */ |
1171 | static int __kvm_mips_handle_exit(struct kvm_vcpu *vcpu) |
1172 | { |
1173 | struct kvm_run *run = vcpu->run; |
1174 | u32 cause = vcpu->arch.host_cp0_cause; |
1175 | u32 exccode = (cause >> CAUSEB_EXCCODE) & 0x1f; |
1176 | u32 __user *opc = (u32 __user *) vcpu->arch.pc; |
1177 | unsigned long badvaddr = vcpu->arch.host_cp0_badvaddr; |
1178 | enum emulation_result er = EMULATE_DONE; |
1179 | u32 inst; |
1180 | int ret = RESUME_GUEST; |
1181 | |
1182 | vcpu->mode = OUTSIDE_GUEST_MODE; |
1183 | |
1184 | /* Set a default exit reason */ |
1185 | run->exit_reason = KVM_EXIT_UNKNOWN; |
1186 | run->ready_for_interrupt_injection = 1; |
1187 | |
1188 | /* |
1189 | * Set the appropriate status bits based on host CPU features, |
1190 | * before we hit the scheduler |
1191 | */ |
1192 | kvm_mips_set_c0_status(); |
1193 | |
1194 | local_irq_enable(); |
1195 | |
1196 | kvm_debug("kvm_mips_handle_exit: cause: %#x, PC: %p, kvm_run: %p, kvm_vcpu: %p\n" , |
1197 | cause, opc, run, vcpu); |
	trace_kvm_exit(vcpu, exccode);
1199 | |
1200 | switch (exccode) { |
1201 | case EXCCODE_INT: |
1202 | kvm_debug("[%d]EXCCODE_INT @ %p\n" , vcpu->vcpu_id, opc); |
1203 | |
1204 | ++vcpu->stat.int_exits; |
1205 | |
1206 | if (need_resched()) |
1207 | cond_resched(); |
1208 | |
1209 | ret = RESUME_GUEST; |
1210 | break; |
1211 | |
1212 | case EXCCODE_CPU: |
1213 | kvm_debug("EXCCODE_CPU: @ PC: %p\n" , opc); |
1214 | |
1215 | ++vcpu->stat.cop_unusable_exits; |
1216 | ret = kvm_mips_callbacks->handle_cop_unusable(vcpu); |
1217 | /* XXXKYMA: Might need to return to user space */ |
1218 | if (run->exit_reason == KVM_EXIT_IRQ_WINDOW_OPEN) |
1219 | ret = RESUME_HOST; |
1220 | break; |
1221 | |
1222 | case EXCCODE_MOD: |
1223 | ++vcpu->stat.tlbmod_exits; |
1224 | ret = kvm_mips_callbacks->handle_tlb_mod(vcpu); |
1225 | break; |
1226 | |
1227 | case EXCCODE_TLBS: |
1228 | kvm_debug("TLB ST fault: cause %#x, status %#x, PC: %p, BadVaddr: %#lx\n" , |
1229 | cause, kvm_read_c0_guest_status(&vcpu->arch.cop0), opc, |
1230 | badvaddr); |
1231 | |
1232 | ++vcpu->stat.tlbmiss_st_exits; |
1233 | ret = kvm_mips_callbacks->handle_tlb_st_miss(vcpu); |
1234 | break; |
1235 | |
1236 | case EXCCODE_TLBL: |
1237 | kvm_debug("TLB LD fault: cause %#x, PC: %p, BadVaddr: %#lx\n" , |
1238 | cause, opc, badvaddr); |
1239 | |
1240 | ++vcpu->stat.tlbmiss_ld_exits; |
1241 | ret = kvm_mips_callbacks->handle_tlb_ld_miss(vcpu); |
1242 | break; |
1243 | |
1244 | case EXCCODE_ADES: |
1245 | ++vcpu->stat.addrerr_st_exits; |
1246 | ret = kvm_mips_callbacks->handle_addr_err_st(vcpu); |
1247 | break; |
1248 | |
1249 | case EXCCODE_ADEL: |
1250 | ++vcpu->stat.addrerr_ld_exits; |
1251 | ret = kvm_mips_callbacks->handle_addr_err_ld(vcpu); |
1252 | break; |
1253 | |
1254 | case EXCCODE_SYS: |
1255 | ++vcpu->stat.syscall_exits; |
1256 | ret = kvm_mips_callbacks->handle_syscall(vcpu); |
1257 | break; |
1258 | |
1259 | case EXCCODE_RI: |
1260 | ++vcpu->stat.resvd_inst_exits; |
1261 | ret = kvm_mips_callbacks->handle_res_inst(vcpu); |
1262 | break; |
1263 | |
1264 | case EXCCODE_BP: |
1265 | ++vcpu->stat.break_inst_exits; |
1266 | ret = kvm_mips_callbacks->handle_break(vcpu); |
1267 | break; |
1268 | |
1269 | case EXCCODE_TR: |
1270 | ++vcpu->stat.trap_inst_exits; |
1271 | ret = kvm_mips_callbacks->handle_trap(vcpu); |
1272 | break; |
1273 | |
1274 | case EXCCODE_MSAFPE: |
1275 | ++vcpu->stat.msa_fpe_exits; |
1276 | ret = kvm_mips_callbacks->handle_msa_fpe(vcpu); |
1277 | break; |
1278 | |
1279 | case EXCCODE_FPE: |
1280 | ++vcpu->stat.fpe_exits; |
1281 | ret = kvm_mips_callbacks->handle_fpe(vcpu); |
1282 | break; |
1283 | |
1284 | case EXCCODE_MSADIS: |
1285 | ++vcpu->stat.msa_disabled_exits; |
1286 | ret = kvm_mips_callbacks->handle_msa_disabled(vcpu); |
1287 | break; |
1288 | |
1289 | case EXCCODE_GE: |
1290 | /* defer exit accounting to handler */ |
1291 | ret = kvm_mips_callbacks->handle_guest_exit(vcpu); |
1292 | break; |
1293 | |
1294 | default: |
1295 | if (cause & CAUSEF_BD) |
1296 | opc += 1; |
1297 | inst = 0; |
1298 | kvm_get_badinstr(opc, vcpu, &inst); |
1299 | kvm_err("Exception Code: %d, not yet handled, @ PC: %p, inst: 0x%08x BadVaddr: %#lx Status: %#x\n" , |
1300 | exccode, opc, inst, badvaddr, |
1301 | kvm_read_c0_guest_status(&vcpu->arch.cop0)); |
1302 | kvm_arch_vcpu_dump_regs(vcpu); |
1303 | run->exit_reason = KVM_EXIT_INTERNAL_ERROR; |
1304 | ret = RESUME_HOST; |
1305 | break; |
1306 | |
1307 | } |
1308 | |
1309 | local_irq_disable(); |
1310 | |
1311 | if (ret == RESUME_GUEST) |
1312 | kvm_vz_acquire_htimer(vcpu); |
1313 | |
1314 | if (er == EMULATE_DONE && !(ret & RESUME_HOST)) |
1315 | kvm_mips_deliver_interrupts(vcpu, cause); |
1316 | |
1317 | if (!(ret & RESUME_HOST)) { |
1318 | /* Only check for signals if not already exiting to userspace */ |
1319 | if (signal_pending(current)) { |
1320 | run->exit_reason = KVM_EXIT_INTR; |
1321 | ret = (-EINTR << 2) | RESUME_HOST; |
1322 | ++vcpu->stat.signal_exits; |
1323 | trace_kvm_exit(vcpu, KVM_TRACE_EXIT_SIGNAL); |
1324 | } |
1325 | } |
1326 | |
1327 | if (ret == RESUME_GUEST) { |
1328 | trace_kvm_reenter(vcpu); |
1329 | |
1330 | /* |
1331 | * Make sure the read of VCPU requests in vcpu_reenter() |
1332 | * callback is not reordered ahead of the write to vcpu->mode, |
1333 | * or we could miss a TLB flush request while the requester sees |
1334 | * the VCPU as outside of guest mode and not needing an IPI. |
1335 | */ |
1336 | smp_store_mb(vcpu->mode, IN_GUEST_MODE); |
1337 | |
1338 | kvm_mips_callbacks->vcpu_reenter(vcpu); |
1339 | |
1340 | /* |
1341 | * If FPU / MSA are enabled (i.e. the guest's FPU / MSA context |
1342 | * is live), restore FCR31 / MSACSR. |
1343 | * |
1344 | * This should be before returning to the guest exception |
1345 | * vector, as it may well cause an [MSA] FP exception if there |
1346 | * are pending exception bits unmasked. (see |
1347 | * kvm_mips_csr_die_notifier() for how that is handled). |
1348 | */ |
1349 | if (kvm_mips_guest_has_fpu(&vcpu->arch) && |
1350 | read_c0_status() & ST0_CU1) |
1351 | __kvm_restore_fcsr(&vcpu->arch); |
1352 | |
1353 | if (kvm_mips_guest_has_msa(&vcpu->arch) && |
1354 | read_c0_config5() & MIPS_CONF5_MSAEN) |
1355 | __kvm_restore_msacsr(&vcpu->arch); |
1356 | } |
1357 | return ret; |
1358 | } |
1359 | |
1360 | int noinstr kvm_mips_handle_exit(struct kvm_vcpu *vcpu) |
1361 | { |
1362 | int ret; |
1363 | |
1364 | guest_state_exit_irqoff(); |
1365 | ret = __kvm_mips_handle_exit(vcpu); |
1366 | guest_state_enter_irqoff(); |
1367 | |
1368 | return ret; |
1369 | } |
1370 | |
1371 | /* Enable FPU for guest and restore context */ |
1372 | void kvm_own_fpu(struct kvm_vcpu *vcpu) |
1373 | { |
1374 | struct mips_coproc *cop0 = &vcpu->arch.cop0; |
1375 | unsigned int sr, cfg5; |
1376 | |
1377 | preempt_disable(); |
1378 | |
1379 | sr = kvm_read_c0_guest_status(cop0); |
1380 | |
1381 | /* |
1382 | * If MSA state is already live, it is undefined how it interacts with |
1383 | * FR=0 FPU state, and we don't want to hit reserved instruction |
1384 | * exceptions trying to save the MSA state later when CU=1 && FR=1, so |
1385 | * play it safe and save it first. |
1386 | */ |
1387 | if (cpu_has_msa && sr & ST0_CU1 && !(sr & ST0_FR) && |
1388 | vcpu->arch.aux_inuse & KVM_MIPS_AUX_MSA) |
1389 | kvm_lose_fpu(vcpu); |
1390 | |
1391 | /* |
1392 | * Enable FPU for guest |
1393 | * We set FR and FRE according to guest context |
1394 | */ |
1395 | change_c0_status(ST0_CU1 | ST0_FR, sr); |
1396 | if (cpu_has_fre) { |
1397 | cfg5 = kvm_read_c0_guest_config5(cop0); |
1398 | change_c0_config5(MIPS_CONF5_FRE, cfg5); |
1399 | } |
1400 | enable_fpu_hazard(); |
1401 | |
1402 | /* If guest FPU state not active, restore it now */ |
1403 | if (!(vcpu->arch.aux_inuse & KVM_MIPS_AUX_FPU)) { |
1404 | __kvm_restore_fpu(&vcpu->arch); |
1405 | vcpu->arch.aux_inuse |= KVM_MIPS_AUX_FPU; |
1406 | trace_kvm_aux(vcpu, KVM_TRACE_AUX_RESTORE, KVM_TRACE_AUX_FPU); |
1407 | } else { |
1408 | trace_kvm_aux(vcpu, KVM_TRACE_AUX_ENABLE, KVM_TRACE_AUX_FPU); |
1409 | } |
1410 | |
1411 | preempt_enable(); |
1412 | } |
1413 | |
1414 | #ifdef CONFIG_CPU_HAS_MSA |
1415 | /* Enable MSA for guest and restore context */ |
1416 | void kvm_own_msa(struct kvm_vcpu *vcpu) |
1417 | { |
1418 | struct mips_coproc *cop0 = &vcpu->arch.cop0; |
1419 | unsigned int sr, cfg5; |
1420 | |
1421 | preempt_disable(); |
1422 | |
1423 | /* |
1424 | * Enable FPU if enabled in guest, since we're restoring FPU context |
1425 | * anyway. We set FR and FRE according to guest context. |
1426 | */ |
1427 | if (kvm_mips_guest_has_fpu(&vcpu->arch)) { |
1428 | sr = kvm_read_c0_guest_status(cop0); |
1429 | |
1430 | /* |
1431 | * If FR=0 FPU state is already live, it is undefined how it |
1432 | * interacts with MSA state, so play it safe and save it first. |
1433 | */ |
1434 | if (!(sr & ST0_FR) && |
1435 | (vcpu->arch.aux_inuse & (KVM_MIPS_AUX_FPU | |
1436 | KVM_MIPS_AUX_MSA)) == KVM_MIPS_AUX_FPU) |
1437 | kvm_lose_fpu(vcpu); |
1438 | |
1439 | change_c0_status(ST0_CU1 | ST0_FR, sr); |
1440 | if (sr & ST0_CU1 && cpu_has_fre) { |
1441 | cfg5 = kvm_read_c0_guest_config5(cop0); |
1442 | change_c0_config5(MIPS_CONF5_FRE, cfg5); |
1443 | } |
1444 | } |
1445 | |
1446 | /* Enable MSA for guest */ |
1447 | set_c0_config5(MIPS_CONF5_MSAEN); |
1448 | enable_fpu_hazard(); |
1449 | |
1450 | switch (vcpu->arch.aux_inuse & (KVM_MIPS_AUX_FPU | KVM_MIPS_AUX_MSA)) { |
1451 | case KVM_MIPS_AUX_FPU: |
1452 | /* |
1453 | * Guest FPU state already loaded, only restore upper MSA state |
1454 | */ |
1455 | __kvm_restore_msa_upper(&vcpu->arch); |
1456 | vcpu->arch.aux_inuse |= KVM_MIPS_AUX_MSA; |
1457 | trace_kvm_aux(vcpu, KVM_TRACE_AUX_RESTORE, KVM_TRACE_AUX_MSA); |
1458 | break; |
1459 | case 0: |
1460 | /* Neither FPU or MSA already active, restore full MSA state */ |
1461 | __kvm_restore_msa(&vcpu->arch); |
1462 | vcpu->arch.aux_inuse |= KVM_MIPS_AUX_MSA; |
1463 | if (kvm_mips_guest_has_fpu(&vcpu->arch)) |
1464 | vcpu->arch.aux_inuse |= KVM_MIPS_AUX_FPU; |
1465 | trace_kvm_aux(vcpu, KVM_TRACE_AUX_RESTORE, |
1466 | KVM_TRACE_AUX_FPU_MSA); |
1467 | break; |
1468 | default: |
1469 | trace_kvm_aux(vcpu, KVM_TRACE_AUX_ENABLE, KVM_TRACE_AUX_MSA); |
1470 | break; |
1471 | } |
1472 | |
1473 | preempt_enable(); |
1474 | } |
1475 | #endif |
1476 | |
1477 | /* Drop FPU & MSA without saving it */ |
1478 | void kvm_drop_fpu(struct kvm_vcpu *vcpu) |
1479 | { |
1480 | preempt_disable(); |
1481 | if (cpu_has_msa && vcpu->arch.aux_inuse & KVM_MIPS_AUX_MSA) { |
1482 | disable_msa(); |
1483 | trace_kvm_aux(vcpu, KVM_TRACE_AUX_DISCARD, KVM_TRACE_AUX_MSA); |
1484 | vcpu->arch.aux_inuse &= ~KVM_MIPS_AUX_MSA; |
1485 | } |
1486 | if (vcpu->arch.aux_inuse & KVM_MIPS_AUX_FPU) { |
1487 | clear_c0_status(ST0_CU1 | ST0_FR); |
1488 | trace_kvm_aux(vcpu, KVM_TRACE_AUX_DISCARD, KVM_TRACE_AUX_FPU); |
1489 | vcpu->arch.aux_inuse &= ~KVM_MIPS_AUX_FPU; |
1490 | } |
1491 | preempt_enable(); |
1492 | } |
1493 | |
1494 | /* Save and disable FPU & MSA */ |
1495 | void kvm_lose_fpu(struct kvm_vcpu *vcpu) |
1496 | { |
1497 | /* |
1498 | * With T&E, FPU & MSA get disabled in root context (hardware) when it |
1499 | * is disabled in guest context (software), but the register state in |
1500 | * the hardware may still be in use. |
1501 | * This is why we explicitly re-enable the hardware before saving. |
1502 | */ |
1503 | |
1504 | preempt_disable(); |
1505 | if (cpu_has_msa && vcpu->arch.aux_inuse & KVM_MIPS_AUX_MSA) { |
1506 | __kvm_save_msa(&vcpu->arch); |
1507 | trace_kvm_aux(vcpu, KVM_TRACE_AUX_SAVE, KVM_TRACE_AUX_FPU_MSA); |
1508 | |
1509 | /* Disable MSA & FPU */ |
1510 | disable_msa(); |
1511 | if (vcpu->arch.aux_inuse & KVM_MIPS_AUX_FPU) { |
1512 | clear_c0_status(ST0_CU1 | ST0_FR); |
1513 | disable_fpu_hazard(); |
1514 | } |
1515 | vcpu->arch.aux_inuse &= ~(KVM_MIPS_AUX_FPU | KVM_MIPS_AUX_MSA); |
1516 | } else if (vcpu->arch.aux_inuse & KVM_MIPS_AUX_FPU) { |
1517 | __kvm_save_fpu(&vcpu->arch); |
1518 | vcpu->arch.aux_inuse &= ~KVM_MIPS_AUX_FPU; |
1519 | trace_kvm_aux(vcpu, KVM_TRACE_AUX_SAVE, KVM_TRACE_AUX_FPU); |
1520 | |
1521 | /* Disable FPU */ |
1522 | clear_c0_status(ST0_CU1 | ST0_FR); |
1523 | disable_fpu_hazard(); |
1524 | } |
1525 | preempt_enable(); |
1526 | } |
1527 | |
1528 | /* |
1529 | * Step over a specific ctc1 to FCSR and a specific ctcmsa to MSACSR which are |
1530 | * used to restore guest FCSR/MSACSR state and may trigger a "harmless" FP/MSAFP |
1531 | * exception if cause bits are set in the value being written. |
1532 | */ |
1533 | static int kvm_mips_csr_die_notify(struct notifier_block *self, |
1534 | unsigned long cmd, void *ptr) |
1535 | { |
1536 | struct die_args *args = (struct die_args *)ptr; |
1537 | struct pt_regs *regs = args->regs; |
1538 | unsigned long pc; |
1539 | |
1540 | /* Only interested in FPE and MSAFPE */ |
1541 | if (cmd != DIE_FP && cmd != DIE_MSAFP) |
1542 | return NOTIFY_DONE; |
1543 | |
1544 | /* Return immediately if guest context isn't active */ |
1545 | if (!(current->flags & PF_VCPU)) |
1546 | return NOTIFY_DONE; |
1547 | |
1548 | /* Should never get here from user mode */ |
1549 | BUG_ON(user_mode(regs)); |
1550 | |
1551 | pc = instruction_pointer(regs); |
1552 | switch (cmd) { |
1553 | case DIE_FP: |
1554 | /* match 2nd instruction in __kvm_restore_fcsr */ |
1555 | if (pc != (unsigned long)&__kvm_restore_fcsr + 4) |
1556 | return NOTIFY_DONE; |
1557 | break; |
1558 | case DIE_MSAFP: |
1559 | /* match 2nd/3rd instruction in __kvm_restore_msacsr */ |
1560 | if (!cpu_has_msa || |
1561 | pc < (unsigned long)&__kvm_restore_msacsr + 4 || |
1562 | pc > (unsigned long)&__kvm_restore_msacsr + 8) |
1563 | return NOTIFY_DONE; |
1564 | break; |
1565 | } |
1566 | |
1567 | /* Move PC forward a little and continue executing */ |
1568 | instruction_pointer(regs) += 4; |
1569 | |
1570 | return NOTIFY_STOP; |
1571 | } |
1572 | |
1573 | static struct notifier_block kvm_mips_csr_die_notifier = { |
1574 | .notifier_call = kvm_mips_csr_die_notify, |
1575 | }; |
1576 | |
1577 | static u32 kvm_default_priority_to_irq[MIPS_EXC_MAX] = { |
1578 | [MIPS_EXC_INT_TIMER] = C_IRQ5, |
1579 | [MIPS_EXC_INT_IO_1] = C_IRQ0, |
1580 | [MIPS_EXC_INT_IPI_1] = C_IRQ1, |
1581 | [MIPS_EXC_INT_IPI_2] = C_IRQ2, |
1582 | }; |
1583 | |
1584 | static u32 kvm_loongson3_priority_to_irq[MIPS_EXC_MAX] = { |
1585 | [MIPS_EXC_INT_TIMER] = C_IRQ5, |
1586 | [MIPS_EXC_INT_IO_1] = C_IRQ0, |
1587 | [MIPS_EXC_INT_IO_2] = C_IRQ1, |
1588 | [MIPS_EXC_INT_IPI_1] = C_IRQ4, |
1589 | }; |
1590 | |
1591 | u32 *kvm_priority_to_irq = kvm_default_priority_to_irq; |
1592 | |
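/*
 * Map a Cause.IP interrupt number back to its priority index, or return
 * MIPS_EXC_MAX if it isn't in the active priority table.
 */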
1593 | u32 kvm_irq_to_priority(u32 irq) |
1594 | { |
1595 | int i; |
1596 | |
1597 | for (i = MIPS_EXC_INT_TIMER; i < MIPS_EXC_MAX; i++) { |
1598 | if (kvm_priority_to_irq[i] == (1 << (irq + 8))) |
1599 | return i; |
1600 | } |
1601 | |
1602 | return MIPS_EXC_MAX; |
1603 | } |
1604 | |
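/*
 * Module init: build the entry/exit code templates, select the platform IRQ
 * priority table, register the FCSR/MSACSR die notifier, then register with
 * the generic KVM core.
 */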
1605 | static int __init kvm_mips_init(void) |
1606 | { |
1607 | int ret; |
1608 | |
1609 | if (cpu_has_mmid) { |
1610 | pr_warn("KVM does not yet support MMIDs. KVM Disabled\n" ); |
1611 | return -EOPNOTSUPP; |
1612 | } |
1613 | |
1614 | ret = kvm_mips_entry_setup(); |
1615 | if (ret) |
1616 | return ret; |
1617 | |
	ret = kvm_mips_emulation_init();
	if (ret)
		return ret;

	if (boot_cpu_type() == CPU_LOONGSON64)
		kvm_priority_to_irq = kvm_loongson3_priority_to_irq;

	register_die_notifier(&kvm_mips_csr_die_notifier);
1627 | |
	ret = kvm_init(sizeof(struct kvm_vcpu), 0, THIS_MODULE);
1629 | if (ret) { |
		unregister_die_notifier(&kvm_mips_csr_die_notifier);
1631 | return ret; |
1632 | } |
1633 | return 0; |
1634 | } |
1635 | |
1636 | static void __exit kvm_mips_exit(void) |
1637 | { |
1638 | kvm_exit(); |
1639 | |
	unregister_die_notifier(&kvm_mips_csr_die_notifier);
1641 | } |
1642 | |
1643 | module_init(kvm_mips_init); |
1644 | module_exit(kvm_mips_exit); |
1645 | |
1646 | EXPORT_TRACEPOINT_SYMBOL(kvm_exit); |
1647 | |