1 | // SPDX-License-Identifier: GPL-2.0-only |
2 | /* |
3 | * Copyright (C) 2009. SUSE Linux Products GmbH. All rights reserved. |
4 | * |
5 | * Authors: |
6 | * Alexander Graf <agraf@suse.de> |
7 | * Kevin Wolf <mail@kevin-wolf.de> |
8 | * Paul Mackerras <paulus@samba.org> |
9 | * |
10 | * Description: |
11 | * Functions relating to running KVM on Book 3S processors where |
12 | * we don't have access to hypervisor mode, and we run the guest |
13 | * in problem state (user mode). |
14 | * |
15 | * This file is derived from arch/powerpc/kvm/44x.c, |
16 | * by Hollis Blanchard <hollisb@us.ibm.com>. |
17 | */ |
18 | |
19 | #include <linux/kvm_host.h> |
20 | #include <linux/export.h> |
21 | #include <linux/err.h> |
22 | #include <linux/slab.h> |
23 | |
24 | #include <asm/reg.h> |
25 | #include <asm/cputable.h> |
26 | #include <asm/cacheflush.h> |
27 | #include <linux/uaccess.h> |
28 | #include <asm/interrupt.h> |
29 | #include <asm/io.h> |
30 | #include <asm/kvm_ppc.h> |
31 | #include <asm/kvm_book3s.h> |
32 | #include <asm/mmu_context.h> |
33 | #include <asm/switch_to.h> |
34 | #include <asm/firmware.h> |
35 | #include <asm/setup.h> |
36 | #include <linux/gfp.h> |
37 | #include <linux/sched.h> |
38 | #include <linux/vmalloc.h> |
39 | #include <linux/highmem.h> |
40 | #include <linux/module.h> |
41 | #include <linux/miscdevice.h> |
42 | #include <asm/asm-prototypes.h> |
43 | #include <asm/tm.h> |
44 | |
45 | #include "book3s.h" |
46 | |
47 | #define CREATE_TRACE_POINTS |
48 | #include "trace_pr.h" |
49 | |
50 | /* #define EXIT_DEBUG */ |
51 | /* #define DEBUG_EXT */ |
52 | |
53 | static int kvmppc_handle_ext(struct kvm_vcpu *vcpu, unsigned int exit_nr, |
54 | ulong msr); |
55 | #ifdef CONFIG_PPC_BOOK3S_64 |
56 | static int kvmppc_handle_fac(struct kvm_vcpu *vcpu, ulong fac); |
57 | #endif |
58 | |
59 | /* Some compatibility defines */ |
60 | #ifdef CONFIG_PPC_BOOK3S_32 |
61 | #define MSR_USER32 MSR_USER |
62 | #define MSR_USER64 MSR_USER |
63 | #define HW_PAGE_SIZE PAGE_SIZE |
64 | #define HPTE_R_M _PAGE_COHERENT |
65 | #endif |
66 | |
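/*
 * "Split real mode" on Book3S: instruction relocation off but data
 * relocation on (MSR_IR = 0, MSR_DR = 1). We can't express that
 * combination directly with the shadow MMU, so such guests are run at
 * the SPLIT_HACK_OFFS offset and PC/LR are fixed up on entry and exit.
 */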
67 | static bool kvmppc_is_split_real(struct kvm_vcpu *vcpu) |
68 | { |
69 | ulong msr = kvmppc_get_msr(vcpu); |
70 | return (msr & (MSR_IR|MSR_DR)) == MSR_DR; |
71 | } |
72 | |
73 | static void kvmppc_fixup_split_real(struct kvm_vcpu *vcpu) |
74 | { |
75 | ulong msr = kvmppc_get_msr(vcpu); |
76 | ulong pc = kvmppc_get_pc(vcpu); |
77 | |
78 | /* We are in DR only split real mode */ |
79 | if ((msr & (MSR_IR|MSR_DR)) != MSR_DR) |
80 | return; |
81 | |
82 | /* We have not fixed up the guest already */ |
83 | if (vcpu->arch.hflags & BOOK3S_HFLAG_SPLIT_HACK) |
84 | return; |
85 | |
86 | /* The code is in fixupable address space */ |
87 | if (pc & SPLIT_HACK_MASK) |
88 | return; |
89 | |
90 | vcpu->arch.hflags |= BOOK3S_HFLAG_SPLIT_HACK; |
91 | kvmppc_set_pc(vcpu, pc | SPLIT_HACK_OFFS); |
92 | } |
93 | |
94 | static void kvmppc_unfixup_split_real(struct kvm_vcpu *vcpu) |
95 | { |
96 | if (vcpu->arch.hflags & BOOK3S_HFLAG_SPLIT_HACK) { |
97 | ulong pc = kvmppc_get_pc(vcpu); |
98 | ulong lr = kvmppc_get_lr(vcpu); |
99 | if ((pc & SPLIT_HACK_MASK) == SPLIT_HACK_OFFS) |
100 | kvmppc_set_pc(vcpu, pc & ~SPLIT_HACK_MASK); |
101 | if ((lr & SPLIT_HACK_MASK) == SPLIT_HACK_OFFS) |
102 | kvmppc_set_lr(vcpu, lr & ~SPLIT_HACK_MASK); |
103 | vcpu->arch.hflags &= ~BOOK3S_HFLAG_SPLIT_HACK; |
104 | } |
105 | } |
106 | |
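/*
 * Deliver an interrupt into the guest: stash PC/MSR into SRR0/SRR1,
 * then redirect execution to the vector at HIOR + vec with the guest's
 * interrupt MSR, preserving the transaction-state (TS) bits.
 */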
107 | static void kvmppc_inject_interrupt_pr(struct kvm_vcpu *vcpu, int vec, u64 srr1_flags) |
108 | { |
109 | unsigned long msr, pc, new_msr, new_pc; |
110 | |
111 | kvmppc_unfixup_split_real(vcpu); |
112 | |
113 | msr = kvmppc_get_msr(vcpu); |
114 | pc = kvmppc_get_pc(vcpu); |
115 | new_msr = vcpu->arch.intr_msr; |
116 | new_pc = to_book3s(vcpu)->hior + vec; |
117 | |
118 | #ifdef CONFIG_PPC_BOOK3S_64 |
119 | /* If transactional, change to suspend mode on IRQ delivery */ |
120 | if (MSR_TM_TRANSACTIONAL(msr)) |
121 | new_msr |= MSR_TS_S; |
122 | else |
123 | new_msr |= msr & MSR_TS_MASK; |
124 | #endif |
125 | |
126 | kvmppc_set_srr0(vcpu, pc); |
127 | kvmppc_set_srr1(vcpu, (msr & SRR1_MSR_BITS) | srr1_flags); |
128 | kvmppc_set_pc(vcpu, new_pc); |
129 | kvmppc_set_msr(vcpu, new_msr); |
130 | } |
131 | |
132 | static void kvmppc_core_vcpu_load_pr(struct kvm_vcpu *vcpu, int cpu) |
133 | { |
134 | #ifdef CONFIG_PPC_BOOK3S_64 |
135 | struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu); |
136 | memcpy(svcpu->slb, to_book3s(vcpu)->slb_shadow, sizeof(svcpu->slb)); |
137 | svcpu->slb_max = to_book3s(vcpu)->slb_shadow_max; |
138 | svcpu->in_use = 0; |
139 | svcpu_put(svcpu); |
140 | |
141 | /* Disable AIL if supported */ |
142 | if (cpu_has_feature(CPU_FTR_HVMODE)) { |
143 | if (cpu_has_feature(CPU_FTR_ARCH_207S)) |
144 | mtspr(SPRN_LPCR, mfspr(SPRN_LPCR) & ~LPCR_AIL); |
145 | if (cpu_has_feature(CPU_FTR_ARCH_300) && (current->thread.fscr & FSCR_SCV)) |
146 | mtspr(SPRN_FSCR, mfspr(SPRN_FSCR) & ~FSCR_SCV); |
147 | } |
148 | #endif |
149 | |
150 | vcpu->cpu = smp_processor_id(); |
151 | #ifdef CONFIG_PPC_BOOK3S_32 |
152 | current->thread.kvm_shadow_vcpu = vcpu->arch.shadow_vcpu; |
153 | #endif |
154 | |
155 | if (kvmppc_is_split_real(vcpu)) |
156 | kvmppc_fixup_split_real(vcpu); |
157 | |
158 | kvmppc_restore_tm_pr(vcpu); |
159 | } |
160 | |
161 | static void kvmppc_core_vcpu_put_pr(struct kvm_vcpu *vcpu) |
162 | { |
163 | #ifdef CONFIG_PPC_BOOK3S_64 |
164 | struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu); |
165 | if (svcpu->in_use) { |
166 | kvmppc_copy_from_svcpu(vcpu); |
167 | } |
168 | memcpy(to_book3s(vcpu)->slb_shadow, svcpu->slb, sizeof(svcpu->slb)); |
169 | to_book3s(vcpu)->slb_shadow_max = svcpu->slb_max; |
170 | svcpu_put(svcpu); |
171 | |
172 | /* Enable AIL if supported */ |
173 | if (cpu_has_feature(CPU_FTR_HVMODE)) { |
174 | if (cpu_has_feature(CPU_FTR_ARCH_207S)) |
175 | mtspr(SPRN_LPCR, mfspr(SPRN_LPCR) | LPCR_AIL_3); |
176 | if (cpu_has_feature(CPU_FTR_ARCH_300) && (current->thread.fscr & FSCR_SCV)) |
177 | mtspr(SPRN_FSCR, mfspr(SPRN_FSCR) | FSCR_SCV); |
178 | } |
179 | #endif |
180 | |
181 | if (kvmppc_is_split_real(vcpu)) |
182 | kvmppc_unfixup_split_real(vcpu); |
183 | |
184 | kvmppc_giveup_ext(vcpu, MSR_FP | MSR_VEC | MSR_VSX); |
185 | kvmppc_giveup_fac(vcpu, FSCR_TAR_LG); |
186 | kvmppc_save_tm_pr(vcpu); |
187 | |
188 | vcpu->cpu = -1; |
189 | } |
190 | |
191 | /* Copy data needed by real-mode code from vcpu to shadow vcpu */ |
192 | void kvmppc_copy_to_svcpu(struct kvm_vcpu *vcpu) |
193 | { |
194 | struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu); |
195 | |
196 | svcpu->gpr[0] = vcpu->arch.regs.gpr[0]; |
197 | svcpu->gpr[1] = vcpu->arch.regs.gpr[1]; |
198 | svcpu->gpr[2] = vcpu->arch.regs.gpr[2]; |
199 | svcpu->gpr[3] = vcpu->arch.regs.gpr[3]; |
200 | svcpu->gpr[4] = vcpu->arch.regs.gpr[4]; |
201 | svcpu->gpr[5] = vcpu->arch.regs.gpr[5]; |
202 | svcpu->gpr[6] = vcpu->arch.regs.gpr[6]; |
203 | svcpu->gpr[7] = vcpu->arch.regs.gpr[7]; |
204 | svcpu->gpr[8] = vcpu->arch.regs.gpr[8]; |
205 | svcpu->gpr[9] = vcpu->arch.regs.gpr[9]; |
206 | svcpu->gpr[10] = vcpu->arch.regs.gpr[10]; |
207 | svcpu->gpr[11] = vcpu->arch.regs.gpr[11]; |
208 | svcpu->gpr[12] = vcpu->arch.regs.gpr[12]; |
209 | svcpu->gpr[13] = vcpu->arch.regs.gpr[13]; |
210 | svcpu->cr = vcpu->arch.regs.ccr; |
211 | svcpu->xer = vcpu->arch.regs.xer; |
212 | svcpu->ctr = vcpu->arch.regs.ctr; |
213 | svcpu->lr = vcpu->arch.regs.link; |
214 | svcpu->pc = vcpu->arch.regs.nip; |
215 | #ifdef CONFIG_PPC_BOOK3S_64 |
216 | svcpu->shadow_fscr = vcpu->arch.shadow_fscr; |
217 | #endif |
/*
 * Also save the current timebase value; we use it to derive the
 * guest PURR and SPURR values on exit.
 */
222 | vcpu->arch.entry_tb = get_tb(); |
223 | vcpu->arch.entry_vtb = get_vtb(); |
224 | if (cpu_has_feature(CPU_FTR_ARCH_207S)) |
225 | vcpu->arch.entry_ic = mfspr(SPRN_IC); |
226 | svcpu->in_use = true; |
227 | |
228 | svcpu_put(svcpu); |
229 | } |
230 | |
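/*
 * Recompute the MSR the shadow vcpu actually runs with on hardware:
 * keep the guest-visible mode bits, force on what the host requires
 * (MSR_IR/DR/PR/ME/RI/EE), and pass through whichever math facilities
 * the guest currently owns.
 */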
231 | static void kvmppc_recalc_shadow_msr(struct kvm_vcpu *vcpu) |
232 | { |
233 | ulong guest_msr = kvmppc_get_msr(vcpu); |
234 | ulong smsr = guest_msr; |
235 | |
236 | /* Guest MSR values */ |
237 | #ifdef CONFIG_PPC_TRANSACTIONAL_MEM |
238 | smsr &= MSR_FE0 | MSR_FE1 | MSR_SF | MSR_SE | MSR_BE | MSR_LE | |
239 | MSR_TM | MSR_TS_MASK; |
240 | #else |
241 | smsr &= MSR_FE0 | MSR_FE1 | MSR_SF | MSR_SE | MSR_BE | MSR_LE; |
242 | #endif |
243 | /* Process MSR values */ |
244 | smsr |= MSR_ME | MSR_RI | MSR_IR | MSR_DR | MSR_PR | MSR_EE; |
/* External provider (FP/VEC/VSX) bits that the guest currently owns */
246 | smsr |= (guest_msr & vcpu->arch.guest_owned_ext); |
247 | /* 64-bit Process MSR values */ |
248 | #ifdef CONFIG_PPC_BOOK3S_64 |
249 | smsr |= MSR_HV; |
250 | #endif |
251 | #ifdef CONFIG_PPC_TRANSACTIONAL_MEM |
/*
 * In guest privileged state we want all TM transactions to fail, so
 * clear the MSR TM bit; every tbegin. then traps into the host.
 */
257 | if (!(guest_msr & MSR_PR)) |
258 | smsr &= ~MSR_TM; |
259 | #endif |
260 | vcpu->arch.shadow_msr = smsr; |
261 | } |
262 | |
263 | /* Copy data touched by real-mode code from shadow vcpu back to vcpu */ |
264 | void kvmppc_copy_from_svcpu(struct kvm_vcpu *vcpu) |
265 | { |
266 | struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu); |
267 | #ifdef CONFIG_PPC_TRANSACTIONAL_MEM |
268 | ulong old_msr; |
269 | #endif |
270 | |
271 | /* |
272 | * Maybe we were already preempted and synced the svcpu from |
273 | * our preempt notifiers. Don't bother touching this svcpu then. |
274 | */ |
275 | if (!svcpu->in_use) |
276 | goto out; |
277 | |
278 | vcpu->arch.regs.gpr[0] = svcpu->gpr[0]; |
279 | vcpu->arch.regs.gpr[1] = svcpu->gpr[1]; |
280 | vcpu->arch.regs.gpr[2] = svcpu->gpr[2]; |
281 | vcpu->arch.regs.gpr[3] = svcpu->gpr[3]; |
282 | vcpu->arch.regs.gpr[4] = svcpu->gpr[4]; |
283 | vcpu->arch.regs.gpr[5] = svcpu->gpr[5]; |
284 | vcpu->arch.regs.gpr[6] = svcpu->gpr[6]; |
285 | vcpu->arch.regs.gpr[7] = svcpu->gpr[7]; |
286 | vcpu->arch.regs.gpr[8] = svcpu->gpr[8]; |
287 | vcpu->arch.regs.gpr[9] = svcpu->gpr[9]; |
288 | vcpu->arch.regs.gpr[10] = svcpu->gpr[10]; |
289 | vcpu->arch.regs.gpr[11] = svcpu->gpr[11]; |
290 | vcpu->arch.regs.gpr[12] = svcpu->gpr[12]; |
291 | vcpu->arch.regs.gpr[13] = svcpu->gpr[13]; |
292 | vcpu->arch.regs.ccr = svcpu->cr; |
293 | vcpu->arch.regs.xer = svcpu->xer; |
294 | vcpu->arch.regs.ctr = svcpu->ctr; |
295 | vcpu->arch.regs.link = svcpu->lr; |
296 | vcpu->arch.regs.nip = svcpu->pc; |
297 | vcpu->arch.shadow_srr1 = svcpu->shadow_srr1; |
298 | vcpu->arch.fault_dar = svcpu->fault_dar; |
299 | vcpu->arch.fault_dsisr = svcpu->fault_dsisr; |
300 | vcpu->arch.last_inst = svcpu->last_inst; |
301 | #ifdef CONFIG_PPC_BOOK3S_64 |
302 | vcpu->arch.shadow_fscr = svcpu->shadow_fscr; |
303 | #endif |
304 | /* |
305 | * Update purr and spurr using time base on exit. |
306 | */ |
307 | vcpu->arch.purr += get_tb() - vcpu->arch.entry_tb; |
308 | vcpu->arch.spurr += get_tb() - vcpu->arch.entry_tb; |
309 | to_book3s(vcpu)->vtb += get_vtb() - vcpu->arch.entry_vtb; |
310 | if (cpu_has_feature(CPU_FTR_ARCH_207S)) |
311 | vcpu->arch.ic += mfspr(SPRN_IC) - vcpu->arch.entry_ic; |
312 | |
313 | #ifdef CONFIG_PPC_TRANSACTIONAL_MEM |
/*
 * Unlike other MSR bits, the MSR[TS] bits can be changed by the guest
 * without notifying the host: they are modified by unprivileged
 * instructions such as "tbegin"/"tend"/"tresume"/"tsuspend" in a
 * PR KVM guest.
 *
 * It is necessary to sync them here to calculate a correct shadow_msr.
 *
 * A privileged guest's tbegin always fails at present, so we only
 * need to take care of the problem state guest.
 */
325 | old_msr = kvmppc_get_msr(vcpu); |
326 | if (unlikely((old_msr & MSR_PR) && |
327 | (vcpu->arch.shadow_srr1 & (MSR_TS_MASK)) != |
328 | (old_msr & (MSR_TS_MASK)))) { |
329 | old_msr &= ~(MSR_TS_MASK); |
330 | old_msr |= (vcpu->arch.shadow_srr1 & (MSR_TS_MASK)); |
331 | kvmppc_set_msr_fast(vcpu, old_msr); |
332 | kvmppc_recalc_shadow_msr(vcpu); |
333 | } |
334 | #endif |
335 | |
336 | svcpu->in_use = false; |
337 | |
338 | out: |
339 | svcpu_put(svcpu); |
340 | } |
341 | |
342 | #ifdef CONFIG_PPC_TRANSACTIONAL_MEM |
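/*
 * The TM SPRs (TFHAR, TEXASR, TFIAR) are switched eagerly around guest
 * entry and exit; the checkpointed register state itself is only
 * saved/restored via _kvmppc_save_tm_pr()/_kvmppc_restore_tm_pr() when
 * a transaction is actually active.
 */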
343 | void kvmppc_save_tm_sprs(struct kvm_vcpu *vcpu) |
344 | { |
345 | tm_enable(); |
346 | vcpu->arch.tfhar = mfspr(SPRN_TFHAR); |
347 | vcpu->arch.texasr = mfspr(SPRN_TEXASR); |
348 | vcpu->arch.tfiar = mfspr(SPRN_TFIAR); |
349 | tm_disable(); |
350 | } |
351 | |
352 | void kvmppc_restore_tm_sprs(struct kvm_vcpu *vcpu) |
353 | { |
354 | tm_enable(); |
355 | mtspr(SPRN_TFHAR, vcpu->arch.tfhar); |
356 | mtspr(SPRN_TEXASR, vcpu->arch.texasr); |
357 | mtspr(SPRN_TFIAR, vcpu->arch.tfiar); |
358 | tm_disable(); |
359 | } |
360 | |
/* Load up the math (FP/VEC/VSX) facilities that are enabled in the
 * guest MSR but not yet enabled in hardware.
 */
364 | static void kvmppc_handle_lost_math_exts(struct kvm_vcpu *vcpu) |
365 | { |
366 | ulong exit_nr; |
367 | ulong ext_diff = (kvmppc_get_msr(vcpu) & ~vcpu->arch.guest_owned_ext) & |
368 | (MSR_FP | MSR_VEC | MSR_VSX); |
369 | |
370 | if (!ext_diff) |
371 | return; |
372 | |
373 | if (ext_diff == MSR_FP) |
374 | exit_nr = BOOK3S_INTERRUPT_FP_UNAVAIL; |
375 | else if (ext_diff == MSR_VEC) |
376 | exit_nr = BOOK3S_INTERRUPT_ALTIVEC; |
377 | else |
378 | exit_nr = BOOK3S_INTERRUPT_VSX; |
379 | |
380 | kvmppc_handle_ext(vcpu, exit_nr, ext_diff); |
381 | } |
382 | |
383 | void kvmppc_save_tm_pr(struct kvm_vcpu *vcpu) |
384 | { |
385 | if (!(MSR_TM_ACTIVE(kvmppc_get_msr(vcpu)))) { |
386 | kvmppc_save_tm_sprs(vcpu); |
387 | return; |
388 | } |
389 | |
390 | kvmppc_giveup_fac(vcpu, FSCR_TAR_LG); |
391 | kvmppc_giveup_ext(vcpu, MSR_VSX); |
392 | |
393 | preempt_disable(); |
394 | _kvmppc_save_tm_pr(vcpu, mfmsr()); |
395 | preempt_enable(); |
396 | } |
397 | |
398 | void kvmppc_restore_tm_pr(struct kvm_vcpu *vcpu) |
399 | { |
400 | if (!MSR_TM_ACTIVE(kvmppc_get_msr(vcpu))) { |
401 | kvmppc_restore_tm_sprs(vcpu); |
402 | if (kvmppc_get_msr(vcpu) & MSR_TM) { |
403 | kvmppc_handle_lost_math_exts(vcpu); |
404 | if (vcpu->arch.fscr & FSCR_TAR) |
405 | kvmppc_handle_fac(vcpu, FSCR_TAR_LG); |
406 | } |
407 | return; |
408 | } |
409 | |
410 | preempt_disable(); |
411 | _kvmppc_restore_tm_pr(vcpu, kvmppc_get_msr(vcpu)); |
412 | preempt_enable(); |
413 | |
414 | if (kvmppc_get_msr(vcpu) & MSR_TM) { |
415 | kvmppc_handle_lost_math_exts(vcpu); |
416 | if (vcpu->arch.fscr & FSCR_TAR) |
417 | kvmppc_handle_fac(vcpu, FSCR_TAR_LG); |
418 | } |
419 | } |
420 | #endif |
421 | |
422 | static int kvmppc_core_check_requests_pr(struct kvm_vcpu *vcpu) |
423 | { |
424 | int r = 1; /* Indicate we want to get back into the guest */ |
425 | |
426 | /* We misuse TLB_FLUSH to indicate that we want to clear |
427 | all shadow cache entries */ |
428 | if (kvm_check_request(KVM_REQ_TLB_FLUSH, vcpu)) |
429 | kvmppc_mmu_pte_flush(vcpu, 0, 0); |
430 | |
431 | return r; |
432 | } |
433 | |
434 | /************* MMU Notifiers *************/ |
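/*
 * PR KVM keeps no referenced/dirty tracking in its shadow PTEs, so the
 * age and test-age notifiers simply report "not young"; unmap and
 * set-pte just flush the affected range of shadow PTEs.
 */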
435 | static bool do_kvm_unmap_gfn(struct kvm *kvm, struct kvm_gfn_range *range) |
436 | { |
437 | unsigned long i; |
438 | struct kvm_vcpu *vcpu; |
439 | |
440 | kvm_for_each_vcpu(i, vcpu, kvm) |
441 | kvmppc_mmu_pte_pflush(vcpu, range->start << PAGE_SHIFT, |
442 | range->end << PAGE_SHIFT); |
443 | |
444 | return false; |
445 | } |
446 | |
447 | static bool kvm_unmap_gfn_range_pr(struct kvm *kvm, struct kvm_gfn_range *range) |
448 | { |
449 | return do_kvm_unmap_gfn(kvm, range); |
450 | } |
451 | |
452 | static bool kvm_age_gfn_pr(struct kvm *kvm, struct kvm_gfn_range *range) |
453 | { |
454 | /* XXX could be more clever ;) */ |
455 | return false; |
456 | } |
457 | |
458 | static bool kvm_test_age_gfn_pr(struct kvm *kvm, struct kvm_gfn_range *range) |
459 | { |
460 | /* XXX could be more clever ;) */ |
461 | return false; |
462 | } |
463 | |
464 | static bool kvm_set_spte_gfn_pr(struct kvm *kvm, struct kvm_gfn_range *range) |
465 | { |
466 | /* The page will get remapped properly on its next fault */ |
467 | return do_kvm_unmap_gfn(kvm, range); |
468 | } |
469 | |
470 | /*****************************************/ |
471 | |
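/*
 * Writing the guest MSR has wide-ranging side effects: the shadow MSR
 * must be recomputed, MSR_POW turns into a halt, and any change to
 * PR/IR/DR flushes and remaps the segments since the translation
 * regime has changed.
 */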
472 | static void kvmppc_set_msr_pr(struct kvm_vcpu *vcpu, u64 msr) |
473 | { |
474 | ulong old_msr; |
475 | |
476 | /* For PAPR guest, make sure MSR reflects guest mode */ |
477 | if (vcpu->arch.papr_enabled) |
478 | msr = (msr & ~MSR_HV) | MSR_ME; |
479 | |
480 | #ifdef EXIT_DEBUG |
printk(KERN_INFO "KVM: Set MSR to 0x%llx\n", msr);
482 | #endif |
483 | |
484 | #ifdef CONFIG_PPC_TRANSACTIONAL_MEM |
/* We should never let the guest MSR reach TS=10 && PR=0, since we
 * always fail transactions in guest privileged state.
 */
489 | if (!(msr & MSR_PR) && MSR_TM_TRANSACTIONAL(msr)) |
490 | kvmppc_emulate_tabort(vcpu, |
491 | TM_CAUSE_KVM_FAC_UNAV | TM_CAUSE_PERSISTENT); |
492 | #endif |
493 | |
494 | old_msr = kvmppc_get_msr(vcpu); |
495 | msr &= to_book3s(vcpu)->msr_mask; |
496 | kvmppc_set_msr_fast(vcpu, msr); |
497 | kvmppc_recalc_shadow_msr(vcpu); |
498 | |
499 | if (msr & MSR_POW) { |
500 | if (!vcpu->arch.pending_exceptions) { |
501 | kvm_vcpu_halt(vcpu); |
502 | vcpu->stat.generic.halt_wakeup++; |
503 | |
504 | /* Unset POW bit after we woke up */ |
505 | msr &= ~MSR_POW; |
506 | kvmppc_set_msr_fast(vcpu, msr); |
507 | } |
508 | } |
509 | |
510 | if (kvmppc_is_split_real(vcpu)) |
511 | kvmppc_fixup_split_real(vcpu); |
512 | else |
513 | kvmppc_unfixup_split_real(vcpu); |
514 | |
515 | if ((kvmppc_get_msr(vcpu) & (MSR_PR|MSR_IR|MSR_DR)) != |
516 | (old_msr & (MSR_PR|MSR_IR|MSR_DR))) { |
517 | kvmppc_mmu_flush_segments(vcpu); |
518 | kvmppc_mmu_map_segment(vcpu, kvmppc_get_pc(vcpu)); |
519 | |
520 | /* Preload magic page segment when in kernel mode */ |
521 | if (!(msr & MSR_PR) && vcpu->arch.magic_page_pa) { |
522 | struct kvm_vcpu_arch *a = &vcpu->arch; |
523 | |
524 | if (msr & MSR_DR) |
525 | kvmppc_mmu_map_segment(vcpu, a->magic_page_ea); |
526 | else |
527 | kvmppc_mmu_map_segment(vcpu, a->magic_page_pa); |
528 | } |
529 | } |
530 | |
/*
 * When switching from 32 to 64-bit, we may have a stale 32-bit
 * magic page around that we need to flush. Typically the 32-bit
 * magic page is instantiated when calling into RTAS. Note: we
 * assume that such a transition only happens while in kernel mode,
 * i.e., we never transition from user 32-bit to kernel 64-bit with
 * a 32-bit magic page around.
 */
539 | if (vcpu->arch.magic_page_pa && |
540 | !(old_msr & MSR_PR) && !(old_msr & MSR_SF) && (msr & MSR_SF)) { |
541 | /* going from RTAS to normal kernel code */ |
542 | kvmppc_mmu_pte_flush(vcpu, (uint32_t)vcpu->arch.magic_page_pa, |
543 | ~0xFFFUL); |
544 | } |
545 | |
546 | /* Preload FPU if it's enabled */ |
547 | if (kvmppc_get_msr(vcpu) & MSR_FP) |
548 | kvmppc_handle_ext(vcpu, BOOK3S_INTERRUPT_FP_UNAVAIL, MSR_FP); |
549 | |
550 | #ifdef CONFIG_PPC_TRANSACTIONAL_MEM |
551 | if (kvmppc_get_msr(vcpu) & MSR_TM) |
552 | kvmppc_handle_lost_math_exts(vcpu); |
553 | #endif |
554 | } |
555 | |
556 | static void kvmppc_set_pvr_pr(struct kvm_vcpu *vcpu, u32 pvr) |
557 | { |
558 | u32 host_pvr; |
559 | |
560 | vcpu->arch.hflags &= ~BOOK3S_HFLAG_SLB; |
561 | vcpu->arch.pvr = pvr; |
562 | #ifdef CONFIG_PPC_BOOK3S_64 |
563 | if ((pvr >= 0x330000) && (pvr < 0x70330000)) { |
564 | kvmppc_mmu_book3s_64_init(vcpu); |
565 | if (!to_book3s(vcpu)->hior_explicit) |
566 | to_book3s(vcpu)->hior = 0xfff00000; |
567 | to_book3s(vcpu)->msr_mask = 0xffffffffffffffffULL; |
568 | vcpu->arch.cpu_type = KVM_CPU_3S_64; |
569 | } else |
570 | #endif |
571 | { |
572 | kvmppc_mmu_book3s_32_init(vcpu); |
573 | if (!to_book3s(vcpu)->hior_explicit) |
574 | to_book3s(vcpu)->hior = 0; |
575 | to_book3s(vcpu)->msr_mask = 0xffffffffULL; |
576 | vcpu->arch.cpu_type = KVM_CPU_3S_32; |
577 | } |
578 | |
579 | kvmppc_sanity_check(vcpu); |
580 | |
/* If we are in hypervisor level on 970, we can tell the CPU to
 * treat DCBZ as a 32-byte store */
583 | vcpu->arch.hflags &= ~BOOK3S_HFLAG_DCBZ32; |
584 | if (vcpu->arch.mmu.is_dcbz32(vcpu) && (mfmsr() & MSR_HV) && |
!strcmp(cur_cpu_spec->platform, "ppc970"))
586 | vcpu->arch.hflags |= BOOK3S_HFLAG_DCBZ32; |
587 | |
588 | /* Cell performs badly if MSR_FEx are set. So let's hope nobody |
589 | really needs them in a VM on Cell and force disable them. */ |
if (!strcmp(cur_cpu_spec->platform, "ppc-cell-be"))
591 | to_book3s(vcpu)->msr_mask &= ~(MSR_FE0 | MSR_FE1); |
592 | |
593 | /* |
594 | * If they're asking for POWER6 or later, set the flag |
595 | * indicating that we can do multiple large page sizes |
596 | * and 1TB segments. |
597 | * Also set the flag that indicates that tlbie has the large |
598 | * page bit in the RB operand instead of the instruction. |
599 | */ |
600 | switch (PVR_VER(pvr)) { |
601 | case PVR_POWER6: |
602 | case PVR_POWER7: |
603 | case PVR_POWER7p: |
604 | case PVR_POWER8: |
605 | case PVR_POWER8E: |
606 | case PVR_POWER8NVL: |
607 | case PVR_HX_C2000: |
608 | case PVR_POWER9: |
609 | vcpu->arch.hflags |= BOOK3S_HFLAG_MULTI_PGSIZE | |
610 | BOOK3S_HFLAG_NEW_TLBIE; |
611 | break; |
612 | } |
613 | |
614 | #ifdef CONFIG_PPC_BOOK3S_32 |
615 | /* 32 bit Book3S always has 32 byte dcbz */ |
616 | vcpu->arch.hflags |= BOOK3S_HFLAG_DCBZ32; |
617 | #endif |
618 | |
619 | /* On some CPUs we can execute paired single operations natively */ |
asm("mfpvr %0" : "=r" (host_pvr));
621 | switch (host_pvr) { |
622 | case 0x00080200: /* lonestar 2.0 */ |
623 | case 0x00088202: /* lonestar 2.2 */ |
624 | case 0x70000100: /* gekko 1.0 */ |
625 | case 0x00080100: /* gekko 2.0 */ |
626 | case 0x00083203: /* gekko 2.3a */ |
627 | case 0x00083213: /* gekko 2.3b */ |
628 | case 0x00083204: /* gekko 2.4 */ |
629 | case 0x00083214: /* gekko 2.4e (8SE) - retail HW2 */ |
630 | case 0x00087200: /* broadway */ |
631 | vcpu->arch.hflags |= BOOK3S_HFLAG_NATIVE_PS; |
632 | /* Enable HID2.PSE - in case we need it later */ |
633 | mtspr(SPRN_HID2_GEKKO, mfspr(SPRN_HID2_GEKKO) | (1 << 29)); |
634 | } |
635 | } |
636 | |
/* Book3s_32 CPUs always have a 32-byte cache line size, which Linux assumes. To
 * make Book3s_32 Linux work on Book3s_64, we have to make sure we trap dcbz to
 * emulate the 32-byte dcbz length.
 *
 * The Book3s_64 designers also anticipated this case and implemented a special
 * bit in the HID5 register, which is a hypervisor resource. Thus we can't use it.
 *
 * My approach here is to patch the dcbz instruction on executing pages.
 */
646 | static void kvmppc_patch_dcbz(struct kvm_vcpu *vcpu, struct kvmppc_pte *pte) |
647 | { |
648 | struct page *hpage; |
649 | u64 hpage_offset; |
650 | u32 *page; |
651 | int i; |
652 | |
hpage = gfn_to_page(vcpu->kvm, pte->raddr >> PAGE_SHIFT);
if (is_error_page(hpage))
655 | return; |
656 | |
657 | hpage_offset = pte->raddr & ~PAGE_MASK; |
658 | hpage_offset &= ~0xFFFULL; |
659 | hpage_offset /= 4; |
660 | |
get_page(hpage);
page = kmap_atomic(hpage);
663 | |
664 | /* patch dcbz into reserved instruction, so we trap */ |
665 | for (i=hpage_offset; i < hpage_offset + (HW_PAGE_SIZE / 4); i++) |
666 | if ((be32_to_cpu(page[i]) & 0xff0007ff) == INS_DCBZ) |
667 | page[i] &= cpu_to_be32(0xfffffff7); |
668 | |
669 | kunmap_atomic(page); |
put_page(hpage);
671 | } |
672 | |
673 | static bool kvmppc_visible_gpa(struct kvm_vcpu *vcpu, gpa_t gpa) |
674 | { |
675 | ulong mp_pa = vcpu->arch.magic_page_pa; |
676 | |
677 | if (!(kvmppc_get_msr(vcpu) & MSR_SF)) |
678 | mp_pa = (uint32_t)mp_pa; |
679 | |
680 | gpa &= ~0xFFFULL; |
681 | if (unlikely(mp_pa) && unlikely((mp_pa & KVM_PAM) == (gpa & KVM_PAM))) { |
682 | return true; |
683 | } |
684 | |
return kvm_is_visible_gfn(vcpu->kvm, gpa >> PAGE_SHIFT);
686 | } |
687 | |
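/*
 * Common fault path for instruction and data storage interrupts:
 * translate the effective address through the guest MMU (or fabricate
 * a real-mode PTE), then either reflect the fault into the guest, map
 * the page into the host shadow MMU, or hand the access to the MMIO
 * emulator.
 */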
688 | static int kvmppc_handle_pagefault(struct kvm_vcpu *vcpu, |
689 | ulong eaddr, int vec) |
690 | { |
691 | bool data = (vec == BOOK3S_INTERRUPT_DATA_STORAGE); |
692 | bool iswrite = false; |
693 | int r = RESUME_GUEST; |
694 | int relocated; |
695 | int page_found = 0; |
696 | struct kvmppc_pte pte = { 0 }; |
697 | bool dr = (kvmppc_get_msr(vcpu) & MSR_DR) ? true : false; |
698 | bool ir = (kvmppc_get_msr(vcpu) & MSR_IR) ? true : false; |
699 | u64 vsid; |
700 | |
701 | relocated = data ? dr : ir; |
702 | if (data && (vcpu->arch.fault_dsisr & DSISR_ISSTORE)) |
703 | iswrite = true; |
704 | |
705 | /* Resolve real address if translation turned on */ |
706 | if (relocated) { |
707 | page_found = vcpu->arch.mmu.xlate(vcpu, eaddr, &pte, data, iswrite); |
708 | } else { |
709 | pte.may_execute = true; |
710 | pte.may_read = true; |
711 | pte.may_write = true; |
712 | pte.raddr = eaddr & KVM_PAM; |
713 | pte.eaddr = eaddr; |
714 | pte.vpage = eaddr >> 12; |
715 | pte.page_size = MMU_PAGE_64K; |
716 | pte.wimg = HPTE_R_M; |
717 | } |
718 | |
719 | switch (kvmppc_get_msr(vcpu) & (MSR_DR|MSR_IR)) { |
720 | case 0: |
721 | pte.vpage |= ((u64)VSID_REAL << (SID_SHIFT - 12)); |
722 | break; |
723 | case MSR_DR: |
724 | if (!data && |
725 | (vcpu->arch.hflags & BOOK3S_HFLAG_SPLIT_HACK) && |
726 | ((pte.raddr & SPLIT_HACK_MASK) == SPLIT_HACK_OFFS)) |
727 | pte.raddr &= ~SPLIT_HACK_MASK; |
728 | fallthrough; |
729 | case MSR_IR: |
730 | vcpu->arch.mmu.esid_to_vsid(vcpu, eaddr >> SID_SHIFT, &vsid); |
731 | |
732 | if ((kvmppc_get_msr(vcpu) & (MSR_DR|MSR_IR)) == MSR_DR) |
733 | pte.vpage |= ((u64)VSID_REAL_DR << (SID_SHIFT - 12)); |
734 | else |
735 | pte.vpage |= ((u64)VSID_REAL_IR << (SID_SHIFT - 12)); |
736 | pte.vpage |= vsid; |
737 | |
738 | if (vsid == -1) |
739 | page_found = -EINVAL; |
740 | break; |
741 | } |
742 | |
743 | if (vcpu->arch.mmu.is_dcbz32(vcpu) && |
744 | (!(vcpu->arch.hflags & BOOK3S_HFLAG_DCBZ32))) { |
745 | /* |
746 | * If we do the dcbz hack, we have to NX on every execution, |
747 | * so we can patch the executing code. This renders our guest |
748 | * NX-less. |
749 | */ |
750 | pte.may_execute = !data; |
751 | } |
752 | |
753 | if (page_found == -ENOENT || page_found == -EPERM) { |
754 | /* Page not found in guest PTE entries, or protection fault */ |
755 | u64 flags; |
756 | |
757 | if (page_found == -EPERM) |
758 | flags = DSISR_PROTFAULT; |
759 | else |
760 | flags = DSISR_NOHPTE; |
761 | if (data) { |
762 | flags |= vcpu->arch.fault_dsisr & DSISR_ISSTORE; |
763 | kvmppc_core_queue_data_storage(vcpu, 0, eaddr, flags); |
764 | } else { |
765 | kvmppc_core_queue_inst_storage(vcpu, flags); |
766 | } |
767 | } else if (page_found == -EINVAL) { |
768 | /* Page not found in guest SLB */ |
769 | kvmppc_set_dar(vcpu, kvmppc_get_fault_dar(vcpu)); |
770 | kvmppc_book3s_queue_irqprio(vcpu, vec + 0x80); |
} else if (kvmppc_visible_gpa(vcpu, pte.raddr)) {
772 | if (data && !(vcpu->arch.fault_dsisr & DSISR_NOHPTE)) { |
773 | /* |
774 | * There is already a host HPTE there, presumably |
775 | * a read-only one for a page the guest thinks |
776 | * is writable, so get rid of it first. |
777 | */ |
778 | kvmppc_mmu_unmap_page(vcpu, &pte); |
779 | } |
780 | /* The guest's PTE is not mapped yet. Map on the host */ |
781 | if (kvmppc_mmu_map_page(vcpu, &pte, iswrite) == -EIO) { |
782 | /* Exit KVM if mapping failed */ |
783 | vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR; |
784 | return RESUME_HOST; |
785 | } |
786 | if (data) |
787 | vcpu->stat.sp_storage++; |
788 | else if (vcpu->arch.mmu.is_dcbz32(vcpu) && |
789 | (!(vcpu->arch.hflags & BOOK3S_HFLAG_DCBZ32))) |
kvmppc_patch_dcbz(vcpu, &pte);
791 | } else { |
792 | /* MMIO */ |
793 | vcpu->stat.mmio_exits++; |
794 | vcpu->arch.paddr_accessed = pte.raddr; |
795 | vcpu->arch.vaddr_accessed = pte.eaddr; |
796 | r = kvmppc_emulate_mmio(vcpu); |
797 | if ( r == RESUME_HOST_NV ) |
798 | r = RESUME_HOST; |
799 | } |
800 | |
801 | return r; |
802 | } |
803 | |
804 | /* Give up external provider (FPU, Altivec, VSX) */ |
805 | void kvmppc_giveup_ext(struct kvm_vcpu *vcpu, ulong msr) |
806 | { |
807 | struct thread_struct *t = ¤t->thread; |
808 | |
809 | /* |
810 | * VSX instructions can access FP and vector registers, so if |
811 | * we are giving up VSX, make sure we give up FP and VMX as well. |
812 | */ |
813 | if (msr & MSR_VSX) |
814 | msr |= MSR_FP | MSR_VEC; |
815 | |
816 | msr &= vcpu->arch.guest_owned_ext; |
817 | if (!msr) |
818 | return; |
819 | |
820 | #ifdef DEBUG_EXT |
printk(KERN_INFO "Giving up ext 0x%lx\n", msr);
822 | #endif |
823 | |
824 | if (msr & MSR_FP) { |
825 | /* |
826 | * Note that on CPUs with VSX, giveup_fpu stores |
827 | * both the traditional FP registers and the added VSX |
828 | * registers into thread.fp_state.fpr[]. |
829 | */ |
830 | if (t->regs->msr & MSR_FP) |
831 | giveup_fpu(current); |
832 | t->fp_save_area = NULL; |
833 | } |
834 | |
835 | #ifdef CONFIG_ALTIVEC |
836 | if (msr & MSR_VEC) { |
837 | if (current->thread.regs->msr & MSR_VEC) |
838 | giveup_altivec(current); |
839 | t->vr_save_area = NULL; |
840 | } |
841 | #endif |
842 | |
843 | vcpu->arch.guest_owned_ext &= ~(msr | MSR_VSX); |
844 | kvmppc_recalc_shadow_msr(vcpu); |
845 | } |
846 | |
847 | /* Give up facility (TAR / EBB / DSCR) */ |
848 | void kvmppc_giveup_fac(struct kvm_vcpu *vcpu, ulong fac) |
849 | { |
850 | #ifdef CONFIG_PPC_BOOK3S_64 |
851 | if (!(vcpu->arch.shadow_fscr & (1ULL << fac))) { |
/* Facility not available to the guest, ignore giveup request */
853 | return; |
854 | } |
855 | |
856 | switch (fac) { |
857 | case FSCR_TAR_LG: |
858 | vcpu->arch.tar = mfspr(SPRN_TAR); |
859 | mtspr(SPRN_TAR, current->thread.tar); |
860 | vcpu->arch.shadow_fscr &= ~FSCR_TAR; |
861 | break; |
862 | } |
863 | #endif |
864 | } |
865 | |
866 | /* Handle external providers (FPU, Altivec, VSX) */ |
867 | static int kvmppc_handle_ext(struct kvm_vcpu *vcpu, unsigned int exit_nr, |
868 | ulong msr) |
869 | { |
870 | struct thread_struct *t = ¤t->thread; |
871 | |
872 | /* When we have paired singles, we emulate in software */ |
873 | if (vcpu->arch.hflags & BOOK3S_HFLAG_PAIRED_SINGLE) |
874 | return RESUME_GUEST; |
875 | |
876 | if (!(kvmppc_get_msr(vcpu) & msr)) { |
877 | kvmppc_book3s_queue_irqprio(vcpu, exit_nr); |
878 | return RESUME_GUEST; |
879 | } |
880 | |
881 | if (msr == MSR_VSX) { |
882 | /* No VSX? Give an illegal instruction interrupt */ |
883 | #ifdef CONFIG_VSX |
884 | if (!cpu_has_feature(CPU_FTR_VSX)) |
885 | #endif |
886 | { |
887 | kvmppc_core_queue_program(vcpu, SRR1_PROGILL); |
888 | return RESUME_GUEST; |
889 | } |
890 | |
891 | /* |
892 | * We have to load up all the FP and VMX registers before |
893 | * we can let the guest use VSX instructions. |
894 | */ |
895 | msr = MSR_FP | MSR_VEC | MSR_VSX; |
896 | } |
897 | |
898 | /* See if we already own all the ext(s) needed */ |
899 | msr &= ~vcpu->arch.guest_owned_ext; |
900 | if (!msr) |
901 | return RESUME_GUEST; |
902 | |
903 | #ifdef DEBUG_EXT |
printk(KERN_INFO "Loading up ext 0x%lx\n", msr);
905 | #endif |
906 | |
907 | if (msr & MSR_FP) { |
908 | preempt_disable(); |
909 | enable_kernel_fp(); |
910 | load_fp_state(&vcpu->arch.fp); |
911 | disable_kernel_fp(); |
912 | t->fp_save_area = &vcpu->arch.fp; |
913 | preempt_enable(); |
914 | } |
915 | |
916 | if (msr & MSR_VEC) { |
917 | #ifdef CONFIG_ALTIVEC |
918 | preempt_disable(); |
919 | enable_kernel_altivec(); |
920 | load_vr_state(&vcpu->arch.vr); |
921 | disable_kernel_altivec(); |
922 | t->vr_save_area = &vcpu->arch.vr; |
923 | preempt_enable(); |
924 | #endif |
925 | } |
926 | |
927 | t->regs->msr |= msr; |
928 | vcpu->arch.guest_owned_ext |= msr; |
929 | kvmppc_recalc_shadow_msr(vcpu); |
930 | |
931 | return RESUME_GUEST; |
932 | } |
933 | |
934 | /* |
935 | * Kernel code using FP or VMX could have flushed guest state to |
936 | * the thread_struct; if so, get it back now. |
937 | */ |
938 | static void kvmppc_handle_lost_ext(struct kvm_vcpu *vcpu) |
939 | { |
940 | unsigned long lost_ext; |
941 | |
942 | lost_ext = vcpu->arch.guest_owned_ext & ~current->thread.regs->msr; |
943 | if (!lost_ext) |
944 | return; |
945 | |
946 | if (lost_ext & MSR_FP) { |
947 | preempt_disable(); |
948 | enable_kernel_fp(); |
949 | load_fp_state(&vcpu->arch.fp); |
950 | disable_kernel_fp(); |
951 | preempt_enable(); |
952 | } |
953 | #ifdef CONFIG_ALTIVEC |
954 | if (lost_ext & MSR_VEC) { |
955 | preempt_disable(); |
956 | enable_kernel_altivec(); |
957 | load_vr_state(&vcpu->arch.vr); |
958 | disable_kernel_altivec(); |
959 | preempt_enable(); |
960 | } |
961 | #endif |
962 | current->thread.regs->msr |= lost_ext; |
963 | } |
964 | |
965 | #ifdef CONFIG_PPC_BOOK3S_64 |
966 | |
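/*
 * FSCR-controlled facility handling: TAR is switched eagerly between
 * host and guest; other facilities are either emulated or reflected
 * back into the guest as a facility-unavailable interrupt.
 */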
967 | void kvmppc_trigger_fac_interrupt(struct kvm_vcpu *vcpu, ulong fac) |
968 | { |
969 | /* Inject the Interrupt Cause field and trigger a guest interrupt */ |
970 | vcpu->arch.fscr &= ~(0xffULL << 56); |
971 | vcpu->arch.fscr |= (fac << 56); |
972 | kvmppc_book3s_queue_irqprio(vcpu, BOOK3S_INTERRUPT_FAC_UNAVAIL); |
973 | } |
974 | |
975 | static void kvmppc_emulate_fac(struct kvm_vcpu *vcpu, ulong fac) |
976 | { |
977 | enum emulation_result er = EMULATE_FAIL; |
978 | |
979 | if (!(kvmppc_get_msr(vcpu) & MSR_PR)) |
980 | er = kvmppc_emulate_instruction(vcpu); |
981 | |
982 | if ((er != EMULATE_DONE) && (er != EMULATE_AGAIN)) { |
983 | /* Couldn't emulate, trigger interrupt in guest */ |
984 | kvmppc_trigger_fac_interrupt(vcpu, fac); |
985 | } |
986 | } |
987 | |
988 | /* Enable facilities (TAR, EBB, DSCR) for the guest */ |
989 | static int kvmppc_handle_fac(struct kvm_vcpu *vcpu, ulong fac) |
990 | { |
991 | bool guest_fac_enabled; |
992 | BUG_ON(!cpu_has_feature(CPU_FTR_ARCH_207S)); |
993 | |
994 | /* |
995 | * Not every facility is enabled by FSCR bits, check whether the |
996 | * guest has this facility enabled at all. |
997 | */ |
998 | switch (fac) { |
999 | case FSCR_TAR_LG: |
1000 | case FSCR_EBB_LG: |
1001 | guest_fac_enabled = (vcpu->arch.fscr & (1ULL << fac)); |
1002 | break; |
1003 | case FSCR_TM_LG: |
1004 | guest_fac_enabled = kvmppc_get_msr(vcpu) & MSR_TM; |
1005 | break; |
1006 | default: |
1007 | guest_fac_enabled = false; |
1008 | break; |
1009 | } |
1010 | |
1011 | if (!guest_fac_enabled) { |
1012 | /* Facility not enabled by the guest */ |
1013 | kvmppc_trigger_fac_interrupt(vcpu, fac); |
1014 | return RESUME_GUEST; |
1015 | } |
1016 | |
1017 | switch (fac) { |
1018 | case FSCR_TAR_LG: |
1019 | /* TAR switching isn't lazy in Linux yet */ |
1020 | current->thread.tar = mfspr(SPRN_TAR); |
1021 | mtspr(SPRN_TAR, vcpu->arch.tar); |
1022 | vcpu->arch.shadow_fscr |= FSCR_TAR; |
1023 | break; |
1024 | default: |
1025 | kvmppc_emulate_fac(vcpu, fac); |
1026 | break; |
1027 | } |
1028 | |
1029 | #ifdef CONFIG_PPC_TRANSACTIONAL_MEM |
/* Since we disabled MSR_TM in privileged state, an mfspr of a TM SPR
 * can trigger a TM facility-unavailable interrupt. In this case the
 * emulation is handled by kvmppc_emulate_fac(), which eventually
 * invokes kvmppc_emulate_mfspr(). But note that the mfspr's RT can
 * name a non-volatile register, so we need to restore those NV
 * registers to reflect the update.
 */
1037 | if ((fac == FSCR_TM_LG) && !(kvmppc_get_msr(vcpu) & MSR_PR)) |
1038 | return RESUME_GUEST_NV; |
1039 | #endif |
1040 | |
1041 | return RESUME_GUEST; |
1042 | } |
1043 | |
1044 | void kvmppc_set_fscr(struct kvm_vcpu *vcpu, u64 fscr) |
1045 | { |
1046 | if (fscr & FSCR_SCV) |
1047 | fscr &= ~FSCR_SCV; /* SCV must not be enabled */ |
1048 | /* Prohibit prefixed instructions for now */ |
1049 | fscr &= ~FSCR_PREFIX; |
1050 | if ((vcpu->arch.fscr & FSCR_TAR) && !(fscr & FSCR_TAR)) { |
1051 | /* TAR got dropped, drop it in shadow too */ |
1052 | kvmppc_giveup_fac(vcpu, FSCR_TAR_LG); |
1053 | } else if (!(vcpu->arch.fscr & FSCR_TAR) && (fscr & FSCR_TAR)) { |
1054 | vcpu->arch.fscr = fscr; |
1055 | kvmppc_handle_fac(vcpu, FSCR_TAR_LG); |
1056 | return; |
1057 | } |
1058 | |
1059 | vcpu->arch.fscr = fscr; |
1060 | } |
1061 | #endif |
1062 | |
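/*
 * Userspace single-stepping is implemented by setting MSR_SE in the
 * guest MSR, so that each guest instruction raises a trace interrupt
 * which the exit handler turns into KVM_EXIT_DEBUG.
 */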
1063 | static void kvmppc_setup_debug(struct kvm_vcpu *vcpu) |
1064 | { |
1065 | if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP) { |
1066 | u64 msr = kvmppc_get_msr(vcpu); |
1067 | |
1068 | kvmppc_set_msr(vcpu, msr | MSR_SE); |
1069 | } |
1070 | } |
1071 | |
1072 | static void kvmppc_clear_debug(struct kvm_vcpu *vcpu) |
1073 | { |
1074 | if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP) { |
1075 | u64 msr = kvmppc_get_msr(vcpu); |
1076 | |
1077 | kvmppc_set_msr(vcpu, msr & ~MSR_SE); |
1078 | } |
1079 | } |
1080 | |
1081 | static int kvmppc_exit_pr_progint(struct kvm_vcpu *vcpu, unsigned int exit_nr) |
1082 | { |
1083 | enum emulation_result er; |
1084 | ulong flags; |
1085 | ppc_inst_t last_inst; |
1086 | int emul, r; |
1087 | |
1088 | /* |
1089 | * shadow_srr1 only contains valid flags if we came here via a program |
1090 | * exception. The other exceptions (emulation assist, FP unavailable, |
1091 | * etc.) do not provide flags in SRR1, so use an illegal-instruction |
1092 | * exception when injecting a program interrupt into the guest. |
1093 | */ |
1094 | if (exit_nr == BOOK3S_INTERRUPT_PROGRAM) |
1095 | flags = vcpu->arch.shadow_srr1 & 0x1f0000ull; |
1096 | else |
1097 | flags = SRR1_PROGILL; |
1098 | |
1099 | emul = kvmppc_get_last_inst(vcpu, INST_GENERIC, &last_inst); |
1100 | if (emul != EMULATE_DONE) |
1101 | return RESUME_GUEST; |
1102 | |
1103 | if (kvmppc_get_msr(vcpu) & MSR_PR) { |
1104 | #ifdef EXIT_DEBUG |
pr_info("Userspace triggered 0x700 exception at\n 0x%lx (0x%x)\n",
1106 | kvmppc_get_pc(vcpu), ppc_inst_val(last_inst)); |
1107 | #endif |
1108 | if ((ppc_inst_val(last_inst) & 0xff0007ff) != (INS_DCBZ & 0xfffffff7)) { |
1109 | kvmppc_core_queue_program(vcpu, flags); |
1110 | return RESUME_GUEST; |
1111 | } |
1112 | } |
1113 | |
1114 | vcpu->stat.emulated_inst_exits++; |
1115 | er = kvmppc_emulate_instruction(vcpu); |
1116 | switch (er) { |
1117 | case EMULATE_DONE: |
1118 | r = RESUME_GUEST_NV; |
1119 | break; |
1120 | case EMULATE_AGAIN: |
1121 | r = RESUME_GUEST; |
1122 | break; |
1123 | case EMULATE_FAIL: |
pr_crit("%s: emulation at %lx failed (%08x)\n",
1125 | __func__, kvmppc_get_pc(vcpu), ppc_inst_val(last_inst)); |
1126 | kvmppc_core_queue_program(vcpu, flags); |
1127 | r = RESUME_GUEST; |
1128 | break; |
1129 | case EMULATE_DO_MMIO: |
1130 | vcpu->run->exit_reason = KVM_EXIT_MMIO; |
1131 | r = RESUME_HOST_NV; |
1132 | break; |
1133 | case EMULATE_EXIT_USER: |
1134 | r = RESUME_HOST_NV; |
1135 | break; |
1136 | default: |
1137 | BUG(); |
1138 | } |
1139 | |
1140 | return r; |
1141 | } |
1142 | |
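/*
 * Main exit dispatcher for PR KVM: for each interrupt vector, decide
 * whether to handle it in the host (page faults, emulation, papr/OSI/PV
 * hypercalls), reflect it into the guest, or bail out to userspace.
 * Returns a RESUME_* code; RESUME_HOST exits to userspace.
 */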
1143 | int kvmppc_handle_exit_pr(struct kvm_vcpu *vcpu, unsigned int exit_nr) |
1144 | { |
1145 | struct kvm_run *run = vcpu->run; |
1146 | int r = RESUME_HOST; |
1147 | int s; |
1148 | |
1149 | vcpu->stat.sum_exits++; |
1150 | |
1151 | run->exit_reason = KVM_EXIT_UNKNOWN; |
1152 | run->ready_for_interrupt_injection = 1; |
1153 | |
1154 | /* We get here with MSR.EE=1 */ |
1155 | |
1156 | trace_kvm_exit(exit_nr, vcpu); |
1157 | guest_exit(); |
1158 | |
1159 | switch (exit_nr) { |
1160 | case BOOK3S_INTERRUPT_INST_STORAGE: |
1161 | { |
1162 | ulong shadow_srr1 = vcpu->arch.shadow_srr1; |
1163 | vcpu->stat.pf_instruc++; |
1164 | |
1165 | if (kvmppc_is_split_real(vcpu)) |
1166 | kvmppc_fixup_split_real(vcpu); |
1167 | |
1168 | #ifdef CONFIG_PPC_BOOK3S_32 |
/* We mark segments as unused when invalidating them, so treat
 * the resulting fault as a segment fault. */
1171 | { |
1172 | struct kvmppc_book3s_shadow_vcpu *svcpu; |
1173 | u32 sr; |
1174 | |
1175 | svcpu = svcpu_get(vcpu); |
1176 | sr = svcpu->sr[kvmppc_get_pc(vcpu) >> SID_SHIFT]; |
1177 | svcpu_put(svcpu); |
1178 | if (sr == SR_INVALID) { |
1179 | kvmppc_mmu_map_segment(vcpu, kvmppc_get_pc(vcpu)); |
1180 | r = RESUME_GUEST; |
1181 | break; |
1182 | } |
1183 | } |
1184 | #endif |
1185 | |
1186 | /* only care about PTEG not found errors, but leave NX alone */ |
1187 | if (shadow_srr1 & 0x40000000) { |
int idx = srcu_read_lock(&vcpu->kvm->srcu);
r = kvmppc_handle_pagefault(vcpu, kvmppc_get_pc(vcpu), exit_nr);
srcu_read_unlock(&vcpu->kvm->srcu, idx);
1191 | vcpu->stat.sp_instruc++; |
1192 | } else if (vcpu->arch.mmu.is_dcbz32(vcpu) && |
1193 | (!(vcpu->arch.hflags & BOOK3S_HFLAG_DCBZ32))) { |
1194 | /* |
1195 | * XXX If we do the dcbz hack we use the NX bit to flush&patch the page, |
1196 | * so we can't use the NX bit inside the guest. Let's cross our fingers, |
1197 | * that no guest that needs the dcbz hack does NX. |
1198 | */ |
1199 | kvmppc_mmu_pte_flush(vcpu, kvmppc_get_pc(vcpu), ~0xFFFUL); |
1200 | r = RESUME_GUEST; |
1201 | } else { |
1202 | kvmppc_core_queue_inst_storage(vcpu, |
1203 | shadow_srr1 & 0x58000000); |
1204 | r = RESUME_GUEST; |
1205 | } |
1206 | break; |
1207 | } |
1208 | case BOOK3S_INTERRUPT_DATA_STORAGE: |
1209 | { |
1210 | ulong dar = kvmppc_get_fault_dar(vcpu); |
1211 | u32 fault_dsisr = vcpu->arch.fault_dsisr; |
1212 | vcpu->stat.pf_storage++; |
1213 | |
1214 | #ifdef CONFIG_PPC_BOOK3S_32 |
/* We mark segments as unused when invalidating them, so treat
 * the resulting fault as a segment fault. */
1217 | { |
1218 | struct kvmppc_book3s_shadow_vcpu *svcpu; |
1219 | u32 sr; |
1220 | |
1221 | svcpu = svcpu_get(vcpu); |
1222 | sr = svcpu->sr[dar >> SID_SHIFT]; |
1223 | svcpu_put(svcpu); |
1224 | if (sr == SR_INVALID) { |
1225 | kvmppc_mmu_map_segment(vcpu, dar); |
1226 | r = RESUME_GUEST; |
1227 | break; |
1228 | } |
1229 | } |
1230 | #endif |
1231 | |
1232 | /* |
1233 | * We need to handle missing shadow PTEs, and |
1234 | * protection faults due to us mapping a page read-only |
1235 | * when the guest thinks it is writable. |
1236 | */ |
1237 | if (fault_dsisr & (DSISR_NOHPTE | DSISR_PROTFAULT)) { |
int idx = srcu_read_lock(&vcpu->kvm->srcu);
r = kvmppc_handle_pagefault(vcpu, dar, exit_nr);
srcu_read_unlock(&vcpu->kvm->srcu, idx);
1241 | } else { |
1242 | kvmppc_core_queue_data_storage(vcpu, 0, dar, fault_dsisr); |
1243 | r = RESUME_GUEST; |
1244 | } |
1245 | break; |
1246 | } |
1247 | case BOOK3S_INTERRUPT_DATA_SEGMENT: |
1248 | if (kvmppc_mmu_map_segment(vcpu, kvmppc_get_fault_dar(vcpu)) < 0) { |
1249 | kvmppc_set_dar(vcpu, kvmppc_get_fault_dar(vcpu)); |
1250 | kvmppc_book3s_queue_irqprio(vcpu, |
1251 | BOOK3S_INTERRUPT_DATA_SEGMENT); |
1252 | } |
1253 | r = RESUME_GUEST; |
1254 | break; |
1255 | case BOOK3S_INTERRUPT_INST_SEGMENT: |
1256 | if (kvmppc_mmu_map_segment(vcpu, kvmppc_get_pc(vcpu)) < 0) { |
1257 | kvmppc_book3s_queue_irqprio(vcpu, |
1258 | BOOK3S_INTERRUPT_INST_SEGMENT); |
1259 | } |
1260 | r = RESUME_GUEST; |
1261 | break; |
1262 | /* We're good on these - the host merely wanted to get our attention */ |
1263 | case BOOK3S_INTERRUPT_DECREMENTER: |
1264 | case BOOK3S_INTERRUPT_HV_DECREMENTER: |
1265 | case BOOK3S_INTERRUPT_DOORBELL: |
1266 | case BOOK3S_INTERRUPT_H_DOORBELL: |
1267 | vcpu->stat.dec_exits++; |
1268 | r = RESUME_GUEST; |
1269 | break; |
1270 | case BOOK3S_INTERRUPT_EXTERNAL: |
1271 | case BOOK3S_INTERRUPT_EXTERNAL_HV: |
1272 | case BOOK3S_INTERRUPT_H_VIRT: |
1273 | vcpu->stat.ext_intr_exits++; |
1274 | r = RESUME_GUEST; |
1275 | break; |
1276 | case BOOK3S_INTERRUPT_HMI: |
1277 | case BOOK3S_INTERRUPT_PERFMON: |
1278 | case BOOK3S_INTERRUPT_SYSTEM_RESET: |
1279 | r = RESUME_GUEST; |
1280 | break; |
1281 | case BOOK3S_INTERRUPT_PROGRAM: |
1282 | case BOOK3S_INTERRUPT_H_EMUL_ASSIST: |
1283 | r = kvmppc_exit_pr_progint(vcpu, exit_nr); |
1284 | break; |
1285 | case BOOK3S_INTERRUPT_SYSCALL: |
1286 | { |
1287 | ppc_inst_t last_sc; |
1288 | int emul; |
1289 | |
1290 | /* Get last sc for papr */ |
1291 | if (vcpu->arch.papr_enabled) { |
1292 | /* The sc instruction points SRR0 to the next inst */ |
1293 | emul = kvmppc_get_last_inst(vcpu, INST_SC, &last_sc); |
1294 | if (emul != EMULATE_DONE) { |
1295 | kvmppc_set_pc(vcpu, kvmppc_get_pc(vcpu) - 4); |
1296 | r = RESUME_GUEST; |
1297 | break; |
1298 | } |
1299 | } |
1300 | |
1301 | if (vcpu->arch.papr_enabled && |
1302 | (ppc_inst_val(last_sc) == 0x44000022) && |
1303 | !(kvmppc_get_msr(vcpu) & MSR_PR)) { |
1304 | /* SC 1 papr hypercalls */ |
1305 | ulong cmd = kvmppc_get_gpr(vcpu, 3); |
1306 | int i; |
1307 | |
1308 | #ifdef CONFIG_PPC_BOOK3S_64 |
1309 | if (kvmppc_h_pr(vcpu, cmd) == EMULATE_DONE) { |
1310 | r = RESUME_GUEST; |
1311 | break; |
1312 | } |
1313 | #endif |
1314 | |
1315 | run->papr_hcall.nr = cmd; |
1316 | for (i = 0; i < 9; ++i) { |
1317 | ulong gpr = kvmppc_get_gpr(vcpu, 4 + i); |
1318 | run->papr_hcall.args[i] = gpr; |
1319 | } |
1320 | run->exit_reason = KVM_EXIT_PAPR_HCALL; |
1321 | vcpu->arch.hcall_needed = 1; |
1322 | r = RESUME_HOST; |
1323 | } else if (vcpu->arch.osi_enabled && |
1324 | (((u32)kvmppc_get_gpr(vcpu, 3)) == OSI_SC_MAGIC_R3) && |
1325 | (((u32)kvmppc_get_gpr(vcpu, 4)) == OSI_SC_MAGIC_R4)) { |
1326 | /* MOL hypercalls */ |
1327 | u64 *gprs = run->osi.gprs; |
1328 | int i; |
1329 | |
1330 | run->exit_reason = KVM_EXIT_OSI; |
1331 | for (i = 0; i < 32; i++) |
1332 | gprs[i] = kvmppc_get_gpr(vcpu, i); |
1333 | vcpu->arch.osi_needed = 1; |
1334 | r = RESUME_HOST_NV; |
1335 | } else if (!(kvmppc_get_msr(vcpu) & MSR_PR) && |
1336 | (((u32)kvmppc_get_gpr(vcpu, 0)) == KVM_SC_MAGIC_R0)) { |
1337 | /* KVM PV hypercalls */ |
1338 | kvmppc_set_gpr(vcpu, 3, kvmppc_kvm_pv(vcpu)); |
1339 | r = RESUME_GUEST; |
1340 | } else { |
1341 | /* Guest syscalls */ |
1342 | vcpu->stat.syscall_exits++; |
1343 | kvmppc_book3s_queue_irqprio(vcpu, exit_nr); |
1344 | r = RESUME_GUEST; |
1345 | } |
1346 | break; |
1347 | } |
1348 | case BOOK3S_INTERRUPT_FP_UNAVAIL: |
1349 | case BOOK3S_INTERRUPT_ALTIVEC: |
1350 | case BOOK3S_INTERRUPT_VSX: |
1351 | { |
1352 | int ext_msr = 0; |
1353 | int emul; |
1354 | ppc_inst_t last_inst; |
1355 | |
1356 | if (vcpu->arch.hflags & BOOK3S_HFLAG_PAIRED_SINGLE) { |
1357 | /* Do paired single instruction emulation */ |
1358 | emul = kvmppc_get_last_inst(vcpu, INST_GENERIC, |
1359 | &last_inst); |
1360 | if (emul == EMULATE_DONE) |
1361 | r = kvmppc_exit_pr_progint(vcpu, exit_nr); |
1362 | else |
1363 | r = RESUME_GUEST; |
1364 | |
1365 | break; |
1366 | } |
1367 | |
1368 | /* Enable external provider */ |
1369 | switch (exit_nr) { |
1370 | case BOOK3S_INTERRUPT_FP_UNAVAIL: |
1371 | ext_msr = MSR_FP; |
1372 | break; |
1373 | |
1374 | case BOOK3S_INTERRUPT_ALTIVEC: |
1375 | ext_msr = MSR_VEC; |
1376 | break; |
1377 | |
1378 | case BOOK3S_INTERRUPT_VSX: |
1379 | ext_msr = MSR_VSX; |
1380 | break; |
1381 | } |
1382 | |
r = kvmppc_handle_ext(vcpu, exit_nr, ext_msr);
1384 | break; |
1385 | } |
1386 | case BOOK3S_INTERRUPT_ALIGNMENT: |
1387 | { |
1388 | ppc_inst_t last_inst; |
1389 | int emul = kvmppc_get_last_inst(vcpu, INST_GENERIC, &last_inst); |
1390 | |
1391 | if (emul == EMULATE_DONE) { |
1392 | u32 dsisr; |
1393 | u64 dar; |
1394 | |
1395 | dsisr = kvmppc_alignment_dsisr(vcpu, ppc_inst_val(last_inst)); |
1396 | dar = kvmppc_alignment_dar(vcpu, ppc_inst_val(last_inst)); |
1397 | |
1398 | kvmppc_set_dsisr(vcpu, dsisr); |
1399 | kvmppc_set_dar(vcpu, dar); |
1400 | |
1401 | kvmppc_book3s_queue_irqprio(vcpu, exit_nr); |
1402 | } |
1403 | r = RESUME_GUEST; |
1404 | break; |
1405 | } |
1406 | #ifdef CONFIG_PPC_BOOK3S_64 |
1407 | case BOOK3S_INTERRUPT_FAC_UNAVAIL: |
1408 | r = kvmppc_handle_fac(vcpu, vcpu->arch.shadow_fscr >> 56); |
1409 | break; |
1410 | #endif |
1411 | case BOOK3S_INTERRUPT_MACHINE_CHECK: |
1412 | kvmppc_book3s_queue_irqprio(vcpu, exit_nr); |
1413 | r = RESUME_GUEST; |
1414 | break; |
1415 | case BOOK3S_INTERRUPT_TRACE: |
1416 | if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP) { |
1417 | run->exit_reason = KVM_EXIT_DEBUG; |
1418 | r = RESUME_HOST; |
1419 | } else { |
1420 | kvmppc_book3s_queue_irqprio(vcpu, exit_nr); |
1421 | r = RESUME_GUEST; |
1422 | } |
1423 | break; |
1424 | default: |
1425 | { |
1426 | ulong shadow_srr1 = vcpu->arch.shadow_srr1; |
1427 | /* Ugh - bork here! What did we get? */ |
printk(KERN_EMERG "exit_nr=0x%x | pc=0x%lx | msr=0x%lx\n",
1429 | exit_nr, kvmppc_get_pc(vcpu), shadow_srr1); |
1430 | r = RESUME_HOST; |
1431 | BUG(); |
1432 | break; |
1433 | } |
1434 | } |
1435 | |
1436 | if (!(r & RESUME_HOST)) { |
1437 | /* To avoid clobbering exit_reason, only check for signals if |
1438 | * we aren't already exiting to userspace for some other |
1439 | * reason. */ |
1440 | |
1441 | /* |
1442 | * Interrupts could be timers for the guest which we have to |
1443 | * inject again, so let's postpone them until we're in the guest |
1444 | * and if we really did time things so badly, then we just exit |
1445 | * again due to a host external interrupt. |
1446 | */ |
1447 | s = kvmppc_prepare_to_enter(vcpu); |
1448 | if (s <= 0) |
1449 | r = s; |
1450 | else { |
1451 | /* interrupts now hard-disabled */ |
1452 | kvmppc_fix_ee_before_entry(); |
1453 | } |
1454 | |
1455 | kvmppc_handle_lost_ext(vcpu); |
1456 | } |
1457 | |
1458 | trace_kvm_book3s_reenter(r, vcpu); |
1459 | |
1460 | return r; |
1461 | } |
1462 | |
1463 | static int kvm_arch_vcpu_ioctl_get_sregs_pr(struct kvm_vcpu *vcpu, |
1464 | struct kvm_sregs *sregs) |
1465 | { |
1466 | struct kvmppc_vcpu_book3s *vcpu3s = to_book3s(vcpu); |
1467 | int i; |
1468 | |
1469 | sregs->pvr = vcpu->arch.pvr; |
1470 | |
1471 | sregs->u.s.sdr1 = to_book3s(vcpu)->sdr1; |
1472 | if (vcpu->arch.hflags & BOOK3S_HFLAG_SLB) { |
1473 | for (i = 0; i < 64; i++) { |
1474 | sregs->u.s.ppc64.slb[i].slbe = vcpu->arch.slb[i].orige | i; |
1475 | sregs->u.s.ppc64.slb[i].slbv = vcpu->arch.slb[i].origv; |
1476 | } |
1477 | } else { |
1478 | for (i = 0; i < 16; i++) |
1479 | sregs->u.s.ppc32.sr[i] = kvmppc_get_sr(vcpu, i); |
1480 | |
1481 | for (i = 0; i < 8; i++) { |
1482 | sregs->u.s.ppc32.ibat[i] = vcpu3s->ibat[i].raw; |
1483 | sregs->u.s.ppc32.dbat[i] = vcpu3s->dbat[i].raw; |
1484 | } |
1485 | } |
1486 | |
1487 | return 0; |
1488 | } |
1489 | |
1490 | static int kvm_arch_vcpu_ioctl_set_sregs_pr(struct kvm_vcpu *vcpu, |
1491 | struct kvm_sregs *sregs) |
1492 | { |
1493 | struct kvmppc_vcpu_book3s *vcpu3s = to_book3s(vcpu); |
1494 | int i; |
1495 | |
kvmppc_set_pvr_pr(vcpu, sregs->pvr);
1497 | |
1498 | vcpu3s->sdr1 = sregs->u.s.sdr1; |
1499 | #ifdef CONFIG_PPC_BOOK3S_64 |
1500 | if (vcpu->arch.hflags & BOOK3S_HFLAG_SLB) { |
1501 | /* Flush all SLB entries */ |
1502 | vcpu->arch.mmu.slbmte(vcpu, 0, 0); |
1503 | vcpu->arch.mmu.slbia(vcpu); |
1504 | |
1505 | for (i = 0; i < 64; i++) { |
1506 | u64 rb = sregs->u.s.ppc64.slb[i].slbe; |
1507 | u64 rs = sregs->u.s.ppc64.slb[i].slbv; |
1508 | |
1509 | if (rb & SLB_ESID_V) |
1510 | vcpu->arch.mmu.slbmte(vcpu, rs, rb); |
1511 | } |
1512 | } else |
1513 | #endif |
1514 | { |
1515 | for (i = 0; i < 16; i++) { |
1516 | vcpu->arch.mmu.mtsrin(vcpu, i, sregs->u.s.ppc32.sr[i]); |
1517 | } |
1518 | for (i = 0; i < 8; i++) { |
1519 | kvmppc_set_bat(vcpu, &(vcpu3s->ibat[i]), false, |
1520 | (u32)sregs->u.s.ppc32.ibat[i]); |
1521 | kvmppc_set_bat(vcpu, &(vcpu3s->ibat[i]), true, |
1522 | (u32)(sregs->u.s.ppc32.ibat[i] >> 32)); |
1523 | kvmppc_set_bat(vcpu, &(vcpu3s->dbat[i]), false, |
1524 | (u32)sregs->u.s.ppc32.dbat[i]); |
1525 | kvmppc_set_bat(vcpu, &(vcpu3s->dbat[i]), true, |
1526 | (u32)(sregs->u.s.ppc32.dbat[i] >> 32)); |
1527 | } |
1528 | } |
1529 | |
1530 | /* Flush the MMU after messing with the segments */ |
1531 | kvmppc_mmu_pte_flush(vcpu, 0, 0); |
1532 | |
1533 | return 0; |
1534 | } |
1535 | |
1536 | static int kvmppc_get_one_reg_pr(struct kvm_vcpu *vcpu, u64 id, |
1537 | union kvmppc_one_reg *val) |
1538 | { |
1539 | int r = 0; |
1540 | |
1541 | switch (id) { |
1542 | case KVM_REG_PPC_DEBUG_INST: |
1543 | *val = get_reg_val(id, KVMPPC_INST_SW_BREAKPOINT); |
1544 | break; |
1545 | case KVM_REG_PPC_HIOR: |
1546 | *val = get_reg_val(id, to_book3s(vcpu)->hior); |
1547 | break; |
1548 | case KVM_REG_PPC_VTB: |
1549 | *val = get_reg_val(id, to_book3s(vcpu)->vtb); |
1550 | break; |
1551 | case KVM_REG_PPC_LPCR: |
1552 | case KVM_REG_PPC_LPCR_64: |
1553 | /* |
1554 | * We are only interested in the LPCR_ILE bit |
1555 | */ |
1556 | if (vcpu->arch.intr_msr & MSR_LE) |
1557 | *val = get_reg_val(id, LPCR_ILE); |
1558 | else |
1559 | *val = get_reg_val(id, 0); |
1560 | break; |
1561 | #ifdef CONFIG_PPC_TRANSACTIONAL_MEM |
1562 | case KVM_REG_PPC_TFHAR: |
1563 | *val = get_reg_val(id, vcpu->arch.tfhar); |
1564 | break; |
1565 | case KVM_REG_PPC_TFIAR: |
1566 | *val = get_reg_val(id, vcpu->arch.tfiar); |
1567 | break; |
1568 | case KVM_REG_PPC_TEXASR: |
1569 | *val = get_reg_val(id, vcpu->arch.texasr); |
1570 | break; |
1571 | case KVM_REG_PPC_TM_GPR0 ... KVM_REG_PPC_TM_GPR31: |
1572 | *val = get_reg_val(id, |
1573 | vcpu->arch.gpr_tm[id-KVM_REG_PPC_TM_GPR0]); |
1574 | break; |
1575 | case KVM_REG_PPC_TM_VSR0 ... KVM_REG_PPC_TM_VSR63: |
1576 | { |
1577 | int i, j; |
1578 | |
1579 | i = id - KVM_REG_PPC_TM_VSR0; |
1580 | if (i < 32) |
1581 | for (j = 0; j < TS_FPRWIDTH; j++) |
1582 | val->vsxval[j] = vcpu->arch.fp_tm.fpr[i][j]; |
1583 | else { |
1584 | if (cpu_has_feature(CPU_FTR_ALTIVEC)) |
1585 | val->vval = vcpu->arch.vr_tm.vr[i-32]; |
1586 | else |
1587 | r = -ENXIO; |
1588 | } |
1589 | break; |
1590 | } |
1591 | case KVM_REG_PPC_TM_CR: |
1592 | *val = get_reg_val(id, vcpu->arch.cr_tm); |
1593 | break; |
1594 | case KVM_REG_PPC_TM_XER: |
1595 | *val = get_reg_val(id, vcpu->arch.xer_tm); |
1596 | break; |
1597 | case KVM_REG_PPC_TM_LR: |
1598 | *val = get_reg_val(id, vcpu->arch.lr_tm); |
1599 | break; |
1600 | case KVM_REG_PPC_TM_CTR: |
1601 | *val = get_reg_val(id, vcpu->arch.ctr_tm); |
1602 | break; |
1603 | case KVM_REG_PPC_TM_FPSCR: |
1604 | *val = get_reg_val(id, vcpu->arch.fp_tm.fpscr); |
1605 | break; |
1606 | case KVM_REG_PPC_TM_AMR: |
1607 | *val = get_reg_val(id, vcpu->arch.amr_tm); |
1608 | break; |
1609 | case KVM_REG_PPC_TM_PPR: |
1610 | *val = get_reg_val(id, vcpu->arch.ppr_tm); |
1611 | break; |
1612 | case KVM_REG_PPC_TM_VRSAVE: |
1613 | *val = get_reg_val(id, vcpu->arch.vrsave_tm); |
1614 | break; |
1615 | case KVM_REG_PPC_TM_VSCR: |
1616 | if (cpu_has_feature(CPU_FTR_ALTIVEC)) |
1617 | *val = get_reg_val(id, vcpu->arch.vr_tm.vscr.u[3]); |
1618 | else |
1619 | r = -ENXIO; |
1620 | break; |
1621 | case KVM_REG_PPC_TM_DSCR: |
1622 | *val = get_reg_val(id, vcpu->arch.dscr_tm); |
1623 | break; |
1624 | case KVM_REG_PPC_TM_TAR: |
1625 | *val = get_reg_val(id, vcpu->arch.tar_tm); |
1626 | break; |
1627 | #endif |
1628 | default: |
1629 | r = -EINVAL; |
1630 | break; |
1631 | } |
1632 | |
1633 | return r; |
1634 | } |
1635 | |
1636 | static void kvmppc_set_lpcr_pr(struct kvm_vcpu *vcpu, u64 new_lpcr) |
1637 | { |
1638 | if (new_lpcr & LPCR_ILE) |
1639 | vcpu->arch.intr_msr |= MSR_LE; |
1640 | else |
1641 | vcpu->arch.intr_msr &= ~MSR_LE; |
1642 | } |
1643 | |
1644 | static int kvmppc_set_one_reg_pr(struct kvm_vcpu *vcpu, u64 id, |
1645 | union kvmppc_one_reg *val) |
1646 | { |
1647 | int r = 0; |
1648 | |
1649 | switch (id) { |
1650 | case KVM_REG_PPC_HIOR: |
1651 | to_book3s(vcpu)->hior = set_reg_val(id, *val); |
1652 | to_book3s(vcpu)->hior_explicit = true; |
1653 | break; |
1654 | case KVM_REG_PPC_VTB: |
1655 | to_book3s(vcpu)->vtb = set_reg_val(id, *val); |
1656 | break; |
1657 | case KVM_REG_PPC_LPCR: |
1658 | case KVM_REG_PPC_LPCR_64: |
kvmppc_set_lpcr_pr(vcpu, set_reg_val(id, *val));
1660 | break; |
1661 | #ifdef CONFIG_PPC_TRANSACTIONAL_MEM |
1662 | case KVM_REG_PPC_TFHAR: |
1663 | vcpu->arch.tfhar = set_reg_val(id, *val); |
1664 | break; |
1665 | case KVM_REG_PPC_TFIAR: |
1666 | vcpu->arch.tfiar = set_reg_val(id, *val); |
1667 | break; |
1668 | case KVM_REG_PPC_TEXASR: |
1669 | vcpu->arch.texasr = set_reg_val(id, *val); |
1670 | break; |
1671 | case KVM_REG_PPC_TM_GPR0 ... KVM_REG_PPC_TM_GPR31: |
1672 | vcpu->arch.gpr_tm[id - KVM_REG_PPC_TM_GPR0] = |
1673 | set_reg_val(id, *val); |
1674 | break; |
1675 | case KVM_REG_PPC_TM_VSR0 ... KVM_REG_PPC_TM_VSR63: |
1676 | { |
1677 | int i, j; |
1678 | |
1679 | i = id - KVM_REG_PPC_TM_VSR0; |
1680 | if (i < 32) |
1681 | for (j = 0; j < TS_FPRWIDTH; j++) |
1682 | vcpu->arch.fp_tm.fpr[i][j] = val->vsxval[j]; |
1683 | else |
1684 | if (cpu_has_feature(CPU_FTR_ALTIVEC)) |
1685 | vcpu->arch.vr_tm.vr[i-32] = val->vval; |
1686 | else |
1687 | r = -ENXIO; |
1688 | break; |
1689 | } |
1690 | case KVM_REG_PPC_TM_CR: |
1691 | vcpu->arch.cr_tm = set_reg_val(id, *val); |
1692 | break; |
1693 | case KVM_REG_PPC_TM_XER: |
1694 | vcpu->arch.xer_tm = set_reg_val(id, *val); |
1695 | break; |
1696 | case KVM_REG_PPC_TM_LR: |
1697 | vcpu->arch.lr_tm = set_reg_val(id, *val); |
1698 | break; |
1699 | case KVM_REG_PPC_TM_CTR: |
1700 | vcpu->arch.ctr_tm = set_reg_val(id, *val); |
1701 | break; |
1702 | case KVM_REG_PPC_TM_FPSCR: |
1703 | vcpu->arch.fp_tm.fpscr = set_reg_val(id, *val); |
1704 | break; |
1705 | case KVM_REG_PPC_TM_AMR: |
1706 | vcpu->arch.amr_tm = set_reg_val(id, *val); |
1707 | break; |
1708 | case KVM_REG_PPC_TM_PPR: |
1709 | vcpu->arch.ppr_tm = set_reg_val(id, *val); |
1710 | break; |
1711 | case KVM_REG_PPC_TM_VRSAVE: |
1712 | vcpu->arch.vrsave_tm = set_reg_val(id, *val); |
1713 | break; |
1714 | case KVM_REG_PPC_TM_VSCR: |
1715 | if (cpu_has_feature(CPU_FTR_ALTIVEC)) |
			vcpu->arch.vr_tm.vscr.u[3] = set_reg_val(id, *val);
1717 | else |
1718 | r = -ENXIO; |
1719 | break; |
1720 | case KVM_REG_PPC_TM_DSCR: |
1721 | vcpu->arch.dscr_tm = set_reg_val(id, *val); |
1722 | break; |
1723 | case KVM_REG_PPC_TM_TAR: |
1724 | vcpu->arch.tar_tm = set_reg_val(id, *val); |
1725 | break; |
1726 | #endif |
1727 | default: |
1728 | r = -EINVAL; |
1729 | break; |
1730 | } |
1731 | |
1732 | return r; |
1733 | } |
1734 | |
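/*
 * Allocate and set up the per-vcpu PR state: the book3s struct, the
 * 32-bit-only shadow vcpu, the page shared with the guest, and the
 * shadow MMU context.
 */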
1735 | static int kvmppc_core_vcpu_create_pr(struct kvm_vcpu *vcpu) |
1736 | { |
1737 | struct kvmppc_vcpu_book3s *vcpu_book3s; |
1738 | unsigned long p; |
1739 | int err; |
1740 | |
1741 | err = -ENOMEM; |
1742 | |
1743 | vcpu_book3s = vzalloc(sizeof(struct kvmppc_vcpu_book3s)); |
1744 | if (!vcpu_book3s) |
1745 | goto out; |
1746 | vcpu->arch.book3s = vcpu_book3s; |
1747 | |
1748 | #ifdef CONFIG_KVM_BOOK3S_32_HANDLER |
1749 | vcpu->arch.shadow_vcpu = |
1750 | kzalloc(sizeof(*vcpu->arch.shadow_vcpu), GFP_KERNEL); |
1751 | if (!vcpu->arch.shadow_vcpu) |
1752 | goto free_vcpu3s; |
1753 | #endif |
1754 | |
1755 | p = __get_free_page(GFP_KERNEL|__GFP_ZERO); |
1756 | if (!p) |
1757 | goto free_shadow_vcpu; |
1758 | vcpu->arch.shared = (void *)p; |
1759 | #ifdef CONFIG_PPC_BOOK3S_64 |
1760 | /* Always start the shared struct in native endian mode */ |
1761 | #ifdef __BIG_ENDIAN__ |
1762 | vcpu->arch.shared_big_endian = true; |
1763 | #else |
1764 | vcpu->arch.shared_big_endian = false; |
1765 | #endif |
1766 | |
1767 | /* |
1768 | * Default to the same as the host if we're on sufficiently |
1769 | * recent machine that we have 1TB segments; |
1770 | * otherwise default to PPC970FX. |
1771 | */ |
1772 | vcpu->arch.pvr = 0x3C0301; |
1773 | if (mmu_has_feature(MMU_FTR_1T_SEGMENT)) |
1774 | vcpu->arch.pvr = mfspr(SPRN_PVR); |
1775 | vcpu->arch.intr_msr = MSR_SF; |
1776 | #else |
1777 | /* default to book3s_32 (750) */ |
1778 | vcpu->arch.pvr = 0x84202; |
1779 | vcpu->arch.intr_msr = 0; |
1780 | #endif |
	kvmppc_set_pvr_pr(vcpu, vcpu->arch.pvr);
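	/* PR KVM always presents a 64-entry SLB (see the smmu_info handler) */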
1782 | vcpu->arch.slb_nr = 64; |
1783 | |
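	/* The guest runs in problem state on the host, so use a user-mode MSR */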
1784 | vcpu->arch.shadow_msr = MSR_USER64 & ~MSR_LE; |
1785 | |
1786 | err = kvmppc_mmu_init_pr(vcpu); |
1787 | if (err < 0) |
1788 | goto free_shared_page; |
1789 | |
1790 | return 0; |
1791 | |
1792 | free_shared_page: |
1793 | free_page((unsigned long)vcpu->arch.shared); |
1794 | free_shadow_vcpu: |
1795 | #ifdef CONFIG_KVM_BOOK3S_32_HANDLER |
1796 | kfree(vcpu->arch.shadow_vcpu); |
1797 | free_vcpu3s: |
1798 | #endif |
	vfree(vcpu_book3s);
1800 | out: |
1801 | return err; |
1802 | } |
1803 | |
1804 | static void kvmppc_core_vcpu_free_pr(struct kvm_vcpu *vcpu) |
1805 | { |
1806 | struct kvmppc_vcpu_book3s *vcpu_book3s = to_book3s(vcpu); |
1807 | |
1808 | kvmppc_mmu_destroy_pr(vcpu); |
1809 | free_page((unsigned long)vcpu->arch.shared & PAGE_MASK); |
1810 | #ifdef CONFIG_KVM_BOOK3S_32_HANDLER |
1811 | kfree(vcpu->arch.shadow_vcpu); |
1812 | #endif |
	vfree(vcpu_book3s);
1814 | } |
1815 | |
1816 | static int kvmppc_vcpu_run_pr(struct kvm_vcpu *vcpu) |
1817 | { |
1818 | int ret; |
1819 | |
1820 | /* Check if we can run the vcpu at all */ |
1821 | if (!vcpu->arch.sane) { |
1822 | vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR; |
1823 | ret = -EINVAL; |
1824 | goto out; |
1825 | } |
1826 | |
1827 | kvmppc_setup_debug(vcpu); |
1828 | |
1829 | /* |
1830 | * Interrupts could be timers for the guest which we have to inject |
1831 | * again, so let's postpone them until we're in the guest and if we |
1832 | * really did time things so badly, then we just exit again due to |
1833 | * a host external interrupt. |
1834 | */ |
1835 | ret = kvmppc_prepare_to_enter(vcpu); |
1836 | if (ret <= 0) |
1837 | goto out; |
1838 | /* interrupts now hard-disabled */ |
1839 | |
1840 | /* Save FPU, Altivec and VSX state */ |
1841 | giveup_all(current); |
1842 | |
1843 | /* Preload FPU if it's enabled */ |
1844 | if (kvmppc_get_msr(vcpu) & MSR_FP) |
1845 | kvmppc_handle_ext(vcpu, BOOK3S_INTERRUPT_FP_UNAVAIL, MSR_FP); |
1846 | |
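	/* Reconcile lazy-irq state before re-enabling EE on guest entry */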
1847 | kvmppc_fix_ee_before_entry(); |
1848 | |
1849 | ret = __kvmppc_vcpu_run(vcpu); |
1850 | |
1851 | kvmppc_clear_debug(vcpu); |
1852 | |
	/*
	 * No need for guest_exit. It's done in handle_exit.
	 * We also get here with interrupts enabled.
	 */
1855 | |
1856 | /* Make sure we save the guest FPU/Altivec/VSX state */ |
1857 | kvmppc_giveup_ext(vcpu, MSR_FP | MSR_VEC | MSR_VSX); |
1858 | |
1859 | /* Make sure we save the guest TAR/EBB/DSCR state */ |
1860 | kvmppc_giveup_fac(vcpu, FSCR_TAR_LG); |
1861 | |
1862 | srr_regs_clobbered(); |
1863 | out: |
1864 | vcpu->mode = OUTSIDE_GUEST_MODE; |
1865 | return ret; |
1866 | } |
1867 | |
1868 | /* |
1869 | * Get (and clear) the dirty memory log for a memory slot. |
1870 | */ |
1871 | static int kvm_vm_ioctl_get_dirty_log_pr(struct kvm *kvm, |
1872 | struct kvm_dirty_log *log) |
1873 | { |
1874 | struct kvm_memory_slot *memslot; |
1875 | struct kvm_vcpu *vcpu; |
1876 | ulong ga, ga_end; |
1877 | int is_dirty = 0; |
1878 | int r; |
1879 | unsigned long n; |
1880 | |
1881 | mutex_lock(&kvm->slots_lock); |
1882 | |
1883 | r = kvm_get_dirty_log(kvm, log, &is_dirty, &memslot); |
1884 | if (r) |
1885 | goto out; |
1886 | |
1887 | /* If nothing is dirty, don't bother messing with page tables. */ |
1888 | if (is_dirty) { |
1889 | ga = memslot->base_gfn << PAGE_SHIFT; |
1890 | ga_end = ga + (memslot->npages << PAGE_SHIFT); |
1891 | |
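		/*
		 * Flush the shadow PTEs covering the slot so that future
		 * guest writes fault again and get re-tracked once the
		 * dirty bitmap below has been cleared.
		 */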
1892 | kvm_for_each_vcpu(n, vcpu, kvm) |
1893 | kvmppc_mmu_pte_pflush(vcpu, ga, ga_end); |
1894 | |
1895 | n = kvm_dirty_bitmap_bytes(memslot); |
1896 | memset(memslot->dirty_bitmap, 0, n); |
1897 | } |
1898 | |
1899 | r = 0; |
1900 | out: |
	mutex_unlock(&kvm->slots_lock);
1902 | return r; |
1903 | } |
1904 | |
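/*
 * PR KVM builds its shadow mappings on demand, so the memslot hooks
 * below have nothing to prepare, commit, flush or free.
 */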
1905 | static void kvmppc_core_flush_memslot_pr(struct kvm *kvm, |
1906 | struct kvm_memory_slot *memslot) |
1907 | { |
1908 | return; |
1909 | } |
1910 | |
1911 | static int kvmppc_core_prepare_memory_region_pr(struct kvm *kvm, |
1912 | const struct kvm_memory_slot *old, |
1913 | struct kvm_memory_slot *new, |
1914 | enum kvm_mr_change change) |
1915 | { |
1916 | return 0; |
1917 | } |
1918 | |
1919 | static void kvmppc_core_commit_memory_region_pr(struct kvm *kvm, |
1920 | struct kvm_memory_slot *old, |
1921 | const struct kvm_memory_slot *new, |
1922 | enum kvm_mr_change change) |
1923 | { |
1924 | return; |
1925 | } |
1926 | |
1927 | static void kvmppc_core_free_memslot_pr(struct kvm_memory_slot *slot) |
1928 | { |
1929 | return; |
1930 | } |
1931 | |
1932 | #ifdef CONFIG_PPC64 |
1933 | static int kvm_vm_ioctl_get_smmu_info_pr(struct kvm *kvm, |
1934 | struct kvm_ppc_smmu_info *info) |
1935 | { |
1936 | long int i; |
1937 | struct kvm_vcpu *vcpu; |
1938 | |
1939 | info->flags = 0; |
1940 | |
1941 | /* SLB is always 64 entries */ |
1942 | info->slb_size = 64; |
1943 | |
1944 | /* Standard 4k base page size segment */ |
1945 | info->sps[0].page_shift = 12; |
1946 | info->sps[0].slb_enc = 0; |
1947 | info->sps[0].enc[0].page_shift = 12; |
1948 | info->sps[0].enc[0].pte_enc = 0; |
1949 | |
1950 | /* |
1951 | * 64k large page size. |
1952 | * We only want to put this in if the CPUs we're emulating |
1953 | * support it, but unfortunately we don't have a vcpu easily |
1954 | * to hand here to test. Just pick the first vcpu, and if |
1955 | * that doesn't exist yet, report the minimum capability, |
1956 | * i.e., no 64k pages. |
1957 | * 1T segment support goes along with 64k pages. |
1958 | */ |
1959 | i = 1; |
1960 | vcpu = kvm_get_vcpu(kvm, 0); |
1961 | if (vcpu && (vcpu->arch.hflags & BOOK3S_HFLAG_MULTI_PGSIZE)) { |
1962 | info->flags = KVM_PPC_1T_SEGMENTS; |
1963 | info->sps[i].page_shift = 16; |
1964 | info->sps[i].slb_enc = SLB_VSID_L | SLB_VSID_LP_01; |
1965 | info->sps[i].enc[0].page_shift = 16; |
1966 | info->sps[i].enc[0].pte_enc = 1; |
1967 | ++i; |
1968 | } |
1969 | |
1970 | /* Standard 16M large page size segment */ |
1971 | info->sps[i].page_shift = 24; |
1972 | info->sps[i].slb_enc = SLB_VSID_L; |
1973 | info->sps[i].enc[0].page_shift = 24; |
1974 | info->sps[i].enc[0].pte_enc = 0; |
1975 | |
1976 | return 0; |
1977 | } |
1978 | |
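/*
 * Backend for the KVM_PPC_CONFIGURE_V3_MMU ioctl: on POWER9, PR KVM
 * only supports plain HPT mode, so accept nothing but an all-zero
 * (no radix, no process table) configuration.
 */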
1979 | static int kvm_configure_mmu_pr(struct kvm *kvm, struct kvm_ppc_mmuv3_cfg *cfg) |
1980 | { |
1981 | if (!cpu_has_feature(CPU_FTR_ARCH_300)) |
1982 | return -ENODEV; |
1983 | /* Require flags and process table base and size to all be zero. */ |
1984 | if (cfg->flags || cfg->process_table) |
1985 | return -EINVAL; |
1986 | return 0; |
1987 | } |
1988 | |
1989 | #else |
1990 | static int kvm_vm_ioctl_get_smmu_info_pr(struct kvm *kvm, |
1991 | struct kvm_ppc_smmu_info *info) |
1992 | { |
1993 | /* We should not get called */ |
1994 | BUG(); |
1995 | return 0; |
1996 | } |
1997 | #endif /* CONFIG_PPC64 */ |
1998 | |
static unsigned int kvm_global_user_count;
2000 | static DEFINE_SPINLOCK(kvm_global_user_count_lock); |
2001 | |
2002 | static int kvmppc_core_init_vm_pr(struct kvm *kvm) |
2003 | { |
2004 | mutex_init(&kvm->arch.hpt_mutex); |
2005 | |
2006 | #ifdef CONFIG_PPC_BOOK3S_64 |
2007 | /* Start out with the default set of hcalls enabled */ |
2008 | kvmppc_pr_init_default_hcalls(kvm); |
2009 | #endif |
2010 | |
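	/*
	 * PR KVM relies on exceptions entering the host with relocation
	 * off so that its trampolines can intercept them; keep a global
	 * refcount so reloc-on-exceptions stays disabled while any PR VM
	 * exists.
	 */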
2011 | if (firmware_has_feature(FW_FEATURE_SET_MODE)) { |
		spin_lock(&kvm_global_user_count_lock);
2013 | if (++kvm_global_user_count == 1) |
2014 | pseries_disable_reloc_on_exc(); |
		spin_unlock(&kvm_global_user_count_lock);
2016 | } |
2017 | return 0; |
2018 | } |
2019 | |
2020 | static void kvmppc_core_destroy_vm_pr(struct kvm *kvm) |
2021 | { |
2022 | #ifdef CONFIG_PPC64 |
2023 | WARN_ON(!list_empty(&kvm->arch.spapr_tce_tables)); |
2024 | #endif |
2025 | |
2026 | if (firmware_has_feature(FW_FEATURE_SET_MODE)) { |
		spin_lock(&kvm_global_user_count_lock);
2028 | BUG_ON(kvm_global_user_count == 0); |
2029 | if (--kvm_global_user_count == 0) |
2030 | pseries_enable_reloc_on_exc(); |
		spin_unlock(&kvm_global_user_count_lock);
2032 | } |
2033 | } |
2034 | |
2035 | static int kvmppc_core_check_processor_compat_pr(void) |
2036 | { |
2037 | /* |
2038 | * PR KVM can work on POWER9 inside a guest partition |
2039 | * running in HPT mode. It can't work if we are using |
2040 | * radix translation (because radix provides no way for |
2041 | * a process to have unique translations in quadrant 3). |
2042 | */ |
2043 | if (cpu_has_feature(CPU_FTR_ARCH_300) && radix_enabled()) |
2044 | return -EIO; |
2045 | return 0; |
2046 | } |
2047 | |
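/* PR KVM implements no backend-specific VM ioctls */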
2048 | static int kvm_arch_vm_ioctl_pr(struct file *filp, |
2049 | unsigned int ioctl, unsigned long arg) |
2050 | { |
2051 | return -ENOTTY; |
2052 | } |
2053 | |
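/* Callbacks wiring the PR backend into the common PowerPC KVM code */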
2054 | static struct kvmppc_ops kvm_ops_pr = { |
2055 | .get_sregs = kvm_arch_vcpu_ioctl_get_sregs_pr, |
2056 | .set_sregs = kvm_arch_vcpu_ioctl_set_sregs_pr, |
2057 | .get_one_reg = kvmppc_get_one_reg_pr, |
2058 | .set_one_reg = kvmppc_set_one_reg_pr, |
2059 | .vcpu_load = kvmppc_core_vcpu_load_pr, |
2060 | .vcpu_put = kvmppc_core_vcpu_put_pr, |
2061 | .inject_interrupt = kvmppc_inject_interrupt_pr, |
2062 | .set_msr = kvmppc_set_msr_pr, |
2063 | .vcpu_run = kvmppc_vcpu_run_pr, |
2064 | .vcpu_create = kvmppc_core_vcpu_create_pr, |
2065 | .vcpu_free = kvmppc_core_vcpu_free_pr, |
2066 | .check_requests = kvmppc_core_check_requests_pr, |
2067 | .get_dirty_log = kvm_vm_ioctl_get_dirty_log_pr, |
2068 | .flush_memslot = kvmppc_core_flush_memslot_pr, |
2069 | .prepare_memory_region = kvmppc_core_prepare_memory_region_pr, |
2070 | .commit_memory_region = kvmppc_core_commit_memory_region_pr, |
2071 | .unmap_gfn_range = kvm_unmap_gfn_range_pr, |
2072 | .age_gfn = kvm_age_gfn_pr, |
2073 | .test_age_gfn = kvm_test_age_gfn_pr, |
2074 | .set_spte_gfn = kvm_set_spte_gfn_pr, |
2075 | .free_memslot = kvmppc_core_free_memslot_pr, |
2076 | .init_vm = kvmppc_core_init_vm_pr, |
2077 | .destroy_vm = kvmppc_core_destroy_vm_pr, |
2078 | .get_smmu_info = kvm_vm_ioctl_get_smmu_info_pr, |
2079 | .emulate_op = kvmppc_core_emulate_op_pr, |
2080 | .emulate_mtspr = kvmppc_core_emulate_mtspr_pr, |
2081 | .emulate_mfspr = kvmppc_core_emulate_mfspr_pr, |
2082 | .fast_vcpu_kick = kvm_vcpu_kick, |
2083 | .arch_vm_ioctl = kvm_arch_vm_ioctl_pr, |
2084 | #ifdef CONFIG_PPC_BOOK3S_64 |
2085 | .hcall_implemented = kvmppc_hcall_impl_pr, |
2086 | .configure_mmu = kvm_configure_mmu_pr, |
2087 | #endif |
2088 | .giveup_ext = kvmppc_giveup_ext, |
2089 | }; |
2090 | |
2091 | |
2092 | int kvmppc_book3s_init_pr(void) |
2093 | { |
2094 | int r; |
2095 | |
2096 | r = kvmppc_core_check_processor_compat_pr(); |
2097 | if (r < 0) |
2098 | return r; |
2099 | |
2100 | kvm_ops_pr.owner = THIS_MODULE; |
2101 | kvmppc_pr_ops = &kvm_ops_pr; |
2102 | |
2103 | r = kvmppc_mmu_hpte_sysinit(); |
2104 | return r; |
2105 | } |
2106 | |
2107 | void kvmppc_book3s_exit_pr(void) |
2108 | { |
2109 | kvmppc_pr_ops = NULL; |
2110 | kvmppc_mmu_hpte_sysexit(); |
2111 | } |
2112 | |
2113 | /* |
2114 | * We only support separate modules for book3s 64 |
2115 | */ |
2116 | #ifdef CONFIG_PPC_BOOK3S_64 |
2117 | |
2118 | module_init(kvmppc_book3s_init_pr); |
2119 | module_exit(kvmppc_book3s_exit_pr); |
2120 | |
2121 | MODULE_LICENSE("GPL" ); |
2122 | MODULE_ALIAS_MISCDEV(KVM_MINOR); |
2123 | MODULE_ALIAS("devname:kvm" ); |
2124 | #endif |
2125 | |