// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright 2011 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com>
 */

#include <linux/cpu.h>
#include <linux/kvm_host.h>
#include <linux/preempt.h>
#include <linux/export.h>
#include <linux/sched.h>
#include <linux/spinlock.h>
#include <linux/init.h>
#include <linux/memblock.h>
#include <linux/sizes.h>
#include <linux/cma.h>
#include <linux/bitops.h>

#include <asm/cputable.h>
#include <asm/interrupt.h>
#include <asm/kvm_ppc.h>
#include <asm/kvm_book3s.h>
#include <asm/machdep.h>
#include <asm/xics.h>
#include <asm/xive.h>
#include <asm/dbell.h>
#include <asm/cputhreads.h>
#include <asm/io.h>
#include <asm/opal.h>
#include <asm/smp.h>

#define KVM_CMA_CHUNK_ORDER	18

#include "book3s_xics.h"
#include "book3s_xive.h"
#include "book3s_hv.h"

/*
 * Hash page table alignment on newer CPUs (CPU_FTR_ARCH_206)
 * must be a power of 2.
 */
#define HPT_ALIGN_PAGES		((1 << 18) >> PAGE_SHIFT) /* 256k */
/*
 * By default we reserve 5% of memory for hash pagetable allocation.
 */
static unsigned long kvm_cma_resv_ratio = 5;

static struct cma *kvm_cma;

static int __init early_parse_kvm_cma_resv(char *p)
{
	pr_debug("%s(%s)\n", __func__, p);
	if (!p)
		return -EINVAL;
	return kstrtoul(p, 0, &kvm_cma_resv_ratio);
}
early_param("kvm_cma_resv_ratio", early_parse_kvm_cma_resv);
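
/*
 * The ratio can be overridden on the kernel command line, e.g. booting
 * with "kvm_cma_resv_ratio=10" reserves 10% of memory instead of the
 * default 5%.
 */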

struct page *kvm_alloc_hpt_cma(unsigned long nr_pages)
{
	VM_BUG_ON(order_base_2(nr_pages) < KVM_CMA_CHUNK_ORDER - PAGE_SHIFT);

	return cma_alloc(kvm_cma, nr_pages, order_base_2(HPT_ALIGN_PAGES),
			 false);
}
EXPORT_SYMBOL_GPL(kvm_alloc_hpt_cma);

void kvm_free_hpt_cma(struct page *page, unsigned long nr_pages)
{
	cma_release(kvm_cma, page, nr_pages);
}
EXPORT_SYMBOL_GPL(kvm_free_hpt_cma);
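
/*
 * Illustrative sketch (an assumption about the caller, not code from this
 * file): the HPT allocator in book3s_64_mmu_hv.c is expected to use these
 * helpers roughly like this for an HPT of 2^order bytes:
 *
 *	page = kvm_alloc_hpt_cma(1ul << (order - PAGE_SHIFT));
 *	if (page)
 *		hpt = (unsigned long)pfn_to_kaddr(page_to_pfn(page));
 *	...
 *	kvm_free_hpt_cma(page, 1ul << (order - PAGE_SHIFT));
 */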

/**
 * kvm_cma_reserve() - reserve area for kvm hash pagetable
 *
 * This function reserves memory from the early allocator. It should be
 * called by arch-specific code once the memblock allocator has been
 * activated and all other subsystems have already allocated/reserved
 * memory.
 */
void __init kvm_cma_reserve(void)
{
	unsigned long align_size;
	phys_addr_t selected_size;

	/*
	 * We need CMA reservation only when we are in HV mode
	 */
	if (!cpu_has_feature(CPU_FTR_HVMODE))
		return;

	selected_size = PAGE_ALIGN(memblock_phys_mem_size() * kvm_cma_resv_ratio / 100);
	if (selected_size) {
		pr_info("%s: reserving %ld MiB for global area\n", __func__,
			 (unsigned long)selected_size / SZ_1M);
		align_size = HPT_ALIGN_PAGES << PAGE_SHIFT;
		cma_declare_contiguous(0, selected_size, 0, align_size,
			KVM_CMA_CHUNK_ORDER - PAGE_SHIFT, false, "kvm_cma",
			&kvm_cma);
	}
}

/*
 * Real-mode H_CONFER implementation.
 * We check if we are the only vcpu out of this virtual core
 * still running in the guest and not ceded. If so, we pop up
 * to the virtual-mode implementation; if not, just return to
 * the guest.
 */
long int kvmppc_rm_h_confer(struct kvm_vcpu *vcpu, int target,
			    unsigned int yield_count)
{
	struct kvmppc_vcore *vc = local_paca->kvm_hstate.kvm_vcore;
	int ptid = local_paca->kvm_hstate.ptid;
	int threads_running;
	int threads_ceded;
	int threads_conferring;
	u64 stop = get_tb() + 10 * tb_ticks_per_usec;
	int rv = H_SUCCESS; /* => don't yield */

	set_bit(ptid, &vc->conferring_threads);
	while ((get_tb() < stop) && !VCORE_IS_EXITING(vc)) {
		threads_running = VCORE_ENTRY_MAP(vc);
		threads_ceded = vc->napping_threads;
		threads_conferring = vc->conferring_threads;
		if ((threads_ceded | threads_conferring) == threads_running) {
			rv = H_TOO_HARD; /* => do yield */
			break;
		}
	}
	clear_bit(ptid, &vc->conferring_threads);
	return rv;
}
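
/*
 * Worked example (illustrative, with made-up bit patterns): with four
 * threads in the vcore and an entry map of 0b1011, if the ceded map is
 * 0b0010 and the conferring map is 0b1001 then (ceded | conferring) ==
 * 0b1011 == running, i.e. every thread still in the guest is idle or
 * conferring, so the hcall returns H_TOO_HARD and we yield to the
 * virtual-mode handler.  Otherwise we spin for at most ~10us and return
 * H_SUCCESS so the vcpu goes straight back into the guest.
 */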

/*
 * When running HV mode KVM we need to block certain operations while KVM VMs
 * exist in the system. We use a counter of VMs to track this.
 *
 * One of the operations we need to block is onlining of secondaries, so we
 * protect hv_vm_count with cpus_read_lock/unlock().
 */
static atomic_t hv_vm_count;

void kvm_hv_vm_activated(void)
{
	cpus_read_lock();
	atomic_inc(&hv_vm_count);
	cpus_read_unlock();
}
EXPORT_SYMBOL_GPL(kvm_hv_vm_activated);

void kvm_hv_vm_deactivated(void)
{
	cpus_read_lock();
	atomic_dec(&hv_vm_count);
	cpus_read_unlock();
}
EXPORT_SYMBOL_GPL(kvm_hv_vm_deactivated);

bool kvm_hv_mode_active(void)
{
	return atomic_read(&hv_vm_count) != 0;
}

extern int hcall_real_table[], hcall_real_table_end[];

int kvmppc_hcall_impl_hv_realmode(unsigned long cmd)
{
	cmd /= 4;
	if (cmd < hcall_real_table_end - hcall_real_table &&
	    hcall_real_table[cmd])
		return 1;

	return 0;
}
EXPORT_SYMBOL_GPL(kvmppc_hcall_impl_hv_realmode);
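
/*
 * Illustrative note: PAPR hcall numbers are multiples of 4, so dividing by
 * 4 above turns the hcall number into an index into hcall_real_table,
 * which holds one entry per hcall handled in real mode.  For example,
 * H_CEDE (0xE0) maps to index 0xE0 / 4 = 56; a zero entry means the hcall
 * has no real-mode handler.
 */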

int kvmppc_hwrng_present(void)
{
	return ppc_md.get_random_seed != NULL;
}
EXPORT_SYMBOL_GPL(kvmppc_hwrng_present);

long kvmppc_rm_h_random(struct kvm_vcpu *vcpu)
{
	unsigned long rand;

	if (ppc_md.get_random_seed &&
	    ppc_md.get_random_seed(&rand)) {
		kvmppc_set_gpr(vcpu, 4, rand);
		return H_SUCCESS;
	}

	return H_HARDWARE;
}

/*
 * Send an interrupt or message to another CPU.
 * The caller needs to include any barrier needed to order writes
 * to memory vs. the IPI/message.
 */
void kvmhv_rm_send_ipi(int cpu)
{
	void __iomem *xics_phys;
	unsigned long msg = PPC_DBELL_TYPE(PPC_DBELL_SERVER);

	/* On POWER9 we can use msgsnd for any destination cpu. */
	if (cpu_has_feature(CPU_FTR_ARCH_300)) {
		msg |= get_hard_smp_processor_id(cpu);
		__asm__ __volatile__ (PPC_MSGSND(%0) : : "r" (msg));
		return;
	}

	/* On POWER8 for IPIs to threads in the same core, use msgsnd. */
	if (cpu_has_feature(CPU_FTR_ARCH_207S) &&
	    cpu_first_thread_sibling(cpu) ==
	    cpu_first_thread_sibling(raw_smp_processor_id())) {
		msg |= cpu_thread_in_core(cpu);
		__asm__ __volatile__ (PPC_MSGSND(%0) : : "r" (msg));
		return;
	}

	/* We should never reach this */
	if (WARN_ON_ONCE(xics_on_xive()))
		return;

	/* Else poke the target with an IPI */
	xics_phys = paca_ptrs[cpu]->kvm_hstate.xics_phys;
	if (xics_phys)
		__raw_rm_writeb(IPI_PRIORITY, xics_phys + XICS_MFRR);
	else
		opal_int_set_mfrr(get_hard_smp_processor_id(cpu), IPI_PRIORITY);
}
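
/*
 * Illustrative note: the doorbell message built above is the "server"
 * doorbell type in the high-order bits (PPC_DBELL_TYPE(PPC_DBELL_SERVER))
 * ORed with the target tag in the low-order bits: the hardware processor
 * id on POWER9, or the thread number within the core on POWER8, as the
 * two msgsnd cases show.
 */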

/*
 * The following functions are called from the assembly code
 * in book3s_hv_rmhandlers.S.
 */
static void kvmhv_interrupt_vcore(struct kvmppc_vcore *vc, int active)
{
	int cpu = vc->pcpu;

	/* Order setting of exit map vs. msgsnd/IPI */
	smp_mb();
	for (; active; active >>= 1, ++cpu)
		if (active & 1)
			kvmhv_rm_send_ipi(cpu);
}

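/*
 * Layout note (inferred from the code below): vcore->entry_exit_map packs
 * two 8-bit maps into one word.  The low byte is the entry map, one bit
 * per thread that has entered the guest; the 0xff00 byte is the exit map,
 * one bit per thread that has started to exit.  For example, a thread
 * with ptid 2 sets 0x100 << 2 = 0x400, and (ee >> 8) != 0 means some
 * thread has already begun the exit sequence.
 */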
void kvmhv_commence_exit(int trap)
{
	struct kvmppc_vcore *vc = local_paca->kvm_hstate.kvm_vcore;
	int ptid = local_paca->kvm_hstate.ptid;
	struct kvm_split_mode *sip = local_paca->kvm_hstate.kvm_split_mode;
	int me, ee, i;

	/* Set our bit in the threads-exiting-guest map in the 0xff00
	   bits of vcore->entry_exit_map */
	me = 0x100 << ptid;
	do {
		ee = vc->entry_exit_map;
	} while (cmpxchg(&vc->entry_exit_map, ee, ee | me) != ee);

	/* Are we the first here? */
	if ((ee >> 8) != 0)
		return;

	/*
	 * Trigger the other threads in this vcore to exit the guest.
	 * If this is a hypervisor decrementer interrupt then they
	 * will be already on their way out of the guest.
	 */
	if (trap != BOOK3S_INTERRUPT_HV_DECREMENTER)
		kvmhv_interrupt_vcore(vc, ee & ~(1 << ptid));

	/*
	 * If we are doing dynamic micro-threading, interrupt the other
	 * subcores to pull them out of their guests too.
	 */
	if (!sip)
		return;

	for (i = 0; i < MAX_SUBCORES; ++i) {
		vc = sip->vc[i];
		if (!vc)
			break;
		do {
			ee = vc->entry_exit_map;
			/* Already asked to exit? */
			if ((ee >> 8) != 0)
				break;
		} while (cmpxchg(&vc->entry_exit_map, ee,
				 ee | VCORE_EXIT_REQ) != ee);
		if ((ee >> 8) == 0)
			kvmhv_interrupt_vcore(vc, ee);
	}
}

struct kvmppc_host_rm_ops *kvmppc_host_rm_ops_hv;
EXPORT_SYMBOL_GPL(kvmppc_host_rm_ops_hv);

#ifdef CONFIG_KVM_XICS
static struct kvmppc_irq_map *get_irqmap(struct kvmppc_passthru_irqmap *pimap,
					 u32 xisr)
{
	int i;

	/*
	 * We access the mapped array here without a lock. That
	 * is safe because we never reduce the number of entries
	 * in the array and we never change the v_hwirq field of
	 * an entry once it is set.
	 *
	 * We have also carefully ordered the stores in the writer
	 * and the loads here in the reader, so that if we find a matching
	 * hwirq here, the associated GSI and irq_desc fields are valid.
	 */
	for (i = 0; i < pimap->n_mapped; i++) {
		if (xisr == pimap->mapped[i].r_hwirq) {
			/*
			 * Order subsequent reads in the caller to serialize
			 * with the writer.
			 */
			smp_rmb();
			return &pimap->mapped[i];
		}
	}
	return NULL;
}
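
/*
 * Writer-side sketch (an assumption about the mapping code in book3s_hv.c,
 * not code from this file): the writer is expected to publish an entry
 * roughly like this, so that the smp_rmb() above pairs with its smp_wmb():
 *
 *	map->v_hwirq = guest_gsi;
 *	map->desc = desc;
 *	smp_wmb();		// order the fields before the publish
 *	map->r_hwirq = hwirq;	// publish: the reader matches on this
 *	pimap->n_mapped++;
 */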

/*
 * If we have an interrupt that's not an IPI, check if we have a
 * passthrough adapter and if so, check if this external interrupt
 * is for the adapter.
 * We will attempt to deliver the IRQ directly to the target VCPU's
 * ICP, the virtual ICP (based on affinity - the xive value in ICS).
 *
 * If the delivery fails or if this is not for a passthrough adapter,
 * return to the host to handle this interrupt. We earlier saved a
 * copy of the XIRR in the PACA; it will be picked up by the host
 * ICP driver.
 */
static int kvmppc_check_passthru(u32 xisr, __be32 xirr, bool *again)
{
	struct kvmppc_passthru_irqmap *pimap;
	struct kvmppc_irq_map *irq_map;
	struct kvm_vcpu *vcpu;

	vcpu = local_paca->kvm_hstate.kvm_vcpu;
	if (!vcpu)
		return 1;
	pimap = kvmppc_get_passthru_irqmap(vcpu->kvm);
	if (!pimap)
		return 1;
	irq_map = get_irqmap(pimap, xisr);
	if (!irq_map)
		return 1;

	/* We're handling this interrupt, generic code doesn't need to */
	local_paca->kvm_hstate.saved_xirr = 0;

	return kvmppc_deliver_irq_passthru(vcpu, xirr, irq_map, pimap, again);
}

#else
static inline int kvmppc_check_passthru(u32 xisr, __be32 xirr, bool *again)
{
	return 1;
}
#endif

/*
 * Determine what sort of external interrupt is pending (if any).
 * Returns:
 *	0 if no interrupt is pending
 *	1 if an interrupt is pending that needs to be handled by the host
 *	2 Passthrough that needs completion in the host
 *	-1 if there was a guest wakeup IPI (which has now been cleared)
 *	-2 if there is PCI passthrough external interrupt that was handled
 */
static long kvmppc_read_one_intr(bool *again);

long kvmppc_read_intr(void)
{
	long ret = 0;
	long rc;
	bool again;

	if (xive_enabled())
		return 1;

	do {
		again = false;
		rc = kvmppc_read_one_intr(&again);
		if (rc && (ret == 0 || rc > ret))
			ret = rc;
	} while (again);
	return ret;
}

static long kvmppc_read_one_intr(bool *again)
{
	void __iomem *xics_phys;
	u32 h_xirr;
	__be32 xirr;
	u32 xisr;
	u8 host_ipi;
	int64_t rc;

	if (xive_enabled())
		return 1;

	/* see if a host IPI is pending */
	host_ipi = READ_ONCE(local_paca->kvm_hstate.host_ipi);
	if (host_ipi)
		return 1;

	/* Now read the interrupt from the ICP */
	xics_phys = local_paca->kvm_hstate.xics_phys;
	rc = 0;
	if (!xics_phys)
		rc = opal_int_get_xirr(&xirr, false);
	else
		xirr = __raw_rm_readl(xics_phys + XICS_XIRR);
	if (rc < 0)
		return 1;

	/*
	 * Save XIRR for later. Since we get control in reverse endian
	 * on LE systems, save it byte reversed and fetch it back in
	 * host endian. Note that xirr is the value read from the
	 * XIRR register, while h_xirr is the host endian version.
	 */
	h_xirr = be32_to_cpu(xirr);
	local_paca->kvm_hstate.saved_xirr = h_xirr;
	xisr = h_xirr & 0xffffff;
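	/*
	 * (The 32-bit XIRR is the 8-bit CPPR in the top byte with the
	 * 24-bit XISR, the interrupt source number, below it; the 0xffffff
	 * mask above extracts the XISR.)
	 */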
	/*
	 * Ensure that the store/load complete to guarantee all side
	 * effects of loading from XIRR have completed
	 */
	smp_mb();

	/* if nothing pending in the ICP */
	if (!xisr)
		return 0;

	/* We found something in the ICP...
	 *
	 * If it is an IPI, clear the MFRR and EOI it.
	 */
	if (xisr == XICS_IPI) {
		rc = 0;
		if (xics_phys) {
			__raw_rm_writeb(0xff, xics_phys + XICS_MFRR);
			__raw_rm_writel(xirr, xics_phys + XICS_XIRR);
		} else {
			opal_int_set_mfrr(hard_smp_processor_id(), 0xff);
			rc = opal_int_eoi(h_xirr);
		}
		/* If rc > 0, there is another interrupt pending */
		*again = rc > 0;

		/*
		 * Need to ensure side effects of above stores
		 * complete before proceeding.
		 */
		smp_mb();

		/*
		 * We need to re-check host IPI now in case it got set in the
		 * meantime. If it's clear, we bounce the interrupt to the
		 * guest
		 */
		host_ipi = READ_ONCE(local_paca->kvm_hstate.host_ipi);
		if (unlikely(host_ipi != 0)) {
			/* We raced with the host,
			 * we need to resend that IPI, bummer
			 */
			if (xics_phys)
				__raw_rm_writeb(IPI_PRIORITY,
						xics_phys + XICS_MFRR);
			else
				opal_int_set_mfrr(hard_smp_processor_id(),
						  IPI_PRIORITY);
			/* Let side effects complete */
			smp_mb();
			return 1;
		}

		/* OK, it's an IPI for us */
		local_paca->kvm_hstate.saved_xirr = 0;
		return -1;
	}

	return kvmppc_check_passthru(xisr, xirr, again);
}

static void kvmppc_end_cede(struct kvm_vcpu *vcpu)
{
	vcpu->arch.ceded = 0;
	if (vcpu->arch.timer_running) {
		hrtimer_try_to_cancel(&vcpu->arch.dec_timer);
		vcpu->arch.timer_running = 0;
	}
}

void kvmppc_set_msr_hv(struct kvm_vcpu *vcpu, u64 msr)
{
	/* Guest must always run with ME enabled, HV disabled. */
	msr = (msr | MSR_ME) & ~MSR_HV;

	/*
	 * Check for illegal transactional state bit combination
	 * and if we find it, force the TS field to a safe state.
	 */
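	/*
	 * (The two-bit MSR[TS] field encodes 00 = non-transactional,
	 * 01 = suspended, 10 = transactional; 11 is the reserved,
	 * illegal combination that gets cleared here.)
	 */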
	if ((msr & MSR_TS_MASK) == MSR_TS_MASK)
		msr &= ~MSR_TS_MASK;
	__kvmppc_set_msr_hv(vcpu, msr);
	kvmppc_end_cede(vcpu);
}
EXPORT_SYMBOL_GPL(kvmppc_set_msr_hv);

static void inject_interrupt(struct kvm_vcpu *vcpu, int vec, u64 srr1_flags)
{
	unsigned long msr, pc, new_msr, new_pc;

	msr = kvmppc_get_msr(vcpu);
	pc = kvmppc_get_pc(vcpu);
	new_msr = vcpu->arch.intr_msr;
	new_pc = vec;

	/* If transactional, change to suspend mode on IRQ delivery */
	if (MSR_TM_TRANSACTIONAL(msr))
		new_msr |= MSR_TS_S;
	else
		new_msr |= msr & MSR_TS_MASK;

	/*
	 * Perform MSR and PC adjustment for LPCR[AIL]=3 if it is set and
	 * applicable. AIL=2 is not supported.
	 *
	 * AIL does not apply to SRESET, MCE, or HMI (which is never
	 * delivered to the guest), and does not apply if IR=0 or DR=0.
	 */
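	/*
	 * (Example: with AIL=3 and MSR IR/DR set, a decrementer interrupt,
	 * vector 0x900, is delivered at effective address
	 * 0xc000000000004900 with relocation left enabled.)
	 */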
	if (vec != BOOK3S_INTERRUPT_SYSTEM_RESET &&
	    vec != BOOK3S_INTERRUPT_MACHINE_CHECK &&
	    (vcpu->arch.vcore->lpcr & LPCR_AIL) == LPCR_AIL_3 &&
	    (msr & (MSR_IR|MSR_DR)) == (MSR_IR|MSR_DR)) {
		new_msr |= MSR_IR | MSR_DR;
		new_pc += 0xC000000000004000ULL;
	}

	kvmppc_set_srr0(vcpu, pc);
	kvmppc_set_srr1(vcpu, (msr & SRR1_MSR_BITS) | srr1_flags);
	kvmppc_set_pc(vcpu, new_pc);
	__kvmppc_set_msr_hv(vcpu, new_msr);
}

void kvmppc_inject_interrupt_hv(struct kvm_vcpu *vcpu, int vec, u64 srr1_flags)
{
	inject_interrupt(vcpu, vec, srr1_flags);
	kvmppc_end_cede(vcpu);
}
EXPORT_SYMBOL_GPL(kvmppc_inject_interrupt_hv);

/*
 * Is there a PRIV_DOORBELL pending for the guest (on POWER9)?
 * Can we inject a Decrementer or an External interrupt?
 */
void kvmppc_guest_entry_inject_int(struct kvm_vcpu *vcpu)
{
	int ext;
	unsigned long lpcr;

	WARN_ON_ONCE(cpu_has_feature(CPU_FTR_ARCH_300));

	/* Insert EXTERNAL bit into LPCR at the MER bit position */
	ext = (vcpu->arch.pending_exceptions >> BOOK3S_IRQPRIO_EXTERNAL) & 1;
	lpcr = mfspr(SPRN_LPCR);
	lpcr |= ext << LPCR_MER_SH;
	mtspr(SPRN_LPCR, lpcr);
	isync();

	if (vcpu->arch.shregs.msr & MSR_EE) {
		if (ext) {
			inject_interrupt(vcpu, BOOK3S_INTERRUPT_EXTERNAL, 0);
		} else {
			long int dec = mfspr(SPRN_DEC);
			if (!(lpcr & LPCR_LD))
				dec = (int) dec;
			if (dec < 0)
				inject_interrupt(vcpu,
					BOOK3S_INTERRUPT_DECREMENTER, 0);
		}
	}

	if (vcpu->arch.doorbell_request) {
		mtspr(SPRN_DPDES, 1);
		vcpu->arch.vcore->dpdes = 1;
		smp_wmb();
		vcpu->arch.doorbell_request = 0;
	}
}

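/*
 * Flush the whole guest TLB on this core, one congruence-class set per
 * tlbiel.  The set number field in RB ends at bit 51, just above the IS
 * field at bit 52, so "rb += PPC_BIT(51)" below steps to the next set.
 * kvm->arch.tlb_sets is the number of TLB sets (for example 128 for a
 * POWER9 radix guest; that figure is an assumption, not defined in this
 * file).
 */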
static void flush_guest_tlb(struct kvm *kvm)
{
	unsigned long rb, set;

	rb = PPC_BIT(52);	/* IS = 2 */
	for (set = 0; set < kvm->arch.tlb_sets; ++set) {
		/* R=0 PRS=0 RIC=0 */
		asm volatile(PPC_TLBIEL(%0, %4, %3, %2, %1)
			     : : "r" (rb), "i" (0), "i" (0), "i" (0),
			     "r" (0) : "memory");
		rb += PPC_BIT(51);	/* increment set number */
	}
	asm volatile("ptesync" : : : "memory");
}

void kvmppc_check_need_tlb_flush(struct kvm *kvm, int pcpu)
{
	if (cpumask_test_cpu(pcpu, &kvm->arch.need_tlb_flush)) {
		flush_guest_tlb(kvm);

		/* Clear the bit after the TLB flush */
		cpumask_clear_cpu(pcpu, &kvm->arch.need_tlb_flush);
	}
}
EXPORT_SYMBOL_GPL(kvmppc_check_need_tlb_flush);