1 | // SPDX-License-Identifier: GPL-2.0-only |
2 | /* |
3 | * Copyright 2012 Michael Ellerman, IBM Corporation. |
4 | * Copyright 2012 Benjamin Herrenschmidt, IBM Corporation |
5 | */ |
6 | |
7 | #include <linux/kernel.h> |
8 | #include <linux/kvm_host.h> |
9 | #include <linux/err.h> |
10 | #include <linux/kernel_stat.h> |
11 | #include <linux/pgtable.h> |
12 | |
13 | #include <asm/kvm_book3s.h> |
14 | #include <asm/kvm_ppc.h> |
15 | #include <asm/hvcall.h> |
16 | #include <asm/xics.h> |
17 | #include <asm/synch.h> |
18 | #include <asm/cputhreads.h> |
19 | #include <asm/ppc-opcode.h> |
20 | #include <asm/pnv-pci.h> |
21 | #include <asm/opal.h> |
22 | #include <asm/smp.h> |
23 | |
24 | #include "book3s_xics.h" |
25 | |
26 | #define DEBUG_PASSUP |
27 | |
28 | int h_ipi_redirect = 1; |
29 | EXPORT_SYMBOL(h_ipi_redirect); |
30 | int kvm_irq_bypass = 1; |
31 | EXPORT_SYMBOL(kvm_irq_bypass); |
32 | |
33 | static void icp_rm_deliver_irq(struct kvmppc_xics *xics, struct kvmppc_icp *icp, |
34 | u32 new_irq, bool check_resend); |
35 | static int xics_opal_set_server(unsigned int hw_irq, int server_cpu); |
36 | |
37 | /* -- ICS routines -- */ |
38 | static void ics_rm_check_resend(struct kvmppc_xics *xics, |
39 | struct kvmppc_ics *ics, struct kvmppc_icp *icp) |
40 | { |
41 | int i; |
42 | |
43 | for (i = 0; i < KVMPPC_XICS_IRQ_PER_ICS; i++) { |
44 | struct ics_irq_state *state = &ics->irq_state[i]; |
45 | if (state->resend) |
			icp_rm_deliver_irq(xics, icp, state->number, true);
47 | } |
}
50 | |
51 | /* -- ICP routines -- */ |
52 | |
53 | #ifdef CONFIG_SMP |
54 | static inline void icp_send_hcore_msg(int hcore, struct kvm_vcpu *vcpu) |
55 | { |
56 | int hcpu; |
57 | |
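	/* CPU id of the first hardware thread on the target core */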
58 | hcpu = hcore << threads_shift; |
59 | kvmppc_host_rm_ops_hv->rm_core[hcore].rm_data = vcpu; |
60 | smp_muxed_ipi_set_message(hcpu, PPC_MSG_RM_HOST_ACTION); |
61 | kvmppc_set_host_ipi(hcpu); |
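	/* Order the message and host-IPI stores above before sending the IPI */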
62 | smp_mb(); |
63 | kvmhv_rm_send_ipi(hcpu); |
64 | } |
65 | #else |
66 | static inline void icp_send_hcore_msg(int hcore, struct kvm_vcpu *vcpu) { } |
67 | #endif |
68 | |
69 | /* |
70 | * We start the search from our current CPU Id in the core map |
71 | * and go in a circle until we get back to our ID looking for a |
72 | * core that is running in host context and that hasn't already |
73 | * been targeted for another rm_host_ops. |
74 | * |
 * In the future, we could consider using a fairer algorithm (one
 * that distributes the IPIs better).
 *
 * Returns -1 if no CPU could be found in the host.
 * Otherwise, returns a CPU id which has been reserved for use.
80 | */ |
81 | static inline int grab_next_hostcore(int start, |
82 | struct kvmppc_host_rm_core *rm_core, int max, int action) |
83 | { |
84 | bool success; |
85 | int core; |
86 | union kvmppc_rm_state old, new; |
87 | |
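	/* Scan (start, max); the caller handles wrap-around with a second pass */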
88 | for (core = start + 1; core < max; core++) { |
89 | old = new = READ_ONCE(rm_core[core].rm_state); |
90 | |
91 | if (!old.in_host || old.rm_action) |
92 | continue; |
93 | |
94 | /* Try to grab this host core if not taken already. */ |
95 | new.rm_action = action; |
96 | |
97 | success = cmpxchg64(&rm_core[core].rm_state.raw, |
98 | old.raw, new.raw) == old.raw; |
99 | if (success) { |
100 | /* |
101 | * Make sure that the store to the rm_action is made |
102 | * visible before we return to caller (and the |
103 | * subsequent store to rm_data) to synchronize with |
104 | * the IPI handler. |
105 | */ |
106 | smp_wmb(); |
107 | return core; |
108 | } |
109 | } |
110 | |
111 | return -1; |
112 | } |
113 | |
114 | static inline int find_available_hostcore(int action) |
115 | { |
116 | int core; |
117 | int my_core = smp_processor_id() >> threads_shift; |
118 | struct kvmppc_host_rm_core *rm_core = kvmppc_host_rm_ops_hv->rm_core; |
119 | |
	core = grab_next_hostcore(my_core, rm_core, cpu_nr_cores(), action);
121 | if (core == -1) |
		core = grab_next_hostcore(core, rm_core, my_core, action);
123 | |
124 | return core; |
125 | } |
126 | |
127 | static void icp_rm_set_vcpu_irq(struct kvm_vcpu *vcpu, |
128 | struct kvm_vcpu *this_vcpu) |
129 | { |
130 | struct kvmppc_icp *this_icp = this_vcpu->arch.icp; |
131 | int cpu; |
132 | int hcore; |
133 | |
134 | /* Mark the target VCPU as having an interrupt pending */ |
135 | vcpu->stat.queue_intr++; |
	set_bit(BOOK3S_IRQPRIO_EXTERNAL, &vcpu->arch.pending_exceptions);
137 | |
138 | /* Kick self ? Just set MER and return */ |
139 | if (vcpu == this_vcpu) { |
140 | mtspr(SPRN_LPCR, mfspr(SPRN_LPCR) | LPCR_MER); |
141 | return; |
142 | } |
143 | |
144 | /* |
	 * Check if the target core is loaded; if not, find an available
	 * host core to post to, to wake the VCPU; if we can't find one,
	 * set up state to eventually return H_TOO_HARD.
148 | */ |
149 | cpu = vcpu->arch.thread_cpu; |
150 | if (cpu < 0 || cpu >= nr_cpu_ids) { |
151 | hcore = -1; |
152 | if (kvmppc_host_rm_ops_hv && h_ipi_redirect) |
			hcore = find_available_hostcore(XICS_RM_KICK_VCPU);
154 | if (hcore != -1) { |
155 | icp_send_hcore_msg(hcore, vcpu); |
156 | } else { |
157 | this_icp->rm_action |= XICS_RM_KICK_VCPU; |
158 | this_icp->rm_kick_target = vcpu; |
159 | } |
160 | return; |
161 | } |
162 | |
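	/* Order the pending-exception store above before the IPI */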
163 | smp_mb(); |
164 | kvmhv_rm_send_ipi(cpu); |
165 | } |
166 | |
167 | static void icp_rm_clr_vcpu_irq(struct kvm_vcpu *vcpu) |
168 | { |
169 | /* Note: Only called on self ! */ |
	clear_bit(BOOK3S_IRQPRIO_EXTERNAL, &vcpu->arch.pending_exceptions);
171 | mtspr(SPRN_LPCR, mfspr(SPRN_LPCR) & ~LPCR_MER); |
172 | } |
173 | |
174 | static inline bool icp_rm_try_update(struct kvmppc_icp *icp, |
175 | union kvmppc_icp_state old, |
176 | union kvmppc_icp_state new) |
177 | { |
178 | struct kvm_vcpu *this_vcpu = local_paca->kvm_hstate.kvm_vcpu; |
179 | bool success; |
180 | |
181 | /* Calculate new output value */ |
182 | new.out_ee = (new.xisr && (new.pending_pri < new.cppr)); |
183 | |
184 | /* Attempt atomic update */ |
185 | success = cmpxchg64(&icp->state.raw, old.raw, new.raw) == old.raw; |
186 | if (!success) |
187 | goto bail; |
188 | |
189 | /* |
190 | * Check for output state update |
191 | * |
192 | * Note that this is racy since another processor could be updating |
193 | * the state already. This is why we never clear the interrupt output |
194 | * here, we only ever set it. The clear only happens prior to doing |
195 | * an update and only by the processor itself. Currently we do it |
	 * in Accept (H_XIRR) and Up_Cppr (H_CPPR).
197 | * |
198 | * We also do not try to figure out whether the EE state has changed, |
199 | * we unconditionally set it if the new state calls for it. The reason |
200 | * for that is that we opportunistically remove the pending interrupt |
201 | * flag when raising CPPR, so we need to set it back here if an |
202 | * interrupt is still pending. |
203 | */ |
204 | if (new.out_ee) |
		icp_rm_set_vcpu_irq(icp->vcpu, this_vcpu);
206 | |
207 | /* Expose the state change for debug purposes */ |
208 | this_vcpu->arch.icp->rm_dbgstate = new; |
209 | this_vcpu->arch.icp->rm_dbgtgt = icp->vcpu; |
210 | |
211 | bail: |
212 | return success; |
213 | } |
214 | |
215 | static inline int check_too_hard(struct kvmppc_xics *xics, |
216 | struct kvmppc_icp *icp) |
217 | { |
218 | return (xics->real_mode_dbg || icp->rm_action) ? H_TOO_HARD : H_SUCCESS; |
219 | } |
220 | |
221 | static void icp_rm_check_resend(struct kvmppc_xics *xics, |
222 | struct kvmppc_icp *icp) |
223 | { |
224 | u32 icsid; |
225 | |
226 | /* Order this load with the test for need_resend in the caller */ |
227 | smp_rmb(); |
228 | for_each_set_bit(icsid, icp->resend_map, xics->max_icsid + 1) { |
229 | struct kvmppc_ics *ics = xics->ics[icsid]; |
230 | |
		if (!test_and_clear_bit(icsid, icp->resend_map))
232 | continue; |
233 | if (!ics) |
234 | continue; |
235 | ics_rm_check_resend(xics, ics, icp); |
236 | } |
237 | } |
238 | |
239 | static bool icp_rm_try_to_deliver(struct kvmppc_icp *icp, u32 irq, u8 priority, |
240 | u32 *reject) |
241 | { |
242 | union kvmppc_icp_state old_state, new_state; |
243 | bool success; |
244 | |
245 | do { |
246 | old_state = new_state = READ_ONCE(icp->state); |
247 | |
248 | *reject = 0; |
249 | |
250 | /* See if we can deliver */ |
251 | success = new_state.cppr > priority && |
252 | new_state.mfrr > priority && |
253 | new_state.pending_pri > priority; |
254 | |
255 | /* |
256 | * If we can, check for a rejection and perform the |
257 | * delivery |
258 | */ |
259 | if (success) { |
260 | *reject = new_state.xisr; |
261 | new_state.xisr = irq; |
262 | new_state.pending_pri = priority; |
263 | } else { |
264 | /* |
265 | * If we failed to deliver we set need_resend |
266 | * so a subsequent CPPR state change causes us |
267 | * to try a new delivery. |
268 | */ |
269 | new_state.need_resend = true; |
270 | } |
271 | |
	} while (!icp_rm_try_update(icp, old_state, new_state));
273 | |
274 | return success; |
275 | } |
276 | |
277 | static void icp_rm_deliver_irq(struct kvmppc_xics *xics, struct kvmppc_icp *icp, |
278 | u32 new_irq, bool check_resend) |
279 | { |
280 | struct ics_irq_state *state; |
281 | struct kvmppc_ics *ics; |
282 | u32 reject; |
283 | u16 src; |
284 | |
285 | /* |
286 | * This is used both for initial delivery of an interrupt and |
287 | * for subsequent rejection. |
288 | * |
289 | * Rejection can be racy vs. resends. We have evaluated the |
290 | * rejection in an atomic ICP transaction which is now complete, |
291 | * so potentially the ICP can already accept the interrupt again. |
292 | * |
293 | * So we need to retry the delivery. Essentially the reject path |
294 | * boils down to a failed delivery. Always. |
295 | * |
296 | * Now the interrupt could also have moved to a different target, |
	 * thus we may need to re-do the ICP lookup as well.
298 | */ |
299 | |
300 | again: |
301 | /* Get the ICS state and lock it */ |
302 | ics = kvmppc_xics_find_ics(xics, new_irq, &src); |
303 | if (!ics) { |
304 | /* Unsafe increment, but this does not need to be accurate */ |
305 | xics->err_noics++; |
306 | return; |
307 | } |
308 | state = &ics->irq_state[src]; |
309 | |
310 | /* Get a lock on the ICS */ |
311 | arch_spin_lock(&ics->lock); |
312 | |
313 | /* Get our server */ |
314 | if (!icp || state->server != icp->server_num) { |
315 | icp = kvmppc_xics_find_server(xics->kvm, state->server); |
316 | if (!icp) { |
			/* Unsafe increment again */
318 | xics->err_noicp++; |
319 | goto out; |
320 | } |
321 | } |
322 | |
	if (check_resend && !state->resend)
		goto out;
326 | |
327 | /* Clear the resend bit of that interrupt */ |
328 | state->resend = 0; |
329 | |
330 | /* |
331 | * If masked, bail out |
332 | * |
333 | * Note: PAPR doesn't mention anything about masked pending |
334 | * when doing a resend, only when doing a delivery. |
335 | * |
336 | * However that would have the effect of losing a masked |
337 | * interrupt that was rejected and isn't consistent with |
338 | * the whole masked_pending business which is about not |
339 | * losing interrupts that occur while masked. |
340 | * |
	 * We don't differentiate between normal deliveries and resends;
	 * this implementation will differ from PAPR and will not lose
	 * such interrupts.
344 | */ |
345 | if (state->priority == MASKED) { |
346 | state->masked_pending = 1; |
347 | goto out; |
348 | } |
349 | |
350 | /* |
351 | * Try the delivery, this will set the need_resend flag |
352 | * in the ICP as part of the atomic transaction if the |
353 | * delivery is not possible. |
354 | * |
355 | * Note that if successful, the new delivery might have itself |
356 | * rejected an interrupt that was "delivered" before we took the |
357 | * ics spin lock. |
358 | * |
359 | * In this case we do the whole sequence all over again for the |
360 | * new guy. We cannot assume that the rejected interrupt is less |
361 | * favored than the new one, and thus doesn't need to be delivered, |
362 | * because by the time we exit icp_rm_try_to_deliver() the target |
363 | * processor may well have already consumed & completed it, and thus |
364 | * the rejected interrupt might actually be already acceptable. |
365 | */ |
	if (icp_rm_try_to_deliver(icp, new_irq, state->priority, &reject)) {
367 | /* |
368 | * Delivery was successful, did we reject somebody else ? |
369 | */ |
370 | if (reject && reject != XICS_IPI) { |
371 | arch_spin_unlock(&ics->lock); |
372 | icp->n_reject++; |
373 | new_irq = reject; |
374 | check_resend = 0; |
375 | goto again; |
376 | } |
377 | } else { |
378 | /* |
		 * We failed to deliver the interrupt, so we need to set the
		 * resend map bit and mark the ICS state as needing a resend.
381 | */ |
382 | state->resend = 1; |
383 | |
384 | /* |
385 | * Make sure when checking resend, we don't miss the resend |
386 | * if resend_map bit is seen and cleared. |
387 | */ |
388 | smp_wmb(); |
		set_bit(ics->icsid, icp->resend_map);
390 | |
391 | /* |
392 | * If the need_resend flag got cleared in the ICP some time |
393 | * between icp_rm_try_to_deliver() atomic update and now, then |
		 * we know it might have missed the resend_map bit. So we
		 * retry.
396 | */ |
397 | smp_mb(); |
398 | if (!icp->state.need_resend) { |
399 | state->resend = 0; |
400 | arch_spin_unlock(&ics->lock); |
401 | check_resend = 0; |
402 | goto again; |
403 | } |
404 | } |
405 | out: |
406 | arch_spin_unlock(&ics->lock); |
407 | } |
408 | |
409 | static void icp_rm_down_cppr(struct kvmppc_xics *xics, struct kvmppc_icp *icp, |
410 | u8 new_cppr) |
411 | { |
412 | union kvmppc_icp_state old_state, new_state; |
413 | bool resend; |
414 | |
415 | /* |
416 | * This handles several related states in one operation: |
417 | * |
418 | * ICP State: Down_CPPR |
419 | * |
420 | * Load CPPR with new value and if the XISR is 0 |
421 | * then check for resends: |
422 | * |
423 | * ICP State: Resend |
424 | * |
425 | * If MFRR is more favored than CPPR, check for IPIs |
426 | * and notify ICS of a potential resend. This is done |
427 | * asynchronously (when used in real mode, we will have |
428 | * to exit here). |
429 | * |
430 | * We do not handle the complete Check_IPI as documented |
431 | * here. In the PAPR, this state will be used for both |
432 | * Set_MFRR and Down_CPPR. However, we know that we aren't |
433 | * changing the MFRR state here so we don't need to handle |
434 | * the case of an MFRR causing a reject of a pending irq, |
435 | * this will have been handled when the MFRR was set in the |
436 | * first place. |
437 | * |
438 | * Thus we don't have to handle rejects, only resends. |
439 | * |
440 | * When implementing real mode for HV KVM, resend will lead to |
441 | * a H_TOO_HARD return and the whole transaction will be handled |
442 | * in virtual mode. |
443 | */ |
444 | do { |
445 | old_state = new_state = READ_ONCE(icp->state); |
446 | |
447 | /* Down_CPPR */ |
448 | new_state.cppr = new_cppr; |
449 | |
450 | /* |
451 | * Cut down Resend / Check_IPI / IPI |
452 | * |
453 | * The logic is that we cannot have a pending interrupt |
454 | * trumped by an IPI at this point (see above), so we |
455 | * know that either the pending interrupt is already an |
456 | * IPI (in which case we don't care to override it) or |
		 * it's either more favored than us or non-existent.
458 | */ |
459 | if (new_state.mfrr < new_cppr && |
460 | new_state.mfrr <= new_state.pending_pri) { |
461 | new_state.pending_pri = new_state.mfrr; |
462 | new_state.xisr = XICS_IPI; |
463 | } |
464 | |
465 | /* Latch/clear resend bit */ |
466 | resend = new_state.need_resend; |
467 | new_state.need_resend = 0; |
468 | |
	} while (!icp_rm_try_update(icp, old_state, new_state));
470 | |
471 | /* |
472 | * Now handle resend checks. Those are asynchronous to the ICP |
473 | * state update in HW (ie bus transactions) so we can handle them |
474 | * separately here as well. |
475 | */ |
476 | if (resend) { |
477 | icp->n_check_resend++; |
478 | icp_rm_check_resend(xics, icp); |
479 | } |
480 | } |
481 | |
482 | unsigned long xics_rm_h_xirr_x(struct kvm_vcpu *vcpu) |
483 | { |
484 | kvmppc_set_gpr(vcpu, 5, get_tb()); |
485 | return xics_rm_h_xirr(vcpu); |
486 | } |
487 | |
488 | unsigned long xics_rm_h_xirr(struct kvm_vcpu *vcpu) |
489 | { |
490 | union kvmppc_icp_state old_state, new_state; |
491 | struct kvmppc_xics *xics = vcpu->kvm->arch.xics; |
492 | struct kvmppc_icp *icp = vcpu->arch.icp; |
493 | u32 xirr; |
494 | |
495 | if (!xics || !xics->real_mode) |
496 | return H_TOO_HARD; |
497 | |
498 | /* First clear the interrupt */ |
	icp_rm_clr_vcpu_irq(icp->vcpu);
500 | |
501 | /* |
502 | * ICP State: Accept_Interrupt |
503 | * |
504 | * Return the pending interrupt (if any) along with the |
505 | * current CPPR, then clear the XISR & set CPPR to the |
506 | * pending priority |
507 | */ |
508 | do { |
509 | old_state = new_state = READ_ONCE(icp->state); |
510 | |
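		/* XIRR layout: CPPR in the top byte, XISR in the low 24 bits */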
511 | xirr = old_state.xisr | (((u32)old_state.cppr) << 24); |
512 | if (!old_state.xisr) |
513 | break; |
514 | new_state.cppr = new_state.pending_pri; |
515 | new_state.pending_pri = 0xff; |
516 | new_state.xisr = 0; |
517 | |
	} while (!icp_rm_try_update(icp, old_state, new_state));
519 | |
520 | /* Return the result in GPR4 */ |
521 | kvmppc_set_gpr(vcpu, 4, xirr); |
522 | |
523 | return check_too_hard(xics, icp); |
524 | } |
525 | |
526 | int xics_rm_h_ipi(struct kvm_vcpu *vcpu, unsigned long server, |
527 | unsigned long mfrr) |
528 | { |
529 | union kvmppc_icp_state old_state, new_state; |
530 | struct kvmppc_xics *xics = vcpu->kvm->arch.xics; |
531 | struct kvmppc_icp *icp, *this_icp = vcpu->arch.icp; |
532 | u32 reject; |
533 | bool resend; |
534 | bool local; |
535 | |
536 | if (!xics || !xics->real_mode) |
537 | return H_TOO_HARD; |
538 | |
539 | local = this_icp->server_num == server; |
540 | if (local) |
541 | icp = this_icp; |
542 | else |
543 | icp = kvmppc_xics_find_server(vcpu->kvm, server); |
544 | if (!icp) |
545 | return H_PARAMETER; |
546 | |
547 | /* |
548 | * ICP state: Set_MFRR |
549 | * |
550 | * If the CPPR is more favored than the new MFRR, then |
551 | * nothing needs to be done as there can be no XISR to |
552 | * reject. |
553 | * |
554 | * ICP state: Check_IPI |
555 | * |
556 | * If the CPPR is less favored, then we might be replacing |
557 | * an interrupt, and thus need to possibly reject it. |
558 | * |
559 | * ICP State: IPI |
560 | * |
561 | * Besides rejecting any pending interrupts, we also |
562 | * update XISR and pending_pri to mark IPI as pending. |
563 | * |
564 | * PAPR does not describe this state, but if the MFRR is being |
565 | * made less favored than its earlier value, there might be |
566 | * a previously-rejected interrupt needing to be resent. |
567 | * Ideally, we would want to resend only if |
568 | * prio(pending_interrupt) < mfrr && |
569 | * prio(pending_interrupt) < cppr |
570 | * where pending interrupt is the one that was rejected. But |
571 | * we don't have that state, so we simply trigger a resend |
572 | * whenever the MFRR is made less favored. |
573 | */ |
574 | do { |
575 | old_state = new_state = READ_ONCE(icp->state); |
576 | |
577 | /* Set_MFRR */ |
578 | new_state.mfrr = mfrr; |
579 | |
580 | /* Check_IPI */ |
581 | reject = 0; |
582 | resend = false; |
583 | if (mfrr < new_state.cppr) { |
584 | /* Reject a pending interrupt if not an IPI */ |
585 | if (mfrr <= new_state.pending_pri) { |
586 | reject = new_state.xisr; |
587 | new_state.pending_pri = mfrr; |
588 | new_state.xisr = XICS_IPI; |
589 | } |
590 | } |
591 | |
592 | if (mfrr > old_state.mfrr) { |
593 | resend = new_state.need_resend; |
594 | new_state.need_resend = 0; |
595 | } |
	} while (!icp_rm_try_update(icp, old_state, new_state));
597 | |
598 | /* Handle reject in real mode */ |
599 | if (reject && reject != XICS_IPI) { |
600 | this_icp->n_reject++; |
		icp_rm_deliver_irq(xics, icp, reject, false);
602 | } |
603 | |
604 | /* Handle resends in real mode */ |
605 | if (resend) { |
606 | this_icp->n_check_resend++; |
607 | icp_rm_check_resend(xics, icp); |
608 | } |
609 | |
	return check_too_hard(xics, this_icp);
611 | } |
612 | |
613 | int xics_rm_h_cppr(struct kvm_vcpu *vcpu, unsigned long cppr) |
614 | { |
615 | union kvmppc_icp_state old_state, new_state; |
616 | struct kvmppc_xics *xics = vcpu->kvm->arch.xics; |
617 | struct kvmppc_icp *icp = vcpu->arch.icp; |
618 | u32 reject; |
619 | |
620 | if (!xics || !xics->real_mode) |
621 | return H_TOO_HARD; |
622 | |
623 | /* |
624 | * ICP State: Set_CPPR |
625 | * |
626 | * We can safely compare the new value with the current |
627 | * value outside of the transaction as the CPPR is only |
628 | * ever changed by the processor on itself |
629 | */ |
630 | if (cppr > icp->state.cppr) { |
		icp_rm_down_cppr(xics, icp, cppr);
632 | goto bail; |
633 | } else if (cppr == icp->state.cppr) |
634 | return H_SUCCESS; |
635 | |
636 | /* |
637 | * ICP State: Up_CPPR |
638 | * |
639 | * The processor is raising its priority, this can result |
640 | * in a rejection of a pending interrupt: |
641 | * |
642 | * ICP State: Reject_Current |
643 | * |
644 | * We can remove EE from the current processor, the update |
645 | * transaction will set it again if needed |
646 | */ |
	icp_rm_clr_vcpu_irq(icp->vcpu);
648 | |
649 | do { |
650 | old_state = new_state = READ_ONCE(icp->state); |
651 | |
652 | reject = 0; |
653 | new_state.cppr = cppr; |
654 | |
655 | if (cppr <= new_state.pending_pri) { |
656 | reject = new_state.xisr; |
657 | new_state.xisr = 0; |
658 | new_state.pending_pri = 0xff; |
659 | } |
660 | |
	} while (!icp_rm_try_update(icp, old_state, new_state));
662 | |
663 | /* |
664 | * Check for rejects. They are handled by doing a new delivery |
665 | * attempt (see comments in icp_rm_deliver_irq). |
666 | */ |
667 | if (reject && reject != XICS_IPI) { |
668 | icp->n_reject++; |
		icp_rm_deliver_irq(xics, icp, reject, false);
670 | } |
671 | bail: |
672 | return check_too_hard(xics, icp); |
673 | } |
674 | |
675 | static int ics_rm_eoi(struct kvm_vcpu *vcpu, u32 irq) |
676 | { |
677 | struct kvmppc_xics *xics = vcpu->kvm->arch.xics; |
678 | struct kvmppc_icp *icp = vcpu->arch.icp; |
679 | struct kvmppc_ics *ics; |
680 | struct ics_irq_state *state; |
681 | u16 src; |
682 | u32 pq_old, pq_new; |
683 | |
684 | /* |
685 | * ICS EOI handling: For LSI, if P bit is still set, we need to |
686 | * resend it. |
687 | * |
688 | * For MSI, we move Q bit into P (and clear Q). If it is set, |
689 | * resend it. |
690 | */ |
691 | |
692 | ics = kvmppc_xics_find_ics(xics, irq, &src); |
693 | if (!ics) |
694 | goto bail; |
695 | |
696 | state = &ics->irq_state[src]; |
697 | |
698 | if (state->lsi) |
699 | pq_new = state->pq_state; |
700 | else |
701 | do { |
702 | pq_old = state->pq_state; |
703 | pq_new = pq_old >> 1; |
704 | } while (cmpxchg(&state->pq_state, pq_old, pq_new) != pq_old); |
705 | |
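	/* If P is (still) set, the interrupt must be presented again */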
706 | if (pq_new & PQ_PRESENTED) |
		icp_rm_deliver_irq(xics, NULL, irq, false);
708 | |
	if (!hlist_empty(&vcpu->kvm->irq_ack_notifier_list)) {
710 | icp->rm_action |= XICS_RM_NOTIFY_EOI; |
711 | icp->rm_eoied_irq = irq; |
712 | } |
713 | |
714 | /* Handle passthrough interrupts */ |
715 | if (state->host_irq) { |
716 | ++vcpu->stat.pthru_all; |
717 | if (state->intr_cpu != -1) { |
718 | int pcpu = raw_smp_processor_id(); |
719 | |
720 | pcpu = cpu_first_thread_sibling(pcpu); |
721 | ++vcpu->stat.pthru_host; |
722 | if (state->intr_cpu != pcpu) { |
723 | ++vcpu->stat.pthru_bad_aff; |
				xics_opal_set_server(state->host_irq, pcpu);
725 | } |
726 | state->intr_cpu = -1; |
727 | } |
728 | } |
729 | |
730 | bail: |
731 | return check_too_hard(xics, icp); |
732 | } |
733 | |
734 | int xics_rm_h_eoi(struct kvm_vcpu *vcpu, unsigned long xirr) |
735 | { |
736 | struct kvmppc_xics *xics = vcpu->kvm->arch.xics; |
737 | struct kvmppc_icp *icp = vcpu->arch.icp; |
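	/* The XISR (interrupt source) is the low 24 bits of the XIRR */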
738 | u32 irq = xirr & 0x00ffffff; |
739 | |
740 | if (!xics || !xics->real_mode) |
741 | return H_TOO_HARD; |
742 | |
743 | /* |
744 | * ICP State: EOI |
745 | * |
	 * Note: If EOI is incorrectly used by SW to lower the CPPR
	 * value (i.e. make it more favored), we do not check for rejection
	 * of a pending interrupt; this is a SW error and PAPR specifies
	 * that we don't have to deal with it.
750 | * |
751 | * The sending of an EOI to the ICS is handled after the |
752 | * CPPR update |
753 | * |
754 | * ICP State: Down_CPPR which we handle |
755 | * in a separate function as it's shared with H_CPPR. |
756 | */ |
	icp_rm_down_cppr(xics, icp, xirr >> 24);
758 | |
759 | /* IPIs have no EOI */ |
760 | if (irq == XICS_IPI) |
761 | return check_too_hard(xics, icp); |
762 | |
763 | return ics_rm_eoi(vcpu, irq); |
764 | } |
765 | |
766 | static unsigned long eoi_rc; |
767 | |
768 | static void icp_eoi(struct irq_data *d, u32 hwirq, __be32 xirr, bool *again) |
769 | { |
770 | void __iomem *xics_phys; |
771 | int64_t rc; |
772 | |
773 | rc = pnv_opal_pci_msi_eoi(d); |
774 | |
775 | if (rc) |
776 | eoi_rc = rc; |
777 | |
778 | iosync(); |
779 | |
780 | /* EOI it */ |
781 | xics_phys = local_paca->kvm_hstate.xics_phys; |
782 | if (xics_phys) { |
783 | __raw_rm_writel(xirr, xics_phys + XICS_XIRR); |
784 | } else { |
785 | rc = opal_int_eoi(be32_to_cpu(xirr)); |
786 | *again = rc > 0; |
787 | } |
788 | } |
789 | |
790 | static int xics_opal_set_server(unsigned int hw_irq, int server_cpu) |
791 | { |
792 | unsigned int mangle_cpu = get_hard_smp_processor_id(server_cpu) << 2; |
793 | |
794 | return opal_set_xive(hw_irq, mangle_cpu, DEFAULT_PRIORITY); |
795 | } |
796 | |
797 | /* |
798 | * Increment a per-CPU 32-bit unsigned integer variable. |
 * Safe to call in real mode. Handles vmalloc'ed addresses.
800 | * |
801 | * ToDo: Make this work for any integral type |
802 | */ |
803 | |
804 | static inline void this_cpu_inc_rm(unsigned int __percpu *addr) |
805 | { |
806 | unsigned long l; |
807 | unsigned int *raddr; |
808 | int cpu = smp_processor_id(); |
809 | |
810 | raddr = per_cpu_ptr(addr, cpu); |
811 | l = (unsigned long)raddr; |
812 | |
813 | if (get_region_id(l) == VMALLOC_REGION_ID) { |
814 | l = vmalloc_to_phys(raddr); |
815 | raddr = (unsigned int *)l; |
816 | } |
817 | ++*raddr; |
818 | } |
819 | |
820 | /* |
821 | * We don't try to update the flags in the irq_desc 'istate' field in |
822 | * here as would happen in the normal IRQ handling path for several reasons: |
823 | * - state flags represent internal IRQ state and are not expected to be |
824 | * updated outside the IRQ subsystem |
 * - more importantly, these are useful for edge-triggered interrupts,
 *   IRQ probing, etc., but we are only handling MSI/MSI-X interrupts here
827 | * and these states shouldn't apply to us. |
828 | * |
829 | * However, we do update irq_stats - we somewhat duplicate the code in |
 * kstat_incr_irqs_this_cpu() here, since that function is defined
 * in irq/internal.h, which we don't want to include here.
832 | * The only difference is that desc->kstat_irqs is an allocated per CPU |
833 | * variable and could have been vmalloc'ed, so we can't directly |
834 | * call __this_cpu_inc() on it. The kstat structure is a static |
835 | * per CPU variable and it should be accessible by real-mode KVM. |
836 | * |
837 | */ |
838 | static void kvmppc_rm_handle_irq_desc(struct irq_desc *desc) |
839 | { |
	this_cpu_inc_rm(desc->kstat_irqs);
841 | __this_cpu_inc(kstat.irqs_sum); |
842 | } |
843 | |
844 | long kvmppc_deliver_irq_passthru(struct kvm_vcpu *vcpu, |
845 | __be32 xirr, |
846 | struct kvmppc_irq_map *irq_map, |
847 | struct kvmppc_passthru_irqmap *pimap, |
848 | bool *again) |
849 | { |
850 | struct kvmppc_xics *xics; |
851 | struct kvmppc_icp *icp; |
852 | struct kvmppc_ics *ics; |
853 | struct ics_irq_state *state; |
854 | u32 irq; |
855 | u16 src; |
856 | u32 pq_old, pq_new; |
857 | |
858 | irq = irq_map->v_hwirq; |
859 | xics = vcpu->kvm->arch.xics; |
860 | icp = vcpu->arch.icp; |
861 | |
	kvmppc_rm_handle_irq_desc(irq_map->desc);
863 | |
864 | ics = kvmppc_xics_find_ics(xics, irq, &src); |
865 | if (!ics) |
866 | return 2; |
867 | |
868 | state = &ics->irq_state[src]; |
869 | |
870 | /* only MSIs register bypass producers, so it must be MSI here */ |
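	/* Atomically shift P into Q and set P (PRESENTED) */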
871 | do { |
872 | pq_old = state->pq_state; |
873 | pq_new = ((pq_old << 1) & 3) | PQ_PRESENTED; |
874 | } while (cmpxchg(&state->pq_state, pq_old, pq_new) != pq_old); |
875 | |
876 | /* Test P=1, Q=0, this is the only case where we present */ |
877 | if (pq_new == PQ_PRESENTED) |
878 | icp_rm_deliver_irq(xics, icp, new_irq: irq, check_resend: false); |
879 | |
880 | /* EOI the interrupt */ |
	icp_eoi(irq_desc_get_irq_data(irq_map->desc), irq_map->r_hwirq, xirr,
		again);
882 | |
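	/*
	 * A positive return causes an exit to the host so the interrupt
	 * can be completed in virtual mode; -2 means it was fully handled
	 * here in real mode.
	 */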
883 | if (check_too_hard(xics, icp) == H_TOO_HARD) |
884 | return 2; |
885 | else |
886 | return -2; |
887 | } |
888 | |
889 | /* --- Non-real mode XICS-related built-in routines --- */ |
890 | |
891 | /* |
892 | * Host Operations poked by RM KVM |
893 | */ |
894 | static void rm_host_ipi_action(int action, void *data) |
895 | { |
896 | switch (action) { |
897 | case XICS_RM_KICK_VCPU: |
898 | kvmppc_host_rm_ops_hv->vcpu_kick(data); |
899 | break; |
900 | default: |
		WARN(1, "Unexpected rm_action=%d data=%p\n", action, data);
902 | break; |
903 | } |
}
906 | |
907 | void kvmppc_xics_ipi_action(void) |
908 | { |
909 | int core; |
910 | unsigned int cpu = smp_processor_id(); |
911 | struct kvmppc_host_rm_core *rm_corep; |
912 | |
913 | core = cpu >> threads_shift; |
914 | rm_corep = &kvmppc_host_rm_ops_hv->rm_core[core]; |
915 | |
916 | if (rm_corep->rm_data) { |
		rm_host_ipi_action(rm_corep->rm_state.rm_action,
				   rm_corep->rm_data);
919 | /* Order these stores against the real mode KVM */ |
920 | rm_corep->rm_data = NULL; |
921 | smp_wmb(); |
922 | rm_corep->rm_state.rm_action = 0; |
923 | } |
924 | } |
925 | |