Lines matching refs: icp

Cross-reference hits for the identifier icp in KVM's real-mode XICS interrupt-controller emulation for POWER guests; the function names and line numbers correspond to arch/powerpc/kvm/book3s_hv_rm_xics.c in the Linux kernel. Each hit shows the source line number, the matched code, and the enclosing function, with "argument" or "local" noting how icp is bound at that site.

33 static void icp_rm_deliver_irq(struct kvmppc_xics *xics, struct kvmppc_icp *icp,
39 struct kvmppc_ics *ics, struct kvmppc_icp *icp) in ics_rm_check_resend() argument
46 icp_rm_deliver_irq(xics, icp, state->number, true); in ics_rm_check_resend()
130 struct kvmppc_icp *this_icp = this_vcpu->arch.icp; in icp_rm_set_vcpu_irq()
174 static inline bool icp_rm_try_update(struct kvmppc_icp *icp, in icp_rm_try_update() argument
185 success = cmpxchg64(&icp->state.raw, old.raw, new.raw) == old.raw; in icp_rm_try_update()
205 icp_rm_set_vcpu_irq(icp->vcpu, this_vcpu); in icp_rm_try_update()
208 this_vcpu->arch.icp->rm_dbgstate = new; in icp_rm_try_update()
209 this_vcpu->arch.icp->rm_dbgtgt = icp->vcpu; in icp_rm_try_update()
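The update primitive above is the heart of this file: the entire ICP (interrupt presentation controller) state fits in one 64-bit word, so real-mode and virtual-mode code race on it with the single cmpxchg64() at line 185 instead of taking a lock; on success, line 205 kicks the target vCPU if the update left a new interrupt pending. A minimal userspace sketch of the same pattern, using C11 atomics in place of cmpxchg64() (the field layout and names are illustrative, not the kernel's exact bit packing):

#include <stdatomic.h>
#include <stdbool.h>
#include <stdint.h>

union icp_state {
	uint64_t raw;
	struct {
		uint8_t  cppr;        /* current processor priority */
		uint8_t  mfrr;        /* most-favored request (IPI) priority */
		uint8_t  pending_pri; /* priority of the pending source */
		uint8_t  need_resend; /* a rejected source wants another try */
		uint32_t xisr;        /* pending interrupt source, 0 = none */
	};
};

struct icp {
	_Atomic uint64_t state_raw;
};

static bool icp_try_update(struct icp *icp, union icp_state old,
			   union icp_state new)
{
	/* Succeeds only if nobody modified the state since the caller
	 * read it; on failure the caller retries its whole
	 * read-modify sequence against the fresh state. */
	return atomic_compare_exchange_strong(&icp->state_raw,
					      &old.raw, new.raw);
}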
216 struct kvmppc_icp *icp) in check_too_hard() argument
218 return (xics->real_mode_dbg || icp->rm_action) ? H_TOO_HARD : H_SUCCESS; in check_too_hard()
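check_too_hard() encodes the real-mode exit convention: real mode cannot safely touch most of the host, so whenever a handler queues follow-up work in icp->rm_action (or real-mode debugging is active), the hcall returns H_TOO_HARD and is replayed by the virtual-mode handler. H_TOO_HARD is a Linux-internal marker, not a PAPR status. A compact sketch of the convention (constant values illustrative):

#define H_SUCCESS   0
#define H_TOO_HARD  9999	/* kernel-internal: redo the hcall in virtual mode */

static long check_too_hard(bool real_mode_dbg, unsigned long rm_action)
{
	/* Any deferred action forces a second pass in virtual mode. */
	return (real_mode_dbg || rm_action) ? H_TOO_HARD : H_SUCCESS;
}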
222 struct kvmppc_icp *icp) in icp_rm_check_resend() argument
228 for_each_set_bit(icsid, icp->resend_map, xics->max_icsid + 1) { in icp_rm_check_resend()
231 if (!test_and_clear_bit(icsid, icp->resend_map)) in icp_rm_check_resend()
235 ics_rm_check_resend(xics, ics, icp); in icp_rm_check_resend()
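icp_rm_check_resend() walks a per-ICP bitmap with one bit per ICS (each ICS is a block of interrupt sources); clearing the bit with test_and_clear_bit() before resending guarantees a bit set concurrently by another CPU is seen by a later scan rather than lost. A userspace sketch of that clear-then-act discipline (bound and names are illustrative; the kernel iterates up to xics->max_icsid):

#include <stdatomic.h>
#include <stdint.h>

#define MAX_ICSID 32	/* illustrative bound */

struct resend_state {
	_Atomic unsigned long resend_map;	/* one bit per ICS id */
};

static void check_resend(struct resend_state *rs,
			 void (*resend_ics)(unsigned int icsid))
{
	unsigned long snap = atomic_load(&rs->resend_map);

	for (unsigned int icsid = 0; icsid < MAX_ICSID; icsid++) {
		unsigned long bit = 1UL << icsid;

		if (!(snap & bit))
			continue;
		/* Atomically clear before acting, mirroring
		 * test_and_clear_bit(): only the CPU that actually
		 * cleared the bit performs the resend. */
		if (atomic_fetch_and(&rs->resend_map, ~bit) & bit)
			resend_ics(icsid);
	}
}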
239 static bool icp_rm_try_to_deliver(struct kvmppc_icp *icp, u32 irq, u8 priority, in icp_rm_try_to_deliver() argument
246 old_state = new_state = READ_ONCE(icp->state); in icp_rm_try_to_deliver()
272 } while (!icp_rm_try_update(icp, old_state, new_state)); in icp_rm_try_to_deliver()
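Lines 246 and 272 show the canonical shape used by every state change in this file: snapshot the whole word with READ_ONCE(), compute the new state in a local copy, and loop until icp_rm_try_update() lands it. A sketch of the delivery check, reusing the types and icp_try_update() from the sketch above (the priority tests paraphrase the XICS rule that numerically lower means more favored):

static bool icp_try_to_deliver(struct icp *icp, uint32_t irq,
			       uint8_t priority, uint32_t *reject)
{
	union icp_state old, new;
	bool success;

	do {
		old.raw = new.raw = atomic_load(&icp->state_raw);
		*reject = 0;

		/* Deliverable only if more favored than the CPU's
		 * priority, any queued IPI, and any pending source. */
		success = priority < new.cppr &&
			  priority < new.mfrr &&
			  priority < new.pending_pri;

		if (success) {
			/* Displace the currently pending source, if
			 * any; the caller re-delivers it as *reject. */
			*reject = new.xisr;
			new.xisr = irq;
			new.pending_pri = priority;
		} else {
			/* Masked for now: ask for a resend when the
			 * blocking priority drops. */
			new.need_resend = 1;
		}
	} while (!icp_try_update(icp, old, new));

	return success;
}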
277 static void icp_rm_deliver_irq(struct kvmppc_xics *xics, struct kvmppc_icp *icp, in icp_rm_deliver_irq() argument
314 if (!icp || state->server != icp->server_num) { in icp_rm_deliver_irq()
315 icp = kvmppc_xics_find_server(xics->kvm, state->server); in icp_rm_deliver_irq()
316 if (!icp) { in icp_rm_deliver_irq()
366 if (icp_rm_try_to_deliver(icp, new_irq, state->priority, &reject)) { in icp_rm_deliver_irq()
372 icp->n_reject++; in icp_rm_deliver_irq()
389 set_bit(ics->icsid, icp->resend_map); in icp_rm_deliver_irq()
398 if (!icp->state.need_resend) { in icp_rm_deliver_irq()
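icp_rm_deliver_irq() wraps the attempt above in a loop: a successful delivery may have displaced a less favored pending source, which goes around the loop as the next interrupt to deliver, while a failed delivery flags the source's ICS in resend_map (line 389) and stops. A skeleton of that control flow; priority_of() and flag_resend() are hypothetical stand-ins for the ICS state lookup and the set_bit() on resend_map:

static uint8_t priority_of(uint32_t irq);		/* hypothetical ICS lookup */
static void flag_resend(struct icp *icp, uint32_t irq);	/* hypothetical set_bit */

static void icp_deliver_irq(struct icp *icp, uint32_t new_irq)
{
	uint32_t reject;

	while (new_irq) {
		if (icp_try_to_deliver(icp, new_irq,
				       priority_of(new_irq), &reject)) {
			/* Delivered; re-deliver whatever it displaced. */
			new_irq = reject;
		} else {
			/* Masked by CPPR: record for a later resend. */
			flag_resend(icp, new_irq);
			new_irq = 0;
		}
	}
}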
409 static void icp_rm_down_cppr(struct kvmppc_xics *xics, struct kvmppc_icp *icp, in icp_rm_down_cppr() argument
445 old_state = new_state = READ_ONCE(icp->state); in icp_rm_down_cppr()
469 } while (!icp_rm_try_update(icp, old_state, new_state)); in icp_rm_down_cppr()
477 icp->n_check_resend++; in icp_rm_down_cppr()
478 icp_rm_check_resend(xics, icp); in icp_rm_down_cppr()
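icp_rm_down_cppr() handles the processor lowering its priority (a numerically larger CPPR is less favored): the new CPPR can unmask a queued IPI, and previously rejected sources may now fit, which is why lines 477 and 478 bump n_check_resend and rescan the resend map. A simplified sketch of the transition, on the same illustrative types; the kernel version additionally re-delivers a pending source the IPI displaces, elided here. XICS_IPI is the source number reserved for IPIs (2 in the kernel's headers):

#define XICS_IPI 2	/* source number reserved for IPIs */

static bool icp_down_cppr(struct icp *icp, uint8_t new_cppr)
{
	union icp_state old, new;
	bool resend;

	do {
		old.raw = new.raw = atomic_load(&icp->state_raw);

		new.cppr = new_cppr;

		/* A queued IPI that the old CPPR masked may now win. */
		if (new.mfrr < new_cppr && new.mfrr <= new.pending_pri) {
			new.pending_pri = new.mfrr;
			new.xisr = XICS_IPI;
		}

		/* Take ownership of the resend flag; the caller runs
		 * the resend scan if it was set. */
		resend = new.need_resend;
		new.need_resend = 0;
	} while (!icp_try_update(icp, old, new));

	return resend;
}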
492 struct kvmppc_icp *icp = vcpu->arch.icp; in xics_rm_h_xirr() local
499 icp_rm_clr_vcpu_irq(icp->vcpu); in xics_rm_h_xirr()
509 old_state = new_state = READ_ONCE(icp->state); in xics_rm_h_xirr()
518 } while (!icp_rm_try_update(icp, old_state, new_state)); in xics_rm_h_xirr()
523 return check_too_hard(xics, icp); in xics_rm_h_xirr()
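xics_rm_h_xirr() implements the XICS Accept operation: return the pending source together with the current CPPR packed into one XIRR word (CPPR in the top byte), then raise CPPR to the accepted priority so the same source cannot immediately re-interrupt. A sketch on the same illustrative types, with 0xff as the "nothing pending" priority:

static uint32_t icp_accept(struct icp *icp)
{
	union icp_state old, new;
	uint32_t xirr;

	do {
		old.raw = new.raw = atomic_load(&icp->state_raw);

		/* Old CPPR in the top byte, pending source below it. */
		xirr = old.xisr | ((uint32_t)old.cppr << 24);
		if (!old.xisr)
			break;	/* nothing pending: no state change */

		/* Accept: CPPR becomes the accepted priority. */
		new.cppr = new.pending_pri;
		new.pending_pri = 0xff;
		new.xisr = 0;
	} while (!icp_try_update(icp, old, new));

	return xirr;
}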
531 struct kvmppc_icp *icp, *this_icp = vcpu->arch.icp; in xics_rm_h_ipi() local
541 icp = this_icp; in xics_rm_h_ipi()
543 icp = kvmppc_xics_find_server(vcpu->kvm, server); in xics_rm_h_ipi()
544 if (!icp) in xics_rm_h_ipi()
575 old_state = new_state = READ_ONCE(icp->state); in xics_rm_h_ipi()
596 } while (!icp_rm_try_update(icp, old_state, new_state)); in xics_rm_h_ipi()
601 icp_rm_deliver_irq(xics, icp, reject, false); in xics_rm_h_ipi()
607 icp_rm_check_resend(xics, icp); in xics_rm_h_ipi()
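H_IPI stores a new MFRR on the target server's ICP and checks two consequences in the same atomic update: a more favored MFRR may turn into a pending IPI (displacing a less favored source, re-delivered at line 601), and a less favored MFRR (e.g. 0xff to clear an IPI) may release a waiting resend, picked up at line 607. A sketch on the same types:

static void icp_set_mfrr(struct icp *icp, uint8_t mfrr,
			 uint32_t *reject, bool *resend)
{
	union icp_state old, new;

	do {
		old.raw = new.raw = atomic_load(&icp->state_raw);

		new.mfrr = mfrr;
		*reject = 0;
		*resend = false;

		/* Does the IPI now beat the CPU's priority? */
		if (mfrr < new.cppr && mfrr <= new.pending_pri) {
			/* Displace a less favored pending source. */
			*reject = new.xisr;
			new.pending_pri = mfrr;
			new.xisr = XICS_IPI;
		}

		/* Raising MFRR can free the slot a rejected source
		 * was waiting for. */
		if (mfrr > old.mfrr) {
			*resend = new.need_resend;
			new.need_resend = 0;
		}
	} while (!icp_try_update(icp, old, new));
}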
617 struct kvmppc_icp *icp = vcpu->arch.icp; in xics_rm_h_cppr() local
630 if (cppr > icp->state.cppr) { in xics_rm_h_cppr()
631 icp_rm_down_cppr(xics, icp, cppr); in xics_rm_h_cppr()
633 } else if (cppr == icp->state.cppr) in xics_rm_h_cppr()
647 icp_rm_clr_vcpu_irq(icp->vcpu); in xics_rm_h_cppr()
650 old_state = new_state = READ_ONCE(icp->state); in xics_rm_h_cppr()
661 } while (!icp_rm_try_update(icp, old_state, new_state)); in xics_rm_h_cppr()
668 icp->n_reject++; in xics_rm_h_cppr()
669 icp_rm_deliver_irq(xics, icp, reject, false); in xics_rm_h_cppr()
672 return check_too_hard(xics, icp); in xics_rm_h_cppr()
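Lines 630 through 633 split H_CPPR three ways: moving to a less favored CPPR goes through icp_rm_down_cppr(), an unchanged CPPR is a no-op, and a more favored CPPR may mask the pending source, which is then rejected back for re-delivery (lines 668 and 669). A sketch of that raise path on the same types:

static uint32_t icp_up_cppr(struct icp *icp, uint8_t cppr)
{
	union icp_state old, new;
	uint32_t reject;

	do {
		old.raw = new.raw = atomic_load(&icp->state_raw);

		reject = 0;
		new.cppr = cppr;

		/* The pending source is now masked: un-queue it and
		 * hand it back for delivery elsewhere or later. */
		if (cppr <= new.pending_pri) {
			reject = new.xisr;
			new.xisr = 0;
			new.pending_pri = 0xff;
		}
	} while (!icp_try_update(icp, old, new));

	return reject;
}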
678 struct kvmppc_icp *icp = vcpu->arch.icp; in ics_rm_eoi() local
710 icp->rm_action |= XICS_RM_NOTIFY_EOI; in ics_rm_eoi()
711 icp->rm_eoied_irq = irq; in ics_rm_eoi()
731 return check_too_hard(xics, icp); in ics_rm_eoi()
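Lines 710 and 711 are the other half of the check_too_hard() convention: ics_rm_eoi() cannot call the host's EOI notifier from real mode, so it records the request in rm_action and rm_eoied_irq, and the replayed virtual-mode hcall performs the notification. A sketch of the deferral (the bit value is illustrative, not the kernel's):

#define XICS_RM_NOTIFY_EOI 0x8	/* illustrative bit value */

struct icp_rm_work {
	unsigned long rm_action;	/* deferred-work bits */
	uint32_t rm_eoied_irq;		/* argument for NOTIFY_EOI */
};

static void defer_eoi_notify(struct icp_rm_work *w, uint32_t irq)
{
	/* Recorded here, performed after check_too_hard() forces the
	 * hcall to complete in virtual mode. */
	w->rm_action |= XICS_RM_NOTIFY_EOI;
	w->rm_eoied_irq = irq;
}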
737 struct kvmppc_icp *icp = vcpu->arch.icp; in xics_rm_h_eoi() local
757 icp_rm_down_cppr(xics, icp, xirr >> 24); in xics_rm_h_eoi()
761 return check_too_hard(xics, icp); in xics_rm_h_eoi()
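Line 757 is the whole priority side of H_EOI: the top byte of the XIRR the guest writes back is the CPPR that Accept packed there (see the icp_accept() sketch), so EOI restores it via down_cppr, while the low 24 bits name the source being EOIed. A two-step sketch; ics_eoi() is a hypothetical stand-in for the per-source half:

static long ics_eoi(struct icp *icp, uint32_t irq);	/* hypothetical */

static long h_eoi(struct icp *icp, uint32_t xirr)
{
	icp_down_cppr(icp, xirr >> 24);		/* restore pre-accept CPPR */
	return ics_eoi(icp, xirr & 0x00ffffff);	/* per-source EOI, may defer */
}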
851 struct kvmppc_icp *icp; in kvmppc_deliver_irq_passthru() local
860 icp = vcpu->arch.icp; in kvmppc_deliver_irq_passthru()
878 icp_rm_deliver_irq(xics, icp, irq, false); in kvmppc_deliver_irq_passthru()
883 if (check_too_hard(xics, icp) == H_TOO_HARD) in kvmppc_deliver_irq_passthru()
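Finally, kvmppc_deliver_irq_passthru() is the real-mode fast path for device passthrough: a host hardware interrupt mapped to a guest source is injected with icp_rm_deliver_irq() without leaving real mode, and line 883 falls back to the ordinary exit path whenever the ICP queued work that real mode cannot finish. A sketch built on the pieces above; map_hwirq_to_guest() is a hypothetical stand-in for the kernel's passthrough irq map lookup:

static uint32_t map_hwirq_to_guest(uint32_t hwirq);	/* hypothetical lookup */

static long deliver_passthru(struct icp *icp, struct icp_rm_work *w,
			     uint32_t hwirq)
{
	uint32_t girq = map_hwirq_to_guest(hwirq);

	if (!girq)
		return -1;	/* unmapped: let the host handle it */

	icp_deliver_irq(icp, girq);	/* inject without a guest exit */

	/* Anything deferred still forces the virtual-mode slow path. */
	return w->rm_action ? H_TOO_HARD : H_SUCCESS;
}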