Lines Matching refs:icp

33 static void icp_rm_deliver_irq(struct kvmppc_xics *xics, struct kvmppc_icp *icp,
39 struct kvmppc_ics *ics, struct kvmppc_icp *icp) in ics_rm_check_resend() argument
46 icp_rm_deliver_irq(xics, icp, state->number, true); in ics_rm_check_resend()
130 struct kvmppc_icp *this_icp = this_vcpu->arch.icp; in icp_rm_set_vcpu_irq()
181 static inline bool icp_rm_try_update(struct kvmppc_icp *icp, in icp_rm_try_update() argument
192 success = cmpxchg64(&icp->state.raw, old.raw, new.raw) == old.raw; in icp_rm_try_update()
212 icp_rm_set_vcpu_irq(icp->vcpu, this_vcpu); in icp_rm_try_update()
215 this_vcpu->arch.icp->rm_dbgstate = new; in icp_rm_try_update()
216 this_vcpu->arch.icp->rm_dbgtgt = icp->vcpu; in icp_rm_try_update()
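
The hits at 181-216 cover the file's central concurrency primitive: the whole ICP state is packed into one 64-bit word, so icp_rm_try_update() can publish a complete new state with a single cmpxchg64 (line 192) and callers retry until no other CPU has raced them. Below is a minimal userspace sketch of that pattern; the union layout, field names, and set_mfrr() are illustrative assumptions, not the kernel's definitions.

    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stdint.h>

    /* Illustrative stand-in for the ICP state: every field lives in the
     * same 64-bit word so one compare-and-swap updates them atomically. */
    union icp_state {
        uint64_t raw;
        struct {
            uint8_t  cppr;        /* current processor priority */
            uint8_t  mfrr;        /* most favored request register */
            uint8_t  pending_pri; /* priority of the pending interrupt */
            uint8_t  need_resend; /* deferred deliveries outstanding */
            uint32_t xisr;        /* pending interrupt source number */
        };
    };

    static _Atomic uint64_t state_raw;

    /* Analogue of icp_rm_try_update(): succeeds only if nobody changed
     * the state since the caller read it. */
    static bool try_update(union icp_state old, union icp_state new)
    {
        return atomic_compare_exchange_strong(&state_raw, &old.raw, new.raw);
    }

    /* The do/while shape used by every handler in this listing. */
    static void set_mfrr(uint8_t mfrr)
    {
        union icp_state old, new;

        do {
            old.raw = new.raw = atomic_load(&state_raw); /* READ_ONCE analogue */
            new.mfrr = mfrr;             /* compute the new state locally */
        } while (!try_update(old, new)); /* retry if another CPU raced us */
    }

Every hypercall path further down the listing (xics_rm_h_xirr, xics_rm_h_ipi, xics_rm_h_cppr) reuses this same read/modify/retry loop rather than taking a lock.
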
223 struct kvmppc_icp *icp) in check_too_hard() argument
225 return (xics->real_mode_dbg || icp->rm_action) ? H_TOO_HARD : H_SUCCESS; in check_too_hard()
229 struct kvmppc_icp *icp) in icp_rm_check_resend() argument
235 for_each_set_bit(icsid, icp->resend_map, xics->max_icsid + 1) { in icp_rm_check_resend()
238 if (!test_and_clear_bit(icsid, icp->resend_map)) in icp_rm_check_resend()
242 ics_rm_check_resend(xics, ics, icp); in icp_rm_check_resend()
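
Lines 235-242 show the resend scan: a bitmap records which interrupt source controllers (ICS) had deliveries deferred, and test_and_clear_bit() lets exactly one CPU claim each pending resend. A hedged userspace analogue, assuming a single 64-bit map (the kernel instead iterates with for_each_set_bit() up to xics->max_icsid):

    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stdint.h>

    #define MAX_ICSID 64

    static _Atomic uint64_t resend_map; /* one bit per interrupt source unit */

    /* Atomically claim bit `id`; true only for the CPU that cleared it. */
    static bool claim_bit(unsigned int id)
    {
        uint64_t mask = UINT64_C(1) << id;
        return atomic_fetch_and(&resend_map, ~mask) & mask;
    }

    static void check_resend(void)
    {
        for (unsigned int id = 0; id < MAX_ICSID; id++) {
            if (!(atomic_load(&resend_map) & (UINT64_C(1) << id)))
                continue;   /* cheap unlocked test first */
            if (!claim_bit(id))
                continue;   /* another CPU claimed it meanwhile */
            /* resend this unit's pending interrupts here, as
             * ics_rm_check_resend() does at line 242 */
        }
    }
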
246 static bool icp_rm_try_to_deliver(struct kvmppc_icp *icp, u32 irq, u8 priority, in icp_rm_try_to_deliver() argument
253 old_state = new_state = READ_ONCE(icp->state); in icp_rm_try_to_deliver()
279 } while (!icp_rm_try_update(icp, old_state, new_state)); in icp_rm_try_to_deliver()
284 static void icp_rm_deliver_irq(struct kvmppc_xics *xics, struct kvmppc_icp *icp, in icp_rm_deliver_irq() argument
321 if (!icp || state->server != icp->server_num) { in icp_rm_deliver_irq()
322 icp = kvmppc_xics_find_server(xics->kvm, state->server); in icp_rm_deliver_irq()
323 if (!icp) { in icp_rm_deliver_irq()
373 if (icp_rm_try_to_deliver(icp, new_irq, state->priority, &reject)) { in icp_rm_deliver_irq()
379 icp->n_reject++; in icp_rm_deliver_irq()
396 set_bit(ics->icsid, icp->resend_map); in icp_rm_deliver_irq()
405 if (!icp->state.need_resend) { in icp_rm_deliver_irq()
416 static void icp_rm_down_cppr(struct kvmppc_xics *xics, struct kvmppc_icp *icp, in icp_rm_down_cppr() argument
452 old_state = new_state = READ_ONCE(icp->state); in icp_rm_down_cppr()
476 } while (!icp_rm_try_update(icp, old_state, new_state)); in icp_rm_down_cppr()
484 icp->n_check_resend++; in icp_rm_down_cppr()
485 icp_rm_check_resend(xics, icp); in icp_rm_down_cppr()
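
icp_rm_down_cppr() (hits 416-485) applies the XICS priority convention: 0 is the most favored priority and 0xff the least, and an interrupt is presented only if it is strictly more favored (numerically smaller) than the processor's CPPR. Moving the CPPR down in favor therefore widens the acceptance window, which is why the function finishes with a resend check (lines 484-485). A small sketch of that rule, with illustrative names:

    #include <stdbool.h>
    #include <stdint.h>

    /* XICS convention: numerically smaller priority = more favored. */
    static bool deliverable(uint8_t irq_priority, uint8_t cppr)
    {
        return irq_priority < cppr;
    }

    /* Widen the acceptance window, then rescan deferred sources:
     * something rejected earlier may now pass deliverable(). */
    static void down_cppr(uint8_t *cppr, uint8_t new_cppr, bool need_resend)
    {
        *cppr = new_cppr; /* new_cppr is less favored than the old value */
        if (need_resend) {
            /* kick the bitmap scan sketched above (check_resend) */
        }
    }
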
494 struct kvmppc_icp *icp = vcpu->arch.icp; in xics_rm_h_xirr() local
501 icp_rm_clr_vcpu_irq(icp->vcpu); in xics_rm_h_xirr()
511 old_state = new_state = READ_ONCE(icp->state); in xics_rm_h_xirr()
520 } while (!icp_rm_try_update(icp, old_state, new_state)); in xics_rm_h_xirr()
525 return check_too_hard(xics, icp); in xics_rm_h_xirr()
533 struct kvmppc_icp *icp, *this_icp = vcpu->arch.icp; in xics_rm_h_ipi() local
543 icp = this_icp; in xics_rm_h_ipi()
545 icp = kvmppc_xics_find_server(vcpu->kvm, server); in xics_rm_h_ipi()
546 if (!icp) in xics_rm_h_ipi()
577 old_state = new_state = READ_ONCE(icp->state); in xics_rm_h_ipi()
598 } while (!icp_rm_try_update(icp, old_state, new_state)); in xics_rm_h_ipi()
603 icp_rm_deliver_irq(xics, icp, reject, false); in xics_rm_h_ipi()
609 icp_rm_check_resend(xics, icp); in xics_rm_h_ipi()
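
The H_IPI path (hits 533-609) stores the caller-supplied MFRR into the target ICP with the same retry loop and then checks whether an IPI should be presented; in XICS an IPI is simply interrupt source 2. A sketch of that decision; the exact comparison against the pending priority is my reading of the flow, so treat it as an assumption:

    #include <stdbool.h>
    #include <stdint.h>

    #define IPI_IRQ 2 /* XICS models IPIs as fixed interrupt source 2 */

    /* A new MFRR presents an IPI if it is more favored than the CPPR
     * and at least as favored as the currently pending interrupt,
     * which is then rejected back to its source. */
    static bool ipi_should_fire(uint8_t mfrr, uint8_t cppr, uint8_t pending_pri)
    {
        return mfrr < cppr && mfrr <= pending_pri;
    }
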
619 struct kvmppc_icp *icp = vcpu->arch.icp; in xics_rm_h_cppr() local
632 if (cppr > icp->state.cppr) { in xics_rm_h_cppr()
633 icp_rm_down_cppr(xics, icp, cppr); in xics_rm_h_cppr()
635 } else if (cppr == icp->state.cppr) in xics_rm_h_cppr()
649 icp_rm_clr_vcpu_irq(icp->vcpu); in xics_rm_h_cppr()
652 old_state = new_state = READ_ONCE(icp->state); in xics_rm_h_cppr()
663 } while (!icp_rm_try_update(icp, old_state, new_state)); in xics_rm_h_cppr()
670 icp->n_reject++; in xics_rm_h_cppr()
671 icp_rm_deliver_irq(xics, icp, reject, false); in xics_rm_h_cppr()
674 return check_too_hard(xics, icp); in xics_rm_h_cppr()
680 struct kvmppc_icp *icp = vcpu->arch.icp; in ics_rm_eoi() local
712 icp->rm_action |= XICS_RM_NOTIFY_EOI; in ics_rm_eoi()
713 icp->rm_eoied_irq = irq; in ics_rm_eoi()
732 return check_too_hard(xics, icp); in ics_rm_eoi()
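
ics_rm_eoi() (hits 680-732) shows the real-mode escape hatch this file relies on: work that cannot be done in real mode, such as notifying a host EOI handler, is recorded as a flag in icp->rm_action together with its payload (rm_eoied_irq at line 713), and check_too_hard() (line 225) then returns H_TOO_HARD so the same hypercall is completed in virtual mode. A hedged sketch of the pattern; the struct layout and flag value are illustrative:

    #include <stdint.h>

    #define H_SUCCESS     0
    #define H_TOO_HARD    9999      /* internal "finish in virtual mode" code */
    #define RM_NOTIFY_EOI (1u << 0) /* stand-in for XICS_RM_NOTIFY_EOI */

    struct icp_rm {
        uint32_t rm_action;    /* actions deferred to virtual mode */
        uint32_t rm_eoied_irq; /* payload: which irq was EOI'd */
    };

    /* Any pending action forces the hypercall out to virtual mode. */
    static long check_too_hard(struct icp_rm *icp)
    {
        return icp->rm_action ? H_TOO_HARD : H_SUCCESS;
    }

    static long rm_eoi(struct icp_rm *icp, uint32_t irq)
    {
        /* ...real-mode EOI handling elided... */
        icp->rm_action |= RM_NOTIFY_EOI; /* defer the host notifier */
        icp->rm_eoied_irq = irq;
        return check_too_hard(icp);
    }
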
738 struct kvmppc_icp *icp = vcpu->arch.icp; in xics_rm_h_eoi() local
758 icp_rm_down_cppr(xics, icp, xirr >> 24); in xics_rm_h_eoi()
762 return check_too_hard(xics, icp); in xics_rm_h_eoi()
860 struct kvmppc_icp *icp; in kvmppc_deliver_irq_passthru() local
869 icp = vcpu->arch.icp; in kvmppc_deliver_irq_passthru()
887 icp_rm_deliver_irq(xics, icp, irq, false); in kvmppc_deliver_irq_passthru()
893 if (check_too_hard(xics, icp) == H_TOO_HARD) in kvmppc_deliver_irq_passthru()