Lines Matching refs:icp

26 static void icp_rm_deliver_irq(struct kvmppc_xics *xics, struct kvmppc_icp *icp,
31 struct kvmppc_ics *ics, struct kvmppc_icp *icp) in ics_rm_check_resend() argument
44 icp_rm_deliver_irq(xics, icp, state->number); in ics_rm_check_resend()
56 struct kvmppc_icp *this_icp = this_vcpu->arch.icp; in icp_rm_set_vcpu_irq()
89 static inline bool icp_rm_try_update(struct kvmppc_icp *icp, in icp_rm_try_update() argument
100 success = cmpxchg64(&icp->state.raw, old.raw, new.raw) == old.raw; in icp_rm_try_update()
120 icp_rm_set_vcpu_irq(icp->vcpu, this_vcpu); in icp_rm_try_update()
123 this_vcpu->arch.icp->rm_dbgstate = new; in icp_rm_try_update()
124 this_vcpu->arch.icp->rm_dbgtgt = icp->vcpu; in icp_rm_try_update()
131 struct kvmppc_icp *icp) in check_too_hard() argument
133 return (xics->real_mode_dbg || icp->rm_action) ? H_TOO_HARD : H_SUCCESS; in check_too_hard()
137 struct kvmppc_icp *icp) in icp_rm_check_resend() argument
143 for_each_set_bit(icsid, icp->resend_map, xics->max_icsid + 1) { in icp_rm_check_resend()
146 if (!test_and_clear_bit(icsid, icp->resend_map)) in icp_rm_check_resend()
150 ics_rm_check_resend(xics, ics, icp); in icp_rm_check_resend()
154 static bool icp_rm_try_to_deliver(struct kvmppc_icp *icp, u32 irq, u8 priority, in icp_rm_try_to_deliver() argument
161 old_state = new_state = READ_ONCE(icp->state); in icp_rm_try_to_deliver()
187 } while (!icp_rm_try_update(icp, old_state, new_state)); in icp_rm_try_to_deliver()
192 static void icp_rm_deliver_irq(struct kvmppc_xics *xics, struct kvmppc_icp *icp, in icp_rm_deliver_irq() argument
229 if (!icp || state->server != icp->server_num) { in icp_rm_deliver_irq()
230 icp = kvmppc_xics_find_server(xics->kvm, state->server); in icp_rm_deliver_irq()
231 if (!icp) { in icp_rm_deliver_irq()
277 if (icp_rm_try_to_deliver(icp, new_irq, state->priority, &reject)) { in icp_rm_deliver_irq()
283 icp->n_reject++; in icp_rm_deliver_irq()
292 set_bit(ics->icsid, icp->resend_map); in icp_rm_deliver_irq()
302 if (!icp->state.need_resend) { in icp_rm_deliver_irq()
311 static void icp_rm_down_cppr(struct kvmppc_xics *xics, struct kvmppc_icp *icp, in icp_rm_down_cppr() argument
347 old_state = new_state = READ_ONCE(icp->state); in icp_rm_down_cppr()
371 } while (!icp_rm_try_update(icp, old_state, new_state)); in icp_rm_down_cppr()
379 icp->n_check_resend++; in icp_rm_down_cppr()
380 icp_rm_check_resend(xics, icp); in icp_rm_down_cppr()
389 struct kvmppc_icp *icp = vcpu->arch.icp; in kvmppc_rm_h_xirr() local
396 icp_rm_clr_vcpu_irq(icp->vcpu); in kvmppc_rm_h_xirr()
406 old_state = new_state = READ_ONCE(icp->state); in kvmppc_rm_h_xirr()
415 } while (!icp_rm_try_update(icp, old_state, new_state)); in kvmppc_rm_h_xirr()
420 return check_too_hard(xics, icp); in kvmppc_rm_h_xirr()
428 struct kvmppc_icp *icp, *this_icp = vcpu->arch.icp; in kvmppc_rm_h_ipi() local
438 icp = this_icp; in kvmppc_rm_h_ipi()
440 icp = kvmppc_xics_find_server(vcpu->kvm, server); in kvmppc_rm_h_ipi()
441 if (!icp) in kvmppc_rm_h_ipi()
472 old_state = new_state = READ_ONCE(icp->state); in kvmppc_rm_h_ipi()
493 } while (!icp_rm_try_update(icp, old_state, new_state)); in kvmppc_rm_h_ipi()
498 icp_rm_deliver_irq(xics, icp, reject); in kvmppc_rm_h_ipi()
504 icp_rm_check_resend(xics, icp); in kvmppc_rm_h_ipi()
514 struct kvmppc_icp *icp = vcpu->arch.icp; in kvmppc_rm_h_cppr() local
527 if (cppr > icp->state.cppr) { in kvmppc_rm_h_cppr()
528 icp_rm_down_cppr(xics, icp, cppr); in kvmppc_rm_h_cppr()
530 } else if (cppr == icp->state.cppr) in kvmppc_rm_h_cppr()
544 icp_rm_clr_vcpu_irq(icp->vcpu); in kvmppc_rm_h_cppr()
547 old_state = new_state = READ_ONCE(icp->state); in kvmppc_rm_h_cppr()
558 } while (!icp_rm_try_update(icp, old_state, new_state)); in kvmppc_rm_h_cppr()
565 icp->n_reject++; in kvmppc_rm_h_cppr()
566 icp_rm_deliver_irq(xics, icp, reject); in kvmppc_rm_h_cppr()
569 return check_too_hard(xics, icp); in kvmppc_rm_h_cppr()
575 struct kvmppc_icp *icp = vcpu->arch.icp; in kvmppc_rm_h_eoi() local
598 icp_rm_down_cppr(xics, icp, xirr >> 24); in kvmppc_rm_h_eoi()
616 icp_rm_deliver_irq(xics, icp, irq); in kvmppc_rm_h_eoi()
619 icp->rm_action |= XICS_RM_NOTIFY_EOI; in kvmppc_rm_h_eoi()
620 icp->rm_eoied_irq = irq; in kvmppc_rm_h_eoi()
623 return check_too_hard(xics, icp); in kvmppc_rm_h_eoi()
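
A pattern worth noting across these matches: each real-mode hcall handler (kvmppc_rm_h_xirr, kvmppc_rm_h_ipi, kvmppc_rm_h_cppr, kvmppc_rm_h_eoi) updates the ICP state lock-free. It snapshots icp->state with READ_ONCE() (lines 161, 347, 406, 472, 547), builds a modified copy, and loops through icp_rm_try_update(), which succeeds only when cmpxchg64() on icp->state.raw still sees the old value (line 100). The sketch below is a minimal userspace illustration of that idiom, not the kernel code itself; the icp_state field layout, icp_try_update() and icp_set_cppr() here are simplified, hypothetical stand-ins for the real kvmppc_icp definitions.

#include <stdatomic.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Hypothetical, simplified stand-in for the packed ICP state word; the
 * real kvmppc_icp state carries cppr/mfrr/xisr/pending_pri in one 64-bit
 * "raw" value that cmpxchg64() swaps atomically. */
union icp_state {
    uint64_t raw;
    struct {
        uint8_t  cppr;         /* current processor priority */
        uint8_t  mfrr;         /* most-favoured request register */
        uint8_t  pending_pri;  /* priority of the pending source */
        uint8_t  need_resend;  /* ask sources to resend rejected irqs */
        uint32_t xisr;         /* pending interrupt source number */
    };
};

struct icp {
    _Atomic uint64_t state_raw;    /* only ever updated by the CAS below */
};

/* Analogue of icp_rm_try_update(): publish `new` only if the state is
 * still exactly `old`; on failure the caller re-reads and retries. */
static bool icp_try_update(struct icp *icp, union icp_state old,
                           union icp_state new)
{
    uint64_t expected = old.raw;

    return atomic_compare_exchange_strong(&icp->state_raw, &expected,
                                          new.raw);
}

/* Analogue of the do { ... } while (!icp_rm_try_update(...)) loops seen
 * at lines 187, 371, 415, 493 and 558 of the listing. */
static void icp_set_cppr(struct icp *icp, uint8_t cppr)
{
    union icp_state old, new;

    do {
        old.raw = atomic_load(&icp->state_raw);  /* like READ_ONCE() */
        new = old;
        new.cppr = cppr;
        /* A real handler would also decide here whether the new
         * priority rejects the pending source (xisr) or sets
         * need_resend, as icp_rm_down_cppr() does. */
    } while (!icp_try_update(icp, old, new));
}

int main(void)
{
    struct icp icp = { .state_raw = 0 };
    union icp_state snap;

    icp_set_cppr(&icp, 0xff);              /* 0xff = least favoured */
    snap.raw = atomic_load(&icp.state_raw);
    printf("cppr is now 0x%02x\n", snap.cppr);
    return 0;
}

The compare-and-swap retry keeps these handlers lock-free: if another vCPU's delivery or EOI path changed icp->state between the READ_ONCE() snapshot and the update, the cmpxchg64() fails and the handler simply recomputes against the fresh value, which appears to be why every update site in the listing sits inside such a retry loop.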