Lines Matching refs:sched
582 static void perf_sched_init(struct perf_sched *sched, struct event_constraint **c, in perf_sched_init() argument
587 memset(sched, 0, sizeof(*sched)); in perf_sched_init()
588 sched->max_events = num; in perf_sched_init()
589 sched->max_weight = wmax; in perf_sched_init()
590 sched->constraints = c; in perf_sched_init()
597 sched->state.event = idx; /* start with min weight */ in perf_sched_init()
598 sched->state.weight = wmin; in perf_sched_init()
599 sched->state.unassigned = num; in perf_sched_init()
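
The hits above are only the refs:sched lines of perf_sched_init(); the elided lines walk the constraint array to find the first event whose constraint has the minimum weight. As a rough, userspace-only model of that setup (the struct layout, field names and SCHED_STATES_MAX depth below are simplifying assumptions, not the kernel's exact definitions), one might write:

    /* Simplified model of the scheduler state; layout is an assumption,
     * not the kernel's exact definition. */
    #include <stdio.h>
    #include <string.h>

    #define SCHED_STATES_MAX 2            /* assumed backtrack depth */

    struct event_constraint {             /* minimal stand-in for the kernel struct */
            unsigned long idxmsk;         /* bitmask of counters the event may use */
            int           weight;         /* number of bits set in idxmsk          */
    };

    struct sched_state {
            int           weight;         /* constraint weight currently placed    */
            int           event;          /* index of the event being placed       */
            int           counter;        /* next counter index to try             */
            int           unassigned;     /* events still without a counter        */
            unsigned long used;           /* bitmap of counters already taken      */
    };

    struct perf_sched {
            int                       max_weight;
            int                       max_events;
            struct event_constraint **constraints;
            struct sched_state        state;
            int                       saved_states;
            struct sched_state        saved[SCHED_STATES_MAX];
    };

    /* Mirror of the listed initialization: zero the state, record the limits,
     * then start from the first event whose constraint has minimum weight. */
    void sched_init(struct perf_sched *sched, struct event_constraint **c,
                    int num, int wmin, int wmax)
    {
            int idx;

            memset(sched, 0, sizeof(*sched));
            sched->max_events  = num;
            sched->max_weight  = wmax;
            sched->constraints = c;

            for (idx = 0; idx < num; idx++)
                    if (c[idx]->weight == wmin)
                            break;

            sched->state.event      = idx;  /* start with min weight */
            sched->state.weight     = wmin;
            sched->state.unassigned = num;
    }
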
602 static void perf_sched_save_state(struct perf_sched *sched) in perf_sched_save_state() argument
604 if (WARN_ON_ONCE(sched->saved_states >= SCHED_STATES_MAX)) in perf_sched_save_state()
607 sched->saved[sched->saved_states] = sched->state; in perf_sched_save_state()
608 sched->saved_states++; in perf_sched_save_state()
611 static bool perf_sched_restore_state(struct perf_sched *sched) in perf_sched_restore_state() argument
613 if (!sched->saved_states) in perf_sched_restore_state()
616 sched->saved_states--; in perf_sched_restore_state()
617 sched->state = sched->saved[sched->saved_states]; in perf_sched_restore_state()
620 clear_bit(sched->state.counter++, sched->state.used); in perf_sched_restore_state()
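
perf_sched_save_state() and perf_sched_restore_state() keep a small fixed-depth stack of scheduler states; restoring also retires the counter choice that led to the dead end (clear its bit, advance the counter index) so the next attempt tries a different counter. Continuing the sketch above, with plain return codes instead of the kernel's WARN_ON_ONCE():

    /* Push the current state so a later dead end can be undone. */
    int sched_save_state(struct perf_sched *sched)
    {
            if (sched->saved_states >= SCHED_STATES_MAX)
                    return 0;

            sched->saved[sched->saved_states++] = sched->state;
            return 1;
    }

    /* Pop the last saved state and retire the counter choice that led to the
     * dead end, so the next search starts one counter further on. */
    int sched_restore_state(struct perf_sched *sched)
    {
            if (!sched->saved_states)
                    return 0;

            sched->state = sched->saved[--sched->saved_states];
            sched->state.used &= ~(1UL << sched->state.counter);
            sched->state.counter++;
            return 1;
    }
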
629 static bool __perf_sched_find_counter(struct perf_sched *sched) in __perf_sched_find_counter() argument
634 if (!sched->state.unassigned) in __perf_sched_find_counter()
637 if (sched->state.event >= sched->max_events) in __perf_sched_find_counter()
640 c = sched->constraints[sched->state.event]; in __perf_sched_find_counter()
646 if (!__test_and_set_bit(idx, sched->state.used)) in __perf_sched_find_counter()
651 idx = sched->state.counter; in __perf_sched_find_counter()
653 if (!__test_and_set_bit(idx, sched->state.used)) in __perf_sched_find_counter()
660 sched->state.counter = idx; in __perf_sched_find_counter()
663 perf_sched_save_state(sched); in __perf_sched_find_counter()
668 static bool perf_sched_find_counter(struct perf_sched *sched) in perf_sched_find_counter() argument
670 while (!__perf_sched_find_counter(sched)) { in perf_sched_find_counter()
671 if (!perf_sched_restore_state(sched)) in perf_sched_find_counter()
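
__perf_sched_find_counter() greedily takes the first free counter that the current event's constraint bitmask allows (the real code first prefers fixed-purpose counters, which the listing elides), and perf_sched_find_counter() keeps retrying after popping a saved state whenever the greedy pick hits a dead end. A simplified continuation of the sketch: no fixed counters, and the state is saved on every successful pick, whereas the kernel saves it only for constraints marked as overlapping:

    #define NUM_COUNTERS 4                /* toy PMU size, an assumption */

    /* Try to grab a free counter allowed by the current event's constraint,
     * scanning upward from state.counter. */
    int sched_find_counter_once(struct perf_sched *sched)
    {
            struct event_constraint *c;
            int idx;

            if (!sched->state.unassigned)
                    return 0;
            if (sched->state.event >= sched->max_events)
                    return 0;

            c = sched->constraints[sched->state.event];
            for (idx = sched->state.counter; idx < NUM_COUNTERS; idx++) {
                    if (!(c->idxmsk & (1UL << idx)))
                            continue;               /* counter not allowed  */
                    if (sched->state.used & (1UL << idx))
                            continue;               /* counter already taken */
                    sched->state.used |= 1UL << idx;
                    sched->state.counter = idx;
                    sched_save_state(sched);        /* remember the choice  */
                    return 1;
            }
            return 0;
    }

    /* On a dead end, undo the most recent saved choice and retry. */
    int sched_find_counter(struct perf_sched *sched)
    {
            while (!sched_find_counter_once(sched)) {
                    if (!sched_restore_state(sched))
                            return 0;
            }
            return 1;
    }
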
682 static bool perf_sched_next_event(struct perf_sched *sched) in perf_sched_next_event() argument
686 if (!sched->state.unassigned || !--sched->state.unassigned) in perf_sched_next_event()
691 sched->state.event++; in perf_sched_next_event()
692 if (sched->state.event >= sched->max_events) { in perf_sched_next_event()
694 sched->state.event = 0; in perf_sched_next_event()
695 sched->state.weight++; in perf_sched_next_event()
696 if (sched->state.weight > sched->max_weight) in perf_sched_next_event()
699 c = sched->constraints[sched->state.event]; in perf_sched_next_event()
700 } while (c->weight != sched->state.weight); in perf_sched_next_event()
702 sched->state.counter = 0; /* start with first counter */ in perf_sched_next_event()
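
perf_sched_next_event() visits the events in order of increasing constraint weight, wrapping around the event array once per weight class, so the most tightly constrained events get their counters first. The same walk in the toy model:

    /* Advance to the next event, visiting events by increasing constraint
     * weight; each new event starts its counter search from counter 0. */
    int sched_next_event(struct perf_sched *sched)
    {
            struct event_constraint *c;

            if (!sched->state.unassigned || !--sched->state.unassigned)
                    return 0;

            do {
                    sched->state.event++;
                    if (sched->state.event >= sched->max_events) {
                            sched->state.event = 0;
                            sched->state.weight++;
                            if (sched->state.weight > sched->max_weight)
                                    return 0;
                    }
                    c = sched->constraints[sched->state.event];
            } while (c->weight != sched->state.weight);

            sched->state.counter = 0;     /* start with first counter */
            return 1;
    }
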
713 struct perf_sched sched; in perf_assign_events() local
715 perf_sched_init(&sched, constraints, n, wmin, wmax); in perf_assign_events()
718 if (!perf_sched_find_counter(&sched)) in perf_assign_events()
721 assign[sched.state.event] = sched.state.counter; in perf_assign_events()
722 } while (perf_sched_next_event(&sched)); in perf_assign_events()
724 return sched.state.unassigned; in perf_assign_events()
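
perf_assign_events() ties it together: initialize, then alternately place the current event and advance to the next, returning the number of events left without a counter (0 on full success). The sketch below completes the toy model into a runnable program, using a hypothetical three-event, four-counter workload:

    /* Assign a counter to every event; returns the number left unassigned
     * (0 means complete success), like the listed perf_assign_events(). */
    int assign_events(struct event_constraint **constraints, int n,
                      int wmin, int wmax, int *assign)
    {
            struct perf_sched sched;

            sched_init(&sched, constraints, n, wmin, wmax);
            do {
                    if (!sched_find_counter(&sched))
                            break;                        /* no counter fits */
                    assign[sched.state.event] = sched.state.counter;
            } while (sched_next_event(&sched));

            return sched.state.unassigned;
    }

    int main(void)
    {
            struct event_constraint c0 = { 0x1, 1 };   /* only counter 0 */
            struct event_constraint c1 = { 0x3, 2 };   /* counters 0-1   */
            struct event_constraint c2 = { 0xf, 4 };   /* any of 0-3     */
            struct event_constraint *constraints[] = { &c2, &c0, &c1 };
            int assign[3] = { -1, -1, -1 };
            int left, i;

            left = assign_events(constraints, 3, 1, 4, assign);
            printf("unassigned: %d\n", left);
            for (i = 0; i < 3; i++)
                    printf("event %d -> counter %d\n", i, assign[i]);
            return 0;
    }

Scheduling by weight places the event restricted to counter 0 first, the two-counter event on counter 1, and the unconstrained event on counter 2, so the program should report 0 unassigned.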