Lines matching refs: cfs_b (struct cfs_bandwidth uses in the CFS bandwidth-control code, kernel/sched/fair.c)
5210 void __refill_cfs_bandwidth_runtime(struct cfs_bandwidth *cfs_b) in __refill_cfs_bandwidth_runtime() argument
5214 if (unlikely(cfs_b->quota == RUNTIME_INF)) in __refill_cfs_bandwidth_runtime()
5217 cfs_b->runtime += cfs_b->quota; in __refill_cfs_bandwidth_runtime()
5218 runtime = cfs_b->runtime_snap - cfs_b->runtime; in __refill_cfs_bandwidth_runtime()
5220 cfs_b->burst_time += runtime; in __refill_cfs_bandwidth_runtime()
5221 cfs_b->nr_burst++; in __refill_cfs_bandwidth_runtime()
5224 cfs_b->runtime = min(cfs_b->runtime, cfs_b->quota + cfs_b->burst); in __refill_cfs_bandwidth_runtime()
5225 cfs_b->runtime_snap = cfs_b->runtime; in __refill_cfs_bandwidth_runtime()
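
Lines 5210-5225 are the per-period refill: the pool gains one quota, any dip below the previous snapshot is booked as consumed burst (burst_time, nr_burst), and the pool is clamped to quota + burst before the snapshot is retaken. A minimal userspace sketch of that arithmetic, under the assumption that struct bw_pool, min64() and refill() are illustrative names, not kernel API:

#include <stdio.h>
#include <stdint.h>

/* Illustrative stand-in for struct cfs_bandwidth's accounting fields. */
struct bw_pool {
	int64_t quota;        /* runtime granted per period */
	int64_t burst;        /* extra headroom the pool may bank */
	int64_t runtime;      /* runtime currently available */
	int64_t runtime_snap; /* pool level recorded at the last refill */
	int64_t burst_time;   /* total runtime served from banked headroom */
	int     nr_burst;     /* periods that consumed banked headroom */
};

static int64_t min64(int64_t a, int64_t b) { return a < b ? a : b; }

/* Mirrors the refill above: add one quota, account burst use, clamp. */
static void refill(struct bw_pool *p)
{
	int64_t runtime;

	p->runtime += p->quota;
	/* Still below last refill's snapshot after adding a full quota
	 * means the difference was served out of banked burst. */
	runtime = p->runtime_snap - p->runtime;
	if (runtime > 0) {
		p->burst_time += runtime;
		p->nr_burst++;
	}
	p->runtime = min64(p->runtime, p->quota + p->burst);
	p->runtime_snap = p->runtime;
}

int main(void)
{
	struct bw_pool p = { .quota = 100, .burst = 50 };

	refill(&p);       /* idle period: pool at quota */
	refill(&p);       /* still idle: banked up to quota + burst */
	p.runtime -= 120; /* busy period dips into the banked headroom */
	refill(&p);
	printf("runtime=%lld burst_time=%lld nr_burst=%d\n",
	       (long long)p.runtime, (long long)p.burst_time, p.nr_burst);
	return 0;
}
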
5234 static int __assign_cfs_rq_runtime(struct cfs_bandwidth *cfs_b, in __assign_cfs_rq_runtime() argument
5239 lockdep_assert_held(&cfs_b->lock); in __assign_cfs_rq_runtime()
5244 if (cfs_b->quota == RUNTIME_INF) in __assign_cfs_rq_runtime()
5247 start_cfs_bandwidth(cfs_b); in __assign_cfs_rq_runtime()
5249 if (cfs_b->runtime > 0) { in __assign_cfs_rq_runtime()
5250 amount = min(cfs_b->runtime, min_amount); in __assign_cfs_rq_runtime()
5251 cfs_b->runtime -= amount; in __assign_cfs_rq_runtime()
5252 cfs_b->idle = 0; in __assign_cfs_rq_runtime()
5264 struct cfs_bandwidth *cfs_b = tg_cfs_bandwidth(cfs_rq->tg); in assign_cfs_rq_runtime() local
5267 raw_spin_lock(&cfs_b->lock); in assign_cfs_rq_runtime()
5268 ret = __assign_cfs_rq_runtime(cfs_b, cfs_rq, sched_cfs_bandwidth_slice()); in assign_cfs_rq_runtime()
5269 raw_spin_unlock(&cfs_b->lock); in assign_cfs_rq_runtime()
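
Lines 5234-5269 are the hand-off to a single CPU: under cfs_b->lock, a cfs_rq asks for enough to top its local runtime_remaining back up to one slice (sched_cfs_bandwidth_slice(), 5 ms by default), receives min(pool, request), and may keep running only while that local slice stays positive. A hedged sketch, with assign_slice() and the bare int64_t pools standing in for the kernel structures:

#include <stdio.h>
#include <stdint.h>

/* 'global' plays cfs_b->runtime, 'local' plays cfs_rq->runtime_remaining;
 * both illustrative, not kernel types. */
static int assign_slice(int64_t *global, int64_t *local, int64_t slice)
{
	int64_t want = slice - *local; /* top back up to one full slice */
	int64_t amount = 0;

	if (want > 0 && *global > 0) {
		amount = want < *global ? want : *global;
		*global -= amount;
	}
	*local += amount;
	/* A non-positive local slice is what leads to throttling. */
	return *local > 0;
}

int main(void)
{
	int64_t global = 7, local = -2; /* arbitrary units */

	printf("ok=%d global=%lld local=%lld\n",
	       assign_slice(&global, &local, 5),
	       (long long)global, (long long)local);
	return 0;
}
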
5365 struct cfs_bandwidth *cfs_b = tg_cfs_bandwidth(cfs_rq->tg); in throttle_cfs_rq() local
5369 raw_spin_lock(&cfs_b->lock); in throttle_cfs_rq()
5371 if (__assign_cfs_rq_runtime(cfs_b, cfs_rq, 1)) { in throttle_cfs_rq()
5383 &cfs_b->throttled_cfs_rq); in throttle_cfs_rq()
5385 raw_spin_unlock(&cfs_b->lock); in throttle_cfs_rq()
5452 struct cfs_bandwidth *cfs_b = tg_cfs_bandwidth(cfs_rq->tg); in unthrottle_cfs_rq() local
5462 raw_spin_lock(&cfs_b->lock); in unthrottle_cfs_rq()
5463 cfs_b->throttled_time += rq_clock(rq) - cfs_rq->throttled_clock; in unthrottle_cfs_rq()
5465 raw_spin_unlock(&cfs_b->lock); in unthrottle_cfs_rq()
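
Throttling (5365-5385) is the failure path of that hand-off: at 5371 the cfs_rq asks for a single nanosecond, and only if even that cannot be granted is it parked on cfs_b->throttled_cfs_rq. Unthrottling (5452-5465) credits the wall-clock time spent parked to cfs_b->throttled_time. A small sketch of that accounting, assuming CLOCK_MONOTONIC as a stand-in for rq_clock():

#define _POSIX_C_SOURCE 199309L
#include <stdio.h>
#include <stdint.h>
#include <time.h>

static uint64_t now_ns(void)
{
	struct timespec ts;

	clock_gettime(CLOCK_MONOTONIC, &ts);
	return (uint64_t)ts.tv_sec * 1000000000ull + (uint64_t)ts.tv_nsec;
}

int main(void)
{
	uint64_t throttled_clock, throttled_time = 0;

	throttled_clock = now_ns();  /* stamped as in throttle_cfs_rq() */
	nanosleep(&(struct timespec){ .tv_nsec = 5000000 }, NULL);
	throttled_time += now_ns() - throttled_clock; /* unthrottle_cfs_rq() */
	printf("throttled for ~%llu ns\n", (unsigned long long)throttled_time);
	return 0;
}
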
5532 static void distribute_cfs_runtime(struct cfs_bandwidth *cfs_b) in distribute_cfs_runtime() argument
5538 list_for_each_entry_rcu(cfs_rq, &cfs_b->throttled_cfs_rq, in distribute_cfs_runtime()
5550 raw_spin_lock(&cfs_b->lock); in distribute_cfs_runtime()
5552 if (runtime > cfs_b->runtime) in distribute_cfs_runtime()
5553 runtime = cfs_b->runtime; in distribute_cfs_runtime()
5554 cfs_b->runtime -= runtime; in distribute_cfs_runtime()
5555 remaining = cfs_b->runtime; in distribute_cfs_runtime()
5556 raw_spin_unlock(&cfs_b->lock); in distribute_cfs_runtime()
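
distribute_cfs_runtime (5532-5556) walks the throttled list under RCU and hands each queue just enough to lift its runtime_remaining past zero, re-taking cfs_b->lock per queue and stopping once the pool runs dry. A standalone sketch with an array in place of the RCU list:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	int64_t pool = 10;                   /* plays cfs_b->runtime */
	int64_t deficit[3] = { -4, -7, -2 }; /* runtime_remaining, <= 0 */

	for (int i = 0; i < 3 && pool > 0; i++) {
		/* one unit past zero lets the queue run again */
		int64_t runtime = -deficit[i] + 1;

		if (runtime > pool)
			runtime = pool;
		pool -= runtime;
		deficit[i] += runtime;
		printf("rq%d: remaining=%lld pool=%lld\n",
		       i, (long long)deficit[i], (long long)pool);
	}
	return 0;
}
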
5579 static int do_sched_cfs_period_timer(struct cfs_bandwidth *cfs_b, int overrun, unsigned long flags) in do_sched_cfs_period_timer() argument
5584 if (cfs_b->quota == RUNTIME_INF) in do_sched_cfs_period_timer()
5587 throttled = !list_empty(&cfs_b->throttled_cfs_rq); in do_sched_cfs_period_timer()
5588 cfs_b->nr_periods += overrun; in do_sched_cfs_period_timer()
5591 __refill_cfs_bandwidth_runtime(cfs_b); in do_sched_cfs_period_timer()
5597 if (cfs_b->idle && !throttled) in do_sched_cfs_period_timer()
5602 cfs_b->idle = 1; in do_sched_cfs_period_timer()
5607 cfs_b->nr_throttled += overrun; in do_sched_cfs_period_timer()
5612 while (throttled && cfs_b->runtime > 0) { in do_sched_cfs_period_timer()
5613 raw_spin_unlock_irqrestore(&cfs_b->lock, flags); in do_sched_cfs_period_timer()
5615 distribute_cfs_runtime(cfs_b); in do_sched_cfs_period_timer()
5616 raw_spin_lock_irqsave(&cfs_b->lock, flags); in do_sched_cfs_period_timer()
5618 throttled = !list_empty(&cfs_b->throttled_cfs_rq); in do_sched_cfs_period_timer()
5627 cfs_b->idle = 0; in do_sched_cfs_period_timer()
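
do_sched_cfs_period_timer (5579-5627) ties refill and distribution together with an idle handshake: a period in which no bandwidth was used marks the pool potentially idle (5602), a second consecutive quiet period lets the handler report idle so the hrtimer can be deactivated (5597), and otherwise it refills and keeps redistributing while queues stay throttled and runtime remains (5612-5618), dropping the lock around each distribution pass. Condensed, illustrative control flow (not the kernel's signature; locking and the real list handling elided):

#include <stdio.h>

/* Returns 1 when the period timer may go idle. */
static int period_tick(int *pool, int quota, int *idle, int throttled)
{
	if (*idle && !throttled)
		return 1;             /* second quiet period: deactivate */

	*pool += quota;               /* refill (clamped in the kernel) */

	if (!throttled) {
		*idle = 1;            /* potentially idle next period */
		return 0;
	}
	while (throttled && *pool > 0) {
		*pool -= 3;           /* distribute_cfs_runtime(), abridged */
		throttled = 0;        /* assume one pass unthrottles all */
	}
	*idle = 0;                    /* bandwidth was in use */
	return 0;
}

int main(void)
{
	int pool = 0, idle = 0;

	printf("quiet period -> idle=%d\n", period_tick(&pool, 10, &idle, 0));
	printf("quiet period -> idle=%d\n", period_tick(&pool, 10, &idle, 0));
	return 0;
}
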
5649 static int runtime_refresh_within(struct cfs_bandwidth *cfs_b, u64 min_expire) in runtime_refresh_within() argument
5651 struct hrtimer *refresh_timer = &cfs_b->period_timer; in runtime_refresh_within()
5666 static void start_cfs_slack_bandwidth(struct cfs_bandwidth *cfs_b) in start_cfs_slack_bandwidth() argument
5671 if (runtime_refresh_within(cfs_b, min_left)) in start_cfs_slack_bandwidth()
5675 if (cfs_b->slack_started) in start_cfs_slack_bandwidth()
5677 cfs_b->slack_started = true; in start_cfs_slack_bandwidth()
5679 hrtimer_start(&cfs_b->slack_timer, in start_cfs_slack_bandwidth()
5687 struct cfs_bandwidth *cfs_b = tg_cfs_bandwidth(cfs_rq->tg); in __return_cfs_rq_runtime() local
5693 raw_spin_lock(&cfs_b->lock); in __return_cfs_rq_runtime()
5694 if (cfs_b->quota != RUNTIME_INF) { in __return_cfs_rq_runtime()
5695 cfs_b->runtime += slack_runtime; in __return_cfs_rq_runtime()
5698 if (cfs_b->runtime > sched_cfs_bandwidth_slice() && in __return_cfs_rq_runtime()
5699 !list_empty(&cfs_b->throttled_cfs_rq)) in __return_cfs_rq_runtime()
5700 start_cfs_slack_bandwidth(cfs_b); in __return_cfs_rq_runtime()
5702 raw_spin_unlock(&cfs_b->lock); in __return_cfs_rq_runtime()
5723 static void do_sched_cfs_slack_timer(struct cfs_bandwidth *cfs_b) in do_sched_cfs_slack_timer() argument
5729 raw_spin_lock_irqsave(&cfs_b->lock, flags); in do_sched_cfs_slack_timer()
5730 cfs_b->slack_started = false; in do_sched_cfs_slack_timer()
5732 if (runtime_refresh_within(cfs_b, min_bandwidth_expiration)) { in do_sched_cfs_slack_timer()
5733 raw_spin_unlock_irqrestore(&cfs_b->lock, flags); in do_sched_cfs_slack_timer()
5737 if (cfs_b->quota != RUNTIME_INF && cfs_b->runtime > slice) in do_sched_cfs_slack_timer()
5738 runtime = cfs_b->runtime; in do_sched_cfs_slack_timer()
5740 raw_spin_unlock_irqrestore(&cfs_b->lock, flags); in do_sched_cfs_slack_timer()
5745 distribute_cfs_runtime(cfs_b); in do_sched_cfs_slack_timer()
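
Lines 5649-5745 are the slack path: when a cfs_rq goes idle, __return_cfs_rq_runtime gives back everything above a small keep-back (min_cfs_rq_runtime, 1 ms), and if the replenished pool then exceeds one slice while queues sit throttled, a short slack timer is armed (5666-5679) to redistribute early instead of making them wait out the period; runtime_refresh_within skips this when the period timer is due shortly anyway. A sketch of the return-and-arm decision, with the 1 ms keep-back and 5 ms slice defaults hard-coded as assumptions:

#include <stdio.h>
#include <stdint.h>

#define MIN_CFS_RQ_RUNTIME 1000000LL /* 1 ms kept back per cfs_rq */
#define BW_SLICE           5000000LL /* default bandwidth slice */

/* Returns whether the slack timer would be worth arming. */
static int return_slack(int64_t *global, int64_t *local, int throttled)
{
	int64_t slack = *local - MIN_CFS_RQ_RUNTIME;

	if (slack <= 0)
		return 0;
	*local -= slack;
	*global += slack;
	/* Arm only if a full slice is on hand and someone is waiting. */
	return *global > BW_SLICE && throttled;
}

int main(void)
{
	int64_t global = 2000000, local = 4500000; /* ns */

	printf("arm=%d global=%lld local=%lld\n",
	       return_slack(&global, &local, 1),
	       (long long)global, (long long)local);
	return 0;
}
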
5810 struct cfs_bandwidth *cfs_b = in sched_cfs_slack_timer() local
5813 do_sched_cfs_slack_timer(cfs_b); in sched_cfs_slack_timer()
5822 struct cfs_bandwidth *cfs_b = in sched_cfs_period_timer() local
5829 raw_spin_lock_irqsave(&cfs_b->lock, flags); in sched_cfs_period_timer()
5831 overrun = hrtimer_forward_now(timer, cfs_b->period); in sched_cfs_period_timer()
5835 idle = do_sched_cfs_period_timer(cfs_b, overrun, flags); in sched_cfs_period_timer()
5838 u64 new, old = ktime_to_ns(cfs_b->period); in sched_cfs_period_timer()
5847 cfs_b->period = ns_to_ktime(new); in sched_cfs_period_timer()
5848 cfs_b->quota *= 2; in sched_cfs_period_timer()
5849 cfs_b->burst *= 2; in sched_cfs_period_timer()
5855 div_u64(cfs_b->quota, NSEC_PER_USEC)); in sched_cfs_period_timer()
5861 div_u64(cfs_b->quota, NSEC_PER_USEC)); in sched_cfs_period_timer()
5869 cfs_b->period_active = 0; in sched_cfs_period_timer()
5870 raw_spin_unlock_irqrestore(&cfs_b->lock, flags); in sched_cfs_period_timer()
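
sched_cfs_period_timer (5822-5870) also carries an escape hatch for misconfigured periods: if the handler keeps looping because the period is shorter than the timer can be serviced, period, quota and burst are all doubled (5847-5849), halving timer frequency while preserving the quota/period ratio that defines the actual limit; the kernel caps the grown period and logs the resulting quota (5855, 5861). A sketch of that rescaling, assuming the kernel's 1 s ceiling:

#include <stdio.h>
#include <stdint.h>

#define MAX_PERIOD_NS 1000000000ULL /* 1 s cap on the grown period */

static void scale_up(uint64_t *period, uint64_t *quota, uint64_t *burst)
{
	uint64_t new = *period * 2;

	/* Doubling everything keeps quota/period, the real limit, intact. */
	if (new < MAX_PERIOD_NS) {
		*period = new;
		*quota *= 2;
		*burst *= 2;
	}
}

int main(void)
{
	uint64_t period = 100000, quota = 50000, burst = 10000; /* ns */

	scale_up(&period, &quota, &burst);
	printf("period=%llu quota=%llu ratio=%.2f\n",
	       (unsigned long long)period, (unsigned long long)quota,
	       (double)quota / (double)period);
	return 0;
}
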
5875 void init_cfs_bandwidth(struct cfs_bandwidth *cfs_b) in init_cfs_bandwidth() argument
5877 raw_spin_lock_init(&cfs_b->lock); in init_cfs_bandwidth()
5878 cfs_b->runtime = 0; in init_cfs_bandwidth()
5879 cfs_b->quota = RUNTIME_INF; in init_cfs_bandwidth()
5880 cfs_b->period = ns_to_ktime(default_cfs_period()); in init_cfs_bandwidth()
5881 cfs_b->burst = 0; in init_cfs_bandwidth()
5883 INIT_LIST_HEAD(&cfs_b->throttled_cfs_rq); in init_cfs_bandwidth()
5884 hrtimer_init(&cfs_b->period_timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS_PINNED); in init_cfs_bandwidth()
5885 cfs_b->period_timer.function = sched_cfs_period_timer; in init_cfs_bandwidth()
5886 hrtimer_init(&cfs_b->slack_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); in init_cfs_bandwidth()
5887 cfs_b->slack_timer.function = sched_cfs_slack_timer; in init_cfs_bandwidth()
5888 cfs_b->slack_started = false; in init_cfs_bandwidth()
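
init_cfs_bandwidth (5875-5888) starts every group unconstrained: quota is RUNTIME_INF, burst is 0, and both hrtimers are initialized but not armed. The actual limits arrive from userspace; on cgroup v2 they are the cpu.max and cpu.max.burst files (values in microseconds). A hedged example setter, where the cgroup path is an assumption and the group must already exist with the cpu controller enabled:

#include <stdio.h>

/* Hypothetical cgroup; adjust to a real one on your system. */
#define CG "/sys/fs/cgroup/demo"

static void write_str(const char *path, const char *val)
{
	FILE *f = fopen(path, "w");

	if (!f) {
		perror(path);
		return;
	}
	fputs(val, f);
	fclose(f);
}

int main(void)
{
	/* 50 ms of quota per 100 ms period: half a CPU on average */
	write_str(CG "/cpu.max", "50000 100000");
	/* allow banking up to 25 ms of unused quota (cfs_b->burst) */
	write_str(CG "/cpu.max.burst", "25000");
	return 0;
}
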
5897 void start_cfs_bandwidth(struct cfs_bandwidth *cfs_b) in start_cfs_bandwidth() argument
5899 lockdep_assert_held(&cfs_b->lock); in start_cfs_bandwidth()
5901 if (cfs_b->period_active) in start_cfs_bandwidth()
5904 cfs_b->period_active = 1; in start_cfs_bandwidth()
5905 hrtimer_forward_now(&cfs_b->period_timer, cfs_b->period); in start_cfs_bandwidth()
5906 hrtimer_start_expires(&cfs_b->period_timer, HRTIMER_MODE_ABS_PINNED); in start_cfs_bandwidth()
5909 static void destroy_cfs_bandwidth(struct cfs_bandwidth *cfs_b) in destroy_cfs_bandwidth() argument
5912 if (!cfs_b->throttled_cfs_rq.next) in destroy_cfs_bandwidth()
5915 hrtimer_cancel(&cfs_b->period_timer); in destroy_cfs_bandwidth()
5916 hrtimer_cancel(&cfs_b->slack_timer); in destroy_cfs_bandwidth()
5935 struct cfs_bandwidth *cfs_b = &tg->cfs_bandwidth; in update_runtime_enabled() local
5938 raw_spin_lock(&cfs_b->lock); in update_runtime_enabled()
5939 cfs_rq->runtime_enabled = cfs_b->quota != RUNTIME_INF; in update_runtime_enabled()
5940 raw_spin_unlock(&cfs_b->lock); in update_runtime_enabled()
6005 void init_cfs_bandwidth(struct cfs_bandwidth *cfs_b) {} in init_cfs_bandwidth() argument
6015 static inline void destroy_cfs_bandwidth(struct cfs_bandwidth *cfs_b) {} in destroy_cfs_bandwidth() argument
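
The two trailing stubs (6005, 6015) are the !CONFIG_CFS_BANDWIDTH variants: with bandwidth control compiled out, init_cfs_bandwidth() and destroy_cfs_bandwidth() reduce to empty bodies and none of the accounting above is built.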