Lines matching refs:tg (cross-reference hits for the identifier tg in block/blk-throttle.c, the block-layer throttling code)

72 	struct throtl_grp	*tg;		/* tg this qnode belongs to */  member
240 static inline struct blkcg_gq *tg_to_blkg(struct throtl_grp *tg) in tg_to_blkg() argument
242 return pd_to_blkg(&tg->pd); in tg_to_blkg()
269 struct throtl_grp *tg = sq_to_tg(sq); in sq_to_td() local
271 if (tg) in sq_to_td()
272 return tg->td; in sq_to_td()
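
The accessors above (tg_to_blkg(), sq_to_tg(), sq_to_td()) are container_of()-style pointer arithmetic: a throtl_grp embeds its blkg policy data and its throtl_service_queue, so a pointer to the embedded member recovers the enclosing group. A minimal userspace sketch of the same pattern (struct names here are illustrative, not the kernel's):

    #include <stddef.h>
    #include <stdio.h>

    /* illustrative stand-ins for the embedded-member layout */
    struct service_queue { int nr_pending; };
    struct group {
        int id;
        struct service_queue sq;    /* embedded, like tg->service_queue */
    };

    /* recover the enclosing struct from a pointer to a member,
     * the trick the kernel's container_of() performs */
    #define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

    static struct group *sq_to_group(struct service_queue *sq)
    {
        return container_of(sq, struct group, sq);
    }

    int main(void)
    {
        struct group g = { .id = 42 };
        printf("%d\n", sq_to_group(&g.sq)->id);    /* prints 42 */
        return 0;
    }
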
295 static uint64_t tg_bps_limit(struct throtl_grp *tg, int rw) in tg_bps_limit() argument
297 struct blkcg_gq *blkg = tg_to_blkg(tg); in tg_bps_limit()
304 td = tg->td; in tg_bps_limit()
305 ret = tg->bps[rw][td->limit_index]; in tg_bps_limit()
309 tg->iops[rw][td->limit_index]) in tg_bps_limit()
315 if (td->limit_index == LIMIT_MAX && tg->bps[rw][LIMIT_LOW] && in tg_bps_limit()
316 tg->bps[rw][LIMIT_LOW] != tg->bps[rw][LIMIT_MAX]) { in tg_bps_limit()
319 adjusted = throtl_adjusted_limit(tg->bps[rw][LIMIT_LOW], td); in tg_bps_limit()
320 ret = min(tg->bps[rw][LIMIT_MAX], adjusted); in tg_bps_limit()
325 static unsigned int tg_iops_limit(struct throtl_grp *tg, int rw) in tg_iops_limit() argument
327 struct blkcg_gq *blkg = tg_to_blkg(tg); in tg_iops_limit()
334 td = tg->td; in tg_iops_limit()
335 ret = tg->iops[rw][td->limit_index]; in tg_iops_limit()
336 if (ret == 0 && tg->td->limit_index == LIMIT_LOW) { in tg_iops_limit()
339 tg->bps[rw][td->limit_index]) in tg_iops_limit()
345 if (td->limit_index == LIMIT_MAX && tg->iops[rw][LIMIT_LOW] && in tg_iops_limit()
346 tg->iops[rw][LIMIT_LOW] != tg->iops[rw][LIMIT_MAX]) { in tg_iops_limit()
349 adjusted = throtl_adjusted_limit(tg->iops[rw][LIMIT_LOW], td); in tg_iops_limit()
352 ret = min_t(unsigned int, tg->iops[rw][LIMIT_MAX], adjusted); in tg_iops_limit()
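
tg_bps_limit() and tg_iops_limit() share one selection scheme: td->limit_index picks which of the per-direction limits (LIMIT_LOW or LIMIT_MAX) is currently in force, and while running at LIMIT_MAX a distinct low limit is scaled up by throtl_adjusted_limit() before being capped at the max. A rough sketch of the LIMIT_MAX branch, with the adjustment reduced to a hypothetical scale factor (the kernel derives it from td->scale):

    #include <stdint.h>

    enum { LIMIT_LOW, LIMIT_MAX, LIMIT_CNT };

    /* hypothetical stand-in for throtl_adjusted_limit(); the kernel
     * scales the low limit using td->scale, which grows over time */
    static uint64_t adjusted_limit(uint64_t low, unsigned int scale)
    {
        return low + (low / 2) * scale;
    }

    static uint64_t bps_limit(const uint64_t bps[LIMIT_CNT],
                              int limit_index, unsigned int scale)
    {
        uint64_t ret = bps[limit_index];

        /* while at LIMIT_MAX, a distinct low limit is inflated toward
         * the max so a group's share can grow back gradually */
        if (limit_index == LIMIT_MAX && bps[LIMIT_LOW] &&
            bps[LIMIT_LOW] != bps[LIMIT_MAX]) {
            uint64_t adj = adjusted_limit(bps[LIMIT_LOW], scale);

            ret = adj < bps[LIMIT_MAX] ? adj : bps[LIMIT_MAX];
        }
        return ret;
    }
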
392 static void throtl_qnode_init(struct throtl_qnode *qn, struct throtl_grp *tg) in throtl_qnode_init() argument
396 qn->tg = tg; in throtl_qnode_init()
415 blkg_get(tg_to_blkg(qn->tg)); in throtl_qnode_add_bio()
465 *tg_to_put = qn->tg; in throtl_pop_queued()
467 blkg_put(tg_to_blkg(qn->tg)); in throtl_pop_queued()
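
Per the hits above, throtl_qnode_add_bio() takes a blkg reference (blkg_get()) when a qnode is first linked onto a queued list, and throtl_pop_queued() drops it, or defers the drop to the caller via *tg_to_put, once the qnode empties, so a group with queued bios cannot be freed under the dispatcher. The pin-while-queued invariant, sketched with a plain counter standing in for the blkg refcount:

    /* sketch: group_refcount stands in for the blkg reference that
     * blkg_get()/blkg_put() take and drop in the real code */
    struct qnode {
        int nr_bios;            /* bios queued on this qnode */
        int *group_refcount;    /* owning group's refcount */
    };

    static void qnode_add_bio(struct qnode *qn)
    {
        if (qn->nr_bios++ == 0)
            (*qn->group_refcount)++;    /* first bio pins the group */
    }

    static void qnode_pop_bio(struct qnode *qn)
    {
        if (--qn->nr_bios == 0)
            (*qn->group_refcount)--;    /* last bio unpins it */
    }
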
487 struct throtl_grp *tg; in throtl_pd_alloc() local
490 tg = kzalloc_node(sizeof(*tg), gfp, node); in throtl_pd_alloc()
491 if (!tg) in throtl_pd_alloc()
494 throtl_service_queue_init(&tg->service_queue); in throtl_pd_alloc()
497 throtl_qnode_init(&tg->qnode_on_self[rw], tg); in throtl_pd_alloc()
498 throtl_qnode_init(&tg->qnode_on_parent[rw], tg); in throtl_pd_alloc()
501 RB_CLEAR_NODE(&tg->rb_node); in throtl_pd_alloc()
502 tg->bps[READ][LIMIT_MAX] = U64_MAX; in throtl_pd_alloc()
503 tg->bps[WRITE][LIMIT_MAX] = U64_MAX; in throtl_pd_alloc()
504 tg->iops[READ][LIMIT_MAX] = UINT_MAX; in throtl_pd_alloc()
505 tg->iops[WRITE][LIMIT_MAX] = UINT_MAX; in throtl_pd_alloc()
506 tg->bps_conf[READ][LIMIT_MAX] = U64_MAX; in throtl_pd_alloc()
507 tg->bps_conf[WRITE][LIMIT_MAX] = U64_MAX; in throtl_pd_alloc()
508 tg->iops_conf[READ][LIMIT_MAX] = UINT_MAX; in throtl_pd_alloc()
509 tg->iops_conf[WRITE][LIMIT_MAX] = UINT_MAX; in throtl_pd_alloc()
512 tg->latency_target = DFL_LATENCY_TARGET; in throtl_pd_alloc()
513 tg->latency_target_conf = DFL_LATENCY_TARGET; in throtl_pd_alloc()
514 tg->idletime_threshold = DFL_IDLE_THRESHOLD; in throtl_pd_alloc()
515 tg->idletime_threshold_conf = DFL_IDLE_THRESHOLD; in throtl_pd_alloc()
517 return &tg->pd; in throtl_pd_alloc()
522 struct throtl_grp *tg = pd_to_tg(pd); in throtl_pd_init() local
523 struct blkcg_gq *blkg = tg_to_blkg(tg); in throtl_pd_init()
525 struct throtl_service_queue *sq = &tg->service_queue; in throtl_pd_init()
543 tg->td = td; in throtl_pd_init()
551 static void tg_update_has_rules(struct throtl_grp *tg) in tg_update_has_rules() argument
553 struct throtl_grp *parent_tg = sq_to_tg(tg->service_queue.parent_sq); in tg_update_has_rules()
554 struct throtl_data *td = tg->td; in tg_update_has_rules()
558 tg->has_rules[rw] = (parent_tg && parent_tg->has_rules[rw]) || in tg_update_has_rules()
560 (tg_bps_limit(tg, rw) != U64_MAX || in tg_update_has_rules()
561 tg_iops_limit(tg, rw) != UINT_MAX)); in tg_update_has_rules()
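
tg_update_has_rules() folds the limit state into one flag per direction: a group has rules if it sets a finite bps/iops limit itself (the U64_MAX/UINT_MAX defaults from throtl_pd_alloc() mean "unlimited") or if any ancestor has rules, letting blk_throtl_bio() skip unthrottled hierarchies with a single test. A condensed restatement (omitting the limit_valid check the kernel also applies):

    #include <limits.h>
    #include <stdbool.h>
    #include <stdint.h>

    #define BPS_UNLIMITED  UINT64_MAX   /* default from throtl_pd_alloc() */
    #define IOPS_UNLIMITED UINT_MAX

    struct grp {
        struct grp *parent;
        uint64_t bps_limit;
        unsigned int iops_limit;
        bool has_rules;
    };

    /* rules are inherited downward: a child of a throttled group is
     * throttled even if it sets no limit of its own */
    static void update_has_rules(struct grp *g)
    {
        g->has_rules = (g->parent && g->parent->has_rules) ||
                       g->bps_limit != BPS_UNLIMITED ||
                       g->iops_limit != IOPS_UNLIMITED;
    }
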
566 struct throtl_grp *tg = pd_to_tg(pd); in throtl_pd_online() local
571 tg_update_has_rules(tg); in throtl_pd_online()
582 struct throtl_grp *tg = blkg_to_tg(blkg); in blk_throtl_update_limit_valid() local
584 if (tg->bps[READ][LIMIT_LOW] || tg->bps[WRITE][LIMIT_LOW] || in blk_throtl_update_limit_valid()
585 tg->iops[READ][LIMIT_LOW] || tg->iops[WRITE][LIMIT_LOW]) in blk_throtl_update_limit_valid()
596 struct throtl_grp *tg = pd_to_tg(pd); in throtl_pd_offline() local
598 tg->bps[READ][LIMIT_LOW] = 0; in throtl_pd_offline()
599 tg->bps[WRITE][LIMIT_LOW] = 0; in throtl_pd_offline()
600 tg->iops[READ][LIMIT_LOW] = 0; in throtl_pd_offline()
601 tg->iops[WRITE][LIMIT_LOW] = 0; in throtl_pd_offline()
603 blk_throtl_update_limit_valid(tg->td); in throtl_pd_offline()
605 if (!tg->td->limit_valid[tg->td->limit_index]) in throtl_pd_offline()
606 throtl_upgrade_state(tg->td); in throtl_pd_offline()
611 struct throtl_grp *tg = pd_to_tg(pd); in throtl_pd_free() local
613 del_timer_sync(&tg->service_queue.pending_timer); in throtl_pd_free()
614 kfree(tg); in throtl_pd_free()
650 struct throtl_grp *tg; in update_min_dispatch_time() local
652 tg = throtl_rb_first(parent_sq); in update_min_dispatch_time()
653 if (!tg) in update_min_dispatch_time()
656 parent_sq->first_pending_disptime = tg->disptime; in update_min_dispatch_time()
659 static void tg_service_queue_add(struct throtl_grp *tg) in tg_service_queue_add() argument
661 struct throtl_service_queue *parent_sq = tg->service_queue.parent_sq; in tg_service_queue_add()
665 unsigned long key = tg->disptime; in tg_service_queue_add()
681 parent_sq->first_pending = &tg->rb_node; in tg_service_queue_add()
683 rb_link_node(&tg->rb_node, parent, node); in tg_service_queue_add()
684 rb_insert_color(&tg->rb_node, &parent_sq->pending_tree); in tg_service_queue_add()
687 static void __throtl_enqueue_tg(struct throtl_grp *tg) in __throtl_enqueue_tg() argument
689 tg_service_queue_add(tg); in __throtl_enqueue_tg()
690 tg->flags |= THROTL_TG_PENDING; in __throtl_enqueue_tg()
691 tg->service_queue.parent_sq->nr_pending++; in __throtl_enqueue_tg()
694 static void throtl_enqueue_tg(struct throtl_grp *tg) in throtl_enqueue_tg() argument
696 if (!(tg->flags & THROTL_TG_PENDING)) in throtl_enqueue_tg()
697 __throtl_enqueue_tg(tg); in throtl_enqueue_tg()
700 static void __throtl_dequeue_tg(struct throtl_grp *tg) in __throtl_dequeue_tg() argument
702 throtl_rb_erase(&tg->rb_node, tg->service_queue.parent_sq); in __throtl_dequeue_tg()
703 tg->flags &= ~THROTL_TG_PENDING; in __throtl_dequeue_tg()
706 static void throtl_dequeue_tg(struct throtl_grp *tg) in throtl_dequeue_tg() argument
708 if (tg->flags & THROTL_TG_PENDING) in throtl_dequeue_tg()
709 __throtl_dequeue_tg(tg); in throtl_dequeue_tg()
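
tg_service_queue_add() inserts the group into the parent's pending_tree keyed on tg->disptime, caching the leftmost node in first_pending so update_min_dispatch_time() is O(1), while the enqueue/dequeue wrappers use THROTL_TG_PENDING to make both operations idempotent. The flag logic in isolation, with no-op stubs standing in for the rbtree calls:

    #define TG_PENDING 0x1u

    struct tg {
        unsigned int flags;
        unsigned long disptime;    /* sort key: jiffy of next allowed dispatch */
    };

    /* no-op stand-ins for rb_link_node()/rb_insert_color() and
     * throtl_rb_erase(); the kernel keeps the tree ordered by disptime
     * and caches the leftmost node in parent_sq->first_pending */
    static void tree_insert(struct tg *tg) { (void)tg; }
    static void tree_erase(struct tg *tg)  { (void)tg; }

    /* THROTL_TG_PENDING makes both operations idempotent, so callers
     * need not track whether the group is already queued */
    static void enqueue_tg(struct tg *tg)
    {
        if (!(tg->flags & TG_PENDING)) {
            tree_insert(tg);
            tg->flags |= TG_PENDING;
        }
    }

    static void dequeue_tg(struct tg *tg)
    {
        if (tg->flags & TG_PENDING) {
            tree_erase(tg);
            tg->flags &= ~TG_PENDING;
        }
    }
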
769 static inline void throtl_start_new_slice_with_credit(struct throtl_grp *tg, in throtl_start_new_slice_with_credit() argument
772 tg->bytes_disp[rw] = 0; in throtl_start_new_slice_with_credit()
773 tg->io_disp[rw] = 0; in throtl_start_new_slice_with_credit()
781 if (time_after_eq(start, tg->slice_start[rw])) in throtl_start_new_slice_with_credit()
782 tg->slice_start[rw] = start; in throtl_start_new_slice_with_credit()
784 tg->slice_end[rw] = jiffies + tg->td->throtl_slice; in throtl_start_new_slice_with_credit()
785 throtl_log(&tg->service_queue, in throtl_start_new_slice_with_credit()
787 rw == READ ? 'R' : 'W', tg->slice_start[rw], in throtl_start_new_slice_with_credit()
788 tg->slice_end[rw], jiffies); in throtl_start_new_slice_with_credit()
791 static inline void throtl_start_new_slice(struct throtl_grp *tg, bool rw) in throtl_start_new_slice() argument
793 tg->bytes_disp[rw] = 0; in throtl_start_new_slice()
794 tg->io_disp[rw] = 0; in throtl_start_new_slice()
795 tg->slice_start[rw] = jiffies; in throtl_start_new_slice()
796 tg->slice_end[rw] = jiffies + tg->td->throtl_slice; in throtl_start_new_slice()
797 throtl_log(&tg->service_queue, in throtl_start_new_slice()
799 rw == READ ? 'R' : 'W', tg->slice_start[rw], in throtl_start_new_slice()
800 tg->slice_end[rw], jiffies); in throtl_start_new_slice()
803 static inline void throtl_set_slice_end(struct throtl_grp *tg, bool rw, in throtl_set_slice_end() argument
806 tg->slice_end[rw] = roundup(jiffy_end, tg->td->throtl_slice); in throtl_set_slice_end()
809 static inline void throtl_extend_slice(struct throtl_grp *tg, bool rw, in throtl_extend_slice() argument
812 tg->slice_end[rw] = roundup(jiffy_end, tg->td->throtl_slice); in throtl_extend_slice()
813 throtl_log(&tg->service_queue, in throtl_extend_slice()
815 rw == READ ? 'R' : 'W', tg->slice_start[rw], in throtl_extend_slice()
816 tg->slice_end[rw], jiffies); in throtl_extend_slice()
820 static bool throtl_slice_used(struct throtl_grp *tg, bool rw) in throtl_slice_used() argument
822 if (time_in_range(jiffies, tg->slice_start[rw], tg->slice_end[rw])) in throtl_slice_used()
829 static inline void throtl_trim_slice(struct throtl_grp *tg, bool rw) in throtl_trim_slice() argument
834 BUG_ON(time_before(tg->slice_end[rw], tg->slice_start[rw])); in throtl_trim_slice()
841 if (throtl_slice_used(tg, rw)) in throtl_trim_slice()
852 throtl_set_slice_end(tg, rw, jiffies + tg->td->throtl_slice); in throtl_trim_slice()
854 time_elapsed = jiffies - tg->slice_start[rw]; in throtl_trim_slice()
856 nr_slices = time_elapsed / tg->td->throtl_slice; in throtl_trim_slice()
860 tmp = tg_bps_limit(tg, rw) * tg->td->throtl_slice * nr_slices; in throtl_trim_slice()
864 io_trim = (tg_iops_limit(tg, rw) * tg->td->throtl_slice * nr_slices) / in throtl_trim_slice()
870 if (tg->bytes_disp[rw] >= bytes_trim) in throtl_trim_slice()
871 tg->bytes_disp[rw] -= bytes_trim; in throtl_trim_slice()
873 tg->bytes_disp[rw] = 0; in throtl_trim_slice()
875 if (tg->io_disp[rw] >= io_trim) in throtl_trim_slice()
876 tg->io_disp[rw] -= io_trim; in throtl_trim_slice()
878 tg->io_disp[rw] = 0; in throtl_trim_slice()
880 tg->slice_start[rw] += nr_slices * tg->td->throtl_slice; in throtl_trim_slice()
882 throtl_log(&tg->service_queue, in throtl_trim_slice()
885 tg->slice_start[rw], tg->slice_end[rw], jiffies); in throtl_trim_slice()
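
throtl_trim_slice() keeps the accounting window anchored near the present: it counts how many whole throtl_slice periods have elapsed, subtracts the bytes and IOs those periods were entitled to from bytes_disp/io_disp, and slides slice_start forward, so idle time never turns into retroactive credit. Roughly (the kernel's do_div()-based rounding is simplified here):

    #include <stdint.h>

    struct slice {
        unsigned long start;    /* tg->slice_start[rw] */
        uint64_t bytes_disp;    /* tg->bytes_disp[rw] */
        unsigned int io_disp;   /* tg->io_disp[rw] */
    };

    static void trim_slice(struct slice *s, unsigned long now,
                           unsigned long slice_len,    /* td->throtl_slice */
                           uint64_t bps, unsigned int iops, unsigned int hz)
    {
        unsigned long nr_slices = (now - s->start) / slice_len;

        if (!nr_slices)
            return;

        /* what the elapsed whole slices were entitled to */
        uint64_t bytes_trim = bps * slice_len * nr_slices / hz;
        unsigned int io_trim = iops * slice_len * nr_slices / hz;

        /* forgive at most what was actually dispatched */
        s->bytes_disp = s->bytes_disp > bytes_trim ? s->bytes_disp - bytes_trim : 0;
        s->io_disp    = s->io_disp    > io_trim    ? s->io_disp    - io_trim    : 0;

        s->start += nr_slices * slice_len;    /* slide the window forward */
    }
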
888 static bool tg_with_in_iops_limit(struct throtl_grp *tg, struct bio *bio, in tg_with_in_iops_limit() argument
896 jiffy_elapsed = jiffy_elapsed_rnd = jiffies - tg->slice_start[rw]; in tg_with_in_iops_limit()
900 jiffy_elapsed_rnd = tg->td->throtl_slice; in tg_with_in_iops_limit()
902 jiffy_elapsed_rnd = roundup(jiffy_elapsed_rnd, tg->td->throtl_slice); in tg_with_in_iops_limit()
911 tmp = (u64)tg_iops_limit(tg, rw) * jiffy_elapsed_rnd; in tg_with_in_iops_limit()
919 if (tg->io_disp[rw] + 1 <= io_allowed) { in tg_with_in_iops_limit()
926 jiffy_wait = ((tg->io_disp[rw] + 1) * HZ) / tg_iops_limit(tg, rw) + 1; in tg_with_in_iops_limit()
938 static bool tg_with_in_bps_limit(struct throtl_grp *tg, struct bio *bio, in tg_with_in_bps_limit() argument
946 jiffy_elapsed = jiffy_elapsed_rnd = jiffies - tg->slice_start[rw]; in tg_with_in_bps_limit()
950 jiffy_elapsed_rnd = tg->td->throtl_slice; in tg_with_in_bps_limit()
952 jiffy_elapsed_rnd = roundup(jiffy_elapsed_rnd, tg->td->throtl_slice); in tg_with_in_bps_limit()
954 tmp = tg_bps_limit(tg, rw) * jiffy_elapsed_rnd; in tg_with_in_bps_limit()
958 if (tg->bytes_disp[rw] + bio_size <= bytes_allowed) { in tg_with_in_bps_limit()
965 extra_bytes = tg->bytes_disp[rw] + bio_size - bytes_allowed; in tg_with_in_bps_limit()
966 jiffy_wait = div64_u64(extra_bytes * HZ, tg_bps_limit(tg, rw)); in tg_with_in_bps_limit()
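
tg_with_in_bps_limit() and tg_with_in_iops_limit() follow one pattern: round the elapsed slice time up to a whole throtl_slice, compute the allowance as limit * elapsed / HZ, and if the bio overshoots, convert the excess back into jiffies as extra * HZ / limit. For example, at 1 MiB/s with 512 KiB already dispatched this slice, a 768 KiB bio overshoots a one-second allowance by 256 KiB and must wait about a quarter second. The bps side, condensed:

    #include <stdbool.h>
    #include <stdint.h>

    /* mirrors tg_with_in_bps_limit(): elapsed_rnd is the slice time in
     * jiffies, rounded up to a whole throtl_slice; returns true if the
     * bio fits the byte budget, else *wait gets the extra jiffies */
    static bool within_bps(uint64_t bps, unsigned long elapsed_rnd,
                           uint64_t bytes_disp, uint64_t bio_size,
                           unsigned int hz, unsigned long *wait)
    {
        uint64_t allowed = bps * elapsed_rnd / hz;

        if (bytes_disp + bio_size <= allowed) {
            if (wait)
                *wait = 0;
            return true;
        }

        /* convert the excess bytes back into time at the configured rate */
        uint64_t extra = bytes_disp + bio_size - allowed;

        if (wait)
            *wait = (unsigned long)(extra * hz / bps);
        return false;
    }
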
985 static bool tg_may_dispatch(struct throtl_grp *tg, struct bio *bio, in tg_may_dispatch() argument
997 BUG_ON(tg->service_queue.nr_queued[rw] && in tg_may_dispatch()
998 bio != throtl_peek_queued(&tg->service_queue.queued[rw])); in tg_may_dispatch()
1001 if (tg_bps_limit(tg, rw) == U64_MAX && in tg_may_dispatch()
1002 tg_iops_limit(tg, rw) == UINT_MAX) { in tg_may_dispatch()
1015 if (throtl_slice_used(tg, rw) && !(tg->service_queue.nr_queued[rw])) in tg_may_dispatch()
1016 throtl_start_new_slice(tg, rw); in tg_may_dispatch()
1018 if (time_before(tg->slice_end[rw], in tg_may_dispatch()
1019 jiffies + tg->td->throtl_slice)) in tg_may_dispatch()
1020 throtl_extend_slice(tg, rw, in tg_may_dispatch()
1021 jiffies + tg->td->throtl_slice); in tg_may_dispatch()
1024 if (tg_with_in_bps_limit(tg, bio, &bps_wait) && in tg_may_dispatch()
1025 tg_with_in_iops_limit(tg, bio, &iops_wait)) { in tg_may_dispatch()
1036 if (time_before(tg->slice_end[rw], jiffies + max_wait)) in tg_may_dispatch()
1037 throtl_extend_slice(tg, rw, jiffies + max_wait); in tg_may_dispatch()
1042 static void throtl_charge_bio(struct throtl_grp *tg, struct bio *bio) in throtl_charge_bio() argument
1048 tg->bytes_disp[rw] += bio_size; in throtl_charge_bio()
1049 tg->io_disp[rw]++; in throtl_charge_bio()
1050 tg->last_bytes_disp[rw] += bio_size; in throtl_charge_bio()
1051 tg->last_io_disp[rw]++; in throtl_charge_bio()
1073 struct throtl_grp *tg) in throtl_add_bio_tg() argument
1075 struct throtl_service_queue *sq = &tg->service_queue; in throtl_add_bio_tg()
1079 qn = &tg->qnode_on_self[rw]; in throtl_add_bio_tg()
1088 tg->flags |= THROTL_TG_WAS_EMPTY; in throtl_add_bio_tg()
1093 throtl_enqueue_tg(tg); in throtl_add_bio_tg()
1096 static void tg_update_disptime(struct throtl_grp *tg) in tg_update_disptime() argument
1098 struct throtl_service_queue *sq = &tg->service_queue; in tg_update_disptime()
1104 tg_may_dispatch(tg, bio, &read_wait); in tg_update_disptime()
1108 tg_may_dispatch(tg, bio, &write_wait); in tg_update_disptime()
1114 throtl_dequeue_tg(tg); in tg_update_disptime()
1115 tg->disptime = disptime; in tg_update_disptime()
1116 throtl_enqueue_tg(tg); in tg_update_disptime()
1119 tg->flags &= ~THROTL_TG_WAS_EMPTY; in tg_update_disptime()
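
tg_update_disptime() asks tg_may_dispatch() for the wait time of the first queued read and the first queued write, then requeues the group at disptime = jiffies + min(read_wait, write_wait); the dequeue/enqueue pair exists because an rbtree node cannot be rekeyed in place. Building on the pending-tree sketch above:

    /* requeue-at-new-key, as tg_update_disptime() does: remove the
     * node, update its sort key, reinsert it at the new position */
    static void update_disptime(struct tg *tg, unsigned long now,
                                unsigned long read_wait,
                                unsigned long write_wait)
    {
        unsigned long min_wait = read_wait < write_wait ? read_wait : write_wait;

        dequeue_tg(tg);                   /* out of the parent's pending tree */
        tg->disptime = now + min_wait;    /* new sort key */
        enqueue_tg(tg);                   /* reinserted, repositioned */
    }
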
1132 static void tg_dispatch_one_bio(struct throtl_grp *tg, bool rw) in tg_dispatch_one_bio() argument
1134 struct throtl_service_queue *sq = &tg->service_queue; in tg_dispatch_one_bio()
1149 throtl_charge_bio(tg, bio); in tg_dispatch_one_bio()
1159 throtl_add_bio_tg(bio, &tg->qnode_on_parent[rw], parent_tg); in tg_dispatch_one_bio()
1160 start_parent_slice_with_credit(tg, parent_tg, rw); in tg_dispatch_one_bio()
1162 throtl_qnode_add_bio(bio, &tg->qnode_on_parent[rw], in tg_dispatch_one_bio()
1164 BUG_ON(tg->td->nr_queued[rw] <= 0); in tg_dispatch_one_bio()
1165 tg->td->nr_queued[rw]--; in tg_dispatch_one_bio()
1168 throtl_trim_slice(tg, rw); in tg_dispatch_one_bio()
1174 static int throtl_dispatch_tg(struct throtl_grp *tg) in throtl_dispatch_tg() argument
1176 struct throtl_service_queue *sq = &tg->service_queue; in throtl_dispatch_tg()
1185 tg_may_dispatch(tg, bio, NULL)) { in throtl_dispatch_tg()
1187 tg_dispatch_one_bio(tg, bio_data_dir(bio)); in throtl_dispatch_tg()
1195 tg_may_dispatch(tg, bio, NULL)) { in throtl_dispatch_tg()
1197 tg_dispatch_one_bio(tg, bio_data_dir(bio)); in throtl_dispatch_tg()
1212 struct throtl_grp *tg = throtl_rb_first(parent_sq); in throtl_select_dispatch() local
1213 struct throtl_service_queue *sq = &tg->service_queue; in throtl_select_dispatch()
1215 if (!tg) in throtl_select_dispatch()
1218 if (time_before(jiffies, tg->disptime)) in throtl_select_dispatch()
1221 throtl_dequeue_tg(tg); in throtl_select_dispatch()
1223 nr_disp += throtl_dispatch_tg(tg); in throtl_select_dispatch()
1226 tg_update_disptime(tg); in throtl_select_dispatch()
1255 struct throtl_grp *tg = sq_to_tg(sq); in throtl_pending_timer_fn() local
1295 if (tg->flags & THROTL_TG_WAS_EMPTY) { in throtl_pending_timer_fn()
1296 tg_update_disptime(tg); in throtl_pending_timer_fn()
1300 tg = sq_to_tg(sq); in throtl_pending_timer_fn()
1350 struct throtl_grp *tg = pd_to_tg(pd); in tg_prfill_conf_u64() local
1351 u64 v = *(u64 *)((void *)tg + off); in tg_prfill_conf_u64()
1361 struct throtl_grp *tg = pd_to_tg(pd); in tg_prfill_conf_uint() local
1362 unsigned int v = *(unsigned int *)((void *)tg + off); in tg_prfill_conf_uint()
1383 static void tg_conf_updated(struct throtl_grp *tg, bool global) in tg_conf_updated() argument
1385 struct throtl_service_queue *sq = &tg->service_queue; in tg_conf_updated()
1389 throtl_log(&tg->service_queue, in tg_conf_updated()
1391 tg_bps_limit(tg, READ), tg_bps_limit(tg, WRITE), in tg_conf_updated()
1392 tg_iops_limit(tg, READ), tg_iops_limit(tg, WRITE)); in tg_conf_updated()
1402 global ? tg->td->queue->root_blkg : tg_to_blkg(tg)) { in tg_conf_updated()
1430 throtl_start_new_slice(tg, 0); in tg_conf_updated()
1431 throtl_start_new_slice(tg, 1); in tg_conf_updated()
1433 if (tg->flags & THROTL_TG_PENDING) { in tg_conf_updated()
1434 tg_update_disptime(tg); in tg_conf_updated()
1444 struct throtl_grp *tg; in tg_set_conf() local
1458 tg = blkg_to_tg(ctx.blkg); in tg_set_conf()
1461 *(u64 *)((void *)tg + of_cft(of)->private) = v; in tg_set_conf()
1463 *(unsigned int *)((void *)tg + of_cft(of)->private) = v; in tg_set_conf()
1465 tg_conf_updated(tg, false); in tg_set_conf()
1525 struct throtl_grp *tg = pd_to_tg(pd); in tg_prfill_limit() local
1544 if (tg->bps_conf[READ][off] == bps_dft && in tg_prfill_limit()
1545 tg->bps_conf[WRITE][off] == bps_dft && in tg_prfill_limit()
1546 tg->iops_conf[READ][off] == iops_dft && in tg_prfill_limit()
1547 tg->iops_conf[WRITE][off] == iops_dft && in tg_prfill_limit()
1549 (tg->idletime_threshold_conf == DFL_IDLE_THRESHOLD && in tg_prfill_limit()
1550 tg->latency_target_conf == DFL_LATENCY_TARGET))) in tg_prfill_limit()
1553 if (tg->bps_conf[READ][off] != U64_MAX) in tg_prfill_limit()
1555 tg->bps_conf[READ][off]); in tg_prfill_limit()
1556 if (tg->bps_conf[WRITE][off] != U64_MAX) in tg_prfill_limit()
1558 tg->bps_conf[WRITE][off]); in tg_prfill_limit()
1559 if (tg->iops_conf[READ][off] != UINT_MAX) in tg_prfill_limit()
1561 tg->iops_conf[READ][off]); in tg_prfill_limit()
1562 if (tg->iops_conf[WRITE][off] != UINT_MAX) in tg_prfill_limit()
1564 tg->iops_conf[WRITE][off]); in tg_prfill_limit()
1566 if (tg->idletime_threshold_conf == ULONG_MAX) in tg_prfill_limit()
1570 tg->idletime_threshold_conf); in tg_prfill_limit()
1572 if (tg->latency_target_conf == ULONG_MAX) in tg_prfill_limit()
1576 " latency=%lu", tg->latency_target_conf); in tg_prfill_limit()
1597 struct throtl_grp *tg; in tg_set_limit() local
1608 tg = blkg_to_tg(ctx.blkg); in tg_set_limit()
1610 v[0] = tg->bps_conf[READ][index]; in tg_set_limit()
1611 v[1] = tg->bps_conf[WRITE][index]; in tg_set_limit()
1612 v[2] = tg->iops_conf[READ][index]; in tg_set_limit()
1613 v[3] = tg->iops_conf[WRITE][index]; in tg_set_limit()
1615 idle_time = tg->idletime_threshold_conf; in tg_set_limit()
1616 latency_time = tg->latency_target_conf; in tg_set_limit()
1656 tg->bps_conf[READ][index] = v[0]; in tg_set_limit()
1657 tg->bps_conf[WRITE][index] = v[1]; in tg_set_limit()
1658 tg->iops_conf[READ][index] = v[2]; in tg_set_limit()
1659 tg->iops_conf[WRITE][index] = v[3]; in tg_set_limit()
1662 tg->bps[READ][index] = v[0]; in tg_set_limit()
1663 tg->bps[WRITE][index] = v[1]; in tg_set_limit()
1664 tg->iops[READ][index] = v[2]; in tg_set_limit()
1665 tg->iops[WRITE][index] = v[3]; in tg_set_limit()
1667 tg->bps[READ][LIMIT_LOW] = min(tg->bps_conf[READ][LIMIT_LOW], in tg_set_limit()
1668 tg->bps_conf[READ][LIMIT_MAX]); in tg_set_limit()
1669 tg->bps[WRITE][LIMIT_LOW] = min(tg->bps_conf[WRITE][LIMIT_LOW], in tg_set_limit()
1670 tg->bps_conf[WRITE][LIMIT_MAX]); in tg_set_limit()
1671 tg->iops[READ][LIMIT_LOW] = min(tg->iops_conf[READ][LIMIT_LOW], in tg_set_limit()
1672 tg->iops_conf[READ][LIMIT_MAX]); in tg_set_limit()
1673 tg->iops[WRITE][LIMIT_LOW] = min(tg->iops_conf[WRITE][LIMIT_LOW], in tg_set_limit()
1674 tg->iops_conf[WRITE][LIMIT_MAX]); in tg_set_limit()
1675 tg->idletime_threshold_conf = idle_time; in tg_set_limit()
1676 tg->latency_target_conf = latency_time; in tg_set_limit()
1679 if (!(tg->bps[READ][LIMIT_LOW] || tg->iops[READ][LIMIT_LOW] || in tg_set_limit()
1680 tg->bps[WRITE][LIMIT_LOW] || tg->iops[WRITE][LIMIT_LOW]) || in tg_set_limit()
1681 tg->idletime_threshold_conf == DFL_IDLE_THRESHOLD || in tg_set_limit()
1682 tg->latency_target_conf == DFL_LATENCY_TARGET) { in tg_set_limit()
1683 tg->bps[READ][LIMIT_LOW] = 0; in tg_set_limit()
1684 tg->bps[WRITE][LIMIT_LOW] = 0; in tg_set_limit()
1685 tg->iops[READ][LIMIT_LOW] = 0; in tg_set_limit()
1686 tg->iops[WRITE][LIMIT_LOW] = 0; in tg_set_limit()
1687 tg->idletime_threshold = DFL_IDLE_THRESHOLD; in tg_set_limit()
1688 tg->latency_target = DFL_LATENCY_TARGET; in tg_set_limit()
1690 tg->idletime_threshold = tg->idletime_threshold_conf; in tg_set_limit()
1691 tg->latency_target = tg->latency_target_conf; in tg_set_limit()
1694 blk_throtl_update_limit_valid(tg->td); in tg_set_limit()
1695 if (tg->td->limit_valid[LIMIT_LOW]) { in tg_set_limit()
1697 tg->td->limit_index = LIMIT_LOW; in tg_set_limit()
1699 tg->td->limit_index = LIMIT_MAX; in tg_set_limit()
1700 tg_conf_updated(tg, index == LIMIT_LOW && in tg_set_limit()
1701 tg->td->limit_valid[LIMIT_LOW]); in tg_set_limit()
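
tg_set_limit() keeps two arrays per group: the *_conf values preserve what the admin wrote, while the effective low limit is clamped so it never exceeds the configured max, i.e. bps[LIMIT_LOW] = min(bps_conf[LIMIT_LOW], bps_conf[LIMIT_MAX]); a low limit only takes effect if at least one of the four low values is nonzero. The clamp in isolation:

    #include <stdint.h>

    enum { LIMIT_LOW, LIMIT_MAX, LIMIT_CNT };
    enum { R, W, RW_CNT };

    /* effective low limit can never exceed the configured max; the
     * conf array preserves what the admin actually wrote (the iops
     * limits are clamped the same way) */
    static void apply_bps_limits(uint64_t bps[RW_CNT][LIMIT_CNT],
                                 const uint64_t conf[RW_CNT][LIMIT_CNT])
    {
        for (int rw = R; rw < RW_CNT; rw++) {
            bps[rw][LIMIT_MAX] = conf[rw][LIMIT_MAX];
            bps[rw][LIMIT_LOW] = conf[rw][LIMIT_LOW] < conf[rw][LIMIT_MAX] ?
                                 conf[rw][LIMIT_LOW] : conf[rw][LIMIT_MAX];
        }
    }
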
1746 static unsigned long __tg_last_low_overflow_time(struct throtl_grp *tg) in __tg_last_low_overflow_time() argument
1750 if (tg->bps[READ][LIMIT_LOW] || tg->iops[READ][LIMIT_LOW]) in __tg_last_low_overflow_time()
1751 rtime = tg->last_low_overflow_time[READ]; in __tg_last_low_overflow_time()
1752 if (tg->bps[WRITE][LIMIT_LOW] || tg->iops[WRITE][LIMIT_LOW]) in __tg_last_low_overflow_time()
1753 wtime = tg->last_low_overflow_time[WRITE]; in __tg_last_low_overflow_time()
1758 static unsigned long tg_last_low_overflow_time(struct throtl_grp *tg) in tg_last_low_overflow_time() argument
1761 struct throtl_grp *parent = tg; in tg_last_low_overflow_time()
1762 unsigned long ret = __tg_last_low_overflow_time(tg); in tg_last_low_overflow_time()
1785 static bool throtl_tg_is_idle(struct throtl_grp *tg) in throtl_tg_is_idle() argument
1797 time = min_t(unsigned long, MAX_IDLE_TIME, 4 * tg->idletime_threshold); in throtl_tg_is_idle()
1798 ret = tg->latency_target == DFL_LATENCY_TARGET || in throtl_tg_is_idle()
1799 tg->idletime_threshold == DFL_IDLE_THRESHOLD || in throtl_tg_is_idle()
1800 (ktime_get_ns() >> 10) - tg->last_finish_time > time || in throtl_tg_is_idle()
1801 tg->avg_idletime > tg->idletime_threshold || in throtl_tg_is_idle()
1802 (tg->latency_target && tg->bio_cnt && in throtl_tg_is_idle()
1803 tg->bad_bio_cnt * 5 < tg->bio_cnt); in throtl_tg_is_idle()
1804 throtl_log(&tg->service_queue, in throtl_tg_is_idle()
1806 tg->avg_idletime, tg->idletime_threshold, tg->bad_bio_cnt, in throtl_tg_is_idle()
1807 tg->bio_cnt, ret, tg->td->scale); in throtl_tg_is_idle()
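
throtl_tg_is_idle() declares a group idle, and therefore safe to hold at its low limit, if any one condition holds: no latency target or idle threshold is configured, no I/O completed within min(MAX_IDLE_TIME, 4 * idletime_threshold), the average idle time exceeds the threshold, or fewer than one in five recent bios missed the latency target (bad_bio_cnt * 5 < bio_cnt). As a predicate, with field names abbreviated:

    #include <stdbool.h>
    #include <stdint.h>

    /* sketch of the throtl_tg_is_idle() disjunction; times in usec,
     * dfl_* are the "unconfigured" defaults */
    struct idle_stats {
        uint64_t latency_target, dfl_latency_target;
        uint64_t idle_threshold, dfl_idle_threshold, max_idle_time;
        uint64_t now, last_finish_time, avg_idletime;
        uint64_t bio_cnt, bad_bio_cnt;
    };

    static bool tg_is_idle(const struct idle_stats *s)
    {
        uint64_t window = 4 * s->idle_threshold;

        if (window > s->max_idle_time)
            window = s->max_idle_time;    /* min(MAX_IDLE_TIME, 4 * threshold) */

        return s->latency_target == s->dfl_latency_target ||
               s->idle_threshold == s->dfl_idle_threshold ||
               s->now - s->last_finish_time > window ||
               s->avg_idletime > s->idle_threshold ||
               /* fewer than one in five bios missed the latency target */
               (s->latency_target && s->bio_cnt &&
                s->bad_bio_cnt * 5 < s->bio_cnt);
    }
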
1811 static bool throtl_tg_can_upgrade(struct throtl_grp *tg) in throtl_tg_can_upgrade() argument
1813 struct throtl_service_queue *sq = &tg->service_queue; in throtl_tg_can_upgrade()
1820 read_limit = tg->bps[READ][LIMIT_LOW] || tg->iops[READ][LIMIT_LOW]; in throtl_tg_can_upgrade()
1821 write_limit = tg->bps[WRITE][LIMIT_LOW] || tg->iops[WRITE][LIMIT_LOW]; in throtl_tg_can_upgrade()
1832 tg_last_low_overflow_time(tg) + tg->td->throtl_slice) && in throtl_tg_can_upgrade()
1833 throtl_tg_is_idle(tg)) in throtl_tg_can_upgrade()
1838 static bool throtl_hierarchy_can_upgrade(struct throtl_grp *tg) in throtl_hierarchy_can_upgrade() argument
1841 if (throtl_tg_can_upgrade(tg)) in throtl_hierarchy_can_upgrade()
1843 tg = sq_to_tg(tg->service_queue.parent_sq); in throtl_hierarchy_can_upgrade()
1844 if (!tg || !tg_to_blkg(tg)->parent) in throtl_hierarchy_can_upgrade()
1864 struct throtl_grp *tg = blkg_to_tg(blkg); in throtl_can_upgrade() local
1866 if (tg == this_tg) in throtl_can_upgrade()
1868 if (!list_empty(&tg_to_blkg(tg)->blkcg->css.children)) in throtl_can_upgrade()
1870 if (!throtl_hierarchy_can_upgrade(tg)) { in throtl_can_upgrade()
1879 static void throtl_upgrade_check(struct throtl_grp *tg) in throtl_upgrade_check() argument
1883 if (tg->td->limit_index != LIMIT_LOW) in throtl_upgrade_check()
1886 if (time_after(tg->last_check_time + tg->td->throtl_slice, now)) in throtl_upgrade_check()
1889 tg->last_check_time = now; in throtl_upgrade_check()
1892 __tg_last_low_overflow_time(tg) + tg->td->throtl_slice)) in throtl_upgrade_check()
1895 if (throtl_can_upgrade(tg->td, NULL)) in throtl_upgrade_check()
1896 throtl_upgrade_state(tg->td); in throtl_upgrade_check()
1910 struct throtl_grp *tg = blkg_to_tg(blkg); in throtl_upgrade_state() local
1911 struct throtl_service_queue *sq = &tg->service_queue; in throtl_upgrade_state()
1913 tg->disptime = jiffies - 1; in throtl_upgrade_state()
1937 static bool throtl_tg_can_downgrade(struct throtl_grp *tg) in throtl_tg_can_downgrade() argument
1939 struct throtl_data *td = tg->td; in throtl_tg_can_downgrade()
1947 time_after_eq(now, tg_last_low_overflow_time(tg) + in throtl_tg_can_downgrade()
1949 (!throtl_tg_is_idle(tg) || in throtl_tg_can_downgrade()
1950 !list_empty(&tg_to_blkg(tg)->blkcg->css.children))) in throtl_tg_can_downgrade()
1955 static bool throtl_hierarchy_can_downgrade(struct throtl_grp *tg) in throtl_hierarchy_can_downgrade() argument
1958 if (!throtl_tg_can_downgrade(tg)) in throtl_hierarchy_can_downgrade()
1960 tg = sq_to_tg(tg->service_queue.parent_sq); in throtl_hierarchy_can_downgrade()
1961 if (!tg || !tg_to_blkg(tg)->parent) in throtl_hierarchy_can_downgrade()
1967 static void throtl_downgrade_check(struct throtl_grp *tg) in throtl_downgrade_check() argument
1974 if (tg->td->limit_index != LIMIT_MAX || in throtl_downgrade_check()
1975 !tg->td->limit_valid[LIMIT_LOW]) in throtl_downgrade_check()
1977 if (!list_empty(&tg_to_blkg(tg)->blkcg->css.children)) in throtl_downgrade_check()
1979 if (time_after(tg->last_check_time + tg->td->throtl_slice, now)) in throtl_downgrade_check()
1982 elapsed_time = now - tg->last_check_time; in throtl_downgrade_check()
1983 tg->last_check_time = now; in throtl_downgrade_check()
1985 if (time_before(now, tg_last_low_overflow_time(tg) + in throtl_downgrade_check()
1986 tg->td->throtl_slice)) in throtl_downgrade_check()
1989 if (tg->bps[READ][LIMIT_LOW]) { in throtl_downgrade_check()
1990 bps = tg->last_bytes_disp[READ] * HZ; in throtl_downgrade_check()
1992 if (bps >= tg->bps[READ][LIMIT_LOW]) in throtl_downgrade_check()
1993 tg->last_low_overflow_time[READ] = now; in throtl_downgrade_check()
1996 if (tg->bps[WRITE][LIMIT_LOW]) { in throtl_downgrade_check()
1997 bps = tg->last_bytes_disp[WRITE] * HZ; in throtl_downgrade_check()
1999 if (bps >= tg->bps[WRITE][LIMIT_LOW]) in throtl_downgrade_check()
2000 tg->last_low_overflow_time[WRITE] = now; in throtl_downgrade_check()
2003 if (tg->iops[READ][LIMIT_LOW]) { in throtl_downgrade_check()
2004 iops = tg->last_io_disp[READ] * HZ / elapsed_time; in throtl_downgrade_check()
2005 if (iops >= tg->iops[READ][LIMIT_LOW]) in throtl_downgrade_check()
2006 tg->last_low_overflow_time[READ] = now; in throtl_downgrade_check()
2009 if (tg->iops[WRITE][LIMIT_LOW]) { in throtl_downgrade_check()
2010 iops = tg->last_io_disp[WRITE] * HZ / elapsed_time; in throtl_downgrade_check()
2011 if (iops >= tg->iops[WRITE][LIMIT_LOW]) in throtl_downgrade_check()
2012 tg->last_low_overflow_time[WRITE] = now; in throtl_downgrade_check()
2019 if (throtl_hierarchy_can_downgrade(tg)) in throtl_downgrade_check()
2020 throtl_downgrade_state(tg->td, LIMIT_LOW); in throtl_downgrade_check()
2022 tg->last_bytes_disp[READ] = 0; in throtl_downgrade_check()
2023 tg->last_bytes_disp[WRITE] = 0; in throtl_downgrade_check()
2024 tg->last_io_disp[READ] = 0; in throtl_downgrade_check()
2025 tg->last_io_disp[WRITE] = 0; in throtl_downgrade_check()
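
throtl_downgrade_check() samples each group's rate over the window since last_check_time, bps = last_bytes_disp * HZ / elapsed_time (iops likewise), refreshes last_low_overflow_time whenever the low limit is still being reached, then zeroes the per-window counters; only a hierarchy that stays under its low limits for a full throtl_slice triggers the drop back to LIMIT_LOW. The rate sample alone:

    #include <stdint.h>

    /* rate over the sampling window, as in throtl_downgrade_check();
     * e.g. 4 MiB dispatched over 2*HZ jiffies yields 2 MiB/s */
    static uint64_t window_bps(uint64_t bytes_disp,
                               unsigned long elapsed_jiffies, unsigned int hz)
    {
        return elapsed_jiffies ? bytes_disp * hz / elapsed_jiffies : 0;
    }
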
2028 static void blk_throtl_update_idletime(struct throtl_grp *tg) in blk_throtl_update_idletime() argument
2031 unsigned long last_finish_time = tg->last_finish_time; in blk_throtl_update_idletime()
2034 last_finish_time == tg->checked_last_finish_time) in blk_throtl_update_idletime()
2037 tg->avg_idletime = (tg->avg_idletime * 7 + now - last_finish_time) >> 3; in blk_throtl_update_idletime()
2038 tg->checked_last_finish_time = last_finish_time; in blk_throtl_update_idletime()
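
blk_throtl_update_idletime() maintains avg_idletime as an exponentially weighted moving average with 7/8 weight on history, avg = (avg * 7 + sample) >> 3, where the sample is the gap between the last completion and the next bio's arrival; e.g. an 800 us average and an 80 us gap give (800*7 + 80)/8 = 710 us:

    #include <stdint.h>

    /* EWMA with 1/8 weight on the new sample, as for tg->avg_idletime;
     * ewma_idle(800, 80) == (800*7 + 80) / 8 == 710 */
    static uint64_t ewma_idle(uint64_t avg, uint64_t sample)
    {
        return (avg * 7 + sample) >> 3;
    }
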
2113 static void blk_throtl_assoc_bio(struct throtl_grp *tg, struct bio *bio) in blk_throtl_assoc_bio() argument
2117 bio->bi_cg_private = tg; in blk_throtl_assoc_bio()
2126 struct throtl_grp *tg = blkg_to_tg(blkg ?: q->root_blkg); in blk_throtl_bio() local
2130 struct throtl_data *td = tg->td; in blk_throtl_bio()
2135 if (bio_flagged(bio, BIO_THROTTLED) || !tg->has_rules[rw]) in blk_throtl_bio()
2145 blk_throtl_assoc_bio(tg, bio); in blk_throtl_bio()
2146 blk_throtl_update_idletime(tg); in blk_throtl_bio()
2148 sq = &tg->service_queue; in blk_throtl_bio()
2152 if (tg->last_low_overflow_time[rw] == 0) in blk_throtl_bio()
2153 tg->last_low_overflow_time[rw] = jiffies; in blk_throtl_bio()
2154 throtl_downgrade_check(tg); in blk_throtl_bio()
2155 throtl_upgrade_check(tg); in blk_throtl_bio()
2161 if (!tg_may_dispatch(tg, bio, NULL)) { in blk_throtl_bio()
2162 tg->last_low_overflow_time[rw] = jiffies; in blk_throtl_bio()
2163 if (throtl_can_upgrade(td, tg)) { in blk_throtl_bio()
2171 throtl_charge_bio(tg, bio); in blk_throtl_bio()
2184 throtl_trim_slice(tg, rw); in blk_throtl_bio()
2191 qn = &tg->qnode_on_parent[rw]; in blk_throtl_bio()
2193 tg = sq_to_tg(sq); in blk_throtl_bio()
2194 if (!tg) in blk_throtl_bio()
2201 tg->bytes_disp[rw], bio->bi_iter.bi_size, in blk_throtl_bio()
2202 tg_bps_limit(tg, rw), in blk_throtl_bio()
2203 tg->io_disp[rw], tg_iops_limit(tg, rw), in blk_throtl_bio()
2206 tg->last_low_overflow_time[rw] = jiffies; in blk_throtl_bio()
2209 throtl_add_bio_tg(bio, qn, tg); in blk_throtl_bio()
2218 if (tg->flags & THROTL_TG_WAS_EMPTY) { in blk_throtl_bio()
2219 tg_update_disptime(tg); in blk_throtl_bio()
2220 throtl_schedule_next_dispatch(tg->service_queue.parent_sq, true); in blk_throtl_bio()
2265 struct throtl_grp *tg; in blk_throtl_bio_endio() local
2271 tg = bio->bi_cg_private; in blk_throtl_bio_endio()
2272 if (!tg) in blk_throtl_bio_endio()
2277 tg->last_finish_time = finish_time_ns >> 10; in blk_throtl_bio_endio()
2287 throtl_track_latency(tg->td, blk_stat_size(&bio->bi_issue_stat), in blk_throtl_bio_endio()
2290 if (tg->latency_target && lat >= tg->td->filtered_latency) { in blk_throtl_bio_endio()
2296 threshold = tg->td->avg_buckets[bucket].latency + in blk_throtl_bio_endio()
2297 tg->latency_target; in blk_throtl_bio_endio()
2299 tg->bad_bio_cnt++; in blk_throtl_bio_endio()
2304 tg->bio_cnt++; in blk_throtl_bio_endio()
2307 if (time_after(jiffies, tg->bio_cnt_reset_time) || tg->bio_cnt > 1024) { in blk_throtl_bio_endio()
2308 tg->bio_cnt_reset_time = tg->td->throtl_slice + jiffies; in blk_throtl_bio_endio()
2309 tg->bio_cnt /= 2; in blk_throtl_bio_endio()
2310 tg->bad_bio_cnt /= 2; in blk_throtl_bio_endio()
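
blk_throtl_bio_endio() compares each completed bio's latency against the bucket average plus the group's latency_target, counting misses in bad_bio_cnt; halving both counters once bio_cnt exceeds 1024 or a throtl_slice elapses keeps the bad/total ratio while aging out old samples. The decay step (the kernel uses time_after() for the wrap-safe jiffies comparison):

    #include <stdbool.h>
    #include <stdint.h>

    struct bio_window {
        uint64_t bio_cnt, bad_bio_cnt;
        unsigned long reset_time;    /* jiffy of the next scheduled decay */
    };

    static void account_bio(struct bio_window *w, bool missed_target,
                            unsigned long now, unsigned long slice)
    {
        if (missed_target)
            w->bad_bio_cnt++;
        w->bio_cnt++;

        /* halve both counters: the ratio survives, old samples decay */
        if (now >= w->reset_time || w->bio_cnt > 1024) {
            w->reset_time = now + slice;
            w->bio_cnt /= 2;
            w->bad_bio_cnt /= 2;
        }
    }
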
2322 struct throtl_grp *tg; in tg_drain_bios() local
2324 while ((tg = throtl_rb_first(parent_sq))) { in tg_drain_bios()
2325 struct throtl_service_queue *sq = &tg->service_queue; in tg_drain_bios()
2328 throtl_dequeue_tg(tg); in tg_drain_bios()
2331 tg_dispatch_one_bio(tg, bio_data_dir(bio)); in tg_drain_bios()
2333 tg_dispatch_one_bio(tg, bio_data_dir(bio)); in tg_drain_bios()