
Lines Matching refs:tg (block/blk-throttle.c)

71 	struct throtl_grp	*tg;		/* tg this qnode belongs to */  member
244 static inline struct blkcg_gq *tg_to_blkg(struct throtl_grp *tg) in tg_to_blkg() argument
246 return pd_to_blkg(&tg->pd); in tg_to_blkg()
273 struct throtl_grp *tg = sq_to_tg(sq); in sq_to_td() local
275 if (tg) in sq_to_td()
276 return tg->td; in sq_to_td()
299 static uint64_t tg_bps_limit(struct throtl_grp *tg, int rw) in tg_bps_limit() argument
301 struct blkcg_gq *blkg = tg_to_blkg(tg); in tg_bps_limit()
308 td = tg->td; in tg_bps_limit()
309 ret = tg->bps[rw][td->limit_index]; in tg_bps_limit()
313 tg->iops[rw][td->limit_index]) in tg_bps_limit()
319 if (td->limit_index == LIMIT_MAX && tg->bps[rw][LIMIT_LOW] && in tg_bps_limit()
320 tg->bps[rw][LIMIT_LOW] != tg->bps[rw][LIMIT_MAX]) { in tg_bps_limit()
323 adjusted = throtl_adjusted_limit(tg->bps[rw][LIMIT_LOW], td); in tg_bps_limit()
324 ret = min(tg->bps[rw][LIMIT_MAX], adjusted); in tg_bps_limit()
329 static unsigned int tg_iops_limit(struct throtl_grp *tg, int rw) in tg_iops_limit() argument
331 struct blkcg_gq *blkg = tg_to_blkg(tg); in tg_iops_limit()
338 td = tg->td; in tg_iops_limit()
339 ret = tg->iops[rw][td->limit_index]; in tg_iops_limit()
340 if (ret == 0 && tg->td->limit_index == LIMIT_LOW) { in tg_iops_limit()
343 tg->bps[rw][td->limit_index]) in tg_iops_limit()
349 if (td->limit_index == LIMIT_MAX && tg->iops[rw][LIMIT_LOW] && in tg_iops_limit()
350 tg->iops[rw][LIMIT_LOW] != tg->iops[rw][LIMIT_MAX]) { in tg_iops_limit()
353 adjusted = throtl_adjusted_limit(tg->iops[rw][LIMIT_LOW], td); in tg_iops_limit()
356 ret = min_t(unsigned int, tg->iops[rw][LIMIT_MAX], adjusted); in tg_iops_limit()
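
Read together, the tg_bps_limit()/tg_iops_limit() hits show the limit-selection rule: normally the limit for the current td->limit_index is returned, but when the group runs at LIMIT_MAX with a distinct LIMIT_LOW configured, an upscaled low limit is used and clamped to the max. A minimal userspace model of the bps case follows; the scaling factor inside adjusted_limit() is an assumption (throtl_adjusted_limit() itself is not among these hits), and all types are simplified.

    #include <stdint.h>
    #include <stdio.h>

    enum { LIMIT_LOW, LIMIT_MAX, LIMIT_CNT };

    /* Stand-in for throtl_adjusted_limit(): grow the low limit as the
     * throttle data's scale grows.  The exact factor is an assumption. */
    static uint64_t adjusted_limit(uint64_t low, unsigned int scale)
    {
        return low + (low >> 1) * scale;
    }

    /* Mirrors the LIMIT_MAX branch above: if a distinct LIMIT_LOW is
     * configured, use the adjusted low limit, never exceeding the max. */
    static uint64_t bps_limit(const uint64_t bps[LIMIT_CNT],
                              int limit_index, unsigned int scale)
    {
        uint64_t ret = bps[limit_index];

        if (limit_index == LIMIT_MAX && bps[LIMIT_LOW] &&
            bps[LIMIT_LOW] != bps[LIMIT_MAX]) {
            uint64_t adj = adjusted_limit(bps[LIMIT_LOW], scale);
            ret = adj < bps[LIMIT_MAX] ? adj : bps[LIMIT_MAX];
        }
        return ret;
    }

    int main(void)
    {
        uint64_t bps[LIMIT_CNT] = { 1u << 20, 8u << 20 }; /* 1 MiB/s low, 8 MiB/s max */
        printf("effective bps at scale 2: %llu\n",
               (unsigned long long)bps_limit(bps, LIMIT_MAX, 2));
        return 0;
    }
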
396 static void throtl_qnode_init(struct throtl_qnode *qn, struct throtl_grp *tg) in throtl_qnode_init() argument
400 qn->tg = tg; in throtl_qnode_init()
419 blkg_get(tg_to_blkg(qn->tg)); in throtl_qnode_add_bio()
471 *tg_to_put = qn->tg; in throtl_pop_queued()
473 blkg_put(tg_to_blkg(qn->tg)); in throtl_pop_queued()
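
The qnode hits above sketch a reference-counting discipline: queuing the first bio on a qnode pins the owning group's blkg (blkg_get() at line 419), and popping the last bio either drops the reference (blkg_put() at line 473) or hands the group back through *tg_to_put so the caller can drop it later. A toy model of that hand-off, with all names illustrative:

    struct group { int refcnt; };

    /* First bio on the queue pins the group. */
    static void qnode_add(struct group *g, int *nr_queued)
    {
        if ((*nr_queued)++ == 0)
            g->refcnt++;
    }

    /* Popping the last bio transfers the final reference to the caller
     * via *tg_to_put, or drops it directly when no out-pointer is given. */
    static void qnode_pop(struct group *g, int *nr_queued,
                          struct group **tg_to_put)
    {
        if (--(*nr_queued) == 0) {
            if (tg_to_put)
                *tg_to_put = g;
            else
                g->refcnt--;
        }
    }
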
494 struct throtl_grp *tg; in throtl_pd_alloc() local
497 tg = kzalloc_node(sizeof(*tg), gfp, q->node); in throtl_pd_alloc()
498 if (!tg) in throtl_pd_alloc()
501 if (blkg_rwstat_init(&tg->stat_bytes, gfp)) in throtl_pd_alloc()
504 if (blkg_rwstat_init(&tg->stat_ios, gfp)) in throtl_pd_alloc()
507 throtl_service_queue_init(&tg->service_queue); in throtl_pd_alloc()
510 throtl_qnode_init(&tg->qnode_on_self[rw], tg); in throtl_pd_alloc()
511 throtl_qnode_init(&tg->qnode_on_parent[rw], tg); in throtl_pd_alloc()
514 RB_CLEAR_NODE(&tg->rb_node); in throtl_pd_alloc()
515 tg->bps[READ][LIMIT_MAX] = U64_MAX; in throtl_pd_alloc()
516 tg->bps[WRITE][LIMIT_MAX] = U64_MAX; in throtl_pd_alloc()
517 tg->iops[READ][LIMIT_MAX] = UINT_MAX; in throtl_pd_alloc()
518 tg->iops[WRITE][LIMIT_MAX] = UINT_MAX; in throtl_pd_alloc()
519 tg->bps_conf[READ][LIMIT_MAX] = U64_MAX; in throtl_pd_alloc()
520 tg->bps_conf[WRITE][LIMIT_MAX] = U64_MAX; in throtl_pd_alloc()
521 tg->iops_conf[READ][LIMIT_MAX] = UINT_MAX; in throtl_pd_alloc()
522 tg->iops_conf[WRITE][LIMIT_MAX] = UINT_MAX; in throtl_pd_alloc()
525 tg->latency_target = DFL_LATENCY_TARGET; in throtl_pd_alloc()
526 tg->latency_target_conf = DFL_LATENCY_TARGET; in throtl_pd_alloc()
527 tg->idletime_threshold = DFL_IDLE_THRESHOLD; in throtl_pd_alloc()
528 tg->idletime_threshold_conf = DFL_IDLE_THRESHOLD; in throtl_pd_alloc()
530 return &tg->pd; in throtl_pd_alloc()
533 blkg_rwstat_exit(&tg->stat_bytes); in throtl_pd_alloc()
535 kfree(tg); in throtl_pd_alloc()
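
The throtl_pd_alloc() hits show a new group starting fully open: both LIMIT_MAX limits are "unlimited" (U64_MAX bytes/s, UINT_MAX IOPS) and the LIMIT_LOW slots are left at zero by kzalloc(), meaning "not configured". A simplified model of that default state, with kernel types replaced by stdint ones:

    #include <limits.h>
    #include <stdint.h>
    #include <string.h>

    enum { READ, WRITE, RW_CNT };
    enum { LIMIT_LOW, LIMIT_MAX, LIMIT_CNT };

    struct tg_limits {
        uint64_t bps[RW_CNT][LIMIT_CNT];
        unsigned int iops[RW_CNT][LIMIT_CNT];
    };

    static void tg_limits_init(struct tg_limits *l)
    {
        memset(l, 0, sizeof(*l));       /* LIMIT_LOW = 0 means "unset" */
        for (int rw = READ; rw < RW_CNT; rw++) {
            l->bps[rw][LIMIT_MAX] = UINT64_MAX;  /* no bandwidth cap */
            l->iops[rw][LIMIT_MAX] = UINT_MAX;   /* no IOPS cap */
        }
    }
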
541 struct throtl_grp *tg = pd_to_tg(pd); in throtl_pd_init() local
542 struct blkcg_gq *blkg = tg_to_blkg(tg); in throtl_pd_init()
544 struct throtl_service_queue *sq = &tg->service_queue; in throtl_pd_init()
562 tg->td = td; in throtl_pd_init()
570 static void tg_update_has_rules(struct throtl_grp *tg) in tg_update_has_rules() argument
572 struct throtl_grp *parent_tg = sq_to_tg(tg->service_queue.parent_sq); in tg_update_has_rules()
573 struct throtl_data *td = tg->td; in tg_update_has_rules()
577 tg->has_rules[rw] = (parent_tg && parent_tg->has_rules[rw]) || in tg_update_has_rules()
579 (tg_bps_limit(tg, rw) != U64_MAX || in tg_update_has_rules()
580 tg_iops_limit(tg, rw) != UINT_MAX)); in tg_update_has_rules()
585 struct throtl_grp *tg = pd_to_tg(pd); in throtl_pd_online() local
590 tg_update_has_rules(tg); in throtl_pd_online()
601 struct throtl_grp *tg = blkg_to_tg(blkg); in blk_throtl_update_limit_valid() local
603 if (tg->bps[READ][LIMIT_LOW] || tg->bps[WRITE][LIMIT_LOW] || in blk_throtl_update_limit_valid()
604 tg->iops[READ][LIMIT_LOW] || tg->iops[WRITE][LIMIT_LOW]) { in blk_throtl_update_limit_valid()
617 struct throtl_grp *tg = pd_to_tg(pd); in throtl_pd_offline() local
619 tg->bps[READ][LIMIT_LOW] = 0; in throtl_pd_offline()
620 tg->bps[WRITE][LIMIT_LOW] = 0; in throtl_pd_offline()
621 tg->iops[READ][LIMIT_LOW] = 0; in throtl_pd_offline()
622 tg->iops[WRITE][LIMIT_LOW] = 0; in throtl_pd_offline()
624 blk_throtl_update_limit_valid(tg->td); in throtl_pd_offline()
626 if (!tg->td->limit_valid[tg->td->limit_index]) in throtl_pd_offline()
627 throtl_upgrade_state(tg->td); in throtl_pd_offline()
632 struct throtl_grp *tg = pd_to_tg(pd); in throtl_pd_free() local
634 del_timer_sync(&tg->service_queue.pending_timer); in throtl_pd_free()
635 blkg_rwstat_exit(&tg->stat_bytes); in throtl_pd_free()
636 blkg_rwstat_exit(&tg->stat_ios); in throtl_pd_free()
637 kfree(tg); in throtl_pd_free()
662 struct throtl_grp *tg; in update_min_dispatch_time() local
664 tg = throtl_rb_first(parent_sq); in update_min_dispatch_time()
665 if (!tg) in update_min_dispatch_time()
668 parent_sq->first_pending_disptime = tg->disptime; in update_min_dispatch_time()
671 static void tg_service_queue_add(struct throtl_grp *tg) in tg_service_queue_add() argument
673 struct throtl_service_queue *parent_sq = tg->service_queue.parent_sq; in tg_service_queue_add()
677 unsigned long key = tg->disptime; in tg_service_queue_add()
692 rb_link_node(&tg->rb_node, parent, node); in tg_service_queue_add()
693 rb_insert_color_cached(&tg->rb_node, &parent_sq->pending_tree, in tg_service_queue_add()
697 static void throtl_enqueue_tg(struct throtl_grp *tg) in throtl_enqueue_tg() argument
699 if (!(tg->flags & THROTL_TG_PENDING)) { in throtl_enqueue_tg()
700 tg_service_queue_add(tg); in throtl_enqueue_tg()
701 tg->flags |= THROTL_TG_PENDING; in throtl_enqueue_tg()
702 tg->service_queue.parent_sq->nr_pending++; in throtl_enqueue_tg()
706 static void throtl_dequeue_tg(struct throtl_grp *tg) in throtl_dequeue_tg() argument
708 if (tg->flags & THROTL_TG_PENDING) { in throtl_dequeue_tg()
709 throtl_rb_erase(&tg->rb_node, tg->service_queue.parent_sq); in throtl_dequeue_tg()
710 tg->flags &= ~THROTL_TG_PENDING; in throtl_dequeue_tg()
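
tg_service_queue_add() keys the parent's pending_tree on tg->disptime, so the cached-leftmost node is always the group due soonest; throtl_enqueue_tg()/throtl_dequeue_tg() keep THROTL_TG_PENDING and nr_pending consistent with tree membership. The ordering relies on a wraparound-safe jiffies comparison, in the style of the kernel's time_before():

    #include <stdbool.h>

    /* Wraparound-safe "a is earlier than b" over an unsigned tick
     * counter; sketch of the comparison the disptime key depends on. */
    static inline bool disptime_before(unsigned long a, unsigned long b)
    {
        return (long)(a - b) < 0;
    }
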
771 static inline void throtl_start_new_slice_with_credit(struct throtl_grp *tg, in throtl_start_new_slice_with_credit() argument
774 tg->bytes_disp[rw] = 0; in throtl_start_new_slice_with_credit()
775 tg->io_disp[rw] = 0; in throtl_start_new_slice_with_credit()
777 atomic_set(&tg->io_split_cnt[rw], 0); in throtl_start_new_slice_with_credit()
785 if (time_after_eq(start, tg->slice_start[rw])) in throtl_start_new_slice_with_credit()
786 tg->slice_start[rw] = start; in throtl_start_new_slice_with_credit()
788 tg->slice_end[rw] = jiffies + tg->td->throtl_slice; in throtl_start_new_slice_with_credit()
789 throtl_log(&tg->service_queue, in throtl_start_new_slice_with_credit()
791 rw == READ ? 'R' : 'W', tg->slice_start[rw], in throtl_start_new_slice_with_credit()
792 tg->slice_end[rw], jiffies); in throtl_start_new_slice_with_credit()
795 static inline void throtl_start_new_slice(struct throtl_grp *tg, bool rw) in throtl_start_new_slice() argument
797 tg->bytes_disp[rw] = 0; in throtl_start_new_slice()
798 tg->io_disp[rw] = 0; in throtl_start_new_slice()
799 tg->slice_start[rw] = jiffies; in throtl_start_new_slice()
800 tg->slice_end[rw] = jiffies + tg->td->throtl_slice; in throtl_start_new_slice()
802 atomic_set(&tg->io_split_cnt[rw], 0); in throtl_start_new_slice()
804 throtl_log(&tg->service_queue, in throtl_start_new_slice()
806 rw == READ ? 'R' : 'W', tg->slice_start[rw], in throtl_start_new_slice()
807 tg->slice_end[rw], jiffies); in throtl_start_new_slice()
810 static inline void throtl_set_slice_end(struct throtl_grp *tg, bool rw, in throtl_set_slice_end() argument
813 tg->slice_end[rw] = roundup(jiffy_end, tg->td->throtl_slice); in throtl_set_slice_end()
816 static inline void throtl_extend_slice(struct throtl_grp *tg, bool rw, in throtl_extend_slice() argument
819 throtl_set_slice_end(tg, rw, jiffy_end); in throtl_extend_slice()
820 throtl_log(&tg->service_queue, in throtl_extend_slice()
822 rw == READ ? 'R' : 'W', tg->slice_start[rw], in throtl_extend_slice()
823 tg->slice_end[rw], jiffies); in throtl_extend_slice()
827 static bool throtl_slice_used(struct throtl_grp *tg, bool rw) in throtl_slice_used() argument
829 if (time_in_range(jiffies, tg->slice_start[rw], tg->slice_end[rw])) in throtl_slice_used()
836 static inline void throtl_trim_slice(struct throtl_grp *tg, bool rw) in throtl_trim_slice() argument
841 BUG_ON(time_before(tg->slice_end[rw], tg->slice_start[rw])); in throtl_trim_slice()
848 if (throtl_slice_used(tg, rw)) in throtl_trim_slice()
859 throtl_set_slice_end(tg, rw, jiffies + tg->td->throtl_slice); in throtl_trim_slice()
861 time_elapsed = jiffies - tg->slice_start[rw]; in throtl_trim_slice()
863 nr_slices = time_elapsed / tg->td->throtl_slice; in throtl_trim_slice()
867 tmp = tg_bps_limit(tg, rw) * tg->td->throtl_slice * nr_slices; in throtl_trim_slice()
871 io_trim = (tg_iops_limit(tg, rw) * tg->td->throtl_slice * nr_slices) / in throtl_trim_slice()
877 if (tg->bytes_disp[rw] >= bytes_trim) in throtl_trim_slice()
878 tg->bytes_disp[rw] -= bytes_trim; in throtl_trim_slice()
880 tg->bytes_disp[rw] = 0; in throtl_trim_slice()
882 if (tg->io_disp[rw] >= io_trim) in throtl_trim_slice()
883 tg->io_disp[rw] -= io_trim; in throtl_trim_slice()
885 tg->io_disp[rw] = 0; in throtl_trim_slice()
887 tg->slice_start[rw] += nr_slices * tg->td->throtl_slice; in throtl_trim_slice()
889 throtl_log(&tg->service_queue, in throtl_trim_slice()
892 tg->slice_start[rw], tg->slice_end[rw], jiffies); in throtl_trim_slice()
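
throtl_trim_slice() keeps an active group from hoarding credit: for every fully elapsed slice, the byte and I/O budget that slice would have allowed is forgiven and slice_start advances. A userspace model of the byte-side arithmetic, assuming HZ = 100 purely for illustration:

    #include <stdint.h>

    #define HZ 100  /* assumption: 10 ms ticks, illustration only */

    struct slice {
        uint64_t bytes_disp;
        unsigned long slice_start;
    };

    /* Forgive the budget that nr_slices fully elapsed slices would have
     * allowed, then advance slice_start; the io_disp path is analogous. */
    static void trim_slice(struct slice *s, unsigned long now,
                           unsigned long slice_len, uint64_t bps)
    {
        unsigned long nr_slices = (now - s->slice_start) / slice_len;
        uint64_t bytes_trim;

        if (!nr_slices)
            return;

        bytes_trim = bps * slice_len * nr_slices / HZ;
        s->bytes_disp = s->bytes_disp > bytes_trim ?
                        s->bytes_disp - bytes_trim : 0;
        s->slice_start += nr_slices * slice_len;
    }
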
895 static bool tg_with_in_iops_limit(struct throtl_grp *tg, struct bio *bio, in tg_with_in_iops_limit() argument
909 jiffy_elapsed = jiffies - tg->slice_start[rw]; in tg_with_in_iops_limit()
912 jiffy_elapsed_rnd = roundup(jiffy_elapsed + 1, tg->td->throtl_slice); in tg_with_in_iops_limit()
929 if (tg->io_disp[rw] + 1 <= io_allowed) { in tg_with_in_iops_limit()
943 static bool tg_with_in_bps_limit(struct throtl_grp *tg, struct bio *bio, in tg_with_in_bps_limit() argument
957 jiffy_elapsed = jiffy_elapsed_rnd = jiffies - tg->slice_start[rw]; in tg_with_in_bps_limit()
961 jiffy_elapsed_rnd = tg->td->throtl_slice; in tg_with_in_bps_limit()
963 jiffy_elapsed_rnd = roundup(jiffy_elapsed_rnd, tg->td->throtl_slice); in tg_with_in_bps_limit()
967 if (tg->bytes_disp[rw] + bio_size <= bytes_allowed) { in tg_with_in_bps_limit()
974 extra_bytes = tg->bytes_disp[rw] + bio_size - bytes_allowed; in tg_with_in_bps_limit()
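
tg_with_in_bps_limit() admits the bio if the elapsed-time budget covers it, and otherwise reports how long until enough budget accrues for the extra bytes. A simplified model (no rounding of elapsed time to throtl_slice boundaries, HZ = 100 assumed, and bps > 0 assumed since the caller skips the check for unlimited groups):

    #include <stdbool.h>
    #include <stdint.h>

    #define HZ 100

    static bool within_bps_limit(uint64_t bytes_disp, uint64_t bio_size,
                                 uint64_t bps, unsigned long elapsed,
                                 unsigned long *wait)
    {
        uint64_t bytes_allowed = bps * elapsed / HZ;

        if (bytes_disp + bio_size <= bytes_allowed) {
            if (wait)
                *wait = 0;
            return true;
        }

        /* Extra bytes over budget; wait until the rate covers them. */
        uint64_t extra = bytes_disp + bio_size - bytes_allowed;
        if (wait)
            *wait = (unsigned long)((extra * HZ + bps - 1) / bps);
        return false;
    }
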
994 static bool tg_may_dispatch(struct throtl_grp *tg, struct bio *bio, in tg_may_dispatch() argument
999 u64 bps_limit = tg_bps_limit(tg, rw); in tg_may_dispatch()
1000 u32 iops_limit = tg_iops_limit(tg, rw); in tg_may_dispatch()
1008 BUG_ON(tg->service_queue.nr_queued[rw] && in tg_may_dispatch()
1009 bio != throtl_peek_queued(&tg->service_queue.queued[rw])); in tg_may_dispatch()
1025 if (throtl_slice_used(tg, rw) && !(tg->service_queue.nr_queued[rw])) in tg_may_dispatch()
1026 throtl_start_new_slice(tg, rw); in tg_may_dispatch()
1028 if (time_before(tg->slice_end[rw], in tg_may_dispatch()
1029 jiffies + tg->td->throtl_slice)) in tg_may_dispatch()
1030 throtl_extend_slice(tg, rw, in tg_may_dispatch()
1031 jiffies + tg->td->throtl_slice); in tg_may_dispatch()
1035 tg->io_disp[rw] += atomic_xchg(&tg->io_split_cnt[rw], 0); in tg_may_dispatch()
1037 if (tg_with_in_bps_limit(tg, bio, bps_limit, &bps_wait) && in tg_may_dispatch()
1038 tg_with_in_iops_limit(tg, bio, iops_limit, &iops_wait)) { in tg_may_dispatch()
1049 if (time_before(tg->slice_end[rw], jiffies + max_wait)) in tg_may_dispatch()
1050 throtl_extend_slice(tg, rw, jiffies + max_wait); in tg_may_dispatch()
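
tg_may_dispatch() admits a bio only when both the bps and iops checks pass; on failure the caller waits for the longer of the two delays, and the slice is extended to cover that wait so the budget math above stays valid. The combination logic in isolation:

    #include <stdbool.h>

    /* Both sub-checks must pass; otherwise wait for the slower one. */
    static bool may_dispatch(bool bps_ok, unsigned long bps_wait,
                             bool iops_ok, unsigned long iops_wait,
                             unsigned long *wait)
    {
        if (bps_ok && iops_ok) {
            if (wait)
                *wait = 0;
            return true;
        }
        if (wait)
            *wait = bps_wait > iops_wait ? bps_wait : iops_wait;
        return false;
    }
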
1055 static void throtl_charge_bio(struct throtl_grp *tg, struct bio *bio) in throtl_charge_bio() argument
1061 tg->bytes_disp[rw] += bio_size; in throtl_charge_bio()
1062 tg->io_disp[rw]++; in throtl_charge_bio()
1063 tg->last_bytes_disp[rw] += bio_size; in throtl_charge_bio()
1064 tg->last_io_disp[rw]++; in throtl_charge_bio()
1086 struct throtl_grp *tg) in throtl_add_bio_tg() argument
1088 struct throtl_service_queue *sq = &tg->service_queue; in throtl_add_bio_tg()
1092 qn = &tg->qnode_on_self[rw]; in throtl_add_bio_tg()
1101 tg->flags |= THROTL_TG_WAS_EMPTY; in throtl_add_bio_tg()
1106 throtl_enqueue_tg(tg); in throtl_add_bio_tg()
1109 static void tg_update_disptime(struct throtl_grp *tg) in tg_update_disptime() argument
1111 struct throtl_service_queue *sq = &tg->service_queue; in tg_update_disptime()
1117 tg_may_dispatch(tg, bio, &read_wait); in tg_update_disptime()
1121 tg_may_dispatch(tg, bio, &write_wait); in tg_update_disptime()
1127 throtl_dequeue_tg(tg); in tg_update_disptime()
1128 tg->disptime = disptime; in tg_update_disptime()
1129 throtl_enqueue_tg(tg); in tg_update_disptime()
1132 tg->flags &= ~THROTL_TG_WAS_EMPTY; in tg_update_disptime()
1145 static void tg_dispatch_one_bio(struct throtl_grp *tg, bool rw) in tg_dispatch_one_bio() argument
1147 struct throtl_service_queue *sq = &tg->service_queue; in tg_dispatch_one_bio()
1162 throtl_charge_bio(tg, bio); in tg_dispatch_one_bio()
1172 throtl_add_bio_tg(bio, &tg->qnode_on_parent[rw], parent_tg); in tg_dispatch_one_bio()
1173 start_parent_slice_with_credit(tg, parent_tg, rw); in tg_dispatch_one_bio()
1175 throtl_qnode_add_bio(bio, &tg->qnode_on_parent[rw], in tg_dispatch_one_bio()
1177 BUG_ON(tg->td->nr_queued[rw] <= 0); in tg_dispatch_one_bio()
1178 tg->td->nr_queued[rw]--; in tg_dispatch_one_bio()
1181 throtl_trim_slice(tg, rw); in tg_dispatch_one_bio()
1187 static int throtl_dispatch_tg(struct throtl_grp *tg) in throtl_dispatch_tg() argument
1189 struct throtl_service_queue *sq = &tg->service_queue; in throtl_dispatch_tg()
1198 tg_may_dispatch(tg, bio, NULL)) { in throtl_dispatch_tg()
1200 tg_dispatch_one_bio(tg, bio_data_dir(bio)); in throtl_dispatch_tg()
1208 tg_may_dispatch(tg, bio, NULL)) { in throtl_dispatch_tg()
1210 tg_dispatch_one_bio(tg, bio_data_dir(bio)); in throtl_dispatch_tg()
1225 struct throtl_grp *tg; in throtl_select_dispatch() local
1231 tg = throtl_rb_first(parent_sq); in throtl_select_dispatch()
1232 if (!tg) in throtl_select_dispatch()
1235 if (time_before(jiffies, tg->disptime)) in throtl_select_dispatch()
1238 throtl_dequeue_tg(tg); in throtl_select_dispatch()
1240 nr_disp += throtl_dispatch_tg(tg); in throtl_select_dispatch()
1242 sq = &tg->service_queue; in throtl_select_dispatch()
1244 tg_update_disptime(tg); in throtl_select_dispatch()
1273 struct throtl_grp *tg = sq_to_tg(sq); in throtl_pending_timer_fn() local
1313 if (tg->flags & THROTL_TG_WAS_EMPTY) { in throtl_pending_timer_fn()
1314 tg_update_disptime(tg); in throtl_pending_timer_fn()
1318 tg = sq_to_tg(sq); in throtl_pending_timer_fn()
1368 struct throtl_grp *tg = pd_to_tg(pd); in tg_prfill_conf_u64() local
1369 u64 v = *(u64 *)((void *)tg + off); in tg_prfill_conf_u64()
1379 struct throtl_grp *tg = pd_to_tg(pd); in tg_prfill_conf_uint() local
1380 unsigned int v = *(unsigned int *)((void *)tg + off); in tg_prfill_conf_uint()
1401 static void tg_conf_updated(struct throtl_grp *tg, bool global) in tg_conf_updated() argument
1403 struct throtl_service_queue *sq = &tg->service_queue; in tg_conf_updated()
1407 throtl_log(&tg->service_queue, in tg_conf_updated()
1409 tg_bps_limit(tg, READ), tg_bps_limit(tg, WRITE), in tg_conf_updated()
1410 tg_iops_limit(tg, READ), tg_iops_limit(tg, WRITE)); in tg_conf_updated()
1421 global ? tg->td->queue->root_blkg : tg_to_blkg(tg)) { in tg_conf_updated()
1450 throtl_start_new_slice(tg, READ); in tg_conf_updated()
1451 throtl_start_new_slice(tg, WRITE); in tg_conf_updated()
1453 if (tg->flags & THROTL_TG_PENDING) { in tg_conf_updated()
1454 tg_update_disptime(tg); in tg_conf_updated()
1464 struct throtl_grp *tg; in tg_set_conf() local
1478 tg = blkg_to_tg(ctx.blkg); in tg_set_conf()
1481 *(u64 *)((void *)tg + of_cft(of)->private) = v; in tg_set_conf()
1483 *(unsigned int *)((void *)tg + of_cft(of)->private) = v; in tg_set_conf()
1485 tg_conf_updated(tg, false); in tg_set_conf()
1581 struct throtl_grp *tg = pd_to_tg(pd); in tg_prfill_limit() local
1600 if (tg->bps_conf[READ][off] == bps_dft && in tg_prfill_limit()
1601 tg->bps_conf[WRITE][off] == bps_dft && in tg_prfill_limit()
1602 tg->iops_conf[READ][off] == iops_dft && in tg_prfill_limit()
1603 tg->iops_conf[WRITE][off] == iops_dft && in tg_prfill_limit()
1605 (tg->idletime_threshold_conf == DFL_IDLE_THRESHOLD && in tg_prfill_limit()
1606 tg->latency_target_conf == DFL_LATENCY_TARGET))) in tg_prfill_limit()
1609 if (tg->bps_conf[READ][off] != U64_MAX) in tg_prfill_limit()
1611 tg->bps_conf[READ][off]); in tg_prfill_limit()
1612 if (tg->bps_conf[WRITE][off] != U64_MAX) in tg_prfill_limit()
1614 tg->bps_conf[WRITE][off]); in tg_prfill_limit()
1615 if (tg->iops_conf[READ][off] != UINT_MAX) in tg_prfill_limit()
1617 tg->iops_conf[READ][off]); in tg_prfill_limit()
1618 if (tg->iops_conf[WRITE][off] != UINT_MAX) in tg_prfill_limit()
1620 tg->iops_conf[WRITE][off]); in tg_prfill_limit()
1622 if (tg->idletime_threshold_conf == ULONG_MAX) in tg_prfill_limit()
1626 tg->idletime_threshold_conf); in tg_prfill_limit()
1628 if (tg->latency_target_conf == ULONG_MAX) in tg_prfill_limit()
1632 " latency=%lu", tg->latency_target_conf); in tg_prfill_limit()
1653 struct throtl_grp *tg; in tg_set_limit() local
1664 tg = blkg_to_tg(ctx.blkg); in tg_set_limit()
1666 v[0] = tg->bps_conf[READ][index]; in tg_set_limit()
1667 v[1] = tg->bps_conf[WRITE][index]; in tg_set_limit()
1668 v[2] = tg->iops_conf[READ][index]; in tg_set_limit()
1669 v[3] = tg->iops_conf[WRITE][index]; in tg_set_limit()
1671 idle_time = tg->idletime_threshold_conf; in tg_set_limit()
1672 latency_time = tg->latency_target_conf; in tg_set_limit()
1712 tg->bps_conf[READ][index] = v[0]; in tg_set_limit()
1713 tg->bps_conf[WRITE][index] = v[1]; in tg_set_limit()
1714 tg->iops_conf[READ][index] = v[2]; in tg_set_limit()
1715 tg->iops_conf[WRITE][index] = v[3]; in tg_set_limit()
1718 tg->bps[READ][index] = v[0]; in tg_set_limit()
1719 tg->bps[WRITE][index] = v[1]; in tg_set_limit()
1720 tg->iops[READ][index] = v[2]; in tg_set_limit()
1721 tg->iops[WRITE][index] = v[3]; in tg_set_limit()
1723 tg->bps[READ][LIMIT_LOW] = min(tg->bps_conf[READ][LIMIT_LOW], in tg_set_limit()
1724 tg->bps_conf[READ][LIMIT_MAX]); in tg_set_limit()
1725 tg->bps[WRITE][LIMIT_LOW] = min(tg->bps_conf[WRITE][LIMIT_LOW], in tg_set_limit()
1726 tg->bps_conf[WRITE][LIMIT_MAX]); in tg_set_limit()
1727 tg->iops[READ][LIMIT_LOW] = min(tg->iops_conf[READ][LIMIT_LOW], in tg_set_limit()
1728 tg->iops_conf[READ][LIMIT_MAX]); in tg_set_limit()
1729 tg->iops[WRITE][LIMIT_LOW] = min(tg->iops_conf[WRITE][LIMIT_LOW], in tg_set_limit()
1730 tg->iops_conf[WRITE][LIMIT_MAX]); in tg_set_limit()
1731 tg->idletime_threshold_conf = idle_time; in tg_set_limit()
1732 tg->latency_target_conf = latency_time; in tg_set_limit()
1735 if (!(tg->bps[READ][LIMIT_LOW] || tg->iops[READ][LIMIT_LOW] || in tg_set_limit()
1736 tg->bps[WRITE][LIMIT_LOW] || tg->iops[WRITE][LIMIT_LOW]) || in tg_set_limit()
1737 tg->idletime_threshold_conf == DFL_IDLE_THRESHOLD || in tg_set_limit()
1738 tg->latency_target_conf == DFL_LATENCY_TARGET) { in tg_set_limit()
1739 tg->bps[READ][LIMIT_LOW] = 0; in tg_set_limit()
1740 tg->bps[WRITE][LIMIT_LOW] = 0; in tg_set_limit()
1741 tg->iops[READ][LIMIT_LOW] = 0; in tg_set_limit()
1742 tg->iops[WRITE][LIMIT_LOW] = 0; in tg_set_limit()
1743 tg->idletime_threshold = DFL_IDLE_THRESHOLD; in tg_set_limit()
1744 tg->latency_target = DFL_LATENCY_TARGET; in tg_set_limit()
1746 tg->idletime_threshold = tg->idletime_threshold_conf; in tg_set_limit()
1747 tg->latency_target = tg->latency_target_conf; in tg_set_limit()
1750 blk_throtl_update_limit_valid(tg->td); in tg_set_limit()
1751 if (tg->td->limit_valid[LIMIT_LOW]) { in tg_set_limit()
1753 tg->td->limit_index = LIMIT_LOW; in tg_set_limit()
1755 tg->td->limit_index = LIMIT_MAX; in tg_set_limit()
1756 tg_conf_updated(tg, index == LIMIT_LOW && in tg_set_limit()
1757 tg->td->limit_valid[LIMIT_LOW]); in tg_set_limit()
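
A notable detail in the tg_set_limit() hits: the effective LIMIT_LOW values are the configured low limits clamped by the configured max (lines 1723-1730), so a low threshold can never exceed its own ceiling, and an all-default low configuration collapses back to plain LIMIT_MAX behaviour. The clamp in isolation:

    #include <stdint.h>

    /* Effective low limit = min(configured low, configured max), per
     * the min()/min_t() calls above.  Illustrative helper, not kernel API. */
    static inline uint64_t effective_low(uint64_t low_conf, uint64_t max_conf)
    {
        return low_conf < max_conf ? low_conf : max_conf;
    }
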
1802 static unsigned long __tg_last_low_overflow_time(struct throtl_grp *tg) in __tg_last_low_overflow_time() argument
1806 if (tg->bps[READ][LIMIT_LOW] || tg->iops[READ][LIMIT_LOW]) in __tg_last_low_overflow_time()
1807 rtime = tg->last_low_overflow_time[READ]; in __tg_last_low_overflow_time()
1808 if (tg->bps[WRITE][LIMIT_LOW] || tg->iops[WRITE][LIMIT_LOW]) in __tg_last_low_overflow_time()
1809 wtime = tg->last_low_overflow_time[WRITE]; in __tg_last_low_overflow_time()
1814 static unsigned long tg_last_low_overflow_time(struct throtl_grp *tg) in tg_last_low_overflow_time() argument
1817 struct throtl_grp *parent = tg; in tg_last_low_overflow_time()
1818 unsigned long ret = __tg_last_low_overflow_time(tg); in tg_last_low_overflow_time()
1841 static bool throtl_tg_is_idle(struct throtl_grp *tg) in throtl_tg_is_idle() argument
1853 time = min_t(unsigned long, MAX_IDLE_TIME, 4 * tg->idletime_threshold); in throtl_tg_is_idle()
1854 ret = tg->latency_target == DFL_LATENCY_TARGET || in throtl_tg_is_idle()
1855 tg->idletime_threshold == DFL_IDLE_THRESHOLD || in throtl_tg_is_idle()
1856 (ktime_get_ns() >> 10) - tg->last_finish_time > time || in throtl_tg_is_idle()
1857 tg->avg_idletime > tg->idletime_threshold || in throtl_tg_is_idle()
1858 (tg->latency_target && tg->bio_cnt && in throtl_tg_is_idle()
1859 tg->bad_bio_cnt * 5 < tg->bio_cnt); in throtl_tg_is_idle()
1860 throtl_log(&tg->service_queue, in throtl_tg_is_idle()
1862 tg->avg_idletime, tg->idletime_threshold, tg->bad_bio_cnt, in throtl_tg_is_idle()
1863 tg->bio_cnt, ret, tg->td->scale); in throtl_tg_is_idle()
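
throtl_tg_is_idle() treats a group as idle, and therefore unable to hold everyone at LIMIT_LOW, when any of several conditions holds: targets left at their defaults, no I/O finished for roughly four times the idle threshold, average idle time above the threshold, or fewer than one in five bios missing the latency target. A simplified restatement over plain inputs (microsecond units and the omission of the MAX_IDLE_TIME cap are assumptions here):

    #include <stdbool.h>
    #include <stdint.h>

    static bool tg_is_idle(uint64_t now_us, uint64_t last_finish_us,
                           unsigned long avg_idle_us,
                           unsigned long idle_thresh_us,
                           unsigned long bio_cnt, unsigned long bad_bio_cnt)
    {
        return now_us - last_finish_us > 4ull * idle_thresh_us ||
               avg_idle_us > idle_thresh_us ||
               (bio_cnt && bad_bio_cnt * 5 < bio_cnt);
    }
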
1867 static bool throtl_tg_can_upgrade(struct throtl_grp *tg) in throtl_tg_can_upgrade() argument
1869 struct throtl_service_queue *sq = &tg->service_queue; in throtl_tg_can_upgrade()
1876 read_limit = tg->bps[READ][LIMIT_LOW] || tg->iops[READ][LIMIT_LOW]; in throtl_tg_can_upgrade()
1877 write_limit = tg->bps[WRITE][LIMIT_LOW] || tg->iops[WRITE][LIMIT_LOW]; in throtl_tg_can_upgrade()
1888 tg_last_low_overflow_time(tg) + tg->td->throtl_slice) && in throtl_tg_can_upgrade()
1889 throtl_tg_is_idle(tg)) in throtl_tg_can_upgrade()
1894 static bool throtl_hierarchy_can_upgrade(struct throtl_grp *tg) in throtl_hierarchy_can_upgrade() argument
1897 if (throtl_tg_can_upgrade(tg)) in throtl_hierarchy_can_upgrade()
1899 tg = sq_to_tg(tg->service_queue.parent_sq); in throtl_hierarchy_can_upgrade()
1900 if (!tg || !tg_to_blkg(tg)->parent) in throtl_hierarchy_can_upgrade()
1920 struct throtl_grp *tg = blkg_to_tg(blkg); in throtl_can_upgrade() local
1922 if (tg == this_tg) in throtl_can_upgrade()
1924 if (!list_empty(&tg_to_blkg(tg)->blkcg->css.children)) in throtl_can_upgrade()
1926 if (!throtl_hierarchy_can_upgrade(tg)) { in throtl_can_upgrade()
1935 static void throtl_upgrade_check(struct throtl_grp *tg) in throtl_upgrade_check() argument
1939 if (tg->td->limit_index != LIMIT_LOW) in throtl_upgrade_check()
1942 if (time_after(tg->last_check_time + tg->td->throtl_slice, now)) in throtl_upgrade_check()
1945 tg->last_check_time = now; in throtl_upgrade_check()
1948 __tg_last_low_overflow_time(tg) + tg->td->throtl_slice)) in throtl_upgrade_check()
1951 if (throtl_can_upgrade(tg->td, NULL)) in throtl_upgrade_check()
1952 throtl_upgrade_state(tg->td); in throtl_upgrade_check()
1966 struct throtl_grp *tg = blkg_to_tg(blkg); in throtl_upgrade_state() local
1967 struct throtl_service_queue *sq = &tg->service_queue; in throtl_upgrade_state()
1969 tg->disptime = jiffies - 1; in throtl_upgrade_state()
1993 static bool throtl_tg_can_downgrade(struct throtl_grp *tg) in throtl_tg_can_downgrade() argument
1995 struct throtl_data *td = tg->td; in throtl_tg_can_downgrade()
2003 time_after_eq(now, tg_last_low_overflow_time(tg) + in throtl_tg_can_downgrade()
2005 (!throtl_tg_is_idle(tg) || in throtl_tg_can_downgrade()
2006 !list_empty(&tg_to_blkg(tg)->blkcg->css.children))) in throtl_tg_can_downgrade()
2011 static bool throtl_hierarchy_can_downgrade(struct throtl_grp *tg) in throtl_hierarchy_can_downgrade() argument
2014 if (!throtl_tg_can_downgrade(tg)) in throtl_hierarchy_can_downgrade()
2016 tg = sq_to_tg(tg->service_queue.parent_sq); in throtl_hierarchy_can_downgrade()
2017 if (!tg || !tg_to_blkg(tg)->parent) in throtl_hierarchy_can_downgrade()
2023 static void throtl_downgrade_check(struct throtl_grp *tg) in throtl_downgrade_check() argument
2030 if (tg->td->limit_index != LIMIT_MAX || in throtl_downgrade_check()
2031 !tg->td->limit_valid[LIMIT_LOW]) in throtl_downgrade_check()
2033 if (!list_empty(&tg_to_blkg(tg)->blkcg->css.children)) in throtl_downgrade_check()
2035 if (time_after(tg->last_check_time + tg->td->throtl_slice, now)) in throtl_downgrade_check()
2038 elapsed_time = now - tg->last_check_time; in throtl_downgrade_check()
2039 tg->last_check_time = now; in throtl_downgrade_check()
2041 if (time_before(now, tg_last_low_overflow_time(tg) + in throtl_downgrade_check()
2042 tg->td->throtl_slice)) in throtl_downgrade_check()
2045 if (tg->bps[READ][LIMIT_LOW]) { in throtl_downgrade_check()
2046 bps = tg->last_bytes_disp[READ] * HZ; in throtl_downgrade_check()
2048 if (bps >= tg->bps[READ][LIMIT_LOW]) in throtl_downgrade_check()
2049 tg->last_low_overflow_time[READ] = now; in throtl_downgrade_check()
2052 if (tg->bps[WRITE][LIMIT_LOW]) { in throtl_downgrade_check()
2053 bps = tg->last_bytes_disp[WRITE] * HZ; in throtl_downgrade_check()
2055 if (bps >= tg->bps[WRITE][LIMIT_LOW]) in throtl_downgrade_check()
2056 tg->last_low_overflow_time[WRITE] = now; in throtl_downgrade_check()
2059 if (tg->iops[READ][LIMIT_LOW]) { in throtl_downgrade_check()
2060 tg->last_io_disp[READ] += atomic_xchg(&tg->last_io_split_cnt[READ], 0); in throtl_downgrade_check()
2061 iops = tg->last_io_disp[READ] * HZ / elapsed_time; in throtl_downgrade_check()
2062 if (iops >= tg->iops[READ][LIMIT_LOW]) in throtl_downgrade_check()
2063 tg->last_low_overflow_time[READ] = now; in throtl_downgrade_check()
2066 if (tg->iops[WRITE][LIMIT_LOW]) { in throtl_downgrade_check()
2067 tg->last_io_disp[WRITE] += atomic_xchg(&tg->last_io_split_cnt[WRITE], 0); in throtl_downgrade_check()
2068 iops = tg->last_io_disp[WRITE] * HZ / elapsed_time; in throtl_downgrade_check()
2069 if (iops >= tg->iops[WRITE][LIMIT_LOW]) in throtl_downgrade_check()
2070 tg->last_low_overflow_time[WRITE] = now; in throtl_downgrade_check()
2077 if (throtl_hierarchy_can_downgrade(tg)) in throtl_downgrade_check()
2078 throtl_downgrade_state(tg->td); in throtl_downgrade_check()
2080 tg->last_bytes_disp[READ] = 0; in throtl_downgrade_check()
2081 tg->last_bytes_disp[WRITE] = 0; in throtl_downgrade_check()
2082 tg->last_io_disp[READ] = 0; in throtl_downgrade_check()
2083 tg->last_io_disp[WRITE] = 0; in throtl_downgrade_check()
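
The throtl_downgrade_check() hits estimate each direction's recent rate as bytes (or I/Os) dispatched in the window times HZ divided by the elapsed ticks; a group still meeting its low limit has its "last overflow" stamp refreshed, which postpones any downgrade. A model of the bps side (the division by elapsed_time is implied by the iops lines above; HZ = 100 is an assumption):

    #include <stdbool.h>
    #include <stdint.h>

    #define HZ 100

    /* True if the group's recent throughput still reaches its low
     * limit; the iops path is analogous. */
    static bool still_meets_low_bps(uint64_t bytes_disp,
                                    unsigned long elapsed_ticks,
                                    uint64_t low_bps)
    {
        if (!low_bps || !elapsed_ticks)
            return false;
        return bytes_disp * HZ / elapsed_ticks >= low_bps;
    }
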
2086 static void blk_throtl_update_idletime(struct throtl_grp *tg) in blk_throtl_update_idletime() argument
2089 unsigned long last_finish_time = tg->last_finish_time; in blk_throtl_update_idletime()
2096 last_finish_time == tg->checked_last_finish_time) in blk_throtl_update_idletime()
2099 tg->avg_idletime = (tg->avg_idletime * 7 + now - last_finish_time) >> 3; in blk_throtl_update_idletime()
2100 tg->checked_last_finish_time = last_finish_time; in blk_throtl_update_idletime()
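
blk_throtl_update_idletime() maintains avg_idletime with the 7/8 exponential moving average visible at line 2099: each new idle-time sample contributes one eighth of the average, so old behaviour decays smoothly. The update step in isolation:

    /* new_avg = (7 * old_avg + sample) / 8, as at line 2099 above. */
    static unsigned long ewma_idletime(unsigned long avg,
                                       unsigned long sample)
    {
        return (avg * 7 + sample) >> 3;
    }
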
2210 struct throtl_grp *tg = blkg_to_tg(blkg); in blk_throtl_bio() local
2214 struct throtl_data *td = tg->td; in blk_throtl_bio()
2223 blkg_rwstat_add(&tg->stat_bytes, bio->bi_opf, in blk_throtl_bio()
2225 blkg_rwstat_add(&tg->stat_ios, bio->bi_opf, 1); in blk_throtl_bio()
2228 if (!tg->has_rules[rw]) in blk_throtl_bio()
2235 blk_throtl_update_idletime(tg); in blk_throtl_bio()
2237 sq = &tg->service_queue; in blk_throtl_bio()
2241 if (tg->last_low_overflow_time[rw] == 0) in blk_throtl_bio()
2242 tg->last_low_overflow_time[rw] = jiffies; in blk_throtl_bio()
2243 throtl_downgrade_check(tg); in blk_throtl_bio()
2244 throtl_upgrade_check(tg); in blk_throtl_bio()
2250 if (!tg_may_dispatch(tg, bio, NULL)) { in blk_throtl_bio()
2251 tg->last_low_overflow_time[rw] = jiffies; in blk_throtl_bio()
2252 if (throtl_can_upgrade(td, tg)) { in blk_throtl_bio()
2260 throtl_charge_bio(tg, bio); in blk_throtl_bio()
2273 throtl_trim_slice(tg, rw); in blk_throtl_bio()
2280 qn = &tg->qnode_on_parent[rw]; in blk_throtl_bio()
2282 tg = sq_to_tg(sq); in blk_throtl_bio()
2283 if (!tg) in blk_throtl_bio()
2290 tg->bytes_disp[rw], bio->bi_iter.bi_size, in blk_throtl_bio()
2291 tg_bps_limit(tg, rw), in blk_throtl_bio()
2292 tg->io_disp[rw], tg_iops_limit(tg, rw), in blk_throtl_bio()
2295 tg->last_low_overflow_time[rw] = jiffies; in blk_throtl_bio()
2298 throtl_add_bio_tg(bio, qn, tg); in blk_throtl_bio()
2307 if (tg->flags & THROTL_TG_WAS_EMPTY) { in blk_throtl_bio()
2308 tg_update_disptime(tg); in blk_throtl_bio()
2309 throtl_schedule_next_dispatch(tg->service_queue.parent_sq, true); in blk_throtl_bio()
2357 struct throtl_grp *tg; in blk_throtl_bio_endio() local
2367 tg = blkg_to_tg(blkg); in blk_throtl_bio_endio()
2368 if (!tg->td->limit_valid[LIMIT_LOW]) in blk_throtl_bio_endio()
2372 tg->last_finish_time = finish_time_ns >> 10; in blk_throtl_bio_endio()
2382 throtl_track_latency(tg->td, bio_issue_size(&bio->bi_issue), in blk_throtl_bio_endio()
2385 if (tg->latency_target && lat >= tg->td->filtered_latency) { in blk_throtl_bio_endio()
2390 threshold = tg->td->avg_buckets[rw][bucket].latency + in blk_throtl_bio_endio()
2391 tg->latency_target; in blk_throtl_bio_endio()
2393 tg->bad_bio_cnt++; in blk_throtl_bio_endio()
2398 tg->bio_cnt++; in blk_throtl_bio_endio()
2401 if (time_after(jiffies, tg->bio_cnt_reset_time) || tg->bio_cnt > 1024) { in blk_throtl_bio_endio()
2402 tg->bio_cnt_reset_time = tg->td->throtl_slice + jiffies; in blk_throtl_bio_endio()
2403 tg->bio_cnt /= 2; in blk_throtl_bio_endio()
2404 tg->bad_bio_cnt /= 2; in blk_throtl_bio_endio()
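
Finally, the blk_throtl_bio_endio() hits end with the counter decay: once a throtl_slice has passed, or bio_cnt exceeds 1024, both bio_cnt and bad_bio_cnt are halved, so the bad/good ratio consumed by throtl_tg_is_idle() tracks recent behaviour rather than all-time history. The decay step in isolation:

    struct bio_stats {
        unsigned long bio_cnt;
        unsigned long bad_bio_cnt;
    };

    /* Halving both counters preserves their ratio while exponentially
     * forgetting old samples; mirrors lines 2403-2404 above. */
    static void decay_bio_stats(struct bio_stats *s)
    {
        s->bio_cnt /= 2;
        s->bad_bio_cnt /= 2;
    }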