Lines matching refs:td — cross-reference hits for the struct throtl_data pointer td in the Linux block-layer throttling code (block/blk-throttle.c). The leading number on each line is the line number in that file; the trailing annotation names the enclosing function and how the identifier is used (member, argument, local).
117 struct throtl_data *td; member
272 return tg->td; in sq_to_td()
285 static uint64_t throtl_adjusted_limit(uint64_t low, struct throtl_data *td) in throtl_adjusted_limit() argument
288 if (td->scale < 4096 && time_after_eq(jiffies, in throtl_adjusted_limit()
289 td->low_upgrade_time + td->scale * td->throtl_slice)) in throtl_adjusted_limit()
290 td->scale = (jiffies - td->low_upgrade_time) / td->throtl_slice; in throtl_adjusted_limit()
292 return low + (low >> 1) * td->scale; in throtl_adjusted_limit()
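The three lines above are the whole ramp-up formula used after an upgrade away from the low limit: the effective limit starts at low and gains another 50% of low for every throtl_slice elapsed since low_upgrade_time, with scale capped at 4096. A minimal userspace model of that arithmetic, with jiffies replaced by a plain tick counter (struct and names here are illustrative, not the kernel's):

    #include <stdint.h>
    #include <stdio.h>

    /* Stand-in for the struct throtl_data fields the formula reads. */
    struct td_model {
        uint64_t low_upgrade_time; /* tick of the last LOW -> MAX upgrade */
        uint64_t throtl_slice;     /* slice length, in ticks */
        unsigned int scale;        /* whole slices elapsed since upgrade */
    };

    static uint64_t adjusted_limit(uint64_t low, struct td_model *td, uint64_t now)
    {
        /* Recompute scale, capped at 4096 as in the kernel. */
        if (td->scale < 4096 &&
            now >= td->low_upgrade_time + (uint64_t)td->scale * td->throtl_slice)
            td->scale = (now - td->low_upgrade_time) / td->throtl_slice;
        return low + (low >> 1) * td->scale;
    }

    int main(void)
    {
        struct td_model td = { .low_upgrade_time = 0, .throtl_slice = 100 };
        /* low = 1000: prints 1000, 1500, 2000, 2500, 3000 */
        for (uint64_t now = 0; now <= 400; now += 100)
            printf("t=%3llu limit=%llu\n", (unsigned long long)now,
                   (unsigned long long)adjusted_limit(1000, &td, now));
        return 0;
    }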
298 struct throtl_data *td; in tg_bps_limit() local
304 td = tg->td; in tg_bps_limit()
305 ret = tg->bps[rw][td->limit_index]; in tg_bps_limit()
306 if (ret == 0 && td->limit_index == LIMIT_LOW) { in tg_bps_limit()
309 tg->iops[rw][td->limit_index]) in tg_bps_limit()
315 if (td->limit_index == LIMIT_MAX && tg->bps[rw][LIMIT_LOW] && in tg_bps_limit()
319 adjusted = throtl_adjusted_limit(tg->bps[rw][LIMIT_LOW], td); in tg_bps_limit()
328 struct throtl_data *td; in tg_iops_limit() local
334 td = tg->td; in tg_iops_limit()
335 ret = tg->iops[rw][td->limit_index]; in tg_iops_limit()
336 if (ret == 0 && tg->td->limit_index == LIMIT_LOW) { in tg_iops_limit()
339 tg->bps[rw][td->limit_index]) in tg_iops_limit()
345 if (td->limit_index == LIMIT_MAX && tg->iops[rw][LIMIT_LOW] && in tg_iops_limit()
349 adjusted = throtl_adjusted_limit(tg->iops[rw][LIMIT_LOW], td); in tg_iops_limit()
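tg_bps_limit() and tg_iops_limit() share the selection pattern visible above: while the queue runs at LIMIT_LOW, a group with no low limit of its own is left unthrottled; while it runs at LIMIT_MAX but a low limit is configured, the returned value is the ramped-up low limit clamped to the configured max. A simplified model of that selection (it omits the kernel's extra checks for child cgroups and the other dimension's low limit; names are illustrative):

    #include <stdint.h>

    #define LIMIT_LOW 0
    #define LIMIT_MAX 1

    /* limits[LIMIT_LOW] / limits[LIMIT_MAX] are the configured values;
     * adjusted is what throtl_adjusted_limit() returned for the low limit. */
    uint64_t pick_limit(const uint64_t limits[2], int limit_index,
                        uint64_t adjusted)
    {
        uint64_t ret = limits[limit_index];

        if (ret == 0 && limit_index == LIMIT_LOW)
            return UINT64_MAX; /* no low limit: do not throttle here */

        if (limit_index == LIMIT_MAX && limits[LIMIT_LOW] &&
            limits[LIMIT_LOW] != limits[LIMIT_MAX])
            ret = limits[LIMIT_MAX] < adjusted ? limits[LIMIT_MAX] : adjusted;
        return ret;
    }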
524 struct throtl_data *td = blkg->q->td; in throtl_pd_init() local
540 sq->parent_sq = &td->service_queue; in throtl_pd_init()
543 tg->td = td; in throtl_pd_init()
554 struct throtl_data *td = tg->td; in tg_update_has_rules() local
559 (td->limit_valid[td->limit_index] && in tg_update_has_rules()
574 static void blk_throtl_update_limit_valid(struct throtl_data *td) in blk_throtl_update_limit_valid() argument
581 blkg_for_each_descendant_post(blkg, pos_css, td->queue->root_blkg) { in blk_throtl_update_limit_valid()
590 td->limit_valid[LIMIT_LOW] = low_valid; in blk_throtl_update_limit_valid()
593 static void throtl_upgrade_state(struct throtl_data *td);
603 blk_throtl_update_limit_valid(tg->td); in throtl_pd_offline()
605 if (!tg->td->limit_valid[tg->td->limit_index]) in throtl_pd_offline()
606 throtl_upgrade_state(tg->td); in throtl_pd_offline()
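blk_throtl_update_limit_valid() walks every group and records whether any of them still has a low limit configured; throtl_pd_offline() reruns it when a group goes away, and if nothing valid remains while the queue is still at LIMIT_LOW, throtl_upgrade_state() lifts it back to LIMIT_MAX. A sketch of that invariant over a plain array of groups (hypothetical types standing in for the blkg descendant walk):

    #include <stdbool.h>
    #include <stddef.h>
    #include <stdint.h>

    struct tg_model {
        uint64_t bps_low[2];  /* READ, WRITE low bps limits */
        uint32_t iops_low[2]; /* READ, WRITE low iops limits */
    };

    /* true iff at least one group still has any low limit set */
    bool low_limit_valid(const struct tg_model *tgs, size_t n)
    {
        for (size_t i = 0; i < n; i++)
            if (tgs[i].bps_low[0] || tgs[i].bps_low[1] ||
                tgs[i].iops_low[0] || tgs[i].iops_low[1])
                return true;
        return false;
    }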
784 tg->slice_end[rw] = jiffies + tg->td->throtl_slice; in throtl_start_new_slice_with_credit()
796 tg->slice_end[rw] = jiffies + tg->td->throtl_slice; in throtl_start_new_slice()
806 tg->slice_end[rw] = roundup(jiffy_end, tg->td->throtl_slice); in throtl_set_slice_end()
812 tg->slice_end[rw] = roundup(jiffy_end, tg->td->throtl_slice); in throtl_extend_slice()
852 throtl_set_slice_end(tg, rw, jiffies + tg->td->throtl_slice); in throtl_trim_slice()
856 nr_slices = time_elapsed / tg->td->throtl_slice; in throtl_trim_slice()
860 tmp = tg_bps_limit(tg, rw) * tg->td->throtl_slice * nr_slices; in throtl_trim_slice()
864 io_trim = (tg_iops_limit(tg, rw) * tg->td->throtl_slice * nr_slices) / in throtl_trim_slice()
880 tg->slice_start[rw] += nr_slices * tg->td->throtl_slice; in throtl_trim_slice()
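throtl_trim_slice() forgives history: for each whole throtl_slice that has elapsed, the bytes and I/Os that slice was entitled to (limit * throtl_slice / HZ) are subtracted from the dispatched counters and slice_start is advanced, so an idle group cannot hoard its budget and release it as one large burst. A userspace model of the byte-side arithmetic (ticks stand in for jiffies; constants are illustrative):

    #include <stdint.h>

    #define HZ 1000 /* ticks per second, as in the kernel */

    struct slice_model {
        uint64_t slice_start; /* in ticks */
        uint64_t bytes_disp;  /* bytes dispatched in the current window */
    };

    void trim_slice(struct slice_model *s, uint64_t now,
                    uint64_t bps_limit, uint64_t throtl_slice)
    {
        uint64_t nr_slices = (now - s->slice_start) / throtl_slice;
        uint64_t bytes_trim;

        if (!nr_slices)
            return;
        /* budget the elapsed slices were entitled to */
        bytes_trim = bps_limit * throtl_slice * nr_slices / HZ;
        s->bytes_disp = s->bytes_disp > bytes_trim ?
                        s->bytes_disp - bytes_trim : 0;
        s->slice_start += nr_slices * throtl_slice;
    }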
900 jiffy_elapsed_rnd = tg->td->throtl_slice; in tg_with_in_iops_limit()
902 jiffy_elapsed_rnd = roundup(jiffy_elapsed_rnd, tg->td->throtl_slice); in tg_with_in_iops_limit()
950 jiffy_elapsed_rnd = tg->td->throtl_slice; in tg_with_in_bps_limit()
952 jiffy_elapsed_rnd = roundup(jiffy_elapsed_rnd, tg->td->throtl_slice); in tg_with_in_bps_limit()
1019 jiffies + tg->td->throtl_slice)) in tg_may_dispatch()
1021 jiffies + tg->td->throtl_slice); in tg_may_dispatch()
1164 BUG_ON(tg->td->nr_queued[rw] <= 0); in tg_dispatch_one_bio()
1165 tg->td->nr_queued[rw]--; in tg_dispatch_one_bio()
1235 static bool throtl_can_upgrade(struct throtl_data *td,
1256 struct throtl_data *td = sq_to_td(sq); in throtl_pending_timer_fn() local
1257 struct request_queue *q = td->queue; in throtl_pending_timer_fn()
1263 if (throtl_can_upgrade(td, NULL)) in throtl_pending_timer_fn()
1264 throtl_upgrade_state(td); in throtl_pending_timer_fn()
1306 queue_work(kthrotld_workqueue, &td->dispatch_work); in throtl_pending_timer_fn()
1322 struct throtl_data *td = container_of(work, struct throtl_data, in blk_throtl_dispatch_work_fn() local
1324 struct throtl_service_queue *td_sq = &td->service_queue; in blk_throtl_dispatch_work_fn()
1325 struct request_queue *q = td->queue; in blk_throtl_dispatch_work_fn()
1402 global ? tg->td->queue->root_blkg : tg_to_blkg(tg)) { in tg_conf_updated()
1694 blk_throtl_update_limit_valid(tg->td); in tg_set_limit()
1695 if (tg->td->limit_valid[LIMIT_LOW]) { in tg_set_limit()
1697 tg->td->limit_index = LIMIT_LOW; in tg_set_limit()
1699 tg->td->limit_index = LIMIT_MAX; in tg_set_limit()
1701 tg->td->limit_valid[LIMIT_LOW]); in tg_set_limit()
1730 struct throtl_data *td = q->td; in throtl_shutdown_wq() local
1732 cancel_work_sync(&td->dispatch_work); in throtl_shutdown_wq()
1807 tg->bio_cnt, ret, tg->td->scale); in throtl_tg_is_idle()
1832 tg_last_low_overflow_time(tg) + tg->td->throtl_slice) && in throtl_tg_can_upgrade()
1850 static bool throtl_can_upgrade(struct throtl_data *td, in throtl_can_upgrade() argument
1856 if (td->limit_index != LIMIT_LOW) in throtl_can_upgrade()
1859 if (time_before(jiffies, td->low_downgrade_time + td->throtl_slice)) in throtl_can_upgrade()
1863 blkg_for_each_descendant_post(blkg, pos_css, td->queue->root_blkg) { in throtl_can_upgrade()
1883 if (tg->td->limit_index != LIMIT_LOW) in throtl_upgrade_check()
1886 if (time_after(tg->last_check_time + tg->td->throtl_slice, now)) in throtl_upgrade_check()
1892 __tg_last_low_overflow_time(tg) + tg->td->throtl_slice)) in throtl_upgrade_check()
1895 if (throtl_can_upgrade(tg->td, NULL)) in throtl_upgrade_check()
1896 throtl_upgrade_state(tg->td); in throtl_upgrade_check()
1899 static void throtl_upgrade_state(struct throtl_data *td) in throtl_upgrade_state() argument
1904 throtl_log(&td->service_queue, "upgrade to max"); in throtl_upgrade_state()
1905 td->limit_index = LIMIT_MAX; in throtl_upgrade_state()
1906 td->low_upgrade_time = jiffies; in throtl_upgrade_state()
1907 td->scale = 0; in throtl_upgrade_state()
1909 blkg_for_each_descendant_post(blkg, pos_css, td->queue->root_blkg) { in throtl_upgrade_state()
1918 throtl_select_dispatch(&td->service_queue); in throtl_upgrade_state()
1919 throtl_schedule_next_dispatch(&td->service_queue, true); in throtl_upgrade_state()
1920 queue_work(kthrotld_workqueue, &td->dispatch_work); in throtl_upgrade_state()
1923 static void throtl_downgrade_state(struct throtl_data *td, int new) in throtl_downgrade_state() argument
1925 td->scale /= 2; in throtl_downgrade_state()
1927 throtl_log(&td->service_queue, "downgrade, scale %d", td->scale); in throtl_downgrade_state()
1928 if (td->scale) { in throtl_downgrade_state()
1929 td->low_upgrade_time = jiffies - td->scale * td->throtl_slice; in throtl_downgrade_state()
1933 td->limit_index = new; in throtl_downgrade_state()
1934 td->low_downgrade_time = jiffies; in throtl_downgrade_state()
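Upgrade and downgrade are asymmetric: throtl_upgrade_state() jumps straight to LIMIT_MAX and resets scale to 0, while throtl_downgrade_state() halves scale first and only drops to the new (low) index once scale reaches zero. Backdating low_upgrade_time by scale slices means a quick re-upgrade resumes the ramp at half its previous height instead of restarting from the raw low limit. A model of the downgrade step (illustrative struct, not the kernel's):

    #include <stdint.h>

    #define LIMIT_LOW 0
    #define LIMIT_MAX 1

    struct state_model {
        int limit_index;
        unsigned int scale;
        uint64_t throtl_slice;
        uint64_t low_upgrade_time;
        uint64_t low_downgrade_time;
    };

    void downgrade_state(struct state_model *td, int new_index, uint64_t now)
    {
        td->scale /= 2;
        if (td->scale) {
            /* pretend the upgrade happened scale slices ago; stay at MAX */
            td->low_upgrade_time = now - (uint64_t)td->scale * td->throtl_slice;
            return;
        }
        td->limit_index = new_index;
        td->low_downgrade_time = now;
    }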
1939 struct throtl_data *td = tg->td; in throtl_tg_can_downgrade() local
1946 if (time_after_eq(now, td->low_upgrade_time + td->throtl_slice) && in throtl_tg_can_downgrade()
1948 td->throtl_slice) && in throtl_tg_can_downgrade()
1974 if (tg->td->limit_index != LIMIT_MAX || in throtl_downgrade_check()
1975 !tg->td->limit_valid[LIMIT_LOW]) in throtl_downgrade_check()
1979 if (time_after(tg->last_check_time + tg->td->throtl_slice, now)) in throtl_downgrade_check()
1986 tg->td->throtl_slice)) in throtl_downgrade_check()
2020 throtl_downgrade_state(tg->td, LIMIT_LOW); in throtl_downgrade_check()
2042 static void throtl_update_latency_buckets(struct throtl_data *td) in throtl_update_latency_buckets() argument
2049 if (!blk_queue_nonrot(td->queue)) in throtl_update_latency_buckets()
2051 if (time_before(jiffies, td->last_calculate_time + HZ)) in throtl_update_latency_buckets()
2053 td->last_calculate_time = jiffies; in throtl_update_latency_buckets()
2057 struct latency_bucket *tmp = &td->tmp_buckets[i]; in throtl_update_latency_buckets()
2063 bucket = per_cpu_ptr(td->latency_buckets, cpu); in throtl_update_latency_buckets()
2086 if (td->avg_buckets[i].latency < last_latency) in throtl_update_latency_buckets()
2087 td->avg_buckets[i].latency = last_latency; in throtl_update_latency_buckets()
2091 if (!td->avg_buckets[i].valid) in throtl_update_latency_buckets()
2094 latency = (td->avg_buckets[i].latency * 7 + in throtl_update_latency_buckets()
2097 td->avg_buckets[i].latency = max(latency, last_latency); in throtl_update_latency_buckets()
2098 td->avg_buckets[i].valid = true; in throtl_update_latency_buckets()
2099 last_latency = td->avg_buckets[i].latency; in throtl_update_latency_buckets()
2103 throtl_log(&td->service_queue, in throtl_update_latency_buckets()
2105 td->avg_buckets[i].latency, td->avg_buckets[i].valid); in throtl_update_latency_buckets()
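The bucket update above is an exponentially weighted moving average with weight 7/8 on the old value, and the resulting baselines are forced to be non-decreasing across I/O-size buckets, since a larger request should never have a smaller expected latency than a smaller one. A model of that loop (bucket count and struct are illustrative):

    #include <stdint.h>

    #define NR_BUCKETS 9 /* assumption: stands in for LATENCY_BUCKET_SIZE */

    struct avg_bucket {
        uint64_t latency; /* smoothed baseline latency */
        int valid;
    };

    void update_buckets(struct avg_bucket b[NR_BUCKETS],
                        const uint64_t sample[NR_BUCKETS])
    {
        uint64_t last = 0;

        for (int i = 0; i < NR_BUCKETS; i++) {
            uint64_t lat;

            if (!sample[i]) { /* no fresh data for this size class */
                if (b[i].latency < last)
                    b[i].latency = last;
                continue;
            }
            lat = b[i].valid ? (b[i].latency * 7 + sample[i]) >> 3
                             : sample[i];
            b[i].latency = lat > last ? lat : last;
            b[i].valid = 1;
            last = b[i].latency;
        }
    }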
2108 static inline void throtl_update_latency_buckets(struct throtl_data *td) in throtl_update_latency_buckets() argument
2130 struct throtl_data *td = tg->td; in blk_throtl_bio() local
2140 throtl_update_latency_buckets(td); in blk_throtl_bio()
2163 if (throtl_can_upgrade(td, tg)) { in blk_throtl_bio()
2164 throtl_upgrade_state(td); in blk_throtl_bio()
2208 td->nr_queued[rw]++; in blk_throtl_bio()
2229 if (throttled || !td->track_bio_latency) in blk_throtl_bio()
2236 static void throtl_track_latency(struct throtl_data *td, sector_t size, in throtl_track_latency() argument
2242 if (!td || td->limit_index != LIMIT_LOW || op != REQ_OP_READ || in throtl_track_latency()
2243 !blk_queue_nonrot(td->queue)) in throtl_track_latency()
2248 latency = get_cpu_ptr(td->latency_buckets); in throtl_track_latency()
2251 put_cpu_ptr(td->latency_buckets); in throtl_track_latency()
2257 struct throtl_data *td = q->td; in blk_throtl_stat_add() local
2259 throtl_track_latency(td, blk_stat_size(&rq->issue_stat), in blk_throtl_stat_add()
2287 throtl_track_latency(tg->td, blk_stat_size(&bio->bi_issue_stat), in blk_throtl_bio_endio()
2290 if (tg->latency_target && lat >= tg->td->filtered_latency) { in blk_throtl_bio_endio()
2296 threshold = tg->td->avg_buckets[bucket].latency + in blk_throtl_bio_endio()
2308 tg->bio_cnt_reset_time = tg->td->throtl_slice + jiffies; in blk_throtl_bio_endio()
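On completion, a bio whose latency exceeds its size bucket's baseline plus the cgroup's latency_target counts as bad; throtl_tg_is_idle() later compares bad_bio_cnt against bio_cnt, and both counters are halved every throtl_slice (or once bio_cnt exceeds 1024) so the ratio tracks recent behaviour. A model of that accounting, simplified to drop the kernel's filtered_latency and latency_target gating (illustrative names):

    #include <stdint.h>

    struct lat_model {
        uint64_t bio_cnt, bad_bio_cnt;
        uint64_t bio_cnt_reset_time; /* in ticks */
    };

    void account_bio(struct lat_model *m, uint64_t lat,
                     uint64_t baseline, uint64_t latency_target,
                     uint64_t now, uint64_t throtl_slice)
    {
        if (lat > baseline + latency_target)
            m->bad_bio_cnt++;
        m->bio_cnt++;

        /* age the counters so bad/total reflects recent I/O only */
        if (now > m->bio_cnt_reset_time || m->bio_cnt > 1024) {
            m->bio_cnt_reset_time = now + throtl_slice;
            m->bio_cnt /= 2;
            m->bad_bio_cnt /= 2;
        }
    }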
2346 struct throtl_data *td = q->td; in blk_throtl_drain() local
2361 blkg_for_each_descendant_post(blkg, pos_css, td->queue->root_blkg) in blk_throtl_drain()
2365 tg_drain_bios(&td->service_queue); in blk_throtl_drain()
2372 while ((bio = throtl_pop_queued(&td->service_queue.queued[rw], in blk_throtl_drain()
2381 struct throtl_data *td; in blk_throtl_init() local
2384 td = kzalloc_node(sizeof(*td), GFP_KERNEL, q->node); in blk_throtl_init()
2385 if (!td) in blk_throtl_init()
2387 td->latency_buckets = __alloc_percpu(sizeof(struct latency_bucket) * in blk_throtl_init()
2389 if (!td->latency_buckets) { in blk_throtl_init()
2390 kfree(td); in blk_throtl_init()
2394 INIT_WORK(&td->dispatch_work, blk_throtl_dispatch_work_fn); in blk_throtl_init()
2395 throtl_service_queue_init(&td->service_queue); in blk_throtl_init()
2397 q->td = td; in blk_throtl_init()
2398 td->queue = q; in blk_throtl_init()
2400 td->limit_valid[LIMIT_MAX] = true; in blk_throtl_init()
2401 td->limit_index = LIMIT_MAX; in blk_throtl_init()
2402 td->low_upgrade_time = jiffies; in blk_throtl_init()
2403 td->low_downgrade_time = jiffies; in blk_throtl_init()
2408 free_percpu(td->latency_buckets); in blk_throtl_init()
2409 kfree(td); in blk_throtl_init()
2416 BUG_ON(!q->td); in blk_throtl_exit()
2419 free_percpu(q->td->latency_buckets); in blk_throtl_exit()
2420 kfree(q->td); in blk_throtl_exit()
2425 struct throtl_data *td; in blk_throtl_register_queue() local
2428 td = q->td; in blk_throtl_register_queue()
2429 BUG_ON(!td); in blk_throtl_register_queue()
2432 td->throtl_slice = DFL_THROTL_SLICE_SSD; in blk_throtl_register_queue()
2433 td->filtered_latency = LATENCY_FILTERED_SSD; in blk_throtl_register_queue()
2435 td->throtl_slice = DFL_THROTL_SLICE_HD; in blk_throtl_register_queue()
2436 td->filtered_latency = LATENCY_FILTERED_HD; in blk_throtl_register_queue()
2438 td->avg_buckets[i].latency = DFL_HD_BASELINE_LATENCY; in blk_throtl_register_queue()
2442 td->throtl_slice = DFL_THROTL_SLICE_HD; in blk_throtl_register_queue()
2445 td->track_bio_latency = !q->mq_ops && !q->request_fn; in blk_throtl_register_queue()
2446 if (!td->track_bio_latency) in blk_throtl_register_queue()
2453 if (!q->td) in blk_throtl_sample_time_show()
2455 return sprintf(page, "%u\n", jiffies_to_msecs(q->td->throtl_slice)); in blk_throtl_sample_time_show()
2464 if (!q->td) in blk_throtl_sample_time_store()
2471 q->td->throtl_slice = t; in blk_throtl_sample_time_store()