Lines matching refs: bfqg
138 static void bfqg_stats_set_start_group_wait_time(struct bfq_group *bfqg, in bfqg_stats_set_start_group_wait_time() argument
141 struct bfqg_stats *stats = &bfqg->stats; in bfqg_stats_set_start_group_wait_time()
145 if (bfqg == curr_bfqg) in bfqg_stats_set_start_group_wait_time()
166 void bfqg_stats_update_dequeue(struct bfq_group *bfqg) in bfqg_stats_update_dequeue() argument
168 bfq_stat_add(&bfqg->stats.dequeue, 1); in bfqg_stats_update_dequeue()
171 void bfqg_stats_set_start_empty_time(struct bfq_group *bfqg) in bfqg_stats_set_start_empty_time() argument
173 struct bfqg_stats *stats = &bfqg->stats; in bfqg_stats_set_start_empty_time()
190 void bfqg_stats_update_idle_time(struct bfq_group *bfqg) in bfqg_stats_update_idle_time() argument
192 struct bfqg_stats *stats = &bfqg->stats; in bfqg_stats_update_idle_time()
204 void bfqg_stats_set_start_idle_time(struct bfq_group *bfqg) in bfqg_stats_set_start_idle_time() argument
206 struct bfqg_stats *stats = &bfqg->stats; in bfqg_stats_set_start_idle_time()
212 void bfqg_stats_update_avg_queue_size(struct bfq_group *bfqg) in bfqg_stats_update_avg_queue_size() argument
214 struct bfqg_stats *stats = &bfqg->stats; in bfqg_stats_update_avg_queue_size()
222 void bfqg_stats_update_io_add(struct bfq_group *bfqg, struct bfq_queue *bfqq, in bfqg_stats_update_io_add() argument
225 blkg_rwstat_add(&bfqg->stats.queued, op, 1); in bfqg_stats_update_io_add()
226 bfqg_stats_end_empty_time(&bfqg->stats); in bfqg_stats_update_io_add()
227 if (!(bfqq == ((struct bfq_data *)bfqg->bfqd)->in_service_queue)) in bfqg_stats_update_io_add()
228 bfqg_stats_set_start_group_wait_time(bfqg, bfqq_group(bfqq)); in bfqg_stats_update_io_add()
231 void bfqg_stats_update_io_remove(struct bfq_group *bfqg, unsigned int op) in bfqg_stats_update_io_remove() argument
233 blkg_rwstat_add(&bfqg->stats.queued, op, -1); in bfqg_stats_update_io_remove()
236 void bfqg_stats_update_io_merged(struct bfq_group *bfqg, unsigned int op) in bfqg_stats_update_io_merged() argument
238 blkg_rwstat_add(&bfqg->stats.merged, op, 1); in bfqg_stats_update_io_merged()
241 void bfqg_stats_update_completion(struct bfq_group *bfqg, u64 start_time_ns, in bfqg_stats_update_completion() argument
244 struct bfqg_stats *stats = &bfqg->stats; in bfqg_stats_update_completion()
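A quick model of what bfqg_stats_update_completion() (lines 241-244 above) measures: each request carries an arrival timestamp and a dispatch timestamp, and at completion the two deltas are folded into per-group wait-time and service-time counters. The following is a minimal single-threaded user-space sketch of that accounting; all names are illustrative stand-ins, not the kernel API.

#include <stdio.h>

static void account_completion(unsigned long long start_ns,      /* request arrived */
                               unsigned long long io_start_ns,   /* request dispatched */
                               unsigned long long now_ns,        /* request completed */
                               unsigned long long *wait_time,
                               unsigned long long *service_time)
{
        if (now_ns > io_start_ns)
                *service_time += now_ns - io_start_ns;  /* time on the device */
        if (io_start_ns > start_ns)
                *wait_time += io_start_ns - start_ns;   /* time queued in the scheduler */
}

int main(void)
{
        unsigned long long wait = 0, service = 0;

        account_completion(1000, 4000, 9000, &wait, &service);
        printf("wait=%lluns service=%lluns\n", wait, service);
        return 0;
}
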
257 void bfqg_stats_update_io_add(struct bfq_group *bfqg, struct bfq_queue *bfqq, in bfqg_stats_update_io_add() argument
259 void bfqg_stats_update_io_remove(struct bfq_group *bfqg, unsigned int op) { } in bfqg_stats_update_io_remove() argument
260 void bfqg_stats_update_io_merged(struct bfq_group *bfqg, unsigned int op) { } in bfqg_stats_update_io_merged() argument
261 void bfqg_stats_update_completion(struct bfq_group *bfqg, u64 start_time_ns, in bfqg_stats_update_completion() argument
263 void bfqg_stats_update_dequeue(struct bfq_group *bfqg) { } in bfqg_stats_update_dequeue() argument
264 void bfqg_stats_set_start_empty_time(struct bfq_group *bfqg) { } in bfqg_stats_set_start_empty_time() argument
265 void bfqg_stats_update_idle_time(struct bfq_group *bfqg) { } in bfqg_stats_update_idle_time() argument
266 void bfqg_stats_set_start_idle_time(struct bfq_group *bfqg) { } in bfqg_stats_set_start_idle_time() argument
267 void bfqg_stats_update_avg_queue_size(struct bfq_group *bfqg) { } in bfqg_stats_update_avg_queue_size() argument
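The empty bodies at lines 257-267 are the variants compiled when the debug-stats config option is off: the same symbols exist either way, so call sites never need conditional guards of their own. A minimal sketch of that stub pattern, with hypothetical names and a stand-in config macro:

#include <stdio.h>

/* Flip this on to get real accounting, mimicking CONFIG_BFQ_CGROUP_DEBUG. */
/* #define GROUP_DEBUG 1 */

struct group { long queued; };

#ifdef GROUP_DEBUG
static void stats_update_io_add(struct group *g)
{
        g->queued++;                    /* real bookkeeping */
}
#else
static void stats_update_io_add(struct group *g) { }    /* compiles away */
#endif

int main(void)
{
        struct group g = { 0 };

        stats_update_io_add(&g);        /* call site stays unconditional */
        printf("queued=%ld\n", g.queued);
        return 0;
}
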
284 struct blkcg_gq *bfqg_to_blkg(struct bfq_group *bfqg) in bfqg_to_blkg() argument
286 return pd_to_blkg(&bfqg->pd); in bfqg_to_blkg()
301 static struct bfq_group *bfqg_parent(struct bfq_group *bfqg) in bfqg_parent() argument
303 struct blkcg_gq *pblkg = bfqg_to_blkg(bfqg)->parent; in bfqg_parent()
322 static void bfqg_get(struct bfq_group *bfqg) in bfqg_get() argument
324 bfqg->ref++; in bfqg_get()
327 static void bfqg_put(struct bfq_group *bfqg) in bfqg_put() argument
329 bfqg->ref--; in bfqg_put()
331 if (bfqg->ref == 0) in bfqg_put()
332 kfree(bfqg); in bfqg_put()
335 static void bfqg_and_blkg_get(struct bfq_group *bfqg) in bfqg_and_blkg_get() argument
338 bfqg_get(bfqg); in bfqg_and_blkg_get()
340 blkg_get(bfqg_to_blkg(bfqg)); in bfqg_and_blkg_get()
343 void bfqg_and_blkg_put(struct bfq_group *bfqg) in bfqg_and_blkg_put() argument
345 blkg_put(bfqg_to_blkg(bfqg)); in bfqg_and_blkg_put()
347 bfqg_put(bfqg); in bfqg_and_blkg_put()
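Note that bfqg_get()/bfqg_put() (lines 322-332) use a plain, non-atomic counter; the kernel can afford that because callers are serialized by the scheduler's locking, and bfqg_and_blkg_get()/put() pair each group reference with a blkg reference so both objects stay alive together. A minimal single-threaded user-space sketch of the counting itself, with illustrative names:

#include <stdio.h>
#include <stdlib.h>

struct group { int ref; };

static struct group *group_alloc(void)
{
        struct group *g = calloc(1, sizeof(*g));

        if (g)
                g->ref = 1;             /* caller owns the first reference */
        return g;
}

static void group_get(struct group *g) { g->ref++; }

static void group_put(struct group *g)
{
        if (--g->ref == 0)              /* last reference frees the object */
                free(g);
}

int main(void)
{
        struct group *g = group_alloc();

        group_get(g);                   /* second owner appears */
        group_put(g);                   /* first owner drops */
        group_put(g);                   /* object freed here */
        return 0;
}
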
352 struct bfq_group *bfqg = blkg_to_bfqg(rq->bio->bi_blkg); in bfqg_stats_update_legacy_io() local
354 if (!bfqg) in bfqg_stats_update_legacy_io()
357 blkg_rwstat_add(&bfqg->stats.bytes, rq->cmd_flags, blk_rq_bytes(rq)); in bfqg_stats_update_legacy_io()
358 blkg_rwstat_add(&bfqg->stats.ios, rq->cmd_flags, 1); in bfqg_stats_update_legacy_io()
406 static void bfqg_stats_xfer_dead(struct bfq_group *bfqg) in bfqg_stats_xfer_dead() argument
410 if (!bfqg) /* root_group */ in bfqg_stats_xfer_dead()
413 parent = bfqg_parent(bfqg); in bfqg_stats_xfer_dead()
415 lockdep_assert_held(&bfqg_to_blkg(bfqg)->q->queue_lock); in bfqg_stats_xfer_dead()
420 bfqg_stats_add_aux(&parent->stats, &bfqg->stats); in bfqg_stats_xfer_dead()
421 bfqg_stats_reset(&bfqg->stats); in bfqg_stats_xfer_dead()
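bfqg_stats_xfer_dead() (lines 406-421) folds a dying group's counters into its parent and then zeroes the child, so system-wide totals survive cgroup removal. A sketch of that transfer-then-reset pattern, assuming a toy stats struct rather than the kernel's:

#include <stdio.h>
#include <string.h>

struct stats { long ios; long bytes; };

static void stats_add_aux(struct stats *to, const struct stats *from)
{
        to->ios   += from->ios;
        to->bytes += from->bytes;
}

static void stats_reset(struct stats *s) { memset(s, 0, sizeof(*s)); }

int main(void)
{
        struct stats parent = { 10, 4096 }, child = { 3, 1024 };

        stats_add_aux(&parent, &child);  /* fold dying child into parent */
        stats_reset(&child);             /* child now reads as empty */
        printf("parent: %ld ios, %ld bytes\n", parent.ios, parent.bytes);
        return 0;
}
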
424 void bfq_init_entity(struct bfq_entity *entity, struct bfq_group *bfqg) in bfq_init_entity() argument
437 bfqg_and_blkg_get(bfqg); in bfq_init_entity()
439 entity->parent = bfqg->my_entity; /* NULL for root group */ in bfq_init_entity()
440 entity->sched_data = &bfqg->sched_data; in bfq_init_entity()
524 struct bfq_group *bfqg; in bfq_pd_alloc() local
526 bfqg = kzalloc_node(sizeof(*bfqg), gfp, q->node); in bfq_pd_alloc()
527 if (!bfqg) in bfq_pd_alloc()
530 if (bfqg_stats_init(&bfqg->stats, gfp)) { in bfq_pd_alloc()
531 kfree(bfqg); in bfq_pd_alloc()
536 bfqg_get(bfqg); in bfq_pd_alloc()
537 return &bfqg->pd; in bfq_pd_alloc()
543 struct bfq_group *bfqg = blkg_to_bfqg(blkg); in bfq_pd_init() local
545 struct bfq_entity *entity = &bfqg->entity; in bfq_pd_init()
549 entity->my_sched_data = &bfqg->sched_data; in bfq_pd_init()
550 bfqg->my_entity = entity; /* in bfq_pd_init()
554 bfqg->bfqd = bfqd; in bfq_pd_init()
555 bfqg->active_entities = 0; in bfq_pd_init()
556 bfqg->online = true; in bfq_pd_init()
557 bfqg->rq_pos_tree = RB_ROOT; in bfq_pd_init()
562 struct bfq_group *bfqg = pd_to_bfqg(pd); in bfq_pd_free() local
564 bfqg_stats_exit(&bfqg->stats); in bfq_pd_free()
565 bfqg_put(bfqg); in bfq_pd_free()
570 struct bfq_group *bfqg = pd_to_bfqg(pd); in bfq_pd_reset_stats() local
572 bfqg_stats_reset(&bfqg->stats); in bfq_pd_reset_stats()
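bfq_pd_alloc() (lines 524-537) shows classic two-step construction with unwinding: allocate the group, initialize its stats, and on failure of the second step free the first before reporting an error. A user-space model of that shape, with hypothetical stand-in types throughout:

#include <stdio.h>
#include <stdlib.h>

struct stats { long *buckets; };
struct group { struct stats stats; int ref; };

static int stats_init(struct stats *s)
{
        s->buckets = calloc(8, sizeof(*s->buckets));
        return s->buckets ? 0 : -1;     /* -ENOMEM analogue */
}

static struct group *group_alloc(void)
{
        struct group *g = calloc(1, sizeof(*g));

        if (!g)
                return NULL;
        if (stats_init(&g->stats)) {    /* second step failed: undo the first */
                free(g);
                return NULL;
        }
        g->ref = 1;                     /* mirrors the bfqg_get() at line 536 */
        return g;
}

int main(void)
{
        struct group *g = group_alloc();

        printf(g ? "allocated\n" : "failed\n");
        if (g) {
                free(g->stats.buckets);
                free(g);
        }
        return 0;
}
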
575 static void bfq_group_set_parent(struct bfq_group *bfqg, in bfq_group_set_parent() argument
580 entity = &bfqg->entity; in bfq_group_set_parent()
585 static void bfq_link_bfqg(struct bfq_data *bfqd, struct bfq_group *bfqg) in bfq_link_bfqg() argument
595 entity = &bfqg->entity; in bfq_link_bfqg()
611 struct bfq_group *bfqg; in bfq_bio_bfqg() local
618 bfqg = blkg_to_bfqg(blkg); in bfq_bio_bfqg()
619 if (bfqg->online) { in bfq_bio_bfqg()
621 return bfqg; in bfq_bio_bfqg()
645 struct bfq_group *bfqg) in bfq_bfqq_move() argument
677 entity->parent = bfqg->my_entity; in bfq_bfqq_move()
678 entity->sched_data = &bfqg->sched_data; in bfq_bfqq_move()
680 bfqg_and_blkg_get(bfqg); in bfq_bfqq_move()
706 struct bfq_group *bfqg) in __bfq_bic_change_cgroup() argument
715 if (entity->sched_data != &bfqg->sched_data) { in __bfq_bic_change_cgroup()
724 if (sync_bfqq->entity.sched_data != &bfqg->sched_data) in __bfq_bic_change_cgroup()
725 bfq_bfqq_move(bfqd, sync_bfqq, bfqg); in __bfq_bic_change_cgroup()
736 &bfqg->sched_data) in __bfq_bic_change_cgroup()
757 return bfqg; in __bfq_bic_change_cgroup()
763 struct bfq_group *bfqg = bfq_bio_bfqg(bfqd, bio); in bfq_bic_update_cgroup() local
766 serial_nr = bfqg_to_blkg(bfqg)->blkcg->css.serial_nr; in bfq_bic_update_cgroup()
779 bfq_link_bfqg(bfqd, bfqg); in bfq_bic_update_cgroup()
780 __bfq_bic_change_cgroup(bfqd, bic, bfqg); in bfq_bic_update_cgroup()
831 blkg_path(bfqg_to_blkg(bfqg), bfqg->blkg_path, sizeof(bfqg->blkg_path)); in bfq_bic_update_cgroup()
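bfq_bic_update_cgroup() (lines 763-780) reads the blkcg css serial number at line 766; the idea is that the expensive relinking is skipped when a cached serial number shows the association is unchanged. A sketch of that caching shortcut, with an illustrative context struct that is not the kernel's bfq_io_cq:

#include <stdio.h>

struct io_ctx { unsigned long cached_serial; };

static void update_cgroup(struct io_ctx *ic, unsigned long cur_serial)
{
        if (ic->cached_serial == cur_serial)
                return;                  /* same cgroup as before: skip the work */
        /* ...relink the queues under the new group here... */
        ic->cached_serial = cur_serial;  /* cache the association just built */
}

int main(void)
{
        struct io_ctx ic = { 0 };

        update_cgroup(&ic, 42);          /* performs the move */
        update_cgroup(&ic, 42);          /* early return, nothing to redo */
        printf("serial=%lu\n", ic.cached_serial);
        return 0;
}
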
883 struct bfq_group *bfqg, in bfq_reparent_active_queues() argument
893 if (bfqg->sched_data.in_service_entity) in bfq_reparent_active_queues()
895 bfqg->sched_data.in_service_entity, in bfq_reparent_active_queues()
910 struct bfq_group *bfqg = pd_to_bfqg(pd); in bfq_pd_offline() local
911 struct bfq_data *bfqd = bfqg->bfqd; in bfq_pd_offline()
912 struct bfq_entity *entity = bfqg->my_entity; in bfq_pd_offline()
926 st = bfqg->sched_data.service_tree + i; in bfq_pd_offline()
940 bfq_reparent_active_queues(bfqd, bfqg, st, i); in bfq_pd_offline()
959 bfq_put_async_queues(bfqd, bfqg); in bfq_pd_offline()
960 bfqg->online = false; in bfq_pd_offline()
969 bfqg_stats_xfer_dead(bfqg); in bfq_pd_offline()
977 struct bfq_group *bfqg = blkg_to_bfqg(blkg); in bfq_end_wr_async() local
979 bfq_end_wr_async_queues(bfqd, bfqg); in bfq_end_wr_async()
1001 struct bfq_group *bfqg = pd_to_bfqg(pd); in bfqg_prfill_weight_device() local
1003 if (!bfqg->entity.dev_weight) in bfqg_prfill_weight_device()
1005 return __blkg_prfill_u64(sf, pd, bfqg->entity.dev_weight); in bfqg_prfill_weight_device()
1019 static void bfq_group_set_weight(struct bfq_group *bfqg, u64 weight, u64 dev_weight) in bfq_group_set_weight() argument
1023 bfqg->entity.dev_weight = dev_weight; in bfq_group_set_weight()
1030 if ((unsigned short)weight != bfqg->entity.new_weight) { in bfq_group_set_weight()
1031 bfqg->entity.new_weight = (unsigned short)weight; in bfq_group_set_weight()
1048 bfqg->entity.prio_changed = 1; in bfq_group_set_weight()
1068 struct bfq_group *bfqg = blkg_to_bfqg(blkg); in bfq_io_set_weight_legacy() local
1070 if (bfqg) in bfq_io_set_weight_legacy()
1071 bfq_group_set_weight(bfqg, val, 0); in bfq_io_set_weight_legacy()
1085 struct bfq_group *bfqg; in bfq_io_set_device_weight() local
1104 bfqg = blkg_to_bfqg(ctx.blkg); in bfq_io_set_device_weight()
1108 bfq_group_set_weight(bfqg, bfqg->entity.weight, v); in bfq_io_set_device_weight()
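bfq_group_set_weight() (lines 1019-1048) lets a nonzero per-device weight override the group-wide one, truncates the effective value to the entity's unsigned short field, and raises prio_changed only when the stored value actually differs. A compact sketch of that logic under those assumptions, with a stand-in entity struct:

#include <stdio.h>

struct entity {
        unsigned short new_weight;
        unsigned long long dev_weight;
        int prio_changed;
};

static void set_weight(struct entity *e,
                       unsigned long long weight,
                       unsigned long long dev_weight)
{
        if (dev_weight)
                weight = dev_weight;            /* per-device value wins */
        e->dev_weight = dev_weight;
        if ((unsigned short)weight != e->new_weight) {
                e->new_weight = (unsigned short)weight;
                e->prio_changed = 1;            /* scheduler reweighs later */
        }
}

int main(void)
{
        struct entity e = { .new_weight = 100 };

        set_weight(&e, 500, 0);                 /* group-wide weight */
        set_weight(&e, 500, 200);               /* device weight overrides */
        printf("weight=%u dev=%llu changed=%d\n",
               e.new_weight, e.dev_weight, e.prio_changed);
        return 0;
}
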
1204 struct bfq_group *bfqg = blkg_to_bfqg(pd->blkg); in bfqg_prfill_sectors() local
1205 u64 sum = blkg_rwstat_total(&bfqg->stats.bytes); in bfqg_prfill_sectors()
1240 struct bfq_group *bfqg = pd_to_bfqg(pd); in bfqg_prfill_avg_queue_size() local
1241 u64 samples = bfq_stat_read(&bfqg->stats.avg_queue_size_samples); in bfqg_prfill_avg_queue_size()
1245 v = bfq_stat_read(&bfqg->stats.avg_queue_size_sum); in bfqg_prfill_avg_queue_size()
1428 struct bfq_group *bfqg) {} in bfq_bfqq_move() argument
1430 void bfq_init_entity(struct bfq_entity *entity, struct bfq_group *bfqg) in bfq_init_entity() argument
1440 entity->sched_data = &bfqg->sched_data; in bfq_init_entity()
1460 void bfqg_and_blkg_get(struct bfq_group *bfqg) {} in bfqg_and_blkg_get() argument
1462 void bfqg_and_blkg_put(struct bfq_group *bfqg) {} in bfqg_and_blkg_put() argument
1466 struct bfq_group *bfqg; in bfq_create_group_hierarchy() local
1469 bfqg = kmalloc_node(sizeof(*bfqg), GFP_KERNEL | __GFP_ZERO, node); in bfq_create_group_hierarchy()
1470 if (!bfqg) in bfq_create_group_hierarchy()
1474 bfqg->sched_data.service_tree[i] = BFQ_SERVICE_TREE_INIT; in bfq_create_group_hierarchy()
1476 return bfqg; in bfq_create_group_hierarchy()
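This last bfq_create_group_hierarchy() (lines 1466-1476) is the fallback used when group scheduling is compiled out: a single zeroed group whose service trees are all set to a fresh initial state. A sketch of that flat setup; the constant and struct names are stand-ins, not BFQ's:

#include <stdio.h>
#include <stdlib.h>

#define NUM_TREES 3                      /* stand-in for BFQ's ioprio classes */

struct service_tree { int initialized; };
struct group { struct service_tree st[NUM_TREES]; };

static struct group *create_group_hierarchy(void)
{
        struct group *g = calloc(1, sizeof(*g));

        if (!g)
                return NULL;
        for (int i = 0; i < NUM_TREES; i++)
                g->st[i].initialized = 1;  /* analogue of BFQ_SERVICE_TREE_INIT */
        return g;
}

int main(void)
{
        struct group *g = create_group_hierarchy();

        printf(g ? "flat root group ready\n" : "alloc failed\n");
        free(g);
        return 0;
}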