
Lines Matching refs:blkg in block/blk-cgroup.c

73 static void blkg_free(struct blkcg_gq *blkg)  in blkg_free()  argument
77 if (!blkg) in blkg_free()
81 if (blkg->pd[i]) in blkg_free()
82 blkcg_policy[i]->pd_free_fn(blkg->pd[i]); in blkg_free()
84 free_percpu(blkg->iostat_cpu); in blkg_free()
85 percpu_ref_exit(&blkg->refcnt); in blkg_free()
86 kfree(blkg); in blkg_free()
91 struct blkcg_gq *blkg = container_of(rcu, struct blkcg_gq, rcu_head); in __blkg_release() local
93 WARN_ON(!bio_list_empty(&blkg->async_bios)); in __blkg_release()
96 css_put(&blkg->blkcg->css); in __blkg_release()
97 if (blkg->parent) in __blkg_release()
98 blkg_put(blkg->parent); in __blkg_release()
99 blkg_free(blkg); in __blkg_release()
112 struct blkcg_gq *blkg = container_of(ref, struct blkcg_gq, refcnt); in blkg_release() local
114 call_rcu(&blkg->rcu_head, __blkg_release); in blkg_release()
119 struct blkcg_gq *blkg = container_of(work, struct blkcg_gq, in blkg_async_bio_workfn() local
127 spin_lock_bh(&blkg->async_bio_lock); in blkg_async_bio_workfn()
128 bio_list_merge(&bios, &blkg->async_bios); in blkg_async_bio_workfn()
129 bio_list_init(&blkg->async_bios); in blkg_async_bio_workfn()
130 spin_unlock_bh(&blkg->async_bio_lock); in blkg_async_bio_workfn()
154 struct blkcg_gq *blkg; in blkg_alloc() local
158 blkg = kzalloc_node(sizeof(*blkg), gfp_mask, q->node); in blkg_alloc()
159 if (!blkg) in blkg_alloc()
162 if (percpu_ref_init(&blkg->refcnt, blkg_release, 0, gfp_mask)) in blkg_alloc()
165 blkg->iostat_cpu = alloc_percpu_gfp(struct blkg_iostat_set, gfp_mask); in blkg_alloc()
166 if (!blkg->iostat_cpu) in blkg_alloc()
169 blkg->q = q; in blkg_alloc()
170 INIT_LIST_HEAD(&blkg->q_node); in blkg_alloc()
171 spin_lock_init(&blkg->async_bio_lock); in blkg_alloc()
172 bio_list_init(&blkg->async_bios); in blkg_alloc()
173 INIT_WORK(&blkg->async_bio_work, blkg_async_bio_workfn); in blkg_alloc()
174 blkg->blkcg = blkcg; in blkg_alloc()
176 u64_stats_init(&blkg->iostat.sync); in blkg_alloc()
178 u64_stats_init(&per_cpu_ptr(blkg->iostat_cpu, cpu)->sync); in blkg_alloc()
192 blkg->pd[i] = pd; in blkg_alloc()
193 pd->blkg = blkg; in blkg_alloc()
197 return blkg; in blkg_alloc()
200 blkg_free(blkg); in blkg_alloc()
207 struct blkcg_gq *blkg; in blkg_lookup_slowpath() local
215 blkg = radix_tree_lookup(&blkcg->blkg_tree, q->id); in blkg_lookup_slowpath()
216 if (blkg && blkg->q == q) { in blkg_lookup_slowpath()
219 rcu_assign_pointer(blkcg->blkg_hint, blkg); in blkg_lookup_slowpath()
221 return blkg; in blkg_lookup_slowpath()
236 struct blkcg_gq *blkg; in blkg_create() local
262 blkg = new_blkg; in blkg_create()
266 blkg->parent = __blkg_lookup(blkcg_parent(blkcg), q, false); in blkg_create()
267 if (WARN_ON_ONCE(!blkg->parent)) { in blkg_create()
271 blkg_get(blkg->parent); in blkg_create()
278 if (blkg->pd[i] && pol->pd_init_fn) in blkg_create()
279 pol->pd_init_fn(blkg->pd[i]); in blkg_create()
284 ret = radix_tree_insert(&blkcg->blkg_tree, q->id, blkg); in blkg_create()
286 hlist_add_head_rcu(&blkg->blkcg_node, &blkcg->blkg_list); in blkg_create()
287 list_add(&blkg->q_node, &q->blkg_list); in blkg_create()
292 if (blkg->pd[i] && pol->pd_online_fn) in blkg_create()
293 pol->pd_online_fn(blkg->pd[i]); in blkg_create()
296 blkg->online = true; in blkg_create()
300 return blkg; in blkg_create()
303 blkg_put(blkg); in blkg_create()
329 struct blkcg_gq *blkg; in blkg_lookup_create() local
334 blkg = blkg_lookup(blkcg, q); in blkg_lookup_create()
335 if (blkg) in blkg_lookup_create()
336 return blkg; in blkg_lookup_create()
339 blkg = __blkg_lookup(blkcg, q, true); in blkg_lookup_create()
340 if (blkg) in blkg_lookup_create()
354 blkg = __blkg_lookup(parent, q, false); in blkg_lookup_create()
355 if (blkg) { in blkg_lookup_create()
357 ret_blkg = blkg; in blkg_lookup_create()
364 blkg = blkg_create(pos, q, NULL); in blkg_lookup_create()
365 if (IS_ERR(blkg)) { in blkg_lookup_create()
366 blkg = ret_blkg; in blkg_lookup_create()
375 return blkg; in blkg_lookup_create()
378 static void blkg_destroy(struct blkcg_gq *blkg) in blkg_destroy() argument
380 struct blkcg *blkcg = blkg->blkcg; in blkg_destroy()
383 lockdep_assert_held(&blkg->q->queue_lock); in blkg_destroy()
387 WARN_ON_ONCE(list_empty(&blkg->q_node)); in blkg_destroy()
388 WARN_ON_ONCE(hlist_unhashed(&blkg->blkcg_node)); in blkg_destroy()
393 if (blkg->pd[i] && pol->pd_offline_fn) in blkg_destroy()
394 pol->pd_offline_fn(blkg->pd[i]); in blkg_destroy()
397 blkg->online = false; in blkg_destroy()
399 radix_tree_delete(&blkcg->blkg_tree, blkg->q->id); in blkg_destroy()
400 list_del_init(&blkg->q_node); in blkg_destroy()
401 hlist_del_init_rcu(&blkg->blkcg_node); in blkg_destroy()
408 if (rcu_access_pointer(blkcg->blkg_hint) == blkg) in blkg_destroy()
415 percpu_ref_kill(&blkg->refcnt); in blkg_destroy()
426 struct blkcg_gq *blkg, *n; in blkg_destroy_all() local
429 list_for_each_entry_safe(blkg, n, &q->blkg_list, q_node) { in blkg_destroy_all()
430 struct blkcg *blkcg = blkg->blkcg; in blkg_destroy_all()
433 blkg_destroy(blkg); in blkg_destroy_all()
445 struct blkcg_gq *blkg; in blkcg_reset_stats() local
456 hlist_for_each_entry(blkg, &blkcg->blkg_list, blkcg_node) { in blkcg_reset_stats()
459 per_cpu_ptr(blkg->iostat_cpu, cpu); in blkcg_reset_stats()
462 memset(&blkg->iostat, 0, sizeof(blkg->iostat)); in blkcg_reset_stats()
467 if (blkg->pd[i] && pol->pd_reset_stats_fn) in blkcg_reset_stats()
468 pol->pd_reset_stats_fn(blkg->pd[i]); in blkcg_reset_stats()
477 const char *blkg_dev_name(struct blkcg_gq *blkg) in blkg_dev_name() argument
480 if (blkg->q->backing_dev_info->dev) in blkg_dev_name()
481 return bdi_dev_name(blkg->q->backing_dev_info); in blkg_dev_name()
509 struct blkcg_gq *blkg; in blkcg_print_blkgs() local
513 hlist_for_each_entry_rcu(blkg, &blkcg->blkg_list, blkcg_node) { in blkcg_print_blkgs()
514 spin_lock_irq(&blkg->q->queue_lock); in blkcg_print_blkgs()
515 if (blkcg_policy_enabled(blkg->q, pol)) in blkcg_print_blkgs()
516 total += prfill(sf, blkg->pd[pol->plid], data); in blkcg_print_blkgs()
517 spin_unlock_irq(&blkg->q->queue_lock); in blkcg_print_blkgs()
536 const char *dname = blkg_dev_name(pd->blkg); in __blkg_prfill_u64()
615 struct blkcg_gq *blkg; in blkg_conf_prep() local
627 blkg = blkg_lookup_check(blkcg, pol, q); in blkg_conf_prep()
628 if (IS_ERR(blkg)) { in blkg_conf_prep()
629 ret = PTR_ERR(blkg); in blkg_conf_prep()
633 if (blkg) in blkg_conf_prep()
670 blkg = blkg_lookup_check(pos, pol, q); in blkg_conf_prep()
671 if (IS_ERR(blkg)) { in blkg_conf_prep()
672 ret = PTR_ERR(blkg); in blkg_conf_prep()
677 if (blkg) { in blkg_conf_prep()
680 blkg = blkg_create(pos, q, new_blkg); in blkg_conf_prep()
681 if (IS_ERR(blkg)) { in blkg_conf_prep()
682 ret = PTR_ERR(blkg); in blkg_conf_prep()
694 ctx->blkg = blkg; in blkg_conf_prep()
768 struct blkcg_gq *blkg; in blkcg_rstat_flush() local
772 hlist_for_each_entry_rcu(blkg, &blkcg->blkg_list, blkcg_node) { in blkcg_rstat_flush()
773 struct blkcg_gq *parent = blkg->parent; in blkcg_rstat_flush()
774 struct blkg_iostat_set *bisc = per_cpu_ptr(blkg->iostat_cpu, cpu); in blkcg_rstat_flush()
785 u64_stats_update_begin(&blkg->iostat.sync); in blkcg_rstat_flush()
788 blkg_iostat_add(&blkg->iostat.cur, &delta); in blkcg_rstat_flush()
790 u64_stats_update_end(&blkg->iostat.sync); in blkcg_rstat_flush()
795 blkg_iostat_set(&delta, &blkg->iostat.cur); in blkcg_rstat_flush()
796 blkg_iostat_sub(&delta, &blkg->iostat.last); in blkcg_rstat_flush()
798 blkg_iostat_add(&blkg->iostat.last, &delta); in blkcg_rstat_flush()
826 struct blkcg_gq *blkg = blk_queue_root_blkg(disk->queue); in blkcg_fill_root_iostats() local
849 u64_stats_update_begin(&blkg->iostat.sync); in blkcg_fill_root_iostats()
850 blkg_iostat_set(&blkg->iostat.cur, &tmp); in blkcg_fill_root_iostats()
851 u64_stats_update_end(&blkg->iostat.sync); in blkcg_fill_root_iostats()
860 struct blkcg_gq *blkg; in blkcg_print_stat() local
869 hlist_for_each_entry_rcu(blkg, &blkcg->blkg_list, blkcg_node) { in blkcg_print_stat()
870 struct blkg_iostat_set *bis = &blkg->iostat; in blkcg_print_stat()
879 spin_lock_irq(&blkg->q->queue_lock); in blkcg_print_stat()
881 if (!blkg->online) in blkcg_print_stat()
884 dname = blkg_dev_name(blkg); in blkcg_print_stat()
915 if (blkcg_debug_stats && atomic_read(&blkg->use_delay)) { in blkcg_print_stat()
919 atomic_read(&blkg->use_delay), in blkcg_print_stat()
920 (unsigned long long)atomic64_read(&blkg->delay_nsec)); in blkcg_print_stat()
927 if (!blkg->pd[i] || !pol->pd_stat_fn) in blkcg_print_stat()
930 written = pol->pd_stat_fn(blkg->pd[i], buf+off, size-off); in blkcg_print_stat()
945 spin_unlock_irq(&blkg->q->queue_lock); in blkcg_print_stat()
1026 struct blkcg_gq *blkg = hlist_entry(blkcg->blkg_list.first, in blkcg_destroy_blkgs() local
1028 struct request_queue *q = blkg->q; in blkcg_destroy_blkgs()
1042 blkg_destroy(blkg); in blkcg_destroy_blkgs()
1162 struct blkcg_gq *new_blkg, *blkg; in blkcg_init_queue() local
1175 blkg = blkg_create(&blkcg_root, q, new_blkg); in blkcg_init_queue()
1176 if (IS_ERR(blkg)) in blkcg_init_queue()
1178 q->root_blkg = blkg; in blkcg_init_queue()
1209 return PTR_ERR(blkg); in blkcg_init_queue()
1320 struct blkcg_gq *blkg, *pinned_blkg = NULL; in blkcg_activate_policy() local
1332 list_for_each_entry_reverse(blkg, &q->blkg_list, q_node) { in blkcg_activate_policy()
1335 if (blkg->pd[pol->plid]) in blkcg_activate_policy()
1339 if (blkg == pinned_blkg) { in blkcg_activate_policy()
1344 blkg->blkcg); in blkcg_activate_policy()
1354 blkg_get(blkg); in blkcg_activate_policy()
1355 pinned_blkg = blkg; in blkcg_activate_policy()
1362 blkg->blkcg); in blkcg_activate_policy()
1369 blkg->pd[pol->plid] = pd; in blkcg_activate_policy()
1370 pd->blkg = blkg; in blkcg_activate_policy()
1376 list_for_each_entry_reverse(blkg, &q->blkg_list, q_node) in blkcg_activate_policy()
1377 pol->pd_init_fn(blkg->pd[pol->plid]); in blkcg_activate_policy()
1380 list_for_each_entry_reverse(blkg, &q->blkg_list, q_node) in blkcg_activate_policy()
1381 pol->pd_online_fn(blkg->pd[pol->plid]); in blkcg_activate_policy()
1399 list_for_each_entry(blkg, &q->blkg_list, q_node) { in blkcg_activate_policy()
1400 struct blkcg *blkcg = blkg->blkcg; in blkcg_activate_policy()
1403 if (blkg->pd[pol->plid]) { in blkcg_activate_policy()
1404 pol->pd_free_fn(blkg->pd[pol->plid]); in blkcg_activate_policy()
1405 blkg->pd[pol->plid] = NULL; in blkcg_activate_policy()
1426 struct blkcg_gq *blkg; in blkcg_deactivate_policy() local
1438 list_for_each_entry(blkg, &q->blkg_list, q_node) { in blkcg_deactivate_policy()
1439 struct blkcg *blkcg = blkg->blkcg; in blkcg_deactivate_policy()
1442 if (blkg->pd[pol->plid]) { in blkcg_deactivate_policy()
1444 pol->pd_offline_fn(blkg->pd[pol->plid]); in blkcg_deactivate_policy()
1445 pol->pd_free_fn(blkg->pd[pol->plid]); in blkcg_deactivate_policy()
1446 blkg->pd[pol->plid] = NULL; in blkcg_deactivate_policy()
1580 struct blkcg_gq *blkg = bio->bi_blkg; in __blkcg_punt_bio_submit() local
1586 if (!blkg->parent) in __blkcg_punt_bio_submit()
1589 spin_lock_bh(&blkg->async_bio_lock); in __blkcg_punt_bio_submit()
1590 bio_list_add(&blkg->async_bios, bio); in __blkcg_punt_bio_submit()
1591 spin_unlock_bh(&blkg->async_bio_lock); in __blkcg_punt_bio_submit()
1593 queue_work(blkcg_punt_bio_wq, &blkg->async_bio_work); in __blkcg_punt_bio_submit()
1603 static void blkcg_scale_delay(struct blkcg_gq *blkg, u64 now) in blkcg_scale_delay() argument
1605 u64 old = atomic64_read(&blkg->delay_start); in blkcg_scale_delay()
1608 if (atomic_read(&blkg->use_delay) < 0) in blkcg_scale_delay()
1625 atomic64_cmpxchg(&blkg->delay_start, old, now) == old) { in blkcg_scale_delay()
1626 u64 cur = atomic64_read(&blkg->delay_nsec); in blkcg_scale_delay()
1627 u64 sub = min_t(u64, blkg->last_delay, now - old); in blkcg_scale_delay()
1628 int cur_use = atomic_read(&blkg->use_delay); in blkcg_scale_delay()
1634 if (cur_use < blkg->last_use) in blkcg_scale_delay()
1635 sub = max_t(u64, sub, blkg->last_delay >> 1); in blkcg_scale_delay()
1644 atomic64_set(&blkg->delay_nsec, 0); in blkcg_scale_delay()
1645 blkg->last_delay = 0; in blkcg_scale_delay()
1647 atomic64_sub(sub, &blkg->delay_nsec); in blkcg_scale_delay()
1648 blkg->last_delay = cur - sub; in blkcg_scale_delay()
1650 blkg->last_use = cur_use; in blkcg_scale_delay()
1660 static void blkcg_maybe_throttle_blkg(struct blkcg_gq *blkg, bool use_memdelay) in blkcg_maybe_throttle_blkg() argument
1669 while (blkg->parent) { in blkcg_maybe_throttle_blkg()
1670 int use_delay = atomic_read(&blkg->use_delay); in blkcg_maybe_throttle_blkg()
1675 blkcg_scale_delay(blkg, now); in blkcg_maybe_throttle_blkg()
1676 this_delay = atomic64_read(&blkg->delay_nsec); in blkcg_maybe_throttle_blkg()
1682 blkg = blkg->parent; in blkcg_maybe_throttle_blkg()
1731 struct blkcg_gq *blkg; in blkcg_maybe_throttle_current() local
1749 blkg = blkg_lookup(blkcg, q); in blkcg_maybe_throttle_current()
1750 if (!blkg) in blkcg_maybe_throttle_current()
1752 if (!blkg_tryget(blkg)) in blkcg_maybe_throttle_current()
1756 blkcg_maybe_throttle_blkg(blkg, use_memdelay); in blkcg_maybe_throttle_current()
1757 blkg_put(blkg); in blkcg_maybe_throttle_current()
1808 void blkcg_add_delay(struct blkcg_gq *blkg, u64 now, u64 delta) in blkcg_add_delay() argument
1810 if (WARN_ON_ONCE(atomic_read(&blkg->use_delay) < 0)) in blkcg_add_delay()
1812 blkcg_scale_delay(blkg, now); in blkcg_add_delay()
1813 atomic64_add(delta, &blkg->delay_nsec); in blkcg_add_delay()
1828 struct blkcg_gq *blkg, *ret_blkg = NULL; in blkg_tryget_closest() local
1831 blkg = blkg_lookup_create(css_to_blkcg(css), bio->bi_disk->queue); in blkg_tryget_closest()
1832 while (blkg) { in blkg_tryget_closest()
1833 if (blkg_tryget(blkg)) { in blkg_tryget_closest()
1834 ret_blkg = blkg; in blkg_tryget_closest()
1837 blkg = blkg->parent; in blkg_tryget_closest()
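The __blkcg_punt_bio_submit() and blkg_async_bio_workfn() matches above show the punt pattern: the submitter adds the bio to blkg->async_bios under blkg->async_bio_lock and queues the per-blkg work item, and the worker then splices the whole list out under the same lock (bio_list_merge() followed by bio_list_init()) before submitting the bios with the lock dropped. Below is a minimal userspace analog of that splice-under-lock idea, assuming only POSIX threads for the mutex; the names (punt_queue, work_item, punt(), worker()) are hypothetical stand-ins, not blk-cgroup.c APIs.

/*
 * Userspace sketch of the punt pattern: producers append to a locked list,
 * the worker takes the whole list in one critical section and processes the
 * entries with the lock dropped.  (This simple prepend is LIFO; the kernel's
 * bio_list keeps FIFO order.)
 */
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct work_item {
	int id;
	struct work_item *next;
};

struct punt_queue {
	pthread_mutex_t lock;		/* analog of blkg->async_bio_lock */
	struct work_item *head;		/* analog of blkg->async_bios */
};

static struct punt_queue q = {
	.lock = PTHREAD_MUTEX_INITIALIZER,
};

static void punt(struct work_item *w)
{
	pthread_mutex_lock(&q.lock);	/* spin_lock_bh() in the kernel */
	w->next = q.head;
	q.head = w;
	pthread_mutex_unlock(&q.lock);
	/* the kernel would now queue_work() the per-blkg worker */
}

static void worker(void)
{
	struct work_item *list;

	pthread_mutex_lock(&q.lock);
	list = q.head;			/* bio_list_merge(&bios, ...) */
	q.head = NULL;			/* bio_list_init(&blkg->async_bios) */
	pthread_mutex_unlock(&q.lock);

	while (list) {			/* submit outside the lock */
		struct work_item *w = list;

		list = w->next;
		printf("submitting item %d\n", w->id);
		free(w);
	}
}

int main(void)
{
	for (int i = 0; i < 3; i++) {
		struct work_item *w = malloc(sizeof(*w));

		w->id = i;
		punt(w);
	}
	worker();
	return 0;
}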
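The blkcg_rstat_flush() matches show the delta bookkeeping used when per-cpu iostat snapshots are flushed up the cgroup tree: each level computes how much its cumulative counters grew since the last flush (cur minus last), folds that delta upward, and then advances last by the same amount. A minimal sketch of that arithmetic follows, with a hypothetical iostat struct standing in for struct blkg_iostat; the u64_stats seqcount protection and the per-cpu fetch loop are omitted, and the fold into the parent's counters reflects the surrounding kernel code rather than the matched lines themselves.

#include <stdint.h>
#include <stdio.h>

/* Hypothetical stand-in for struct blkg_iostat: one field per counter. */
struct iostat {
	uint64_t bytes;
	uint64_t ios;
};

struct node {
	struct node *parent;
	struct iostat cur;	/* cumulative, what readers see */
	struct iostat last;	/* portion of cur already pushed to the parent */
};

static void iostat_add(struct iostat *dst, const struct iostat *src)
{
	dst->bytes += src->bytes;
	dst->ios += src->ios;
}

static void iostat_sub(struct iostat *dst, const struct iostat *src)
{
	dst->bytes -= src->bytes;
	dst->ios -= src->ios;
}

/*
 * Push one node's growth since the previous flush to its parent:
 * delta = cur - last; parent->cur += delta; last += delta.
 */
static void flush_one(struct node *n)
{
	struct iostat delta = n->cur;	/* blkg_iostat_set(&delta, &cur) */

	iostat_sub(&delta, &n->last);	/* blkg_iostat_sub(&delta, &last) */
	if (n->parent)
		iostat_add(&n->parent->cur, &delta);
	iostat_add(&n->last, &delta);	/* last catches up to cur */
}

int main(void)
{
	struct node root = { 0 };
	struct node child = { .parent = &root };

	child.cur.bytes = 4096;
	child.cur.ios = 1;
	flush_one(&child);		/* root now sees the child's I/O */
	printf("root: %llu bytes, %llu ios\n",
	       (unsigned long long)root.cur.bytes,
	       (unsigned long long)root.cur.ios);
	return 0;
}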