
Lines matching refs:q: every reference to the request_queue pointer q in the Linux kernel's blkcg code (block/blk-cgroup.c), listed with the file's own line number, the matching source line, and the enclosing function.

52 static bool blkcg_policy_enabled(struct request_queue *q,  in blkcg_policy_enabled()  argument
55 return pol && test_bit(pol->plid, q->blkcg_pols); in blkcg_policy_enabled()
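Lines 52 and 55 above are effectively the whole helper: a policy counts as enabled on a queue when its plid bit is set in the queue's blkcg_pols bitmap. A minimal reconstruction, assuming the name of the second parameter (pol), which the excerpt truncates:

/*
 * Reconstructed from lines 52 and 55 above; the name of the second
 * parameter (pol) is an assumption, the excerpt truncates it.
 */
static bool blkcg_policy_enabled(struct request_queue *q,
                                 const struct blkcg_policy *pol)
{
        /* enabled iff the policy's plid bit is set in the queue's bitmap */
        return pol && test_bit(pol->plid, q->blkcg_pols);
}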
91 static struct blkcg_gq *blkg_alloc(struct blkcg *blkcg, struct request_queue *q, in blkg_alloc() argument
98 blkg = kzalloc_node(sizeof(*blkg), gfp_mask, q->node); in blkg_alloc()
106 blkg->q = q; in blkg_alloc()
113 if (blk_init_rl(&blkg->rl, q, gfp_mask)) in blkg_alloc()
122 if (!blkcg_policy_enabled(q, pol)) in blkg_alloc()
126 pd = pol->pd_alloc_fn(gfp_mask, q->node); in blkg_alloc()
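The blkg_alloc() hits (lines 91 to 126) show the per-queue allocation pattern: the blkcg_gq itself, its embedded request_list, and one policy-private pd per enabled policy are all allocated on the queue's NUMA node (q->node). A condensed sketch of that flow; the gfp_mask parameter position, the BLKCG_MAX_POLS loop, the error label, and blkg_free() are assumptions not shown in the excerpt, and reference counting and list initialisation are omitted:

/*
 * Condensed sketch of blkg_alloc(); the blkcg_policy[] iteration, the
 * error label, and blkg_free() are assumptions not shown in the excerpt.
 */
static struct blkcg_gq *blkg_alloc(struct blkcg *blkcg,
                                   struct request_queue *q,
                                   gfp_t gfp_mask)
{
        struct blkcg_gq *blkg;
        int i;

        blkg = kzalloc_node(sizeof(*blkg), gfp_mask, q->node);     /* line 98 */
        if (!blkg)
                return NULL;

        blkg->q = q;                                               /* line 106 */
        if (blk_init_rl(&blkg->rl, q, gfp_mask))                   /* line 113 */
                goto err_free;

        for (i = 0; i < BLKCG_MAX_POLS; i++) {
                struct blkcg_policy *pol = blkcg_policy[i];
                struct blkg_policy_data *pd;

                if (!blkcg_policy_enabled(q, pol))                 /* line 122 */
                        continue;

                /* per-policy data also lands on the queue's NUMA node */
                pd = pol->pd_alloc_fn(gfp_mask, q->node);          /* line 126 */
                if (!pd)
                        goto err_free;
                blkg->pd[i] = pd;
                pd->blkg = blkg;
                pd->plid = i;
        }
        return blkg;

err_free:
        blkg_free(blkg);
        return NULL;
}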
143 struct request_queue *q, bool update_hint) in blkg_lookup_slowpath() argument
153 blkg = radix_tree_lookup(&blkcg->blkg_tree, q->id); in blkg_lookup_slowpath()
154 if (blkg && blkg->q == q) { in blkg_lookup_slowpath()
156 lockdep_assert_held(q->queue_lock); in blkg_lookup_slowpath()
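blkg_lookup_slowpath() (lines 143 to 156) resolves a blkcg/queue pair through the per-blkcg radix tree keyed by q->id, and verifies blkg->q because one cgroup's tree holds blkgs for many queues; the lookup hint is only cached when the caller holds the queue lock. A sketch; the blkg_hint update is an assumption suggested by the lockdep assertion on line 156:

/*
 * Sketch of the slow-path lookup; the blkg_hint update is an assumption
 * suggested by the lockdep assertion on line 156, it is not shown
 * verbatim in the excerpt.
 */
struct blkcg_gq *blkg;

blkg = radix_tree_lookup(&blkcg->blkg_tree, q->id);                /* line 153 */
if (blkg && blkg->q == q) {                                        /* line 154 */
        if (update_hint) {
                lockdep_assert_held(q->queue_lock);                /* line 156 */
                rcu_assign_pointer(blkcg->blkg_hint, blkg);
        }
        return blkg;
}
return NULL;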
171 struct request_queue *q, in blkg_create() argument
179 lockdep_assert_held(q->queue_lock); in blkg_create()
187 wb_congested = wb_congested_get_create(&q->backing_dev_info, in blkg_create()
197 new_blkg = blkg_alloc(blkcg, q, GFP_NOWAIT | __GFP_NOWARN); in blkg_create()
208 blkg->parent = __blkg_lookup(blkcg_parent(blkcg), q, false); in blkg_create()
226 ret = radix_tree_insert(&blkcg->blkg_tree, q->id, blkg); in blkg_create()
229 list_add(&blkg->q_node, &q->blkg_list); in blkg_create()
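blkg_create() (lines 171 to 229) runs entirely under q->queue_lock: it pins writeback-congestion state for the cgroup on the queue's backing device, falls back to a GFP_NOWAIT allocation when the caller did not pass in a preallocated blkg, links the parent blkg for the hierarchy, and publishes the new blkg in both the radix tree and q->blkg_list. An ordering sketch; error unwinding and the per-policy pd_init_fn() calls are omitted, and the second and third arguments to wb_congested_get_create() are assumptions because the excerpt truncates that call:

/*
 * Ordering sketch of blkg_create(); wb_congested, new_blkg, blkg and
 * ret come from the surrounding function context.  The css.id and GFP
 * arguments to wb_congested_get_create() are assumptions.
 */
lockdep_assert_held(q->queue_lock);                                /* line 179 */

wb_congested = wb_congested_get_create(&q->backing_dev_info,
                                       blkcg->css.id,
                                       GFP_NOWAIT | __GFP_NOWARN);

/* use the caller's preallocated blkg if there is one */
if (!new_blkg)
        new_blkg = blkg_alloc(blkcg, q, GFP_NOWAIT | __GFP_NOWARN); /* line 197 */
blkg = new_blkg;

/* non-root blkgs are parented so limits and stats can be hierarchical */
if (blkcg_parent(blkcg))
        blkg->parent = __blkg_lookup(blkcg_parent(blkcg), q, false); /* line 208 */

/* publish: radix tree for lookup by q->id, list for per-queue walks */
ret = radix_tree_insert(&blkcg->blkg_tree, q->id, blkg);           /* line 226 */
if (!ret)
        list_add(&blkg->q_node, &q->blkg_list);                    /* line 229 */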
272 struct request_queue *q) in blkg_lookup_create() argument
277 lockdep_assert_held(q->queue_lock); in blkg_lookup_create()
283 if (unlikely(blk_queue_bypass(q))) in blkg_lookup_create()
284 return ERR_PTR(blk_queue_dying(q) ? -ENODEV : -EBUSY); in blkg_lookup_create()
286 blkg = __blkg_lookup(blkcg, q, true); in blkg_lookup_create()
298 while (parent && !__blkg_lookup(parent, q, false)) { in blkg_lookup_create()
303 blkg = blkg_create(pos, q, NULL); in blkg_lookup_create()
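blkg_lookup_create() (lines 272 to 303) is the lookup-or-create front end: it refuses to create anything on a bypassing or dying queue, tries the hinted lookup first, and otherwise climbs toward the root to find the highest ancestor cgroup that still lacks a blkg, creating blkgs top-down from there. A sketch of that loop; the pos == blkcg termination test is an assumption the excerpt does not show:

/*
 * Sketch of the lookup-or-create loop; the pos == blkcg termination
 * test is an assumption the excerpt does not show.
 */
if (unlikely(blk_queue_bypass(q)))                                 /* line 283 */
        return ERR_PTR(blk_queue_dying(q) ? -ENODEV : -EBUSY);     /* line 284 */

blkg = __blkg_lookup(blkcg, q, true);                              /* line 286 */
if (blkg)
        return blkg;

for (;;) {
        struct blkcg *pos = blkcg;
        struct blkcg *parent = blkcg_parent(blkcg);

        /* climb until the parent already has a blkg on this queue */
        while (parent && !__blkg_lookup(parent, q, false)) {       /* line 298 */
                pos = parent;
                parent = blkcg_parent(parent);
        }

        /* create at the highest missing level, then work back down */
        blkg = blkg_create(pos, q, NULL);                          /* line 303 */
        if (pos == blkcg || IS_ERR(blkg))
                return blkg;
}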
315 lockdep_assert_held(blkg->q->queue_lock); in blkg_destroy()
336 radix_tree_delete(&blkcg->blkg_tree, blkg->q->id); in blkg_destroy()
361 static void blkg_destroy_all(struct request_queue *q) in blkg_destroy_all() argument
365 lockdep_assert_held(q->queue_lock); in blkg_destroy_all()
367 list_for_each_entry_safe(blkg, n, &q->blkg_list, q_node) { in blkg_destroy_all()
375 q->root_blkg = NULL; in blkg_destroy_all()
376 q->root_rl.blkg = NULL; in blkg_destroy_all()
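Teardown mirrors creation. blkg_destroy() (lines 315 and 336) must run with the owning queue's lock held and removes the blkg from the per-blkcg radix tree; blkg_destroy_all() (lines 361 to 376) walks q->blkg_list and finally clears the queue's cached root pointers. A sketch of blkg_destroy_all(); taking each blkg's blkcg->lock around the destroy is an assumption implied by the radix-tree delete on line 336:

/*
 * Sketch of blkg_destroy_all(); the per-blkcg locking is an assumption,
 * it is not part of the excerpt.
 */
static void blkg_destroy_all(struct request_queue *q)
{
        struct blkcg_gq *blkg, *n;

        lockdep_assert_held(q->queue_lock);                        /* line 365 */

        list_for_each_entry_safe(blkg, n, &q->blkg_list, q_node) { /* line 367 */
                struct blkcg *blkcg = blkg->blkcg;

                spin_lock(&blkcg->lock);
                blkg_destroy(blkg);     /* also deletes from blkg_tree */
                spin_unlock(&blkcg->lock);
        }

        /* the root blkg is gone as well; drop the cached pointers */
        q->root_blkg = NULL;                                       /* line 375 */
        q->root_rl.blkg = NULL;                                    /* line 376 */
}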
407 struct request_queue *q) in __blk_queue_next_rl() argument
416 if (rl == &q->root_rl) { in __blk_queue_next_rl()
417 ent = &q->blkg_list; in __blk_queue_next_rl()
428 if (ent == &q->root_blkg->q_node) in __blk_queue_next_rl()
430 if (ent == &q->blkg_list) in __blk_queue_next_rl()
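__blk_queue_next_rl() (lines 407 to 430) backs blk_queue_for_each_rl(): the root request_list embedded in the queue is visited first and is mapped to the list head (line 417), the walk then follows q->blkg_list, the root blkg's own node is skipped because its request_list is q->root_rl (line 428), and reaching the head again ends the iteration (line 430). A sketch of that stepping logic; the container_of() conversions are assumptions implied by the q_node membership:

/*
 * Sketch of the request_list iteration; the container_of() conversions
 * are assumptions implied by the q_node membership in the excerpt.
 */
struct list_head *ent;
struct blkcg_gq *blkg;

if (rl == &q->root_rl) {                                           /* line 416 */
        /* root_rl lives in the queue, not on blkg_list: map to the head */
        ent = &q->blkg_list;                                       /* line 417 */
        if (list_empty(ent))
                return NULL;
} else {
        blkg = container_of(rl, struct blkcg_gq, rl);
        ent = &blkg->q_node;
}

/* advance, skipping the root blkg whose request_list is q->root_rl */
ent = ent->next;
if (ent == &q->root_blkg->q_node)                                  /* line 428 */
        ent = ent->next;
if (ent == &q->blkg_list)                                          /* line 430 */
        return NULL;

blkg = container_of(ent, struct blkcg_gq, q_node);
return &blkg->rl;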
472 if (blkg->q->backing_dev_info.dev) in blkg_dev_name()
473 return dev_name(blkg->q->backing_dev_info.dev); in blkg_dev_name()
507 spin_lock_irq(blkg->q->queue_lock); in blkcg_print_blkgs()
508 if (blkcg_policy_enabled(blkg->q, pol)) in blkcg_print_blkgs()
510 spin_unlock_irq(blkg->q->queue_lock); in blkcg_print_blkgs()
706 lockdep_assert_held(blkg->q->queue_lock); in blkg_stat_recursive_sum()
749 lockdep_assert_held(blkg->q->queue_lock); in blkg_rwstat_recursive_sum()
886 spin_lock_irq(blkg->q->queue_lock); in blkcg_print_stat()
898 spin_unlock_irq(blkg->q->queue_lock); in blkcg_print_stat()
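The reporting helpers (lines 472 to 898) share one locking rule: anything that touches a blkg's policy data takes blkg->q->queue_lock, or asserts it is already held as the recursive-sum helpers on lines 706 and 749 do, and per-policy output is emitted only while the policy is still enabled on that queue. A sketch of the per-blkg loop in blkcg_print_blkgs(); the RCU-protected hlist iteration and the prfill/total bookkeeping are assumptions, the excerpt shows only the locking on lines 507 to 510:

/*
 * Sketch of the blkcg_print_blkgs() loop; the hlist iteration over the
 * blkcg's blkg list and the prfill accumulation are assumptions.
 */
rcu_read_lock();
hlist_for_each_entry_rcu(blkg, &blkcg->blkg_list, blkcg_node) {
        spin_lock_irq(blkg->q->queue_lock);                        /* line 507 */
        if (blkcg_policy_enabled(blkg->q, pol))                    /* line 508 */
                total += prfill(sf, blkg->pd[pol->plid], data);
        spin_unlock_irq(blkg->q->queue_lock);                      /* line 510 */
}
rcu_read_unlock();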
946 struct request_queue *q = blkg->q; in blkcg_css_offline() local
948 if (spin_trylock(q->queue_lock)) { in blkcg_css_offline()
950 spin_unlock(q->queue_lock); in blkcg_css_offline()
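blkcg_css_offline() (lines 946 to 950) cannot take q->queue_lock unconditionally, because the established lock order is queue lock first and then blkcg->lock, and here the blkcg lock is already held. Hence the spin_trylock(): destroy the blkg when the queue lock is won, otherwise back off and retry. A sketch of that loop; the surrounding blkcg->lock handling and the cpu_relax() retry are assumptions, only the trylock pair appears in the excerpt:

/*
 * Sketch of the lock-order workaround in blkcg_css_offline(); the
 * blkcg->lock handling and the retry are assumptions.
 */
while (!hlist_empty(&blkcg->blkg_list)) {
        struct blkcg_gq *blkg = hlist_entry(blkcg->blkg_list.first,
                                            struct blkcg_gq, blkcg_node);
        struct request_queue *q = blkg->q;                         /* line 946 */

        if (spin_trylock(q->queue_lock)) {                         /* line 948 */
                blkg_destroy(blkg);
                spin_unlock(q->queue_lock);                        /* line 950 */
        } else {
                /* lock order is queue_lock then blkcg->lock: back off */
                spin_unlock_irq(&blkcg->lock);
                cpu_relax();
                spin_lock_irq(&blkcg->lock);
        }
}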
1056 int blkcg_init_queue(struct request_queue *q) in blkcg_init_queue() argument
1062 new_blkg = blkg_alloc(&blkcg_root, q, GFP_KERNEL); in blkcg_init_queue()
1074 spin_lock_irq(q->queue_lock); in blkcg_init_queue()
1075 blkg = blkg_create(&blkcg_root, q, new_blkg); in blkcg_init_queue()
1076 spin_unlock_irq(q->queue_lock); in blkcg_init_queue()
1085 q->root_blkg = blkg; in blkcg_init_queue()
1086 q->root_rl.blkg = blkg; in blkcg_init_queue()
1088 ret = blk_throtl_init(q); in blkcg_init_queue()
1090 spin_lock_irq(q->queue_lock); in blkcg_init_queue()
1091 blkg_destroy_all(q); in blkcg_init_queue()
1092 spin_unlock_irq(q->queue_lock); in blkcg_init_queue()
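blkcg_init_queue() (lines 1056 to 1092) bootstraps blkcg support for a new queue: preallocate the root blkg with GFP_KERNEL outside any lock, create it under q->queue_lock, cache it in q->root_blkg and q->root_rl.blkg, then initialise throttling; if blk_throtl_init() fails, everything just created is torn down again under the lock. A condensed sketch; the IS_ERR handling is an assumption, and any RCU or radix-tree preloading around the create is omitted:

/*
 * Condensed sketch of blkcg_init_queue(); the IS_ERR handling is an
 * assumption not shown in the excerpt.
 */
new_blkg = blkg_alloc(&blkcg_root, q, GFP_KERNEL);                 /* line 1062 */
if (!new_blkg)
        return -ENOMEM;

spin_lock_irq(q->queue_lock);                                      /* line 1074 */
blkg = blkg_create(&blkcg_root, q, new_blkg);                      /* line 1075 */
spin_unlock_irq(q->queue_lock);                                    /* line 1076 */
if (IS_ERR(blkg))
        return PTR_ERR(blkg);

/* cache the root blkg so fast paths need no lookup */
q->root_blkg = blkg;                                               /* line 1085 */
q->root_rl.blkg = blkg;                                            /* line 1086 */

ret = blk_throtl_init(q);                                          /* line 1088 */
if (ret) {
        /* undo everything created above */
        spin_lock_irq(q->queue_lock);                              /* line 1090 */
        blkg_destroy_all(q);                                       /* line 1091 */
        spin_unlock_irq(q->queue_lock);                            /* line 1092 */
}
return ret;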
1103 void blkcg_drain_queue(struct request_queue *q) in blkcg_drain_queue() argument
1105 lockdep_assert_held(q->queue_lock); in blkcg_drain_queue()
1111 if (!q->root_blkg) in blkcg_drain_queue()
1114 blk_throtl_drain(q); in blkcg_drain_queue()
1123 void blkcg_exit_queue(struct request_queue *q) in blkcg_exit_queue() argument
1125 spin_lock_irq(q->queue_lock); in blkcg_exit_queue()
1126 blkg_destroy_all(q); in blkcg_exit_queue()
1127 spin_unlock_irq(q->queue_lock); in blkcg_exit_queue()
1129 blk_throtl_exit(q); in blkcg_exit_queue()
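The drain and exit paths (lines 1103 to 1129) differ mainly in locking responsibility: blkcg_drain_queue() is called with q->queue_lock already held and only forwards to blk_throtl_drain() once a root blkg exists, while blkcg_exit_queue() takes the lock itself, destroys every blkg, and then tears down throttling. A sketch pairing the two, reconstructed almost line for line from the excerpt:

/* Sketch pairing the two paths shown on lines 1103-1129. */
void blkcg_drain_queue(struct request_queue *q)
{
        lockdep_assert_held(q->queue_lock);     /* caller already holds it */

        /* nothing to drain before blkcg_init_queue() has set up the root */
        if (!q->root_blkg)
                return;

        blk_throtl_drain(q);
}

void blkcg_exit_queue(struct request_queue *q)
{
        spin_lock_irq(q->queue_lock);
        blkg_destroy_all(q);
        spin_unlock_irq(q->queue_lock);

        blk_throtl_exit(q);
}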
1214 int blkcg_activate_policy(struct request_queue *q, in blkcg_activate_policy() argument
1221 if (blkcg_policy_enabled(q, pol)) in blkcg_activate_policy()
1224 blk_queue_bypass_start(q); in blkcg_activate_policy()
1227 pd_prealloc = pol->pd_alloc_fn(GFP_KERNEL, q->node); in blkcg_activate_policy()
1234 spin_lock_irq(q->queue_lock); in blkcg_activate_policy()
1236 list_for_each_entry(blkg, &q->blkg_list, q_node) { in blkcg_activate_policy()
1242 pd = pol->pd_alloc_fn(GFP_NOWAIT | __GFP_NOWARN, q->node); in blkcg_activate_policy()
1246 spin_unlock_irq(q->queue_lock); in blkcg_activate_policy()
1257 __set_bit(pol->plid, q->blkcg_pols); in blkcg_activate_policy()
1260 spin_unlock_irq(q->queue_lock); in blkcg_activate_policy()
1262 blk_queue_bypass_end(q); in blkcg_activate_policy()
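blkcg_activate_policy() (lines 1214 to 1262) enables a policy on a live queue with a two-speed allocation loop: put the queue into bypass, then under q->queue_lock try a GFP_NOWAIT pd allocation for every existing blkg; when that fails, fall back to a pd preallocated with GFP_KERNEL, and if even that is spent, drop the lock, refill the preallocation, and restart the walk. Only after every blkg has its pd is the plid bit set in q->blkcg_pols. A sketch of that loop; the labels, the swap() fallback, and the pd_init_fn() call are assumptions consistent with, but not shown in, the excerpt:

/*
 * Sketch of the two-speed allocation loop in blkcg_activate_policy();
 * pd_prealloc, blkg and ret come from the function context, the label
 * names are assumptions.
 */
if (blkcg_policy_enabled(q, pol))                                  /* line 1221 */
        return 0;

blk_queue_bypass_start(q);                                         /* line 1224 */
pd_prealloc:
if (!pd_prealloc) {
        pd_prealloc = pol->pd_alloc_fn(GFP_KERNEL, q->node);       /* line 1227 */
        if (!pd_prealloc) {
                ret = -ENOMEM;
                goto out_bypass_end;
        }
}

spin_lock_irq(q->queue_lock);                                      /* line 1234 */
list_for_each_entry(blkg, &q->blkg_list, q_node) {                 /* line 1236 */
        struct blkg_policy_data *pd;

        if (blkg->pd[pol->plid])
                continue;

        pd = pol->pd_alloc_fn(GFP_NOWAIT | __GFP_NOWARN, q->node); /* line 1242 */
        if (!pd)
                swap(pd, pd_prealloc); /* fall back to the GFP_KERNEL one */
        if (!pd) {
                /* even the preallocation is spent: refill outside the lock */
                spin_unlock_irq(q->queue_lock);                    /* line 1246 */
                goto pd_prealloc;
        }

        blkg->pd[pol->plid] = pd;
        pd->blkg = blkg;
        pd->plid = pol->plid;
        if (pol->pd_init_fn)
                pol->pd_init_fn(pd);
}

__set_bit(pol->plid, q->blkcg_pols);                               /* line 1257 */
ret = 0;
spin_unlock_irq(q->queue_lock);                                    /* line 1260 */
out_bypass_end:
blk_queue_bypass_end(q);                                           /* line 1262 */
if (pd_prealloc)
        pol->pd_free_fn(pd_prealloc);
return ret;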
1277 void blkcg_deactivate_policy(struct request_queue *q, in blkcg_deactivate_policy() argument
1282 if (!blkcg_policy_enabled(q, pol)) in blkcg_deactivate_policy()
1285 blk_queue_bypass_start(q); in blkcg_deactivate_policy()
1286 spin_lock_irq(q->queue_lock); in blkcg_deactivate_policy()
1288 __clear_bit(pol->plid, q->blkcg_pols); in blkcg_deactivate_policy()
1290 list_for_each_entry(blkg, &q->blkg_list, q_node) { in blkcg_deactivate_policy()
1304 spin_unlock_irq(q->queue_lock); in blkcg_deactivate_policy()
1305 blk_queue_bypass_end(q); in blkcg_deactivate_policy()
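blkcg_deactivate_policy() (lines 1277 to 1305) reverses this under the same bypass-plus-queue-lock protection: the plid bit is cleared first so no new pds appear, then q->blkg_list is walked to free each blkg's pd for the policy. A sketch; the pd_offline_fn()/pd_free_fn() calls are assumptions based on the policy callbacks, the excerpt stops at the list walk:

/*
 * Sketch of blkcg_deactivate_policy(); the pd_offline_fn()/pd_free_fn()
 * calls are assumptions not shown in the excerpt.
 */
void blkcg_deactivate_policy(struct request_queue *q,
                             const struct blkcg_policy *pol)
{
        struct blkcg_gq *blkg;

        if (!blkcg_policy_enabled(q, pol))                         /* line 1282 */
                return;

        blk_queue_bypass_start(q);                                 /* line 1285 */
        spin_lock_irq(q->queue_lock);                              /* line 1286 */

        /* clear the bit first so no new pds are created for this policy */
        __clear_bit(pol->plid, q->blkcg_pols);                     /* line 1288 */

        list_for_each_entry(blkg, &q->blkg_list, q_node) {         /* line 1290 */
                if (blkg->pd[pol->plid]) {
                        if (pol->pd_offline_fn)
                                pol->pd_offline_fn(blkg->pd[pol->plid]);
                        pol->pd_free_fn(blkg->pd[pol->plid]);
                        blkg->pd[pol->plid] = NULL;
                }
        }

        spin_unlock_irq(q->queue_lock);                            /* line 1304 */
        blk_queue_bypass_end(q);                                   /* line 1305 */
}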