Lines matching references to the identifier q (block/blk-cgroup.c)

61 static bool blkcg_policy_enabled(struct request_queue *q,  in blkcg_policy_enabled()  argument
64 return pol && test_bit(pol->plid, q->blkcg_pols); in blkcg_policy_enabled()
151 static struct blkcg_gq *blkg_alloc(struct blkcg *blkcg, struct request_queue *q, in blkg_alloc() argument
158 blkg = kzalloc_node(sizeof(*blkg), gfp_mask, q->node); in blkg_alloc()
169 blkg->q = q; in blkg_alloc()
184 if (!blkcg_policy_enabled(q, pol)) in blkg_alloc()
188 pd = pol->pd_alloc_fn(gfp_mask, q, blkcg); in blkg_alloc()
205 struct request_queue *q, bool update_hint) in blkg_lookup_slowpath() argument
215 blkg = radix_tree_lookup(&blkcg->blkg_tree, q->id); in blkg_lookup_slowpath()
216 if (blkg && blkg->q == q) { in blkg_lookup_slowpath()
218 lockdep_assert_held(&q->queue_lock); in blkg_lookup_slowpath()
233 struct request_queue *q, in blkg_create() argument
240 lockdep_assert_held(&q->queue_lock); in blkg_create()
243 if (blk_queue_dying(q)) { in blkg_create()
256 new_blkg = blkg_alloc(blkcg, q, GFP_NOWAIT | __GFP_NOWARN); in blkg_create()
266 blkg->parent = __blkg_lookup(blkcg_parent(blkcg), q, false); in blkg_create()
284 ret = radix_tree_insert(&blkcg->blkg_tree, q->id, blkg); in blkg_create()
287 list_add(&blkg->q_node, &q->blkg_list); in blkg_create()
327 struct request_queue *q) in blkg_lookup_create() argument
334 blkg = blkg_lookup(blkcg, q); in blkg_lookup_create()
338 spin_lock_irqsave(&q->queue_lock, flags); in blkg_lookup_create()
339 blkg = __blkg_lookup(blkcg, q, true); in blkg_lookup_create()
351 struct blkcg_gq *ret_blkg = q->root_blkg; in blkg_lookup_create()
354 blkg = __blkg_lookup(parent, q, false); in blkg_lookup_create()
364 blkg = blkg_create(pos, q, NULL); in blkg_lookup_create()
374 spin_unlock_irqrestore(&q->queue_lock, flags); in blkg_lookup_create()
383 lockdep_assert_held(&blkg->q->queue_lock); in blkg_destroy()
399 radix_tree_delete(&blkcg->blkg_tree, blkg->q->id); in blkg_destroy()
424 static void blkg_destroy_all(struct request_queue *q) in blkg_destroy_all() argument
428 spin_lock_irq(&q->queue_lock); in blkg_destroy_all()
429 list_for_each_entry_safe(blkg, n, &q->blkg_list, q_node) { in blkg_destroy_all()
437 q->root_blkg = NULL; in blkg_destroy_all()
438 spin_unlock_irq(&q->queue_lock); in blkg_destroy_all()
480 if (blkg->q->backing_dev_info->dev) in blkg_dev_name()
481 return bdi_dev_name(blkg->q->backing_dev_info); in blkg_dev_name()
514 spin_lock_irq(&blkg->q->queue_lock); in blkcg_print_blkgs()
515 if (blkcg_policy_enabled(blkg->q, pol)) in blkcg_print_blkgs()
517 spin_unlock_irq(&blkg->q->queue_lock); in blkcg_print_blkgs()
549 struct request_queue *q) in blkg_lookup_check() argument
552 lockdep_assert_held(&q->queue_lock); in blkg_lookup_check()
554 if (!blkcg_policy_enabled(q, pol)) in blkg_lookup_check()
556 return __blkg_lookup(blkcg, q, true /* update_hint */); in blkg_lookup_check()
614 struct request_queue *q; in blkg_conf_prep() local
622 q = disk->queue; in blkg_conf_prep()
625 spin_lock_irq(&q->queue_lock); in blkg_conf_prep()
627 blkg = blkg_lookup_check(blkcg, pol, q); in blkg_conf_prep()
646 while (parent && !__blkg_lookup(parent, q, false)) { in blkg_conf_prep()
652 spin_unlock_irq(&q->queue_lock); in blkg_conf_prep()
655 new_blkg = blkg_alloc(pos, q, GFP_KERNEL); in blkg_conf_prep()
668 spin_lock_irq(&q->queue_lock); in blkg_conf_prep()
670 blkg = blkg_lookup_check(pos, pol, q); in blkg_conf_prep()
680 blkg = blkg_create(pos, q, new_blkg); in blkg_conf_prep()
701 spin_unlock_irq(&q->queue_lock); in blkg_conf_prep()
879 spin_lock_irq(&blkg->q->queue_lock); in blkcg_print_stat()
945 spin_unlock_irq(&blkg->q->queue_lock); in blkcg_print_stat()
1028 struct request_queue *q = blkg->q; in blkcg_destroy_blkgs() local
1030 if (need_resched() || !spin_trylock(&q->queue_lock)) { in blkcg_destroy_blkgs()
1043 spin_unlock(&q->queue_lock); in blkcg_destroy_blkgs()
1160 int blkcg_init_queue(struct request_queue *q) in blkcg_init_queue() argument
1166 new_blkg = blkg_alloc(&blkcg_root, q, GFP_KERNEL); in blkcg_init_queue()
1174 spin_lock_irq(&q->queue_lock); in blkcg_init_queue()
1175 blkg = blkg_create(&blkcg_root, q, new_blkg); in blkcg_init_queue()
1178 q->root_blkg = blkg; in blkcg_init_queue()
1179 spin_unlock_irq(&q->queue_lock); in blkcg_init_queue()
1185 ret = blk_ioprio_init(q); in blkcg_init_queue()
1189 ret = blk_throtl_init(q); in blkcg_init_queue()
1193 ret = blk_iolatency_init(q); in blkcg_init_queue()
1195 blk_throtl_exit(q); in blkcg_init_queue()
1202 blkg_destroy_all(q); in blkcg_init_queue()
1205 spin_unlock_irq(&q->queue_lock); in blkcg_init_queue()
1218 void blkcg_exit_queue(struct request_queue *q) in blkcg_exit_queue() argument
1220 blkg_destroy_all(q); in blkcg_exit_queue()
1221 blk_throtl_exit(q); in blkcg_exit_queue()
1316 int blkcg_activate_policy(struct request_queue *q, in blkcg_activate_policy() argument
1323 if (blkcg_policy_enabled(q, pol)) in blkcg_activate_policy()
1326 if (queue_is_mq(q)) in blkcg_activate_policy()
1327 blk_mq_freeze_queue(q); in blkcg_activate_policy()
1329 spin_lock_irq(&q->queue_lock); in blkcg_activate_policy()
1332 list_for_each_entry_reverse(blkg, &q->blkg_list, q_node) { in blkcg_activate_policy()
1343 pd = pol->pd_alloc_fn(GFP_NOWAIT | __GFP_NOWARN, q, in blkcg_activate_policy()
1357 spin_unlock_irq(&q->queue_lock); in blkcg_activate_policy()
1361 pd_prealloc = pol->pd_alloc_fn(GFP_KERNEL, q, in blkcg_activate_policy()
1376 list_for_each_entry_reverse(blkg, &q->blkg_list, q_node) in blkcg_activate_policy()
1380 list_for_each_entry_reverse(blkg, &q->blkg_list, q_node) in blkcg_activate_policy()
1383 __set_bit(pol->plid, q->blkcg_pols); in blkcg_activate_policy()
1386 spin_unlock_irq(&q->queue_lock); in blkcg_activate_policy()
1388 if (queue_is_mq(q)) in blkcg_activate_policy()
1389 blk_mq_unfreeze_queue(q); in blkcg_activate_policy()
1398 spin_lock_irq(&q->queue_lock); in blkcg_activate_policy()
1399 list_for_each_entry(blkg, &q->blkg_list, q_node) { in blkcg_activate_policy()
1409 spin_unlock_irq(&q->queue_lock); in blkcg_activate_policy()
1423 void blkcg_deactivate_policy(struct request_queue *q, in blkcg_deactivate_policy() argument
1428 if (!blkcg_policy_enabled(q, pol)) in blkcg_deactivate_policy()
1431 if (queue_is_mq(q)) in blkcg_deactivate_policy()
1432 blk_mq_freeze_queue(q); in blkcg_deactivate_policy()
1434 spin_lock_irq(&q->queue_lock); in blkcg_deactivate_policy()
1436 __clear_bit(pol->plid, q->blkcg_pols); in blkcg_deactivate_policy()
1438 list_for_each_entry(blkg, &q->blkg_list, q_node) { in blkcg_deactivate_policy()
1451 spin_unlock_irq(&q->queue_lock); in blkcg_deactivate_policy()
1453 if (queue_is_mq(q)) in blkcg_deactivate_policy()
1454 blk_mq_unfreeze_queue(q); in blkcg_deactivate_policy()
1728 struct request_queue *q = current->throttle_queue; in blkcg_maybe_throttle_current() local
1734 if (!q) in blkcg_maybe_throttle_current()
1749 blkg = blkg_lookup(blkcg, q); in blkcg_maybe_throttle_current()
1758 blk_put_queue(q); in blkcg_maybe_throttle_current()
1762 blk_put_queue(q); in blkcg_maybe_throttle_current()
1782 void blkcg_schedule_throttle(struct request_queue *q, bool use_memdelay) in blkcg_schedule_throttle() argument
1787 if (!blk_get_queue(q)) in blkcg_schedule_throttle()
1792 current->throttle_queue = q; in blkcg_schedule_throttle()