/block/ |
D | blk-ioc.c |
    66  lockdep_assert_held(&ioc->lock);  in ioc_destroy_icq()
    99  spin_lock_irq(&ioc->lock);  in ioc_release_fn()
    114  spin_unlock(&ioc->lock);  in ioc_release_fn()
    116  spin_lock(&ioc->lock);  in ioc_release_fn()
    130  spin_unlock_irq(&ioc->lock);  in ioc_release_fn()
    157  spin_lock_irqsave(&ioc->lock, flags);  in put_io_context()
    163  spin_unlock_irqrestore(&ioc->lock, flags);  in put_io_context()
    186  spin_lock_irq(&ioc->lock);  in put_io_context_active()
    193  spin_unlock_irq(&ioc->lock);  in put_io_context_active()
    222  spin_lock_irqsave(&ioc->lock, flags);  in __ioc_clear_queue()
    [all …]
|
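The blk-ioc.c hits mix spin_lock_irq() (paths like ioc_release_fn(), where the caller's interrupt state is known) with spin_lock_irqsave() (paths like put_io_context(), which must work regardless of whether interrupts are already disabled). A minimal sketch of the two variants, using a hypothetical struct my_ctx rather than the real struct io_context:

    #include <linux/spinlock.h>

    /* Hypothetical stand-in for a structure like struct io_context. */
    struct my_ctx {
        spinlock_t lock;
        int refcount;
    };

    /* Caller guarantees interrupts are enabled, so the _irq variant suffices. */
    static void my_ctx_release(struct my_ctx *ctx)
    {
        spin_lock_irq(&ctx->lock);
        ctx->refcount = 0;
        spin_unlock_irq(&ctx->lock);
    }

    /* Callable from any context: save and restore the interrupt state. */
    static void my_ctx_put(struct my_ctx *ctx)
    {
        unsigned long flags;

        spin_lock_irqsave(&ctx->lock, flags);
        ctx->refcount--;
        spin_unlock_irqrestore(&ctx->lock, flags);
    }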
D | blk-iocost.c |
    420  spinlock_t lock;  member
    741  spin_lock_irqsave(&iocg->ioc->lock, *flags);  in iocg_lock()
    742  spin_lock(&iocg->waitq.lock);  in iocg_lock()
    744  spin_lock_irqsave(&iocg->waitq.lock, *flags);  in iocg_lock()
    751  spin_unlock(&iocg->waitq.lock);  in iocg_unlock()
    752  spin_unlock_irqrestore(&iocg->ioc->lock, *flags);  in iocg_unlock()
    754  spin_unlock_irqrestore(&iocg->waitq.lock, *flags);  in iocg_unlock()
    777  lockdep_assert_held(&ioc->lock);  in ioc_refresh_period_us()
    917  lockdep_assert_held(&ioc->lock);  in ioc_refresh_params()
    961  lockdep_assert_held(&ioc->lock);  in ioc_refresh_vrate()
    [all …]
|
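The iocg_lock()/iocg_unlock() hits show two-level nesting: the global ioc->lock is taken with the _irqsave variant and the per-group waitq.lock is then taken with plain spin_lock(), since interrupts were already disabled by the outer lock. A sketch of that shape, with hypothetical my_ioc/my_iocg types standing in for the iocost structures:

    #include <linux/spinlock.h>
    #include <linux/wait.h>

    struct my_ioc {
        spinlock_t lock;
    };

    struct my_iocg {
        struct my_ioc *ioc;
        wait_queue_head_t waitq;
    };

    /*
     * Interrupts are disabled exactly once, by whichever lock is taken
     * first with the _irqsave variant; a lock nested inside it can use
     * the plain spin_lock()/spin_unlock().
     */
    static void my_iocg_lock(struct my_iocg *iocg, bool lock_ioc,
                             unsigned long *flags)
    {
        if (lock_ioc) {
            spin_lock_irqsave(&iocg->ioc->lock, *flags);
            spin_lock(&iocg->waitq.lock);
        } else {
            spin_lock_irqsave(&iocg->waitq.lock, *flags);
        }
    }

    static void my_iocg_unlock(struct my_iocg *iocg, bool unlock_ioc,
                               unsigned long *flags)
    {
        if (unlock_ioc) {
            spin_unlock(&iocg->waitq.lock);
            spin_unlock_irqrestore(&iocg->ioc->lock, *flags);
        } else {
            spin_unlock_irqrestore(&iocg->waitq.lock, *flags);
        }
    }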
D | blk-stat.c |
    17  spinlock_t lock;  member
    151  spin_lock_irqsave(&q->stats->lock, flags);  in blk_stat_add_callback()
    154  spin_unlock_irqrestore(&q->stats->lock, flags);  in blk_stat_add_callback()
    162  spin_lock_irqsave(&q->stats->lock, flags);  in blk_stat_remove_callback()
    166  spin_unlock_irqrestore(&q->stats->lock, flags);  in blk_stat_remove_callback()
    191  spin_lock_irqsave(&q->stats->lock, flags);  in blk_stat_enable_accounting()
    194  spin_unlock_irqrestore(&q->stats->lock, flags);  in blk_stat_enable_accounting()
    207  spin_lock_init(&stats->lock);  in blk_alloc_queue_stats()
|
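blk-stat.c shows the usual lifecycle for a lock embedded in a dynamically allocated structure: spin_lock_init() at allocation time (blk_alloc_queue_stats()), then the _irqsave/_irqrestore pair around list manipulation in the add/remove paths. A sketch under the assumption of a hypothetical my_stats container rather than struct blk_queue_stats, showing only the locking:

    #include <linux/spinlock.h>
    #include <linux/list.h>
    #include <linux/slab.h>

    struct my_stat_callback {
        struct list_head list;
    };

    struct my_stats {
        spinlock_t lock;             /* protects callbacks */
        struct list_head callbacks;
    };

    static struct my_stats *my_alloc_stats(void)
    {
        struct my_stats *stats = kzalloc(sizeof(*stats), GFP_KERNEL);

        if (!stats)
            return NULL;

        /* The lock must be initialized before anything can take it. */
        spin_lock_init(&stats->lock);
        INIT_LIST_HEAD(&stats->callbacks);
        return stats;
    }

    static void my_add_callback(struct my_stats *stats,
                                struct my_stat_callback *cb)
    {
        unsigned long flags;

        spin_lock_irqsave(&stats->lock, flags);
        list_add_tail(&cb->list, &stats->callbacks);
        spin_unlock_irqrestore(&stats->lock, flags);
    }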
D | badblocks.c |
    73  seq = read_seqbegin(&bb->lock);  in badblocks_check()
    121  if (read_seqretry(&bb->lock, seq))  in badblocks_check()
    185  write_seqlock_irqsave(&bb->lock, flags);  in badblocks_set()
    311  write_sequnlock_irqrestore(&bb->lock, flags);  in badblocks_set()
    351  write_seqlock_irq(&bb->lock);  in badblocks_clear()
    421  write_sequnlock_irq(&bb->lock);  in badblocks_clear()
    438  write_seqlock_irq(&bb->lock);  in ack_all_badblocks()
    454  write_sequnlock_irq(&bb->lock);  in ack_all_badblocks()
    478  seq = read_seqbegin(&bb->lock);  in badblocks_show()
    500  if (read_seqretry(&bb->lock, seq))  in badblocks_show()
    [all …]
|
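badblocks.c uses a seqlock rather than a plain spinlock: badblocks_check() and badblocks_show() read the table locklessly and retry if a writer raced with them, while badblocks_set()/badblocks_clear() take the write side. A minimal sketch of that reader/writer pattern, using a hypothetical my_table in place of struct badblocks:

    #include <linux/seqlock.h>

    struct my_table {
        seqlock_t lock;          /* seqlock_init(&t->lock) before first use */
        int nr_entries;
    };

    /* Lockless reader: loop until no writer interfered with the snapshot. */
    static int my_table_count(struct my_table *t)
    {
        unsigned int seq;
        int nr;

        do {
            seq = read_seqbegin(&t->lock);
            nr = t->nr_entries;
        } while (read_seqretry(&t->lock, seq));

        return nr;
    }

    /* Writer: exclusive, callable where the IRQ state is not known. */
    static void my_table_add(struct my_table *t)
    {
        unsigned long flags;

        write_seqlock_irqsave(&t->lock, flags);
        t->nr_entries++;
        write_sequnlock_irqrestore(&t->lock, flags);
    }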
D | kyber-iosched.c |
    146  spinlock_t lock;  member
    181  spinlock_t lock;  member
    456  spin_lock_init(&kcq->lock);  in kyber_ctx_queue_init()
    489  spin_lock_init(&khd->lock);  in kyber_init_hctx()
    576  spin_lock(&kcq->lock);  in kyber_bio_merge()
    578  spin_unlock(&kcq->lock);  in kyber_bio_merge()
    599  spin_lock(&kcq->lock);  in kyber_insert_requests()
    607  spin_unlock(&kcq->lock);  in kyber_insert_requests()
    669  spin_lock(&kcq->lock);  in flush_busy_kcq()
    673  spin_unlock(&kcq->lock);  in flush_busy_kcq()
    [all …]
|
D | keyslot-manager.c |
    56  down_write(&ksm->lock);  in blk_ksm_hw_enter()
    61  up_write(&ksm->lock);  in blk_ksm_hw_exit()
    98  init_rwsem(&ksm->lock);  in blk_ksm_init()
    246  down_read(&ksm->lock);  in blk_ksm_get_slot_for_key()
    248  up_read(&ksm->lock);  in blk_ksm_get_slot_for_key()
    421  down_write(&ksm->lock);  in blk_ksm_reprogram_all_keys()
    432  up_write(&ksm->lock);  in blk_ksm_reprogram_all_keys()
    627  init_rwsem(&ksm->lock);  in blk_ksm_init_passthrough()
|
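keyslot-manager.c is the one file in this listing whose lock is an rw_semaphore rather than a spinlock: slot lookups such as blk_ksm_get_slot_for_key() run concurrently under down_read(), while blk_ksm_reprogram_all_keys() takes the write side and excludes everyone. A sketch with a hypothetical my_mgr standing in for struct blk_keyslot_manager:

    #include <linux/rwsem.h>

    struct my_mgr {
        struct rw_semaphore lock;
        unsigned int nr_slots;
    };

    static void my_mgr_init(struct my_mgr *mgr)
    {
        init_rwsem(&mgr->lock);
        mgr->nr_slots = 0;
    }

    /* Readers may hold the lock concurrently with each other. */
    static unsigned int my_mgr_nr_slots(struct my_mgr *mgr)
    {
        unsigned int n;

        down_read(&mgr->lock);
        n = mgr->nr_slots;
        up_read(&mgr->lock);
        return n;
    }

    /* The writer excludes all readers and other writers, and may sleep. */
    static void my_mgr_resize(struct my_mgr *mgr, unsigned int nr)
    {
        down_write(&mgr->lock);
        mgr->nr_slots = nr;
        up_write(&mgr->lock);
    }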
D | mq-deadline-main.c |
    102  spinlock_t lock;  member
    380  lockdep_assert_held(&dd->lock);  in __dd_dispatch_request()
    500  spin_lock(&dd->lock);  in dd_dispatch_request()
    522  spin_unlock(&dd->lock);  in dd_dispatch_request()
    634  spin_lock_init(&dd->lock);  in dd_init_sched()
    699  spin_lock(&dd->lock);  in dd_bio_merge()
    701  spin_unlock(&dd->lock);  in dd_bio_merge()
    724  lockdep_assert_held(&dd->lock);  in dd_insert_request()
    780  spin_lock(&dd->lock);  in dd_insert_requests()
    788  spin_unlock(&dd->lock);  in dd_insert_requests()
    [all …]
|
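In mq-deadline, the entry points (dd_dispatch_request(), dd_bio_merge(), dd_insert_requests()) take dd->lock themselves, while helpers such as __dd_dispatch_request() only assert that the caller already holds it. A sketch of that caller-locks convention with a hypothetical my_sched:

    #include <linux/spinlock.h>
    #include <linux/lockdep.h>

    struct my_sched {
        spinlock_t lock;
        int queued;
    };

    /*
     * Internal helper: documents, and with CONFIG_LOCKDEP verifies at
     * runtime, that the caller already holds the lock.
     */
    static void __my_sched_dispatch(struct my_sched *sd)
    {
        lockdep_assert_held(&sd->lock);
        if (sd->queued > 0)
            sd->queued--;
    }

    /* Entry point: takes the lock and calls the helper. */
    static void my_sched_dispatch(struct my_sched *sd)
    {
        spin_lock(&sd->lock);
        __my_sched_dispatch(sd);
        spin_unlock(&sd->lock);
    }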
D | blk-cgroup.c |
    283  spin_lock(&blkcg->lock);  in blkg_create()
    297  spin_unlock(&blkcg->lock);  in blkg_create()
    384  lockdep_assert_held(&blkcg->lock);  in blkg_destroy()
    432  spin_lock(&blkcg->lock);  in blkg_destroy_all()
    434  spin_unlock(&blkcg->lock);  in blkg_destroy_all()
    449  spin_lock_irq(&blkcg->lock);  in blkcg_reset_stats()
    472  spin_unlock_irq(&blkcg->lock);  in blkcg_reset_stats()
    1023  spin_lock_irq(&blkcg->lock);  in blkcg_destroy_blkgs()
    1036  spin_unlock_irq(&blkcg->lock);  in blkcg_destroy_blkgs()
    1038  spin_lock_irq(&blkcg->lock);  in blkcg_destroy_blkgs()
    [all …]
|
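blkcg_destroy_blkgs() appears above with an unlock at line 1036 followed by a relock at line 1038: when it cannot safely take the second lock it needs in the order it would like, it drops blkcg->lock, backs off, and retries. A rough sketch of that trylock-and-back-off loop, using hypothetical my_parent/my_child types rather than the blkcg and queue structures (the cpu_relax() back-off here is illustrative):

    #include <linux/spinlock.h>
    #include <linux/list.h>
    #include <linux/processor.h>     /* cpu_relax() */

    struct my_child {
        struct list_head node;
        spinlock_t lock;
    };

    struct my_parent {
        spinlock_t lock;             /* protects children */
        struct list_head children;
    };

    /*
     * The child lock nests outside the parent lock elsewhere, so taking
     * it here unconditionally could deadlock.  Trylock it instead and,
     * on failure, drop the parent lock, relax, and retry.
     */
    static void my_parent_destroy_children(struct my_parent *p)
    {
        spin_lock_irq(&p->lock);
        while (!list_empty(&p->children)) {
            struct my_child *c = list_first_entry(&p->children,
                                                  struct my_child, node);

            if (spin_trylock(&c->lock)) {
                list_del_init(&c->node);
                spin_unlock(&c->lock);
            } else {
                spin_unlock_irq(&p->lock);
                cpu_relax();
                spin_lock_irq(&p->lock);
            }
        }
        spin_unlock_irq(&p->lock);
    }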
D | blk-mq-sched.c |
    293  spin_lock(&hctx->lock);  in __blk_mq_sched_dispatch_requests()
    296  spin_unlock(&hctx->lock);  in __blk_mq_sched_dispatch_requests()
    373  spin_lock(&ctx->lock);  in __blk_mq_sched_bio_merge()
    384  spin_unlock(&ctx->lock);  in __blk_mq_sched_bio_merge()
    472  spin_lock(&ctx->lock);  in blk_mq_sched_insert_request()
    474  spin_unlock(&ctx->lock);  in blk_mq_sched_insert_request()
|
D | genhd.c |
    652  int (*lock)(dev_t, void *), void *data)  in blk_register_region()
    654  kobj_map(bdev_map, devt, range, module, probe, lock, data);  in blk_register_region()
    1890  spinlock_t lock;  member
    1965  spin_lock_irqsave(&ev->lock, flags);  in disk_block_events()
    1967  spin_unlock_irqrestore(&ev->lock, flags);  in disk_block_events()
    1981  spin_lock_irqsave(&ev->lock, flags);  in __disk_unblock_events()
    1997  spin_unlock_irqrestore(&ev->lock, flags);  in __disk_unblock_events()
    2035  spin_lock_irq(&ev->lock);  in disk_flush_events()
    2040  spin_unlock_irq(&ev->lock);  in disk_flush_events()
    2070  spin_lock_irq(&ev->lock);  in disk_clear_events()
    [all …]
|
D | blk-iolatency.c |
    108  spinlock_t lock;  member
    558  spin_lock_irqsave(&lat_info->lock, flags);  in iolatency_check_latencies()
    590  spin_unlock_irqrestore(&lat_info->lock, flags);  in iolatency_check_latencies()
    693  spin_lock_irqsave(&lat_info->lock, flags);  in blkiolatency_timer_fn()
    715  spin_unlock_irqrestore(&lat_info->lock, flags);  in blkiolatency_timer_fn()
    821  spin_lock(&lat_info->lock);  in iolatency_clear_scaling()
    826  spin_unlock(&lat_info->lock);  in iolatency_clear_scaling()
    994  spin_lock_init(&iolat->child_lat.lock);  in iolatency_pd_init()
|
D | blk-mq.c |
    1035  spin_lock(&ctx->lock);  in flush_busy_ctx()
    1038  spin_unlock(&ctx->lock);  in flush_busy_ctx()
    1070  spin_lock(&ctx->lock);  in dispatch_rq_from_ctx()
    1077  spin_unlock(&ctx->lock);  in dispatch_rq_from_ctx()
    1200  spin_lock_irq(&wq->lock);  in blk_mq_mark_tag_wait()
    1204  spin_unlock_irq(&wq->lock);  in blk_mq_mark_tag_wait()
    1236  spin_unlock_irq(&wq->lock);  in blk_mq_mark_tag_wait()
    1247  spin_unlock_irq(&wq->lock);  in blk_mq_mark_tag_wait()
    1459  spin_lock(&hctx->lock);  in blk_mq_dispatch_rq_list()
    1461  spin_unlock(&hctx->lock);  in blk_mq_dispatch_rq_list()
    [all …]
|
D | blk-mq-debugfs.c |
    365  __acquires(&hctx->lock)  in hctx_dispatch_start()
    369  spin_lock(&hctx->lock);  in hctx_dispatch_start()
    381  __releases(&hctx->lock)  in hctx_dispatch_stop()
    385  spin_unlock(&hctx->lock);  in hctx_dispatch_stop()
    633  __acquires(&ctx->lock) \
    637  spin_lock(&ctx->lock); \
    650  __releases(&ctx->lock) \
    654  spin_unlock(&ctx->lock); \
|
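The blk-mq-debugfs.c hits are not lock/unlock pairs within one function: the dispatch lists are walked through a seq_file, so the lock is taken in the ->start() handler and only released in ->stop(), and the imbalance is annotated for sparse with __acquires()/__releases(). A sketch of that pattern for a hypothetical my_obj list (the annotation argument naming a variable resolved inside the function mirrors the style of the blk-mq-debugfs handlers):

    #include <linux/spinlock.h>
    #include <linux/list.h>
    #include <linux/seq_file.h>

    struct my_obj {
        spinlock_t lock;             /* protects items */
        struct list_head items;
    };

    /* Lock in ->start(), hold it across ->show() calls... */
    static void *my_seq_start(struct seq_file *m, loff_t *pos)
        __acquires(&obj->lock)
    {
        struct my_obj *obj = m->private;

        spin_lock(&obj->lock);
        return seq_list_start(&obj->items, *pos);
    }

    static void *my_seq_next(struct seq_file *m, void *v, loff_t *pos)
    {
        struct my_obj *obj = m->private;

        return seq_list_next(v, &obj->items, pos);
    }

    /* ...and only drop it in ->stop(). */
    static void my_seq_stop(struct seq_file *m, void *v)
        __releases(&obj->lock)
    {
        struct my_obj *obj = m->private;

        spin_unlock(&obj->lock);
    }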
D | bsg.c |
    32  spinlock_t lock;  member
    207  spin_lock_init(&bd->lock);  in bsg_alloc_device()
    351  spin_lock_irq(&bd->lock);  in bsg_set_command_q()
    353  spin_unlock_irq(&bd->lock);  in bsg_set_command_q()
|
D | bfq-iosched.c |
    430  lockdep_assert_held(&bfqd->lock);  in bfq_schedule_dispatch()
    2236  spin_lock_irq(&bfqd->lock);  in bfq_bio_merge()
    2255  spin_unlock_irq(&bfqd->lock);  in bfq_bio_merge()
    2399  spin_lock_irq(&bfqd->lock);  in bfq_end_wr()
    2407  spin_unlock_irq(&bfqd->lock);  in bfq_end_wr()
    4832  spin_lock_irq(&bfqd->lock);  in bfq_dispatch_request()
    4843  spin_unlock_irq(&bfqd->lock);  in bfq_dispatch_request()
    4982  spin_lock_irqsave(&bfqd->lock, flags);  in bfq_exit_icq_bfqq()
    4985  spin_unlock_irqrestore(&bfqd->lock, flags);  in bfq_exit_icq_bfqq()
    5532  spin_lock_irq(&bfqd->lock);  in bfq_insert_request()
    [all …]
|
D | blk-mq-tag.c |
    210  spin_lock_irqsave(&tags->lock, flags);  in blk_mq_find_and_get_req()
    214  spin_unlock_irqrestore(&tags->lock, flags);  in blk_mq_find_and_get_req()
    558  spin_lock_init(&tags->lock);  in blk_mq_init_tags()
|
D | blk-mq-tag.h | 28 spinlock_t lock; member
|
D | bfq-cgroup.c |
    916  spin_lock_irqsave(&bfqd->lock, flags);  in bfq_pd_offline()
    962  spin_unlock_irqrestore(&bfqd->lock, flags);  in bfq_pd_offline()
    1065  spin_lock_irq(&blkcg->lock);  in bfq_io_set_weight_legacy()
    1073  spin_unlock_irq(&blkcg->lock);  in bfq_io_set_weight_legacy()
|
D | blk-mq.h | 20 spinlock_t lock; member
|
D | bfq-iosched.h | 726 spinlock_t lock; member
|
D | Kconfig | 194 Enabling this option enables users to setup/unlock/lock
|