/block/
D | blk-exec.c |
     74  static void blk_rq_poll_completion(struct request *rq, struct completion *wait)  in blk_rq_poll_completion() argument
     79  } while (!completion_done(wait));  in blk_rq_poll_completion()
     95  DECLARE_COMPLETION_ONSTACK(wait);  in blk_execute_rq()
     98  rq->end_io_data = &wait;  in blk_execute_rq()
    105  blk_rq_poll_completion(rq, &wait);  in blk_execute_rq()
    107  while (!wait_for_completion_io_timeout(&wait, hang_check * (HZ/2)));  in blk_execute_rq()
    109  wait_for_completion_io(&wait);  in blk_execute_rq()
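The blk_execute_rq() hits are the canonical on-stack completion pattern: the waiter declares the completion in its own stack frame, the end-io side signals it, and the sleep is charged to iowait. A minimal sketch of the same shape, with a kthread standing in for the real end-io path (my_worker and my_execute_sync are hypothetical names, not block-layer API):

#include <linux/completion.h>
#include <linux/kthread.h>
#include <linux/err.h>

/* Stand-in for the end-io side: signal the submitter's completion. */
static int my_worker(void *data)
{
    struct completion *done = data;

    /* ... do the actual work ... */
    complete(done);
    return 0;
}

static void my_execute_sync(void)
{
    /* The ONSTACK variant is valid because this frame outlives the wait. */
    DECLARE_COMPLETION_ONSTACK(done);
    struct task_struct *t;

    t = kthread_run(my_worker, &done, "my_worker");
    if (IS_ERR(t))
        return;

    /* The _io variant accounts the sleep as iowait, as blk_execute_rq()
     * does while waiting for the request to complete. */
    wait_for_completion_io(&done);
}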
D | kyber-iosched.c |
    195  static int kyber_domain_wake(wait_queue_entry_t *wait, unsigned mode, int flags,
    496  init_waitqueue_func_entry(&khd->domain_wait[i].wait,  in kyber_init_hctx()
    498  khd->domain_wait[i].wait.private = hctx;  in kyber_init_hctx()
    499  INIT_LIST_HEAD(&khd->domain_wait[i].wait.entry);  in kyber_init_hctx()
    697  struct sbq_wait *wait = container_of(wqe, struct sbq_wait, wait);  in kyber_domain_wake() local
    699  sbitmap_del_wait_queue(wait);  in kyber_domain_wake()
    710  struct sbq_wait *wait = &khd->domain_wait[sched_domain];  in kyber_get_domain_token() local
    721  if (nr < 0 && list_empty_careful(&wait->wait.entry)) {  in kyber_get_domain_token()
    725  sbitmap_add_wait_queue(domain_tokens, ws, wait);  in kyber_get_domain_token()
    741  if (nr >= 0 && !list_empty_careful(&wait->wait.entry)) {  in kyber_get_domain_token()
    [all …]
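Kyber's hits show a struct sbq_wait whose embedded wait_queue_entry carries a custom wake callback: the callback recovers the wrapper with container_of(), unhooks itself with sbitmap_del_wait_queue(), and kicks a retry. A rough sketch of that wiring under hypothetical my_* names (kyber stores its hctx in .private and runs the hardware queue; a work item stands in here):

#include <linux/sbitmap.h>
#include <linux/wait.h>
#include <linux/workqueue.h>

/* Hypothetical per-domain state mirroring khd->domain_wait[]. */
struct my_domain {
    struct sbq_wait sbq_wait;
};

static int my_domain_wake(wait_queue_entry_t *wqe, unsigned mode,
                          int flags, void *key)
{
    /* Recover the sbq_wait wrapping this wait_queue_entry. */
    struct sbq_wait *wait = container_of(wqe, struct sbq_wait, wait);

    sbitmap_del_wait_queue(wait);   /* unhook before kicking the retry */
    schedule_work(wqe->private);    /* kyber runs its hw queue instead */
    return 1;
}

static void my_domain_init(struct my_domain *dom, struct work_struct *retry)
{
    init_waitqueue_func_entry(&dom->sbq_wait.wait, my_domain_wake);
    dom->sbq_wait.wait.private = retry;
    INIT_LIST_HEAD(&dom->sbq_wait.wait.entry);
}

/* Arming side (cf. kyber_get_domain_token()): queue only when not
 * already on a wait list, then retry the token get once queued. */
static int my_get_token(struct sbitmap_queue *sbq, struct sbq_wait_state *ws,
                        struct my_domain *dom)
{
    int nr = __sbitmap_queue_get(sbq);

    if (nr < 0 && list_empty_careful(&dom->sbq_wait.wait.entry)) {
        sbitmap_add_wait_queue(sbq, ws, &dom->sbq_wait);
        nr = __sbitmap_queue_get(sbq);  /* recheck closes the race */
    }
    return nr;
}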
D | blk-crypto-fallback.c |
    190  struct crypto_wait *wait)  in blk_crypto_fallback_alloc_cipher_req() argument
    205  crypto_req_done, wait);  in blk_crypto_fallback_alloc_cipher_req()
    269  DECLARE_CRYPTO_WAIT(wait);  in blk_crypto_fallback_encrypt_bio()
    304  if (!blk_crypto_fallback_alloc_cipher_req(slot, &ciph_req, &wait)) {  in blk_crypto_fallback_encrypt_bio()
    339  &wait)) {  in blk_crypto_fallback_encrypt_bio()
    385  DECLARE_CRYPTO_WAIT(wait);  in blk_crypto_fallback_decrypt_bio()
    407  if (!blk_crypto_fallback_alloc_cipher_req(slot, &ciph_req, &wait)) {  in blk_crypto_fallback_decrypt_bio()
    427  &wait)) {  in blk_crypto_fallback_decrypt_bio()
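The crypto-fallback hits follow the standard crypto_wait recipe: DECLARE_CRYPTO_WAIT() on the stack, crypto_req_done as the request callback, and crypto_wait_req() to make the possibly-asynchronous cipher call synchronous. A self-contained sketch assuming an skcipher transform (my_encrypt_sync is hypothetical; key and IV setup are elided):

#include <crypto/skcipher.h>
#include <linux/crypto.h>
#include <linux/scatterlist.h>
#include <linux/err.h>

static int my_encrypt_sync(struct crypto_skcipher *tfm,
                           struct scatterlist *src, struct scatterlist *dst,
                           unsigned int len, u8 *iv)
{
    DECLARE_CRYPTO_WAIT(wait);
    struct skcipher_request *req;
    int err;

    req = skcipher_request_alloc(tfm, GFP_NOIO);
    if (!req)
        return -ENOMEM;

    /* crypto_req_done() completes "wait" when the engine finishes. */
    skcipher_request_set_callback(req,
                                  CRYPTO_TFM_REQ_MAY_BACKLOG |
                                  CRYPTO_TFM_REQ_MAY_SLEEP,
                                  crypto_req_done, &wait);
    skcipher_request_set_crypt(req, src, dst, len, iv);

    /* Sleeps only if the call returns -EINPROGRESS or -EBUSY. */
    err = crypto_wait_req(crypto_skcipher_encrypt(req), &wait);

    skcipher_request_free(req);
    return err;
}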
D | blk-rq-qos.c |
    265  has_sleeper = wq_has_sleeper(&rqw->wait);  in rq_qos_wait()
    269  has_sleeper = !prepare_to_wait_exclusive(&rqw->wait, &data.wq,  in rq_qos_wait()
    276  finish_wait(&rqw->wait, &data.wq);  in rq_qos_wait()
    292  finish_wait(&rqw->wait, &data.wq);  in rq_qos_wait()
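rq_qos_wait() combines a fast path guarded by wq_has_sleeper(), so late arrivals cannot jump ahead of queued sleepers, with exclusive waits so each wakeup releases exactly one throttled task. A simplified sketch of that shape; struct my_limiter and my_try_inc() are illustrative stand-ins for the real rq_wait and its private inc-below helper:

#include <linux/wait.h>
#include <linux/atomic.h>
#include <linux/sched.h>

/* Hypothetical limiter; stands in for struct rq_wait plus its depth. */
struct my_limiter {
    wait_queue_head_t wait;
    atomic_t inflight;
    int limit;
};

/* Atomic "increment if below limit": the cmpxchg loop makes the check
 * and the increment one indivisible step. */
static bool my_try_inc(struct my_limiter *lim)
{
    int cur = atomic_read(&lim->inflight);

    do {
        if (cur >= lim->limit)
            return false;
    } while (!atomic_try_cmpxchg(&lim->inflight, &cur, cur + 1));
    return true;
}

static void my_throttle(struct my_limiter *lim)
{
    DEFINE_WAIT(wq_entry);

    /* Fast path only when nobody is queued, so newcomers cannot
     * overtake sleepers already on the list. */
    if (!wq_has_sleeper(&lim->wait) && my_try_inc(lim))
        return;

    for (;;) {
        /* Exclusive: one wakeup releases one waiter. */
        prepare_to_wait_exclusive(&lim->wait, &wq_entry,
                                  TASK_UNINTERRUPTIBLE);
        if (my_try_inc(lim))
            break;
        io_schedule();
    }
    finish_wait(&lim->wait, &wq_entry);
}

The matching release path would decrement inflight and wake the queue; the blk-wbt entry below sketches that half.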
D | blk-throttle.c |
    902  u32 iops_limit, unsigned long *wait)  in tg_with_in_iops_limit() argument
    910  if (wait)  in tg_with_in_iops_limit()
    911  *wait = 0;  in tg_with_in_iops_limit()
    936  if (wait)  in tg_with_in_iops_limit()
    937  *wait = 0;  in tg_with_in_iops_limit()
    944  if (wait)  in tg_with_in_iops_limit()
    945  *wait = jiffy_wait;  in tg_with_in_iops_limit()
    950  u64 bps_limit, unsigned long *wait)  in tg_with_in_bps_limit() argument
    958  if (wait)  in tg_with_in_bps_limit()
    959  *wait = 0;  in tg_with_in_bps_limit()
    [all …]
D | blk-iocost.c |
     568  struct wait_queue_entry wait;  member
    1449  struct iocg_wait *wait = container_of(wq_entry, struct iocg_wait, wait);  in iocg_wake_fn() local
    1451  u64 cost = abs_cost_to_cost(wait->abs_cost, ctx->hw_inuse);  in iocg_wake_fn()
    1458  iocg_commit_bio(ctx->iocg, wait->bio, wait->abs_cost, cost);  in iocg_wake_fn()
    1459  wait->committed = true;  in iocg_wake_fn()
    2590  struct iocg_wait wait;  in ioc_rqos_throttle() local
    2697  init_waitqueue_func_entry(&wait.wait, iocg_wake_fn);  in ioc_rqos_throttle()
    2698  wait.wait.private = current;  in ioc_rqos_throttle()
    2699  wait.bio = bio;  in ioc_rqos_throttle()
    2700  wait.abs_cost = abs_cost;  in ioc_rqos_throttle()
    [all …]
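blk-iocost embeds the wait_queue_entry in a larger iocg_wait so its wake function can inspect the waiter's bio and cost before granting it; returning a negative value from the wake function stops the queue walk, leaving later waiters asleep. A sketch of that embed-plus-custom-wake idea; my_wait, my_budget_covers() and the field names are illustrative, not the real iocg_wait layout:

#include <linux/wait.h>
#include <linux/sched.h>

/* Illustrative waiter record; the real iocg_wait carries a bio and an
 * absolute vtime cost instead. */
struct my_wait {
    struct wait_queue_entry wait;
    u64 cost;
    bool granted;
};

/* Hypothetical budget check standing in for iocost's vtime debiting. */
static bool my_budget_covers(u64 cost)
{
    return true;    /* placeholder so the sketch stands alone */
}

static int my_wake_fn(struct wait_queue_entry *wq_entry, unsigned mode,
                      int flags, void *key)
{
    struct my_wait *w = container_of(wq_entry, struct my_wait, wait);

    if (!my_budget_covers(w->cost))
        return -1;  /* budget exhausted: stop walking the queue */

    w->granted = true;
    /* Always unhook here; the stock autoremove helper only removes the
     * entry when it actually changed the task state. */
    list_del_init(&wq_entry->entry);
    return default_wake_function(wq_entry, mode, flags, key);
}

static void my_wait_for_budget(wait_queue_head_t *wq, u64 cost)
{
    struct my_wait w = { .cost = cost, .granted = false };

    init_waitqueue_func_entry(&w.wait, my_wake_fn);
    w.wait.private = current;   /* default_wake_function() wakes this */

    spin_lock_irq(&wq->lock);
    __add_wait_queue_entry_tail(wq, &w.wait);
    spin_unlock_irq(&wq->lock);

    while (true) {
        set_current_state(TASK_UNINTERRUPTIBLE);
        if (w.granted)  /* set by my_wake_fn() before it wakes us */
            break;
        io_schedule();
    }
    __set_current_state(TASK_RUNNING);
    /* my_wake_fn() already took w.wait off the queue. */
}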
D | blk-mq-tag.c |
     91  DEFINE_SBQ_WAIT(wait);  in blk_mq_get_tag()
    133  sbitmap_prepare_to_wait(bt, ws, &wait, TASK_UNINTERRUPTIBLE);  in blk_mq_get_tag()
    142  sbitmap_finish_wait(bt, ws, &wait);  in blk_mq_get_tag()
    164  sbitmap_finish_wait(bt, ws, &wait);  in blk_mq_get_tag()
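blk_mq_get_tag() is the stock sbitmap wait loop: DEFINE_SBQ_WAIT(), then sbitmap_prepare_to_wait() followed by one more allocation attempt (closing the window where a bit is freed just before the waiter queues), io_schedule(), and sbitmap_finish_wait(). Roughly, against a bare sbitmap_queue (my_get_bit and my_wait_index are hypothetical):

#include <linux/sbitmap.h>
#include <linux/sched.h>

static atomic_t my_wait_index = ATOMIC_INIT(0);

static int my_get_bit(struct sbitmap_queue *sbq)
{
    DEFINE_SBQ_WAIT(wait);
    struct sbq_wait_state *ws;
    int nr;

    nr = __sbitmap_queue_get(sbq);
    if (nr >= 0)
        return nr;

    ws = sbq_wait_ptr(sbq, &my_wait_index);
    for (;;) {
        sbitmap_prepare_to_wait(sbq, ws, &wait, TASK_UNINTERRUPTIBLE);

        /* Retry after queueing: a bit freed between the failed get
         * and prepare_to_wait() would otherwise be a lost wakeup. */
        nr = __sbitmap_queue_get(sbq);
        if (nr >= 0)
            break;

        io_schedule();

        sbitmap_finish_wait(sbq, ws, &wait);
        ws = sbq_wait_ptr(sbq, &my_wait_index);
    }
    sbitmap_finish_wait(sbq, ws, &wait);
    return nr;
}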
D | blk-rq-qos.h |
     24  wait_queue_head_t wait;  member
     86  init_waitqueue_head(&rq_wait->wait);  in rq_wait_init()
D | bdev.c |
    587  DEFINE_WAIT(wait);  in bd_prepare_to_claim()
    589  prepare_to_wait(wq, &wait, TASK_UNINTERRUPTIBLE);  in bd_prepare_to_claim()
    592  finish_wait(wq, &wait);  in bd_prepare_to_claim()
   1020  void sync_bdevs(bool wait)  in sync_bdevs() argument
   1053  } else if (wait) {  in sync_bdevs()
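bd_prepare_to_claim() uses the classic prepare/schedule/finish retry loop: it queues itself on the waitqueue before dropping the lock, so a wake_up() issued the moment the lock is released cannot be lost, then retries from the top. The same shape with hypothetical my_claim state in place of bd_claiming:

#include <linux/wait.h>
#include <linux/mutex.h>
#include <linux/sched.h>

static DECLARE_WAIT_QUEUE_HEAD(my_claim_wq);
static DEFINE_MUTEX(my_claim_lock);
static bool my_claimed;

static void my_prepare_to_claim(void)
{
retry:
    mutex_lock(&my_claim_lock);
    if (my_claimed) {
        DEFINE_WAIT(wait);

        /* Queue before dropping the lock: the waker's wake_up()
         * after clearing my_claimed cannot slip past unseen. */
        prepare_to_wait(&my_claim_wq, &wait, TASK_UNINTERRUPTIBLE);
        mutex_unlock(&my_claim_lock);
        schedule();
        finish_wait(&my_claim_wq, &wait);
        goto retry;
    }
    my_claimed = true;
    mutex_unlock(&my_claim_lock);
}

static void my_abort_claim(void)
{
    mutex_lock(&my_claim_lock);
    my_claimed = false;
    mutex_unlock(&my_claim_lock);
    wake_up(&my_claim_wq);  /* retriers loop back and re-check */
}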
D | blk-wbt.c |
    123  if (wq_has_sleeper(&rqw->wait))  in rwb_wake_all()
    124  wake_up_all(&rqw->wait);  in rwb_wake_all()
    162  if (wq_has_sleeper(&rqw->wait)) {  in wbt_rqw_done()
    166  wake_up_all(&rqw->wait);  in wbt_rqw_done()
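blk-wbt's waker side, together with the rq_wait definition in blk-rq-qos.h above, pairs init_waitqueue_head() at setup with a wq_has_sleeper() check before wake_up_all(): wq_has_sleeper() issues the full barrier itself, so completions in the common no-waiter case never touch the waitqueue lock. Sketched with a hypothetical my_rq_wait mirroring struct rq_wait:

#include <linux/wait.h>
#include <linux/atomic.h>

struct my_rq_wait {
    wait_queue_head_t wait;
    atomic_t inflight;
};

static void my_rq_wait_init(struct my_rq_wait *rqw)
{
    init_waitqueue_head(&rqw->wait);
    atomic_set(&rqw->inflight, 0);
}

static void my_io_done(struct my_rq_wait *rqw)
{
    atomic_dec(&rqw->inflight);

    /* Cheap check first: wake_up_all() takes the waitqueue lock even
     * when nobody is sleeping, so guard it as blk-wbt does. */
    if (wq_has_sleeper(&rqw->wait))
        wake_up_all(&rqw->wait);
}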
D | blk-iolatency.c |
    276  wake_up(&rqw->wait);  in iolat_cleanup_cb()
    387  wake_up_all(&iolat->rq_wait.wait);  in scale_change()
    458  wake_up_all(&iolat->rq_wait.wait);  in check_scale_change()
    640  wake_up(&rqw->wait);  in blkcg_iolatency_done_bio()
D | blk-mq.c |
   1115  static int blk_mq_dispatch_wake(wait_queue_entry_t *wait, unsigned mode,  in blk_mq_dispatch_wake() argument
   1120  hctx = container_of(wait, struct blk_mq_hw_ctx, dispatch_wait);  in blk_mq_dispatch_wake()
   1123  if (!list_empty(&wait->entry)) {  in blk_mq_dispatch_wake()
   1126  list_del_init(&wait->entry);  in blk_mq_dispatch_wake()
   1147  wait_queue_entry_t *wait;  in blk_mq_mark_tag_wait() local
   1164  wait = &hctx->dispatch_wait;  in blk_mq_mark_tag_wait()
   1165  if (!list_empty_careful(&wait->entry))  in blk_mq_mark_tag_wait()
   1168  wq = &bt_wait_ptr(sbq, hctx)->wait;  in blk_mq_mark_tag_wait()
   1172  if (!list_empty(&wait->entry)) {  in blk_mq_mark_tag_wait()
   1179  wait->flags &= ~WQ_FLAG_EXCLUSIVE;  in blk_mq_mark_tag_wait()
   [all …]
D | fops.c |
    144  static int blkdev_iopoll(struct kiocb *kiocb, bool wait)  in blkdev_iopoll() argument
    149  return blk_poll(q, READ_ONCE(kiocb->ki_cookie), wait);  in blkdev_iopoll()