• Home
  • Raw
  • Download

Lines Matching refs:ret

584 		int ret;  in blk_mq_alloc_request()  local
586 ret = blk_queue_enter(q, flags); in blk_mq_alloc_request()
587 if (ret) in blk_mq_alloc_request()
588 return ERR_PTR(ret); in blk_mq_alloc_request()
617 int ret; in blk_mq_alloc_request_hctx() local
636 ret = blk_queue_enter(q, flags); in blk_mq_alloc_request_hctx()
637 if (ret) in blk_mq_alloc_request_hctx()
638 return ERR_PTR(ret); in blk_mq_alloc_request_hctx()
644 ret = -EXDEV; in blk_mq_alloc_request_hctx()
661 ret = -EWOULDBLOCK; in blk_mq_alloc_request_hctx()
674 return ERR_PTR(ret); in blk_mq_alloc_request_hctx()
1331 blk_status_t ret; member
1334 static enum rq_end_io_ret blk_end_sync_rq(struct request *rq, blk_status_t ret) in blk_end_sync_rq() argument
1338 wait->ret = ret; in blk_end_sync_rq()
1403 return wait.ret; in blk_execute_rq()
1551 enum blk_eh_timer_return ret; in blk_mq_rq_timed_out() local
1553 ret = req->q->mq_ops->timeout(req); in blk_mq_rq_timed_out()
1554 if (ret == BLK_EH_DONE) in blk_mq_rq_timed_out()
1556 WARN_ON_ONCE(ret != BLK_EH_RESET_TIMER); in blk_mq_rq_timed_out()
1826 bool ret; in blk_mq_mark_tag_wait() local
1886 ret = blk_mq_get_driver_tag(rq); in blk_mq_mark_tag_wait()
1887 if (!ret) { in blk_mq_mark_tag_wait()
2030 blk_status_t ret = BLK_STS_OK; in blk_mq_dispatch_rq_list() local
2072 ret = q->mq_ops->queue_rq(hctx, &bd); in blk_mq_dispatch_rq_list()
2073 switch (ret) { in blk_mq_dispatch_rq_list()
2094 blk_mq_end_request(rq, ret); in blk_mq_dispatch_rq_list()
2105 ret == BLK_STS_DEV_RESOURCE) && q->mq_ops->commit_rqs && queued) in blk_mq_dispatch_rq_list()
2604 blk_status_t ret; in __blk_mq_issue_directly() local
2611 ret = q->mq_ops->queue_rq(hctx, &bd); in __blk_mq_issue_directly()
2612 switch (ret) { in __blk_mq_issue_directly()
2626 return ret; in __blk_mq_issue_directly()
2687 blk_status_t ret = in blk_mq_try_issue_directly() local
2690 if (ret == BLK_STS_RESOURCE || ret == BLK_STS_DEV_RESOURCE) in blk_mq_try_issue_directly()
2692 else if (ret != BLK_STS_OK) in blk_mq_try_issue_directly()
2693 blk_mq_end_request(rq, ret); in blk_mq_try_issue_directly()
2710 blk_status_t ret; in blk_mq_plug_issue_direct() local
2718 ret = blk_mq_request_issue_directly(rq, last); in blk_mq_plug_issue_direct()
2719 switch (ret) { in blk_mq_plug_issue_direct()
2729 blk_mq_end_request(rq, ret); in blk_mq_plug_issue_direct()
2836 blk_status_t ret; in blk_mq_try_issue_list_directly() local
2841 ret = blk_mq_request_issue_directly(rq, list_empty(list)); in blk_mq_try_issue_list_directly()
2842 if (ret != BLK_STS_OK) { in blk_mq_try_issue_list_directly()
2844 if (ret == BLK_STS_RESOURCE || in blk_mq_try_issue_list_directly()
2845 ret == BLK_STS_DEV_RESOURCE) { in blk_mq_try_issue_list_directly()
2850 blk_mq_end_request(rq, ret); in blk_mq_try_issue_list_directly()
2965 blk_status_t ret; in blk_mq_submit_bio() local
3014 ret = blk_crypto_rq_get_keyslot(rq); in blk_mq_submit_bio()
3015 if (ret != BLK_STS_OK) { in blk_mq_submit_bio()
3016 bio->bi_status = ret; in blk_mq_submit_bio()
3047 blk_status_t ret; in blk_insert_cloned_request() local
3093 ret = blk_mq_request_issue_directly(rq, true); in blk_insert_cloned_request()
3094 if (ret) in blk_insert_cloned_request()
3096 return ret; in blk_insert_cloned_request()
3372 int ret; in blk_mq_init_request() local
3375 ret = set->ops->init_request(set, rq, hctx_idx, node); in blk_mq_init_request()
3376 if (ret) in blk_mq_init_request()
3377 return ret; in blk_mq_init_request()
3794 int ret; in blk_mq_alloc_map_and_rqs() local
3800 ret = blk_mq_alloc_rqs(set, tags, hctx_idx, depth); in blk_mq_alloc_map_and_rqs()
3801 if (ret) { in blk_mq_alloc_map_and_rqs()
4077 int ret; in blk_mq_init_queue_data() local
4083 ret = blk_mq_init_allocated_queue(set, q); in blk_mq_init_queue_data()
4084 if (ret) { in blk_mq_init_queue_data()
4086 return ERR_PTR(ret); in blk_mq_init_queue_data()
4464 int i, ret; in blk_mq_alloc_tag_set() local
4512 ret = -ENOMEM; in blk_mq_alloc_tag_set()
4524 ret = blk_mq_alloc_set_map_and_rqs(set); in blk_mq_alloc_tag_set()
4525 if (ret) in blk_mq_alloc_tag_set()
4540 return ret; in blk_mq_alloc_tag_set()
4586 int ret; in blk_mq_update_nr_requests() local
4598 ret = 0; in blk_mq_update_nr_requests()
4607 ret = blk_mq_tag_update_depth(hctx, &hctx->sched_tags, in blk_mq_update_nr_requests()
4610 ret = blk_mq_tag_update_depth(hctx, &hctx->tags, nr, in blk_mq_update_nr_requests()
4613 if (ret) in blk_mq_update_nr_requests()
4618 if (!ret) { in blk_mq_update_nr_requests()
4631 return ret; in blk_mq_update_nr_requests()
4828 unsigned long ret = 0; in blk_mq_poll_nsecs() local
4849 return ret; in blk_mq_poll_nsecs()
4852 ret = (q->poll_stat[bucket].mean + 1) / 2; in blk_mq_poll_nsecs()
4854 return ret; in blk_mq_poll_nsecs()
4928 int ret; in blk_mq_poll_classic() local
4931 ret = q->mq_ops->poll(hctx, iob); in blk_mq_poll_classic()
4932 if (ret > 0) { in blk_mq_poll_classic()
4934 return ret; in blk_mq_poll_classic()
4942 if (ret < 0 || (flags & BLK_POLL_ONESHOT)) in blk_mq_poll_classic()