
Searched refs:req (Results 1 – 12 of 12) sorted by relevance

/block/

bsg-lib.c
55 struct request *req = job->req; in bsg_job_done() local
56 struct request *rsp = req->next_rq; in bsg_job_done()
59 err = job->req->errors = result; in bsg_job_done()
62 job->req->sense_len = sizeof(u32); in bsg_job_done()
64 job->req->sense_len = job->reply_len; in bsg_job_done()
66 req->resid_len = 0; in bsg_job_done()
74 blk_complete_request(req); in bsg_job_done()
90 static int bsg_map_buffer(struct bsg_buffer *buf, struct request *req) in bsg_map_buffer() argument
92 size_t sz = (sizeof(struct scatterlist) * req->nr_phys_segments); in bsg_map_buffer()
94 BUG_ON(!req->nr_phys_segments); in bsg_map_buffer()
[all …]
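
The excerpt above is from bsg_job_done(), the bsg-lib helper a transport driver calls when a request finishes: it copies the result and reply length into the backing struct request and ends it with blk_complete_request(). A minimal sketch of a caller, assuming this bsg-lib API; the function name and status handling below are illustrative, not taken from the source:

	#include <linux/bsg-lib.h>

	/*
	 * Illustrative completion path for a driver built on bsg-lib: once the
	 * transport has a reply, hand the job back through bsg_job_done(), which
	 * (per the excerpt above) fills in req->errors / sense_len and completes
	 * the request with blk_complete_request().
	 */
	static void example_complete_bsg(struct bsg_job *job, int status,
					 unsigned int bytes_received)
	{
		if (status)
			bsg_job_done(job, status, 0);
		else
			bsg_job_done(job, 0, bytes_received);
	}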

blk-merge.c
486 struct request *req, in ll_new_hw_segment() argument
491 if (req->nr_phys_segments + nr_phys_segs > queue_max_segments(q)) in ll_new_hw_segment()
494 if (blk_integrity_merge_bio(q, req, bio) == false) in ll_new_hw_segment()
501 req->nr_phys_segments += nr_phys_segs; in ll_new_hw_segment()
505 req->cmd_flags |= REQ_NOMERGE; in ll_new_hw_segment()
506 if (req == q->last_merge) in ll_new_hw_segment()
511 int ll_back_merge_fn(struct request_queue *q, struct request *req, in ll_back_merge_fn() argument
514 if (req_gap_back_merge(req, bio)) in ll_back_merge_fn()
516 if (blk_integrity_rq(req) && in ll_back_merge_fn()
517 integrity_req_gap_back_merge(req, bio)) in ll_back_merge_fn()
[all …]

blk-timeout.c
78 void blk_delete_timer(struct request *req) in blk_delete_timer() argument
80 list_del_init(&req->timeout_list); in blk_delete_timer()
83 static void blk_rq_timed_out(struct request *req) in blk_rq_timed_out() argument
85 struct request_queue *q = req->q; in blk_rq_timed_out()
89 ret = q->rq_timed_out_fn(req); in blk_rq_timed_out()
93 __blk_complete_request(req); in blk_rq_timed_out()
96 blk_add_timer(req); in blk_rq_timed_out()
97 blk_clear_rq_complete(req); in blk_rq_timed_out()
160 void blk_abort_request(struct request *req) in blk_abort_request() argument
162 if (blk_mark_rq_complete(req)) in blk_abort_request()
[all …]
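
blk_rq_timed_out() above asks the queue's rq_timed_out_fn what to do with an expired request and then either completes it (BLK_EH_HANDLED), re-arms the timer (BLK_EH_RESET_TIMER), or leaves it to the driver (BLK_EH_NOT_HANDLED). A minimal sketch of such a handler for the legacy (non-mq) path; the retry counter and the example_cmd structure hung off req->special are invented for illustration:

	#include <linux/blkdev.h>

	#define EXAMPLE_MAX_RETRIES	3	/* illustrative limit, not from the source */

	/* Hypothetical per-command driver state stored in req->special. */
	struct example_cmd {
		int retries;
	};

	static enum blk_eh_timer_return example_rq_timed_out(struct request *req)
	{
		struct example_cmd *cmd = req->special;

		/* Give the command a few more tries before handing it back. */
		if (cmd->retries++ < EXAMPLE_MAX_RETRIES)
			return BLK_EH_RESET_TIMER;	/* blk_rq_timed_out() re-arms the timer */

		return BLK_EH_NOT_HANDLED;		/* driver will recover or abort it itself */
	}

	/* Registered once at queue-setup time:
	 *	blk_queue_rq_timed_out(q, example_rq_timed_out);
	 */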

blk-core.c
1465 void __blk_put_request(struct request_queue *q, struct request *req) in __blk_put_request() argument
1471 blk_mq_free_request(req); in __blk_put_request()
1475 blk_pm_put_request(req); in __blk_put_request()
1477 elv_completed_request(q, req); in __blk_put_request()
1480 WARN_ON(req->bio != NULL); in __blk_put_request()
1486 if (req->cmd_flags & REQ_ALLOCED) { in __blk_put_request()
1487 unsigned int flags = req->cmd_flags; in __blk_put_request()
1488 struct request_list *rl = blk_rq_rl(req); in __blk_put_request()
1490 BUG_ON(!list_empty(&req->queuelist)); in __blk_put_request()
1491 BUG_ON(ELV_ON_HASH(req)); in __blk_put_request()
[all …]

blk-softirq.c
105 void __blk_complete_request(struct request *req) in __blk_complete_request() argument
108 struct request_queue *q = req->q; in __blk_complete_request()
120 if (req->cpu != -1) { in __blk_complete_request()
121 ccpu = req->cpu; in __blk_complete_request()
139 list_add_tail(&req->ipi_list, list); in __blk_complete_request()
147 if (list->next == &req->ipi_list) in __blk_complete_request()
149 } else if (raise_blk_irq(ccpu, req)) in __blk_complete_request()
166 void blk_complete_request(struct request *req) in blk_complete_request() argument
168 if (unlikely(blk_should_fake_timeout(req->q))) in blk_complete_request()
170 if (!blk_mark_rq_complete(req)) in blk_complete_request()
[all …]

blk-integrity.c
186 bool blk_integrity_merge_rq(struct request_queue *q, struct request *req, in blk_integrity_merge_rq() argument
189 if (blk_integrity_rq(req) == 0 && blk_integrity_rq(next) == 0) in blk_integrity_merge_rq()
192 if (blk_integrity_rq(req) == 0 || blk_integrity_rq(next) == 0) in blk_integrity_merge_rq()
195 if (bio_integrity(req->bio)->bip_flags != in blk_integrity_merge_rq()
199 if (req->nr_integrity_segments + next->nr_integrity_segments > in blk_integrity_merge_rq()
203 if (integrity_req_gap_back_merge(req, next->bio)) in blk_integrity_merge_rq()
210 bool blk_integrity_merge_bio(struct request_queue *q, struct request *req, in blk_integrity_merge_bio() argument
216 if (blk_integrity_rq(req) == 0 && bio_integrity(bio) == NULL) in blk_integrity_merge_bio()
219 if (blk_integrity_rq(req) == 0 || bio_integrity(bio) == NULL) in blk_integrity_merge_bio()
222 if (bio_integrity(req->bio)->bip_flags != bio_integrity(bio)->bip_flags) in blk_integrity_merge_bio()
[all …]

blk.h
64 void init_request_from_bio(struct request *req, struct bio *bio);
98 void blk_add_timer(struct request *req);
102 bool bio_attempt_front_merge(struct request_queue *q, struct request *req,
104 bool bio_attempt_back_merge(struct request_queue *q, struct request *req,
111 void blk_account_io_start(struct request *req, bool new_io);
112 void blk_account_io_completion(struct request *req, unsigned int bytes);
113 void blk_account_io_done(struct request *req);
209 int ll_back_merge_fn(struct request_queue *q, struct request *req,
211 int ll_front_merge_fn(struct request_queue *q, struct request *req,

deadline-iosched.c
125 deadline_merge(struct request_queue *q, struct request **req, struct bio *bio) in deadline_merge() argument
150 *req = __rq; in deadline_merge()
155 struct request *req, int type) in deadline_merged_request() argument
163 elv_rb_del(deadline_rb_root(dd, req), req); in deadline_merged_request()
164 deadline_add_rq_rb(dd, req); in deadline_merged_request()
169 deadline_merged_requests(struct request_queue *q, struct request *req, in deadline_merged_requests() argument
176 if (!list_empty(&req->queuelist) && !list_empty(&next->queuelist)) { in deadline_merged_requests()
177 if (time_before(next->fifo_time, req->fifo_time)) { in deadline_merged_requests()
178 list_move(&req->queuelist, &next->queuelist); in deadline_merged_requests()
179 req->fifo_time = next->fifo_time; in deadline_merged_requests()

blk-mq.h
61 extern void blk_mq_rq_timed_out(struct request *req, bool reserved);

elevator.c
411 int elv_merge(struct request_queue *q, struct request **req, struct bio *bio) in elv_merge() argument
432 *req = q->last_merge; in elv_merge()
445 *req = __rq; in elv_merge()
450 return e->type->ops.elevator_merge_fn(q, req, bio); in elv_merge()

blk-mq.c
558 void blk_mq_rq_timed_out(struct request *req, bool reserved) in blk_mq_rq_timed_out() argument
560 struct blk_mq_ops *ops = req->q->mq_ops; in blk_mq_rq_timed_out()
572 if (!test_bit(REQ_ATOM_STARTED, &req->atomic_flags)) in blk_mq_rq_timed_out()
576 ret = ops->timeout(req, reserved); in blk_mq_rq_timed_out()
580 __blk_mq_complete_request(req); in blk_mq_rq_timed_out()
583 blk_add_timer(req); in blk_mq_rq_timed_out()
584 blk_clear_rq_complete(req); in blk_mq_rq_timed_out()
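
blk_mq_rq_timed_out() above is the blk-mq counterpart: it invokes the driver's blk_mq_ops->timeout hook and, depending on the returned enum blk_eh_timer_return, either completes the request or re-arms its timer. A minimal sketch of such a callback for this kernel's (struct request *, bool reserved) signature; the timeout policy it implements is illustrative only:

	#include <linux/errno.h>
	#include <linux/blkdev.h>
	#include <linux/blk-mq.h>

	static enum blk_eh_timer_return example_mq_timeout(struct request *req,
							   bool reserved)
	{
		/* In this sketch, reserved (driver-internal) commands just get more time. */
		if (reserved)
			return BLK_EH_RESET_TIMER;

		/*
		 * Treat the command as failed and let blk_mq_rq_timed_out() finish it
		 * via __blk_mq_complete_request(), as in the excerpt above.
		 */
		req->errors = -ETIMEDOUT;
		return BLK_EH_HANDLED;
	}

	static struct blk_mq_ops example_mq_ops = {
		/* .queue_rq, .map_queue, ... omitted from this sketch */
		.timeout	= example_mq_timeout,
	};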

cfq-iosched.c
2515 static int cfq_merge(struct request_queue *q, struct request **req, in cfq_merge() argument
2523 *req = __rq; in cfq_merge()
2530 static void cfq_merged_request(struct request_queue *q, struct request *req, in cfq_merged_request() argument
2534 struct cfq_queue *cfqq = RQ_CFQQ(req); in cfq_merged_request()
2536 cfq_reposition_rq_rb(cfqq, req); in cfq_merged_request()
2540 static void cfq_bio_merged(struct request_queue *q, struct request *req, in cfq_bio_merged() argument
2543 cfqg_stats_update_io_merged(RQ_CFQG(req), bio->bi_rw); in cfq_bio_merged()