/block/ |
D | blk-flush.c |
     94  static unsigned int blk_flush_policy(unsigned int fflags, struct request *rq)  in blk_flush_policy() argument
     98      if (blk_rq_sectors(rq))  in blk_flush_policy()
    102      if (rq->cmd_flags & REQ_FLUSH)  in blk_flush_policy()
    104      if (!(fflags & REQ_FUA) && (rq->cmd_flags & REQ_FUA))  in blk_flush_policy()
    110  static unsigned int blk_flush_cur_seq(struct request *rq)  in blk_flush_cur_seq() argument
    112      return 1 << ffz(rq->flush.seq);  in blk_flush_cur_seq()
    115  static void blk_flush_restore_request(struct request *rq)  in blk_flush_restore_request() argument
    122      rq->bio = rq->biotail;  in blk_flush_restore_request()
    125      rq->cmd_flags &= ~REQ_FLUSH_SEQ;  in blk_flush_restore_request()
    126      rq->end_io = rq->flush.saved_end_io;  in blk_flush_restore_request()
    [all …]
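The three tests at lines 98, 102 and 104 combine into a bitmask of flush steps (data, pre-flush, post-flush) that blk_flush_cur_seq() then walks one bit at a time. A minimal user-space model of that decision, with all flag and step values invented for illustration:

    #include <stdio.h>

    /* Illustrative stand-ins for the kernel's flag bits (values made up). */
    #define REQ_FLUSH      (1u << 0)  /* caller wants a preceding cache flush */
    #define REQ_FUA        (1u << 1)  /* caller wants forced unit access */

    #define FSEQ_PREFLUSH  (1u << 0)
    #define FSEQ_DATA      (1u << 1)
    #define FSEQ_POSTFLUSH (1u << 2)

    /* Decide which flush steps a write needs, given the queue's
     * capabilities (fflags) and the request's own flags: the same shape
     * of decision the blk_flush_policy() lines above make. */
    static unsigned int flush_policy(unsigned int fflags, unsigned int rq_flags,
                                     unsigned int sectors)
    {
        unsigned int policy = 0;

        if (sectors)
            policy |= FSEQ_DATA;
        if (fflags & REQ_FLUSH) {
            if (rq_flags & REQ_FLUSH)
                policy |= FSEQ_PREFLUSH;
            /* Queue has no native FUA: emulate it with a post-flush. */
            if (!(fflags & REQ_FUA) && (rq_flags & REQ_FUA))
                policy |= FSEQ_POSTFLUSH;
        }
        return policy;
    }

    int main(void)
    {
        /* A FUA write on a queue that can flush but lacks native FUA
         * needs all three steps: prints policy=0x7. */
        printf("policy=%#x\n", flush_policy(REQ_FLUSH, REQ_FLUSH | REQ_FUA, 8));
        return 0;
    }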
|
D | elevator.c |
     50  #define rq_hash_key(rq)  (blk_rq_pos(rq) + blk_rq_sectors(rq))  argument
     56  static int elv_iosched_allow_merge(struct request *rq, struct bio *bio)  in elv_iosched_allow_merge() argument
     58      struct request_queue *q = rq->q;  in elv_iosched_allow_merge()
     62      return e->type->ops.elevator_allow_merge_fn(q, rq, bio);  in elv_iosched_allow_merge()
     70  bool elv_rq_merge_ok(struct request *rq, struct bio *bio)  in elv_rq_merge_ok() argument
     72      if (!blk_rq_merge_ok(rq, bio))  in elv_rq_merge_ok()
     75      if (!elv_iosched_allow_merge(rq, bio))  in elv_rq_merge_ok()
    249  static inline void __elv_rqhash_del(struct request *rq)  in __elv_rqhash_del() argument
    251      hash_del(&rq->hash);  in __elv_rqhash_del()
    254  static void elv_rqhash_del(struct request_queue *q, struct request *rq)  in elv_rqhash_del() argument
    [all …]
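rq_hash_key() at line 50 hashes a request by the sector just past its end, which is exactly where a bio must begin to be a back-merge candidate. A toy version of that lookup, with the hash table and request layout invented here:

    #include <stdio.h>

    /* Toy request covering sectors [pos, pos + sectors). */
    struct req { unsigned long long pos, sectors; };

    /* As in rq_hash_key(): key a request by its END sector so a bio that
     * starts exactly there can find it for a back merge. */
    static unsigned long long rq_hash_key(const struct req *rq)
    {
        return rq->pos + rq->sectors;
    }

    #define NR_BUCKETS 64
    static struct req *table[NR_BUCKETS];   /* collisions ignored in this toy */

    static void rqhash_add(struct req *rq)
    {
        table[rq_hash_key(rq) % NR_BUCKETS] = rq;
    }

    static struct req *find_back_merge(unsigned long long bio_start)
    {
        struct req *rq = table[bio_start % NR_BUCKETS];

        return (rq && rq_hash_key(rq) == bio_start) ? rq : NULL;
    }

    int main(void)
    {
        struct req r = { 1000, 8 };

        rqhash_add(&r);
        printf("bio at 1008 back-merges: %s\n",
               find_back_merge(1008) ? "yes" : "no");
        return 0;
    }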
|
D | blk-exec.c |
     22  static void blk_end_sync_rq(struct request *rq, int error)  in blk_end_sync_rq() argument
     24      struct completion *waiting = rq->end_io_data;  in blk_end_sync_rq()
     26      rq->end_io_data = NULL;  in blk_end_sync_rq()
     27      __blk_put_request(rq->q, rq);  in blk_end_sync_rq()
     52      struct request *rq, int at_head,  in blk_execute_rq_nowait() argument
     60      rq->rq_disk = bd_disk;  in blk_execute_rq_nowait()
     61      rq->end_io = done;  in blk_execute_rq_nowait()
     66      is_pm_resume = rq->cmd_type == REQ_TYPE_PM_RESUME;  in blk_execute_rq_nowait()
     71      rq->errors = -ENXIO;  in blk_execute_rq_nowait()
     72      if (rq->end_io)  in blk_execute_rq_nowait()
    [all …]
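blk_end_sync_rq() is the glue that lets a synchronous blk_execute_rq() be built on the asynchronous _nowait variant shown at line 52: the caller parks a completion in end_io_data, and the end_io hook fires it when the hardware finishes. A self-contained pthread model of the same pattern, with the kernel types replaced by stand-ins:

    #include <pthread.h>
    #include <stdio.h>

    /* Minimal stand-in for the kernel's struct completion. */
    struct completion {
        pthread_mutex_t lock;
        pthread_cond_t  cond;
        int             done;
    };

    struct req {
        void (*end_io)(struct req *rq, int error);
        void  *end_io_data;
        int    errors;
    };

    /* Mirrors blk_end_sync_rq(): the async end_io hook just wakes whoever
     * sleeps on the completion stashed in end_io_data. */
    static void end_sync_rq(struct req *rq, int error)
    {
        struct completion *waiting = rq->end_io_data;

        rq->errors = error;
        rq->end_io_data = NULL;
        pthread_mutex_lock(&waiting->lock);
        waiting->done = 1;
        pthread_cond_signal(&waiting->cond);
        pthread_mutex_unlock(&waiting->lock);
    }

    /* "Hardware" completing the request asynchronously. */
    static void *irq_thread(void *arg)
    {
        struct req *rq = arg;

        rq->end_io(rq, 0);
        return NULL;
    }

    int main(void)
    {
        struct completion wait = { PTHREAD_MUTEX_INITIALIZER,
                                   PTHREAD_COND_INITIALIZER, 0 };
        struct req rq = { end_sync_rq, &wait, 0 };
        pthread_t t;

        pthread_create(&t, NULL, irq_thread, &rq);  /* execute "nowait" */
        pthread_mutex_lock(&wait.lock);             /* wait_for_completion */
        while (!wait.done)
            pthread_cond_wait(&wait.cond, &wait.lock);
        pthread_mutex_unlock(&wait.lock);
        pthread_join(t, NULL);
        printf("request done, errors=%d\n", rq.errors);
        return 0;
    }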
|
D | blk-core.c |
     63  static void drive_stat_acct(struct request *rq, int new_io)  in drive_stat_acct() argument
     66      int rw = rq_data_dir(rq);  in drive_stat_acct()
     69      if (!blk_do_io_stat(rq))  in drive_stat_acct()
     75      part = rq->part;  in drive_stat_acct()
     78      part = disk_map_sector_rcu(rq->rq_disk, blk_rq_pos(rq));  in drive_stat_acct()
     88      part = &rq->rq_disk->part0;  in drive_stat_acct()
     93      rq->part = part;  in drive_stat_acct()
    134  void blk_rq_init(struct request_queue *q, struct request *rq)  in blk_rq_init() argument
    136      memset(rq, 0, sizeof(*rq));  in blk_rq_init()
    138      INIT_LIST_HEAD(&rq->queuelist);  in blk_rq_init()
    [all …]
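blk_rq_init() at lines 134–138 shows the standard pattern for recycling request structs: memset() everything to zero, then re-establish the invariants that zero cannot express. A sketch with a cut-down, invented request struct:

    #include <stdio.h>
    #include <string.h>

    struct list_head { struct list_head *next, *prev; };

    static void INIT_LIST_HEAD(struct list_head *h) { h->next = h->prev = h; }

    struct req {
        struct list_head queuelist;
        int tag;
    };

    /* Same shape as blk_rq_init(): wipe, then fix up what zero gets
     * wrong (a list head must be self-linked, and since tag 0 is a
     * valid tag, "untagged" must be -1). */
    static void rq_init(struct req *rq)
    {
        memset(rq, 0, sizeof(*rq));
        INIT_LIST_HEAD(&rq->queuelist);
        rq->tag = -1;
    }

    int main(void)
    {
        struct req r;

        rq_init(&r);
        printf("tag=%d, self-linked=%d\n", r.tag,
               r.queuelist.next == &r.queuelist);
        return 0;
    }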
|
D | blk.h |
     25  void blk_rq_bio_prep(struct request_queue *q, struct request *rq,
     27  int blk_rq_append_bio(struct request_queue *q, struct request *rq,
     31  void blk_dequeue_request(struct request *rq);
     33  bool __blk_end_bidi_request(struct request *rq, int error,
     51  static inline int blk_mark_rq_complete(struct request *rq)  in blk_mark_rq_complete() argument
     53      return test_and_set_bit(REQ_ATOM_COMPLETE, &rq->atomic_flags);  in blk_mark_rq_complete()
     56  static inline void blk_clear_rq_complete(struct request *rq)  in blk_clear_rq_complete() argument
     58      clear_bit(REQ_ATOM_COMPLETE, &rq->atomic_flags);  in blk_clear_rq_complete()
     64  #define ELV_ON_HASH(rq)  hash_hashed(&(rq)->hash)  argument
     66  void blk_insert_flush(struct request *rq);
    [all …]
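blk_mark_rq_complete() at line 51 is an atomic claim: the normal completion path and the timeout handler race to set REQ_ATOM_COMPLETE, and only the winner may finish the request. The same idiom in portable C11, with a single atomic_flag standing in for the kernel's bit operations:

    #include <stdatomic.h>
    #include <stdio.h>

    struct req { atomic_flag complete; };

    /* Nonzero return means someone else already claimed the request. */
    static int mark_rq_complete(struct req *rq)
    {
        return atomic_flag_test_and_set(&rq->complete);
    }

    static void clear_rq_complete(struct req *rq)
    {
        atomic_flag_clear(&rq->complete);
    }

    int main(void)
    {
        struct req rq = { ATOMIC_FLAG_INIT };

        if (!mark_rq_complete(&rq))
            printf("completion path wins the race\n");
        if (mark_rq_complete(&rq))
            printf("timeout path sees the request already claimed\n");
        clear_rq_complete(&rq);   /* ready for requeue */
        return 0;
    }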
|
D | scsi_ioctl.c |
    224  static int blk_fill_sghdr_rq(struct request_queue *q, struct request *rq,  in blk_fill_sghdr_rq() argument
    227      if (copy_from_user(rq->cmd, hdr->cmdp, hdr->cmd_len))  in blk_fill_sghdr_rq()
    229      if (blk_verify_command(rq->cmd, mode & FMODE_WRITE))  in blk_fill_sghdr_rq()
    235      rq->cmd_len = hdr->cmd_len;  in blk_fill_sghdr_rq()
    236      rq->cmd_type = REQ_TYPE_BLOCK_PC;  in blk_fill_sghdr_rq()
    238      rq->timeout = msecs_to_jiffies(hdr->timeout);  in blk_fill_sghdr_rq()
    239      if (!rq->timeout)  in blk_fill_sghdr_rq()
    240          rq->timeout = q->sg_timeout;  in blk_fill_sghdr_rq()
    241      if (!rq->timeout)  in blk_fill_sghdr_rq()
    242          rq->timeout = BLK_DEFAULT_SG_TIMEOUT;  in blk_fill_sghdr_rq()
    [all …]
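Lines 238–242 are a three-level timeout fallback: the value from the SG_IO header wins, then the queue's sg_timeout, then a compiled-in default. The same chain in isolation (millisecond units assumed; the kernel converts to jiffies first, and the default value here is illustrative):

    #include <stdio.h>

    #define BLK_DEFAULT_SG_TIMEOUT_MS (60 * 1000)  /* illustrative value */

    static unsigned int pick_timeout(unsigned int hdr_ms, unsigned int queue_ms)
    {
        unsigned int t = hdr_ms;               /* 1: what userspace asked for */

        if (!t)
            t = queue_ms;                      /* 2: the queue's own default */
        if (!t)
            t = BLK_DEFAULT_SG_TIMEOUT_MS;     /* 3: global fallback */
        return t;
    }

    int main(void)
    {
        printf("%u %u %u\n",
               pick_timeout(5000, 30000),   /* header wins */
               pick_timeout(0, 30000),      /* queue default */
               pick_timeout(0, 0));         /* global fallback */
        return 0;
    }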
|
D | deadline-iosched.c |
     57  deadline_rb_root(struct deadline_data *dd, struct request *rq)  in deadline_rb_root() argument
     59      return &dd->sort_list[rq_data_dir(rq)];  in deadline_rb_root()
     66  deadline_latter_request(struct request *rq)  in deadline_latter_request() argument
     68      struct rb_node *node = rb_next(&rq->rb_node);  in deadline_latter_request()
     77  deadline_add_rq_rb(struct deadline_data *dd, struct request *rq)  in deadline_add_rq_rb() argument
     79      struct rb_root *root = deadline_rb_root(dd, rq);  in deadline_add_rq_rb()
     81      elv_rb_add(root, rq);  in deadline_add_rq_rb()
     85  deadline_del_rq_rb(struct deadline_data *dd, struct request *rq)  in deadline_del_rq_rb() argument
     87      const int data_dir = rq_data_dir(rq);  in deadline_del_rq_rb()
     89      if (dd->next_rq[data_dir] == rq)  in deadline_del_rq_rb()
    [all …]
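deadline_latter_request() works because the per-direction tree returned at line 59 is keyed by start sector, so rb_next() yields the next request in LBA order. A toy model that swaps the rb-tree for a sorted array but keeps the same "dispatch the next-higher sector" behaviour:

    #include <stdio.h>
    #include <stdlib.h>

    struct req { unsigned long long sector; };

    static int by_sector(const void *a, const void *b)
    {
        const struct req *ra = a, *rb = b;

        return (ra->sector > rb->sector) - (ra->sector < rb->sector);
    }

    int main(void)
    {
        /* Arrival order 300, 100, 200; dispatch order follows the disk. */
        struct req reqs[] = { { 300 }, { 100 }, { 200 } };
        size_t i;

        qsort(reqs, 3, sizeof(reqs[0]), by_sector);
        for (i = 0; i < 3; i++)
            printf("dispatch sector %llu\n", reqs[i].sector);
        return 0;
    }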
|
D | bsg.c |
     83      struct request *rq;  member
    175  static int blk_fill_sgv4_hdr_rq(struct request_queue *q, struct request *rq,  in blk_fill_sgv4_hdr_rq() argument
    180      rq->cmd = kzalloc(hdr->request_len, GFP_KERNEL);  in blk_fill_sgv4_hdr_rq()
    181      if (!rq->cmd)  in blk_fill_sgv4_hdr_rq()
    185      if (copy_from_user(rq->cmd, (void __user *)(unsigned long)hdr->request,  in blk_fill_sgv4_hdr_rq()
    190      if (blk_verify_command(rq->cmd, has_write_perm))  in blk_fill_sgv4_hdr_rq()
    198      rq->cmd_len = hdr->request_len;  in blk_fill_sgv4_hdr_rq()
    199      rq->cmd_type = REQ_TYPE_BLOCK_PC;  in blk_fill_sgv4_hdr_rq()
    201      rq->timeout = msecs_to_jiffies(hdr->timeout);  in blk_fill_sgv4_hdr_rq()
    202      if (!rq->timeout)  in blk_fill_sgv4_hdr_rq()
    [all …]
|
D | blk-map.c |
     12  int blk_rq_append_bio(struct request_queue *q, struct request *rq,  in blk_rq_append_bio() argument
     15      if (!rq->bio)  in blk_rq_append_bio()
     16          blk_rq_bio_prep(q, rq, bio);  in blk_rq_append_bio()
     17      else if (!ll_back_merge_fn(q, rq, bio))  in blk_rq_append_bio()
     20      rq->biotail->bi_next = bio;  in blk_rq_append_bio()
     21      rq->biotail = bio;  in blk_rq_append_bio()
     23      rq->__data_len += bio->bi_size;  in blk_rq_append_bio()
     42  static int __blk_rq_map_user(struct request_queue *q, struct request *rq,  in __blk_rq_map_user() argument
     50      reading = rq_data_dir(rq) == READ;  in __blk_rq_map_user()
     77      ret = blk_rq_append_bio(q, rq, bio);  in __blk_rq_map_user()
    [all …]
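blk_rq_append_bio() grows a request one bio at a time: the first bio primes it (line 16), later ones are linked through the cached biotail pointer so each append is O(1), and the request's byte count grows at line 23. A stand-alone sketch with simplified structs and the ll_back_merge_fn() check elided:

    #include <stdio.h>

    struct bio { struct bio *bi_next; unsigned int bi_size; };

    struct req {
        struct bio *bio, *biotail;
        unsigned int data_len;
    };

    static void rq_append_bio(struct req *rq, struct bio *bio)
    {
        if (!rq->bio) {
            rq->bio = rq->biotail = bio;        /* first bio primes the rq */
        } else {
            rq->biotail->bi_next = bio;         /* O(1) tail append */
            rq->biotail = bio;
        }
        rq->data_len += bio->bi_size;
    }

    int main(void)
    {
        struct req rq = { NULL, NULL, 0 };
        struct bio a = { NULL, 4096 }, b = { NULL, 8192 };

        rq_append_bio(&rq, &a);
        rq_append_bio(&rq, &b);
        printf("total=%u bytes, tail is b: %d\n", rq.data_len,
               rq.biotail == &b);
        return 0;
    }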
|
D | blk-merge.c |
     71  void blk_recalc_rq_segments(struct request *rq)  in blk_recalc_rq_segments() argument
     73      rq->nr_phys_segments = __blk_recalc_rq_segments(rq->q, rq->bio);  in blk_recalc_rq_segments()
    160  int blk_rq_map_sg(struct request_queue *q, struct request *rq,  in blk_rq_map_sg() argument
    176      rq_for_each_segment(bvec, rq, iter) {  in blk_rq_map_sg()
    182      if (unlikely(rq->cmd_flags & REQ_COPY_USER) &&  in blk_rq_map_sg()
    183          (blk_rq_bytes(rq) & q->dma_pad_mask)) {  in blk_rq_map_sg()
    185          (q->dma_pad_mask & ~blk_rq_bytes(rq)) + 1;  in blk_rq_map_sg()
    188          rq->extra_len += pad_len;  in blk_rq_map_sg()
    191      if (q->dma_drain_size && q->dma_drain_needed(rq)) {  in blk_rq_map_sg()
    192          if (rq->cmd_flags & REQ_WRITE)  in blk_rq_map_sg()
    [all …]
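Line 185 is the padding formula: for an alignment mask m (alignment minus one), (m & ~len) + 1 is the byte count that rounds len up to the next boundary, and line 183 ensures it is only applied when len & m is nonzero. Worked in isolation:

    #include <stdio.h>

    /* dma_pad_mask = 3 means 4-byte alignment. */
    static unsigned int pad_len(unsigned int dma_pad_mask, unsigned int len)
    {
        return (dma_pad_mask & ~len) + 1;
    }

    int main(void)
    {
        unsigned int mask = 3, len = 510;

        if (len & mask)   /* the guard from line 183 */
            printf("pad %u bytes: %u -> %u\n",
                   pad_len(mask, len), len, len + pad_len(mask, len));
        return 0;
    }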
|
D | noop-iosched.c |
     15  static void noop_merged_requests(struct request_queue *q, struct request *rq,  in noop_merged_requests() argument
     26      struct request *rq;  in noop_dispatch() local
     27      rq = list_entry(nd->queue.next, struct request, queuelist);  in noop_dispatch()
     28      list_del_init(&rq->queuelist);  in noop_dispatch()
     29      elv_dispatch_sort(q, rq);  in noop_dispatch()
     35  static void noop_add_request(struct request_queue *q, struct request *rq)  in noop_add_request() argument
     39      list_add_tail(&rq->queuelist, &nd->queue);  in noop_add_request()
     43  noop_former_request(struct request_queue *q, struct request *rq)  in noop_former_request() argument
     47      if (rq->queuelist.prev == &nd->queue)  in noop_former_request()
     49      return list_entry(rq->queuelist.prev, struct request, queuelist);  in noop_former_request()
    [all …]
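Stripped of elevator plumbing, noop is a FIFO: noop_add_request() appends to a single list and noop_dispatch() pops its head. The same behaviour in a few lines, with a ring buffer standing in for the kernel list:

    #include <stdio.h>

    #define QSZ 8
    static int fifo[QSZ];
    static unsigned int head, tail;

    static void add_request(int rq) { fifo[tail++ % QSZ] = rq; }

    static int dispatch(void)
    {
        return head == tail ? -1 : fifo[head++ % QSZ];
    }

    int main(void)
    {
        int rq;

        add_request(1); add_request(2); add_request(3);
        while ((rq = dispatch()) != -1)
            printf("dispatch rq %d\n", rq);   /* 1, 2, 3: arrival order */
        return 0;
    }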
|
D | blk-softirq.c |
     31      struct request *rq;  in blk_done_softirq() local
     33      rq = list_entry(local_list.next, struct request, csd.list);  in blk_done_softirq()
     34      list_del_init(&rq->csd.list);  in blk_done_softirq()
     35      rq->q->softirq_done_fn(rq);  in blk_done_softirq()
     42      struct request *rq = data;  in trigger_softirq() local
     48      list_add_tail(&rq->csd.list, list);  in trigger_softirq()
     50      if (list->next == &rq->csd.list)  in trigger_softirq()
     59  static int raise_blk_irq(int cpu, struct request *rq)  in raise_blk_irq() argument
     62      struct call_single_data *data = &rq->csd;  in raise_blk_irq()
     65      data->info = rq;  in raise_blk_irq()
    [all …]
|
D | blk-tag.c |
    280  void blk_queue_end_tag(struct request_queue *q, struct request *rq)  in blk_queue_end_tag() argument
    283      unsigned tag = rq->tag;  /* negative tags invalid */  in blk_queue_end_tag()
    287      list_del_init(&rq->queuelist);  in blk_queue_end_tag()
    288      rq->cmd_flags &= ~REQ_QUEUED;  in blk_queue_end_tag()
    289      rq->tag = -1;  in blk_queue_end_tag()
    328  int blk_queue_start_tag(struct request_queue *q, struct request *rq)  in blk_queue_start_tag() argument
    334      if (unlikely((rq->cmd_flags & REQ_QUEUED))) {  in blk_queue_start_tag()
    337          __func__, rq,  in blk_queue_start_tag()
    338          rq->rq_disk ? rq->rq_disk->disk_name : "?", rq->tag);  in blk_queue_start_tag()
    350      if (!rq_is_sync(rq) && max_depth > 1) {  in blk_queue_start_tag()
    [all …]
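blk_queue_start_tag() and blk_queue_end_tag() maintain a bitmap of in-flight tags, with -1 meaning "untagged" (line 289). A toy allocator showing the claim/release cycle; the real code also caps async depth (line 350) and uses atomic bit operations:

    #include <stdio.h>

    #define MAX_TAGS 32
    static unsigned int tag_map;   /* bit i set => tag i in flight */

    static int start_tag(void)
    {
        int tag;

        for (tag = 0; tag < MAX_TAGS; tag++) {
            if (!(tag_map & (1u << tag))) {
                tag_map |= 1u << tag;
                return tag;
            }
        }
        return -1;   /* map full: caller must wait and retry */
    }

    static void end_tag(int tag)
    {
        tag_map &= ~(1u << tag);
    }

    int main(void)
    {
        int a = start_tag(), b = start_tag();

        printf("claimed tags %d and %d\n", a, b);
        end_tag(a);
        printf("freed tag is reused: %d\n", start_tag());
        return 0;
    }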
|
D | cfq-iosched.c |
     57  #define RQ_CIC(rq)   icq_to_cic((rq)->elv.icq)  argument
     58  #define RQ_CFQQ(rq)  (struct cfq_queue *) ((rq)->elv.priv[0])  argument
     59  #define RQ_CFQG(rq)  (struct cfq_group *) ((rq)->elv.priv[1])  argument
   2200  static void cfq_del_rq_rb(struct request *rq)  in cfq_del_rq_rb() argument
   2202      struct cfq_queue *cfqq = RQ_CFQQ(rq);  in cfq_del_rq_rb()
   2203      const int sync = rq_is_sync(rq);  in cfq_del_rq_rb()
   2208      elv_rb_del(&cfqq->sort_list, rq);  in cfq_del_rq_rb()
   2223  static void cfq_add_rq_rb(struct request *rq)  in cfq_add_rq_rb() argument
   2225      struct cfq_queue *cfqq = RQ_CFQQ(rq);  in cfq_add_rq_rb()
   2229      cfqq->queued[rq_is_sync(rq)]++;  in cfq_add_rq_rb()
    [all …]
|
D | blk-timeout.c |
    114      struct request *rq, *tmp;  in blk_rq_timed_out_timer() local
    119      list_for_each_entry_safe(rq, tmp, &q->timeout_list, timeout_list) {  in blk_rq_timed_out_timer()
    120          if (time_after_eq(jiffies, rq->deadline)) {  in blk_rq_timed_out_timer()
    121              list_del_init(&rq->timeout_list);  in blk_rq_timed_out_timer()
    126              if (blk_mark_rq_complete(rq))  in blk_rq_timed_out_timer()
    128              blk_rq_timed_out(rq);  in blk_rq_timed_out_timer()
    129          } else if (!next_set || time_after(next, rq->deadline)) {  in blk_rq_timed_out_timer()
    130              next = rq->deadline;  in blk_rq_timed_out_timer()
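The loop at lines 119–130 does two jobs in one pass: it expires overdue requests (after winning the blk_mark_rq_complete() race at line 126) and tracks the earliest surviving deadline so the timer can be re-armed exactly once. A simplified model with plain integers in place of jiffies and the time_after() macros:

    #include <stdio.h>

    struct req { long deadline; int expired; };

    /* Returns the next deadline to arm the timer for, or -1 if no live
     * requests remain. */
    static long scan_timeouts(struct req *reqs, int n, long now)
    {
        long next = 0;
        int next_set = 0, i;

        for (i = 0; i < n; i++) {
            if (reqs[i].expired)
                continue;
            if (now >= reqs[i].deadline)          /* time_after_eq() */
                reqs[i].expired = 1;
            else if (!next_set || reqs[i].deadline < next) {
                next = reqs[i].deadline;          /* earliest survivor */
                next_set = 1;
            }
        }
        return next_set ? next : -1;
    }

    int main(void)
    {
        struct req reqs[] = { { 100, 0 }, { 250, 0 }, { 180, 0 } };
        long next = scan_timeouts(reqs, 3, 120);

        printf("rq0 expired=%d, re-arm timer at %ld\n", reqs[0].expired, next);
        return 0;
    }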
|
D | blk-cgroup.h |
    348  static inline void blk_rq_set_rl(struct request *rq, struct request_list *rl)  in blk_rq_set_rl() argument
    350      rq->rl = rl;  in blk_rq_set_rl()
    359  static inline struct request_list *blk_rq_rl(struct request *rq)  in blk_rq_rl() argument
    361      return rq->rl;  in blk_rq_rl()
    558  static inline void blk_rq_set_rl(struct request *rq, struct request_list *rl) { }  in blk_rq_set_rl() argument
    559  static inline struct request_list *blk_rq_rl(struct request *rq) { return &rq->q->root_rl; }  in blk_rq_rl() argument
|
D | bsg-lib.c |
     82  static void bsg_softirq_done(struct request *rq)  in bsg_softirq_done() argument
     84      struct bsg_job *job = rq->special;  in bsg_softirq_done()
     86      blk_end_request_all(rq, rq->errors);  in bsg_softirq_done()
|