/block/ |
D | blk-flush.c |
      98  static unsigned int blk_flush_policy(unsigned int fflags, struct request *rq)    in blk_flush_policy() argument
     102  if (blk_rq_sectors(rq))    in blk_flush_policy()
     106  if (rq->cmd_flags & REQ_FLUSH)    in blk_flush_policy()
     108  if (!(fflags & REQ_FUA) && (rq->cmd_flags & REQ_FUA))    in blk_flush_policy()
     114  static unsigned int blk_flush_cur_seq(struct request *rq)    in blk_flush_cur_seq() argument
     116  return 1 << ffz(rq->flush.seq);    in blk_flush_cur_seq()
     119  static void blk_flush_restore_request(struct request *rq)    in blk_flush_restore_request() argument
     126  rq->bio = rq->biotail;    in blk_flush_restore_request()
     129  rq->cmd_flags &= ~REQ_FLUSH_SEQ;    in blk_flush_restore_request()
     130  rq->end_io = rq->flush.saved_end_io;    in blk_flush_restore_request()
          [all …]
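The bit trick in blk_flush_cur_seq() is worth spelling out: rq->flush.seq accumulates one bit per completed flush stage, and 1 << ffz(...) (find first zero) therefore always names the next stage to run, with no explicit "current stage" field. A minimal userspace sketch of that sequencing, assuming the REQ_FSEQ_* stage bits this era's blk-flush.c uses and modeling ffz() with a GCC builtin; none of this is kernel code:

    /* Userspace model of blk-flush.c stage tracking: seq gains a bit per
     * completed stage; the current stage is the lowest still-zero bit. */
    #include <stdio.h>

    #define REQ_FSEQ_PREFLUSH  (1U << 0)
    #define REQ_FSEQ_DATA      (1U << 1)
    #define REQ_FSEQ_POSTFLUSH (1U << 2)
    #define REQ_FSEQ_DONE      (1U << 3)

    static unsigned int ffz(unsigned int x)
    {
        return __builtin_ctz(~x);   /* index of the first zero bit */
    }

    static unsigned int flush_cur_seq(unsigned int seq)
    {
        return 1U << ffz(seq);      /* same shape as blk_flush_cur_seq() */
    }

    int main(void)
    {
        unsigned int seq = 0;
        const char *names[] = { "PREFLUSH", "DATA", "POSTFLUSH", "DONE" };

        while (!(seq & REQ_FSEQ_DONE)) {
            printf("next stage: %s\n", names[ffz(seq)]);
            seq |= flush_cur_seq(seq);  /* mark the stage complete */
        }
        return 0;
    }

Because each stage ORs in its bit as it completes, advancing the state machine is a single OR, and restarting a partially done sequence needs no bookkeeping beyond the mask itself.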
|
D | elevator.c |
      50  #define rq_hash_key(rq) (blk_rq_pos(rq) + blk_rq_sectors(rq))    argument
      56  static int elv_iosched_allow_merge(struct request *rq, struct bio *bio)    in elv_iosched_allow_merge() argument
      58  struct request_queue *q = rq->q;    in elv_iosched_allow_merge()
      62  return e->type->ops.elevator_allow_merge_fn(q, rq, bio);    in elv_iosched_allow_merge()
      70  bool elv_rq_merge_ok(struct request *rq, struct bio *bio)    in elv_rq_merge_ok() argument
      72  if (!blk_rq_merge_ok(rq, bio))    in elv_rq_merge_ok()
      75  if (!elv_iosched_allow_merge(rq, bio))    in elv_rq_merge_ok()
     245  static inline void __elv_rqhash_del(struct request *rq)    in __elv_rqhash_del() argument
     247  hash_del(&rq->hash);    in __elv_rqhash_del()
     248  rq->cmd_flags &= ~REQ_HASHED;    in __elv_rqhash_del()
          [all …]
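rq_hash_key() hashes a request by its end sector (position plus length), and that choice is deliberate: a new bio can back-merge onto a request exactly when the bio starts where the request ends, so looking up the bio's start sector in the hash finds merge candidates in O(1). A userspace sketch of the key, with a stand-in struct rather than the kernel's struct request:

    /* Why elevator.c hashes requests by END sector: back-merge candidates
     * are found by looking up a bio's start sector in the request hash. */
    #include <stdio.h>

    typedef unsigned long long sector_t;

    struct req {
        sector_t pos;       /* first sector */
        sector_t sectors;   /* length in sectors */
    };

    static sector_t rq_hash_key(const struct req *rq)
    {
        return rq->pos + rq->sectors;   /* end sector, as in elevator.c */
    }

    int main(void)
    {
        struct req rq = { .pos = 2048, .sectors = 8 };
        sector_t bio_start = 2056;

        if (rq_hash_key(&rq) == bio_start)
            printf("bio at %llu can back-merge onto request ending at %llu\n",
                   bio_start, rq_hash_key(&rq));
        return 0;
    }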
|
D | blk-exec.c |
      23  static void blk_end_sync_rq(struct request *rq, int error)    in blk_end_sync_rq() argument
      25  struct completion *waiting = rq->end_io_data;    in blk_end_sync_rq()
      27  rq->end_io_data = NULL;    in blk_end_sync_rq()
      52  struct request *rq, int at_head,    in blk_execute_rq_nowait() argument
      58  WARN_ON(rq->cmd_type == REQ_TYPE_FS);    in blk_execute_rq_nowait()
      60  rq->rq_disk = bd_disk;    in blk_execute_rq_nowait()
      61  rq->end_io = done;    in blk_execute_rq_nowait()
      68  blk_mq_insert_request(rq, at_head, true, false);    in blk_execute_rq_nowait()
      75  rq->cmd_flags |= REQ_QUIET;    in blk_execute_rq_nowait()
      76  rq->errors = -ENXIO;    in blk_execute_rq_nowait()
          [all …]
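blk_end_sync_rq() is the completion half of a sync-over-async pattern: the synchronous caller parks a struct completion in rq->end_io_data, submits with blk_execute_rq_nowait(), and sleeps until the end_io callback completes it. A userspace model of the same shape, with pthreads standing in for kernel completions; the request and completion types here are stand-ins:

    /* Sync-over-async, blk-exec.c style: the waiter parks a completion in
     * end_io_data; the end_io callback (run here from a worker thread
     * playing the driver) completes it. */
    #include <pthread.h>
    #include <stdio.h>

    struct completion {
        pthread_mutex_t lock;
        pthread_cond_t  cond;
        int             done;
    };

    struct request {
        void (*end_io)(struct request *, int);
        void *end_io_data;
    };

    static void complete(struct completion *c)
    {
        pthread_mutex_lock(&c->lock);
        c->done = 1;
        pthread_cond_signal(&c->cond);
        pthread_mutex_unlock(&c->lock);
    }

    static void wait_for_completion(struct completion *c)
    {
        pthread_mutex_lock(&c->lock);
        while (!c->done)
            pthread_cond_wait(&c->cond, &c->lock);
        pthread_mutex_unlock(&c->lock);
    }

    /* Same shape as blk_end_sync_rq(): wake whoever parked the completion. */
    static void end_sync_rq(struct request *rq, int error)
    {
        struct completion *waiting = rq->end_io_data;

        rq->end_io_data = NULL;
        complete(waiting);
    }

    static void *worker(void *arg)          /* stands in for the driver */
    {
        struct request *rq = arg;
        rq->end_io(rq, 0);
        return NULL;
    }

    int main(void)
    {
        struct completion done = {
            PTHREAD_MUTEX_INITIALIZER, PTHREAD_COND_INITIALIZER, 0
        };
        struct request rq = { .end_io = end_sync_rq, .end_io_data = &done };
        pthread_t t;

        pthread_create(&t, NULL, worker, &rq);  /* "submit" */
        wait_for_completion(&done);             /* blk_execute_rq()-style wait */
        pthread_join(&t, NULL);
        puts("request completed synchronously");
        return 0;
    }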
|
D | blk-mq.c |
     162  struct request *rq, unsigned int rw_flags)    in blk_mq_rq_ctx_init() argument
     167  INIT_LIST_HEAD(&rq->queuelist);    in blk_mq_rq_ctx_init()
     169  rq->q = q;    in blk_mq_rq_ctx_init()
     170  rq->mq_ctx = ctx;    in blk_mq_rq_ctx_init()
     171  rq->cmd_flags |= rw_flags;    in blk_mq_rq_ctx_init()
     173  rq->cpu = -1;    in blk_mq_rq_ctx_init()
     174  INIT_HLIST_NODE(&rq->hash);    in blk_mq_rq_ctx_init()
     175  RB_CLEAR_NODE(&rq->rb_node);    in blk_mq_rq_ctx_init()
     176  rq->rq_disk = NULL;    in blk_mq_rq_ctx_init()
     177  rq->part = NULL;    in blk_mq_rq_ctx_init()
          [all …]
|
D | scsi_ioctl.c |
     230  static int blk_fill_sghdr_rq(struct request_queue *q, struct request *rq,    in blk_fill_sghdr_rq() argument
     233  if (copy_from_user(rq->cmd, hdr->cmdp, hdr->cmd_len))    in blk_fill_sghdr_rq()
     235  if (blk_verify_command(rq->cmd, mode & FMODE_WRITE))    in blk_fill_sghdr_rq()
     241  rq->cmd_len = hdr->cmd_len;    in blk_fill_sghdr_rq()
     243  rq->timeout = msecs_to_jiffies(hdr->timeout);    in blk_fill_sghdr_rq()
     244  if (!rq->timeout)    in blk_fill_sghdr_rq()
     245  rq->timeout = q->sg_timeout;    in blk_fill_sghdr_rq()
     246  if (!rq->timeout)    in blk_fill_sghdr_rq()
     247  rq->timeout = BLK_DEFAULT_SG_TIMEOUT;    in blk_fill_sghdr_rq()
     248  if (rq->timeout < BLK_MIN_SG_TIMEOUT)    in blk_fill_sghdr_rq()
          [all …]
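The timeout assignment in blk_fill_sghdr_rq() is a three-level fallback: the caller's value wins, else the queue's sg_timeout, else a global default, and the result is then clamped to a floor. A small userspace sketch of the chain; the millisecond constants here are placeholders for BLK_DEFAULT_SG_TIMEOUT and BLK_MIN_SG_TIMEOUT (check blkdev.h for the real HZ-based values), and jiffies conversion is simplified away:

    /* Cascading timeout defaults, blk_fill_sghdr_rq() style. */
    #include <stdio.h>

    #define DEFAULT_SG_TIMEOUT_MS  (60U * 1000)  /* assumed 60 s default */
    #define MIN_SG_TIMEOUT_MS      (7U * 1000)   /* assumed 7 s floor */

    static unsigned int pick_timeout(unsigned int hdr_ms, unsigned int q_ms)
    {
        unsigned int t = hdr_ms;                 /* caller's value wins */

        if (!t)
            t = q_ms;                            /* per-queue default */
        if (!t)
            t = DEFAULT_SG_TIMEOUT_MS;           /* global default */
        if (t < MIN_SG_TIMEOUT_MS)
            t = MIN_SG_TIMEOUT_MS;               /* never below the floor */
        return t;
    }

    int main(void)
    {
        printf("%u\n", pick_timeout(0, 0));      /* 60000: both unset */
        printf("%u\n", pick_timeout(0, 30000));  /* 30000: queue default */
        printf("%u\n", pick_timeout(1000, 0));   /* 7000: clamped up */
        return 0;
    }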
|
D | blk-core.c |
     124  void blk_rq_init(struct request_queue *q, struct request *rq)    in blk_rq_init() argument
     126  memset(rq, 0, sizeof(*rq));    in blk_rq_init()
     128  INIT_LIST_HEAD(&rq->queuelist);    in blk_rq_init()
     129  INIT_LIST_HEAD(&rq->timeout_list);    in blk_rq_init()
     130  rq->cpu = -1;    in blk_rq_init()
     131  rq->q = q;    in blk_rq_init()
     132  rq->__sector = (sector_t) -1;    in blk_rq_init()
     133  INIT_HLIST_NODE(&rq->hash);    in blk_rq_init()
     134  RB_CLEAR_NODE(&rq->rb_node);    in blk_rq_init()
     135  rq->cmd = rq->__cmd;    in blk_rq_init()
          [all …]
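Note the ordering in blk_rq_init(): memset() first, then INIT_LIST_HEAD()/INIT_HLIST_NODE()/RB_CLEAR_NODE(). An all-zero intrusive node is not a valid empty node; an empty list_head must point at itself, so the embedded nodes are fixed up after the wipe. A userspace sketch with pared-down copies of the list helper:

    /* The memset-then-reinit pattern of blk_rq_init(), in miniature. */
    #include <stdio.h>
    #include <string.h>

    struct list_head { struct list_head *next, *prev; };

    static void INIT_LIST_HEAD(struct list_head *l) { l->next = l->prev = l; }

    struct request {
        struct list_head queuelist;
        struct list_head timeout_list;
        int cpu;
    };

    static void rq_init(struct request *rq)
    {
        memset(rq, 0, sizeof(*rq));        /* wipe everything first */
        INIT_LIST_HEAD(&rq->queuelist);    /* then fix up intrusive nodes */
        INIT_LIST_HEAD(&rq->timeout_list);
        rq->cpu = -1;                      /* -1 means "no CPU chosen yet" */
    }

    int main(void)
    {
        struct request rq;

        rq_init(&rq);
        printf("queuelist empty: %d\n", rq.queuelist.next == &rq.queuelist);
        return 0;
    }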
|
D | deadline-iosched.c |
      57  deadline_rb_root(struct deadline_data *dd, struct request *rq)    in deadline_rb_root() argument
      59  return &dd->sort_list[rq_data_dir(rq)];    in deadline_rb_root()
      66  deadline_latter_request(struct request *rq)    in deadline_latter_request() argument
      68  struct rb_node *node = rb_next(&rq->rb_node);    in deadline_latter_request()
      77  deadline_add_rq_rb(struct deadline_data *dd, struct request *rq)    in deadline_add_rq_rb() argument
      79  struct rb_root *root = deadline_rb_root(dd, rq);    in deadline_add_rq_rb()
      81  elv_rb_add(root, rq);    in deadline_add_rq_rb()
      85  deadline_del_rq_rb(struct deadline_data *dd, struct request *rq)    in deadline_del_rq_rb() argument
      87  const int data_dir = rq_data_dir(rq);    in deadline_del_rq_rb()
      89  if (dd->next_rq[data_dir] == rq)    in deadline_del_rq_rb()
          [all …]
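deadline keeps one sector-sorted rb-tree per data direction (deadline_rb_root() indexes sort_list[] by rq_data_dir()), so deadline_latter_request() is nothing more than rb_next(): the in-order successor, or NULL at the end of the tree. A toy sketch of that lookup, with a sorted array openly standing in for the rb-tree:

    /* "Latter request" = in-order successor in the sector-sorted set. */
    #include <stdio.h>

    int main(void)
    {
        unsigned long sectors[] = { 100, 200, 300 };  /* sorted, as the tree is */
        int cur = 1;                                  /* request at sector 200 */
        int n = 3;

        if (cur + 1 < n)
            printf("latter request is at sector %lu\n", sectors[cur + 1]);
        else
            printf("no latter request (end of tree)\n");
        return 0;
    }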
|
D | bsg.c |
      83  struct request *rq;    member
     139  static int blk_fill_sgv4_hdr_rq(struct request_queue *q, struct request *rq,    in blk_fill_sgv4_hdr_rq() argument
     144  rq->cmd = kzalloc(hdr->request_len, GFP_KERNEL);    in blk_fill_sgv4_hdr_rq()
     145  if (!rq->cmd)    in blk_fill_sgv4_hdr_rq()
     149  if (copy_from_user(rq->cmd, (void __user *)(unsigned long)hdr->request,    in blk_fill_sgv4_hdr_rq()
     154  if (blk_verify_command(rq->cmd, has_write_perm))    in blk_fill_sgv4_hdr_rq()
     162  rq->cmd_len = hdr->request_len;    in blk_fill_sgv4_hdr_rq()
     164  rq->timeout = msecs_to_jiffies(hdr->timeout);    in blk_fill_sgv4_hdr_rq()
     165  if (!rq->timeout)    in blk_fill_sgv4_hdr_rq()
     166  rq->timeout = q->sg_timeout;    in blk_fill_sgv4_hdr_rq()
          [all …]
|
D | blk.h |
      65  void blk_rq_bio_prep(struct request_queue *q, struct request *rq,
      67  int blk_rq_append_bio(struct request_queue *q, struct request *rq,
      71  void blk_dequeue_request(struct request *rq);
      73  bool __blk_end_bidi_request(struct request *rq, int error,
     127  static inline int blk_mark_rq_complete(struct request *rq)    in blk_mark_rq_complete() argument
     129  return test_and_set_bit(REQ_ATOM_COMPLETE, &rq->atomic_flags);    in blk_mark_rq_complete()
     132  static inline void blk_clear_rq_complete(struct request *rq)    in blk_clear_rq_complete() argument
     134  clear_bit(REQ_ATOM_COMPLETE, &rq->atomic_flags);    in blk_clear_rq_complete()
     140  #define ELV_ON_HASH(rq) ((rq)->cmd_flags & REQ_HASHED)    argument
     142  void blk_insert_flush(struct request *rq);
          [all …]
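blk_mark_rq_complete() is an atomic claim: test_and_set_bit() returns the old bit value, so whichever of the normal-completion and timeout paths sets REQ_ATOM_COMPLETE first owns finishing the request, and the loser backs off. A userspace model of the race, with C11 atomic_fetch_or standing in for the kernel bitop:

    /* Exactly-one-winner completion claim, blk_mark_rq_complete() style. */
    #include <stdatomic.h>
    #include <stdio.h>

    #define REQ_ATOM_COMPLETE 0

    static atomic_ulong atomic_flags;

    /* Returns nonzero if the bit was already set, i.e. we lost the race. */
    static int mark_rq_complete(void)
    {
        return atomic_fetch_or(&atomic_flags, 1UL << REQ_ATOM_COMPLETE)
               & (1UL << REQ_ATOM_COMPLETE);
    }

    int main(void)
    {
        if (!mark_rq_complete())
            puts("this path owns completion");   /* first caller wins */
        if (mark_rq_complete())
            puts("already completed, back off"); /* second caller loses */
        return 0;
    }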
|
D | noop-iosched.c |
      15  static void noop_merged_requests(struct request_queue *q, struct request *rq,    in noop_merged_requests() argument
      24  struct request *rq;    in noop_dispatch() local
      26  rq = list_first_entry_or_null(&nd->queue, struct request, queuelist);    in noop_dispatch()
      27  if (rq) {    in noop_dispatch()
      28  list_del_init(&rq->queuelist);    in noop_dispatch()
      29  elv_dispatch_sort(q, rq);    in noop_dispatch()
      35  static void noop_add_request(struct request_queue *q, struct request *rq)    in noop_add_request() argument
      39  list_add_tail(&rq->queuelist, &nd->queue);    in noop_add_request()
      43  noop_former_request(struct request_queue *q, struct request *rq)    in noop_former_request() argument
      47  if (rq->queuelist.prev == &nd->queue)    in noop_former_request()
          [all …]
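noop really is just a FIFO over the intrusive queuelist node: noop_add_request() appends to the tail, noop_dispatch() pops the head if there is one. A self-contained userspace sketch with pared-down versions of the list helpers:

    /* The noop elevator's FIFO in miniature: intrusive list, tail add,
     * head pop via the container-of trick. */
    #include <stdio.h>
    #include <stddef.h>

    struct list_head { struct list_head *next, *prev; };

    static void INIT_LIST_HEAD(struct list_head *l) { l->next = l->prev = l; }

    static void list_add_tail(struct list_head *n, struct list_head *h)
    {
        n->prev = h->prev; n->next = h;
        h->prev->next = n; h->prev = n;
    }

    static void list_del_init(struct list_head *n)
    {
        n->prev->next = n->next; n->next->prev = n->prev;
        INIT_LIST_HEAD(n);
    }

    struct request { int id; struct list_head queuelist; };

    #define first_rq_or_null(h) \
        ((h)->next == (h) ? NULL : \
         (struct request *)((char *)(h)->next - offsetof(struct request, queuelist)))

    int main(void)
    {
        struct list_head queue;
        struct request a = { .id = 1 }, b = { .id = 2 };
        struct request *rq;

        INIT_LIST_HEAD(&queue);
        list_add_tail(&a.queuelist, &queue);       /* noop_add_request() shape */
        list_add_tail(&b.queuelist, &queue);

        while ((rq = first_rq_or_null(&queue))) {  /* noop_dispatch() shape */
            list_del_init(&rq->queuelist);
            printf("dispatch rq %d\n", rq->id);
        }
        return 0;
    }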
|
D | blk-merge.c |
     288  void blk_recalc_rq_segments(struct request *rq)    in blk_recalc_rq_segments() argument
     291  &rq->q->queue_flags);    in blk_recalc_rq_segments()
     293  rq->nr_phys_segments = __blk_recalc_rq_segments(rq->q, rq->bio,    in blk_recalc_rq_segments()
     440  int blk_rq_map_sg(struct request_queue *q, struct request *rq,    in blk_rq_map_sg() argument
     446  if (rq->bio)    in blk_rq_map_sg()
     447  nsegs = __blk_bios_map_sg(q, rq->bio, sglist, &sg);    in blk_rq_map_sg()
     449  if (unlikely(rq->cmd_flags & REQ_COPY_USER) &&    in blk_rq_map_sg()
     450  (blk_rq_bytes(rq) & q->dma_pad_mask)) {    in blk_rq_map_sg()
     452  (q->dma_pad_mask & ~blk_rq_bytes(rq)) + 1;    in blk_rq_map_sg()
     455  rq->extra_len += pad_len;    in blk_rq_map_sg()
          [all …]
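The padding line in blk_rq_map_sg() is compact bit arithmetic: with dma_pad_mask equal to alignment minus one, (dma_pad_mask & ~len) + 1 is exactly the byte count that rounds len up to the next alignment boundary. A quick userspace check of that identity:

    /* Verify: (mask & ~len) + 1 rounds len up to the next (mask+1) boundary. */
    #include <stdio.h>

    int main(void)
    {
        unsigned int dma_pad_mask = 511;        /* 512-byte alignment */
        unsigned int lens[] = { 512, 513, 1000, 1024 };

        for (int i = 0; i < 4; i++) {
            unsigned int len = lens[i];
            if (len & dma_pad_mask) {           /* only pad unaligned lengths */
                unsigned int pad_len = (dma_pad_mask & ~len) + 1;
                printf("len %4u -> pad %3u -> %u\n", len, pad_len, len + pad_len);
            } else {
                printf("len %4u already aligned\n", len);
            }
        }
        return 0;
    }

For len = 513 this prints pad 511 (total 1024), and for len = 1000 it prints pad 24, matching a straightforward round-up in both cases.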
|
D | blk-map.c |
      30  int blk_rq_append_bio(struct request_queue *q, struct request *rq,    in blk_rq_append_bio() argument
      33  if (!rq->bio)    in blk_rq_append_bio()
      34  blk_rq_bio_prep(q, rq, bio);    in blk_rq_append_bio()
      35  else if (!ll_back_merge_fn(q, rq, bio))    in blk_rq_append_bio()
      38  rq->biotail->bi_next = bio;    in blk_rq_append_bio()
      39  rq->biotail = bio;    in blk_rq_append_bio()
      41  rq->__data_len += bio->bi_iter.bi_size;    in blk_rq_append_bio()
      81  int blk_rq_map_user_iov(struct request_queue *q, struct request *rq,    in blk_rq_map_user_iov() argument
     137  rq->cmd_flags |= REQ_COPY_USER;    in blk_rq_map_user_iov()
     141  blk_rq_bio_prep(q, rq, bio);    in blk_rq_map_user_iov()
          [all …]
|
D | blk-softirq.c |
      31  struct request *rq;    in blk_done_softirq() local
      33  rq = list_entry(local_list.next, struct request, ipi_list);    in blk_done_softirq()
      34  list_del_init(&rq->ipi_list);    in blk_done_softirq()
      35  rq->q->softirq_done_fn(rq);    in blk_done_softirq()
      42  struct request *rq = data;    in trigger_softirq() local
      48  list_add_tail(&rq->ipi_list, list);    in trigger_softirq()
      50  if (list->next == &rq->ipi_list)    in trigger_softirq()
      59  static int raise_blk_irq(int cpu, struct request *rq)    in raise_blk_irq() argument
      62  struct call_single_data *data = &rq->csd;    in raise_blk_irq()
      65  data->info = rq;    in raise_blk_irq()
          [all …]
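trigger_softirq() raises the softirq only when list->next == &rq->ipi_list after the list_add_tail(), i.e. when the new entry is also the head, meaning the per-CPU list was empty and no raise is already pending; completions that arrive in between simply join the list and are batched by one softirq pass. A toy model of that empty-to-nonempty edge, with a counter openly standing in for the list:

    /* Raise only on the empty -> nonempty transition; later completions
     * piggyback on the already-pending softirq. */
    #include <stdio.h>

    static int queue_len;

    static void raise_softirq(void) { puts("softirq raised"); }

    static void complete_request(int id)
    {
        queue_len++;                 /* list_add_tail(&rq->ipi_list, list) */
        if (queue_len == 1)          /* our entry is the head: list was empty */
            raise_softirq();         /* only the first completer raises */
        printf("queued completion %d\n", id);
    }

    int main(void)
    {
        complete_request(1);         /* raises */
        complete_request(2);         /* batched; no extra raise */
        return 0;
    }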
|
D | blk-tag.c |
     265  void blk_queue_end_tag(struct request_queue *q, struct request *rq)    in blk_queue_end_tag() argument
     268  unsigned tag = rq->tag; /* negative tags invalid */    in blk_queue_end_tag()
     272  list_del_init(&rq->queuelist);    in blk_queue_end_tag()
     273  rq->cmd_flags &= ~REQ_QUEUED;    in blk_queue_end_tag()
     274  rq->tag = -1;    in blk_queue_end_tag()
     313  int blk_queue_start_tag(struct request_queue *q, struct request *rq)    in blk_queue_start_tag() argument
     319  if (unlikely((rq->cmd_flags & REQ_QUEUED))) {    in blk_queue_start_tag()
     322  __func__, rq,    in blk_queue_start_tag()
     323  rq->rq_disk ? rq->rq_disk->disk_name : "?", rq->tag);    in blk_queue_start_tag()
     335  if (!rq_is_sync(rq) && max_depth > 1) {    in blk_queue_start_tag()
          [all …]
|
D | blk-timeout.c |
     113  static void blk_rq_check_expired(struct request *rq, unsigned long *next_timeout,    in blk_rq_check_expired() argument
     116  if (time_after_eq(jiffies, rq->deadline)) {    in blk_rq_check_expired()
     117  list_del_init(&rq->timeout_list);    in blk_rq_check_expired()
     122  if (!blk_mark_rq_complete(rq))    in blk_rq_check_expired()
     123  blk_rq_timed_out(rq);    in blk_rq_check_expired()
     124  } else if (!*next_set || time_after(*next_timeout, rq->deadline)) {    in blk_rq_check_expired()
     125  *next_timeout = rq->deadline;    in blk_rq_check_expired()
     134  struct request *rq, *tmp;    in blk_rq_timed_out_timer() local
     141  list_for_each_entry_safe(rq, tmp, &q->timeout_list, timeout_list)    in blk_rq_timed_out_timer()
     142  blk_rq_check_expired(rq, &next, &next_set);    in blk_rq_timed_out_timer()
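The scan in blk_rq_timed_out_timer() does two jobs per request: expire anything already due (gated by blk_mark_rq_complete(), so a racing normal completion wins), and track the earliest remaining deadline so the timer can be re-armed. The time_after()/time_after_eq() comparisons use the kernel's signed-difference trick, which stays correct across jiffies wraparound. A userspace sketch of the scan:

    /* Expire due requests; remember the earliest pending deadline.
     * time_after() is the kernel's wraparound-safe signed-difference form. */
    #include <stdio.h>

    #define time_after(a, b)     ((long)((b) - (a)) < 0)
    #define time_after_eq(a, b)  ((long)((a) - (b)) >= 0)

    int main(void)
    {
        unsigned long jiffies = 1000;
        unsigned long deadlines[] = { 900, 1500, 1200 };
        unsigned long next = 0;
        int next_set = 0;

        for (int i = 0; i < 3; i++) {
            unsigned long d = deadlines[i];

            if (time_after_eq(jiffies, d)) {
                printf("deadline %lu expired, time out the request\n", d);
            } else if (!next_set || time_after(next, d)) {
                next = d;               /* earliest deadline still pending */
                next_set = 1;
            }
        }
        if (next_set)
            printf("re-arm timer for %lu\n", next);  /* prints 1200 */
        return 0;
    }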
|
D | cfq-iosched.c |
      57  #define RQ_CIC(rq) icq_to_cic((rq)->elv.icq)    argument
      58  #define RQ_CFQQ(rq) (struct cfq_queue *) ((rq)->elv.priv[0])    argument
      59  #define RQ_CFQG(rq) (struct cfq_group *) ((rq)->elv.priv[1])    argument
    2397  static void cfq_del_rq_rb(struct request *rq)    in cfq_del_rq_rb() argument
    2399  struct cfq_queue *cfqq = RQ_CFQQ(rq);    in cfq_del_rq_rb()
    2400  const int sync = rq_is_sync(rq);    in cfq_del_rq_rb()
    2405  elv_rb_del(&cfqq->sort_list, rq);    in cfq_del_rq_rb()
    2420  static void cfq_add_rq_rb(struct request *rq)    in cfq_add_rq_rb() argument
    2422  struct cfq_queue *cfqq = RQ_CFQQ(rq);    in cfq_add_rq_rb()
    2426  cfqq->queued[rq_is_sync(rq)]++;    in cfq_add_rq_rb()
          [all …]
|
D | blk-mq-tag.c |
     427  struct request *rq;    in bt_for_each() local
     436  rq = hctx->tags->rqs[off + bit];    in bt_for_each()
     437  if (rq->q == hctx->queue)    in bt_for_each()
     438  fn(hctx, rq, data, reserved);    in bt_for_each()
     449  struct request *rq;    in bt_tags_for_each() local
     460  rq = tags->rqs[off + bit];    in bt_tags_for_each()
     461  fn(rq, data, reserved);    in bt_tags_for_each()
     691  u32 blk_mq_unique_tag(struct request *rq)    in blk_mq_unique_tag() argument
     693  struct request_queue *q = rq->q;    in blk_mq_unique_tag()
     698  hctx = q->mq_ops->map_queue(q, rq->mq_ctx->cpu);    in blk_mq_unique_tag()
          [all …]
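blk_mq_unique_tag() packs the hardware queue index and the per-queue tag into a single u32, so a request can be named unambiguously across all of a device's queues. A sketch of the packing, assuming the 16/16 split of BLK_MQ_UNIQUE_TAG_BITS and BLK_MQ_UNIQUE_TAG_MASK:

    /* hwq index in the upper 16 bits, per-queue tag in the lower 16. */
    #include <stdio.h>
    #include <stdint.h>

    #define UNIQUE_TAG_BITS 16
    #define UNIQUE_TAG_MASK ((1U << UNIQUE_TAG_BITS) - 1)

    static uint32_t unique_tag(uint32_t hwq, uint32_t tag)
    {
        return (hwq << UNIQUE_TAG_BITS) | (tag & UNIQUE_TAG_MASK);
    }

    int main(void)
    {
        uint32_t u = unique_tag(3, 42);

        printf("unique=0x%08x hwq=%u tag=%u\n",
               u, u >> UNIQUE_TAG_BITS, u & UNIQUE_TAG_MASK);
        return 0;
    }

Unpacking is the mirror image, which is what the blk_mq_unique_tag_to_hwq()/blk_mq_unique_tag_to_tag() helpers provide to drivers.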
|
D | bsg-lib.c |
      82  static void bsg_softirq_done(struct request *rq)    in bsg_softirq_done() argument
      84  struct bsg_job *job = rq->special;    in bsg_softirq_done()
      86  blk_end_request_all(rq, rq->errors);    in bsg_softirq_done()
|
D | blk-mq-tag.h |
     101  unsigned int tag, struct request *rq)    in blk_mq_tag_set_rq() argument
     103  hctx->tags->rqs[tag] = rq;    in blk_mq_tag_set_rq()
|
D | blk-mq-sysfs.c |
     144  struct request *rq;    in sysfs_list_show() local
     147  list_for_each_entry(rq, list, queuelist) {    in sysfs_list_show()
     148  const int rq_len = 2 * sizeof(rq) + 2;    in sysfs_list_show()
     160  "\t%p\n", rq);    in sysfs_list_show()
|