block/ — uses of struct request_queue (q), by file
blk-sysfs.c

  44: static ssize_t queue_requests_show(struct request_queue *q, char *page)
  46:         return queue_var_show(q->nr_requests, (page));
  50: queue_requests_store(struct request_queue *q, const char *page, size_t count)
  55:         if (!q->request_fn && !q->mq_ops)
  65:         if (q->request_fn)
  66:                 err = blk_update_nr_requests(q, nr);
  68:                 err = blk_mq_update_nr_requests(q, nr);
  76: static ssize_t queue_ra_show(struct request_queue *q, char *page)
  78:         unsigned long ra_kb = q->backing_dev_info.ra_pages <<
  85: queue_ra_store(struct request_queue *q, const char *page, size_t count)
  [all …]
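queue_requests_store() above is the sysfs entry point for resizing the request pool: it rejects queues that have neither a legacy ->request_fn nor mq_ops, then dispatches to the legacy or blk-mq resize helper. A condensed sketch of that shape, assuming queue_var_store() parses a decimal count from the sysfs page as it does elsewhere in this file:

    static ssize_t
    queue_requests_store(struct request_queue *q, const char *page, size_t count)
    {
            unsigned long nr;
            int ret, err;

            /* Neither legacy ->request_fn nor blk-mq: nothing to resize. */
            if (!q->request_fn && !q->mq_ops)
                    return -EINVAL;

            ret = queue_var_store(&nr, page, count);        /* parse decimal */
            if (ret < 0)
                    return ret;

            if (nr < BLKDEV_MIN_RQ)
                    nr = BLKDEV_MIN_RQ;     /* clamp to the floor */

            /* Legacy and blk-mq queues resize through different helpers. */
            if (q->request_fn)
                    err = blk_update_nr_requests(q, nr);
            else
                    err = blk_mq_update_nr_requests(q, nr);
            if (err)
                    return err;

            return ret;
    }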
blk-core.c

  in blk_clear_congested():
  77:         if (rl == &rl->q->root_rl)
  78:                 clear_wb_congested(rl->q->backing_dev_info.wb.congested, sync);
  in blk_set_congested():
  88:         if (rl == &rl->q->root_rl)
  89:                 set_wb_congested(rl->q->backing_dev_info.wb.congested, sync);
  93: void blk_queue_congestion_threshold(struct request_queue *q)
  97:         nr = q->nr_requests - (q->nr_requests / 8) + 1;
  98:         if (nr > q->nr_requests)
  99:                 nr = q->nr_requests;
 100:         q->nr_congestion_on = nr;
 102:         nr = q->nr_requests - (q->nr_requests / 8) - (q->nr_requests / 16) - 1;
  [all …]
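With the default nr_requests of 128, the arithmetic above yields nr_congestion_on = 113 and nr_congestion_off = 103: the congested flag is set near pool exhaustion and cleared only after a margin of requests completes, so it does not flap. The full helper with the worked values in comments (this matches the lines shown; the two clamps belong to the same function):

    void blk_queue_congestion_threshold(struct request_queue *q)
    {
            int nr;

            /* on threshold: 128 - 128/8 + 1 = 113 for nr_requests == 128 */
            nr = q->nr_requests - (q->nr_requests / 8) + 1;
            if (nr > q->nr_requests)
                    nr = q->nr_requests;
            q->nr_congestion_on = nr;

            /* off threshold: 128 - 128/8 - 128/16 - 1 = 103 */
            nr = q->nr_requests - (q->nr_requests / 8) - (q->nr_requests / 16) - 1;
            if (nr < 1)
                    nr = 1;
            q->nr_congestion_off = nr;
    }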
elevator.c

  in elv_iosched_allow_merge():
  58:         struct request_queue *q = rq->q;
  59:         struct elevator_queue *e = q->elevator;
  62:         return e->type->ops.elevator_allow_merge_fn(q, rq, bio);
 153: struct elevator_queue *elevator_alloc(struct request_queue *q,
 158:         eq = kzalloc_node(sizeof(*eq), GFP_KERNEL, q->node);
 180: int elevator_init(struct request_queue *q, char *name)
 189:         lockdep_assert_held(&q->sysfs_lock);
 191:         if (unlikely(q->elevator))
 194:         INIT_LIST_HEAD(&q->queue_head);
 195:         q->last_merge = NULL;
  [all …]
blk.h

  in blk_get_flush_queue():
  40:         struct request_queue *q, struct blk_mq_ctx *ctx)
  44:         if (!q->mq_ops)
  45:                 return q->fq;
  47:         hctx = q->mq_ops->map_queue(q, ctx->cpu);
  52: static inline void __blk_get_queue(struct request_queue *q)
  54:         kobject_get(&q->kobj);
  57: struct blk_flush_queue *blk_alloc_flush_queue(struct request_queue *q,
  59: void blk_free_flush_queue(struct blk_flush_queue *q);
  61: int blk_init_rl(struct request_list *rl, struct request_queue *q,
  65: void blk_rq_bio_prep(struct request_queue *q, struct request *rq,
  [all …]
blk-settings.c

  33: void blk_queue_prep_rq(struct request_queue *q, prep_rq_fn *pfn)
  35:         q->prep_rq_fn = pfn;
  50: void blk_queue_unprep_rq(struct request_queue *q, unprep_rq_fn *ufn)
  52:         q->unprep_rq_fn = ufn;
  56: void blk_queue_softirq_done(struct request_queue *q, softirq_done_fn *fn)
  58:         q->softirq_done_fn = fn;
  62: void blk_queue_rq_timeout(struct request_queue *q, unsigned int timeout)
  64:         q->rq_timeout = timeout;
  68: void blk_queue_rq_timed_out(struct request_queue *q, rq_timed_out_fn *fn)
  70:         q->rq_timed_out_fn = fn;
  [all …]
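These setters are how a legacy (non-mq) driver installs its per-queue hooks at probe time. A hedged sketch of the wiring; the mydrv_* handlers are illustrative stubs, and only the blk_queue_* calls come from the listing above:

    #include <linux/blkdev.h>

    /* Illustrative stubs: a real driver does work in each of these. */
    static int mydrv_prep_rq(struct request_queue *q, struct request *rq)
    {
            /* build the hardware command for rq */
            return BLKPREP_OK;
    }

    static void mydrv_softirq_done(struct request *rq)
    {
            blk_end_request_all(rq, 0);     /* runs in softirq context */
    }

    static enum blk_eh_timer_return mydrv_timed_out(struct request *rq)
    {
            return BLK_EH_RESET_TIMER;      /* give the command more time */
    }

    static void mydrv_setup_queue(struct request_queue *q)
    {
            blk_queue_prep_rq(q, mydrv_prep_rq);
            blk_queue_softirq_done(q, mydrv_softirq_done);
            blk_queue_rq_timeout(q, 30 * HZ);       /* 30s per-request timeout */
            blk_queue_rq_timed_out(q, mydrv_timed_out);
    }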
blk-merge.c

  12: static struct bio *blk_bio_discard_split(struct request_queue *q,
  25:         granularity = max(q->limits.discard_granularity >> 9, 1U);
  27:         max_discard_sectors = min(q->limits.max_discard_sectors, UINT_MAX >> 9);
  44:         alignment = (q->limits.discard_alignment >> 9) % granularity;
  55: static struct bio *blk_bio_write_same_split(struct request_queue *q,
  62:         if (!q->limits.max_write_same_sectors)
  65:         if (bio_sectors(bio) <= q->limits.max_write_same_sectors)
  68:         return bio_split(bio, q->limits.max_write_same_sectors, GFP_NOIO, bs);
  71: static inline unsigned get_max_io_size(struct request_queue *q,
  74:         unsigned sectors = blk_max_size_offset(q, bio->bi_iter.bi_sector);
  [all …]
blk-mq.c

  81: void blk_mq_freeze_queue_start(struct request_queue *q)
  85:         freeze_depth = atomic_inc_return(&q->mq_freeze_depth);
  87:                 percpu_ref_kill(&q->q_usage_counter);
  88:                 blk_mq_run_hw_queues(q, false);
  93: static void blk_mq_freeze_queue_wait(struct request_queue *q)
  95:         wait_event(q->mq_freeze_wq, percpu_ref_is_zero(&q->q_usage_counter));
 102: void blk_freeze_queue(struct request_queue *q)
 111:         blk_mq_freeze_queue_start(q);
 112:         blk_mq_freeze_queue_wait(q);
 115: void blk_mq_freeze_queue(struct request_queue *q)
  [all …]
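blk_mq_freeze_queue() just pairs the two steps shown: kill q_usage_counter so new submitters can no longer take a reference, then wait in blk_mq_freeze_queue_wait() until the in-flight users drain. The usual caller pattern is a bracket around state no in-flight request may observe; a sketch:

    blk_mq_freeze_queue(q);         /* returns once q_usage_counter hits zero */

    /* ... safely update queue state, e.g. resize tags or remap contexts ... */

    blk_mq_unfreeze_queue(q);       /* resurrect the ref, restart hw queues */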
blk-timeout.c

  22: int blk_should_fake_timeout(struct request_queue *q)
  24:         if (!test_bit(QUEUE_FLAG_FAIL_IO, &q->queue_flags))
  in part_timeout_store():
  56:         struct request_queue *q = disk->queue;
  60:         spin_lock_irq(q->queue_lock);
  62:                 queue_flag_set(QUEUE_FLAG_FAIL_IO, q);
  64:                 queue_flag_clear(QUEUE_FLAG_FAIL_IO, q);
  65:         spin_unlock_irq(q->queue_lock);
  in blk_rq_timed_out():
  85:         struct request_queue *q = req->q;
  88:         if (q->rq_timed_out_fn)
  89:                 ret = q->rq_timed_out_fn(req);
  [all …]
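QUEUE_FLAG_FAIL_IO is a CONFIG_FAIL_IO_TIMEOUT fault-injection knob: part_timeout_store() toggles it from sysfs, and blk_should_fake_timeout() reads it on the completion path. Completion code drops the completion on the floor so the timeout machinery fires instead; blk_complete_request() (see blk-softirq.c below) does effectively this sketch:

    /* Pretend the completion interrupt never arrived. */
    if (unlikely(blk_should_fake_timeout(req->q)))
            return;                 /* request is reclaimed by the timeout path */
    __blk_complete_request(req);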
blk-flush.c

  95: static bool blk_kick_flush(struct request_queue *q,
  in blk_flush_queue_rq():
 135:         if (rq->q->mq_ops) {
 136:                 struct request_queue *q = rq->q;
 139:                 blk_mq_kick_requeue_list(q);
 143:                 list_add(&rq->queuelist, &rq->q->queue_head);
 145:                 list_add_tail(&rq->queuelist, &rq->q->queue_head);
  in blk_flush_complete_seq():
 170:         struct request_queue *q = rq->q;
 206:         if (q->mq_ops)
 216:         kicked = blk_kick_flush(q, fq);
  in flush_end_io():
 222:         struct request_queue *q = flush_rq->q;
  [all …]
blk-tag.c

  23: struct request *blk_queue_find_tag(struct request_queue *q, int tag)
  25:         return blk_map_queue_find_tag(q->queue_tags, tag);
  61: void __blk_queue_free_tags(struct request_queue *q)
  63:         struct blk_queue_tag *bqt = q->queue_tags;
  70:         q->queue_tags = NULL;
  71:         queue_flag_clear_unlocked(QUEUE_FLAG_QUEUED, q);
  82: void blk_queue_free_tags(struct request_queue *q)
  84:         queue_flag_clear_unlocked(QUEUE_FLAG_QUEUED, q);
  89: init_tag_map(struct request_queue *q, struct blk_queue_tag *tags, int depth)
  95:         if (q && depth > q->nr_requests * 2) {
  [all …]
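A hedged sketch of the legacy tagging lifecycle these helpers belong to; the depth and alloc policy are illustrative, the signatures are as I recall them for this era, and init_tag_map() above is where the nr_requests * 2 sanity bound on depth is enforced:

    /* One-time setup: allocate a tag map for the queue. */
    if (blk_queue_init_tags(q, 64, NULL, BLK_TAG_ALLOC_FIFO))
            return -ENOMEM;

    /* Dispatch: attach a tag, or back off if none are free. */
    if (blk_queue_start_tag(q, rq))
            return BLKPREP_DEFER;   /* tag map exhausted */

    /* Completion irq: the hardware echoes the tag back. */
    rq = blk_queue_find_tag(q, tag);
    blk_queue_end_tag(q, rq);       /* caller holds queue_lock */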
blk-ioc.c

  in ioc_exit_icq():
  41:         struct elevator_type *et = icq->q->elevator->type;
  in ioc_destroy_icq():
  56:         struct request_queue *q = icq->q;
  57:         struct elevator_type *et = q->elevator->type;
  60:         lockdep_assert_held(q->queue_lock);
  62:         radix_tree_delete(&ioc->icq_tree, icq->q->id);
  in ioc_release_fn():
 105:         struct request_queue *q = icq->q;
 107:         if (spin_trylock(q->queue_lock)) {
 109:                 spin_unlock(q->queue_lock);
  in put_io_context_active():
 185:         if (spin_trylock(icq->q->queue_lock)) {
 187:                 spin_unlock(icq->q->queue_lock);
  [all …]
blk-cgroup.c

  52: static bool blkcg_policy_enabled(struct request_queue *q,
  55:         return pol && test_bit(pol->plid, q->blkcg_pols);
  91: static struct blkcg_gq *blkg_alloc(struct blkcg *blkcg, struct request_queue *q,
  98:         blkg = kzalloc_node(sizeof(*blkg), gfp_mask, q->node);
 106:         blkg->q = q;
 113:         if (blk_init_rl(&blkg->rl, q, gfp_mask))
 122:                 if (!blkcg_policy_enabled(q, pol))
 126:                 pd = pol->pd_alloc_fn(gfp_mask, q->node);
  in blkg_lookup_slowpath():
 143:         struct request_queue *q, bool update_hint)
 153:         blkg = radix_tree_lookup(&blkcg->blkg_tree, q->id);
  [all …]
blk-map.c

  12: static bool iovec_gap_to_prv(struct request_queue *q,
  17:         if (!queue_virt_boundary(q))
  26:         return (((unsigned long)cur->iov_base & queue_virt_boundary(q)) ||
  27:                 prev_end & queue_virt_boundary(q));
  30: int blk_rq_append_bio(struct request_queue *q, struct request *rq,
  34:         blk_rq_bio_prep(q, rq, bio);
  35:         else if (!ll_back_merge_fn(q, rq, bio))
  81: int blk_rq_map_user_iov(struct request_queue *q, struct request *rq,
 105:         if ((uaddr & queue_dma_alignment(q)) ||
 106:             iovec_gap_to_prv(q, &prv, &iov))
  [all …]
bsg-lib.c

  in bsg_map_buffer():
 100:         buf->sg_cnt = blk_rq_map_sg(req->q, req, buf->sg_list);
  in bsg_create_job():
 113:         struct request_queue *q = req->q;
 119:         job = kzalloc(sizeof(struct bsg_job) + q->bsg_job_size, GFP_KERNEL);
 125:         if (q->bsg_job_size)
 163: void bsg_request_fn(struct request_queue *q)
 165:         struct device *dev = q->queuedata;
 174:                 req = blk_fetch_request(q);
 177:                 spin_unlock_irq(q->queue_lock);
 183:                 spin_lock_irq(q->queue_lock);
 188:                 ret = q->bsg_job_fn(job);
  [all …]
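bsg_request_fn() above has the canonical legacy request_fn shape: entered with queue_lock held, it pops requests with blk_fetch_request() and drops the lock around the slow work. A stripped-down sketch of that shape (mydrv_dispatch is hypothetical):

    static void mydrv_dispatch(struct request *req)
    {
            /* hypothetical: hand req to hardware, complete it later */
    }

    static void mydrv_request_fn(struct request_queue *q)
    {
            struct request *req;

            /* Called by the block core with q->queue_lock held, irqs off. */
            while ((req = blk_fetch_request(q)) != NULL) {
                    spin_unlock_irq(q->queue_lock); /* don't hold it for I/O */
                    mydrv_dispatch(req);
                    spin_lock_irq(q->queue_lock);
            }
    }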
blk-mq-sysfs.c

  in blk_mq_sysfs_show():
  37:         struct request_queue *q;
  42:         q = ctx->queue;
  48:         mutex_lock(&q->sysfs_lock);
  49:         if (!blk_queue_dying(q))
  51:         mutex_unlock(&q->sysfs_lock);
  in blk_mq_sysfs_store():
  60:         struct request_queue *q;
  65:         q = ctx->queue;
  71:         mutex_lock(&q->sysfs_lock);
  72:         if (!blk_queue_dying(q))
  74:         mutex_unlock(&q->sysfs_lock);
  [all …]
noop-iosched.c

  15: static void noop_merged_requests(struct request_queue *q, struct request *rq,
  21: static int noop_dispatch(struct request_queue *q, int force)
  23:         struct noop_data *nd = q->elevator->elevator_data;
  29:                 elv_dispatch_sort(q, rq);
  35: static void noop_add_request(struct request_queue *q, struct request *rq)
  37:         struct noop_data *nd = q->elevator->elevator_data;
  43: noop_former_request(struct request_queue *q, struct request *rq)
  45:         struct noop_data *nd = q->elevator->elevator_data;
  53: noop_latter_request(struct request_queue *q, struct request *rq)
  55:         struct noop_data *nd = q->elevator->elevator_data;
  [all …]
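noop_add_request() and noop_dispatch() above are essentially the whole noop policy: one FIFO per queue. A reconstruction of their bodies around the lines shown; this matches the upstream noop design, but treat it as a sketch rather than a verbatim copy:

    struct noop_data {
            struct list_head queue;         /* the single FIFO */
    };

    static void noop_add_request(struct request_queue *q, struct request *rq)
    {
            struct noop_data *nd = q->elevator->elevator_data;

            list_add_tail(&rq->queuelist, &nd->queue);
    }

    static int noop_dispatch(struct request_queue *q, int force)
    {
            struct noop_data *nd = q->elevator->elevator_data;
            struct request *rq;

            rq = list_first_entry_or_null(&nd->queue, struct request, queuelist);
            if (rq) {
                    list_del_init(&rq->queuelist);
                    elv_dispatch_sort(q, rq);  /* merge into dispatch queue */
                    return 1;
            }
            return 0;
    }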
blk-mq.h

  29: void blk_mq_freeze_queue(struct request_queue *q);
  30: void blk_mq_free_queue(struct request_queue *q);
  31: int blk_mq_update_nr_requests(struct request_queue *q, unsigned int nr);
  32: void blk_mq_wake_waiters(struct request_queue *q);
  58: extern int blk_mq_sysfs_register(struct request_queue *q);
  59: extern void blk_mq_sysfs_unregister(struct request_queue *q);
  63: void blk_mq_release(struct request_queue *q);
  74: static inline struct blk_mq_ctx *__blk_mq_get_ctx(struct request_queue *q,
  77:         return per_cpu_ptr(q->queue_ctx, cpu);
  86: static inline struct blk_mq_ctx *blk_mq_get_ctx(struct request_queue *q)
  [all …]
scsi_ioctl.c

  60: static int scsi_get_idlun(struct request_queue *q, int __user *p)
  65: static int scsi_get_bus(struct request_queue *q, int __user *p)
  70: static int sg_get_timeout(struct request_queue *q)
  72:         return jiffies_to_clock_t(q->sg_timeout);
  75: static int sg_set_timeout(struct request_queue *q, int __user *p)
  80:         q->sg_timeout = clock_t_to_jiffies(timeout);
  85: static int max_sectors_bytes(struct request_queue *q)
  87:         unsigned int max_sectors = queue_max_sectors(q);
  94: static int sg_get_reserved_size(struct request_queue *q, int __user *p)
  96:         int val = min_t(int, q->sg_reserved_size, max_sectors_bytes(q));
  [all …]
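sg_set_timeout() stores clock_t_to_jiffies(timeout), so the SG_SET_TIMEOUT ioctl takes userspace clock ticks, not seconds or jiffies. A userspace sketch of setting a 60 second timeout, assuming an fd opened on an sg-capable device:

    #include <unistd.h>
    #include <sys/ioctl.h>
    #include <scsi/sg.h>

    static int set_sg_timeout_60s(int fd)
    {
            /* seconds -> clock ticks; the kernel converts ticks -> jiffies */
            int ticks = 60 * sysconf(_SC_CLK_TCK);

            return ioctl(fd, SG_SET_TIMEOUT, &ticks);
    }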
blk-exec.c

  51: void blk_execute_rq_nowait(struct request_queue *q, struct gendisk *bd_disk,
  67:         if (q->mq_ops) {
  72:         spin_lock_irq(q->queue_lock);
  74:         if (unlikely(blk_queue_dying(q))) {
  78:                 spin_unlock_irq(q->queue_lock);
  82:         __elv_add_request(q, rq, where);
  83:         __blk_run_queue(q);
  84:         spin_unlock_irq(q->queue_lock);
  99: int blk_execute_rq(struct request_queue *q, struct gendisk *bd_disk,
 114:         blk_execute_rq_nowait(q, bd_disk, rq, at_head, blk_end_sync_rq);
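blk_execute_rq() is the synchronous wrapper: it layers blk_end_sync_rq and a completion on top of the nowait path above. A condensed sketch of a typical caller, assuming the v4.x-era blk_get_request()/REQ_TYPE_BLOCK_PC interfaces seen elsewhere in this listing:

    struct request *rq;
    int err;

    rq = blk_get_request(q, READ, GFP_KERNEL);      /* may sleep */
    if (IS_ERR(rq))
            return PTR_ERR(rq);
    rq->cmd_type = REQ_TYPE_BLOCK_PC;
    /* ... fill rq->cmd[], rq->cmd_len, map a data buffer if needed ... */

    err = blk_execute_rq(q, NULL, rq, 0);           /* waits for completion */
    blk_put_request(rq);
    return err;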
deadline-iosched.c

  99: deadline_add_request(struct request_queue *q, struct request *rq)
 101:         struct deadline_data *dd = q->elevator->elevator_data;
 116: static void deadline_remove_request(struct request_queue *q, struct request *rq)
 118:         struct deadline_data *dd = q->elevator->elevator_data;
 125: deadline_merge(struct request_queue *q, struct request **req, struct bio *bio)
 127:         struct deadline_data *dd = q->elevator->elevator_data;
 154: static void deadline_merged_request(struct request_queue *q,
 157:         struct deadline_data *dd = q->elevator->elevator_data;
 169: deadline_merged_requests(struct request_queue *q, struct request *req,
 186:         deadline_remove_request(q, next);
  [all …]
bounce.c

 182: static void __blk_queue_bounce(struct request_queue *q, struct bio **bio_orig,
 192:         if (page_to_pfn(from.bv_page) > queue_bounce_pfn(q))
 202:         if (page_to_pfn(page) <= queue_bounce_pfn(q))
 205:         to->bv_page = mempool_alloc(pool, q->bounce_gfp);
 220:         trace_block_bio_bounce(q, *bio_orig);
 238: void blk_queue_bounce(struct request_queue *q, struct bio **bio_orig)
 253:         if (!(q->bounce_gfp & GFP_DMA)) {
 254:                 if (queue_bounce_pfn(q) >= blk_max_pfn)
 265:         __blk_queue_bounce(q, bio_orig, pool);
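blk_queue_bounce() takes struct bio ** because it may swap in a clone backed by low-memory pages when any segment sits above queue_bounce_pfn(), which is why callers use the possibly-rewritten pointer afterwards. A sketch of the call-site shape in a v4.x-era make_request function (mydrv_* is illustrative):

    static blk_qc_t mydrv_make_request(struct request_queue *q, struct bio *bio)
    {
            /* May replace 'bio' with a bounce clone; original completes later. */
            blk_queue_bounce(q, &bio);

            /* ... queue 'bio' (possibly the clone) to hardware ... */
            return BLK_QC_T_NONE;
    }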
blk-lib.c

  in blkdev_issue_discard():
  44:         struct request_queue *q = bdev_get_queue(bdev);
  53:         if (!q)
  56:         if (!blk_queue_discard(q))
  60:         granularity = max(q->limits.discard_granularity >> 9, 1U);
  64:                 if (!blk_queue_secdiscard(q))
  in blkdev_issue_write_same():
 149:         struct request_queue *q = bdev_get_queue(bdev);
 155:         if (!q)
  in blkdev_issue_zeroout():
 288:         struct request_queue *q = bdev_get_queue(bdev);
 290:         if (discard && blk_queue_discard(q) && q->limits.discard_zeroes_data &&
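These helpers back the BLKDISCARD/BLKZEROOUT ioctls. A hedged caller sketch using the era's signatures as shown above; the flags argument and the trailing 'discard' bool are from memory, so verify against the tree:

    struct request_queue *q = bdev_get_queue(bdev);
    int err;

    if (!q || !blk_queue_discard(q))
            return -EOPNOTSUPP;

    /* Plain discard; BLKDEV_DISCARD_SECURE needs blk_queue_secdiscard(). */
    err = blkdev_issue_discard(bdev, sector, nr_sects, GFP_KERNEL, 0);
    if (err)
            return err;

    /* Zero a range; 'true' permits discard when discard_zeroes_data is set. */
    return blkdev_issue_zeroout(bdev, sector, nr_sects, GFP_KERNEL, true);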
bsg.c

 139: static int blk_fill_sgv4_hdr_rq(struct request_queue *q, struct request *rq,
 166:         rq->timeout = q->sg_timeout;
 179: bsg_validate_sgv4_hdr(struct request_queue *q, struct sg_io_v4 *hdr, int *rw)
  in bsg_map_hdr():
 211:         struct request_queue *q = bd->queue;
 216:         struct bsg_class_device *bcd = &q->bsg_dev;
 229:         ret = bsg_validate_sgv4_hdr(q, hdr, &rw);
 236:         rq = blk_get_request(q, rw, GFP_KERNEL);
 241:         ret = blk_fill_sgv4_hdr_rq(q, rq, hdr, bd, has_write_perm);
 246:         if (!test_bit(QUEUE_FLAG_BIDI, &q->queue_flags)) {
 251:                 next_rq = blk_get_request(q, READ, GFP_KERNEL);
  [all …]
blk-throttle.c

  in throtl_pd_init():
 355:         struct throtl_data *td = blkg->q->td;
  in throtl_pending_timer_fn():
1035:         struct request_queue *q = td->queue;
1040:         spin_lock_irq(q->queue_lock);
1060:         spin_unlock_irq(q->queue_lock);
1062:         spin_lock_irq(q->queue_lock);
1084:         spin_unlock_irq(q->queue_lock);
  in blk_throtl_dispatch_work_fn():
1100:         struct request_queue *q = td->queue;
1108:         spin_lock_irq(q->queue_lock);
1112:         spin_unlock_irq(q->queue_lock);
1390: static void throtl_shutdown_wq(struct request_queue *q)
  [all …]
blk-softirq.c

  in blk_done_softirq():
  35:         rq->q->softirq_done_fn(rq);
  in __blk_complete_request():
 108:         struct request_queue *q = req->q;
 112:         BUG_ON(!q->softirq_done_fn);
 122:         if (!test_bit(QUEUE_FLAG_SAME_FORCE, &q->queue_flags))
  in blk_complete_request():
 168:         if (unlikely(blk_should_fake_timeout(req->q)))
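blk_complete_request() is the driver-facing entry: after the fake-timeout check shown, it funnels into __blk_complete_request(), which raises BLOCK_SOFTIRQ so q->softirq_done_fn runs outside hard-irq context, optionally steered back to the submitting CPU. A sketch of an interrupt handler using it (mydrv_* names hypothetical):

    #include <linux/interrupt.h>
    #include <linux/blkdev.h>

    /* hypothetical helper: fetch a request the hardware marked done */
    static struct request *mydrv_pop_completed(void *data);

    static irqreturn_t mydrv_irq(int irq, void *data)
    {
            struct request *req = mydrv_pop_completed(data);

            if (!req)
                    return IRQ_NONE;

            blk_complete_request(req);      /* real work deferred to softirq */
            return IRQ_HANDLED;
    }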