/block/ |
D | blk-sysfs.c |
    50  static ssize_t queue_requests_show(struct request_queue *q, char *page)   in queue_requests_show() argument
    52  return queue_var_show(q->nr_requests, page);   in queue_requests_show()
    56  queue_requests_store(struct request_queue *q, const char *page, size_t count)   in queue_requests_store() argument
    61  if (!queue_is_mq(q))   in queue_requests_store()
    71  err = blk_mq_update_nr_requests(q, nr);   in queue_requests_store()
    78  static ssize_t queue_ra_show(struct request_queue *q, char *page)   in queue_ra_show() argument
    82  if (!q->disk)   in queue_ra_show()
    84  ra_kb = q->disk->bdi->ra_pages << (PAGE_SHIFT - 10);   in queue_ra_show()
    89  queue_ra_store(struct request_queue *q, const char *page, size_t count)   in queue_ra_store() argument
    94  if (!q->disk)   in queue_ra_store()
    [all …]
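The show/store handlers matched above all follow the same shape: format or parse one request_queue field against the sysfs page buffer. A minimal sketch of a read-only attribute handler in that style (the "example" name is invented and is not part of blk-sysfs.c; this assumes it sits inside that file so the static helpers and headers are already available):

/* Hypothetical read-only queue attribute in the style shown above. */
static ssize_t queue_example_show(struct request_queue *q, char *page)
{
        /* report the per-request timeout in seconds as a single sysfs line */
        return sysfs_emit(page, "%u\n", q->rq_timeout / HZ);
}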
|
D | blk-pm.c |
    29  void blk_pm_runtime_init(struct request_queue *q, struct device *dev)   in blk_pm_runtime_init() argument
    31  q->dev = dev;   in blk_pm_runtime_init()
    32  q->rpm_status = RPM_ACTIVE;   in blk_pm_runtime_init()
    33  pm_runtime_set_autosuspend_delay(q->dev, -1);   in blk_pm_runtime_init()
    34  pm_runtime_use_autosuspend(q->dev);   in blk_pm_runtime_init()
    59  int blk_pre_runtime_suspend(struct request_queue *q)   in blk_pre_runtime_suspend() argument
    63  if (!q->dev)   in blk_pre_runtime_suspend()
    66  WARN_ON_ONCE(q->rpm_status != RPM_ACTIVE);   in blk_pre_runtime_suspend()
    68  spin_lock_irq(&q->queue_lock);   in blk_pre_runtime_suspend()
    69  q->rpm_status = RPM_SUSPENDING;   in blk_pre_runtime_suspend()
    [all …]
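blk_pm_runtime_init() ties a queue to a struct device for runtime PM and deliberately leaves autosuspend disabled (delay -1) until the driver opts in. A minimal sketch of how a driver's probe path might wire this up; the helper name and the 5-second delay are illustrative, not taken from blk-pm.c:

#include <linux/blk-pm.h>
#include <linux/pm_runtime.h>

/* Hypothetical driver helper: enable runtime PM for a request queue. */
static void example_setup_runtime_pm(struct request_queue *q, struct device *dev)
{
        blk_pm_runtime_init(q, dev);                    /* q->dev = dev, status RPM_ACTIVE */
        pm_runtime_set_autosuspend_delay(dev, 5000);    /* suspend after 5 s of idleness */
        pm_runtime_mark_last_busy(dev);
        pm_runtime_allow(dev);                          /* permit runtime suspend from now on */
}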
|
D | elevator.c |
    62  struct request_queue *q = rq->q;   in elv_iosched_allow_bio_merge() local
    63  struct elevator_queue *e = q->elevator;   in elv_iosched_allow_bio_merge()
    66  return e->type->ops.allow_merge(q, rq, bio);   in elv_iosched_allow_bio_merge()
    86  static inline bool elv_support_features(struct request_queue *q,   in elv_support_features() argument
    89  return (q->required_elevator_features & e->elevator_features) ==   in elv_support_features()
    90  q->required_elevator_features;   in elv_support_features()
    116  static struct elevator_type *elevator_find_get(struct request_queue *q,   in elevator_find_get() argument
    123  if (e && (!elv_support_features(q, e) || !elevator_tryget(e)))   in elevator_find_get()
    131  struct elevator_queue *elevator_alloc(struct request_queue *q,   in elevator_alloc() argument
    136  eq = kzalloc_node(sizeof(*eq), GFP_KERNEL, q->node);   in elevator_alloc()
    [all …]
|
D | blk-settings.c |
    30  void blk_queue_rq_timeout(struct request_queue *q, unsigned int timeout)   in blk_queue_rq_timeout() argument
    32  q->rq_timeout = timeout;   in blk_queue_rq_timeout()
    107  void blk_queue_bounce_limit(struct request_queue *q, enum blk_bounce bounce)   in blk_queue_bounce_limit() argument
    109  q->limits.bounce = bounce;   in blk_queue_bounce_limit()
    184  void blk_queue_max_hw_sectors(struct request_queue *q, unsigned int max_hw_sectors)   in blk_queue_max_hw_sectors() argument
    186  struct queue_limits *limits = &q->limits;   in blk_queue_max_hw_sectors()
    215  if (!q->disk)   in blk_queue_max_hw_sectors()
    217  q->disk->bdi->io_pages = max_sectors >> (PAGE_SHIFT - 9);   in blk_queue_max_hw_sectors()
    233  void blk_queue_chunk_sectors(struct request_queue *q, unsigned int chunk_sectors)   in blk_queue_chunk_sectors() argument
    235  q->limits.chunk_sectors = chunk_sectors;   in blk_queue_chunk_sectors()
    [all …]
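These setters are what a driver calls while configuring its queue limits. A minimal sketch of that caller side, with purely illustrative values and an invented helper name:

#include <linux/blkdev.h>

/* Hypothetical driver helper: apply basic queue limits at setup time. */
static void example_apply_limits(struct request_queue *q)
{
        blk_queue_max_hw_sectors(q, 2048);      /* hardware limit: 1 MiB per request */
        blk_queue_chunk_sectors(q, 256);        /* requests never straddle 128 KiB boundaries */
        blk_queue_rq_timeout(q, 30 * HZ);       /* give each request 30 s before timeout handling */
}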
|
D | blk-core.c |
    87  void blk_queue_flag_set(unsigned int flag, struct request_queue *q)   in blk_queue_flag_set() argument
    89  set_bit(flag, &q->queue_flags);   in blk_queue_flag_set()
    98  void blk_queue_flag_clear(unsigned int flag, struct request_queue *q)   in blk_queue_flag_clear() argument
    100  clear_bit(flag, &q->queue_flags);   in blk_queue_flag_clear()
    112  bool blk_queue_flag_test_and_set(unsigned int flag, struct request_queue *q)   in blk_queue_flag_test_and_set() argument
    114  return test_and_set_bit(flag, &q->queue_flags);   in blk_queue_flag_test_and_set()
    239  void blk_sync_queue(struct request_queue *q)   in blk_sync_queue() argument
    241  del_timer_sync(&q->timeout);   in blk_sync_queue()
    242  cancel_work_sync(&q->timeout_work);   in blk_sync_queue()
    250  void blk_set_pm_only(struct request_queue *q)   in blk_set_pm_only() argument
    [all …]
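The flag helpers above are thin atomic wrappers around q->queue_flags. A minimal sketch of the usual caller-side pattern; the helper name is invented, while the QUEUE_FLAG_* values are real flags from blkdev.h:

#include <linux/blkdev.h>

/* Hypothetical driver helper: mark a queue as backed by a non-rotational device. */
static void example_mark_nonrot(struct request_queue *q)
{
        blk_queue_flag_set(QUEUE_FLAG_NONROT, q);       /* no seek penalty */
        blk_queue_flag_clear(QUEUE_FLAG_ADD_RANDOM, q); /* don't feed the entropy pool */
}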
|
D | blk-mq-sched.c |
    89  struct request_queue *q = hctx->queue;   in __blk_mq_do_dispatch_sched() local
    90  struct elevator_queue *e = q->elevator;   in __blk_mq_do_dispatch_sched()
    114  budget_token = blk_mq_get_dispatch_budget(q);   in __blk_mq_do_dispatch_sched()
    120  blk_mq_put_dispatch_budget(q, budget_token);   in __blk_mq_do_dispatch_sched()
    156  blk_mq_delay_run_hw_queues(q, BLK_MQ_BUDGET_DELAY);   in __blk_mq_do_dispatch_sched()
    217  struct request_queue *q = hctx->queue;   in blk_mq_do_dispatch_ctx() local
    234  budget_token = blk_mq_get_dispatch_budget(q);   in blk_mq_do_dispatch_ctx()
    240  blk_mq_put_dispatch_budget(q, budget_token);   in blk_mq_do_dispatch_ctx()
    248  blk_mq_delay_run_hw_queues(q, BLK_MQ_BUDGET_DELAY);   in blk_mq_do_dispatch_ctx()
    321  struct request_queue *q = hctx->queue;   in blk_mq_sched_dispatch_requests() local
    [all …]
|
D | blk-mq.c |
    52  static int blk_hctx_poll(struct request_queue *q, struct blk_mq_hw_ctx *hctx,
    103  unsigned int blk_mq_in_flight(struct request_queue *q,   in blk_mq_in_flight() argument
    108  blk_mq_queue_tag_busy_iter(q, blk_mq_check_inflight, &mi);   in blk_mq_in_flight()
    113  void blk_mq_in_flight_rw(struct request_queue *q, struct block_device *part,   in blk_mq_in_flight_rw() argument
    118  blk_mq_queue_tag_busy_iter(q, blk_mq_check_inflight, &mi);   in blk_mq_in_flight_rw()
    123  void blk_freeze_queue_start(struct request_queue *q)   in blk_freeze_queue_start() argument
    125  mutex_lock(&q->mq_freeze_lock);   in blk_freeze_queue_start()
    126  if (++q->mq_freeze_depth == 1) {   in blk_freeze_queue_start()
    127  percpu_ref_kill(&q->q_usage_counter);   in blk_freeze_queue_start()
    128  mutex_unlock(&q->mq_freeze_lock);   in blk_freeze_queue_start()
    [all …]
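blk_freeze_queue_start() is the first half of the queue-freeze protocol; drivers normally reach it through the blk_mq_freeze_queue()/blk_mq_unfreeze_queue() pair, which also waits for q_usage_counter to drain. A minimal sketch of that caller pattern; the helper name and the timeout change are illustrative only:

#include <linux/blkdev.h>
#include <linux/blk-mq.h>

/* Hypothetical driver helper: quiesce all I/O around a queue reconfiguration. */
static void example_reconfigure_timeout(struct request_queue *q, unsigned int timeout)
{
        blk_mq_freeze_queue(q);         /* block new I/O and drain what is in flight */
        blk_queue_rq_timeout(q, timeout);
        blk_mq_unfreeze_queue(q);       /* resume normal operation */
}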
|
D | blk-rq-qos.h |
    61  static inline struct rq_qos *rq_qos_id(struct request_queue *q,   in rq_qos_id() argument
    65  for (rqos = q->rq_qos; rqos; rqos = rqos->next) {   in rq_qos_id()
    72  static inline struct rq_qos *wbt_rq_qos(struct request_queue *q)   in wbt_rq_qos() argument
    74  return rq_qos_id(q, RQ_QOS_WBT);   in wbt_rq_qos()
    77  static inline struct rq_qos *iolat_rq_qos(struct request_queue *q)   in iolat_rq_qos() argument
    79  return rq_qos_id(q, RQ_QOS_LATENCY);   in iolat_rq_qos()
    113  static inline void rq_qos_cleanup(struct request_queue *q, struct bio *bio)   in rq_qos_cleanup() argument
    115  if (q->rq_qos)   in rq_qos_cleanup()
    116  __rq_qos_cleanup(q->rq_qos, bio);   in rq_qos_cleanup()
    119  static inline void rq_qos_done(struct request_queue *q, struct request *rq)   in rq_qos_done() argument
    [all …]
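rq_qos_id() walks the singly linked q->rq_qos list until it finds the policy with the requested id; wbt_rq_qos() and iolat_rq_qos() are just fixed-id wrappers around it. A sketch of how a policy recovers its private state from that lookup; the struct and function names are invented, and RQ_QOS_WBT is reused here purely for illustration (a real policy would use its own id):

#include "blk-rq-qos.h"         /* private header, only visible inside block/ */

/* Hypothetical policy state embedding the generic rq_qos node. */
struct example_qos {
        struct rq_qos rqos;     /* linked into q->rq_qos by the policy's add path */
        u64 throttled;
};

static inline struct example_qos *example_rq_qos(struct request_queue *q)
{
        struct rq_qos *rqos = rq_qos_id(q, RQ_QOS_WBT); /* would be the policy's own id */

        return rqos ? container_of(rqos, struct example_qos, rqos) : NULL;
}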
|
D | blk-mq-sysfs.c |
    54  struct request_queue *q;   in blk_mq_hw_sysfs_show() local
    59  q = hctx->queue;   in blk_mq_hw_sysfs_show()
    64  mutex_lock(&q->sysfs_lock);   in blk_mq_hw_sysfs_show()
    66  mutex_unlock(&q->sysfs_lock);   in blk_mq_hw_sysfs_show()
    160  struct request_queue *q = hctx->queue;   in blk_mq_register_hctx() local
    167  ret = kobject_add(&hctx->kobj, q->mq_kobj, "%u", hctx->queue_num);   in blk_mq_register_hctx()
    192  void blk_mq_sysfs_deinit(struct request_queue *q)   in blk_mq_sysfs_deinit() argument
    198  ctx = per_cpu_ptr(q->queue_ctx, cpu);   in blk_mq_sysfs_deinit()
    201  kobject_put(q->mq_kobj);   in blk_mq_sysfs_deinit()
    204  void blk_mq_sysfs_init(struct request_queue *q)   in blk_mq_sysfs_init() argument
    [all …]
|
D | blk-mq.h |
    43  int blk_mq_poll(struct request_queue *q, blk_qc_t cookie, struct io_comp_batch *iob,
    45  void blk_mq_exit_queue(struct request_queue *q);
    46  int blk_mq_update_nr_requests(struct request_queue *q, unsigned int nr);
    47  void blk_mq_wake_waiters(struct request_queue *q);
    78  static inline struct blk_mq_hw_ctx *blk_mq_map_queue_type(struct request_queue *q,   in blk_mq_map_queue_type() argument
    82  return xa_load(&q->hctx_table, q->tag_set->map[type].mq_map[cpu]);   in blk_mq_map_queue_type()
    105  static inline struct blk_mq_hw_ctx *blk_mq_map_queue(struct request_queue *q,   in blk_mq_map_queue() argument
    115  extern void blk_mq_sysfs_init(struct request_queue *q);
    116  extern void blk_mq_sysfs_deinit(struct request_queue *q);
    119  int blk_mq_sysfs_register_hctxs(struct request_queue *q);
    [all …]
|
D | blk-mq-debugfs.c |
    22  __acquires(&q->requeue_lock)   in queue_requeue_list_start()
    24  struct request_queue *q = m->private;   in queue_requeue_list_start() local
    26  spin_lock_irq(&q->requeue_lock);   in queue_requeue_list_start()
    27  return seq_list_start(&q->requeue_list, *pos);   in queue_requeue_list_start()
    32  struct request_queue *q = m->private;   in queue_requeue_list_next() local
    34  return seq_list_next(v, &q->requeue_list, pos);   in queue_requeue_list_next()
    38  __releases(&q->requeue_lock)   in queue_requeue_list_stop()
    40  struct request_queue *q = m->private;   in queue_requeue_list_stop() local
    42  spin_unlock_irq(&q->requeue_lock);   in queue_requeue_list_stop()
    74  struct request_queue *q = data;   in queue_pm_only_show() local
    [all …]
|
D | blk-ioc.c |
    48  struct elevator_type *et = icq->q->elevator->type;   in ioc_exit_icq()
    76  struct request_queue *q = icq->q;   in ioc_destroy_icq() local
    77  struct elevator_type *et = q->elevator->type;   in ioc_destroy_icq()
    80  lockdep_assert_held(&q->queue_lock);   in ioc_destroy_icq()
    85  radix_tree_delete(&ioc->icq_tree, icq->q->id);   in ioc_destroy_icq()
    121  struct request_queue *q = icq->q;   in ioc_release_fn() local
    123  if (spin_trylock(&q->queue_lock)) {   in ioc_release_fn()
    125  spin_unlock(&q->queue_lock);   in ioc_release_fn()
    132  spin_lock(&q->queue_lock);   in ioc_release_fn()
    137  spin_unlock(&q->queue_lock);   in ioc_release_fn()
    [all …]
|
D | blk.h |
    33  void blk_free_flush_queue(struct blk_flush_queue *q);
    42  void blk_disable_sub_page_limits(struct queue_limits *q);
    44  void blk_freeze_queue(struct request_queue *q);
    45  void __blk_mq_unfreeze_queue(struct request_queue *q, bool force_atomic);
    46  void blk_queue_start_drain(struct request_queue *q);
    47  int __bio_queue_enter(struct request_queue *q, struct bio *bio);
    50  static inline bool blk_try_enter_queue(struct request_queue *q, bool pm)   in blk_try_enter_queue() argument
    53  if (!percpu_ref_tryget_live_rcu(&q->q_usage_counter))   in blk_try_enter_queue()
    60  if (blk_queue_pm_only(q) &&   in blk_try_enter_queue()
    61  (!pm || queue_rpm_status(q) == RPM_SUSPENDED))   in blk_try_enter_queue()
    [all …]
|
D | blk-merge.c |
    52  static inline bool bio_will_gap(struct request_queue *q,   in bio_will_gap() argument
    57  if (!bio_has_data(prev) || !queue_virt_boundary(q))   in bio_will_gap()
    69  if (pb.bv_offset & queue_virt_boundary(q))   in bio_will_gap()
    83  if (biovec_phys_mergeable(q, &pb, &nb))   in bio_will_gap()
    85  return __bvec_gap_to_prev(&q->limits, &pb, nb.bv_offset);   in bio_will_gap()
    90  return bio_will_gap(req->q, req, req->biotail, bio);   in req_gap_back_merge()
    95  return bio_will_gap(req->q, NULL, bio, req->bio);   in req_gap_front_merge()
    426  if (queue_max_discard_segments(rq->q) > 1) {   in blk_recalc_rq_segments()
    441  bvec_split_segs(&rq->q->limits, &bv, &nr_phys_segs, &bytes,   in blk_recalc_rq_segments()
    463  static unsigned blk_bvec_map_sg(struct request_queue *q,   in blk_bvec_map_sg() argument
    [all …]
|
D | blk-stat.c |
    52  struct request_queue *q = rq->q;   in blk_stat_add() local
    65  list_for_each_entry_rcu(cb, &q->stats->callbacks, list) {   in blk_stat_add()
    136  void blk_stat_add_callback(struct request_queue *q,   in blk_stat_add_callback() argument
    151  spin_lock_irqsave(&q->stats->lock, flags);   in blk_stat_add_callback()
    152  list_add_tail_rcu(&cb->list, &q->stats->callbacks);   in blk_stat_add_callback()
    153  blk_queue_flag_set(QUEUE_FLAG_STATS, q);   in blk_stat_add_callback()
    154  spin_unlock_irqrestore(&q->stats->lock, flags);   in blk_stat_add_callback()
    157  void blk_stat_remove_callback(struct request_queue *q,   in blk_stat_remove_callback() argument
    162  spin_lock_irqsave(&q->stats->lock, flags);   in blk_stat_remove_callback()
    164  if (list_empty(&q->stats->callbacks) && !q->stats->accounting)   in blk_stat_remove_callback()
    [all …]
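blk_stat_add_callback() hooks a callback into q->stats->callbacks and sets QUEUE_FLAG_STATS so completions start feeding it. A sketch of the registration pattern in the style blk-wbt.c uses, assuming the allocation and activation helpers declared in block/blk-stat.h (blk_stat_alloc_callback(), blk_stat_activate_msecs()), which are not part of the listing above; all "example" names are invented:

#include <linux/blkdev.h>
#include "blk-stat.h"           /* private header, only visible inside block/ */

static int example_bucket(const struct request *rq)
{
        return rq_data_dir(rq);                 /* bucket 0 = reads, bucket 1 = writes */
}

static void example_timer_fn(struct blk_stat_callback *cb)
{
        /* cb->stat[] now holds per-bucket completion stats for the last window */
        blk_stat_activate_msecs(cb, 100);       /* arm the next 100 ms window */
}

/* Hypothetical setup: start collecting per-direction completion statistics. */
static struct blk_stat_callback *example_enable_stats(struct request_queue *q)
{
        struct blk_stat_callback *cb;

        cb = blk_stat_alloc_callback(example_timer_fn, example_bucket, 2, NULL);
        if (cb) {
                blk_stat_add_callback(q, cb);
                blk_stat_activate_msecs(cb, 100);
        }
        return cb;
}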
|
D | blk-flush.c |
    94  static void blk_kick_flush(struct request_queue *q,
    98  blk_get_flush_queue(struct request_queue *q, struct blk_mq_ctx *ctx)   in blk_get_flush_queue() argument
    100  return blk_mq_map_queue(q, REQ_OP_FLUSH, ctx)->fq;   in blk_get_flush_queue()
    141  struct block_device *part = rq->q->disk->part0;   in blk_account_io_flush()
    167  struct request_queue *q = rq->q;   in blk_flush_complete_seq() local
    191  spin_lock(&q->requeue_lock);   in blk_flush_complete_seq()
    192  list_move(&rq->queuelist, &q->requeue_list);   in blk_flush_complete_seq()
    193  spin_unlock(&q->requeue_lock);   in blk_flush_complete_seq()
    194  blk_mq_kick_requeue_list(q);   in blk_flush_complete_seq()
    213  blk_kick_flush(q, fq, cmd_flags);   in blk_flush_complete_seq()
    [all …]
|
D | blk-mq-debugfs.h |
    25  void blk_mq_debugfs_register(struct request_queue *q);
    26  void blk_mq_debugfs_register_hctx(struct request_queue *q,
    29  void blk_mq_debugfs_register_hctxs(struct request_queue *q);
    30  void blk_mq_debugfs_unregister_hctxs(struct request_queue *q);
    32  void blk_mq_debugfs_register_sched(struct request_queue *q);
    33  void blk_mq_debugfs_unregister_sched(struct request_queue *q);
    34  void blk_mq_debugfs_register_sched_hctx(struct request_queue *q,
    45  static inline void blk_mq_debugfs_register(struct request_queue *q)   in blk_mq_debugfs_register() argument
    49  static inline void blk_mq_debugfs_register_hctx(struct request_queue *q,   in blk_mq_debugfs_register_hctx() argument
    58  static inline void blk_mq_debugfs_register_hctxs(struct request_queue *q)   in blk_mq_debugfs_register_hctxs() argument
    [all …]
|
D | blk-rq-qos.c |
    289  void rq_qos_exit(struct request_queue *q)   in rq_qos_exit() argument
    291  mutex_lock(&q->rq_qos_mutex);   in rq_qos_exit()
    292  while (q->rq_qos) {   in rq_qos_exit()
    293  struct rq_qos *rqos = q->rq_qos;   in rq_qos_exit()
    294  q->rq_qos = rqos->next;   in rq_qos_exit()
    297  mutex_unlock(&q->rq_qos_mutex);   in rq_qos_exit()
    303  struct request_queue *q = disk->queue;   in rq_qos_add() local
    305  lockdep_assert_held(&q->rq_qos_mutex);   in rq_qos_add()
    315  blk_mq_freeze_queue(q);   in rq_qos_add()
    317  if (rq_qos_id(q, rqos->id))   in rq_qos_add()
    [all …]
|
D | mq-deadline.c |
    179  pos -= bdev_offset_from_zone_start(rq->q->disk->part0, pos);   in deadline_from_pos()
    210  static void deadline_remove_request(struct request_queue *q,   in deadline_remove_request() argument
    222  elv_rqhash_del(q, rq);   in deadline_remove_request()
    223  if (q->last_merge == rq)   in deadline_remove_request()
    224  q->last_merge = NULL;   in deadline_remove_request()
    227  static void dd_request_merged(struct request_queue *q, struct request *req,   in dd_request_merged() argument
    230  struct deadline_data *dd = q->elevator->elevator_data;   in dd_request_merged()
    247  static void dd_merged_requests(struct request_queue *q, struct request *req,   in dd_merged_requests() argument
    250  struct deadline_data *dd = q->elevator->elevator_data;   in dd_merged_requests()
    273  deadline_remove_request(q, &dd->per_prio[prio], next);   in dd_merged_requests()
    [all …]
|
D | blk-mq-sched.h |
    10  bool blk_mq_sched_try_merge(struct request_queue *q, struct bio *bio,
    12  bool blk_mq_sched_bio_merge(struct request_queue *q, struct bio *bio,
    14  bool blk_mq_sched_try_insert_merge(struct request_queue *q, struct request *rq,
    21  int blk_mq_init_sched(struct request_queue *q, struct elevator_type *e);
    22  void blk_mq_exit_sched(struct request_queue *q, struct elevator_queue *e);
    23  void blk_mq_sched_free_rqs(struct request_queue *q);
    37  blk_mq_sched_allow_merge(struct request_queue *q, struct request *rq,   in blk_mq_sched_allow_merge() argument
    41  struct elevator_queue *e = q->elevator;   in blk_mq_sched_allow_merge()
    44  return e->type->ops.allow_merge(q, rq, bio);   in blk_mq_sched_allow_merge()
    52  struct elevator_queue *e = rq->q->elevator;   in blk_mq_sched_completed_request()
    [all …]
|
D | bsg-lib.c |
    28  static int bsg_transport_sg_io_fn(struct request_queue *q, struct sg_io_v4 *hdr,   in bsg_transport_sg_io_fn() argument
    43  rq = blk_mq_alloc_request(q, hdr->dout_xfer_len ?   in bsg_transport_sg_io_fn()
    64  job->bidi_rq = blk_mq_alloc_request(rq->q, REQ_OP_DRV_IN, 0);   in bsg_transport_sg_io_fn()
    70  ret = blk_rq_map_user(rq->q, job->bidi_rq, NULL,   in bsg_transport_sg_io_fn()
    84  ret = blk_rq_map_user(rq->q, rq, NULL, uptr64(hdr->dout_xferp),   in bsg_transport_sg_io_fn()
    87  ret = blk_rq_map_user(rq->q, rq, NULL, uptr64(hdr->din_xferp),   in bsg_transport_sg_io_fn()
    196  if (likely(!blk_should_fake_timeout(rq->q)))   in bsg_job_done()
    222  buf->sg_cnt = blk_rq_map_sg(req->q, req, buf->sg_list);   in bsg_map_buffer()
    275  struct request_queue *q = hctx->queue;   in bsg_queue_rq() local
    276  struct device *dev = q->queuedata;   in bsg_queue_rq()
    [all …]
|
D | blk-timeout.c |
    23  bool __blk_should_fake_timeout(struct request_queue *q)   in __blk_should_fake_timeout() argument
    55  struct request_queue *q = disk->queue;   in part_timeout_store() local
    60  blk_queue_flag_set(QUEUE_FLAG_FAIL_IO, q);   in part_timeout_store()
    62  blk_queue_flag_clear(QUEUE_FLAG_FAIL_IO, q);   in part_timeout_store()
    87  kblockd_schedule_work(&req->q->timeout_work);   in blk_abort_request()
    130  struct request_queue *q = req->q;   in blk_add_timer() local
    138  req->timeout = q->rq_timeout;   in blk_add_timer()
    152  if (!timer_pending(&q->timeout) ||   in blk_add_timer()
    153  time_before(expiry, q->timeout.expires)) {   in blk_add_timer()
    154  unsigned long diff = q->timeout.expires - expiry;   in blk_add_timer()
    [all …]
|
D | blk-cgroup.c |
    113  static bool blkcg_policy_enabled(struct request_queue *q,   in blkcg_policy_enabled() argument
    116  return pol && test_bit(pol->plid, q->blkcg_pols);   in blkcg_policy_enabled()
    123  struct request_queue *q = blkg->q;   in blkg_free_workfn() local
    133  mutex_lock(&q->blkcg_mutex);   in blkg_free_workfn()
    139  spin_lock_irq(&q->queue_lock);   in blkg_free_workfn()
    141  spin_unlock_irq(&q->queue_lock);   in blkg_free_workfn()
    142  mutex_unlock(&q->blkcg_mutex);   in blkg_free_workfn()
    144  blk_put_queue(q);   in blkg_free_workfn()
    323  blkg->q = disk->queue;   in blkg_alloc()
    475  struct request_queue *q = disk->queue;   in blkg_lookup_create() local
    [all …]
|
D | blk-mq-tag.c |
    49  struct request_queue *q = hctx->queue;   in __blk_mq_tag_busy() local
    51  if (test_bit(QUEUE_FLAG_HCTX_ACTIVE, &q->queue_flags) ||   in __blk_mq_tag_busy()
    52  test_and_set_bit(QUEUE_FLAG_HCTX_ACTIVE, &q->queue_flags))   in __blk_mq_tag_busy()
    87  struct request_queue *q = hctx->queue;   in __blk_mq_tag_idle() local
    90  &q->queue_flags))   in __blk_mq_tag_idle()
    188  data->ctx = blk_mq_get_ctx(data->q);   in blk_mq_get_tag()
    189  data->hctx = blk_mq_map_queue(data->q, data->cmd_flags,   in blk_mq_get_tag()
    243  struct request_queue *q;   member
    267  struct request_queue *q = iter_data->q;   in bt_iter() local
    268  struct blk_mq_tag_set *set = q->tag_set;   in bt_iter()
    [all …]
|
D | blk-pm.h |
    9  static inline int blk_pm_resume_queue(const bool pm, struct request_queue *q)   in blk_pm_resume_queue() argument
    11  if (!q->dev || !blk_queue_pm_only(q))   in blk_pm_resume_queue()
    13  if (pm && q->rpm_status != RPM_SUSPENDED)   in blk_pm_resume_queue()
    15  pm_request_resume(q->dev);   in blk_pm_resume_queue()
    21  if (rq->q->dev && !(rq->rq_flags & RQF_PM))   in blk_pm_mark_last_busy()
    22  pm_runtime_mark_last_busy(rq->q->dev);   in blk_pm_mark_last_busy()
    25  static inline int blk_pm_resume_queue(const bool pm, struct request_queue *q)   in blk_pm_resume_queue() argument
|