
Searched refs:q (Results 1 – 25 of 58) sorted by relevance


/block/
blk-sysfs.c
   61  static ssize_t queue_requests_show(struct request_queue *q, char *page)
   63          return queue_var_show(q->nr_requests, page);
   67  queue_requests_store(struct request_queue *q, const char *page, size_t count)
   72          if (!queue_is_mq(q))
   82          err = blk_mq_update_nr_requests(q, nr);
   89  static ssize_t queue_ra_show(struct request_queue *q, char *page)
   93          if (!q->disk)
   95          ra_kb = q->disk->bdi->ra_pages << (PAGE_SHIFT - 10);
  100  queue_ra_store(struct request_queue *q, const char *page, size_t count)
  105          if (!q->disk)
[all …]
blk-pm.c
   31  void blk_pm_runtime_init(struct request_queue *q, struct device *dev)
   33          q->dev = dev;
   34          q->rpm_status = RPM_ACTIVE;
   35          pm_runtime_set_autosuspend_delay(q->dev, -1);
   36          pm_runtime_use_autosuspend(q->dev);
   61  int blk_pre_runtime_suspend(struct request_queue *q)
   65          if (!q->dev)
   68          WARN_ON_ONCE(q->rpm_status != RPM_ACTIVE);
   70          spin_lock_irq(&q->queue_lock);
   71          q->rpm_status = RPM_SUSPENDING;
[all …]
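The blk-pm.c hits above show the runtime-PM handshake: blk_pm_runtime_init() records the device and starts the queue in RPM_ACTIVE, and blk_pre_runtime_suspend() moves rpm_status to RPM_SUSPENDING under queue_lock, warning if the queue was not ACTIVE. Below is a minimal userspace sketch of just that state transition; fake_queue, pm_runtime_init() and pre_runtime_suspend() are made-up names, and a pthread mutex stands in for the queue spinlock.

/* Hypothetical model of the rpm_status transition, not kernel code. */
#include <assert.h>
#include <pthread.h>
#include <stdio.h>

enum rpm_status { RPM_ACTIVE, RPM_SUSPENDING, RPM_SUSPENDED };

struct fake_queue {
        pthread_mutex_t queue_lock;
        int have_dev;                 /* stands in for q->dev */
        enum rpm_status rpm_status;
};

static void pm_runtime_init(struct fake_queue *q)
{
        q->have_dev = 1;
        q->rpm_status = RPM_ACTIVE;   /* mirrors blk_pm_runtime_init() */
}

static int pre_runtime_suspend(struct fake_queue *q)
{
        if (!q->have_dev)
                return 0;                            /* no device, nothing to do */
        assert(q->rpm_status == RPM_ACTIVE);         /* WARN_ON_ONCE() stand-in */
        pthread_mutex_lock(&q->queue_lock);
        q->rpm_status = RPM_SUSPENDING;              /* transition under the lock */
        pthread_mutex_unlock(&q->queue_lock);
        return 0;
}

int main(void)
{
        struct fake_queue q = { .queue_lock = PTHREAD_MUTEX_INITIALIZER };

        pm_runtime_init(&q);
        pre_runtime_suspend(&q);
        printf("rpm_status=%d\n", q.rpm_status);     /* 1 == RPM_SUSPENDING */
        return 0;
}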
elevator.c
   62          struct request_queue *q = rq->q;
   63          struct elevator_queue *e = q->elevator;
   66          return e->type->ops.allow_merge(q, rq, bio);
  140  static struct elevator_type *elevator_get(struct request_queue *q,
  147          e = elevator_find(name, q->required_elevator_features);
  152          e = elevator_find(name, q->required_elevator_features);
  164  struct elevator_queue *elevator_alloc(struct request_queue *q,
  169          eq = kzalloc_node(sizeof(*eq), GFP_KERNEL, q->node);
  191  void __elevator_exit(struct request_queue *q, struct elevator_queue *e)
  194          blk_mq_exit_sched(q, e);
[all …]
blk-rq-qos.h
   30          struct request_queue *q;
   62  static inline struct rq_qos *rq_qos_id(struct request_queue *q,
   66          for (rqos = q->rq_qos; rqos; rqos = rqos->next) {
   73  static inline struct rq_qos *wbt_rq_qos(struct request_queue *q)
   75          return rq_qos_id(q, RQ_QOS_WBT);
   78  static inline struct rq_qos *blkcg_rq_qos(struct request_queue *q)
   80          return rq_qos_id(q, RQ_QOS_LATENCY);
   89  static inline int rq_qos_add(struct request_queue *q, struct rq_qos *rqos)
   98          blk_mq_freeze_queue(q);
  100          spin_lock_irq(&q->queue_lock);
[all …]
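The blk-rq-qos.h hits show that rq_qos_id() simply walks the singly linked q->rq_qos chain until it finds the entry with the requested id, and that wbt_rq_qos()/blkcg_rq_qos() are thin wrappers passing RQ_QOS_WBT and RQ_QOS_LATENCY. A small self-contained sketch of the same lookup pattern follows; qos_node and find_qos() are illustrative names, not the kernel structures.

/* Userspace model of the rq_qos lookup-by-id walk. */
#include <stddef.h>
#include <stdio.h>

enum qos_id { QOS_WBT, QOS_LATENCY };

struct qos_node {
        enum qos_id id;
        struct qos_node *next;              /* singly linked, like rqos->next */
};

static struct qos_node *find_qos(struct qos_node *head, enum qos_id id)
{
        struct qos_node *n;

        for (n = head; n; n = n->next)      /* mirrors rq_qos_id()'s loop */
                if (n->id == id)
                        return n;
        return NULL;
}

int main(void)
{
        struct qos_node lat = { .id = QOS_LATENCY, .next = NULL };
        struct qos_node wbt = { .id = QOS_WBT, .next = &lat };

        printf("wbt found: %s\n", find_qos(&wbt, QOS_WBT) ? "yes" : "no");
        printf("latency found: %s\n", find_qos(&wbt, QOS_LATENCY) ? "yes" : "no");
        return 0;
}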
blk-settings.c
   29  void blk_queue_rq_timeout(struct request_queue *q, unsigned int timeout)
   31          q->rq_timeout = timeout;
  107  void blk_queue_bounce_limit(struct request_queue *q, enum blk_bounce bounce)
  109          q->limits.bounce = bounce;
  184  void blk_queue_max_hw_sectors(struct request_queue *q, unsigned int max_hw_sectors)
  186          struct queue_limits *limits = &q->limits;
  210          if (!q->disk)
  212          q->disk->bdi->io_pages = max_sectors >> (PAGE_SHIFT - 9);
  228  void blk_queue_chunk_sectors(struct request_queue *q, unsigned int chunk_sectors)
  230          q->limits.chunk_sectors = chunk_sectors;
[all …]
blk.h
   22          struct request_queue q;
   27  to_internal_q(struct request_queue *q)
   29          return container_of(q, struct internal_request_queue, q);
   49  blk_get_flush_queue(struct request_queue *q, struct blk_mq_ctx *ctx)
   51          return blk_mq_map_queue(q, REQ_OP_FLUSH, ctx)->fq;
   54  static inline void __blk_get_queue(struct request_queue *q)
   56          kobject_get(&q->kobj);
   63  void blk_free_flush_queue(struct blk_flush_queue *q);
   72  void blk_disable_sub_page_limits(struct queue_limits *q);
   74  void blk_freeze_queue(struct request_queue *q);
[all …]
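The blk.h hits show a request_queue embedded inside a larger structure, with to_internal_q() using container_of() to recover the outer structure from a pointer to the embedded member. Below is a self-contained illustration of that idiom with made-up outer/inner types; the container_of macro is spelled out so the example compiles on its own.

/* Illustration of the container_of() pattern used by to_internal_q(). */
#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

struct inner { int x; };

struct outer {
        int extra;
        struct inner q;               /* embedded member, like "struct request_queue q" */
};

static struct outer *to_outer(struct inner *q)
{
        return container_of(q, struct outer, q);
}

int main(void)
{
        struct outer o = { .extra = 42, .q = { .x = 7 } };
        struct inner *ip = &o.q;      /* only the embedded pointer gets passed around */

        printf("extra=%d x=%d\n", to_outer(ip)->extra, to_outer(ip)->x);
        return 0;
}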
blk-mq.c
   47  static void blk_mq_poll_stats_start(struct request_queue *q);
  116  unsigned int blk_mq_in_flight(struct request_queue *q,
  121          blk_mq_queue_tag_busy_iter(q, blk_mq_check_inflight, &mi);
  126  void blk_mq_in_flight_rw(struct request_queue *q, struct block_device *part,
  131          blk_mq_queue_tag_busy_iter(q, blk_mq_check_inflight, &mi);
  136  void blk_freeze_queue_start(struct request_queue *q)
  138          mutex_lock(&q->mq_freeze_lock);
  139          if (++q->mq_freeze_depth == 1) {
  140                  percpu_ref_kill(&q->q_usage_counter);
  141                  mutex_unlock(&q->mq_freeze_lock);
[all …]
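The blk_freeze_queue_start() hits show the freeze-depth pattern: mq_freeze_depth is bumped under mq_freeze_lock, and only the 0-to-1 transition does the expensive step of killing q_usage_counter, so nested freezes are cheap. A userspace model of that pattern is sketched below; freeze_start()/unfreeze() are illustrative names and a plain flag stands in for the percpu_ref.

/* Model of the depth-counted freeze: first freezer kills, last unfreezer revives. */
#include <pthread.h>
#include <stdio.h>

struct fake_queue {
        pthread_mutex_t freeze_lock;
        int freeze_depth;
        int usage_counter_live;       /* stands in for the percpu_ref state */
};

static void freeze_start(struct fake_queue *q)
{
        pthread_mutex_lock(&q->freeze_lock);
        if (++q->freeze_depth == 1)
                q->usage_counter_live = 0;   /* percpu_ref_kill() equivalent */
        pthread_mutex_unlock(&q->freeze_lock);
}

static void unfreeze(struct fake_queue *q)
{
        pthread_mutex_lock(&q->freeze_lock);
        if (--q->freeze_depth == 0)
                q->usage_counter_live = 1;   /* revive on the last unfreeze */
        pthread_mutex_unlock(&q->freeze_lock);
}

int main(void)
{
        struct fake_queue q = { .freeze_lock = PTHREAD_MUTEX_INITIALIZER,
                                .usage_counter_live = 1 };

        freeze_start(&q);
        freeze_start(&q);             /* nested freeze: no second kill */
        unfreeze(&q);
        unfreeze(&q);
        printf("live=%d depth=%d\n", q.usage_counter_live, q.freeze_depth);
        return 0;
}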
blk-merge.c
   17  static inline bool bio_will_gap(struct request_queue *q,
   22          if (!bio_has_data(prev) || !queue_virt_boundary(q))
   34          if (pb.bv_offset & queue_virt_boundary(q))
   48          if (biovec_phys_mergeable(q, &pb, &nb))
   50          return __bvec_gap_to_prev(q, &pb, nb.bv_offset);
   55          return bio_will_gap(req->q, req, req->biotail, bio);
   60          return bio_will_gap(req->q, NULL, bio, req->bio);
   63  static struct bio *blk_bio_discard_split(struct request_queue *q,
   76          granularity = max(q->limits.discard_granularity >> 9, 1U);
   78          max_discard_sectors = min(q->limits.max_discard_sectors,
[all …]
blk-mq-sched.c
   23          struct request_queue *q = rq->q;
   34          spin_lock_irq(&q->queue_lock);
   35          icq = ioc_lookup_icq(ioc, q);
   36          spin_unlock_irq(&q->queue_lock);
   39          icq = ioc_create_icq(ioc, q, GFP_ATOMIC);
  119          struct request_queue *q = hctx->queue;
  120          struct elevator_queue *e = q->elevator;
  144          budget_token = blk_mq_get_dispatch_budget(q);
  150          blk_mq_put_dispatch_budget(q, budget_token);
  186          blk_mq_delay_run_hw_queues(q, BLK_MQ_BUDGET_DELAY);
[all …]
blk-core.c
   86  void blk_queue_flag_set(unsigned int flag, struct request_queue *q)
   88          set_bit(flag, &q->queue_flags);
   97  void blk_queue_flag_clear(unsigned int flag, struct request_queue *q)
   99          clear_bit(flag, &q->queue_flags);
  111  bool blk_queue_flag_test_and_set(unsigned int flag, struct request_queue *q)
  113          return test_and_set_bit(flag, &q->queue_flags);
  117  void blk_rq_init(struct request_queue *q, struct request *rq)
  122          rq->q = q;
  302  void blk_sync_queue(struct request_queue *q)
  304          del_timer_sync(&q->timeout);
[all …]
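The blk-core.c hits show that the queue flag helpers are thin wrappers over atomic bit operations on q->queue_flags. The sketch below reproduces the same set/clear/test-and-set pattern with C11 atomics on a plain bitmask; the flag names and bit numbers here are made up for illustration.

/* Atomic bit-flag helpers in the style of blk_queue_flag_set/clear/test_and_set. */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static _Atomic unsigned long queue_flags;

static void flag_set(unsigned int flag)
{
        atomic_fetch_or(&queue_flags, 1UL << flag);        /* like set_bit() */
}

static void flag_clear(unsigned int flag)
{
        atomic_fetch_and(&queue_flags, ~(1UL << flag));    /* like clear_bit() */
}

static bool flag_test_and_set(unsigned int flag)
{
        unsigned long old = atomic_fetch_or(&queue_flags, 1UL << flag);

        return old & (1UL << flag);                        /* like test_and_set_bit() */
}

int main(void)
{
        enum { EXAMPLE_FLAG = 3 };                         /* made-up bit number */

        printf("first set, already set? %d\n", flag_test_and_set(EXAMPLE_FLAG));
        printf("second set, already set? %d\n", flag_test_and_set(EXAMPLE_FLAG));
        flag_clear(EXAMPLE_FLAG);
        flag_set(EXAMPLE_FLAG);
        return 0;
}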
blk-mq-sysfs.c
   59          struct request_queue *q;
   64          q = hctx->queue;
   69          mutex_lock(&q->sysfs_lock);
   71          mutex_unlock(&q->sysfs_lock);
   81          struct request_queue *q;
   86          q = hctx->queue;
   91          mutex_lock(&q->sysfs_lock);
   93          mutex_unlock(&q->sysfs_lock);
  188          struct request_queue *q = hctx->queue;
  195          ret = kobject_add(&hctx->kobj, q->mq_kobj, "%u", hctx->queue_num);
[all …]
blk-mq.h
   42  void blk_mq_exit_queue(struct request_queue *q);
   43  int blk_mq_update_nr_requests(struct request_queue *q, unsigned int nr);
   44  void blk_mq_wake_waiters(struct request_queue *q);
   94  static inline struct blk_mq_hw_ctx *blk_mq_map_queue_type(struct request_queue *q,
   98          return q->queue_hw_ctx[q->tag_set->map[type].mq_map[cpu]];
  107  static inline struct blk_mq_hw_ctx *blk_mq_map_queue(struct request_queue *q,
  127  extern void blk_mq_sysfs_init(struct request_queue *q);
  128  extern void blk_mq_sysfs_deinit(struct request_queue *q);
  129  extern int __blk_mq_register_dev(struct device *dev, struct request_queue *q);
  130  extern int blk_mq_sysfs_register(struct request_queue *q);
[all …]
blk-zoned.c
   60          return rq->q->seq_zones_wlock && blk_rq_is_seq_zoned_write(rq);
   68          if (test_and_set_bit(zno, rq->q->seq_zones_wlock))
   81                          rq->q->seq_zones_wlock)))
   92          if (rq->q->seq_zones_wlock)
   94                          rq->q->seq_zones_wlock));
  186          struct request_queue *q = bdev_get_queue(bdev);
  188          sector_t zone_sectors = blk_queue_zone_sectors(q);
  194          need_reset = blk_alloc_zone_bitmap(q->node, q->nr_zones);
  199                          q->nr_zones, blk_zone_need_reset_cb,
  206          if (!test_bit(blk_queue_zone_no(q, sector), need_reset)) {
[all …]
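The blk-zoned.c hits point at the per-zone write lock: q->seq_zones_wlock is a bitmap with one bit per zone, and taking the lock is an atomic test-and-set of that zone's bit (blk_req_zone_write_trylock). The sketch below models that idea with a single-word bitmap and C11 atomics; zone_trylock()/zone_unlock() are illustrative names only.

/* One bit per zone; trylock == atomic test-and-set of the zone's bit. */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static _Atomic unsigned long zones_wlock;   /* bit n set == zone n write-locked */

static bool zone_trylock(unsigned int zno)
{
        unsigned long old = atomic_fetch_or(&zones_wlock, 1UL << zno);

        return !(old & (1UL << zno));       /* true only if we set the bit */
}

static void zone_unlock(unsigned int zno)
{
        atomic_fetch_and(&zones_wlock, ~(1UL << zno));
}

int main(void)
{
        printf("lock zone 5: %d\n", zone_trylock(5));               /* 1 */
        printf("lock zone 5 again: %d\n", zone_trylock(5));         /* 0: held */
        zone_unlock(5);
        printf("lock zone 5 after unlock: %d\n", zone_trylock(5));  /* 1 */
        return 0;
}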
blk-mq-debugfs.h
   23  void blk_mq_debugfs_register(struct request_queue *q);
   24  void blk_mq_debugfs_unregister(struct request_queue *q);
   25  void blk_mq_debugfs_register_hctx(struct request_queue *q,
   28  void blk_mq_debugfs_register_hctxs(struct request_queue *q);
   29  void blk_mq_debugfs_unregister_hctxs(struct request_queue *q);
   31  void blk_mq_debugfs_register_sched(struct request_queue *q);
   32  void blk_mq_debugfs_unregister_sched(struct request_queue *q);
   33  void blk_mq_debugfs_register_sched_hctx(struct request_queue *q,
   39  void blk_mq_debugfs_unregister_queue_rqos(struct request_queue *q);
   45  static inline void blk_mq_debugfs_register(struct request_queue *q)
[all …]
blk-cgroup.c
   61  static bool blkcg_policy_enabled(struct request_queue *q,
   64          return pol && test_bit(pol->plid, q->blkcg_pols);
  151  static struct blkcg_gq *blkg_alloc(struct blkcg *blkcg, struct request_queue *q,
  158          blkg = kzalloc_node(sizeof(*blkg), gfp_mask, q->node);
  169          blkg->q = q;
  184          if (!blkcg_policy_enabled(q, pol))
  188          pd = pol->pd_alloc_fn(gfp_mask, q, blkcg);
  205                                  struct request_queue *q, bool update_hint)
  215          blkg = radix_tree_lookup(&blkcg->blkg_tree, q->id);
  216          if (blkg && blkg->q == q) {
[all …]
blk-mq-debugfs.c
   29          struct request_queue *q = data;
   34          print_stat(m, &q->poll_stat[2 * bucket]);
   38          print_stat(m, &q->poll_stat[2 * bucket + 1]);
   45          __acquires(&q->requeue_lock)
   47          struct request_queue *q = m->private;
   49          spin_lock_irq(&q->requeue_lock);
   50          return seq_list_start(&q->requeue_list, *pos);
   55          struct request_queue *q = m->private;
   57          return seq_list_next(v, &q->requeue_list, pos);
   61          __releases(&q->requeue_lock)
[all …]
blk-ioc.c
   45          struct elevator_type *et = icq->q->elevator->type;
   63          struct request_queue *q = icq->q;
   64          struct elevator_type *et = q->elevator->type;
   68          radix_tree_delete(&ioc->icq_tree, icq->q->id);
  104          struct request_queue *q = icq->q;
  106          if (spin_trylock(&q->queue_lock)) {
  108                  spin_unlock(&q->queue_lock);
  115                  spin_lock(&q->queue_lock);
  125          spin_unlock(&q->queue_lock);
  239  void ioc_clear_queue(struct request_queue *q)
[all …]
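The ioc_release_fn() hits suggest a trylock-then-back-off pattern: while one lock is held, the queue_lock is only tried, and on contention the code drops back and retries rather than blocking, which avoids taking the two locks in a deadlock-prone order. The sketch below models that generic pattern with pthread mutexes; the lock names and release_with_backoff() are illustrative, not the kernel's exact locking scheme.

/* Trylock-or-back-off: never block on B while holding A in the "wrong" order. */
#include <pthread.h>
#include <sched.h>
#include <stdio.h>

static pthread_mutex_t ioc_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_mutex_t queue_lock = PTHREAD_MUTEX_INITIALIZER;

static void release_with_backoff(void)
{
        pthread_mutex_lock(&ioc_lock);
        while (pthread_mutex_trylock(&queue_lock) != 0) {
                /* Can't take queue_lock while holding ioc_lock: back off and retry. */
                pthread_mutex_unlock(&ioc_lock);
                sched_yield();
                pthread_mutex_lock(&ioc_lock);
        }
        /* ... both locks held: safe to tear down the shared state here ... */
        pthread_mutex_unlock(&queue_lock);
        pthread_mutex_unlock(&ioc_lock);
}

int main(void)
{
        release_with_backoff();
        puts("done");
        return 0;
}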
blk-mq-sched.h
   12  bool blk_mq_sched_try_merge(struct request_queue *q, struct bio *bio,
   14  bool __blk_mq_sched_bio_merge(struct request_queue *q, struct bio *bio,
   16  bool blk_mq_sched_try_insert_merge(struct request_queue *q, struct request *rq,
   29  int blk_mq_init_sched(struct request_queue *q, struct elevator_type *e);
   30  void blk_mq_exit_sched(struct request_queue *q, struct elevator_queue *e);
   31  void blk_mq_sched_free_requests(struct request_queue *q);
   34  blk_mq_sched_bio_merge(struct request_queue *q, struct bio *bio,
   37          if (blk_queue_nomerges(q) || !bio_mergeable(bio))
   40          return __blk_mq_sched_bio_merge(q, bio, nr_segs);
   44  blk_mq_sched_allow_merge(struct request_queue *q, struct request *rq,
[all …]
mq-deadline.c
  181          pos -= bdev_offset_from_zone_start(rq->q->disk->part0, pos);
  212  static void deadline_remove_request(struct request_queue *q,
  224          elv_rqhash_del(q, rq);
  225          if (q->last_merge == rq)
  226                  q->last_merge = NULL;
  229  static void dd_request_merged(struct request_queue *q, struct request *req,
  232          struct deadline_data *dd = q->elevator->elevator_data;
  249  static void dd_merged_requests(struct request_queue *q, struct request *req,
  252          struct deadline_data *dd = q->elevator->elevator_data;
  275          deadline_remove_request(q, &dd->per_prio[prio], next);
[all …]
blk-stat.c
   53          struct request_queue *q = rq->q;
   65          list_for_each_entry_rcu(cb, &q->stats->callbacks, list) {
  136  void blk_stat_add_callback(struct request_queue *q,
  151          spin_lock_irqsave(&q->stats->lock, flags);
  152          list_add_tail_rcu(&cb->list, &q->stats->callbacks);
  153          blk_queue_flag_set(QUEUE_FLAG_STATS, q);
  154          spin_unlock_irqrestore(&q->stats->lock, flags);
  157  void blk_stat_remove_callback(struct request_queue *q,
  162          spin_lock_irqsave(&q->stats->lock, flags);
  164          if (list_empty(&q->stats->callbacks) && !q->stats->enable_accounting)
[all …]
blk-timeout.c
   23  bool __blk_should_fake_timeout(struct request_queue *q)
   55          struct request_queue *q = disk->queue;
   60          blk_queue_flag_set(QUEUE_FLAG_FAIL_IO, q);
   62          blk_queue_flag_clear(QUEUE_FLAG_FAIL_IO, q);
   87          kblockd_schedule_work(&req->q->timeout_work);
  130          struct request_queue *q = req->q;
  138          req->timeout = q->rq_timeout;
  152          if (!timer_pending(&q->timeout) ||
  153              time_before(expiry, q->timeout.expires)) {
  154                  unsigned long diff = q->timeout.expires - expiry;
[all …]
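The blk_add_timer() hits show that a request falls back to q->rq_timeout and that the per-queue timer is only rearmed when no timer is pending or the new request expires earlier than the currently armed deadline. A simplified userspace sketch of that "keep the earliest deadline" check follows; jiffies and time_before() are reduced to plain integers, and maybe_rearm() is an illustrative name.

/* Rearm the shared timer only if nothing is pending or the new deadline is sooner. */
#include <stdbool.h>
#include <stdio.h>

struct fake_timer {
        bool pending;
        unsigned long expires;
};

static void maybe_rearm(struct fake_timer *t, unsigned long expiry)
{
        if (!t->pending || expiry < t->expires) {
                t->pending = true;
                t->expires = expiry;
        }
}

int main(void)
{
        struct fake_timer t = { 0 };

        maybe_rearm(&t, 300);   /* arms the timer */
        maybe_rearm(&t, 500);   /* later deadline: leave the timer alone */
        maybe_rearm(&t, 200);   /* earlier deadline: pull the timer in */
        printf("expires=%lu\n", t.expires);   /* 200 */
        return 0;
}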
bsg-lib.c
   28  static int bsg_transport_sg_io_fn(struct request_queue *q, struct sg_io_v4 *hdr,
   42          rq = blk_get_request(q, hdr->dout_xfer_len ?
   57          job->bidi_rq = blk_get_request(rq->q, REQ_OP_DRV_IN, 0);
   63          ret = blk_rq_map_user(rq->q, job->bidi_rq, NULL,
   77          ret = blk_rq_map_user(rq->q, rq, NULL, uptr64(hdr->dout_xferp),
   80          ret = blk_rq_map_user(rq->q, rq, NULL, uptr64(hdr->din_xferp),
  189          if (likely(!blk_should_fake_timeout(rq->q)))
  215          buf->sg_cnt = blk_rq_map_sg(req->q, req, buf->sg_list);
  268          struct request_queue *q = hctx->queue;
  269          struct device *dev = q->queuedata;
[all …]
blk-pm.h
    9  static inline int blk_pm_resume_queue(const bool pm, struct request_queue *q)
   11          if (!q->dev || !blk_queue_pm_only(q))
   13          if (pm && q->rpm_status != RPM_SUSPENDED)
   15          pm_request_resume(q->dev);
   21          if (rq->q->dev && !(rq->rq_flags & RQF_PM))
   22                  pm_runtime_mark_last_busy(rq->q->dev);
   25  static inline int blk_pm_resume_queue(const bool pm, struct request_queue *q)
blk-wbt.c
  100          struct bdi_writeback *wb = &rwb->rqos.q->disk->bdi->wb;
  237          struct backing_dev_info *bdi = rwb->rqos.q->disk->bdi;
  290          struct backing_dev_info *bdi = rwb->rqos.q->disk->bdi;
  360          if (!rwb->rqos.q->disk)
  365          trace_wbt_timer(rwb->rqos.q->disk->bdi, status, rqd->scale_step,
  425  u64 wbt_get_min_lat(struct request_queue *q)
  427          struct rq_qos *rqos = wbt_rq_qos(q);
  433  void wbt_set_min_lat(struct request_queue *q, u64 val)
  435          struct rq_qos *rqos = wbt_rq_qos(q);
  629  void wbt_set_write_cache(struct request_queue *q, bool write_cache_on)
[all …]
blk-flush.c
   95  static void blk_kick_flush(struct request_queue *q,
  167          struct request_queue *q = rq->q;
  211          blk_kick_flush(q, fq, cmd_flags);
  216          struct request_queue *q = flush_rq->q;
  220          struct blk_flush_queue *fq = blk_get_flush_queue(q, flush_rq->mq_ctx);
  243          if (!q->elevator) {
  285  static void blk_kick_flush(struct request_queue *q, struct blk_flush_queue *fq,
  309          blk_rq_init(q, flush_rq);
  322          if (!q->elevator) {
  353          struct request_queue *q = rq->q;
[all …]
