Searched refs:q (Results 1 – 25 of 58) sorted by relevance


/block/
blk-sysfs.c
61 static ssize_t queue_requests_show(struct request_queue *q, char *page) in queue_requests_show() argument
63 return queue_var_show(q->nr_requests, (page)); in queue_requests_show()
67 queue_requests_store(struct request_queue *q, const char *page, size_t count) in queue_requests_store() argument
72 if (!queue_is_mq(q)) in queue_requests_store()
82 err = blk_mq_update_nr_requests(q, nr); in queue_requests_store()
89 static ssize_t queue_ra_show(struct request_queue *q, char *page) in queue_ra_show() argument
91 unsigned long ra_kb = q->backing_dev_info->ra_pages << in queue_ra_show()
98 queue_ra_store(struct request_queue *q, const char *page, size_t count) in queue_ra_store() argument
106 q->backing_dev_info->ra_pages = ra_kb >> (PAGE_SHIFT - 10); in queue_ra_store()
111 static ssize_t queue_max_sectors_show(struct request_queue *q, char *page) in queue_max_sectors_show() argument
[all …]
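
The handlers above back the attribute files under /sys/block/<dev>/queue/ (nr_requests, read_ahead_kb, max_sectors_kb, ...). A minimal userspace sketch of exercising queue_ra_show()/queue_ra_store() from that side, assuming a hypothetical device named "sda":

```c
/* Minimal userspace sketch (hypothetical device "sda"): the files under
 * /sys/block/<dev>/queue/ are what the queue_*_show()/_store() handlers serve. */
#include <stdio.h>

int main(void)
{
    const char *path = "/sys/block/sda/queue/read_ahead_kb";
    char buf[64];
    FILE *f;

    f = fopen(path, "r");
    if (!f) {
        perror(path);
        return 1;
    }
    if (fgets(buf, sizeof(buf), f))
        printf("current read_ahead_kb: %s", buf);
    fclose(f);

    /* Writing a new value goes through queue_ra_store(); needs root. */
    f = fopen(path, "w");
    if (f) {
        fputs("128\n", f);
        fclose(f);
    }
    return 0;
}
```
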
blk-pm.c
31 void blk_pm_runtime_init(struct request_queue *q, struct device *dev) in blk_pm_runtime_init() argument
33 q->dev = dev; in blk_pm_runtime_init()
34 q->rpm_status = RPM_ACTIVE; in blk_pm_runtime_init()
35 pm_runtime_set_autosuspend_delay(q->dev, -1); in blk_pm_runtime_init()
36 pm_runtime_use_autosuspend(q->dev); in blk_pm_runtime_init()
61 int blk_pre_runtime_suspend(struct request_queue *q) in blk_pre_runtime_suspend() argument
65 if (!q->dev) in blk_pre_runtime_suspend()
68 WARN_ON_ONCE(q->rpm_status != RPM_ACTIVE); in blk_pre_runtime_suspend()
70 spin_lock_irq(&q->queue_lock); in blk_pre_runtime_suspend()
71 q->rpm_status = RPM_SUSPENDING; in blk_pre_runtime_suspend()
[all …]
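
blk_pm_runtime_init() is what a driver calls to opt its queue into block-layer runtime PM; the excerpt shows it recording the device and leaving autosuspend disabled (delay -1) until the driver picks a delay. A hedged driver-side sketch, where my_driver_setup_pm() and the 5-second delay are illustrative only:

```c
/* Sketch of a driver enabling block-layer runtime PM for its queue.
 * my_driver_setup_pm() and the chosen delay are placeholders. */
#include <linux/blk-pm.h>
#include <linux/pm_runtime.h>

static void my_driver_setup_pm(struct request_queue *q, struct device *dev)
{
    /* Ties q to dev and marks the queue RPM_ACTIVE (see excerpt above). */
    blk_pm_runtime_init(q, dev);

    /* blk_pm_runtime_init() leaves autosuspend disabled (-1); the driver
     * picks a real delay once it is ready to suspend. */
    pm_runtime_set_autosuspend_delay(dev, 5000);
}
```
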
elevator.c
62 struct request_queue *q = rq->q; in elv_iosched_allow_bio_merge() local
63 struct elevator_queue *e = q->elevator; in elv_iosched_allow_bio_merge()
66 return e->type->ops.allow_merge(q, rq, bio); in elv_iosched_allow_bio_merge()
140 static struct elevator_type *elevator_get(struct request_queue *q, in elevator_get() argument
147 e = elevator_find(name, q->required_elevator_features); in elevator_get()
152 e = elevator_find(name, q->required_elevator_features); in elevator_get()
164 struct elevator_queue *elevator_alloc(struct request_queue *q, in elevator_alloc() argument
169 eq = kzalloc_node(sizeof(*eq), GFP_KERNEL, q->node); in elevator_alloc()
191 void __elevator_exit(struct request_queue *q, struct elevator_queue *e) in __elevator_exit() argument
194 blk_mq_exit_sched(q, e); in __elevator_exit()
[all …]
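
elevator_get()/elevator_find() resolve a scheduler by name, for example when one is written to the queue's scheduler attribute. A small userspace sketch of triggering that path, assuming a hypothetical device "sda":

```c
/* Userspace sketch (hypothetical device "sda"): selecting an I/O scheduler
 * by name, which lands in elevator_get()/elevator_find() on the kernel side. */
#include <stdio.h>

int main(void)
{
    const char *path = "/sys/block/sda/queue/scheduler";
    FILE *f = fopen(path, "w");

    if (!f) {
        perror(path);
        return 1;
    }
    fputs("mq-deadline\n", f);    /* or "none", "bfq", "kyber" */
    fclose(f);
    return 0;
}
```
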
blk-rq-qos.h
30 struct request_queue *q; member
62 static inline struct rq_qos *rq_qos_id(struct request_queue *q, in rq_qos_id() argument
66 for (rqos = q->rq_qos; rqos; rqos = rqos->next) { in rq_qos_id()
73 static inline struct rq_qos *wbt_rq_qos(struct request_queue *q) in wbt_rq_qos() argument
75 return rq_qos_id(q, RQ_QOS_WBT); in wbt_rq_qos()
78 static inline struct rq_qos *blkcg_rq_qos(struct request_queue *q) in blkcg_rq_qos() argument
80 return rq_qos_id(q, RQ_QOS_LATENCY); in blkcg_rq_qos()
89 static inline void rq_qos_add(struct request_queue *q, struct rq_qos *rqos) in rq_qos_add() argument
98 blk_mq_freeze_queue(q); in rq_qos_add()
100 spin_lock_irq(&q->queue_lock); in rq_qos_add()
[all …]
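
rq_qos policies hang off q->rq_qos as a singly linked list and rq_qos_id() walks it by id, which is how wbt_rq_qos() and blkcg_rq_qos() are built. A toy, userspace-compilable sketch of the same lookup pattern; the names here are illustrative, not kernel symbols:

```c
/* Toy sketch of the rq_qos_id() lookup pattern: a singly linked list of
 * policies keyed by an id. None of these names are the kernel's. */
#include <stdio.h>
#include <stddef.h>

enum qos_id { QOS_WBT, QOS_LATENCY, QOS_COST };

struct qos_node {
    enum qos_id id;
    struct qos_node *next;
};

static struct qos_node *qos_find(struct qos_node *head, enum qos_id id)
{
    for (struct qos_node *n = head; n; n = n->next)
        if (n->id == id)
            return n;
    return NULL;
}

int main(void)
{
    struct qos_node lat = { QOS_LATENCY, NULL };
    struct qos_node wbt = { QOS_WBT, &lat };

    printf("latency node found: %p\n", (void *)qos_find(&wbt, QOS_LATENCY));
    return 0;
}
```
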
blk-settings.c
25 void blk_queue_rq_timeout(struct request_queue *q, unsigned int timeout) in blk_queue_rq_timeout() argument
27 q->rq_timeout = timeout; in blk_queue_rq_timeout()
102 void blk_queue_bounce_limit(struct request_queue *q, u64 max_addr) in blk_queue_bounce_limit() argument
107 q->bounce_gfp = GFP_NOIO; in blk_queue_bounce_limit()
116 q->limits.bounce_pfn = max(max_low_pfn, b_pfn); in blk_queue_bounce_limit()
120 q->limits.bounce_pfn = b_pfn; in blk_queue_bounce_limit()
124 q->bounce_gfp = GFP_NOIO | GFP_DMA; in blk_queue_bounce_limit()
125 q->limits.bounce_pfn = b_pfn; in blk_queue_bounce_limit()
149 void blk_queue_max_hw_sectors(struct request_queue *q, unsigned int max_hw_sectors) in blk_queue_max_hw_sectors() argument
151 struct queue_limits *limits = &q->limits; in blk_queue_max_hw_sectors()
[all …]
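
These setters are typically called once at probe time while a driver configures its queue limits. A hedged sketch of such a setup function; my_configure_queue() and the specific values are placeholders:

```c
/* Sketch of probe-time queue-limit setup in a block driver; the values are
 * examples only, the blk_queue_* helpers are the ones excerpted above. */
#include <linux/blkdev.h>

static void my_configure_queue(struct request_queue *q)
{
    blk_queue_max_hw_sectors(q, 256);        /* 256 * 512 B = 128 KiB per request */
    blk_queue_logical_block_size(q, 512);
    blk_queue_rq_timeout(q, 30 * HZ);        /* see blk_queue_rq_timeout() above */
}
```
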
blk-merge.c
19 static inline bool bio_will_gap(struct request_queue *q, in bio_will_gap() argument
24 if (!bio_has_data(prev) || !queue_virt_boundary(q)) in bio_will_gap()
36 if (pb.bv_offset & queue_virt_boundary(q)) in bio_will_gap()
50 if (biovec_phys_mergeable(q, &pb, &nb)) in bio_will_gap()
52 return __bvec_gap_to_prev(q, &pb, nb.bv_offset); in bio_will_gap()
57 return bio_will_gap(req->q, req, req->biotail, bio); in req_gap_back_merge()
62 return bio_will_gap(req->q, NULL, bio, req->bio); in req_gap_front_merge()
65 static struct bio *blk_bio_discard_split(struct request_queue *q, in blk_bio_discard_split() argument
78 granularity = max(q->limits.discard_granularity >> 9, 1U); in blk_bio_discard_split()
80 max_discard_sectors = min(q->limits.max_discard_sectors, in blk_bio_discard_split()
[all …]
blk-mq.c
48 static void blk_mq_poll_stats_start(struct request_queue *q);
117 unsigned int blk_mq_in_flight(struct request_queue *q, struct hd_struct *part) in blk_mq_in_flight() argument
121 blk_mq_queue_tag_busy_iter(q, blk_mq_check_inflight, &mi); in blk_mq_in_flight()
126 void blk_mq_in_flight_rw(struct request_queue *q, struct hd_struct *part, in blk_mq_in_flight_rw() argument
131 blk_mq_queue_tag_busy_iter(q, blk_mq_check_inflight, &mi); in blk_mq_in_flight_rw()
136 void blk_freeze_queue_start(struct request_queue *q) in blk_freeze_queue_start() argument
138 mutex_lock(&q->mq_freeze_lock); in blk_freeze_queue_start()
139 if (++q->mq_freeze_depth == 1) { in blk_freeze_queue_start()
140 percpu_ref_kill(&q->q_usage_counter); in blk_freeze_queue_start()
141 mutex_unlock(&q->mq_freeze_lock); in blk_freeze_queue_start()
[all …]
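
blk_freeze_queue_start() is the first half of the freeze/unfreeze pair that drains q_usage_counter before queue-wide changes, the same pattern visible in rq_qos_add() earlier in these results. A hedged sketch of the usual usage; my_update_queue() is a placeholder:

```c
/* Sketch of the usual freeze/update/unfreeze pattern around a queue-wide
 * change; my_update_queue() is a placeholder for whatever is being changed. */
#include <linux/blk-mq.h>

static void my_update_queue(struct request_queue *q)
{
    blk_mq_freeze_queue(q);        /* waits for q_usage_counter to drain */
    /* ... modify per-queue state that in-flight requests must not see ... */
    blk_mq_unfreeze_queue(q);
}
```
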
blk-core.c
89 void blk_queue_flag_set(unsigned int flag, struct request_queue *q) in blk_queue_flag_set() argument
91 set_bit(flag, &q->queue_flags); in blk_queue_flag_set()
100 void blk_queue_flag_clear(unsigned int flag, struct request_queue *q) in blk_queue_flag_clear() argument
102 clear_bit(flag, &q->queue_flags); in blk_queue_flag_clear()
114 bool blk_queue_flag_test_and_set(unsigned int flag, struct request_queue *q) in blk_queue_flag_test_and_set() argument
116 return test_and_set_bit(flag, &q->queue_flags); in blk_queue_flag_test_and_set()
120 void blk_rq_init(struct request_queue *q, struct request *rq) in blk_rq_init() argument
125 rq->q = q; in blk_rq_init()
307 void blk_sync_queue(struct request_queue *q) in blk_sync_queue() argument
309 del_timer_sync(&q->timeout); in blk_sync_queue()
[all …]
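
blk_queue_flag_set()/blk_queue_flag_clear() are the atomic helpers drivers use to toggle bits in q->queue_flags. A hedged sketch of a driver marking its queue non-rotational; my_set_queue_flags() is a placeholder:

```c
/* Sketch: a driver marking its queue non-rotational and opting out of
 * entropy contribution, using the flag helpers excerpted above. */
#include <linux/blkdev.h>

static void my_set_queue_flags(struct request_queue *q)
{
    blk_queue_flag_set(QUEUE_FLAG_NONROT, q);
    blk_queue_flag_clear(QUEUE_FLAG_ADD_RANDOM, q);
}
```
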
blk-mq-sched.c
23 struct request_queue *q = rq->q; in blk_mq_sched_assign_ioc() local
34 spin_lock_irq(&q->queue_lock); in blk_mq_sched_assign_ioc()
35 icq = ioc_lookup_icq(ioc, q); in blk_mq_sched_assign_ioc()
36 spin_unlock_irq(&q->queue_lock); in blk_mq_sched_assign_ioc()
39 icq = ioc_create_icq(ioc, q, GFP_ATOMIC); in blk_mq_sched_assign_ioc()
118 struct request_queue *q = hctx->queue; in __blk_mq_do_dispatch_sched() local
119 struct elevator_queue *e = q->elevator; in __blk_mq_do_dispatch_sched()
142 if (!blk_mq_get_dispatch_budget(q)) in __blk_mq_do_dispatch_sched()
147 blk_mq_put_dispatch_budget(q); in __blk_mq_do_dispatch_sched()
171 blk_mq_delay_run_hw_queues(q, BLK_MQ_BUDGET_DELAY); in __blk_mq_do_dispatch_sched()
[all …]
blk-mq-sysfs.c
65 struct request_queue *q; in blk_mq_sysfs_show() local
70 q = ctx->queue; in blk_mq_sysfs_show()
75 mutex_lock(&q->sysfs_lock); in blk_mq_sysfs_show()
77 mutex_unlock(&q->sysfs_lock); in blk_mq_sysfs_show()
86 struct request_queue *q; in blk_mq_sysfs_store() local
91 q = ctx->queue; in blk_mq_sysfs_store()
96 mutex_lock(&q->sysfs_lock); in blk_mq_sysfs_store()
98 mutex_unlock(&q->sysfs_lock); in blk_mq_sysfs_store()
107 struct request_queue *q; in blk_mq_hw_sysfs_show() local
112 q = hctx->queue; in blk_mq_hw_sysfs_show()
[all …]
blk.h
37 blk_get_flush_queue(struct request_queue *q, struct blk_mq_ctx *ctx) in blk_get_flush_queue() argument
39 return blk_mq_map_queue(q, REQ_OP_FLUSH, ctx)->fq; in blk_get_flush_queue()
42 static inline void __blk_get_queue(struct request_queue *q) in __blk_get_queue() argument
44 kobject_get(&q->kobj); in __blk_get_queue()
51 void blk_free_flush_queue(struct blk_flush_queue *q);
53 void blk_freeze_queue(struct request_queue *q);
55 static inline bool biovec_phys_mergeable(struct request_queue *q, in biovec_phys_mergeable() argument
58 unsigned long mask = queue_segment_boundary(q); in biovec_phys_mergeable()
71 static inline bool __bvec_gap_to_prev(struct request_queue *q, in __bvec_gap_to_prev() argument
74 return (offset & queue_virt_boundary(q)) || in __bvec_gap_to_prev()
[all …]
blk-pm.h
9 static inline void blk_pm_request_resume(struct request_queue *q) in blk_pm_request_resume() argument
11 if (q->dev && (q->rpm_status == RPM_SUSPENDED || in blk_pm_request_resume()
12 q->rpm_status == RPM_SUSPENDING)) in blk_pm_request_resume()
13 pm_request_resume(q->dev); in blk_pm_request_resume()
18 if (rq->q->dev && !(rq->rq_flags & RQF_PM)) in blk_pm_mark_last_busy()
19 pm_runtime_mark_last_busy(rq->q->dev); in blk_pm_mark_last_busy()
24 lockdep_assert_held(&rq->q->queue_lock); in blk_pm_requeue_request()
26 if (rq->q->dev && !(rq->rq_flags & RQF_PM)) in blk_pm_requeue_request()
27 rq->q->nr_pending--; in blk_pm_requeue_request()
30 static inline void blk_pm_add_request(struct request_queue *q, in blk_pm_add_request() argument
[all …]
blk-mq.h
42 void blk_mq_exit_queue(struct request_queue *q);
43 int blk_mq_update_nr_requests(struct request_queue *q, unsigned int nr);
44 void blk_mq_wake_waiters(struct request_queue *q);
94 static inline struct blk_mq_hw_ctx *blk_mq_map_queue_type(struct request_queue *q, in blk_mq_map_queue_type() argument
98 return q->queue_hw_ctx[q->tag_set->map[type].mq_map[cpu]]; in blk_mq_map_queue_type()
107 static inline struct blk_mq_hw_ctx *blk_mq_map_queue(struct request_queue *q, in blk_mq_map_queue() argument
127 extern void blk_mq_sysfs_init(struct request_queue *q);
128 extern void blk_mq_sysfs_deinit(struct request_queue *q);
129 extern int __blk_mq_register_dev(struct device *dev, struct request_queue *q);
130 extern int blk_mq_sysfs_register(struct request_queue *q);
[all …]
blk-mq-debugfs.h
21 void blk_mq_debugfs_register(struct request_queue *q);
22 void blk_mq_debugfs_unregister(struct request_queue *q);
23 void blk_mq_debugfs_register_hctx(struct request_queue *q,
26 void blk_mq_debugfs_register_hctxs(struct request_queue *q);
27 void blk_mq_debugfs_unregister_hctxs(struct request_queue *q);
29 void blk_mq_debugfs_register_sched(struct request_queue *q);
30 void blk_mq_debugfs_unregister_sched(struct request_queue *q);
31 void blk_mq_debugfs_register_sched_hctx(struct request_queue *q,
37 void blk_mq_debugfs_unregister_queue_rqos(struct request_queue *q);
39 static inline void blk_mq_debugfs_register(struct request_queue *q) in blk_mq_debugfs_register() argument
[all …]
blk-mq-debugfs.c
29 struct request_queue *q = data; in queue_poll_stat_show() local
34 print_stat(m, &q->poll_stat[2 * bucket]); in queue_poll_stat_show()
38 print_stat(m, &q->poll_stat[2 * bucket + 1]); in queue_poll_stat_show()
45 __acquires(&q->requeue_lock) in queue_requeue_list_start()
47 struct request_queue *q = m->private; in queue_requeue_list_start() local
49 spin_lock_irq(&q->requeue_lock); in queue_requeue_list_start()
50 return seq_list_start(&q->requeue_list, *pos); in queue_requeue_list_start()
55 struct request_queue *q = m->private; in queue_requeue_list_next() local
57 return seq_list_next(v, &q->requeue_list, pos); in queue_requeue_list_next()
61 __releases(&q->requeue_lock) in queue_requeue_list_stop()
[all …]
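
The requeue_list debugfs file is a standard seq_file iterator over a spinlock-protected list: take the lock in start, walk with seq_list_start()/seq_list_next(), drop the lock in stop. A generic, hedged sketch of that pattern; my_lock, my_list and the my_seq_* names are placeholders:

```c
/* Generic sketch of the seq_file-over-a-locked-list pattern used by the
 * requeue_list debugfs file above; my_lock/my_list/my_seq_* are placeholders. */
#include <linux/seq_file.h>
#include <linux/spinlock.h>
#include <linux/list.h>

static DEFINE_SPINLOCK(my_lock);
static LIST_HEAD(my_list);

static void *my_seq_start(struct seq_file *m, loff_t *pos)
    __acquires(&my_lock)
{
    spin_lock_irq(&my_lock);
    return seq_list_start(&my_list, *pos);
}

static void *my_seq_next(struct seq_file *m, void *v, loff_t *pos)
{
    return seq_list_next(v, &my_list, pos);
}

static void my_seq_stop(struct seq_file *m, void *v)
    __releases(&my_lock)
{
    spin_unlock_irq(&my_lock);
}
```
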
blk-ioc.c
45 struct elevator_type *et = icq->q->elevator->type; in ioc_exit_icq()
63 struct request_queue *q = icq->q; in ioc_destroy_icq() local
64 struct elevator_type *et = q->elevator->type; in ioc_destroy_icq()
68 radix_tree_delete(&ioc->icq_tree, icq->q->id); in ioc_destroy_icq()
104 struct request_queue *q = icq->q; in ioc_release_fn() local
106 if (spin_trylock(&q->queue_lock)) { in ioc_release_fn()
108 spin_unlock(&q->queue_lock); in ioc_release_fn()
115 spin_lock(&q->queue_lock); in ioc_release_fn()
125 spin_unlock(&q->queue_lock); in ioc_release_fn()
239 void ioc_clear_queue(struct request_queue *q) in ioc_clear_queue() argument
[all …]
blk-cgroup.c
61 static bool blkcg_policy_enabled(struct request_queue *q, in blkcg_policy_enabled() argument
64 return pol && test_bit(pol->plid, q->blkcg_pols); in blkcg_policy_enabled()
151 static struct blkcg_gq *blkg_alloc(struct blkcg *blkcg, struct request_queue *q, in blkg_alloc() argument
158 blkg = kzalloc_node(sizeof(*blkg), gfp_mask, q->node); in blkg_alloc()
169 blkg->q = q; in blkg_alloc()
184 if (!blkcg_policy_enabled(q, pol)) in blkg_alloc()
188 pd = pol->pd_alloc_fn(gfp_mask, q, blkcg); in blkg_alloc()
205 struct request_queue *q, bool update_hint) in blkg_lookup_slowpath() argument
215 blkg = radix_tree_lookup(&blkcg->blkg_tree, q->id); in blkg_lookup_slowpath()
216 if (blkg && blkg->q == q) { in blkg_lookup_slowpath()
[all …]
blk-zoned.c
55 static inline sector_t blk_zone_start(struct request_queue *q, in blk_zone_start() argument
58 sector_t zone_mask = blk_queue_zone_sectors(q) - 1; in blk_zone_start()
68 if (!rq->q->seq_zones_wlock) in blk_req_needs_zone_write_lock()
89 if (test_and_set_bit(zno, rq->q->seq_zones_wlock)) in blk_req_zone_write_trylock()
102 rq->q->seq_zones_wlock))) in __blk_req_zone_write_lock()
113 if (rq->q->seq_zones_wlock) in __blk_req_zone_write_unlock()
115 rq->q->seq_zones_wlock)); in __blk_req_zone_write_unlock()
206 struct request_queue *q = bdev_get_queue(bdev); in blkdev_zone_mgmt() local
207 sector_t zone_sectors = blk_queue_zone_sectors(q); in blkdev_zone_mgmt()
213 if (!blk_queue_is_zoned(q)) in blkdev_zone_mgmt()
[all …]
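
blk_zone_start() relies on a power-of-two zone size, so the containing zone's first sector is just the request sector with the low bits masked off (zone_mask = zone_sectors - 1). A tiny userspace sketch of that arithmetic with made-up numbers:

```c
/* Userspace sketch of the blk_zone_start() arithmetic: with a power-of-two
 * zone size, masking off the low bits yields the zone's first sector.
 * The sizes below are examples only. */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
    uint64_t zone_sectors = 524288;        /* 256 MiB zones, 512-byte sectors */
    uint64_t sector = 1000000;
    uint64_t zone_mask = zone_sectors - 1;

    printf("zone start: %llu\n",
           (unsigned long long)(sector & ~zone_mask));    /* -> 524288 */
    return 0;
}
```
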
blk-mq-sched.h
11 bool blk_mq_sched_try_merge(struct request_queue *q, struct bio *bio,
13 bool __blk_mq_sched_bio_merge(struct request_queue *q, struct bio *bio,
15 bool blk_mq_sched_try_insert_merge(struct request_queue *q, struct request *rq);
27 int blk_mq_init_sched(struct request_queue *q, struct elevator_type *e);
28 void blk_mq_exit_sched(struct request_queue *q, struct elevator_queue *e);
29 void blk_mq_sched_free_requests(struct request_queue *q);
32 blk_mq_sched_bio_merge(struct request_queue *q, struct bio *bio, in blk_mq_sched_bio_merge() argument
35 if (blk_queue_nomerges(q) || !bio_mergeable(bio)) in blk_mq_sched_bio_merge()
38 return __blk_mq_sched_bio_merge(q, bio, nr_segs); in blk_mq_sched_bio_merge()
42 blk_mq_sched_allow_merge(struct request_queue *q, struct request *rq, in blk_mq_sched_allow_merge() argument
[all …]
mq-deadline-main.c
192 static void deadline_remove_request(struct request_queue *q, in deadline_remove_request() argument
204 elv_rqhash_del(q, rq); in deadline_remove_request()
205 if (q->last_merge == rq) in deadline_remove_request()
206 q->last_merge = NULL; in deadline_remove_request()
209 static void dd_request_merged(struct request_queue *q, struct request *req, in dd_request_merged() argument
212 struct deadline_data *dd = q->elevator->elevator_data; in dd_request_merged()
229 static void dd_merged_requests(struct request_queue *q, struct request *req, in dd_merged_requests() argument
232 struct deadline_data *dd = q->elevator->elevator_data; in dd_merged_requests()
255 deadline_remove_request(q, &dd->per_prio[prio], next); in dd_merged_requests()
272 deadline_remove_request(rq->q, per_prio, rq); in deadline_move_request()
[all …]
scsi_ioctl.c
46 static int scsi_get_idlun(struct request_queue *q, int __user *p) in scsi_get_idlun() argument
51 static int scsi_get_bus(struct request_queue *q, int __user *p) in scsi_get_bus() argument
56 static int sg_get_timeout(struct request_queue *q) in sg_get_timeout() argument
58 return jiffies_to_clock_t(q->sg_timeout); in sg_get_timeout()
61 static int sg_set_timeout(struct request_queue *q, int __user *p) in sg_set_timeout() argument
66 q->sg_timeout = clock_t_to_jiffies(timeout); in sg_set_timeout()
71 static int max_sectors_bytes(struct request_queue *q) in max_sectors_bytes() argument
73 unsigned int max_sectors = queue_max_sectors(q); in max_sectors_bytes()
80 static int sg_get_reserved_size(struct request_queue *q, int __user *p) in sg_get_reserved_size() argument
82 int val = min_t(int, q->sg_reserved_size, max_sectors_bytes(q)); in sg_get_reserved_size()
[all …]
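
sg_get_timeout()/sg_set_timeout() service the SG_GET_TIMEOUT and SG_SET_TIMEOUT ioctls, converting between jiffies and clock ticks as shown. A hedged userspace sketch against a hypothetical /dev/sda:

```c
/* Userspace sketch (hypothetical device /dev/sda): SG_GET_TIMEOUT /
 * SG_SET_TIMEOUT land in sg_get_timeout()/sg_set_timeout() above.
 * Values are in clock ticks (see the jiffies_to_clock_t conversion). */
#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <scsi/sg.h>

int main(void)
{
    int fd = open("/dev/sda", O_RDONLY);
    int timeout;

    if (fd < 0)
        return 1;

    timeout = ioctl(fd, SG_GET_TIMEOUT);    /* returned directly, not via pointer */
    printf("sg timeout: %d ticks\n", timeout);

    timeout = 60 * sysconf(_SC_CLK_TCK);    /* 60 seconds */
    ioctl(fd, SG_SET_TIMEOUT, &timeout);

    close(fd);
    return 0;
}
```
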
blk-stat.c
53 struct request_queue *q = rq->q; in blk_stat_add() local
65 list_for_each_entry_rcu(cb, &q->stats->callbacks, list) { in blk_stat_add()
136 void blk_stat_add_callback(struct request_queue *q, in blk_stat_add_callback() argument
151 spin_lock_irqsave(&q->stats->lock, flags); in blk_stat_add_callback()
152 list_add_tail_rcu(&cb->list, &q->stats->callbacks); in blk_stat_add_callback()
153 blk_queue_flag_set(QUEUE_FLAG_STATS, q); in blk_stat_add_callback()
154 spin_unlock_irqrestore(&q->stats->lock, flags); in blk_stat_add_callback()
157 void blk_stat_remove_callback(struct request_queue *q, in blk_stat_remove_callback() argument
162 spin_lock_irqsave(&q->stats->lock, flags); in blk_stat_remove_callback()
164 if (list_empty(&q->stats->callbacks) && !q->stats->enable_accounting) in blk_stat_remove_callback()
[all …]
blk-timeout.c
23 bool __blk_should_fake_timeout(struct request_queue *q) in __blk_should_fake_timeout() argument
55 struct request_queue *q = disk->queue; in part_timeout_store() local
60 blk_queue_flag_set(QUEUE_FLAG_FAIL_IO, q); in part_timeout_store()
62 blk_queue_flag_clear(QUEUE_FLAG_FAIL_IO, q); in part_timeout_store()
87 kblockd_schedule_work(&req->q->timeout_work); in blk_abort_request()
130 struct request_queue *q = req->q; in blk_add_timer() local
138 req->timeout = q->rq_timeout; in blk_add_timer()
152 if (!timer_pending(&q->timeout) || in blk_add_timer()
153 time_before(expiry, q->timeout.expires)) { in blk_add_timer()
154 unsigned long diff = q->timeout.expires - expiry; in blk_add_timer()
[all …]
bsg-lib.c
48 job->bidi_rq = blk_get_request(rq->q, REQ_OP_SCSI_IN, 0); in bsg_transport_fill_hdr()
54 ret = blk_rq_map_user(rq->q, job->bidi_rq, NULL, in bsg_transport_fill_hdr()
188 if (likely(!blk_should_fake_timeout(rq->q))) in bsg_job_done()
214 buf->sg_cnt = blk_rq_map_sg(req->q, req, buf->sg_list); in bsg_map_buffer()
267 struct request_queue *q = hctx->queue; in bsg_queue_rq() local
268 struct device *dev = q->queuedata; in bsg_queue_rq()
271 container_of(q->tag_set, struct bsg_set, tag_set); in bsg_queue_rq()
324 void bsg_remove_queue(struct request_queue *q) in bsg_remove_queue() argument
326 if (q) { in bsg_remove_queue()
328 container_of(q->tag_set, struct bsg_set, tag_set); in bsg_remove_queue()
[all …]
bsg.c
136 static int bsg_sg_io(struct request_queue *q, fmode_t mode, void __user *uarg) in bsg_sg_io() argument
146 if (!q->bsg_dev.class_dev) in bsg_sg_io()
151 ret = q->bsg_dev.ops->check_proto(&hdr); in bsg_sg_io()
155 rq = blk_get_request(q, hdr.dout_xfer_len ? in bsg_sg_io()
160 ret = q->bsg_dev.ops->fill_hdr(rq, &hdr, mode); in bsg_sg_io()
168 rq->timeout = q->sg_timeout; in bsg_sg_io()
175 ret = blk_rq_map_user(q, rq, NULL, uptr64(hdr.dout_xferp), in bsg_sg_io()
178 ret = blk_rq_map_user(q, rq, NULL, uptr64(hdr.din_xferp), in bsg_sg_io()
187 blk_execute_rq(q, NULL, rq, !(hdr.flags & BSG_FLAG_Q_AT_TAIL)); in bsg_sg_io()
188 ret = rq->q->bsg_dev.ops->complete_rq(rq, &hdr); in bsg_sg_io()
[all …]
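
bsg_sg_io() implements SG_IO with the v4 header on /dev/bsg nodes: it checks the protocol, allocates a request, maps any data buffer and executes it. A hedged userspace sketch issuing a TEST UNIT READY through a hypothetical /dev/bsg/0:0:0:0 node:

```c
/* Userspace sketch (hypothetical node /dev/bsg/0:0:0:0): an sg_io_v4 SG_IO
 * request, which is what bsg_sg_io() above services. TEST UNIT READY carries
 * no data, so only the CDB and sense buffer are wired up. */
#include <string.h>
#include <stdint.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <scsi/sg.h>        /* SG_IO */
#include <linux/bsg.h>      /* struct sg_io_v4 */

int main(void)
{
    unsigned char cdb[6] = { 0x00, 0, 0, 0, 0, 0 };    /* TEST UNIT READY */
    unsigned char sense[32];
    struct sg_io_v4 hdr;
    int fd;

    fd = open("/dev/bsg/0:0:0:0", O_RDWR);
    if (fd < 0)
        return 1;

    memset(&hdr, 0, sizeof(hdr));
    hdr.guard = 'Q';                       /* required for the v4 header */
    hdr.protocol = BSG_PROTOCOL_SCSI;
    hdr.subprotocol = BSG_SUB_PROTOCOL_SCSI_CMD;
    hdr.request = (uintptr_t)cdb;
    hdr.request_len = sizeof(cdb);
    hdr.response = (uintptr_t)sense;
    hdr.max_response_len = sizeof(sense);
    hdr.timeout = 10000;                   /* milliseconds */

    if (ioctl(fd, SG_IO, &hdr) < 0) {
        close(fd);
        return 1;
    }
    close(fd);
    return 0;
}
```
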
