
Lines Matching refs:q in block/blk-sysfs.c (the Linux kernel's block-layer sysfs code). Each entry gives the source line number, the matched line, and the enclosing function; only lines that reference the symbol q are shown, so function bodies appear with gaps.

44 static ssize_t queue_requests_show(struct request_queue *q, char *page)  in queue_requests_show()  argument
46 return queue_var_show(q->nr_requests, (page)); in queue_requests_show()
50 queue_requests_store(struct request_queue *q, const char *page, size_t count) in queue_requests_store() argument
55 if (!q->request_fn && !q->mq_ops) in queue_requests_store()
65 if (q->request_fn) in queue_requests_store()
66 err = blk_update_nr_requests(q, nr); in queue_requests_store()
68 err = blk_mq_update_nr_requests(q, nr); in queue_requests_store()
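The store path above only resizes the request pool when the queue has either a legacy ->request_fn or blk-mq ->mq_ops, and then dispatches to the matching resize helper. A minimal sketch of that shape, assuming the queue_var_store() parsing helper and filling in the error handling that the refs:q filter hides:

	static ssize_t
	queue_requests_store(struct request_queue *q, const char *page, size_t count)
	{
		unsigned long nr;
		int ret, err;

		/* no request pool to resize without a legacy or blk-mq backend */
		if (!q->request_fn && !q->mq_ops)
			return -EINVAL;

		ret = queue_var_store(&nr, page, count);	/* parse the decimal sysfs input */
		if (ret < 0)
			return ret;

		if (nr < BLKDEV_MIN_RQ)
			nr = BLKDEV_MIN_RQ;			/* keep a sane lower bound */

		if (q->request_fn)
			err = blk_update_nr_requests(q, nr);	/* legacy request_fn queue */
		else
			err = blk_mq_update_nr_requests(q, nr);	/* blk-mq queue */

		return err ? err : ret;
	}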
76 static ssize_t queue_ra_show(struct request_queue *q, char *page) in queue_ra_show() argument
78 unsigned long ra_kb = q->backing_dev_info.ra_pages << in queue_ra_show()
85 queue_ra_store(struct request_queue *q, const char *page, size_t count) in queue_ra_store() argument
93 q->backing_dev_info.ra_pages = ra_kb >> (PAGE_CACHE_SHIFT - 10); in queue_ra_store()
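queue_ra_show() and queue_ra_store() translate between backing_dev_info.ra_pages, which counts whole pages of read-ahead, and the KiB value exposed in sysfs; the shift amount PAGE_CACHE_SHIFT - 10 is "pages per KiB" expressed as a shift. A standalone illustration, assuming 4 KiB pages (PAGE_CACHE_SHIFT = 12):

	#include <stdio.h>

	#define PAGE_CACHE_SHIFT 12	/* assumed 4 KiB pages for this example */

	int main(void)
	{
		unsigned long ra_pages = 32;	/* a typical default read-ahead window */
		unsigned long ra_kb = ra_pages << (PAGE_CACHE_SHIFT - 10);	/* pages -> KiB: 128 */
		unsigned long back  = ra_kb    >> (PAGE_CACHE_SHIFT - 10);	/* KiB -> pages: 32 */

		printf("%lu pages = %lu KiB, round-trips to %lu pages\n",
		       ra_pages, ra_kb, back);
		return 0;
	}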
98 static ssize_t queue_max_sectors_show(struct request_queue *q, char *page) in queue_max_sectors_show() argument
100 int max_sectors_kb = queue_max_sectors(q) >> 1; in queue_max_sectors_show()
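queue_max_sectors_show() and the other size attributes keep their limits in 512-byte sectors; ">> 1" converts sectors to KiB for sysfs, and the discard and write-same show functions further down use "<< 9" to convert sectors to bytes. A self-contained example of both conversions:

	#include <stdio.h>

	int main(void)
	{
		unsigned long max_sectors = 2560;		/* example limit in 512-byte sectors */
		unsigned long max_kb = max_sectors >> 1;	/* 1280 KiB (two sectors per KiB) */
		unsigned long long max_bytes =
			(unsigned long long)max_sectors << 9;	/* 1310720 bytes (512 per sector) */

		printf("%lu sectors = %lu KiB = %llu bytes\n",
		       max_sectors, max_kb, max_bytes);
		return 0;
	}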
105 static ssize_t queue_max_segments_show(struct request_queue *q, char *page) in queue_max_segments_show() argument
107 return queue_var_show(queue_max_segments(q), (page)); in queue_max_segments_show()
110 static ssize_t queue_max_integrity_segments_show(struct request_queue *q, char *page) in queue_max_integrity_segments_show() argument
112 return queue_var_show(q->limits.max_integrity_segments, (page)); in queue_max_integrity_segments_show()
115 static ssize_t queue_max_segment_size_show(struct request_queue *q, char *page) in queue_max_segment_size_show() argument
117 if (blk_queue_cluster(q)) in queue_max_segment_size_show()
118 return queue_var_show(queue_max_segment_size(q), (page)); in queue_max_segment_size_show()
123 static ssize_t queue_logical_block_size_show(struct request_queue *q, char *page) in queue_logical_block_size_show() argument
125 return queue_var_show(queue_logical_block_size(q), page); in queue_logical_block_size_show()
128 static ssize_t queue_physical_block_size_show(struct request_queue *q, char *page) in queue_physical_block_size_show() argument
130 return queue_var_show(queue_physical_block_size(q), page); in queue_physical_block_size_show()
133 static ssize_t queue_io_min_show(struct request_queue *q, char *page) in queue_io_min_show() argument
135 return queue_var_show(queue_io_min(q), page); in queue_io_min_show()
138 static ssize_t queue_io_opt_show(struct request_queue *q, char *page) in queue_io_opt_show() argument
140 return queue_var_show(queue_io_opt(q), page); in queue_io_opt_show()
143 static ssize_t queue_discard_granularity_show(struct request_queue *q, char *page) in queue_discard_granularity_show() argument
145 return queue_var_show(q->limits.discard_granularity, page); in queue_discard_granularity_show()
148 static ssize_t queue_discard_max_hw_show(struct request_queue *q, char *page) in queue_discard_max_hw_show() argument
152 val = q->limits.max_hw_discard_sectors << 9; in queue_discard_max_hw_show()
156 static ssize_t queue_discard_max_show(struct request_queue *q, char *page) in queue_discard_max_show() argument
159 (unsigned long long)q->limits.max_discard_sectors << 9); in queue_discard_max_show()
162 static ssize_t queue_discard_max_store(struct request_queue *q, in queue_discard_max_store() argument
171 if (max_discard & (q->limits.discard_granularity - 1)) in queue_discard_max_store()
178 if (max_discard > q->limits.max_hw_discard_sectors) in queue_discard_max_store()
179 max_discard = q->limits.max_hw_discard_sectors; in queue_discard_max_store()
181 q->limits.max_discard_sectors = max_discard; in queue_discard_max_store()
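queue_discard_max_store() rejects values that are not aligned to the discard granularity (the "x & (granularity - 1)" test, which assumes a power-of-two granularity) and clamps the request to the hardware limit before writing it into q->limits.max_discard_sectors. A small self-contained model of those two checks, with invented values:

	#include <stdio.h>

	/* Stand-in for the alignment check and clamp visible in
	 * queue_discard_max_store(); -1 plays the role of -EINVAL. */
	static int store_max_discard(unsigned long long granularity,
				     unsigned long long hw_limit,
				     unsigned long long requested,
				     unsigned long long *out)
	{
		if (requested & (granularity - 1))	/* only valid for power-of-two granularity */
			return -1;
		if (requested > hw_limit)
			requested = hw_limit;		/* never exceed what the device reports */
		*out = requested;
		return 0;
	}

	int main(void)
	{
		unsigned long long v;

		if (store_max_discard(4096, 1ULL << 20, 8192, &v) == 0)
			printf("stored %llu\n", v);	/* prints: stored 8192 */
		return 0;
	}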
185 static ssize_t queue_discard_zeroes_data_show(struct request_queue *q, char *page) in queue_discard_zeroes_data_show() argument
187 return queue_var_show(queue_discard_zeroes_data(q), page); in queue_discard_zeroes_data_show()
190 static ssize_t queue_write_same_max_show(struct request_queue *q, char *page) in queue_write_same_max_show() argument
193 (unsigned long long)q->limits.max_write_same_sectors << 9); in queue_write_same_max_show()
198 queue_max_sectors_store(struct request_queue *q, const char *page, size_t count) in queue_max_sectors_store() argument
201 max_hw_sectors_kb = queue_max_hw_sectors(q) >> 1, in queue_max_sectors_store()
209 q->limits.max_dev_sectors >> 1); in queue_max_sectors_store()
214 spin_lock_irq(q->queue_lock); in queue_max_sectors_store()
215 q->limits.max_sectors = max_sectors_kb << 1; in queue_max_sectors_store()
216 spin_unlock_irq(q->queue_lock); in queue_max_sectors_store()
221 static ssize_t queue_max_hw_sectors_show(struct request_queue *q, char *page) in queue_max_hw_sectors_show() argument
223 int max_hw_sectors_kb = queue_max_hw_sectors(q) >> 1; in queue_max_hw_sectors_show()
230 queue_show_##name(struct request_queue *q, char *page) \
233 bit = test_bit(QUEUE_FLAG_##flag, &q->queue_flags); \
237 queue_store_##name(struct request_queue *q, const char *page, size_t count) \
247 spin_lock_irq(q->queue_lock); \
249 queue_flag_set(QUEUE_FLAG_##flag, q); \
251 queue_flag_clear(QUEUE_FLAG_##flag, q); \
252 spin_unlock_irq(q->queue_lock); \
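The queue_show_##name / queue_store_##name lines above are emitted by a single macro (QUEUE_SYSFS_BIT_FNS in this file): each expansion token-pastes the attribute name into a matched show/store pair that tests or updates one QUEUE_FLAG_* bit, with the update done under q->queue_lock. A standalone sketch of the same generation pattern, using invented names and a plain flags word instead of a request_queue:

	#include <stdio.h>

	static unsigned long queue_flags;	/* stands in for q->queue_flags */

	#define DEFINE_BIT_FNS(name, bit)				\
	static int show_##name(void)					\
	{								\
		return !!(queue_flags & (1UL << (bit)));		\
	}								\
	static void store_##name(int val)				\
	{								\
		if (val)						\
			queue_flags |= 1UL << (bit);			\
		else							\
			queue_flags &= ~(1UL << (bit));			\
	}

	DEFINE_BIT_FNS(nonrot, 0)	/* each expansion yields one show/store pair */
	DEFINE_BIT_FNS(iostats, 1)

	int main(void)
	{
		store_nonrot(1);
		printf("nonrot=%d iostats=%d\n", show_nonrot(), show_iostats());
		return 0;
	}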
261 static ssize_t queue_nomerges_show(struct request_queue *q, char *page) in queue_nomerges_show() argument
263 return queue_var_show((blk_queue_nomerges(q) << 1) | in queue_nomerges_show()
264 blk_queue_noxmerges(q), page); in queue_nomerges_show()
267 static ssize_t queue_nomerges_store(struct request_queue *q, const char *page, in queue_nomerges_store() argument
276 spin_lock_irq(q->queue_lock); in queue_nomerges_store()
277 queue_flag_clear(QUEUE_FLAG_NOMERGES, q); in queue_nomerges_store()
278 queue_flag_clear(QUEUE_FLAG_NOXMERGES, q); in queue_nomerges_store()
280 queue_flag_set(QUEUE_FLAG_NOMERGES, q); in queue_nomerges_store()
282 queue_flag_set(QUEUE_FLAG_NOXMERGES, q); in queue_nomerges_store()
283 spin_unlock_irq(q->queue_lock); in queue_nomerges_store()
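queue_nomerges_show() packs two flags into one value, (NOMERGES << 1) | NOXMERGES, so userspace reads 0 (merging enabled), 1 (only simple one-hit merges, NOXMERGES set) or 2 (merging disabled, NOMERGES set); the store path clears both flags and then sets at most one of them. A self-contained model of that encoding (flag bit positions invented):

	#include <stdio.h>

	enum { FLAG_NOMERGES = 1 << 0, FLAG_NOXMERGES = 1 << 1 };

	static unsigned int nomerges_show(unsigned long flags)
	{
		int nomerges  = !!(flags & FLAG_NOMERGES);
		int noxmerges = !!(flags & FLAG_NOXMERGES);

		return (nomerges << 1) | noxmerges;	/* same shape as queue_nomerges_show() */
	}

	static unsigned long nomerges_store(unsigned int val)
	{
		unsigned long flags = 0;		/* both flags cleared first */

		if (val == 2)
			flags |= FLAG_NOMERGES;
		else if (val == 1)
			flags |= FLAG_NOXMERGES;
		return flags;
	}

	int main(void)
	{
		for (unsigned int v = 0; v <= 2; v++)
			printf("store %u -> show %u\n", v, nomerges_show(nomerges_store(v)));
		return 0;
	}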
288 static ssize_t queue_rq_affinity_show(struct request_queue *q, char *page) in queue_rq_affinity_show() argument
290 bool set = test_bit(QUEUE_FLAG_SAME_COMP, &q->queue_flags); in queue_rq_affinity_show()
291 bool force = test_bit(QUEUE_FLAG_SAME_FORCE, &q->queue_flags); in queue_rq_affinity_show()
297 queue_rq_affinity_store(struct request_queue *q, const char *page, size_t count) in queue_rq_affinity_store() argument
307 spin_lock_irq(q->queue_lock); in queue_rq_affinity_store()
309 queue_flag_set(QUEUE_FLAG_SAME_COMP, q); in queue_rq_affinity_store()
310 queue_flag_set(QUEUE_FLAG_SAME_FORCE, q); in queue_rq_affinity_store()
312 queue_flag_set(QUEUE_FLAG_SAME_COMP, q); in queue_rq_affinity_store()
313 queue_flag_clear(QUEUE_FLAG_SAME_FORCE, q); in queue_rq_affinity_store()
315 queue_flag_clear(QUEUE_FLAG_SAME_COMP, q); in queue_rq_affinity_store()
316 queue_flag_clear(QUEUE_FLAG_SAME_FORCE, q); in queue_rq_affinity_store()
318 spin_unlock_irq(q->queue_lock); in queue_rq_affinity_store()
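queue_rq_affinity_store() maps the written value onto two flags: 2 sets SAME_COMP and SAME_FORCE (complete on the exact submitting CPU), 1 sets SAME_COMP and clears SAME_FORCE, and anything else clears both; the show side then reports 2, 1 or 0. A compact model of that mapping, again with an invented flags word:

	#include <stdio.h>

	enum { SAME_COMP = 1 << 0, SAME_FORCE = 1 << 1 };

	static unsigned long rq_affinity_store(unsigned long val)
	{
		if (val == 2)
			return SAME_COMP | SAME_FORCE;	/* complete on the submitting CPU */
		if (val == 1)
			return SAME_COMP;		/* complete within the submitter's group */
		return 0;				/* no completion affinity */
	}

	static unsigned long rq_affinity_show(unsigned long flags)
	{
		if (flags & SAME_FORCE)
			return 2;
		return (flags & SAME_COMP) ? 1 : 0;
	}

	int main(void)
	{
		for (unsigned long v = 0; v <= 2; v++)
			printf("store %lu -> show %lu\n", v,
			       rq_affinity_show(rq_affinity_store(v)));
		return 0;
	}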
323 static ssize_t queue_poll_show(struct request_queue *q, char *page) in queue_poll_show() argument
325 return queue_var_show(test_bit(QUEUE_FLAG_POLL, &q->queue_flags), page); in queue_poll_show()
328 static ssize_t queue_poll_store(struct request_queue *q, const char *page, in queue_poll_store() argument
334 if (!q->mq_ops || !q->mq_ops->poll) in queue_poll_store()
341 spin_lock_irq(q->queue_lock); in queue_poll_store()
343 queue_flag_set(QUEUE_FLAG_POLL, q); in queue_poll_store()
345 queue_flag_clear(QUEUE_FLAG_POLL, q); in queue_poll_store()
346 spin_unlock_irq(q->queue_lock); in queue_poll_store()
516 struct request_queue *q = in queue_attr_show() local
522 mutex_lock(&q->sysfs_lock); in queue_attr_show()
523 if (blk_queue_dying(q)) { in queue_attr_show()
524 mutex_unlock(&q->sysfs_lock); in queue_attr_show()
527 res = entry->show(q, page); in queue_attr_show()
528 mutex_unlock(&q->sysfs_lock); in queue_attr_show()
537 struct request_queue *q; in queue_attr_store() local
543 q = container_of(kobj, struct request_queue, kobj); in queue_attr_store()
544 mutex_lock(&q->sysfs_lock); in queue_attr_store()
545 if (blk_queue_dying(q)) { in queue_attr_store()
546 mutex_unlock(&q->sysfs_lock); in queue_attr_store()
549 res = entry->store(q, page, length); in queue_attr_store()
550 mutex_unlock(&q->sysfs_lock); in queue_attr_store()
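queue_attr_show() and queue_attr_store() share one guard: recover the request_queue from its embedded kobject with container_of(), serialise on q->sysfs_lock, and bail out once the queue has been marked dying, before dispatching to the per-attribute handler. A sketch of the store side, assuming the to_queue() attribute-to-entry helper used in this file and -ENOENT as the dying-queue error:

	static ssize_t
	queue_attr_store(struct kobject *kobj, struct attribute *attr,
			 const char *page, size_t length)
	{
		struct queue_sysfs_entry *entry = to_queue(attr);
		struct request_queue *q;
		ssize_t res;

		if (!entry->store)
			return -EIO;		/* read-only attribute */

		q = container_of(kobj, struct request_queue, kobj);
		mutex_lock(&q->sysfs_lock);
		if (blk_queue_dying(q)) {	/* queue is being torn down */
			mutex_unlock(&q->sysfs_lock);
			return -ENOENT;
		}
		res = entry->store(q, page, length);
		mutex_unlock(&q->sysfs_lock);
		return res;
	}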
556 struct request_queue *q = container_of(rcu_head, struct request_queue, in blk_free_queue_rcu() local
558 kmem_cache_free(blk_requestq_cachep, q); in blk_free_queue_rcu()
578 struct request_queue *q = in blk_release_queue() local
581 bdi_exit(&q->backing_dev_info); in blk_release_queue()
582 blkcg_exit_queue(q); in blk_release_queue()
584 if (q->elevator) { in blk_release_queue()
585 spin_lock_irq(q->queue_lock); in blk_release_queue()
586 ioc_clear_queue(q); in blk_release_queue()
587 spin_unlock_irq(q->queue_lock); in blk_release_queue()
588 elevator_exit(q->elevator); in blk_release_queue()
591 blk_exit_rl(&q->root_rl); in blk_release_queue()
593 if (q->queue_tags) in blk_release_queue()
594 __blk_queue_free_tags(q); in blk_release_queue()
596 if (!q->mq_ops) in blk_release_queue()
597 blk_free_flush_queue(q->fq); in blk_release_queue()
599 blk_mq_release(q); in blk_release_queue()
601 blk_trace_shutdown(q); in blk_release_queue()
603 if (q->bio_split) in blk_release_queue()
604 bioset_free(q->bio_split); in blk_release_queue()
606 ida_simple_remove(&blk_queue_ida, q->id); in blk_release_queue()
607 call_rcu(&q->rcu_head, blk_free_queue_rcu); in blk_release_queue()
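blk_release_queue() tears down the elevator, request list, tags, flush queue and blk-mq state, but frees the request_queue itself only after an RCU grace period: call_rcu() on the embedded rcu_head defers to blk_free_queue_rcu(), which recovers the containing structure and returns it to its slab cache, so lookups still inside an RCU read-side critical section can finish safely. The callback as suggested by the matched lines:

	static void blk_free_queue_rcu(struct rcu_head *rcu_head)
	{
		struct request_queue *q = container_of(rcu_head, struct request_queue,
						       rcu_head);

		kmem_cache_free(blk_requestq_cachep, q);
	}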
625 struct request_queue *q = disk->queue; in blk_register_queue() local
627 if (WARN_ON(!q)) in blk_register_queue()
639 if (!blk_queue_init_done(q)) { in blk_register_queue()
640 queue_flag_set_unlocked(QUEUE_FLAG_INIT_DONE, q); in blk_register_queue()
641 percpu_ref_switch_to_percpu(&q->q_usage_counter); in blk_register_queue()
642 blk_queue_bypass_end(q); in blk_register_queue()
649 ret = kobject_add(&q->kobj, kobject_get(&dev->kobj), "%s", "queue"); in blk_register_queue()
655 kobject_uevent(&q->kobj, KOBJ_ADD); in blk_register_queue()
657 if (q->mq_ops) in blk_register_queue()
660 if (!q->request_fn) in blk_register_queue()
663 ret = elv_register_queue(q); in blk_register_queue()
665 kobject_uevent(&q->kobj, KOBJ_REMOVE); in blk_register_queue()
666 kobject_del(&q->kobj); in blk_register_queue()
677 struct request_queue *q = disk->queue; in blk_unregister_queue() local
679 if (WARN_ON(!q)) in blk_unregister_queue()
682 if (q->mq_ops) in blk_unregister_queue()
685 if (q->request_fn) in blk_unregister_queue()
686 elv_unregister_queue(q); in blk_unregister_queue()
688 kobject_uevent(&q->kobj, KOBJ_REMOVE); in blk_unregister_queue()
689 kobject_del(&q->kobj); in blk_unregister_queue()