/drivers/scsi/ |
D | scsi_lib.c |
    250   req = blk_get_request(sdev->request_queue,  in __scsi_execute()
    258   if (bufflen && blk_rq_map_kern(sdev->request_queue, req,  in __scsi_execute()
    334   static void scsi_kick_queue(struct request_queue *q)  in scsi_kick_queue()
    363   scsi_kick_queue(current_sdev->request_queue);  in scsi_single_lun_run()
    376   scsi_kick_queue(sdev->request_queue);  in scsi_single_lun_run()
    424   struct request_queue *slq;  in scsi_starved_list_run()
    458   slq = sdev->request_queue;  in scsi_starved_list_run()
    479   static void scsi_run_queue(struct request_queue *q)  in scsi_run_queue()
    494   struct request_queue *q;  in scsi_requeue_run_queue()
    497   q = sdev->request_queue;  in scsi_requeue_run_queue()
    [all …]
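The scsi_lib.c hits above are the __scsi_execute() path: take a request from the device's request_queue, map a kernel buffer into it, and run it synchronously. A minimal sketch of that pattern follows, assuming the pre-5.14 scsi_request/blk_get_request interface; example_issue_cdb() is a hypothetical helper, not code from scsi_lib.c, and error handling is trimmed.

/*
 * Sketch of the __scsi_execute() pattern (pre-5.14 block API); illustrative only.
 */
#include <linux/blkdev.h>
#include <linux/err.h>
#include <linux/string.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_request.h>

static int example_issue_cdb(struct scsi_device *sdev, const unsigned char *cmd,
                             int cmd_len, void *buf, unsigned int buflen,
                             int write)
{
        struct request *req;
        struct scsi_request *rq;
        int ret;

        req = blk_get_request(sdev->request_queue,
                              write ? REQ_OP_SCSI_OUT : REQ_OP_SCSI_IN, 0);
        if (IS_ERR(req))
                return PTR_ERR(req);
        rq = scsi_req(req);

        if (buflen) {
                ret = blk_rq_map_kern(sdev->request_queue, req, buf, buflen,
                                      GFP_NOIO);
                if (ret)
                        goto out;
        }

        rq->cmd_len = cmd_len;
        memcpy(rq->cmd, cmd, cmd_len);
        rq->retries = 3;

        /* Run the request synchronously at the head of the queue. */
        blk_execute_rq(req->q, NULL, req, 1);
        ret = rq->result;
out:
        blk_put_request(req);
        return ret;
}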
|
D | scsi_pm.c |
    151   err = blk_pre_runtime_suspend(sdev->request_queue);  in sdev_runtime_suspend()
    156   blk_post_runtime_suspend(sdev->request_queue, err);  in sdev_runtime_suspend()
    180   blk_pre_runtime_resume(sdev->request_queue);  in sdev_runtime_resume()
    183   blk_post_runtime_resume(sdev->request_queue);  in sdev_runtime_resume()
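These calls form the block-layer runtime-PM handshake: the queue is asked whether it is idle before the bus-level suspend runs, and is told about the outcome afterwards. A sketch of the suspend half, assuming the caller supplies the bus-level callback; example_sdev_runtime_suspend() is illustrative, not the scsi_pm.c function.

#include <linux/blk-pm.h>
#include <scsi/scsi_device.h>

static int example_sdev_runtime_suspend(struct scsi_device *sdev,
                                        int (*bus_suspend)(struct scsi_device *))
{
        int err;

        /* Refused (-EBUSY) while requests are still in flight on the queue. */
        err = blk_pre_runtime_suspend(sdev->request_queue);
        if (err)
                return err;

        err = bus_suspend(sdev);

        /* Tell the queue whether the suspend actually happened. */
        blk_post_runtime_suspend(sdev->request_queue, err);
        return err;
}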
|
D | scsi_dh.c |
    251   int scsi_dh_activate(struct request_queue *q, activate_complete fn, void *data)  in scsi_dh_activate()
    298   int scsi_dh_set_params(struct request_queue *q, const char *params)  in scsi_dh_set_params()
    320   int scsi_dh_attach(struct request_queue *q, const char *name)  in scsi_dh_attach()
    359   const char *scsi_dh_attached_handler_name(struct request_queue *q, gfp_t gfp)  in scsi_dh_attached_handler_name()
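scsi_dh_attach() and friends key device-handler operations off the scsi_device's request_queue. A hedged sketch of how a caller might attach a handler by name; the handler choice and queue lookup are illustrative (dm-mpath does something similar with the name from its table line).

#include <linux/blkdev.h>
#include <scsi/scsi_dh.h>

static int example_attach_handler(struct block_device *bdev, const char *name)
{
        struct request_queue *q = bdev_get_queue(bdev);

        /* e.g. name = "alua"; fails if a different handler is already bound. */
        return scsi_dh_attach(q, name);
}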
|
D | scsi_priv.h |
    8     struct request_queue;
    92    extern struct request_queue *scsi_mq_alloc_queue(struct scsi_device *sdev);
    98    struct request_queue;
|
D | scsi_sysfs.c |
    479   blk_put_queue(sdev->request_queue);  in scsi_device_dev_release_usercontext()
    481   sdev->request_queue = NULL;  in scsi_device_dev_release_usercontext()
    703   return snprintf(buf, 20, "%d\n", sdev->request_queue->rq_timeout / HZ);  in sdev_show_timeout()
    714   blk_queue_rq_timeout(sdev->request_queue, timeout * HZ);  in sdev_store_timeout()
    844   blk_mq_run_hw_queues(sdev->request_queue, true);  in store_state_field()
    1130  err = scsi_dh_attach(sdev->request_queue, buf);  in sdev_store_dh_state()
    1361  struct request_queue *rq = sdev->request_queue;  in scsi_sysfs_add_sdev()
    1467  bsg_unregister_queue(sdev->request_queue);  in __scsi_remove_device()
    1483  blk_cleanup_queue(sdev->request_queue);  in __scsi_remove_device()
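The sdev_show_timeout()/sdev_store_timeout() pair above shows how a sysfs attribute exposes a queue property: rq_timeout is kept in jiffies on the request_queue and converted to and from seconds at the sysfs boundary. A sketch with illustrative attribute and function names:

#include <linux/blkdev.h>
#include <linux/device.h>
#include <linux/kernel.h>
#include <scsi/scsi_device.h>

static ssize_t example_timeout_show(struct device *dev,
                                    struct device_attribute *attr, char *buf)
{
        struct scsi_device *sdev = to_scsi_device(dev);

        return snprintf(buf, 20, "%d\n", sdev->request_queue->rq_timeout / HZ);
}

static ssize_t example_timeout_store(struct device *dev,
                                     struct device_attribute *attr,
                                     const char *buf, size_t count)
{
        struct scsi_device *sdev = to_scsi_device(dev);
        int timeout;

        if (kstrtoint(buf, 10, &timeout) || timeout <= 0)
                return -EINVAL;

        blk_queue_rq_timeout(sdev->request_queue, timeout * HZ);
        return count;
}
static DEVICE_ATTR(example_timeout, 0644, example_timeout_show,
                   example_timeout_store);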
|
D | sg.c |
    287   struct request_queue *q;  in sg_open()
    347   q = sdp->device->request_queue;  in sg_open()
    829   blk_execute_rq_nowait(sdp->device->request_queue, sdp->disk,  in sg_common_write()
    845   static int max_sectors_bytes(struct request_queue *q)  in max_sectors_bytes()
    1027  max_sectors_bytes(sdp->device->request_queue));  in sg_ioctl_common()
    1043  max_sectors_bytes(sdp->device->request_queue));  in sg_ioctl_common()
    1104  return sg_scsi_ioctl(sdp->device->request_queue, NULL, filp->f_mode, p);  in sg_ioctl_common()
    1112  return put_user(max_sectors_bytes(sdp->device->request_queue),  in sg_ioctl_common()
    1115  return blk_trace_setup(sdp->device->request_queue,  in sg_ioctl_common()
    1120  return blk_trace_startstop(sdp->device->request_queue, 1);  in sg_ioctl_common()
    [all …]
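max_sectors_bytes() converts the queue's transfer limit from 512-byte sectors to bytes for sg's size-reporting ioctls. A sketch of that conversion, mirroring the listed helper; treat it as illustrative.

#include <linux/blkdev.h>
#include <linux/kernel.h>
#include <linux/minmax.h>

/* Largest transfer the queue accepts, in bytes, clamped to fit in an int. */
static int example_max_sectors_bytes(struct request_queue *q)
{
        unsigned int max_sectors = queue_max_sectors(q);

        max_sectors = min_t(unsigned int, max_sectors, INT_MAX >> 9);

        return max_sectors << 9;
}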
|
D | st.c |
    550   req = blk_get_request(SRpnt->stp->device->request_queue,  in st_scsi_execute()
    747   STp->device->request_queue->rq_timeout,  in cross_eof()
    792   STp->device->request_queue->rq_timeout,  in st_flush_write_buffer()
    1114  STp->device->request_queue->rq_timeout,  in check_tape()
    1141  STp->device->request_queue->rq_timeout,  in check_tape()
    1388  STp->device->request_queue->rq_timeout,  in st_flush()
    1556  STp->device->request_queue)) == 0) {  in setup_buffering()
    1776  STp->device->request_queue->rq_timeout,  in st_write()
    1946  STp->device->request_queue->rq_timeout,  in read_tape()
    2378  blk_queue_rq_timeout(STp->device->request_queue,
    [all …]
|
D | sd_zbc.c |
    98    const int timeout = sdp->request_queue->rq_timeout;  in sd_zbc_do_report_zones()
    152   struct request_queue *q = sdkp->disk->queue;  in sd_zbc_alloc_report_buffer()
    686   struct request_queue *q = disk->queue;  in sd_zbc_revalidate_zones()
    764   struct request_queue *q = disk->queue;  in sd_zbc_read_zones()
|
/drivers/usb/storage/ |
D | scsiglue.c |
    83    blk_queue_update_dma_alignment(sdev->request_queue, (512 - 1));  in slave_alloc()
    107   if (queue_max_hw_sectors(sdev->request_queue) > max_sectors)  in slave_configure()
    108   blk_queue_max_hw_sectors(sdev->request_queue,  in slave_configure()
    116   blk_queue_max_hw_sectors(sdev->request_queue, 0x7FFFFF);  in slave_configure()
    122   blk_queue_max_hw_sectors(sdev->request_queue, 2048);  in slave_configure()
    129   blk_queue_max_hw_sectors(sdev->request_queue,  in slave_configure()
    130   min_t(size_t, queue_max_hw_sectors(sdev->request_queue),  in slave_configure()
    140   blk_queue_bounce_limit(sdev->request_queue, BLK_BOUNCE_HIGH);  in slave_configure()
    587   return sprintf(buf, "%u\n", queue_max_hw_sectors(sdev->request_queue));  in max_sectors_show()
    598   blk_queue_max_hw_sectors(sdev->request_queue, ms);  in max_sectors_store()
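The usb-storage slave_configure() hits show the usual way a transport clamps per-command transfer size: read the current limit with queue_max_hw_sectors() and lower it with blk_queue_max_hw_sectors() only if needed. A sketch with illustrative names and an arbitrary example limit:

#include <linux/blkdev.h>
#include <scsi/scsi_device.h>

#define EXAMPLE_MAX_SECTORS 240 /* hypothetical 120 KiB per-command limit */

static int example_slave_configure(struct scsi_device *sdev)
{
        struct request_queue *q = sdev->request_queue;

        /* Keep buffers 512-byte aligned for the transport's DMA engine. */
        blk_queue_update_dma_alignment(q, 512 - 1);

        /* Only ever lower the limit; never raise it past the HBA's value. */
        if (queue_max_hw_sectors(q) > EXAMPLE_MAX_SECTORS)
                blk_queue_max_hw_sectors(q, EXAMPLE_MAX_SECTORS);

        return 0;
}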
|
/drivers/mmc/core/ |
D | crypto.h |
    13    struct request_queue;
    19    void mmc_crypto_setup_queue(struct request_queue *q, struct mmc_host *host);
    29    static inline void mmc_crypto_setup_queue(struct request_queue *q,  in mmc_crypto_setup_queue()
|
D | queue.c |
    88    struct request_queue *q = req->q;  in mmc_cqe_recovery_notifier()
    125   struct request_queue *q = req->q;  in mmc_mq_timed_out()
    143   struct request_queue *q = mq->queue;  in mmc_mq_recovery_handler()
    180   static void mmc_queue_setup_discard(struct request_queue *q,  in mmc_queue_setup_discard()
    225   static void mmc_exit_request(struct request_queue *q, struct request *req)  in mmc_exit_request()
    251   struct request_queue *q = req->q;  in mmc_mq_queue_rq()
    512   struct request_queue *q = mq->queue;  in mmc_cleanup_queue()
|
D | crypto.c | 22 void mmc_crypto_setup_queue(struct request_queue *q, struct mmc_host *host) in mmc_crypto_setup_queue()
|
/drivers/char/ipmi/ |
D | ipmb_dev_int.c |
    60    struct list_head request_queue;  member
    86    while (list_empty(&ipmb_dev->request_queue)) {  in ipmb_read()
    93    !list_empty(&ipmb_dev->request_queue));  in ipmb_read()
    100   queue_elem = list_first_entry(&ipmb_dev->request_queue,  in ipmb_read()
    217   list_add(&queue_elem->list, &ipmb_dev->request_queue);  in ipmb_handle_request()
    316   INIT_LIST_HEAD(&ipmb_dev->request_queue);  in ipmb_probe()
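In ipmb_dev_int.c, request_queue is not the block-layer structure but a plain list_head used as a message queue, with a wait queue for blocking readers. A simplified sketch of the enqueue/dequeue pattern, with illustrative names; the real driver serializes readers with a mutex and re-checks emptiness in a loop.

#include <linux/err.h>
#include <linux/errno.h>
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/types.h>
#include <linux/wait.h>

struct example_msg {
        struct list_head list;
        u8 data[32];
};

struct example_dev {
        struct list_head request_queue;
        spinlock_t lock;
        wait_queue_head_t wait;
};

static void example_dev_init(struct example_dev *dev)
{
        INIT_LIST_HEAD(&dev->request_queue);
        spin_lock_init(&dev->lock);
        init_waitqueue_head(&dev->wait);
}

/* Producer side, e.g. an interrupt-driven receive path. */
static void example_enqueue(struct example_dev *dev, struct example_msg *msg)
{
        spin_lock(&dev->lock);
        list_add_tail(&msg->list, &dev->request_queue);
        spin_unlock(&dev->lock);
        wake_up_all(&dev->wait);
}

/* Consumer side, e.g. a blocking read(); assumes a single reader. */
static struct example_msg *example_dequeue(struct example_dev *dev)
{
        struct example_msg *msg;

        if (wait_event_interruptible(dev->wait,
                                     !list_empty(&dev->request_queue)))
                return ERR_PTR(-ERESTARTSYS);

        spin_lock(&dev->lock);
        msg = list_first_entry(&dev->request_queue, struct example_msg, list);
        list_del(&msg->list);
        spin_unlock(&dev->lock);
        return msg;
}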
|
/drivers/block/null_blk/ |
D | null_blk.h |
    82    struct request_queue *q;
    102   int null_init_zoned_dev(struct nullb_device *dev, struct request_queue *q);
    114   struct request_queue *q)  in null_init_zoned_dev()
|
/drivers/md/ |
D | dm-rq.h |
    36    void dm_start_queue(struct request_queue *q);
    37    void dm_stop_queue(struct request_queue *q);
|
D | dm-table.c |
    432   struct request_queue *q = bdev_get_queue(bdev);  in dm_set_device_limits()
    884   struct request_queue *q = bdev_get_queue(bdev);  in device_is_rq_stackable()
    1281  struct request_queue *q = bdev_get_queue(dev->bdev);  in dm_derive_raw_secret_callback()
    1456  static void dm_update_keyslot_manager(struct request_queue *q,  in dm_update_keyslot_manager()
    1487  static void dm_update_keyslot_manager(struct request_queue *q,  in dm_update_keyslot_manager()
    1671  struct request_queue *q = bdev_get_queue(dev->bdev);  in device_not_zoned_model()
    1710  struct request_queue *q = bdev_get_queue(dev->bdev);  in device_not_matches_zone_sectors()
    1868  struct request_queue *q = bdev_get_queue(dev->bdev);  in device_flush_capable()
    1918  struct request_queue *q = bdev_get_queue(dev->bdev);  in device_is_rotational()
    1926  struct request_queue *q = bdev_get_queue(dev->bdev);  in device_is_not_random()
    [all …]
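Most dm-table.c hits follow the same shape: an iterate_devices callback maps each underlying dm_dev to its request_queue with bdev_get_queue() and tests a queue property. A sketch of one such callback; the name is illustrative, and dm-table.c has variants for rotational, flush, zoned model, and so on.

#include <linux/blkdev.h>
#include <linux/device-mapper.h>

static int example_device_is_rotational(struct dm_target *ti,
                                        struct dm_dev *dev, sector_t start,
                                        sector_t len, void *data)
{
        struct request_queue *q = bdev_get_queue(dev->bdev);

        /* True unless the underlying queue is flagged QUEUE_FLAG_NONROT. */
        return q && !blk_queue_nonrot(q);
}

/*
 * A table-wide check would then run something like:
 *      ti->type->iterate_devices(ti, example_device_is_rotational, NULL);
 */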
|
D | dm-rq.c |
    65    void dm_start_queue(struct request_queue *q)  in dm_start_queue()
    71    void dm_stop_queue(struct request_queue *q)  in dm_stop_queue()
    171   static void __dm_mq_kick_requeue_list(struct request_queue *q, unsigned long msecs)  in __dm_mq_kick_requeue_list()
    538   struct request_queue *q;  in dm_mq_init_request_queue()
|
/drivers/nvme/host/ |
D | nvme.h |
    248   struct request_queue *admin_q;
    249   struct request_queue *connect_q;
    250   struct request_queue *fabrics_q;
    441   struct request_queue *queue;
    684   struct request *nvme_alloc_request(struct request_queue *q,
    686   struct request *nvme_alloc_request_qid(struct request_queue *q,
    691   int nvme_submit_sync_cmd(struct request_queue *q, struct nvme_command *cmd,
    693   int __nvme_submit_sync_cmd(struct request_queue *q, struct nvme_command *cmd,
|
D | zns.c |
    12    struct request_queue *q = ns->queue;  in nvme_revalidate_zones()
    52    struct request_queue *q = ns->queue;  in nvme_update_zone_info()
    119   struct request_queue *q = ns->disk->queue;  in nvme_zns_alloc_report_buffer()
|
/drivers/scsi/ufs/ |
D | ufshcd-crypto.h |
    57    struct request_queue *q);
    84    struct request_queue *q) { }  in ufshcd_crypto_setup_rq_keyslot_manager()
|
/drivers/ide/ |
D | ide-pm.c |
    45    struct request_queue *q = rq->q;  in ide_pm_execute_rq()
    202   struct request_queue *q = drive->queue;  in ide_complete_pm_rq()
    245   struct request_queue *q = drive->queue;  in ide_check_pm_state()
|
/drivers/nvdimm/ |
D | pmem.c |
    337   struct request_queue *q =  in pmem_pagemap_cleanup()
    338   container_of(pgmap->ref, struct request_queue, q_usage_counter);  in pmem_pagemap_cleanup()
    350   struct request_queue *q =  in pmem_pagemap_kill()
    351   container_of(pgmap->ref, struct request_queue, q_usage_counter);  in pmem_pagemap_kill()
    383   struct request_queue *q;  in pmem_attach_disk()
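pmem.c recovers the owning request_queue from a dev_pagemap's percpu_ref with container_of(), relying on the ref having been pointed at the queue's q_usage_counter when the pagemap was set up. A sketch of just that step; the helper name is illustrative.

#include <linux/blkdev.h>
#include <linux/kernel.h>
#include <linux/memremap.h>

static struct request_queue *example_pgmap_to_queue(struct dev_pagemap *pgmap)
{
        /* Only valid if pgmap->ref was set to &q->q_usage_counter at setup. */
        return container_of(pgmap->ref, struct request_queue, q_usage_counter);
}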
|
/drivers/thunderbolt/ |
D | ctl.c |
    33    struct list_head request_queue;  member
    123   list_add_tail(&req->list, &ctl->request_queue);  in tb_cfg_request_enqueue()
    153   list_for_each_entry(req, &pkg->ctl->request_queue, list) {  in tb_cfg_request_find()
    621   INIT_LIST_HEAD(&ctl->request_queue);  in tb_ctl_alloc()
    710   if (!list_empty(&ctl->request_queue))  in tb_ctl_stop()
    712   INIT_LIST_HEAD(&ctl->request_queue);  in tb_ctl_stop()
|
/drivers/target/ |
D | target_core_iblock.c |
    72    struct request_queue *q;  in iblock_configure_device()
    194   struct request_queue *q)  in iblock_emulate_read_cap_with_block_size()
    693   struct request_queue *q = bdev_get_queue(ib_dev->ibd_bd);  in iblock_execute_rw()
    792   struct request_queue *q = bdev_get_queue(bd);  in iblock_get_blocks()
    853   struct request_queue *q = bdev_get_queue(bd);  in iblock_get_write_cache()
|
/drivers/cdrom/ |
D | gdrom.c |
    88    static DECLARE_WAIT_QUEUE_HEAD(request_queue);
    104   struct request_queue *gdrom_rq;
    542   wake_up_interruptible(&request_queue);  in gdrom_dma_interrupt()
    623   wait_event_interruptible_timeout(request_queue,  in gdrom_readdisk_dma()
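In gdrom.c, request_queue is merely the name of a wait_queue_head_t: the DMA interrupt wakes it and the submission path sleeps on it with a timeout. A minimal sketch of that handshake, with illustrative names:

#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/jiffies.h>
#include <linux/wait.h>

static DECLARE_WAIT_QUEUE_HEAD(example_wait);
static int example_transfer_done;

static irqreturn_t example_dma_interrupt(int irq, void *dev_id)
{
        example_transfer_done = 1;
        wake_up_interruptible(&example_wait);
        return IRQ_HANDLED;
}

static int example_wait_for_dma(void)
{
        long ret;

        /* Returns 0 on timeout, <0 on signal, remaining jiffies otherwise. */
        ret = wait_event_interruptible_timeout(example_wait,
                                               example_transfer_done,
                                               msecs_to_jiffies(5000));
        if (ret == 0)
                return -ETIMEDOUT;
        if (ret < 0)
                return ret; /* -ERESTARTSYS */
        return 0;
}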
|