
Lines matching refs: shost — each entry gives the source line number, the matched code, and the enclosing function; "argument" and "local" note whether shost is a parameter or a local variable at that point.

60 int scsi_init_sense_cache(struct Scsi_Host *shost)  in scsi_init_sense_cache()  argument
278 static void scsi_dec_host_busy(struct Scsi_Host *shost, struct scsi_cmnd *cmd) in scsi_dec_host_busy() argument
284 if (unlikely(scsi_host_in_recovery(shost))) { in scsi_dec_host_busy()
285 unsigned int busy = scsi_host_busy(shost); in scsi_dec_host_busy()
287 spin_lock_irqsave(shost->host_lock, flags); in scsi_dec_host_busy()
288 if (shost->host_failed || shost->host_eh_scheduled) in scsi_dec_host_busy()
289 scsi_eh_wakeup(shost, busy); in scsi_dec_host_busy()
290 spin_unlock_irqrestore(shost->host_lock, flags); in scsi_dec_host_busy()
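
The matches at 278-290 are the core of scsi_dec_host_busy(): the host lock is taken only on the recovery slow path, to decide whether the error handler needs waking. Filled out as a sketch; the rcu_read_lock() bracket and the SCMD_STATE_INFLIGHT clear are assumed from mainline scsi_lib.c and are not visible in the matches above:

    static void scsi_dec_host_busy(struct Scsi_Host *shost, struct scsi_cmnd *cmd)
    {
            unsigned long flags;

            rcu_read_lock();
            __clear_bit(SCMD_STATE_INFLIGHT, &cmd->state);   /* assumed */
            if (unlikely(scsi_host_in_recovery(shost))) {
                    unsigned int busy = scsi_host_busy(shost);

                    /* Only the recovery slow path needs the host lock. */
                    spin_lock_irqsave(shost->host_lock, flags);
                    if (shost->host_failed || shost->host_eh_scheduled)
                            scsi_eh_wakeup(shost, busy);
                    spin_unlock_irqrestore(shost->host_lock, flags);
            }
            rcu_read_unlock();
    }
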
297 struct Scsi_Host *shost = sdev->host; in scsi_device_unbusy() local
300 scsi_dec_host_busy(shost, cmd); in scsi_device_unbusy()
318 struct Scsi_Host *shost = current_sdev->host; in scsi_single_lun_run() local
323 spin_lock_irqsave(shost->host_lock, flags); in scsi_single_lun_run()
325 spin_unlock_irqrestore(shost->host_lock, flags); in scsi_single_lun_run()
334 shost->queuecommand_may_block); in scsi_single_lun_run()
336 spin_lock_irqsave(shost->host_lock, flags); in scsi_single_lun_run()
346 spin_unlock_irqrestore(shost->host_lock, flags); in scsi_single_lun_run()
348 spin_lock_irqsave(shost->host_lock, flags); in scsi_single_lun_run()
353 spin_unlock_irqrestore(shost->host_lock, flags); in scsi_single_lun_run()
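
The matches at 318-353 show scsi_single_lun_run() repeatedly releasing shost->host_lock before calling into the block layer and re-acquiring it afterwards, since blk_mq_run_hw_queues() must not be called with a spinlock held. A condensed sketch of that lock-drop pattern; the same_target_siblings walk and reference counting are assumed from mainline, not shown in the matches:

    spin_lock_irqsave(shost->host_lock, flags);
    list_for_each_entry_safe(sdev, tmp, &starget->devices,
                             same_target_siblings) {
            if (scsi_device_get(sdev))
                    continue;
            /* Drop the lock across the block-layer call... */
            spin_unlock_irqrestore(shost->host_lock, flags);
            blk_mq_run_hw_queues(sdev->request_queue, false);
            /* ...and re-take it to keep walking the target's list. */
            spin_lock_irqsave(shost->host_lock, flags);
            scsi_device_put(sdev);
    }
    spin_unlock_irqrestore(shost->host_lock, flags);

Line 334 shows the first run, for the current device, passing shost->queuecommand_may_block, so hosts whose ->queuecommand() may sleep get an asynchronous queue run.
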
376 static inline bool scsi_host_is_busy(struct Scsi_Host *shost) in scsi_host_is_busy() argument
378 if (atomic_read(&shost->host_blocked) > 0) in scsi_host_is_busy()
380 if (shost->host_self_blocked) in scsi_host_is_busy()
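
The two matched conditions at 378-380 are the entire predicate; completed along the lines of mainline scsi_lib.c, the helper reads:

    static inline bool scsi_host_is_busy(struct Scsi_Host *shost)
    {
            if (atomic_read(&shost->host_blocked) > 0)
                    return true;
            if (shost->host_self_blocked)
                    return true;
            return false;
    }
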
385 static void scsi_starved_list_run(struct Scsi_Host *shost) in scsi_starved_list_run() argument
391 spin_lock_irqsave(shost->host_lock, flags); in scsi_starved_list_run()
392 list_splice_init(&shost->starved_list, &starved_list); in scsi_starved_list_run()
407 if (scsi_host_is_busy(shost)) in scsi_starved_list_run()
415 &shost->starved_list); in scsi_starved_list_run()
432 spin_unlock_irqrestore(shost->host_lock, flags); in scsi_starved_list_run()
437 spin_lock_irqsave(shost->host_lock, flags); in scsi_starved_list_run()
440 list_splice(&starved_list, &shost->starved_list); in scsi_starved_list_run()
441 spin_unlock_irqrestore(shost->host_lock, flags); in scsi_starved_list_run()
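
Lines 385-441 trace the classic splice pattern in scsi_starved_list_run(): the shared starved_list is emptied onto a private list under host_lock (line 392), entries are processed with the lock dropped around each queue run, and whatever is left is spliced back (line 440) if the host went busy mid-walk. A sketch under those assumptions; the target-busy check and the queue refcounting follow mainline and are not visible in the matches:

    static void scsi_starved_list_run(struct Scsi_Host *shost)
    {
            LIST_HEAD(starved_list);
            struct scsi_device *sdev;
            unsigned long flags;

            spin_lock_irqsave(shost->host_lock, flags);
            list_splice_init(&shost->starved_list, &starved_list);

            while (!list_empty(&starved_list)) {
                    struct request_queue *slq;

                    /* Stop early if the host itself can't take more work. */
                    if (scsi_host_is_busy(shost))
                            break;

                    sdev = list_entry(starved_list.next,
                                      struct scsi_device, starved_entry);
                    list_del_init(&sdev->starved_entry);
                    if (scsi_target_is_busy(scsi_target(sdev))) {
                            list_move_tail(&sdev->starved_entry,
                                           &shost->starved_list);
                            continue;
                    }

                    /* Pin the queue, then run it with the lock dropped. */
                    slq = sdev->request_queue;
                    if (!blk_get_queue(slq))
                            continue;
                    spin_unlock_irqrestore(shost->host_lock, flags);
                    blk_mq_run_hw_queues(slq, false);
                    blk_put_queue(slq);
                    spin_lock_irqsave(shost->host_lock, flags);
            }
            /* Put any unprocessed entries back on the shared list. */
            list_splice(&starved_list, &shost->starved_list);
            spin_unlock_irqrestore(shost->host_lock, flags);
    }
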
473 void scsi_run_host_queues(struct Scsi_Host *shost) in scsi_run_host_queues() argument
477 shost_for_each_device(sdev, shost) in scsi_run_host_queues()
1274 static inline int scsi_target_queue_ready(struct Scsi_Host *shost, in scsi_target_queue_ready() argument
1281 spin_lock_irq(shost->host_lock); in scsi_target_queue_ready()
1284 spin_unlock_irq(shost->host_lock); in scsi_target_queue_ready()
1288 spin_unlock_irq(shost->host_lock); in scsi_target_queue_ready()
1315 spin_lock_irq(shost->host_lock); in scsi_target_queue_ready()
1316 list_move_tail(&sdev->starved_entry, &shost->starved_list); in scsi_target_queue_ready()
1317 spin_unlock_irq(shost->host_lock); in scsi_target_queue_ready()
1330 struct Scsi_Host *shost, in scsi_host_queue_ready() argument
1334 if (atomic_read(&shost->host_blocked) > 0) { in scsi_host_queue_ready()
1335 if (scsi_host_busy(shost) > 0) in scsi_host_queue_ready()
1341 if (atomic_dec_return(&shost->host_blocked) > 0) in scsi_host_queue_ready()
1345 shost_printk(KERN_INFO, shost, in scsi_host_queue_ready()
1349 if (shost->host_self_blocked) in scsi_host_queue_ready()
1354 spin_lock_irq(shost->host_lock); in scsi_host_queue_ready()
1357 spin_unlock_irq(shost->host_lock); in scsi_host_queue_ready()
1365 spin_lock_irq(shost->host_lock); in scsi_host_queue_ready()
1367 list_add_tail(&sdev->starved_entry, &shost->starved_list); in scsi_host_queue_ready()
1368 spin_unlock_irq(shost->host_lock); in scsi_host_queue_ready()
1370 scsi_dec_host_busy(shost, cmd); in scsi_host_queue_ready()
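
Lines 1330-1370 outline scsi_host_queue_ready()'s unblock protocol: while host_blocked holds tokens, a command may proceed only once the host is otherwise idle and the token count decrements to zero; on any failure the device is parked on shost->starved_list and the host-busy count is dropped. A sketch assembled from those matches; the label names and the starved_entry emptiness checks are assumed from mainline, which also flags the command in flight on success (omitted here):

    static inline int scsi_host_queue_ready(struct request_queue *q,
                                            struct Scsi_Host *shost,
                                            struct scsi_device *sdev,
                                            struct scsi_cmnd *cmd)
    {
            if (atomic_read(&shost->host_blocked) > 0) {
                    /* Stay blocked while commands are still in flight. */
                    if (scsi_host_busy(shost) > 0)
                            goto starved;
                    /* Consume one unblock token; proceed only at zero. */
                    if (atomic_dec_return(&shost->host_blocked) > 0)
                            goto starved;
                    shost_printk(KERN_INFO, shost,
                                 "unblocking host at zero depth\n");
            }
            if (shost->host_self_blocked)
                    goto starved;

            /* We can issue, so the device is no longer starved. */
            if (!list_empty(&sdev->starved_entry)) {
                    spin_lock_irq(shost->host_lock);
                    if (!list_empty(&sdev->starved_entry))
                            list_del_init(&sdev->starved_entry);
                    spin_unlock_irq(shost->host_lock);
            }
            return 1;

    starved:
            spin_lock_irq(shost->host_lock);
            if (list_empty(&sdev->starved_entry))
                    list_add_tail(&sdev->starved_entry, &shost->starved_list);
            spin_unlock_irq(shost->host_lock);
            scsi_dec_host_busy(shost, cmd);
            return 0;
    }
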
1389 struct Scsi_Host *shost; in scsi_mq_lld_busy() local
1394 shost = sdev->host; in scsi_mq_lld_busy()
1402 if (scsi_host_in_recovery(shost) || scsi_device_is_busy(sdev)) in scsi_mq_lld_busy()
1530 static unsigned int scsi_mq_inline_sgl_size(struct Scsi_Host *shost) in scsi_mq_inline_sgl_size() argument
1532 return min_t(unsigned int, shost->sg_tablesize, SCSI_INLINE_SG_CNT) * in scsi_mq_inline_sgl_size()
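
The match at 1532 truncates the multiplicand; the helper sizes the inline scatterlist area as the smaller of the host's sg_tablesize and SCSI_INLINE_SG_CNT entries:

    static unsigned int scsi_mq_inline_sgl_size(struct Scsi_Host *shost)
    {
            return min_t(unsigned int, shost->sg_tablesize, SCSI_INLINE_SG_CNT) *
                    sizeof(struct scatterlist);
    }
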
1540 struct Scsi_Host *shost = sdev->host; in scsi_prepare_cmd() local
1551 sg = (void *)cmd + sizeof(struct scsi_cmnd) + shost->hostt->cmd_size; in scsi_prepare_cmd()
1554 if (scsi_host_get_prot(shost)) { in scsi_prepare_cmd()
1648 struct Scsi_Host *shost = sdev->host; in scsi_queue_rq() local
1666 if (!scsi_target_queue_ready(shost, sdev)) in scsi_queue_rq()
1668 if (unlikely(scsi_host_in_recovery(shost))) { in scsi_queue_rq()
1673 if (!scsi_host_queue_ready(q, shost, sdev, cmd)) in scsi_queue_rq()
1706 scsi_dec_host_busy(shost, cmd); in scsi_queue_rq()
1755 struct Scsi_Host *shost = set->driver_data; in scsi_mq_init_request() local
1766 if (scsi_host_get_prot(shost)) { in scsi_mq_init_request()
1768 shost->hostt->cmd_size; in scsi_mq_init_request()
1769 cmd->prot_sdb = (void *)sg + scsi_mq_inline_sgl_size(shost); in scsi_mq_init_request()
1772 if (shost->hostt->init_cmd_priv) { in scsi_mq_init_request()
1773 ret = shost->hostt->init_cmd_priv(shost, cmd); in scsi_mq_init_request()
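
The pointer arithmetic at 1551 and 1766-1769 implies the per-request payload layout: the scsi_cmnd comes first, then the LLD's private area, then the inline data scatterlist, then (for protection-capable hosts) the protection data buffer. Expressed as a sketch; the diagram is an inference from those offsets, not shown in the source:

    /*
     * Layout of the blk-mq pdu backing each request:
     *
     *   +-----------------------+  <- blk_mq_rq_to_pdu(rq)
     *   | struct scsi_cmnd      |
     *   +-----------------------+
     *   | LLD private data      |  shost->hostt->cmd_size bytes
     *   +-----------------------+
     *   | inline data SGL       |  scsi_mq_inline_sgl_size(shost) bytes
     *   +-----------------------+
     *   | prot sdb + inline SGL |  only if scsi_host_get_prot(shost)
     *   +-----------------------+
     */
    struct scsi_cmnd *cmd = blk_mq_rq_to_pdu(rq);
    struct scatterlist *sg = (void *)cmd + sizeof(struct scsi_cmnd) +
                             shost->hostt->cmd_size;

    if (scsi_host_get_prot(shost))
            cmd->prot_sdb = (void *)sg + scsi_mq_inline_sgl_size(shost);
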
1784 struct Scsi_Host *shost = set->driver_data; in scsi_mq_exit_request() local
1787 if (shost->hostt->exit_cmd_priv) in scsi_mq_exit_request()
1788 shost->hostt->exit_cmd_priv(shost, cmd); in scsi_mq_exit_request()
1795 struct Scsi_Host *shost = hctx->driver_data; in scsi_mq_poll() local
1797 if (shost->hostt->mq_poll) in scsi_mq_poll()
1798 return shost->hostt->mq_poll(shost, hctx->queue_num); in scsi_mq_poll()
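
Lines 1795-1798 are a thin dispatch: polling is delegated to the host template only if it provides mq_poll. Completed as a sketch; the second parameter's type varies across kernel versions, and io_comp_batch is assumed here:

    static int scsi_mq_poll(struct blk_mq_hw_ctx *hctx, struct io_comp_batch *iob)
    {
            struct Scsi_Host *shost = hctx->driver_data;

            if (shost->hostt->mq_poll)
                    return shost->hostt->mq_poll(shost, hctx->queue_num);
            return 0;
    }
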
1806 struct Scsi_Host *shost = data; in scsi_init_hctx() local
1808 hctx->driver_data = shost; in scsi_init_hctx()
1814 struct Scsi_Host *shost = container_of(set, struct Scsi_Host, tag_set); in scsi_map_queues() local
1816 if (shost->hostt->map_queues) in scsi_map_queues()
1817 return shost->hostt->map_queues(shost); in scsi_map_queues()
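
Line 1814 recovers the Scsi_Host from the tag set via container_of(), and the template's map_queues takes precedence over blk-mq's default mapping. A sketch; the fallback call and the return type are assumed from mainline, where this callback later became void:

    static int scsi_map_queues(struct blk_mq_tag_set *set)
    {
            struct Scsi_Host *shost = container_of(set, struct Scsi_Host, tag_set);

            if (shost->hostt->map_queues)
                    return shost->hostt->map_queues(shost);
            return blk_mq_map_queues(&set->map[HCTX_TYPE_DEFAULT]);
    }
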
1821 void __scsi_init_queue(struct Scsi_Host *shost, struct request_queue *q) in __scsi_init_queue() argument
1823 struct device *dev = shost->dma_dev; in __scsi_init_queue()
1828 blk_queue_max_segments(q, min_t(unsigned short, shost->sg_tablesize, in __scsi_init_queue()
1831 if (scsi_host_prot_dma(shost)) { in __scsi_init_queue()
1832 shost->sg_prot_tablesize = in __scsi_init_queue()
1833 min_not_zero(shost->sg_prot_tablesize, in __scsi_init_queue()
1835 BUG_ON(shost->sg_prot_tablesize < shost->sg_tablesize); in __scsi_init_queue()
1836 blk_queue_max_integrity_segments(q, shost->sg_prot_tablesize); in __scsi_init_queue()
1840 shost->max_sectors = min_t(unsigned int, shost->max_sectors, in __scsi_init_queue()
1843 blk_queue_max_hw_sectors(q, shost->max_sectors); in __scsi_init_queue()
1844 blk_queue_segment_boundary(q, shost->dma_boundary); in __scsi_init_queue()
1845 dma_set_seg_boundary(dev, shost->dma_boundary); in __scsi_init_queue()
1847 blk_queue_max_segment_size(q, shost->max_segment_size); in __scsi_init_queue()
1848 blk_queue_virt_boundary(q, shost->virt_boundary_mask); in __scsi_init_queue()
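
Lines 1821-1848 show __scsi_init_queue() translating host limits into block-layer queue limits and mirroring the boundary settings into the DMA device. A sketch of the flow; the dma_max_mapping_size() clamp feeding line 1840 and its dev->dma_mask guard are assumed from mainline:

    void __scsi_init_queue(struct Scsi_Host *shost, struct request_queue *q)
    {
            struct device *dev = shost->dma_dev;

            blk_queue_max_segments(q, min_t(unsigned short, shost->sg_tablesize,
                                            SG_MAX_SEGMENTS));

            if (scsi_host_prot_dma(shost)) {
                    shost->sg_prot_tablesize =
                            min_not_zero(shost->sg_prot_tablesize,
                                         (unsigned short)SCSI_MAX_PROT_SG_SEGMENTS);
                    BUG_ON(shost->sg_prot_tablesize < shost->sg_tablesize);
                    blk_queue_max_integrity_segments(q, shost->sg_prot_tablesize);
            }

            /* Cap max_sectors by what the DMA layer can map in one go. */
            shost->max_sectors = min_t(unsigned int, shost->max_sectors,
                                       dma_max_mapping_size(dev) >> SECTOR_SHIFT);
            blk_queue_max_hw_sectors(q, shost->max_sectors);
            blk_queue_segment_boundary(q, shost->dma_boundary);
            dma_set_seg_boundary(dev, shost->dma_boundary);

            blk_queue_max_segment_size(q, shost->max_segment_size);
            blk_queue_virt_boundary(q, shost->virt_boundary_mask);
            dma_set_max_seg_size(dev, queue_max_segment_size(q));
    }
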
1886 struct Scsi_Host *shost = hctx->driver_data; in scsi_commit_rqs() local
1888 shost->hostt->commit_rqs(shost, hctx->queue_num); in scsi_commit_rqs()
1913 int scsi_mq_setup_tags(struct Scsi_Host *shost) in scsi_mq_setup_tags() argument
1916 struct blk_mq_tag_set *tag_set = &shost->tag_set; in scsi_mq_setup_tags()
1919 scsi_mq_inline_sgl_size(shost)); in scsi_mq_setup_tags()
1920 cmd_size = sizeof(struct scsi_cmnd) + shost->hostt->cmd_size + sgl_size; in scsi_mq_setup_tags()
1921 if (scsi_host_get_prot(shost)) in scsi_mq_setup_tags()
1926 if (shost->hostt->commit_rqs) in scsi_mq_setup_tags()
1930 tag_set->nr_hw_queues = shost->nr_hw_queues ? : 1; in scsi_mq_setup_tags()
1931 tag_set->nr_maps = shost->nr_maps ? : 1; in scsi_mq_setup_tags()
1932 tag_set->queue_depth = shost->can_queue; in scsi_mq_setup_tags()
1937 BLK_ALLOC_POLICY_TO_MQ_FLAG(shost->hostt->tag_alloc_policy); in scsi_mq_setup_tags()
1938 if (shost->queuecommand_may_block) in scsi_mq_setup_tags()
1940 tag_set->driver_data = shost; in scsi_mq_setup_tags()
1941 if (shost->host_tagset) in scsi_mq_setup_tags()
1947 void scsi_mq_destroy_tags(struct Scsi_Host *shost) in scsi_mq_destroy_tags() argument
1949 blk_mq_free_tag_set(&shost->tag_set); in scsi_mq_destroy_tags()
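
Lines 1913-1949 cover the tag-set lifecycle: scsi_mq_setup_tags() sizes the per-command allocation (command + LLD private area + inline SGLs, line 1920) and populates the blk-mq tag set from host parameters, and scsi_mq_destroy_tags() frees it. A sketch of the assignments visible in the matches; the ops-table names, the numa/flag initialization that is elided, and the final blk_mq_alloc_tag_set() call are assumed from mainline:

    int scsi_mq_setup_tags(struct Scsi_Host *shost)
    {
            unsigned int cmd_size, sgl_size;
            struct blk_mq_tag_set *tag_set = &shost->tag_set;

            sgl_size = max_t(unsigned int, sizeof(struct scatterlist),
                             scsi_mq_inline_sgl_size(shost));
            cmd_size = sizeof(struct scsi_cmnd) + shost->hostt->cmd_size + sgl_size;
            if (scsi_host_get_prot(shost))
                    cmd_size += sizeof(struct scsi_data_buffer) +
                            sizeof(struct scatterlist) * SCSI_INLINE_PROT_SG_CNT;

            memset(tag_set, 0, sizeof(*tag_set));
            tag_set->ops = shost->hostt->commit_rqs ? &scsi_mq_ops :
                                                      &scsi_mq_ops_no_commit;
            tag_set->nr_hw_queues = shost->nr_hw_queues ? : 1;
            tag_set->nr_maps = shost->nr_maps ? : 1;
            tag_set->queue_depth = shost->can_queue;
            tag_set->cmd_size = cmd_size;
            tag_set->flags |=
                    BLK_ALLOC_POLICY_TO_MQ_FLAG(shost->hostt->tag_alloc_policy);
            if (shost->queuecommand_may_block)
                    tag_set->flags |= BLK_MQ_F_BLOCKING;
            tag_set->driver_data = shost;
            if (shost->host_tagset)
                    tag_set->flags |= BLK_MQ_F_TAG_HCTX_SHARED;

            return blk_mq_alloc_tag_set(tag_set);
    }

    void scsi_mq_destroy_tags(struct Scsi_Host *shost)
    {
            blk_mq_free_tag_set(&shost->tag_set);
    }
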
1980 void scsi_block_requests(struct Scsi_Host *shost) in scsi_block_requests() argument
1982 shost->host_self_blocked = 1; in scsi_block_requests()
1996 void scsi_unblock_requests(struct Scsi_Host *shost) in scsi_unblock_requests() argument
1998 shost->host_self_blocked = 0; in scsi_unblock_requests()
1999 scsi_run_host_queues(shost); in scsi_unblock_requests()
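
The exported pair at 1980-1999 is the whole self-block mechanism: a flag that scsi_host_is_busy() (line 380) tests, plus a queue rerun on unblock so stalled commands get resubmitted:

    void scsi_block_requests(struct Scsi_Host *shost)
    {
            shost->host_self_blocked = 1;
    }
    EXPORT_SYMBOL(scsi_block_requests);

    void scsi_unblock_requests(struct Scsi_Host *shost)
    {
            shost->host_self_blocked = 0;
            scsi_run_host_queues(shost);    /* kick every device queue */
    }
    EXPORT_SYMBOL(scsi_unblock_requests);
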
2857 scsi_host_block(struct Scsi_Host *shost) in scsi_host_block() argument
2866 shost_for_each_device(sdev, shost) { in scsi_host_block()
2880 WARN_ON_ONCE(shost->tag_set.flags & BLK_MQ_F_BLOCKING); in scsi_host_block()
2890 scsi_host_unblock(struct Scsi_Host *shost, int new_state) in scsi_host_unblock() argument
2895 shost_for_each_device(sdev, shost) { in scsi_host_unblock()
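
Lines 2857-2895 show scsi_host_block() and scsi_host_unblock() iterating every device with shost_for_each_device(); the WARN_ON_ONCE at 2880 guards the quiesce wait, which is only valid for tag sets without BLK_MQ_F_BLOCKING. A sketch of the block side; the per-device helper, the state_mutex locking, and the blk_mq_wait_quiesce_done() call are assumed from mainline and not visible in the matches:

    int scsi_host_block(struct Scsi_Host *shost)
    {
            struct scsi_device *sdev;
            int ret;

            shost_for_each_device(sdev, shost) {
                    mutex_lock(&sdev->state_mutex);
                    ret = scsi_internal_device_block_nowait(sdev);
                    mutex_unlock(&sdev->state_mutex);
                    if (ret) {
                            scsi_device_put(sdev);
                            return ret;
                    }
            }

            /* Wait for in-flight scsi_queue_rq() calls to drain. */
            WARN_ON_ONCE(shost->tag_set.flags & BLK_MQ_F_BLOCKING);
            blk_mq_wait_quiesce_done(&shost->tag_set);
            return 0;
    }
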