Lines matching defs:shost in drivers/scsi/scsi_lib.c (kernel source line numbers at left)

61 int scsi_init_sense_cache(struct Scsi_Host *shost)
360 * shost->host_failed: use call_rcu() in scsi_eh_scmd_add() in combination
363 * host_failed counter or that it notices the shost state change made by
366 static void scsi_dec_host_busy(struct Scsi_Host *shost, struct scsi_cmnd *cmd)
372 if (unlikely(scsi_host_in_recovery(shost))) {
373 unsigned int busy = scsi_host_busy(shost);
375 spin_lock_irqsave(shost->host_lock, flags);
376 if (shost->host_failed || shost->host_eh_scheduled)
377 scsi_eh_wakeup(shost, busy);
378 spin_unlock_irqrestore(shost->host_lock, flags);
385 struct Scsi_Host *shost = sdev->host;
388 scsi_dec_host_busy(shost, cmd);
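
For context, the matches at 360-388 are the error-handler wakeup path. A sketch
of scsi_dec_host_busy() reconstructed around the matched lines; the RCU
bracketing is implied by the comment at line 360, and the SCMD_STATE_INFLIGHT
clearing is recalled from the surrounding source, so details may differ by
kernel version:

    static void scsi_dec_host_busy(struct Scsi_Host *shost, struct scsi_cmnd *cmd)
    {
            unsigned long flags;

            rcu_read_lock();
            /* take this command out of the host's in-flight set */
            __clear_bit(SCMD_STATE_INFLIGHT, &cmd->state);
            if (unlikely(scsi_host_in_recovery(shost))) {
                    unsigned int busy = scsi_host_busy(shost);

                    spin_lock_irqsave(shost->host_lock, flags);
                    if (shost->host_failed || shost->host_eh_scheduled)
                            scsi_eh_wakeup(shost, busy);
                    spin_unlock_irqrestore(shost->host_lock, flags);
            }
            rcu_read_unlock();
    }
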
418 struct Scsi_Host *shost = current_sdev->host;
422 spin_lock_irqsave(shost->host_lock, flags);
424 spin_unlock_irqrestore(shost->host_lock, flags);
433 shost->queuecommand_may_block);
435 spin_lock_irqsave(shost->host_lock, flags);
439 spin_unlock_irqrestore(shost->host_lock, flags);
462 static inline bool scsi_host_is_busy(struct Scsi_Host *shost)
464 if (atomic_read(&shost->host_blocked) > 0)
466 if (shost->host_self_blocked)
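
The matches at 464 and 466 are the whole predicate; filling in the return
statements between them gives, roughly:

    static inline bool scsi_host_is_busy(struct Scsi_Host *shost)
    {
            if (atomic_read(&shost->host_blocked) > 0)
                    return true;
            if (shost->host_self_blocked)
                    return true;
            return false;
    }
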
471 static void scsi_starved_list_run(struct Scsi_Host *shost)
477 spin_lock_irqsave(shost->host_lock, flags);
478 list_splice_init(&shost->starved_list, &starved_list);
484 * As long as shost is accepting commands and we have
493 if (scsi_host_is_busy(shost))
501 &shost->starved_list);
518 spin_unlock_irqrestore(shost->host_lock, flags);
523 spin_lock_irqsave(shost->host_lock, flags);
526 list_splice(&starved_list, &shost->starved_list);
527 spin_unlock_irqrestore(shost->host_lock, flags);
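
The splice-out at 478 and splice-back at 526 are the core of the starved-list
scan. A condensed sketch of the control flow, with the per-device queue
kicking (which drops and retakes host_lock) elided:

    static void scsi_starved_list_run(struct Scsi_Host *shost)
    {
            LIST_HEAD(starved_list);
            struct scsi_device *sdev;
            unsigned long flags;

            spin_lock_irqsave(shost->host_lock, flags);
            /* take the whole list at once; new arrivals queue up separately */
            list_splice_init(&shost->starved_list, &starved_list);

            while (!list_empty(&starved_list)) {
                    if (scsi_host_is_busy(shost))
                            break;  /* host saturated: stop early */
                    sdev = list_first_entry(&starved_list, struct scsi_device,
                                            starved_entry);
                    list_del_init(&sdev->starved_entry);
                    /* ... drop host_lock, run sdev's queue, retake host_lock ... */
            }
            /* put any unprocessed entries back (line 526) */
            list_splice(&starved_list, &shost->starved_list);
            spin_unlock_irqrestore(shost->host_lock, flags);
    }
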
559 void scsi_run_host_queues(struct Scsi_Host *shost)
563 shost_for_each_device(sdev, shost)
1373 static inline int scsi_target_queue_ready(struct Scsi_Host *shost,
1380 spin_lock_irq(shost->host_lock);
1383 spin_unlock_irq(shost->host_lock);
1387 spin_unlock_irq(shost->host_lock);
1414 spin_lock_irq(shost->host_lock);
1415 list_move_tail(&sdev->starved_entry, &shost->starved_list);
1416 spin_unlock_irq(shost->host_lock);
1424 * scsi_host_queue_ready: if we can send requests to shost, return 1 else
1429 struct Scsi_Host *shost,
1433 if (atomic_read(&shost->host_blocked) > 0) {
1434 if (scsi_host_busy(shost) > 0)
1440 if (atomic_dec_return(&shost->host_blocked) > 0)
1444 shost_printk(KERN_INFO, shost,
1448 if (shost->host_self_blocked)
1453 spin_lock_irq(shost->host_lock);
1456 spin_unlock_irq(shost->host_lock);
1464 spin_lock_irq(shost->host_lock);
1466 list_add_tail(&sdev->starved_entry, &shost->starved_list);
1467 spin_unlock_irq(shost->host_lock);
1469 scsi_dec_host_busy(shost, cmd);
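
Lines 1433-1448 are the host-level throttle in scsi_host_queue_ready(): while
host_blocked is positive the queue stays starved, and only the dispatch that
decrements the counter to zero gets through. Roughly (the starved and out_dec
labels live elsewhere in the function):

    if (atomic_read(&shost->host_blocked) > 0) {
            /* wait until all in-flight commands have drained */
            if (scsi_host_busy(shost) > 0)
                    goto starved;
            /* each retry consumes one unit of host_blocked */
            if (atomic_dec_return(&shost->host_blocked) > 0)
                    goto out_dec;
            SCSI_LOG_MLQUEUE(3,
                    shost_printk(KERN_INFO, shost,
                                 "unblocking host at zero depth\n"));
    }
    if (shost->host_self_blocked)
            goto starved;
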
1477 * shost/starget/sdev, since the returned value is not guaranteed and
1488 struct Scsi_Host *shost;
1493 shost = sdev->host;
1501 if (scsi_host_in_recovery(shost) || scsi_device_is_busy(sdev))
1628 static unsigned int scsi_mq_inline_sgl_size(struct Scsi_Host *shost)
1630 return min_t(unsigned int, shost->sg_tablesize, SCSI_INLINE_SG_CNT) *
1638 struct Scsi_Host *shost = sdev->host;
1662 if (!shost->hostt->init_cmd_priv)
1663 memset(cmd + 1, 0, shost->hostt->cmd_size);
1671 sg = (void *)cmd + sizeof(struct scsi_cmnd) + shost->hostt->cmd_size;
1674 if (scsi_host_get_prot(shost)) {
1803 struct Scsi_Host *shost = sdev->host;
1821 if (!scsi_target_queue_ready(shost, sdev))
1823 if (unlikely(scsi_host_in_recovery(shost))) {
1828 if (!scsi_host_queue_ready(q, shost, sdev, cmd))
1861 scsi_dec_host_busy(shost, cmd);
1901 struct Scsi_Host *shost = set->driver_data;
1911 if (scsi_host_get_prot(shost)) {
1913 shost->hostt->cmd_size;
1914 cmd->prot_sdb = (void *)sg + scsi_mq_inline_sgl_size(shost);
1917 if (shost->hostt->init_cmd_priv) {
1918 ret = shost->hostt->init_cmd_priv(shost, cmd);
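
The pointer arithmetic at 1671 and 1913-1914, together with the size
computation at 2052-2054, implies this per-tag allocation layout:

    /*
     * One allocation per request tag:
     *
     *   struct scsi_cmnd              sizeof(struct scsi_cmnd)
     *   LLD private data              shost->hostt->cmd_size
     *   inline scatterlist            scsi_mq_inline_sgl_size(shost)
     *   inline protection sdb + sgl   only if scsi_host_get_prot(shost)
     */
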
1929 struct Scsi_Host *shost = set->driver_data;
1932 if (shost->hostt->exit_cmd_priv)
1933 shost->hostt->exit_cmd_priv(shost, cmd);
1940 struct Scsi_Host *shost = hctx->driver_data;
1942 if (shost->hostt->mq_poll)
1943 return shost->hostt->mq_poll(shost, hctx->queue_num);
1951 struct Scsi_Host *shost = data;
1953 hctx->driver_data = shost;
1959 struct Scsi_Host *shost = container_of(set, struct Scsi_Host, tag_set);
1961 if (shost->hostt->map_queues)
1962 return shost->hostt->map_queues(shost);
1966 void scsi_init_limits(struct Scsi_Host *shost, struct queue_limits *lim)
1968 struct device *dev = shost->dma_dev;
1972 min_t(unsigned short, shost->sg_tablesize, SG_MAX_SEGMENTS);
1974 if (scsi_host_prot_dma(shost)) {
1975 shost->sg_prot_tablesize =
1976 min_not_zero(shost->sg_prot_tablesize,
1978 BUG_ON(shost->sg_prot_tablesize < shost->sg_tablesize);
1979 lim->max_integrity_segments = shost->sg_prot_tablesize;
1982 lim->max_hw_sectors = shost->max_sectors;
1983 lim->seg_boundary_mask = shost->dma_boundary;
1984 lim->max_segment_size = shost->max_segment_size;
1985 lim->virt_boundary_mask = shost->virt_boundary_mask;
1987 shost->dma_alignment, dma_get_cache_alignment() - 1);
1989 if (shost->no_highmem)
1992 dma_set_seg_boundary(dev, shost->dma_boundary);
1993 dma_set_max_seg_size(dev, shost->max_segment_size);
2020 struct Scsi_Host *shost = hctx->driver_data;
2022 shost->hostt->commit_rqs(shost, hctx->queue_num);
2046 int scsi_mq_setup_tags(struct Scsi_Host *shost)
2049 struct blk_mq_tag_set *tag_set = &shost->tag_set;
2052 scsi_mq_inline_sgl_size(shost));
2053 cmd_size = sizeof(struct scsi_cmnd) + shost->hostt->cmd_size + sgl_size;
2054 if (scsi_host_get_prot(shost))
2059 if (shost->hostt->commit_rqs)
2063 tag_set->nr_hw_queues = shost->nr_hw_queues ? : 1;
2064 tag_set->nr_maps = shost->nr_maps ? : 1;
2065 tag_set->queue_depth = shost->can_queue;
2067 tag_set->numa_node = dev_to_node(shost->dma_dev);
2070 BLK_ALLOC_POLICY_TO_MQ_FLAG(shost->hostt->tag_alloc_policy);
2071 if (shost->queuecommand_may_block)
2073 tag_set->driver_data = shost;
2074 if (shost->host_tagset)
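
Pulled together, the scsi_mq_setup_tags() matches amount to the following
tag-set initialization. A condensed paraphrase: the scsi_mq_ops and
scsi_mq_ops_no_commit names and the final blk_mq_alloc_tag_set() call are
recalled from the source rather than shown in the matches, and the flag
plumbing varies by kernel version:

    if (shost->hostt->commit_rqs)
            tag_set->ops = &scsi_mq_ops;
    else
            tag_set->ops = &scsi_mq_ops_no_commit;
    tag_set->nr_hw_queues = shost->nr_hw_queues ? : 1;
    tag_set->nr_maps = shost->nr_maps ? : 1;
    tag_set->queue_depth = shost->can_queue;
    tag_set->cmd_size = cmd_size;
    tag_set->numa_node = dev_to_node(shost->dma_dev);
    tag_set->flags |=
            BLK_ALLOC_POLICY_TO_MQ_FLAG(shost->hostt->tag_alloc_policy);
    if (shost->queuecommand_may_block)
            tag_set->flags |= BLK_MQ_F_BLOCKING;
    tag_set->driver_data = shost;
    if (shost->host_tagset)
            tag_set->flags |= BLK_MQ_F_TAG_HCTX_SHARED;

    return blk_mq_alloc_tag_set(tag_set);
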
2082 struct Scsi_Host *shost = container_of(kref, typeof(*shost),
2085 blk_mq_free_tag_set(&shost->tag_set);
2086 complete(&shost->tagset_freed);
2120 * @shost: host in question
2125 void scsi_block_requests(struct Scsi_Host *shost)
2127 shost->host_self_blocked = 1;
2134 * @shost: host in question
2141 void scsi_unblock_requests(struct Scsi_Host *shost)
2143 shost->host_self_blocked = 0;
2144 scsi_run_host_queues(shost);
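
The bodies behind the 2125-2144 matches are a one-line pair; the asymmetry is
the point: only the unblock side reruns the queues that were parked while
host_self_blocked was set. Reconstructed:

    void scsi_block_requests(struct Scsi_Host *shost)
    {
            shost->host_self_blocked = 1;
    }

    void scsi_unblock_requests(struct Scsi_Host *shost)
    {
            shost->host_self_blocked = 0;
            scsi_run_host_queues(shost);    /* kick queues parked while blocked */
    }
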
2996 * @shost: the Scsi_Host to which this device belongs
3006 scsi_block_targets(struct Scsi_Host *shost, struct device *dev)
3010 blk_mq_wait_quiesce_done(&shost->tag_set);
3042 * @shost: device to block
3050 scsi_host_block(struct Scsi_Host *shost)
3059 shost_for_each_device(sdev, shost) {
3070 blk_mq_wait_quiesce_done(&shost->tag_set);
3077 scsi_host_unblock(struct Scsi_Host *shost, int new_state)
3082 shost_for_each_device(sdev, shost) {
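
scsi_host_block() and scsi_host_unblock() both walk every device on the host
via shost_for_each_device(). A sketch of the unblock side; the per-device
scsi_internal_device_unblock() call and the early-exit reference drop are
recalled from the surrounding source, not shown in the matches:

    int scsi_host_unblock(struct Scsi_Host *shost, int new_state)
    {
            struct scsi_device *sdev;
            int ret = 0;

            shost_for_each_device(sdev, shost) {
                    ret = scsi_internal_device_unblock(sdev, new_state);
                    if (ret) {
                            /* shost_for_each_device() holds a device reference */
                            scsi_device_put(sdev);
                            break;
                    }
            }
            return ret;
    }
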