Lines matching refs:shost
only in /netgear-R7000-V1.0.7.12_1.2.5/components/opensource/linux/linux-2.6.36/drivers/scsi/

302 struct Scsi_Host *shost = sdev->host;
306 spin_lock_irqsave(shost->host_lock, flags);
307 shost->host_busy--;
309 if (unlikely(scsi_host_in_recovery(shost) &&
310 (shost->host_failed || shost->host_eh_scheduled)))
311 scsi_eh_wakeup(shost);
312 spin_unlock(shost->host_lock);
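
These matches all look like drivers/scsi/scsi_lib.c from the bundled 2.6.36 kernel. The first group is scsi_device_unbusy(), which retires one command's worth of host/target/device accounting when a command completes. The sketches in this listing are condensed reconstructions built from the matched lines plus the surrounding mainline 2.6.36 source; they assume the usual scsi_lib.c context (<scsi/scsi_host.h>, <scsi/scsi_device.h>) and are illustrative, not authoritative. For this function:

void scsi_device_unbusy(struct scsi_device *sdev)
{
	struct Scsi_Host *shost = sdev->host;
	struct scsi_target *starget = scsi_target(sdev);
	unsigned long flags;

	spin_lock_irqsave(shost->host_lock, flags);
	shost->host_busy--;			/* one fewer command in flight on this host */
	starget->target_busy--;
	if (unlikely(scsi_host_in_recovery(shost) &&
		     (shost->host_failed || shost->host_eh_scheduled)))
		scsi_eh_wakeup(shost);		/* error handler waits for the host to drain */
	spin_unlock(shost->host_lock);		/* plain unlock: irqs stay disabled */

	spin_lock(sdev->request_queue->queue_lock);
	sdev->device_busy--;
	spin_unlock_irqrestore(sdev->request_queue->queue_lock, flags);
}

Note the asymmetric spin_unlock(shost->host_lock) at line 312: interrupts remain off across the hand-off to the queue lock and are only restored by the final irqrestore.
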
327 struct Scsi_Host *shost = current_sdev->host;
332 spin_lock_irqsave(shost->host_lock, flags);
334 spin_unlock_irqrestore(shost->host_lock, flags);
344 spin_lock_irqsave(shost->host_lock, flags);
354 spin_unlock_irqrestore(shost->host_lock, flags);
356 spin_lock_irqsave(shost->host_lock, flags);
361 spin_unlock_irqrestore(shost->host_lock, flags);
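
Lines 327-361 are the lock/unlock pairs in scsi_single_lun_run(), which runs the queues of the other LUNs behind a single-LUN target once the current device gives it up. The recurring idiom is dropping host_lock around blk_run_queue(), since running a queue can re-enter the midlayer and the driver and must not happen under the host lock. A trimmed sketch of the loop (the starget_sdev_user rechecks are elided):

	spin_lock_irqsave(shost->host_lock, flags);
	list_for_each_entry_safe(sdev, tmp, &starget->devices,
				 same_target_siblings) {
		if (sdev == current_sdev || scsi_device_get(sdev))
			continue;

		spin_unlock_irqrestore(shost->host_lock, flags);
		blk_run_queue(sdev->request_queue);	/* may call back into the LLD */
		spin_lock_irqsave(shost->host_lock, flags);

		scsi_device_put(sdev);
	}
	spin_unlock_irqrestore(shost->host_lock, flags);
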
379 static inline int scsi_host_is_busy(struct Scsi_Host *shost)
381 if ((shost->can_queue > 0 && shost->host_busy >= shost->can_queue) ||
382 shost->host_blocked || shost->host_self_blocked)
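
Lines 379-382 are essentially all of scsi_host_is_busy(); only the returns are missing from the match output. The predicate: a host cannot accept another command once it has can_queue commands in flight (if the driver declared a limit), or while it is blocked either by the midlayer (host_blocked) or by the driver itself (host_self_blocked). Completed:

static inline int scsi_host_is_busy(struct Scsi_Host *shost)
{
	if ((shost->can_queue > 0 && shost->host_busy >= shost->can_queue) ||
	    shost->host_blocked || shost->host_self_blocked)
		return 1;

	return 0;
}
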
403 struct Scsi_Host *shost = sdev->host;
410 spin_lock_irqsave(shost->host_lock, flags);
411 list_splice_init(&shost->starved_list, &starved_list);
417 * As long as shost is accepting commands and we have
426 if (scsi_host_is_busy(shost))
434 &shost->starved_list);
438 spin_unlock(shost->host_lock);
451 spin_lock(shost->host_lock);
454 list_splice(&starved_list, &shost->starved_list);
455 spin_unlock_irqrestore(shost->host_lock, flags);
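
Lines 403-455 are scsi_run_queue(), which re-runs queues that were starved while the host was saturated. The idiom behind lines 411 and 454 is splice-out/splice-back: the shared starved_list is moved to a local list under host_lock, entries are serviced with the lock dropped around each queue run, and anything that could not be serviced (line 426: the host went busy again; line 434: the target is still busy) is put back. Condensed, with the queue-flag juggling around the actual queue run elided:

	LIST_HEAD(starved_list);

	spin_lock_irqsave(shost->host_lock, flags);
	list_splice_init(&shost->starved_list, &starved_list);

	while (!list_empty(&starved_list)) {
		if (scsi_host_is_busy(shost))
			break;				/* leftovers go back below */

		sdev = list_entry(starved_list.next,
				  struct scsi_device, starved_entry);
		list_del_init(&sdev->starved_entry);
		if (scsi_target_is_busy(scsi_target(sdev))) {
			list_move_tail(&sdev->starved_entry,
				       &shost->starved_list);
			continue;
		}

		spin_unlock(shost->host_lock);
		blk_run_queue(sdev->request_queue);
		spin_lock(shost->host_lock);
	}
	/* put any unprocessed entries back on the host list */
	list_splice(&starved_list, &shost->starved_list);
	spin_unlock_irqrestore(shost->host_lock, flags);
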
506 void scsi_run_host_queues(struct Scsi_Host *shost)
510 shost_for_each_device(sdev, shost)
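
scsi_run_host_queues() at line 506 is just the iteration wrapper: it walks every device on the host and kicks its queue. The complete 2.6.36 body:

void scsi_run_host_queues(struct Scsi_Host *shost)
{
	struct scsi_device *sdev;

	shost_for_each_device(sdev, shost)
		scsi_run_queue(sdev->request_queue);
}

shost_for_each_device() takes and drops a reference on each device as it iterates, so the walk is safe against concurrent device removal.
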
1255 static inline int scsi_target_queue_ready(struct Scsi_Host *shost,
1281 &shost->starved_list);
1293 * scsi_host_queue_ready: if we can send requests to shost, return 1 else
1300 struct Scsi_Host *shost,
1303 if (scsi_host_in_recovery(shost))
1305 if (shost->host_busy == 0 && shost->host_blocked) {
1309 if (--shost->host_blocked == 0) {
1312 shost->host_no));
1317 if (scsi_host_is_busy(shost)) {
1319 list_add_tail(&sdev->starved_entry, &shost->starved_list);
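
Lines 1255 and 1281 are the target-level gate, scsi_target_queue_ready(), which parks the device on shost->starved_list when its target is saturated. Lines 1293-1319 are its host-level twin, scsi_host_queue_ready(), called with host_lock held from the request function. The notable mechanism is host_blocked acting as a countdown rather than a flag: after a stall is requested, the host only unblocks once host_blocked has been decremented back to zero with no commands in flight (line 1305). Condensed, with the debug printk at line 1312 dropped:

static inline int scsi_host_queue_ready(struct request_queue *q,
					struct Scsi_Host *shost,
					struct scsi_device *sdev)
{
	if (scsi_host_in_recovery(shost))
		return 0;
	if (shost->host_busy == 0 && shost->host_blocked) {
		/* unblock only after host_blocked iterates to zero */
		if (--shost->host_blocked != 0)
			return 0;
	}
	if (scsi_host_is_busy(shost)) {
		if (list_empty(&sdev->starved_entry))
			list_add_tail(&sdev->starved_entry,
				      &shost->starved_list);
		return 0;
	}

	/* we can process the command, so we can't be starved */
	if (!list_empty(&sdev->starved_entry))
		list_del_init(&sdev->starved_entry);

	return 1;
}
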
1334 * shost/starget/sdev, since the returned value is not guaranteed and
1345 struct Scsi_Host *shost;
1351 shost = sdev->host;
1354 if (scsi_host_in_recovery(shost) || scsi_host_is_busy(shost) ||
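
Lines 1334-1354 are from scsi_lld_busy(), the lld_busy_fn hook that request-stacking drivers (request-based dm, for example) use to peek at congestion. As the comment fragment at line 1334 says, it is an unlocked snapshot: it ORs together the host, target, and device busy predicates without taking host_lock, accepting that the answer may be stale by the time the caller acts on it. Condensed:

static int scsi_lld_busy(struct request_queue *q)
{
	struct scsi_device *sdev = q->queuedata;
	struct Scsi_Host *shost;

	if (!sdev)
		return 0;

	shost = sdev->host;

	if (scsi_host_in_recovery(shost) || scsi_host_is_busy(shost) ||
	    scsi_target_is_busy(scsi_target(sdev)) ||
	    scsi_device_is_busy(sdev))
		return 1;

	return 0;
}
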
1369 struct Scsi_Host *shost;
1375 shost = sdev->host;
1387 spin_lock(shost->host_lock);
1388 shost->host_busy++;
1390 spin_unlock(shost->host_lock);
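
Lines 1369-1390 are from scsi_kill_request(), which completes a request immediately with DID_NO_CONNECT when the device is gone or the host is being torn down. The busy counters are still incremented (line 1388) even though the command is never issued, because the completion path unconditionally calls scsi_device_unbusy() and would otherwise underflow them; the queue_lock/host_lock dance mirrors the normal issue path. The core of the body:

	cmd->result = DID_NO_CONNECT << 16;

	/*
	 * Completion will run scsi_device_unbusy() and decrement
	 * these counters, so bump them exactly as the normal issue
	 * path would.
	 */
	sdev->device_busy++;
	spin_unlock(sdev->request_queue->queue_lock);
	spin_lock(shost->host_lock);
	shost->host_busy++;
	scsi_target(sdev)->target_busy++;
	spin_unlock(shost->host_lock);
	spin_lock(sdev->request_queue->queue_lock);

	blk_complete_request(req);
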
1454 struct Scsi_Host *shost;
1473 shost = sdev->host;
1510 spin_lock(shost->host_lock);
1523 &shost->starved_list);
1527 if (!scsi_target_queue_ready(shost, sdev))
1530 if (!scsi_host_queue_ready(q, shost, sdev))
1534 shost->host_busy++;
1536 spin_unlock_irq(shost->host_lock);
1563 spin_unlock_irq(shost->host_lock);
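
Lines 1454-1563 are scsi_request_fn(), the block-layer request function and the consumer of all the readiness gates above. For each request it cascades device, then target, then host admission (lines 1527 and 1530), bumps the busy counts while still holding host_lock (line 1534), and only then drops the lock to dispatch into the driver; line 1523 parks the device on the starved list when a host-wide tag map has no free tags. A skeleton of the dispatch loop (request peeking, command preparation, and most error unwinding elided):

	for (;;) {
		/* ... blk_peek_request(), scsi_dev_queue_ready(), command
		 * prep, sdev->device_busy++, then drop q->queue_lock ... */

		spin_lock(shost->host_lock);

		if (!scsi_target_queue_ready(shost, sdev))
			goto not_ready;
		if (!scsi_host_queue_ready(q, shost, sdev))
			goto not_ready;

		scsi_target(sdev)->target_busy++;
		shost->host_busy++;	/* undone later by scsi_device_unbusy() */

		spin_unlock_irq(shost->host_lock);
		/* ... scsi_dispatch_cmd(cmd) issues the command to the LLD ... */
	}
	/* ... */
 not_ready:
	spin_unlock_irq(shost->host_lock);	/* line 1563: failed admission unwinds here */
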
1586 u64 scsi_calculate_bounce_limit(struct Scsi_Host *shost)
1591 if (shost->unchecked_isa_dma)
1600 host_dev = scsi_get_device(shost);
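
scsi_calculate_bounce_limit() (line 1586) computes the highest address the host can DMA to; __scsi_alloc_queue() below feeds the result to blk_queue_bounce_limit(). Legacy unchecked ISA DMA (line 1591) is clamped to the 16 MB ISA window; otherwise the limit comes from the DMA mask of the device behind the host. The 2.6.36 body:

u64 scsi_calculate_bounce_limit(struct Scsi_Host *shost)
{
	struct device *host_dev;
	u64 bounce_limit = 0xffffffff;		/* default: 32-bit DMA */

	if (shost->unchecked_isa_dma)
		return BLK_BOUNCE_ISA;
	/*
	 * Platforms with virtual-DMA translation hardware have no
	 * practical limit.
	 */
	if (!PCI_DMA_BUS_IS_PHYS)
		return BLK_BOUNCE_ANY;

	host_dev = scsi_get_device(shost);
	if (host_dev && host_dev->dma_mask)
		bounce_limit = *host_dev->dma_mask;

	return bounce_limit;
}
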
1608 struct request_queue *__scsi_alloc_queue(struct Scsi_Host *shost,
1612 struct device *dev = shost->shost_gendev.parent;
1621 blk_queue_max_segments(q, min_t(unsigned short, shost->sg_tablesize,
1624 blk_queue_max_hw_sectors(q, shost->max_sectors);
1625 blk_queue_bounce_limit(q, scsi_calculate_bounce_limit(shost));
1626 blk_queue_segment_boundary(q, shost->dma_boundary);
1627 dma_set_seg_boundary(dev, shost->dma_boundary);
1631 if (!shost->use_clustering)
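
__scsi_alloc_queue() (line 1608) is where per-host hardware limits get copied into a freshly allocated request queue: scatter-gather table size (line 1621), maximum transfer size (1624), the bounce limit computed above (1625), and the DMA segment boundary, which is mirrored into the parent device (1626-1627) so the DMA API and the block layer agree. Line 1631 clears the clustering queue flag for hosts that cannot merge adjacent pages into a single segment. Condensed:

	struct device *dev = shost->shost_gendev.parent;
	struct request_queue *q = blk_init_queue(request_fn, NULL);

	if (!q)
		return NULL;

	blk_queue_max_segments(q, min_t(unsigned short, shost->sg_tablesize,
					SCSI_MAX_SG_CHAIN_SEGMENTS));
	blk_queue_max_hw_sectors(q, shost->max_sectors);
	blk_queue_bounce_limit(q, scsi_calculate_bounce_limit(shost));
	blk_queue_segment_boundary(q, shost->dma_boundary);
	dma_set_seg_boundary(dev, shost->dma_boundary);
	blk_queue_max_segment_size(q, dma_get_max_seg_size(dev));

	/* new queue: nobody else can touch queue_flags yet */
	if (!shost->use_clustering)
		queue_flag_clear_unlocked(QUEUE_FLAG_CLUSTER, q);

	return q;
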
1671 * Arguments: shost - Host in question
1681 void scsi_block_requests(struct Scsi_Host *shost)
1683 shost->host_self_blocked = 1;
1693 * Arguments: shost - Host in question
1707 void scsi_unblock_requests(struct Scsi_Host *shost)
1709 shost->host_self_blocked = 0;
1710 scsi_run_host_queues(shost);
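
The final matches are the scsi_block_requests()/scsi_unblock_requests() pair (lines 1681 and 1707), the driver-facing API behind the host_self_blocked flag that scsi_host_is_busy() tests above. Blocking is a bare flag write; unblocking clears the flag and immediately re-runs every queue on the host so that commands which piled up while blocked get another chance. The complete 2.6.36 bodies:

void scsi_block_requests(struct Scsi_Host *shost)
{
	shost->host_self_blocked = 1;
}
EXPORT_SYMBOL(scsi_block_requests);

void scsi_unblock_requests(struct Scsi_Host *shost)
{
	shost->host_self_blocked = 0;
	scsi_run_host_queues(shost);
}
EXPORT_SYMBOL(scsi_unblock_requests);
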