Lines Matching defs:esp (symbol cross-reference from the Linux ESP SCSI core driver, drivers/scsi/esp_scsi.c; each entry is the original source line number followed by the matching line)

33 #define DRV_MODULE_NAME		"esp"
58 shost_printk(KERN_DEBUG, esp->host, f, ## a); \
63 shost_printk(KERN_DEBUG, esp->host, f, ## a); \
68 shost_printk(KERN_DEBUG, esp->host, f, ## a); \
73 shost_printk(KERN_DEBUG, esp->host, f, ## a); \
78 shost_printk(KERN_DEBUG, esp->host, f, ## a); \
83 shost_printk(KERN_DEBUG, esp->host, f, ## a); \
88 shost_printk(KERN_DEBUG, esp->host, f, ## a); \
93 shost_printk(KERN_DEBUG, esp->host, f, ## a); \
98 shost_printk(KERN_DEBUG, esp->host, f, ## a); \
103 shost_printk(KERN_DEBUG, esp->host, f, ## a); \
108 shost_printk(KERN_DEBUG, esp->host, f, ## a); \
113 shost_printk(KERN_DEBUG, esp->host, f, ## a); \
116 #define esp_read8(REG) esp->ops->esp_read8(esp, REG)
117 #define esp_write8(VAL,REG) esp->ops->esp_write8(esp, VAL, REG)
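Note: esp_read8()/esp_write8() funnel every register access through the per-bus ops table (each bus glue supplies its own esp_ops), and the macros assume a local variable named esp is in scope. A minimal standalone sketch of the same vtable indirection; all names other than the pattern itself are illustrative:

    #include <stdint.h>
    #include <stdio.h>

    struct chip;

    /* Per-bus accessors, like the driver's esp->ops->esp_read8/esp_write8. */
    struct chip_ops {
        uint8_t (*read8)(struct chip *c, unsigned long reg);
        void    (*write8)(struct chip *c, uint8_t val, unsigned long reg);
    };

    struct chip {
        const struct chip_ops *ops;
        uint8_t fake_regs[256];          /* stands in for memory-mapped I/O */
    };

    static uint8_t mem_read8(struct chip *c, unsigned long reg)
    {
        return c->fake_regs[reg];
    }

    static void mem_write8(struct chip *c, uint8_t val, unsigned long reg)
    {
        c->fake_regs[reg] = val;
    }

    static const struct chip_ops mem_ops = { mem_read8, mem_write8 };

    /* One call site serves every bus; unlike the driver's macros, the chip
     * pointer is passed explicitly rather than captured from enclosing scope. */
    #define chip_read8(c, REG)       ((c)->ops->read8((c), (REG)))
    #define chip_write8(c, VAL, REG) ((c)->ops->write8((c), (VAL), (REG)))

    int main(void)
    {
        struct chip c = { .ops = &mem_ops };

        chip_write8(&c, 0x5a, 0x10);
        printf("reg 0x10 = %02x\n", chip_read8(&c, 0x10));
        return 0;
    }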
119 static void esp_log_fill_regs(struct esp *esp,
122 p->sreg = esp->sreg;
123 p->seqreg = esp->seqreg;
124 p->sreg2 = esp->sreg2;
125 p->ireg = esp->ireg;
126 p->select_state = esp->select_state;
127 p->event = esp->event;
130 void scsi_esp_cmd(struct esp *esp, u8 val)
133 int idx = esp->esp_event_cur;
135 p = &esp->esp_event_log[idx];
138 esp_log_fill_regs(esp, p);
140 esp->esp_event_cur = (idx + 1) & (ESP_EVENT_LOG_SZ - 1);
147 static void esp_send_dma_cmd(struct esp *esp, int len, int max_len, int cmd)
149 if (esp->flags & ESP_FLAG_USE_FIFO) {
152 scsi_esp_cmd(esp, ESP_CMD_FLUSH);
154 esp_write8(esp->command_block[i], ESP_FDATA);
155 scsi_esp_cmd(esp, cmd);
157 if (esp->rev == FASHME)
158 scsi_esp_cmd(esp, ESP_CMD_FLUSH);
160 esp->ops->send_dma_cmd(esp, esp->command_block_dma,
165 static void esp_event(struct esp *esp, u8 val)
168 int idx = esp->esp_event_cur;
170 p = &esp->esp_event_log[idx];
173 esp_log_fill_regs(esp, p);
175 esp->esp_event_cur = (idx + 1) & (ESP_EVENT_LOG_SZ - 1);
177 esp->event = val;
180 static void esp_dump_cmd_log(struct esp *esp)
182 int idx = esp->esp_event_cur;
185 shost_printk(KERN_INFO, esp->host, "Dumping command log\n");
187 struct esp_event_ent *p = &esp->esp_event_log[idx];
189 shost_printk(KERN_INFO, esp->host,
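Note: scsi_esp_cmd() (line 130) and esp_event() (line 165) append a register snapshot at esp_event_cur and advance it with `& (ESP_EVENT_LOG_SZ - 1)`, so the log is a ring that silently overwrites its oldest entry; esp_dump_cmd_log() (line 180) starts reading at esp_event_cur, which after a wrap is exactly the oldest surviving record. A self-contained sketch of that ring, assuming (as the mask trick requires) a power-of-two size:

    #include <stdio.h>

    #define LOG_SZ 8                 /* power of two, like ESP_EVENT_LOG_SZ */

    struct ent { int event; };

    static struct ent log_buf[LOG_SZ];
    static int log_cur;

    static void log_event(int event)
    {
        log_buf[log_cur].event = event;
        log_cur = (log_cur + 1) & (LOG_SZ - 1);  /* cheap modulo for 2^n */
    }

    static void dump_log(void)
    {
        int idx = log_cur;           /* oldest entry once the ring has wrapped */
        do {
            printf("event %d\n", log_buf[idx].event);
            idx = (idx + 1) & (LOG_SZ - 1);
        } while (idx != log_cur);
    }

    int main(void)
    {
        for (int i = 0; i < 11; i++) /* more writes than slots: wraps */
            log_event(i);
        dump_log();                  /* prints events 3..10, oldest first */
        return 0;
    }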
201 static void esp_flush_fifo(struct esp *esp)
203 scsi_esp_cmd(esp, ESP_CMD_FLUSH);
204 if (esp->rev == ESP236) {
209 shost_printk(KERN_ALERT, esp->host,
218 static void hme_read_fifo(struct esp *esp)
224 esp->fifo[idx++] = esp_read8(ESP_FDATA);
225 esp->fifo[idx++] = esp_read8(ESP_FDATA);
227 if (esp->sreg2 & ESP_STAT2_F1BYTE) {
229 esp->fifo[idx++] = esp_read8(ESP_FDATA);
230 scsi_esp_cmd(esp, ESP_CMD_FLUSH);
232 esp->fifo_cnt = idx;
235 static void esp_set_all_config3(struct esp *esp, u8 val)
240 esp->target[i].esp_config3 = val;
244 static void esp_reset_esp(struct esp *esp)
247 scsi_esp_cmd(esp, ESP_CMD_RC);
248 scsi_esp_cmd(esp, ESP_CMD_NULL | ESP_CMD_DMA);
249 if (esp->rev == FAST)
251 scsi_esp_cmd(esp, ESP_CMD_NULL | ESP_CMD_DMA);
256 esp->max_period = ((35 * esp->ccycle) / 1000);
257 if (esp->rev == FAST) {
261 esp->rev = FAS236;
263 esp->rev = FASHME; /* Version is usually '5'. */
265 esp->rev = FSC;
269 esp->rev = FAS100A;
271 esp->min_period = ((4 * esp->ccycle) / 1000);
273 esp->min_period = ((5 * esp->ccycle) / 1000);
275 if (esp->rev == FAS236) {
284 esp->rev = PCSCSI;
285 esp_write8(esp->config4, ESP_CFG4);
288 esp->max_period = (esp->max_period + 3)>>2;
289 esp->min_period = (esp->min_period + 3)>>2;
291 esp_write8(esp->config1, ESP_CFG1);
292 switch (esp->rev) {
298 esp_write8(esp->config2, ESP_CFG2);
303 esp_write8(esp->config2, ESP_CFG2);
304 esp->prev_cfg3 = esp->target[0].esp_config3;
305 esp_write8(esp->prev_cfg3, ESP_CFG3);
309 esp->config2 |= (ESP_CONFIG2_HME32 | ESP_CONFIG2_HMEFENAB);
315 esp_write8(esp->config2, ESP_CFG2);
316 if (esp->rev == FASHME) {
317 u8 cfg3 = esp->target[0].esp_config3;
320 if (esp->scsi_id >= 8)
322 esp_set_all_config3(esp, cfg3);
324 u32 cfg3 = esp->target[0].esp_config3;
327 esp_set_all_config3(esp, cfg3);
329 esp->prev_cfg3 = esp->target[0].esp_config3;
330 esp_write8(esp->prev_cfg3, ESP_CFG3);
331 if (esp->rev == FASHME) {
332 esp->radelay = 80;
334 if (esp->flags & ESP_FLAG_DIFFERENTIAL)
335 esp->radelay = 0;
337 esp->radelay = 96;
343 esp_write8(esp->config2, ESP_CFG2);
344 esp_set_all_config3(esp,
345 (esp->target[0].esp_config3 |
347 esp->prev_cfg3 = esp->target[0].esp_config3;
348 esp_write8(esp->prev_cfg3, ESP_CFG3);
349 esp->radelay = 32;
357 esp_write8(esp->cfact, ESP_CFACT);
359 esp->prev_stp = 0;
360 esp_write8(esp->prev_stp, ESP_STP);
362 esp->prev_soff = 0;
363 esp_write8(esp->prev_soff, ESP_SOFF);
365 esp_write8(esp->neg_defp, ESP_TIMEO);
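Note: for the fast-clock revisions, lines 288-289 rescale max_period and min_period with `(x + 3) >> 2`. Whatever the unit conversion being performed, the arithmetic itself is a divide by four that rounds up. A worked demonstration (not driver code):

    #include <stdio.h>

    /* (x + 3) >> 2 == ceil(x / 4) for non-negative x: adding 3 carries any
     * remainder over to the next multiple of 4 before the truncating shift. */
    static unsigned int div4_round_up(unsigned int x)
    {
        return (x + 3) >> 2;
    }

    int main(void)
    {
        for (unsigned int x = 0; x <= 9; x++)
            printf("(%u + 3) >> 2 = %u\n", x, div4_round_up(x));
        return 0;
    }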
372 static void esp_map_dma(struct esp *esp, struct scsi_cmnd *cmd)
382 if (esp->flags & ESP_FLAG_NO_DMA_MAP) {
431 static void esp_advance_dma(struct esp *esp, struct esp_cmd_entry *ent,
444 shost_printk(KERN_ERR, esp->host,
446 shost_printk(KERN_ERR, esp->host,
459 static void esp_unmap_dma(struct esp *esp, struct scsi_cmnd *cmd)
461 if (!(esp->flags & ESP_FLAG_NO_DMA_MAP))
465 static void esp_save_pointers(struct esp *esp, struct esp_cmd_entry *ent)
480 static void esp_restore_pointers(struct esp *esp, struct esp_cmd_entry *ent)
495 static void esp_write_tgt_config3(struct esp *esp, int tgt)
497 if (esp->rev > ESP100A) {
498 u8 val = esp->target[tgt].esp_config3;
500 if (val != esp->prev_cfg3) {
501 esp->prev_cfg3 = val;
507 static void esp_write_tgt_sync(struct esp *esp, int tgt)
509 u8 off = esp->target[tgt].esp_offset;
510 u8 per = esp->target[tgt].esp_period;
512 if (off != esp->prev_soff) {
513 esp->prev_soff = off;
516 if (per != esp->prev_stp) {
517 esp->prev_stp = per;
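Note: esp_write_tgt_config3() (line 495) and esp_write_tgt_sync() (line 507) keep a shadow of the last value written to each chip register (prev_cfg3, prev_soff, prev_stp) and skip the hardware write when nothing has changed, sparing a slow register access on every reselection. The same shadow-register pattern reduced to a runnable sketch, hardware stubbed out:

    #include <stdint.h>
    #include <stdio.h>

    static uint8_t prev_soff = 0xff;   /* shadow of the last value written */
    static int hw_writes;

    static void hw_write_soff(uint8_t val)
    {
        (void)val;
        hw_writes++;                   /* stands in for a slow esp_write8() */
    }

    /* Mirrors the listing's pattern: only touch the chip on a change. */
    static void write_tgt_sync(uint8_t off)
    {
        if (off != prev_soff) {
            prev_soff = off;
            hw_write_soff(off);
        }
    }

    int main(void)
    {
        write_tgt_sync(8);
        write_tgt_sync(8);             /* cached: no hardware access */
        write_tgt_sync(0);
        printf("hardware writes: %d\n", hw_writes);   /* prints 2 */
        return 0;
    }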
522 static u32 esp_dma_length_limit(struct esp *esp, u32 dma_addr, u32 dma_len)
524 if (esp->rev == FASHME) {
630 static void esp_map_sense(struct esp *esp, struct esp_cmd_entry *ent)
633 if (esp->flags & ESP_FLAG_NO_DMA_MAP) {
638 ent->sense_dma = dma_map_single(esp->dev, ent->sense_ptr,
642 static void esp_unmap_sense(struct esp *esp, struct esp_cmd_entry *ent)
644 if (!(esp->flags & ESP_FLAG_NO_DMA_MAP))
645 dma_unmap_single(esp->dev, ent->sense_dma,
657 static void esp_autosense(struct esp *esp, struct esp_cmd_entry *ent)
671 esp_map_sense(esp, ent);
675 esp->active_cmd = ent;
677 p = esp->command_block;
678 esp->msg_out_len = 0;
689 esp->select_state = ESP_SELECT_BASIC;
692 if (esp->rev == FASHME)
696 esp_write_tgt_sync(esp, tgt);
697 esp_write_tgt_config3(esp, tgt);
699 val = (p - esp->command_block);
701 esp_send_dma_cmd(esp, val, 16, ESP_CMD_SELA);
704 static struct esp_cmd_entry *find_and_prep_issuable_command(struct esp *esp)
708 list_for_each_entry(ent, &esp->queued_cmds, list) {
735 static void esp_maybe_execute_command(struct esp *esp)
746 if (esp->active_cmd ||
747 (esp->flags & ESP_FLAG_RESETTING))
750 ent = find_and_prep_issuable_command(esp);
755 esp_autosense(esp, ent);
763 tp = &esp->target[tgt];
765 list_move(&ent->list, &esp->active_cmds);
767 esp->active_cmd = ent;
769 esp_map_dma(esp, cmd);
770 esp_save_pointers(esp, ent);
775 p = esp->command_block;
777 esp->msg_out_len = 0;
798 if (esp->rev == FASHME && esp_need_to_nego_wide(tp)) {
799 esp->msg_out_len =
800 spi_populate_width_msg(&esp->msg_out[0],
805 esp->msg_out_len =
806 spi_populate_sync_msg(&esp->msg_out[0],
815 if (esp->msg_out_len)
822 if (ent->tag[0] && esp->rev == ESP100) {
830 esp->cmd_bytes_left = cmd->cmd_len;
831 esp->cmd_bytes_ptr = &cmd->cmnd[0];
834 for (i = esp->msg_out_len - 1;
836 esp->msg_out[i + 2] = esp->msg_out[i];
837 esp->msg_out[0] = ent->tag[0];
838 esp->msg_out[1] = ent->tag[1];
839 esp->msg_out_len += 2;
843 esp->select_state = ESP_SELECT_MSGOUT;
856 esp->select_state = ESP_SELECT_BASIC;
859 if (esp->rev == FASHME)
863 esp_write_tgt_sync(esp, tgt);
864 esp_write_tgt_config3(esp, tgt);
866 val = (p - esp->command_block);
875 esp_send_dma_cmd(esp, val, 16, start_cmd);
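Note: when a tagged command also has a negotiation message queued, lines 834-839 make room for the two tag bytes by copying msg_out upward from the top down (so no byte is overwritten before it is moved) and then writing the tag at the front. That in-place prepend as a standalone demo; the byte values are illustrative only:

    #include <stdio.h>

    int main(void)
    {
        unsigned char msg[16] = { 0xaa, 0xbb, 0xcc };  /* pending messages */
        int len = 3;
        unsigned char tag0 = 0x20, tag1 = 5;           /* illustrative tag */

        /* Copy top-down, as the driver does, so sources survive the move. */
        for (int i = len - 1; i >= 0; i--)
            msg[i + 2] = msg[i];
        msg[0] = tag0;
        msg[1] = tag1;
        len += 2;

        for (int i = 0; i < len; i++)
            printf("%02x ", msg[i]);                   /* 20 05 aa bb cc */
        printf("\n");
        return 0;
    }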
878 static struct esp_cmd_entry *esp_get_ent(struct esp *esp)
880 struct list_head *head = &esp->esp_cmd_pool;
893 static void esp_put_ent(struct esp *esp, struct esp_cmd_entry *ent)
895 list_add(&ent->list, &esp->esp_cmd_pool);
898 static void esp_cmd_is_done(struct esp *esp, struct esp_cmd_entry *ent,
905 esp->active_cmd = NULL;
906 esp_unmap_dma(esp, cmd);
919 esp_unmap_sense(esp, ent);
931 printk("esp%d: tgt[%d] lun[%d] AUTO SENSE[ ",
932 esp->host->unique_id, tgt, lun);
942 esp_put_ent(esp, ent);
944 esp_maybe_execute_command(esp);
947 static void esp_event_queue_full(struct esp *esp, struct esp_cmd_entry *ent)
958 struct esp *esp = shost_priv(dev->host);
962 ent = esp_get_ent(esp);
971 list_add_tail(&ent->list, &esp->queued_cmds);
973 esp_maybe_execute_command(esp);
980 static int esp_check_gross_error(struct esp *esp)
982 if (esp->sreg & ESP_STAT_SPAM) {
989 shost_printk(KERN_ERR, esp->host,
990 "Gross error sreg[%02x]\n", esp->sreg);
997 static int esp_check_spur_intr(struct esp *esp)
999 switch (esp->rev) {
1005 esp->sreg &= ~ESP_STAT_INTR;
1009 if (!(esp->sreg & ESP_STAT_INTR)) {
1010 if (esp->ireg & ESP_INTR_SR)
1016 if (!esp->ops->dma_error(esp)) {
1017 shost_printk(KERN_ERR, esp->host,
1019 esp->sreg);
1023 shost_printk(KERN_ERR, esp->host, "DMA error\n");
1034 static void esp_schedule_reset(struct esp *esp)
1038 esp->flags |= ESP_FLAG_RESETTING;
1039 esp_event(esp, ESP_EVENT_RESET);
1046 static struct esp_cmd_entry *esp_reconnect_with_tag(struct esp *esp,
1053 shost_printk(KERN_ERR, esp->host,
1061 if (esp->ops->irq_pending(esp))
1065 shost_printk(KERN_ERR, esp->host,
1070 esp->sreg = esp_read8(ESP_STATUS);
1071 esp->ireg = esp_read8(ESP_INTRPT);
1074 i, esp->ireg, esp->sreg);
1076 if (esp->ireg & ESP_INTR_DC) {
1077 shost_printk(KERN_ERR, esp->host,
1082 if ((esp->sreg & ESP_STAT_PMASK) != ESP_MIP) {
1083 shost_printk(KERN_ERR, esp->host,
1084 "Reconnect, not MIP sreg[%02x].\n", esp->sreg);
1089 esp->command_block[0] = 0xff;
1090 esp->command_block[1] = 0xff;
1091 esp->ops->send_dma_cmd(esp, esp->command_block_dma,
1095 scsi_esp_cmd(esp, ESP_CMD_MOK);
1098 if (esp->ops->irq_pending(esp)) {
1099 esp->sreg = esp_read8(ESP_STATUS);
1100 esp->ireg = esp_read8(ESP_INTRPT);
1101 if (esp->ireg & ESP_INTR_FDONE)
1107 shost_printk(KERN_ERR, esp->host, "Reconnect IRQ2 timeout\n");
1110 esp->ops->dma_drain(esp);
1111 esp->ops->dma_invalidate(esp);
1114 i, esp->ireg, esp->sreg,
1115 esp->command_block[0],
1116 esp->command_block[1]);
1118 if (esp->command_block[0] < SIMPLE_QUEUE_TAG ||
1119 esp->command_block[0] > ORDERED_QUEUE_TAG) {
1120 shost_printk(KERN_ERR, esp->host,
1122 esp->command_block[0]);
1126 ent = lp->tagged_cmds[esp->command_block[1]];
1128 shost_printk(KERN_ERR, esp->host,
1130 esp->command_block[1]);
1137 static int esp_reconnect(struct esp *esp)
1145 BUG_ON(esp->active_cmd);
1146 if (esp->rev == FASHME) {
1150 target = esp->fifo[0];
1151 lun = esp->fifo[1] & 0x7;
1161 if (!(bits & esp->scsi_id_mask))
1163 bits &= ~esp->scsi_id_mask;
1170 scsi_esp_cmd(esp, ESP_CMD_FLUSH);
1171 if (esp->rev == ESP100) {
1181 scsi_esp_cmd(esp, ESP_CMD_NULL);
1184 esp_write_tgt_sync(esp, target);
1185 esp_write_tgt_config3(esp, target);
1187 scsi_esp_cmd(esp, ESP_CMD_MOK);
1189 if (esp->rev == FASHME)
1193 tp = &esp->target[target];
1196 shost_printk(KERN_ERR, esp->host,
1205 ent = esp_reconnect_with_tag(esp, lp);
1210 esp->active_cmd = ent;
1212 esp_event(esp, ESP_EVENT_CHECK_PHASE);
1213 esp_restore_pointers(esp, ent);
1214 esp->flags |= ESP_FLAG_QUICKIRQ_CHECK;
1218 esp_schedule_reset(esp);
1222 static int esp_finish_select(struct esp *esp)
1228 esp->select_state = ESP_SELECT_NONE;
1230 esp->seqreg = esp_read8(ESP_SSTEP) & ESP_STEP_VBITS;
1231 ent = esp->active_cmd;
1234 if (esp->ops->dma_error(esp)) {
1238 esp_schedule_reset(esp);
1239 esp_cmd_is_done(esp, ent, cmd, DID_ERROR);
1243 esp->ops->dma_invalidate(esp);
1245 if (esp->ireg == (ESP_INTR_RSEL | ESP_INTR_FDONE)) {
1246 struct esp_target_data *tp = &esp->target[cmd->device->id];
1253 esp_unmap_dma(esp, cmd);
1256 esp->cmd_bytes_ptr = NULL;
1257 esp->cmd_bytes_left = 0;
1259 esp_unmap_sense(esp, ent);
1265 list_move(&ent->list, &esp->queued_cmds);
1266 esp->active_cmd = NULL;
1274 if (esp->ireg == ESP_INTR_DC) {
1281 esp->target[dev->id].flags |= ESP_TGT_CHECK_NEGO;
1283 scsi_esp_cmd(esp, ESP_CMD_ESEL);
1284 esp_cmd_is_done(esp, ent, cmd, DID_BAD_TARGET);
1288 if (esp->ireg == (ESP_INTR_FDONE | ESP_INTR_BSERV)) {
1292 if (esp->rev <= ESP236) {
1295 scsi_esp_cmd(esp, ESP_CMD_NULL);
1298 (!esp->prev_soff ||
1299 ((esp->sreg & ESP_STAT_PMASK) != ESP_DIP)))
1300 esp_flush_fifo(esp);
1306 esp_event(esp, ESP_EVENT_CHECK_PHASE);
1310 shost_printk(KERN_INFO, esp->host,
1311 "Unexpected selection completion ireg[%x]\n", esp->ireg);
1312 esp_schedule_reset(esp);
1316 static int esp_data_bytes_sent(struct esp *esp, struct esp_cmd_entry *ent,
1322 if (esp->prev_cfg3 & ESP_CONFIG3_EWIDE)
1326 if (!(esp->sreg & ESP_STAT_TCNT)) {
1329 if (esp->rev == FASHME)
1331 if (esp->rev == PCSCSI && (esp->config2 & ESP_CONFIG2_FENAB))
1335 bytes_sent = esp->data_dma_len;
1337 bytes_sent -= esp->send_cmd_residual;
1372 if (!esp->prev_soff) {
1376 if (esp->rev == ESP100) {
1390 esp->sreg = esp_read8(ESP_STATUS);
1391 phase = esp->sreg & ESP_STAT_PMASK;
1405 esp_flush_fifo(esp);
1410 static void esp_setsync(struct esp *esp, struct esp_target_data *tp,
1420 esp_soff |= esp->radelay;
1421 if (esp->rev >= FAS236) {
1423 if (esp->rev >= FAS100A)
1427 if (esp->rev == FASHME)
1428 esp_soff &= ~esp->radelay;
1433 esp->prev_cfg3 = tp->esp_config3;
1434 esp_write8(esp->prev_cfg3, ESP_CFG3);
1438 tp->esp_period = esp->prev_stp = esp_stp;
1439 tp->esp_offset = esp->prev_soff = esp_soff;
1449 static void esp_msgin_reject(struct esp *esp)
1451 struct esp_cmd_entry *ent = esp->active_cmd;
1457 tp = &esp->target[tgt];
1464 scsi_esp_cmd(esp, ESP_CMD_RATN);
1466 esp->msg_out_len =
1467 spi_populate_sync_msg(&esp->msg_out[0],
1471 scsi_esp_cmd(esp, ESP_CMD_SATN);
1480 esp_setsync(esp, tp, 0, 0, 0, 0);
1481 scsi_esp_cmd(esp, ESP_CMD_RATN);
1485 shost_printk(KERN_INFO, esp->host, "Unexpected MESSAGE REJECT\n");
1486 esp_schedule_reset(esp);
1489 static void esp_msgin_sdtr(struct esp *esp, struct esp_target_data *tp)
1491 u8 period = esp->msg_in[3];
1492 u8 offset = esp->msg_in[4];
1504 if (period > esp->max_period) {
1508 if (period < esp->min_period)
1511 one_clock = esp->ccycle / 1000;
1513 if (stp && esp->rev >= FAS236) {
1521 esp_setsync(esp, tp, period, offset, stp, offset);
1525 esp->msg_out[0] = MESSAGE_REJECT;
1526 esp->msg_out_len = 1;
1527 scsi_esp_cmd(esp, ESP_CMD_SATN);
1533 esp->msg_out_len =
1534 spi_populate_sync_msg(&esp->msg_out[0],
1537 scsi_esp_cmd(esp, ESP_CMD_SATN);
1540 static void esp_msgin_wdtr(struct esp *esp, struct esp_target_data *tp)
1542 int size = 8 << esp->msg_in[3];
1545 if (esp->rev != FASHME)
1563 esp->prev_cfg3 = cfg3;
1572 scsi_esp_cmd(esp, ESP_CMD_RATN);
1574 esp->msg_out_len =
1575 spi_populate_sync_msg(&esp->msg_out[0],
1579 scsi_esp_cmd(esp, ESP_CMD_SATN);
1584 esp->msg_out[0] = MESSAGE_REJECT;
1585 esp->msg_out_len = 1;
1586 scsi_esp_cmd(esp, ESP_CMD_SATN);
1589 static void esp_msgin_extended(struct esp *esp)
1591 struct esp_cmd_entry *ent = esp->active_cmd;
1596 tp = &esp->target[tgt];
1597 if (esp->msg_in[2] == EXTENDED_SDTR) {
1598 esp_msgin_sdtr(esp, tp);
1601 if (esp->msg_in[2] == EXTENDED_WDTR) {
1602 esp_msgin_wdtr(esp, tp);
1606 shost_printk(KERN_INFO, esp->host,
1607 "Unexpected extended msg type %x\n", esp->msg_in[2]);
1609 esp->msg_out[0] = MESSAGE_REJECT;
1610 esp->msg_out_len = 1;
1611 scsi_esp_cmd(esp, ESP_CMD_SATN);
1617 static int esp_msgin_process(struct esp *esp)
1619 u8 msg0 = esp->msg_in[0];
1620 int len = esp->msg_in_len;
1624 shost_printk(KERN_INFO, esp->host,
1633 if (len < esp->msg_in[1] + 2)
1635 esp_msgin_extended(esp);
1644 if (esp->msg_in[1] != 1)
1647 ent = esp->active_cmd;
1661 esp_restore_pointers(esp, esp->active_cmd);
1664 esp_save_pointers(esp, esp->active_cmd);
1669 struct esp_cmd_entry *ent = esp->active_cmd;
1672 esp_event(esp, ESP_EVENT_FREE_BUS);
1673 esp->flags |= ESP_FLAG_QUICKIRQ_CHECK;
1677 esp_msgin_reject(esp);
1682 esp->msg_out[0] = MESSAGE_REJECT;
1683 esp->msg_out_len = 1;
1684 scsi_esp_cmd(esp, ESP_CMD_SATN);
1689 static int esp_process_event(struct esp *esp)
1696 esp->event, esp->sreg & ESP_STAT_PMASK);
1697 switch (esp->event) {
1699 switch (esp->sreg & ESP_STAT_PMASK) {
1701 esp_event(esp, ESP_EVENT_DATA_OUT);
1704 esp_event(esp, ESP_EVENT_DATA_IN);
1707 esp_flush_fifo(esp);
1708 scsi_esp_cmd(esp, ESP_CMD_ICCSEQ);
1709 esp_event(esp, ESP_EVENT_STATUS);
1710 esp->flags |= ESP_FLAG_QUICKIRQ_CHECK;
1714 esp_event(esp, ESP_EVENT_MSGOUT);
1718 esp_event(esp, ESP_EVENT_MSGIN);
1722 esp_event(esp, ESP_EVENT_CMD_START);
1726 shost_printk(KERN_INFO, esp->host,
1728 esp->sreg);
1729 esp_schedule_reset(esp);
1739 struct esp_cmd_entry *ent = esp->active_cmd;
1744 if (esp->rev == ESP100)
1745 scsi_esp_cmd(esp, ESP_CMD_NULL);
1752 if (esp->ops->dma_length_limit)
1753 dma_len = esp->ops->dma_length_limit(esp, dma_addr,
1756 dma_len = esp_dma_length_limit(esp, dma_addr, dma_len);
1758 esp->data_dma_len = dma_len;
1761 shost_printk(KERN_ERR, esp->host,
1763 shost_printk(KERN_ERR, esp->host,
1767 esp_schedule_reset(esp);
1774 esp->ops->send_dma_cmd(esp, dma_addr, dma_len, dma_len,
1776 esp_event(esp, ESP_EVENT_DATA_DONE);
1780 struct esp_cmd_entry *ent = esp->active_cmd;
1784 if (esp->ops->dma_error(esp)) {
1785 shost_printk(KERN_INFO, esp->host,
1787 esp_schedule_reset(esp);
1794 esp->ops->dma_drain(esp);
1796 esp->ops->dma_invalidate(esp);
1798 if (esp->ireg != ESP_INTR_BSERV) {
1802 shost_printk(KERN_INFO, esp->host,
1804 esp_schedule_reset(esp);
1808 bytes_sent = esp_data_bytes_sent(esp, ent, cmd);
1815 esp_schedule_reset(esp);
1819 esp_advance_dma(esp, ent, cmd, bytes_sent);
1820 esp_event(esp, ESP_EVENT_CHECK_PHASE);
1825 struct esp_cmd_entry *ent = esp->active_cmd;
1827 if (esp->ireg & ESP_INTR_FDONE) {
1830 scsi_esp_cmd(esp, ESP_CMD_MOK);
1831 } else if (esp->ireg == ESP_INTR_BSERV) {
1834 esp_event(esp, ESP_EVENT_MSGIN);
1839 shost_printk(KERN_INFO, esp->host,
1842 esp_schedule_reset(esp);
1846 esp_event(esp, ESP_EVENT_FREE_BUS);
1847 esp->flags |= ESP_FLAG_QUICKIRQ_CHECK;
1851 struct esp_cmd_entry *ent = esp->active_cmd;
1856 scsi_esp_cmd(esp, ESP_CMD_ESEL);
1862 esp_event_queue_full(esp, ent);
1867 esp_autosense(esp, ent);
1869 esp_cmd_is_done(esp, ent, cmd, DID_OK);
1876 esp->active_cmd = NULL;
1877 esp_maybe_execute_command(esp);
1879 shost_printk(KERN_INFO, esp->host,
1882 esp_schedule_reset(esp);
1885 if (esp->active_cmd)
1886 esp->flags |= ESP_FLAG_QUICKIRQ_CHECK;
1890 scsi_esp_cmd(esp, ESP_CMD_FLUSH);
1895 for (i = 0; i < esp->msg_out_len; i++)
1896 printk("%02x ", esp->msg_out[i]);
1900 if (esp->rev == FASHME) {
1904 for (i = 0; i < esp->msg_out_len; i++) {
1905 esp_write8(esp->msg_out[i], ESP_FDATA);
1908 scsi_esp_cmd(esp, ESP_CMD_TI);
1910 if (esp->msg_out_len == 1) {
1911 esp_write8(esp->msg_out[0], ESP_FDATA);
1912 scsi_esp_cmd(esp, ESP_CMD_TI);
1913 } else if (esp->flags & ESP_FLAG_USE_FIFO) {
1914 for (i = 0; i < esp->msg_out_len; i++)
1915 esp_write8(esp->msg_out[i], ESP_FDATA);
1916 scsi_esp_cmd(esp, ESP_CMD_TI);
1919 memcpy(esp->command_block,
1920 esp->msg_out,
1921 esp->msg_out_len);
1923 esp->ops->send_dma_cmd(esp,
1924 esp->command_block_dma,
1925 esp->msg_out_len,
1926 esp->msg_out_len,
1931 esp_event(esp, ESP_EVENT_MSGOUT_DONE);
1935 if (esp->rev == FASHME) {
1936 scsi_esp_cmd(esp, ESP_CMD_FLUSH);
1938 if (esp->msg_out_len > 1)
1939 esp->ops->dma_invalidate(esp);
1944 if (!(esp->ireg & ESP_INTR_DC))
1945 scsi_esp_cmd(esp, ESP_CMD_NULL);
1948 esp->msg_out_len = 0;
1950 esp_event(esp, ESP_EVENT_CHECK_PHASE);
1953 if (esp->ireg & ESP_INTR_BSERV) {
1954 if (esp->rev == FASHME) {
1957 scsi_esp_cmd(esp, ESP_CMD_FLUSH);
1959 scsi_esp_cmd(esp, ESP_CMD_FLUSH);
1960 if (esp->rev == ESP100)
1961 scsi_esp_cmd(esp, ESP_CMD_NULL);
1963 scsi_esp_cmd(esp, ESP_CMD_TI);
1964 esp->flags |= ESP_FLAG_QUICKIRQ_CHECK;
1967 if (esp->ireg & ESP_INTR_FDONE) {
1970 if (esp->rev == FASHME)
1971 val = esp->fifo[0];
1974 esp->msg_in[esp->msg_in_len++] = val;
1978 if (!esp_msgin_process(esp))
1979 esp->msg_in_len = 0;
1981 if (esp->rev == FASHME)
1982 scsi_esp_cmd(esp, ESP_CMD_FLUSH);
1984 scsi_esp_cmd(esp, ESP_CMD_MOK);
1987 if (esp->event == ESP_EVENT_RESET)
1990 if (esp->event != ESP_EVENT_FREE_BUS)
1991 esp_event(esp, ESP_EVENT_CHECK_PHASE);
1993 shost_printk(KERN_INFO, esp->host,
1995 esp_schedule_reset(esp);
2000 memcpy(esp->command_block, esp->cmd_bytes_ptr,
2001 esp->cmd_bytes_left);
2002 esp_send_dma_cmd(esp, esp->cmd_bytes_left, 16, ESP_CMD_TI);
2003 esp_event(esp, ESP_EVENT_CMD_DONE);
2004 esp->flags |= ESP_FLAG_QUICKIRQ_CHECK;
2007 esp->ops->dma_invalidate(esp);
2008 if (esp->ireg & ESP_INTR_BSERV) {
2009 esp_event(esp, ESP_EVENT_CHECK_PHASE);
2012 esp_schedule_reset(esp);
2016 scsi_esp_cmd(esp, ESP_CMD_RS);
2020 shost_printk(KERN_INFO, esp->host,
2021 "Unexpected event %x, resetting\n", esp->event);
2022 esp_schedule_reset(esp);
2028 static void esp_reset_cleanup_one(struct esp *esp, struct esp_cmd_entry *ent)
2032 esp_unmap_dma(esp, cmd);
2037 esp_unmap_sense(esp, ent);
2041 esp_put_ent(esp, ent);
2052 static void esp_reset_cleanup(struct esp *esp)
2057 list_for_each_entry_safe(ent, tmp, &esp->queued_cmds, list) {
2063 esp_put_ent(esp, ent);
2066 list_for_each_entry_safe(ent, tmp, &esp->active_cmds, list) {
2067 if (ent == esp->active_cmd)
2068 esp->active_cmd = NULL;
2069 esp_reset_cleanup_one(esp, ent);
2072 BUG_ON(esp->active_cmd != NULL);
2076 struct esp_target_data *tp = &esp->target[i];
2090 esp->flags &= ~ESP_FLAG_RESETTING;
2094 static void __esp_interrupt(struct esp *esp)
2102 esp->sreg = esp_read8(ESP_STATUS);
2103 esp->seqreg = esp_read8(ESP_SSTEP);
2104 esp->ireg = esp_read8(ESP_INTRPT);
2106 if (esp->flags & ESP_FLAG_RESETTING) {
2109 if (esp_check_gross_error(esp))
2112 finish_reset = esp_check_spur_intr(esp);
2117 if (esp->ireg & ESP_INTR_SR)
2121 esp_reset_cleanup(esp);
2122 if (esp->eh_reset) {
2123 complete(esp->eh_reset);
2124 esp->eh_reset = NULL;
2129 phase = (esp->sreg & ESP_STAT_PMASK);
2130 if (esp->rev == FASHME) {
2132 esp->select_state == ESP_SELECT_NONE &&
2133 esp->event != ESP_EVENT_STATUS &&
2134 esp->event != ESP_EVENT_DATA_DONE) ||
2135 (esp->ireg & ESP_INTR_RSEL)) {
2136 esp->sreg2 = esp_read8(ESP_STATUS2);
2137 if (!(esp->sreg2 & ESP_STAT2_FEMPTY) ||
2138 (esp->sreg2 & ESP_STAT2_F1BYTE))
2139 hme_read_fifo(esp);
2145 esp->sreg, esp->seqreg, esp->sreg2, esp->ireg);
2149 if (esp->ireg & (ESP_INTR_S | ESP_INTR_SATN | ESP_INTR_IC)) {
2150 shost_printk(KERN_INFO, esp->host,
2151 "unexpected IREG %02x\n", esp->ireg);
2152 if (esp->ireg & ESP_INTR_IC)
2153 esp_dump_cmd_log(esp);
2155 esp_schedule_reset(esp);
2157 if (esp->ireg & ESP_INTR_RSEL) {
2158 if (esp->active_cmd)
2159 (void) esp_finish_select(esp);
2160 intr_done = esp_reconnect(esp);
2163 if (esp->select_state != ESP_SELECT_NONE)
2164 intr_done = esp_finish_select(esp);
2168 intr_done = esp_process_event(esp);
2173 struct esp *esp = dev_id;
2177 spin_lock_irqsave(esp->host->host_lock, flags);
2179 if (esp->ops->irq_pending(esp)) {
2184 __esp_interrupt(esp);
2185 if (!(esp->flags & ESP_FLAG_QUICKIRQ_CHECK))
2187 esp->flags &= ~ESP_FLAG_QUICKIRQ_CHECK;
2190 if (esp->ops->irq_pending(esp))
2197 spin_unlock_irqrestore(esp->host->host_lock, flags);
2203 static void esp_get_revision(struct esp *esp)
2207 esp->config1 = (ESP_CONFIG1_PENABLE | (esp->scsi_id & 7));
2208 if (esp->config2 == 0) {
2209 esp->config2 = (ESP_CONFIG2_SCSI2ENAB | ESP_CONFIG2_REGPARITY);
2210 esp_write8(esp->config2, ESP_CFG2);
2215 esp->config2 = 0;
2222 esp->rev = ESP100;
2227 esp_set_all_config3(esp, 5);
2228 esp->prev_cfg3 = 5;
2229 esp_write8(esp->config2, ESP_CFG2);
2231 esp_write8(esp->prev_cfg3, ESP_CFG3);
2238 esp->rev = ESP100A;
2240 esp_set_all_config3(esp, 0);
2241 esp->prev_cfg3 = 0;
2242 esp_write8(esp->prev_cfg3, ESP_CFG3);
2247 if (esp->cfact == 0 || esp->cfact > ESP_CCF_F5) {
2248 esp->rev = FAST;
2249 esp->sync_defp = SYNC_DEFP_FAST;
2251 esp->rev = ESP236;
2256 static void esp_init_swstate(struct esp *esp)
2260 INIT_LIST_HEAD(&esp->queued_cmds);
2261 INIT_LIST_HEAD(&esp->active_cmds);
2262 INIT_LIST_HEAD(&esp->esp_cmd_pool);
2269 esp->target[i].flags = 0;
2270 esp->target[i].nego_goal_period = 0;
2271 esp->target[i].nego_goal_offset = 0;
2272 esp->target[i].nego_goal_width = 0;
2273 esp->target[i].nego_goal_tags = 0;
2278 static void esp_bootup_reset(struct esp *esp)
2283 esp->ops->reset_dma(esp);
2286 esp_reset_esp(esp);
2293 scsi_esp_cmd(esp, ESP_CMD_RS);
2296 esp_write8(esp->config1, ESP_CFG1);
2302 static void esp_set_clock_params(struct esp *esp)
2339 fhz = esp->cfreq;
2355 esp->cfact = (ccf == 8 ? 0 : ccf);
2356 esp->cfreq = fhz;
2357 esp->ccycle = ESP_HZ_TO_CYCLE(fhz);
2358 esp->ctick = ESP_TICK(ccf, esp->ccycle);
2359 esp->neg_defp = ESP_NEG_DEFP(fhz, ccf);
2360 esp->sync_defp = SYNC_DEFP_SLOW;
2377 int scsi_esp_register(struct esp *esp)
2382 if (!esp->num_tags)
2383 esp->num_tags = ESP_DEFAULT_TAGS;
2384 esp->host->transportt = esp_transport_template;
2385 esp->host->max_lun = ESP_MAX_LUN;
2386 esp->host->cmd_per_lun = 2;
2387 esp->host->unique_id = instance;
2389 esp_set_clock_params(esp);
2391 esp_get_revision(esp);
2393 esp_init_swstate(esp);
2395 esp_bootup_reset(esp);
2397 dev_printk(KERN_INFO, esp->dev, "esp%u: regs[%1p:%1p] irq[%u]\n",
2398 esp->host->unique_id, esp->regs, esp->dma_regs,
2399 esp->host->irq);
2400 dev_printk(KERN_INFO, esp->dev,
2401 "esp%u: is a %s, %u MHz (ccf=%u), SCSI ID %u\n",
2402 esp->host->unique_id, esp_chip_names[esp->rev],
2403 esp->cfreq / 1000000, esp->cfact, esp->scsi_id);
2408 err = scsi_add_host(esp->host, esp->dev);
2414 scsi_scan_host(esp->host);
2420 void scsi_esp_unregister(struct esp *esp)
2422 scsi_remove_host(esp->host);
2428 struct esp *esp = shost_priv(dev_to_shost(&starget->dev));
2429 struct esp_target_data *tp = &esp->target[starget->id];
2438 struct esp *esp = shost_priv(dev_to_shost(&starget->dev));
2439 struct esp_target_data *tp = &esp->target[starget->id];
2446 struct esp *esp = shost_priv(dev->host);
2447 struct esp_target_data *tp = &esp->target[dev->id];
2455 spi_min_period(tp->starget) = esp->min_period;
2458 if (esp->flags & ESP_FLAG_WIDE_CAPABLE)
2468 struct esp *esp = shost_priv(dev->host);
2469 struct esp_target_data *tp = &esp->target[dev->id];
2472 scsi_change_queue_depth(dev, esp->num_tags);
2492 struct esp *esp = shost_priv(cmd->device->host);
2500 spin_lock_irqsave(esp->host->host_lock, flags);
2501 shost_printk(KERN_ERR, esp->host, "Aborting command [%p:%02x]\n",
2503 ent = esp->active_cmd;
2505 shost_printk(KERN_ERR, esp->host,
2508 list_for_each_entry(ent, &esp->queued_cmds, list) {
2509 shost_printk(KERN_ERR, esp->host, "Queued command [%p:%02x]\n",
2512 list_for_each_entry(ent, &esp->active_cmds, list) {
2513 shost_printk(KERN_ERR, esp->host, " Active command [%p:%02x]\n",
2516 esp_dump_cmd_log(esp);
2517 spin_unlock_irqrestore(esp->host->host_lock, flags);
2519 spin_lock_irqsave(esp->host->host_lock, flags);
2522 list_for_each_entry(tmp, &esp->queued_cmds, list) {
2538 esp_put_ent(esp, ent);
2545 ent = esp->active_cmd;
2551 if (esp->msg_out_len)
2557 esp->msg_out[0] = ABORT_TASK_SET;
2558 esp->msg_out_len = 1;
2561 scsi_esp_cmd(esp, ESP_CMD_SATN);
2582 spin_unlock_irqrestore(esp->host->host_lock, flags);
2585 spin_lock_irqsave(esp->host->host_lock, flags);
2587 spin_unlock_irqrestore(esp->host->host_lock, flags);
2595 spin_unlock_irqrestore(esp->host->host_lock, flags);
2603 spin_unlock_irqrestore(esp->host->host_lock, flags);
2609 struct esp *esp = shost_priv(cmd->device->host);
2615 spin_lock_irqsave(esp->host->host_lock, flags);
2617 esp->eh_reset = &eh_reset;
2624 esp->flags |= ESP_FLAG_RESETTING;
2625 scsi_esp_cmd(esp, ESP_CMD_RS);
2627 spin_unlock_irqrestore(esp->host->host_lock, flags);
2632 spin_lock_irqsave(esp->host->host_lock, flags);
2633 esp->eh_reset = NULL;
2634 spin_unlock_irqrestore(esp->host->host_lock, flags);
2645 struct esp *esp = shost_priv(cmd->device->host);
2648 spin_lock_irqsave(esp->host->host_lock, flags);
2649 esp_bootup_reset(esp);
2650 esp_reset_cleanup(esp);
2651 spin_unlock_irqrestore(esp->host->host_lock, flags);
2660 return "esp";
2665 .name = "esp",
2687 struct esp *esp = shost_priv(host);
2690 if (esp->flags & ESP_FLAG_DIFFERENTIAL)
2701 struct esp *esp = shost_priv(host);
2702 struct esp_target_data *tp = &esp->target[target->id];
2704 if (esp->flags & ESP_FLAG_DISABLE_SYNC)
2714 struct esp *esp = shost_priv(host);
2715 struct esp_target_data *tp = &esp->target[target->id];
2724 struct esp *esp = shost_priv(host);
2725 struct esp_target_data *tp = &esp->target[target->id];
2784 static inline unsigned int esp_wait_for_fifo(struct esp *esp)
2797 shost_printk(KERN_ERR, esp->host, "FIFO is empty. sreg [%02x]\n",
2802 static inline int esp_wait_for_intr(struct esp *esp)
2807 esp->sreg = esp_read8(ESP_STATUS);
2808 if (esp->sreg & ESP_STAT_INTR)
2814 shost_printk(KERN_ERR, esp->host, "IRQ timeout. sreg [%02x]\n",
2815 esp->sreg);
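Note: esp_wait_for_fifo() (line 2784) and esp_wait_for_intr() (line 2802) both spin on a status register for a bounded number of iterations, keeping the last value read so the timeout message can report it. A generic sketch of that bounded-poll idiom with the register read simulated; the loop budget and bit names are illustrative:

    #include <stdio.h>

    #define STAT_INTR 0x80                 /* illustrative status bit */

    static int fake_countdown = 3;

    /* Stands in for esp_read8(ESP_STATUS); fires after a few polls. */
    static unsigned char read_status(void)
    {
        return --fake_countdown <= 0 ? STAT_INTR : 0;
    }

    /* Bounded poll: 0 on success, -1 on timeout, stashing the last status
     * the way the driver keeps esp->sreg around for its error message. */
    static int wait_for_intr(unsigned char *sreg)
    {
        for (int i = 0; i < 5000; i++) {
            *sreg = read_status();
            if (*sreg & STAT_INTR)
                return 0;
        }
        fprintf(stderr, "IRQ timeout. sreg [%02x]\n", *sreg);
        return -1;
    }

    int main(void)
    {
        unsigned char sreg = 0;
        int ret = wait_for_intr(&sreg);
        printf("wait_for_intr -> %d (sreg %02x)\n", ret, sreg);
        return 0;
    }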
2821 void esp_send_pio_cmd(struct esp *esp, u32 addr, u32 esp_count,
2824 u8 phase = esp->sreg & ESP_STAT_PMASK;
2827 esp->send_cmd_error = 0;
2833 scsi_esp_cmd(esp, cmd);
2836 if (!esp_wait_for_fifo(esp))
2839 *dst++ = readb(esp->fifo_reg);
2845 if (esp_wait_for_intr(esp)) {
2846 esp->send_cmd_error = 1;
2850 if ((esp->sreg & ESP_STAT_PMASK) != phase)
2853 esp->ireg = esp_read8(ESP_INTRPT);
2854 if (esp->ireg & mask) {
2855 esp->send_cmd_error = 1;
2868 scsi_esp_cmd(esp, ESP_CMD_FLUSH);
2872 writesb(esp->fifo_reg, src, n);
2876 scsi_esp_cmd(esp, cmd);
2879 if (esp_wait_for_intr(esp)) {
2880 esp->send_cmd_error = 1;
2884 if ((esp->sreg & ESP_STAT_PMASK) != phase)
2887 esp->ireg = esp_read8(ESP_INTRPT);
2888 if (esp->ireg & ~ESP_INTR_BSERV) {
2889 esp->send_cmd_error = 1;
2898 writesb(esp->fifo_reg, src, n);
2906 esp->send_cmd_residual = esp_count;
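Note: taken together, the esp_send_pio_cmd() fragments show the PIO fallback's shape: latch the current phase, issue the command, then loop (wait for an interrupt, stop if the phase or interrupt register looks wrong, move a FIFO-sized chunk with readb()/writesb(), reissue ESP_CMD_TI) until the count drains; whatever remains is recorded in send_cmd_residual (line 2906). A compressed sketch of that control flow only, with all hardware stubbed out and the chunk size assumed:

    #include <stdio.h>

    /* Stubs for the chip; only the loop structure is the point here. */
    static int intr_ok(void)         { return 1; }  /* esp_wait_for_intr() */
    static int phase_unchanged(void) { return 1; }  /* sreg & PMASK check  */
    static unsigned int fifo_room(void) { return 16; }

    static unsigned int send_pio(const unsigned char *src, unsigned int count)
    {
        while (count) {
            if (!intr_ok() || !phase_unchanged())
                break;                        /* error: flag and stop */
            unsigned int n = count < fifo_room() ? count : fifo_room();
            /* writesb(fifo_reg, src, n) in the real driver ... */
            src += n;
            count -= n;
            /* ... then scsi_esp_cmd(esp, ESP_CMD_TI) pushes the chunk. */
        }
        return count;                         /* -> send_cmd_residual */
    }

    int main(void)
    {
        unsigned char buf[40] = { 0 };
        printf("residual = %u\n", send_pio(buf, sizeof(buf)));
        return 0;
    }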