Lines matching references to ms in the MESH SCSI driver (drivers/scsi/mesh.c), source tree /netgear-R7000-V1.0.7.12_1.2.5/components/opensource/linux/linux-2.6.36/drivers/scsi/. Each entry gives the source line number followed by the matching line.

184 static void mesh_done(struct mesh_state *ms, int start_next);
185 static void mesh_interrupt(struct mesh_state *ms);
186 static void cmd_complete(struct mesh_state *ms);
187 static void set_dma_cmds(struct mesh_state *ms, struct scsi_cmnd *cmd);
188 static void halt_dma(struct mesh_state *ms);
189 static void phase_mismatch(struct mesh_state *ms);
211 static void dlog(struct mesh_state *ms, char *fmt, int a)
213 struct mesh_target *tp = &ms->tgts[ms->conn_tgt];
217 slp = &ms->log[ms->log_ix];
220 tlp->phase = (ms->msgphase << 4) + ms->phase;
221 tlp->bs0 = ms->mesh->bus_status0;
222 tlp->bs1 = ms->mesh->bus_status1;
223 tlp->tgt = ms->conn_tgt;
230 if (++ms->log_ix >= N_DBG_SLOG)
231 ms->log_ix = 0;
232 if (ms->n_log < N_DBG_SLOG)
233 ++ms->n_log;
236 static void dumplog(struct mesh_state *ms, int t)
238 struct mesh_target *tp = &ms->tgts[t];
262 static void dumpslog(struct mesh_state *ms)
267 if (ms->n_log == 0)
269 i = ms->log_ix - ms->n_log;
272 ms->n_log = 0;
274 lp = &ms->log[i];
284 } while (i != ms->log_ix);
289 static inline void dlog(struct mesh_state *ms, char *fmt, int a)
291 static inline void dumplog(struct mesh_state *ms, int tgt)
293 static inline void dumpslog(struct mesh_state *ms)
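The dlog()/dumplog()/dumpslog() fragments above (source lines 211–293) show the driver's small debug log: a fixed array of slots indexed by log_ix, wrapping at N_DBG_SLOG, with n_log saturating at the array size and dumpslog() walking forward from the oldest valid entry. A minimal standalone sketch of that ring-buffer discipline, keeping only the wrap/count logic; the names (dbg_ring, dbg_slot, ring_push, ring_dump, N_SLOTS) are stand-ins, not the driver's own:

/* Hedged sketch of the ring-buffer logging pattern seen in dlog()/dumpslog(). */
#include <stdio.h>

#define N_SLOTS 8                     /* stand-in for N_DBG_SLOG */

struct dbg_slot { int phase, tgt; };

struct dbg_ring {
        struct dbg_slot log[N_SLOTS];
        int log_ix;                   /* next slot to write */
        int n_log;                    /* how many slots are valid */
};

static void ring_push(struct dbg_ring *r, int phase, int tgt)
{
        r->log[r->log_ix] = (struct dbg_slot){ .phase = phase, .tgt = tgt };
        if (++r->log_ix >= N_SLOTS)   /* wrap, as in dlog() */
                r->log_ix = 0;
        if (r->n_log < N_SLOTS)       /* saturate the element count */
                ++r->n_log;
}

static void ring_dump(struct dbg_ring *r)
{
        int i = r->log_ix - r->n_log; /* oldest entry, as in dumpslog() */

        if (r->n_log == 0)
                return;
        if (i < 0)
                i += N_SLOTS;
        r->n_log = 0;                 /* dumping consumes the log */
        do {
                printf("phase=%d tgt=%d\n", r->log[i].phase, r->log[i].tgt);
                if (++i >= N_SLOTS)
                        i = 0;
        } while (i != r->log_ix);
}

int main(void)
{
        struct dbg_ring r = { 0 };
        int k;

        for (k = 0; k < 12; ++k)      /* overflow the ring on purpose */
                ring_push(&r, k, k & 7);
        ring_dump(&r);                /* prints only the last 8 records */
        return 0;
}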
301 mesh_dump_regs(struct mesh_state *ms)
303 volatile struct mesh_regs __iomem *mr = ms->mesh;
304 volatile struct dbdma_regs __iomem *md = ms->dma;
309 ms, mr, md);
321 ms->phase, ms->msgphase, ms->conn_tgt, ms->data_ptr);
323 ms->dma_started, ms->dma_count, ms->n_msgout);
325 tp = &ms->tgts[t];
346 static void mesh_completed(struct mesh_state *ms, struct scsi_cmnd *cmd)
356 static void mesh_init(struct mesh_state *ms)
358 volatile struct mesh_regs __iomem *mr = ms->mesh;
359 volatile struct dbdma_regs __iomem *md = ms->dma;
372 out_8(&mr->source_id, ms->host->this_id);
373 out_8(&mr->sel_timeout, 25); /* 250ms */
398 ms->phase = idle;
399 ms->msgphase = msg_none;
403 static void mesh_start_cmd(struct mesh_state *ms, struct scsi_cmnd *cmd)
405 volatile struct mesh_regs __iomem *mr = ms->mesh;
409 ms->current_req = cmd;
410 ms->tgts[id].data_goes_out = cmd->sc_data_direction == DMA_TO_DEVICE;
411 ms->tgts[id].current_req = cmd;
422 if (ms->dma_started)
425 ms->phase = arbitrating;
426 ms->msgphase = msg_none;
427 ms->data_ptr = 0;
428 ms->dma_started = 0;
429 ms->n_msgout = 0;
430 ms->last_n_msgout = 0;
431 ms->expect_reply = 0;
432 ms->conn_tgt = id;
433 ms->tgts[id].saved_ptr = 0;
434 ms->stat = DID_OK;
435 ms->aborting = 0;
437 ms->tgts[id].n_log = 0;
438 dlog(ms, "start cmd=%x", (int) cmd);
442 dlog(ms, "about to arb, intr/exc/err/fc=%.8x",
454 dlog(ms, "busy b4 arb, intr/exc/err/fc=%.8x",
461 dlog(ms, "intr b4 arb, intr/exc/err/fc=%.8x",
464 mesh_interrupt(ms);
465 if (ms->phase != arbitrating)
471 ms->stat = DID_BUS_BUSY;
472 ms->phase = idle;
473 mesh_done(ms, 0);
498 dlog(ms, "intr after disresel, intr/exc/err/fc=%.8x",
501 mesh_interrupt(ms);
502 if (ms->phase != arbitrating)
504 dlog(ms, "after intr after disresel, intr/exc/err/fc=%.8x",
516 dlog(ms, "after arb, intr/exc/err/fc=%.8x",
521 dlog(ms, "resel? after arb, intr/exc/err/fc=%.8x",
532 dlog(ms, "tried reset after arb, intr/exc/err/fc=%.8x",
554 static void mesh_start(struct mesh_state *ms)
558 if (ms->phase != idle || ms->current_req != NULL) {
559 printk(KERN_ERR "inappropriate mesh_start (phase=%d, ms=%p)",
560 ms->phase, ms);
564 while (ms->phase == idle) {
566 for (cmd = ms->request_q; ; cmd = (struct scsi_cmnd *) cmd->host_scribble) {
569 if (ms->tgts[cmd->device->id].current_req == NULL)
575 ms->request_q = next;
579 ms->request_qtail = prev;
581 mesh_start_cmd(ms, cmd);
585 static void mesh_done(struct mesh_state *ms, int start_next)
588 struct mesh_target *tp = &ms->tgts[ms->conn_tgt];
590 cmd = ms->current_req;
591 ms->current_req = NULL;
594 cmd->result = (ms->stat << 16) + cmd->SCp.Status;
595 if (ms->stat == DID_OK)
599 cmd->result, ms->data_ptr, scsi_bufflen(cmd));
601 cmd->SCp.this_residual -= ms->data_ptr;
602 mesh_completed(ms, cmd);
605 out_8(&ms->mesh->sequence, SEQ_ENBRESEL);
606 mesh_flush_io(ms->mesh);
608 ms->phase = idle;
609 mesh_start(ms);
613 static inline void add_sdtr_msg(struct mesh_state *ms)
615 int i = ms->n_msgout;
617 ms->msgout[i] = EXTENDED_MESSAGE;
618 ms->msgout[i+1] = 3;
619 ms->msgout[i+2] = EXTENDED_SDTR;
620 ms->msgout[i+3] = mesh_sync_period/4;
621 ms->msgout[i+4] = (ALLOW_SYNC(ms->conn_tgt)? mesh_sync_offset: 0);
622 ms->n_msgout = i + 5;
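The add_sdtr_msg() fragments above (source lines 613–622) append a five-byte extended SDTR message: EXTENDED_MESSAGE, a length of 3, EXTENDED_SDTR, the transfer period in 4 ns units, and the offered offset (0 when synchronous transfers are not allowed for the target). A hedged standalone sketch of that layout; the buffer, helper name, and sample values are illustrative, while the message codes are the standard SCSI ones (both 0x01):

/* Sketch of the 5-byte extended SDTR message built by add_sdtr_msg(). */
#include <stdio.h>

#define EXTENDED_MESSAGE 0x01
#define EXTENDED_SDTR    0x01

static unsigned char msgout[16];      /* stand-in for ms->msgout */
static int n_msgout;

static void add_sdtr(int sync_period_ns, int sync_offset)
{
        int i = n_msgout;

        msgout[i]     = EXTENDED_MESSAGE;       /* extended message header */
        msgout[i + 1] = 3;                      /* 3 bytes follow the code */
        msgout[i + 2] = EXTENDED_SDTR;          /* synchronous data transfer request */
        msgout[i + 3] = sync_period_ns / 4;     /* period in units of 4 ns */
        msgout[i + 4] = sync_offset;            /* 0 requests asynchronous transfers */
        n_msgout = i + 5;
}

int main(void)
{
        add_sdtr(100, 15);                      /* e.g. 100 ns period, offset 15 */
        for (int i = 0; i < n_msgout; ++i)
                printf(" %02x", msgout[i]);
        printf("\n");                           /* prints: 01 03 01 19 0f */
        return 0;
}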
625 static void set_sdtr(struct mesh_state *ms, int period, int offset)
627 struct mesh_target *tp = &ms->tgts[ms->conn_tgt];
628 volatile struct mesh_regs __iomem *mr = ms->mesh;
636 ms->conn_tgt);
645 v = (ms->clk_freq / 5000) * period;
650 tr = (ms->clk_freq + 250000) / 500000;
656 tr = ((ms->clk_freq / (v + 2)) + 199999) / 200000;
663 ms->conn_tgt, tr/10, tr%10);
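The set_sdtr() fragments above (source lines 625–663) keep the transfer rate, tr, in units of 100 kB/s, derived from clk_freq (50 MHz by default, per the mesh_probe() fragment at line 1921) and printed as tr/10 "." tr%10 MB/s. A rough worked sketch of that arithmetic; the matched lines omit the intermediate rescaling of v, so the synchronous divisor below is purely illustrative:

/* Sketch of the transfer-rate arithmetic visible in set_sdtr(). */
#include <stdio.h>

int main(void)
{
        unsigned int clk_freq = 50000000;                /* 50 MHz MESH clock */
        unsigned int tr;

        /* asynchronous case shown above */
        tr = (clk_freq + 250000) / 500000;               /* -> 100, in 100 kB/s units */
        printf("async: %u.%u MB/s\n", tr / 10, tr % 10); /* async: 10.0 MB/s */

        /* synchronous case shown above; v is an illustrative divisor only */
        unsigned int v = 3;
        tr = ((clk_freq / (v + 2)) + 199999) / 200000;   /* -> 50, i.e. 5.0 MB/s */
        printf("sync:  %u.%u MB/s\n", tr / 10, tr % 10);
        return 0;
}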
666 static void start_phase(struct mesh_state *ms)
669 volatile struct mesh_regs __iomem *mr = ms->mesh;
670 volatile struct dbdma_regs __iomem *md = ms->dma;
671 struct scsi_cmnd *cmd = ms->current_req;
672 struct mesh_target *tp = &ms->tgts[ms->conn_tgt];
674 dlog(ms, "start_phase nmo/exc/fc/seq = %.8x",
675 MKWORD(ms->n_msgout, mr->exception, mr->fifo_count, mr->sequence));
677 seq = use_active_neg + (ms->n_msgout? SEQ_ATN: 0);
678 switch (ms->msgphase) {
686 ms->n_msgin = 0;
695 if (ms->n_msgout <= 0) {
697 ms->n_msgout);
698 mesh_dump_regs(ms);
699 ms->msgphase = msg_none;
702 if (ALLOW_DEBUG(ms->conn_tgt)) {
704 ms->n_msgout);
705 for (i = 0; i < ms->n_msgout; ++i)
706 printk(" %x", ms->msgout[i]);
709 dlog(ms, "msgout msg=%.8x", MKWORD(ms->n_msgout, ms->msgout[0],
710 ms->msgout[1], ms->msgout[2]));
720 dlog(ms, "bus0 was %.2x explicitly asserting ATN", mr->bus_status0);
727 dlog(ms,"hace: after explicit ATN bus0=%.2x",mr->bus_status0);
729 if (ms->n_msgout == 1) {
736 cmd_complete(ms);
738 out_8(&mr->count_lo, ms->n_msgout - 1);
740 for (i = 0; i < ms->n_msgout - 1; ++i)
741 out_8(&mr->fifo, ms->msgout[i]);
747 ms->msgphase);
750 switch (ms->phase) {
752 out_8(&mr->dest_id, ms->conn_tgt);
772 if (!ms->dma_started) {
773 set_dma_cmds(ms, cmd);
774 out_le32(&md->cmdptr, virt_to_phys(ms->dma_cmds));
776 ms->dma_started = 1;
778 nb = ms->dma_count;
781 ms->dma_count -= nb;
782 ms->data_ptr += nb;
798 dlog(ms, "enbresel intr/exc/err/fc=%.8x",
805 ms->phase);
806 dumpslog(ms);
811 static inline void get_msgin(struct mesh_state *ms)
813 volatile struct mesh_regs __iomem *mr = ms->mesh;
818 i = ms->n_msgin;
819 ms->n_msgin = i + n;
821 ms->msgin[i++] = in_8(&mr->fifo);
825 static inline int msgin_length(struct mesh_state *ms)
830 if (ms->n_msgin > 0) {
831 b = ms->msgin[0];
834 n = ms->n_msgin < 2? 2: ms->msgin[1] + 2;
843 static void reselected(struct mesh_state *ms)
845 volatile struct mesh_regs __iomem *mr = ms->mesh;
850 switch (ms->phase) {
854 if ((cmd = ms->current_req) != NULL) {
856 cmd->host_scribble = (void *) ms->request_q;
857 if (ms->request_q == NULL)
858 ms->request_qtail = cmd;
859 ms->request_q = cmd;
860 tp = &ms->tgts[cmd->device->id];
865 ms->phase = reselecting;
866 mesh_done(ms, 0);
872 ms->msgphase, ms->phase, ms->conn_tgt);
873 dumplog(ms, ms->conn_tgt);
874 dumpslog(ms);
877 if (ms->dma_started) {
879 halt_dma(ms);
881 ms->current_req = NULL;
882 ms->phase = dataing;
883 ms->msgphase = msg_in;
884 ms->n_msgout = 0;
885 ms->last_n_msgout = 0;
886 prev = ms->conn_tgt;
900 dlog(ms, "extra resel err/exc/fc = %.6x",
916 ms->conn_tgt = ms->host->this_id;
922 dlog(ms, "reseldata %x", b);
925 if ((b & (1 << t)) != 0 && t != ms->host->this_id)
927 if (b != (1 << t) + (1 << ms->host->this_id)) {
929 ms->conn_tgt = ms->host->this_id;
937 ms->conn_tgt = t;
938 tp = &ms->tgts[t];
945 ms->current_req = tp->current_req;
950 ms->data_ptr = tp->saved_ptr;
951 dlog(ms, "resel prev tgt=%d", prev);
952 dlog(ms, "resel err/exc=%.4x", MKWORD(0, 0, mr->error, mr->exception));
953 start_phase(ms);
957 dumplog(ms, ms->conn_tgt);
958 dumpslog(ms);
959 ms->data_ptr = 0;
960 ms->aborting = 1;
961 start_phase(ms);
964 static void do_abort(struct mesh_state *ms)
966 ms->msgout[0] = ABORT;
967 ms->n_msgout = 1;
968 ms->aborting = 1;
969 ms->stat = DID_ABORT;
970 dlog(ms, "abort", 0);
973 static void handle_reset(struct mesh_state *ms)
978 volatile struct mesh_regs __iomem *mr = ms->mesh;
981 tp = &ms->tgts[tgt];
985 mesh_completed(ms, cmd);
987 ms->tgts[tgt].sdtr_state = do_sdtr;
988 ms->tgts[tgt].sync_params = ASYNC_PARAMS;
990 ms->current_req = NULL;
991 while ((cmd = ms->request_q) != NULL) {
992 ms->request_q = (struct scsi_cmnd *) cmd->host_scribble;
994 mesh_completed(ms, cmd);
996 ms->phase = idle;
997 ms->msgphase = msg_none;
1009 struct mesh_state *ms = dev_id;
1010 struct Scsi_Host *dev = ms->host;
1013 mesh_interrupt(ms);
1018 static void handle_error(struct mesh_state *ms)
1021 volatile struct mesh_regs __iomem *mr = ms->mesh;
1026 dlog(ms, "error err/exc/fc/cl=%.8x",
1035 handle_reset(ms);
1042 reselected(ms);
1045 if (!ms->aborting) {
1047 ms->conn_tgt);
1048 dumplog(ms, ms->conn_tgt);
1049 dumpslog(ms);
1052 ms->stat = DID_ABORT;
1053 mesh_done(ms, 1);
1057 if (ms->msgphase == msg_in) {
1059 ms->conn_tgt);
1060 ms->msgout[0] = MSG_PARITY_ERROR;
1061 ms->n_msgout = 1;
1062 ms->msgphase = msg_in_bad;
1063 cmd_complete(ms);
1066 if (ms->stat == DID_OK) {
1068 ms->conn_tgt);
1069 ms->stat = DID_PARITY;
1073 cmd_complete(ms);
1086 reselected(ms);
1092 phase_mismatch(ms);
1100 mesh_dump_regs(ms);
1101 dumplog(ms, ms->conn_tgt);
1102 if (ms->phase > selecting && (in_8(&mr->bus_status1) & BS1_BSY)) {
1104 do_abort(ms);
1105 phase_mismatch(ms);
1108 ms->stat = DID_ERROR;
1109 mesh_done(ms, 1);
1112 static void handle_exception(struct mesh_state *ms)
1115 volatile struct mesh_regs __iomem *mr = ms->mesh;
1122 reselected(ms);
1125 ms->stat = DID_BUS_BUSY;
1126 mesh_done(ms, 1);
1129 ms->stat = DID_BAD_TARGET;
1130 mesh_done(ms, 1);
1134 phase_mismatch(ms);
1137 mesh_dump_regs(ms);
1138 dumplog(ms, ms->conn_tgt);
1139 do_abort(ms);
1140 phase_mismatch(ms);
1144 static void handle_msgin(struct mesh_state *ms)
1147 struct scsi_cmnd *cmd = ms->current_req;
1148 struct mesh_target *tp = &ms->tgts[ms->conn_tgt];
1150 if (ms->n_msgin == 0)
1152 code = ms->msgin[0];
1153 if (ALLOW_DEBUG(ms->conn_tgt)) {
1154 printk(KERN_DEBUG "got %d message bytes:", ms->n_msgin);
1155 for (i = 0; i < ms->n_msgin; ++i)
1156 printk(" %x", ms->msgin[i]);
1159 dlog(ms, "msgin msg=%.8x",
1160 MKWORD(ms->n_msgin, code, ms->msgin[1], ms->msgin[2]));
1162 ms->expect_reply = 0;
1163 ms->n_msgout = 0;
1164 if (ms->n_msgin < msgin_length(ms))
1172 switch (ms->msgin[2]) {
1174 ms->data_ptr += (ms->msgin[3] << 24) + ms->msgin[6]
1175 + (ms->msgin[4] << 16) + (ms->msgin[5] << 8);
1180 add_sdtr_msg(ms);
1183 if (ms->msgout[3] < ms->msgin[3])
1184 ms->msgout[3] = ms->msgin[3];
1185 if (ms->msgout[4] > ms->msgin[4])
1186 ms->msgout[4] = ms->msgin[4];
1187 set_sdtr(ms, ms->msgout[3], ms->msgout[4]);
1188 ms->msgphase = msg_out;
1190 set_sdtr(ms, ms->msgin[3], ms->msgin[4]);
1198 tp->saved_ptr = ms->data_ptr;
1201 ms->data_ptr = tp->saved_ptr;
1204 ms->phase = disconnecting;
1210 set_sdtr(ms, 0, 0);
1217 do_abort(ms);
1218 ms->msgphase = msg_out;
1223 cmd->device->lun, ms->conn_tgt);
1233 ms->conn_tgt);
1234 for (i = 0; i < ms->n_msgin; ++i)
1235 printk(" %x", ms->msgin[i]);
1237 ms->msgout[0] = MESSAGE_REJECT;
1238 ms->n_msgout = 1;
1239 ms->msgphase = msg_out;
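Within handle_msgin() (source lines 1144–1239), lines 1174–1175 reassemble the four argument bytes of an extended MODIFY DATA POINTER message (msgin[3]..msgin[6], most significant byte first) and add the result to data_ptr. A tiny hedged sketch of that reassembly; the sample message bytes and starting pointer are made up:

/* Sketch of the MODIFY DATA POINTER byte reassembly seen at lines 1174-1175. */
#include <stdio.h>

int main(void)
{
        /* EXTENDED_MESSAGE, length 5, MODIFY DATA POINTER (0x00), then the
         * big-endian 32-bit argument 0x00000200 (= 512 bytes) */
        unsigned char msgin[] = { 0x01, 0x05, 0x00, 0x00, 0x00, 0x02, 0x00 };
        int data_ptr = 4096;

        data_ptr += (msgin[3] << 24) + (msgin[4] << 16)
                  + (msgin[5] << 8)  +  msgin[6];
        printf("data_ptr = %d\n", data_ptr);   /* prints 4608 */
        return 0;
}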
1245 static void set_dma_cmds(struct mesh_state *ms, struct scsi_cmnd *cmd)
1251 dma_cmd = ms->tgts[ms->conn_tgt].data_goes_out?
1253 dcmds = ms->dma_cmds;
1265 off = ms->data_ptr;
1303 ms->dma_count = dtot;
1306 static void halt_dma(struct mesh_state *ms)
1308 volatile struct dbdma_regs __iomem *md = ms->dma;
1309 volatile struct mesh_regs __iomem *mr = ms->mesh;
1310 struct scsi_cmnd *cmd = ms->current_req;
1313 if (!ms->tgts[ms->conn_tgt].data_goes_out) {
1324 dlog(ms, "halt_dma fc/count=%.6x",
1326 if (ms->tgts[ms->conn_tgt].data_goes_out)
1330 ms->data_ptr -= nb;
1331 dlog(ms, "data_ptr %x", ms->data_ptr);
1332 if (ms->data_ptr < 0) {
1333 printk(KERN_ERR "mesh: halt_dma: data_ptr=%d (nb=%d, ms=%p)\n",
1334 ms->data_ptr, nb, ms);
1335 ms->data_ptr = 0;
1337 dumplog(ms, ms->conn_tgt);
1338 dumpslog(ms);
1341 ms->data_ptr > scsi_bufflen(cmd)) {
1344 ms->conn_tgt, ms->data_ptr, scsi_bufflen(cmd),
1345 ms->tgts[ms->conn_tgt].data_goes_out);
1348 ms->dma_started = 0;
1351 static void phase_mismatch(struct mesh_state *ms)
1353 volatile struct mesh_regs __iomem *mr = ms->mesh;
1356 dlog(ms, "phasemm ch/cl/seq/fc=%.8x",
1359 if (ms->msgphase == msg_out_xxx && phase == BP_MSGOUT) {
1365 out_8(&mr->fifo, ms->msgout[ms->n_msgout-1]);
1366 ms->msgphase = msg_out_last;
1370 if (ms->msgphase == msg_in) {
1371 get_msgin(ms);
1372 if (ms->n_msgin)
1373 handle_msgin(ms);
1376 if (ms->dma_started)
1377 halt_dma(ms);
1384 ms->msgphase = msg_none;
1387 ms->tgts[ms->conn_tgt].data_goes_out = 0;
1388 ms->phase = dataing;
1391 ms->tgts[ms->conn_tgt].data_goes_out = 1;
1392 ms->phase = dataing;
1395 ms->phase = commanding;
1398 ms->phase = statusing;
1401 ms->msgphase = msg_in;
1402 ms->n_msgin = 0;
1405 ms->msgphase = msg_out;
1406 if (ms->n_msgout == 0) {
1407 if (ms->aborting) {
1408 do_abort(ms);
1410 if (ms->last_n_msgout == 0) {
1413 ms->msgout[0] = NOP;
1414 ms->last_n_msgout = 1;
1416 ms->n_msgout = ms->last_n_msgout;
1422 ms->stat = DID_ERROR;
1423 mesh_done(ms, 1);
1427 start_phase(ms);
1430 static void cmd_complete(struct mesh_state *ms)
1432 volatile struct mesh_regs __iomem *mr = ms->mesh;
1433 struct scsi_cmnd *cmd = ms->current_req;
1434 struct mesh_target *tp = &ms->tgts[ms->conn_tgt];
1437 dlog(ms, "cmd_complete fc=%x", mr->fifo_count);
1438 seq = use_active_neg + (ms->n_msgout? SEQ_ATN: 0);
1439 switch (ms->msgphase) {
1442 ms->n_msgin = 0;
1443 ms->msgphase = msg_in;
1448 get_msgin(ms);
1449 n = msgin_length(ms);
1450 if (ms->n_msgin < n) {
1451 out_8(&mr->count_lo, n - ms->n_msgin);
1454 ms->msgphase = msg_none;
1455 handle_msgin(ms);
1456 start_phase(ms);
1485 dlog(ms, "last_mbyte err/exc/fc/cl=%.8x",
1490 ms->last_n_msgout = ms->n_msgout;
1491 ms->n_msgout = 0;
1495 handle_error(ms);
1504 handle_exception(ms);
1511 out_8(&mr->fifo, ms->msgout[ms->n_msgout-1]);
1512 ms->msgphase = msg_out_last;
1515 ms->msgphase = msg_out_xxx;
1520 ms->last_n_msgout = ms->n_msgout;
1521 ms->n_msgout = 0;
1522 ms->msgphase = ms->expect_reply? msg_in: msg_none;
1523 start_phase(ms);
1527 switch (ms->phase) {
1530 dumpslog(ms);
1533 dlog(ms, "Selecting phase at command completion",0);
1534 ms->msgout[0] = IDENTIFY(ALLOW_RESEL(ms->conn_tgt),
1536 ms->n_msgout = 1;
1537 ms->expect_reply = 0;
1538 if (ms->aborting) {
1539 ms->msgout[0] = ABORT;
1540 ms->n_msgout++;
1543 add_sdtr_msg(ms);
1544 ms->expect_reply = 1;
1547 ms->msgphase = msg_out;
1558 dlog(ms, "impatient for req", ms->n_msgout);
1559 ms->msgphase = msg_none;
1566 if (ms->dma_count != 0) {
1567 start_phase(ms);
1583 halt_dma(ms);
1592 ms->msgphase = msg_in;
1595 mesh_done(ms, 1);
1598 ms->current_req = NULL;
1599 ms->phase = idle;
1600 mesh_start(ms);
1605 ++ms->phase;
1606 start_phase(ms);
1618 struct mesh_state *ms;
1623 ms = (struct mesh_state *) cmd->device->host->hostdata;
1625 if (ms->request_q == NULL)
1626 ms->request_q = cmd;
1628 ms->request_qtail->host_scribble = (void *) cmd;
1629 ms->request_qtail = cmd;
1631 if (ms->phase == idle)
1632 mesh_start(ms);
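The mesh_queue() fragments above (source lines 1618–1632) append each incoming command to a singly linked FIFO threaded through host_scribble, with request_q and request_qtail as head and tail, and kick mesh_start() when the bus is idle; mesh_start() (lines 554–581) then walks that list for the first command whose target is free. A simplified standalone sketch of the queue itself, using a stand-in struct and a plain head-pop rather than the driver's eligibility scan and mid-list splice:

/* Sketch of the host_scribble-threaded request queue used by mesh_queue(). */
#include <stddef.h>
#include <stdio.h>

struct req {
        int id;
        void *host_scribble;          /* next pointer, as in the driver */
};

static struct req *request_q;         /* head of pending commands */
static struct req *request_qtail;     /* tail, for O(1) append */

static void queue_req(struct req *cmd)
{
        cmd->host_scribble = NULL;
        if (request_q == NULL)        /* empty queue: becomes head and tail */
                request_q = cmd;
        else                          /* otherwise link after current tail */
                request_qtail->host_scribble = cmd;
        request_qtail = cmd;
}

static struct req *dequeue_req(void)
{
        struct req *cmd = request_q;

        if (cmd != NULL) {
                request_q = cmd->host_scribble;
                if (request_q == NULL)
                        request_qtail = NULL;
        }
        return cmd;
}

int main(void)
{
        struct req a = { .id = 1 }, b = { .id = 2 };
        struct req *cmd;

        queue_req(&a);
        queue_req(&b);
        while ((cmd = dequeue_req()) != NULL)
                printf("starting cmd %d\n", cmd->id);
        return 0;
}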
1642 static void mesh_interrupt(struct mesh_state *ms)
1644 volatile struct mesh_regs __iomem *mr = ms->mesh;
1648 dlog(ms, "interrupt intr/err/exc/seq=%.8x",
1651 handle_error(ms);
1653 handle_exception(ms);
1656 cmd_complete(ms);
1667 struct mesh_state *ms = (struct mesh_state *) cmd->device->host->hostdata;
1670 mesh_dump_regs(ms);
1671 dumplog(ms, cmd->device->id);
1672 dumpslog(ms);
1684 struct mesh_state *ms = (struct mesh_state *) cmd->device->host->hostdata;
1685 volatile struct mesh_regs __iomem *mr = ms->mesh;
1686 volatile struct dbdma_regs __iomem *md = ms->dma;
1691 spin_lock_irqsave(ms->host->host_lock, flags);
1701 out_8(&mr->source_id, ms->host->this_id);
1702 out_8(&mr->sel_timeout, 25); /* 250ms */
1712 handle_reset(ms);
1714 spin_unlock_irqrestore(ms->host->host_lock, flags);
1718 static void set_mesh_power(struct mesh_state *ms, int state)
1723 pmac_call_feature(PMAC_FTR_MESH_ENABLE, macio_get_of_node(ms->mdev), 0, 1);
1726 pmac_call_feature(PMAC_FTR_MESH_ENABLE, macio_get_of_node(ms->mdev), 0, 0);
1735 struct mesh_state *ms = (struct mesh_state *)macio_get_drvdata(mdev);
1746 if (ms->phase == sleeping)
1749 scsi_block_requests(ms->host);
1750 spin_lock_irqsave(ms->host->host_lock, flags);
1751 while(ms->phase != idle) {
1752 spin_unlock_irqrestore(ms->host->host_lock, flags);
1754 spin_lock_irqsave(ms->host->host_lock, flags);
1756 ms->phase = sleeping;
1757 spin_unlock_irqrestore(ms->host->host_lock, flags);
1758 disable_irq(ms->meshintr);
1759 set_mesh_power(ms, 0);
1766 struct mesh_state *ms = (struct mesh_state *)macio_get_drvdata(mdev);
1769 if (ms->phase != sleeping)
1772 set_mesh_power(ms, 1);
1773 mesh_init(ms);
1774 spin_lock_irqsave(ms->host->host_lock, flags);
1775 mesh_start(ms);
1776 spin_unlock_irqrestore(ms->host->host_lock, flags);
1777 enable_irq(ms->meshintr);
1778 scsi_unblock_requests(ms->host);
1792 struct mesh_state *ms = (struct mesh_state *)macio_get_drvdata(mdev);
1797 spin_lock_irqsave(ms->host->host_lock, flags);
1798 mr = ms->mesh;
1805 spin_unlock_irqrestore(ms->host->host_lock, flags);
1829 struct mesh_state *ms;
1868 ms = (struct mesh_state *) mesh_host->hostdata;
1869 macio_set_drvdata(mdev, ms);
1870 ms->host = mesh_host;
1871 ms->mdev = mdev;
1872 ms->pdev = pdev;
1874 ms->mesh = ioremap(macio_resource_start(mdev, 0), 0x1000);
1875 if (ms->mesh == NULL) {
1879 ms->dma = ioremap(macio_resource_start(mdev, 1), 0x1000);
1880 if (ms->dma == NULL) {
1882 iounmap(ms->mesh);
1886 ms->meshintr = macio_irq(mdev, 0);
1887 ms->dmaintr = macio_irq(mdev, 1);
1892 ms->dma_cmd_size = (mesh_host->sg_tablesize + 2) * sizeof(struct dbdma_cmd);
1898 ms->dma_cmd_size,
1904 memset(dma_cmd_space, 0, ms->dma_cmd_size);
1906 ms->dma_cmds = (struct dbdma_cmd *) DBDMA_ALIGN(dma_cmd_space);
1907 ms->dma_cmd_space = dma_cmd_space;
1908 ms->dma_cmd_bus = dma_cmd_bus + ((unsigned long)ms->dma_cmds)
1910 ms->current_req = NULL;
1912 ms->tgts[tgt].sdtr_state = do_sdtr;
1913 ms->tgts[tgt].sync_params = ASYNC_PARAMS;
1914 ms->tgts[tgt].current_req = NULL;
1918 ms->clk_freq = *cfp;
1921 ms->clk_freq = 50000000;
1927 minper = 1000000000 / (ms->clk_freq / 5); /* ns */
1932 set_mesh_power(ms, 1);
1935 mesh_init(ms);
1938 if (request_irq(ms->meshintr, do_mesh_interrupt, 0, "MESH", ms)) {
1939 printk(KERN_ERR "MESH: can't get irq %d\n", ms->meshintr);
1951 free_irq(ms->meshintr, ms);
1957 set_mesh_power(ms, 0);
1958 pci_free_consistent(macio_get_pci_dev(mdev), ms->dma_cmd_size,
1959 ms->dma_cmd_space, ms->dma_cmd_bus);
1961 iounmap(ms->dma);
1962 iounmap(ms->mesh);
1973 struct mesh_state *ms = (struct mesh_state *)macio_get_drvdata(mdev);
1974 struct Scsi_Host *mesh_host = ms->host;
1978 free_irq(ms->meshintr, ms);
1984 set_mesh_power(ms, 0);
1987 iounmap(ms->mesh);
1988 iounmap(ms->dma);
1991 pci_free_consistent(macio_get_pci_dev(mdev), ms->dma_cmd_size,
1992 ms->dma_cmd_space, ms->dma_cmd_bus);