Lines Matching refs:ms (only in /netgear-WNDR4500v2-V1.0.0.60_1.0.38/src/linux/linux-2.6/drivers/scsi/)

185 static void mesh_done(struct mesh_state *ms, int start_next);
186 static void mesh_interrupt(struct mesh_state *ms);
187 static void cmd_complete(struct mesh_state *ms);
188 static void set_dma_cmds(struct mesh_state *ms, struct scsi_cmnd *cmd);
189 static void halt_dma(struct mesh_state *ms);
190 static void phase_mismatch(struct mesh_state *ms);
212 static void dlog(struct mesh_state *ms, char *fmt, int a)
214 struct mesh_target *tp = &ms->tgts[ms->conn_tgt];
218 slp = &ms->log[ms->log_ix];
221 tlp->phase = (ms->msgphase << 4) + ms->phase;
222 tlp->bs0 = ms->mesh->bus_status0;
223 tlp->bs1 = ms->mesh->bus_status1;
224 tlp->tgt = ms->conn_tgt;
231 if (++ms->log_ix >= N_DBG_SLOG)
232 ms->log_ix = 0;
233 if (ms->n_log < N_DBG_SLOG)
234 ++ms->n_log;
237 static void dumplog(struct mesh_state *ms, int t)
239 struct mesh_target *tp = &ms->tgts[t];
263 static void dumpslog(struct mesh_state *ms)
268 if (ms->n_log == 0)
270 i = ms->log_ix - ms->n_log;
273 ms->n_log = 0;
275 lp = &ms->log[i];
285 } while (i != ms->log_ix);
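
The dlog matches at 231–234 and the dumpslog matches at 268–285 outline a small circular slot log: the writer wraps log_ix at N_DBG_SLOG and lets n_log saturate, and the dumper walks forward from the oldest entry until it reaches log_ix again. A minimal standalone sketch of that ring-buffer pattern, with illustrative names rather than the driver's own (printf stands in for printk):

#include <stdio.h>

#define N_SLOG 20                        /* illustrative ring size (N_DBG_SLOG in the driver) */

struct slog {
    int entry[N_SLOG];
    int ix;                              /* next slot to write */
    int n;                               /* valid entries, saturates at N_SLOG */
};

static void slog_put(struct slog *l, int v)
{
    l->entry[l->ix] = v;
    if (++l->ix >= N_SLOG)               /* wrap the write index */
        l->ix = 0;
    if (l->n < N_SLOG)
        ++l->n;
}

static void slog_dump(struct slog *l)
{
    int i;

    if (l->n == 0)
        return;
    i = l->ix - l->n;                    /* index of the oldest entry */
    if (i < 0)
        i += N_SLOG;
    l->n = 0;                            /* dumping empties the log */
    do {
        printf("slot %d: %x\n", i, l->entry[i]);
        if (++i >= N_SLOG)
            i = 0;
    } while (i != l->ix);
}
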
290 static inline void dlog(struct mesh_state *ms, char *fmt, int a)
292 static inline void dumplog(struct mesh_state *ms, int tgt)
294 static inline void dumpslog(struct mesh_state *ms)
302 mesh_dump_regs(struct mesh_state *ms)
304 volatile struct mesh_regs __iomem *mr = ms->mesh;
305 volatile struct dbdma_regs __iomem *md = ms->dma;
310 ms, mr, md);
322 ms->phase, ms->msgphase, ms->conn_tgt, ms->data_ptr);
324 ms->dma_started, ms->dma_count, ms->n_msgout);
326 tp = &ms->tgts[t];
347 static void mesh_completed(struct mesh_state *ms, struct scsi_cmnd *cmd)
357 static void mesh_init(struct mesh_state *ms)
359 volatile struct mesh_regs __iomem *mr = ms->mesh;
360 volatile struct dbdma_regs __iomem *md = ms->dma;
373 out_8(&mr->source_id, ms->host->this_id);
374 out_8(&mr->sel_timeout, 25); /* 250ms */
399 ms->phase = idle;
400 ms->msgphase = msg_none;
404 static void mesh_start_cmd(struct mesh_state *ms, struct scsi_cmnd *cmd)
406 volatile struct mesh_regs __iomem *mr = ms->mesh;
410 ms->current_req = cmd;
411 ms->tgts[id].data_goes_out = cmd->sc_data_direction == DMA_TO_DEVICE;
412 ms->tgts[id].current_req = cmd;
423 if (ms->dma_started)
426 ms->phase = arbitrating;
427 ms->msgphase = msg_none;
428 ms->data_ptr = 0;
429 ms->dma_started = 0;
430 ms->n_msgout = 0;
431 ms->last_n_msgout = 0;
432 ms->expect_reply = 0;
433 ms->conn_tgt = id;
434 ms->tgts[id].saved_ptr = 0;
435 ms->stat = DID_OK;
436 ms->aborting = 0;
438 ms->tgts[id].n_log = 0;
439 dlog(ms, "start cmd=%x", (int) cmd);
443 dlog(ms, "about to arb, intr/exc/err/fc=%.8x",
455 dlog(ms, "busy b4 arb, intr/exc/err/fc=%.8x",
462 dlog(ms, "intr b4 arb, intr/exc/err/fc=%.8x",
465 mesh_interrupt(ms);
466 if (ms->phase != arbitrating)
472 ms->stat = DID_BUS_BUSY;
473 ms->phase = idle;
474 mesh_done(ms, 0);
499 dlog(ms, "intr after disresel, intr/exc/err/fc=%.8x",
502 mesh_interrupt(ms);
503 if (ms->phase != arbitrating)
505 dlog(ms, "after intr after disresel, intr/exc/err/fc=%.8x",
517 dlog(ms, "after arb, intr/exc/err/fc=%.8x",
522 dlog(ms, "resel? after arb, intr/exc/err/fc=%.8x",
533 dlog(ms, "tried reset after arb, intr/exc/err/fc=%.8x",
555 static void mesh_start(struct mesh_state *ms)
559 if (ms->phase != idle || ms->current_req != NULL) {
560 printk(KERN_ERR "inappropriate mesh_start (phase=%d, ms=%p)",
561 ms->phase, ms);
565 while (ms->phase == idle) {
567 for (cmd = ms->request_q; ; cmd = (struct scsi_cmnd *) cmd->host_scribble) {
570 if (ms->tgts[cmd->device->id].current_req == NULL)
576 ms->request_q = next;
580 ms->request_qtail = prev;
582 mesh_start_cmd(ms, cmd);
586 static void mesh_done(struct mesh_state *ms, int start_next)
589 struct mesh_target *tp = &ms->tgts[ms->conn_tgt];
591 cmd = ms->current_req;
592 ms->current_req = NULL;
595 cmd->result = (ms->stat << 16) + cmd->SCp.Status;
596 if (ms->stat == DID_OK)
600 cmd->result, ms->data_ptr, cmd->request_bufflen);
608 cmd->SCp.this_residual -= ms->data_ptr;
609 mesh_completed(ms, cmd);
612 out_8(&ms->mesh->sequence, SEQ_ENBRESEL);
613 mesh_flush_io(ms->mesh);
615 ms->phase = idle;
616 mesh_start(ms);
620 static inline void add_sdtr_msg(struct mesh_state *ms)
622 int i = ms->n_msgout;
624 ms->msgout[i] = EXTENDED_MESSAGE;
625 ms->msgout[i+1] = 3;
626 ms->msgout[i+2] = EXTENDED_SDTR;
627 ms->msgout[i+3] = mesh_sync_period/4;
628 ms->msgout[i+4] = (ALLOW_SYNC(ms->conn_tgt)? mesh_sync_offset: 0);
629 ms->n_msgout = i + 5;
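
The add_sdtr_msg matches at 624–629 append the standard five-byte SCSI extended SDTR (synchronous data transfer request) message to msgout. A hedged illustration of that byte layout, using example values in place of the driver's mesh_sync_period and mesh_sync_offset parameters:

/* SCSI-2 extended SDTR message as the matches at 624-629 build it:
 *   byte 0: 0x01  EXTENDED_MESSAGE
 *   byte 1: 0x03  count of the bytes that follow
 *   byte 2: 0x01  EXTENDED_SDTR
 *   byte 3: transfer period factor (period in ns divided by 4)
 *   byte 4: REQ/ACK offset (0 means asynchronous)
 * Example only: a 100 ns period and an offset of 15.
 */
unsigned char sdtr_msg[5] = { 0x01, 0x03, 0x01, 100 / 4, 15 };
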
632 static void set_sdtr(struct mesh_state *ms, int period, int offset)
634 struct mesh_target *tp = &ms->tgts[ms->conn_tgt];
635 volatile struct mesh_regs __iomem *mr = ms->mesh;
643 ms->conn_tgt);
652 v = (ms->clk_freq / 5000) * period;
657 tr = (ms->clk_freq + 250000) / 500000;
663 tr = ((ms->clk_freq / (v + 2)) + 199999) / 200000;
670 ms->conn_tgt, tr/10, tr%10);
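
The arithmetic matched at 652–670 converts the controller clock and the negotiated period into a rate printed in tenths of MB/s. As a worked illustration, assuming the 50 MHz fallback clock seen at 1941: the first expression gives tr = (50000000 + 250000) / 500000 = 100, printed as 10.0 MB/s, while the second, taking v = 3 as an example, gives tr = ((50000000 / (3 + 2)) + 199999) / 200000 = 50, printed as 5.0 MB/s.
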
673 static void start_phase(struct mesh_state *ms)
676 volatile struct mesh_regs __iomem *mr = ms->mesh;
677 volatile struct dbdma_regs __iomem *md = ms->dma;
678 struct scsi_cmnd *cmd = ms->current_req;
679 struct mesh_target *tp = &ms->tgts[ms->conn_tgt];
681 dlog(ms, "start_phase nmo/exc/fc/seq = %.8x",
682 MKWORD(ms->n_msgout, mr->exception, mr->fifo_count, mr->sequence));
684 seq = use_active_neg + (ms->n_msgout? SEQ_ATN: 0);
685 switch (ms->msgphase) {
693 ms->n_msgin = 0;
702 if (ms->n_msgout <= 0) {
704 ms->n_msgout);
705 mesh_dump_regs(ms);
706 ms->msgphase = msg_none;
709 if (ALLOW_DEBUG(ms->conn_tgt)) {
711 ms->n_msgout);
712 for (i = 0; i < ms->n_msgout; ++i)
713 printk(" %x", ms->msgout[i]);
716 dlog(ms, "msgout msg=%.8x", MKWORD(ms->n_msgout, ms->msgout[0],
717 ms->msgout[1], ms->msgout[2]));
727 dlog(ms, "bus0 was %.2x explicitly asserting ATN", mr->bus_status0);
734 dlog(ms,"hace: after explicit ATN bus0=%.2x",mr->bus_status0);
736 if (ms->n_msgout == 1) {
743 cmd_complete(ms);
745 out_8(&mr->count_lo, ms->n_msgout - 1);
747 for (i = 0; i < ms->n_msgout - 1; ++i)
748 out_8(&mr->fifo, ms->msgout[i]);
754 ms->msgphase);
757 switch (ms->phase) {
759 out_8(&mr->dest_id, ms->conn_tgt);
779 if (!ms->dma_started) {
780 set_dma_cmds(ms, cmd);
781 out_le32(&md->cmdptr, virt_to_phys(ms->dma_cmds));
783 ms->dma_started = 1;
785 nb = ms->dma_count;
788 ms->dma_count -= nb;
789 ms->data_ptr += nb;
805 dlog(ms, "enbresel intr/exc/err/fc=%.8x",
812 ms->phase);
813 dumpslog(ms);
818 static inline void get_msgin(struct mesh_state *ms)
820 volatile struct mesh_regs __iomem *mr = ms->mesh;
825 i = ms->n_msgin;
826 ms->n_msgin = i + n;
828 ms->msgin[i++] = in_8(&mr->fifo);
832 static inline int msgin_length(struct mesh_state *ms)
837 if (ms->n_msgin > 0) {
838 b = ms->msgin[0];
841 n = ms->n_msgin < 2? 2: ms->msgin[1] + 2;
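
The msgin_length matches at 837–841 apply the usual SCSI rule for sizing an incoming message from its first byte: an extended message (0x01) carries the remaining length in byte 1, codes 0x20–0x2f are two-byte messages, and everything else is a single byte. A standalone sketch consistent with those matches (names are illustrative, not the driver's exact code):

/* How many bytes the current SCSI message should contain, judged
 * from what has been received so far. */
static int scsi_msg_length(const unsigned char *msgin, int n_msgin)
{
    int n = 1;

    if (n_msgin > 0) {
        unsigned char b = msgin[0];

        if (b == 1)
            /* extended message: byte 1 holds the count of remaining bytes,
             * so at least two bytes are needed before the size is known */
            n = n_msgin < 2 ? 2 : msgin[1] + 2;
        else if (0x20 <= b && b <= 0x2f)
            /* two-byte message codes */
            n = 2;
    }
    return n;
}
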
850 static void reselected(struct mesh_state *ms)
852 volatile struct mesh_regs __iomem *mr = ms->mesh;
857 switch (ms->phase) {
861 if ((cmd = ms->current_req) != NULL) {
863 cmd->host_scribble = (void *) ms->request_q;
864 if (ms->request_q == NULL)
865 ms->request_qtail = cmd;
866 ms->request_q = cmd;
867 tp = &ms->tgts[cmd->device->id];
872 ms->phase = reselecting;
873 mesh_done(ms, 0);
879 ms->msgphase, ms->phase, ms->conn_tgt);
880 dumplog(ms, ms->conn_tgt);
881 dumpslog(ms);
884 if (ms->dma_started) {
886 halt_dma(ms);
888 ms->current_req = NULL;
889 ms->phase = dataing;
890 ms->msgphase = msg_in;
891 ms->n_msgout = 0;
892 ms->last_n_msgout = 0;
893 prev = ms->conn_tgt;
907 dlog(ms, "extra resel err/exc/fc = %.6x",
923 ms->conn_tgt = ms->host->this_id;
929 dlog(ms, "reseldata %x", b);
932 if ((b & (1 << t)) != 0 && t != ms->host->this_id)
934 if (b != (1 << t) + (1 << ms->host->this_id)) {
936 ms->conn_tgt = ms->host->this_id;
944 ms->conn_tgt = t;
945 tp = &ms->tgts[t];
952 ms->current_req = tp->current_req;
957 ms->data_ptr = tp->saved_ptr;
958 dlog(ms, "resel prev tgt=%d", prev);
959 dlog(ms, "resel err/exc=%.4x", MKWORD(0, 0, mr->error, mr->exception));
960 start_phase(ms);
964 dumplog(ms, ms->conn_tgt);
965 dumpslog(ms);
966 ms->data_ptr = 0;
967 ms->aborting = 1;
968 start_phase(ms);
971 static void do_abort(struct mesh_state *ms)
973 ms->msgout[0] = ABORT;
974 ms->n_msgout = 1;
975 ms->aborting = 1;
976 ms->stat = DID_ABORT;
977 dlog(ms, "abort", 0);
980 static void handle_reset(struct mesh_state *ms)
985 volatile struct mesh_regs __iomem *mr = ms->mesh;
988 tp = &ms->tgts[tgt];
992 mesh_completed(ms, cmd);
994 ms->tgts[tgt].sdtr_state = do_sdtr;
995 ms->tgts[tgt].sync_params = ASYNC_PARAMS;
997 ms->current_req = NULL;
998 while ((cmd = ms->request_q) != NULL) {
999 ms->request_q = (struct scsi_cmnd *) cmd->host_scribble;
1001 mesh_completed(ms, cmd);
1003 ms->phase = idle;
1004 ms->msgphase = msg_none;
1016 struct mesh_state *ms = dev_id;
1017 struct Scsi_Host *dev = ms->host;
1020 mesh_interrupt(ms);
1025 static void handle_error(struct mesh_state *ms)
1028 volatile struct mesh_regs __iomem *mr = ms->mesh;
1033 dlog(ms, "error err/exc/fc/cl=%.8x",
1042 handle_reset(ms);
1049 reselected(ms);
1052 if (!ms->aborting) {
1054 ms->conn_tgt);
1055 dumplog(ms, ms->conn_tgt);
1056 dumpslog(ms);
1059 ms->stat = DID_ABORT;
1060 mesh_done(ms, 1);
1064 if (ms->msgphase == msg_in) {
1066 ms->conn_tgt);
1067 ms->msgout[0] = MSG_PARITY_ERROR;
1068 ms->n_msgout = 1;
1069 ms->msgphase = msg_in_bad;
1070 cmd_complete(ms);
1073 if (ms->stat == DID_OK) {
1075 ms->conn_tgt);
1076 ms->stat = DID_PARITY;
1080 cmd_complete(ms);
1093 reselected(ms);
1099 phase_mismatch(ms);
1107 mesh_dump_regs(ms);
1108 dumplog(ms, ms->conn_tgt);
1109 if (ms->phase > selecting && (in_8(&mr->bus_status1) & BS1_BSY)) {
1111 do_abort(ms);
1112 phase_mismatch(ms);
1115 ms->stat = DID_ERROR;
1116 mesh_done(ms, 1);
1119 static void handle_exception(struct mesh_state *ms)
1122 volatile struct mesh_regs __iomem *mr = ms->mesh;
1129 reselected(ms);
1132 ms->stat = DID_BUS_BUSY;
1133 mesh_done(ms, 1);
1136 ms->stat = DID_BAD_TARGET;
1137 mesh_done(ms, 1);
1141 phase_mismatch(ms);
1144 mesh_dump_regs(ms);
1145 dumplog(ms, ms->conn_tgt);
1146 do_abort(ms);
1147 phase_mismatch(ms);
1151 static void handle_msgin(struct mesh_state *ms)
1154 struct scsi_cmnd *cmd = ms->current_req;
1155 struct mesh_target *tp = &ms->tgts[ms->conn_tgt];
1157 if (ms->n_msgin == 0)
1159 code = ms->msgin[0];
1160 if (ALLOW_DEBUG(ms->conn_tgt)) {
1161 printk(KERN_DEBUG "got %d message bytes:", ms->n_msgin);
1162 for (i = 0; i < ms->n_msgin; ++i)
1163 printk(" %x", ms->msgin[i]);
1166 dlog(ms, "msgin msg=%.8x",
1167 MKWORD(ms->n_msgin, code, ms->msgin[1], ms->msgin[2]));
1169 ms->expect_reply = 0;
1170 ms->n_msgout = 0;
1171 if (ms->n_msgin < msgin_length(ms))
1179 switch (ms->msgin[2]) {
1181 ms->data_ptr += (ms->msgin[3] << 24) + ms->msgin[6]
1182 + (ms->msgin[4] << 16) + (ms->msgin[5] << 8);
1187 add_sdtr_msg(ms);
1190 if (ms->msgout[3] < ms->msgin[3])
1191 ms->msgout[3] = ms->msgin[3];
1192 if (ms->msgout[4] > ms->msgin[4])
1193 ms->msgout[4] = ms->msgin[4];
1194 set_sdtr(ms, ms->msgout[3], ms->msgout[4]);
1195 ms->msgphase = msg_out;
1197 set_sdtr(ms, ms->msgin[3], ms->msgin[4]);
1205 tp->saved_ptr = ms->data_ptr;
1208 ms->data_ptr = tp->saved_ptr;
1211 ms->phase = disconnecting;
1217 set_sdtr(ms, 0, 0);
1224 do_abort(ms);
1225 ms->msgphase = msg_out;
1230 cmd->device->lun, ms->conn_tgt);
1240 ms->conn_tgt);
1241 for (i = 0; i < ms->n_msgin; ++i)
1242 printk(" %x", ms->msgin[i]);
1244 ms->msgout[0] = MESSAGE_REJECT;
1245 ms->n_msgout = 1;
1246 ms->msgphase = msg_out;
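
As the matches at 1190–1194 indicate, the SDTR reply keeps the larger (slower) of the two period factors and the smaller of the two offsets before handing them to set_sdtr, which is the usual way both ends of a synchronous negotiation converge on parameters each side can support.
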
1252 static void set_dma_cmds(struct mesh_state *ms, struct scsi_cmnd *cmd)
1258 dma_cmd = ms->tgts[ms->conn_tgt].data_goes_out?
1260 dcmds = ms->dma_cmds;
1268 off = ms->data_ptr;
1269 nseg = pci_map_sg(ms->pdev, scl, cmd->use_sg,
1290 } else if (ms->data_ptr < cmd->request_bufflen) {
1291 dtot = cmd->request_bufflen - ms->data_ptr;
1296 virt_to_phys(cmd->request_buffer) + ms->data_ptr);
1316 ms->dma_count = dtot;
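
The set_dma_cmds matches at 1258–1316 show the transfer list being rebuilt starting at ms->data_ptr, so that a transfer resumed after a disconnect picks up at the saved offset. A hedged sketch of skipping that offset while walking scatter/gather segments, using generic types rather than the dbdma_cmd descriptors the driver actually fills in:

struct seg {
    unsigned long addr;                  /* bus address of this segment */
    unsigned int  len;                   /* length in bytes */
};

/* Walk the segments, skip 'off' bytes that were already transferred,
 * and write the (address, length) pairs still to be moved into 'out'
 * (which must have room for nseg entries). Returns the total number
 * of bytes remaining, i.e. the role ms->dma_count plays. */
static unsigned int plan_transfer(const struct seg *sg, int nseg,
                                  unsigned int off, struct seg *out)
{
    unsigned int total = 0;
    int i, n = 0;

    for (i = 0; i < nseg; ++i) {
        unsigned long addr = sg[i].addr;
        unsigned int  len  = sg[i].len;

        if (off >= len) {                /* segment fully sent already */
            off -= len;
            continue;
        }
        addr += off;                     /* partial segment: resume mid-way */
        len  -= off;
        off   = 0;
        out[n].addr = addr;              /* a real DMA descriptor goes here */
        out[n].len  = len;
        ++n;
        total += len;
    }
    return total;
}
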
1319 static void halt_dma(struct mesh_state *ms)
1321 volatile struct dbdma_regs __iomem *md = ms->dma;
1322 volatile struct mesh_regs __iomem *mr = ms->mesh;
1323 struct scsi_cmnd *cmd = ms->current_req;
1326 if (!ms->tgts[ms->conn_tgt].data_goes_out) {
1337 dlog(ms, "halt_dma fc/count=%.6x",
1339 if (ms->tgts[ms->conn_tgt].data_goes_out)
1343 ms->data_ptr -= nb;
1344 dlog(ms, "data_ptr %x", ms->data_ptr);
1345 if (ms->data_ptr < 0) {
1346 printk(KERN_ERR "mesh: halt_dma: data_ptr=%d (nb=%d, ms=%p)\n",
1347 ms->data_ptr, nb, ms);
1348 ms->data_ptr = 0;
1350 dumplog(ms, ms->conn_tgt);
1351 dumpslog(ms);
1354 ms->data_ptr > cmd->request_bufflen) {
1357 ms->conn_tgt, ms->data_ptr, cmd->request_bufflen,
1358 ms->tgts[ms->conn_tgt].data_goes_out);
1363 pci_unmap_sg(ms->pdev, sg, cmd->use_sg, cmd->sc_data_direction);
1365 ms->dma_started = 0;
1368 static void phase_mismatch(struct mesh_state *ms)
1370 volatile struct mesh_regs __iomem *mr = ms->mesh;
1373 dlog(ms, "phasemm ch/cl/seq/fc=%.8x",
1376 if (ms->msgphase == msg_out_xxx && phase == BP_MSGOUT) {
1382 out_8(&mr->fifo, ms->msgout[ms->n_msgout-1]);
1383 ms->msgphase = msg_out_last;
1387 if (ms->msgphase == msg_in) {
1388 get_msgin(ms);
1389 if (ms->n_msgin)
1390 handle_msgin(ms);
1393 if (ms->dma_started)
1394 halt_dma(ms);
1401 ms->msgphase = msg_none;
1404 ms->tgts[ms->conn_tgt].data_goes_out = 0;
1405 ms->phase = dataing;
1408 ms->tgts[ms->conn_tgt].data_goes_out = 1;
1409 ms->phase = dataing;
1412 ms->phase = commanding;
1415 ms->phase = statusing;
1418 ms->msgphase = msg_in;
1419 ms->n_msgin = 0;
1422 ms->msgphase = msg_out;
1423 if (ms->n_msgout == 0) {
1424 if (ms->aborting) {
1425 do_abort(ms);
1427 if (ms->last_n_msgout == 0) {
1430 ms->msgout[0] = NOP;
1431 ms->last_n_msgout = 1;
1433 ms->n_msgout = ms->last_n_msgout;
1439 ms->stat = DID_ERROR;
1440 mesh_done(ms, 1);
1444 start_phase(ms);
1447 static void cmd_complete(struct mesh_state *ms)
1449 volatile struct mesh_regs __iomem *mr = ms->mesh;
1450 struct scsi_cmnd *cmd = ms->current_req;
1451 struct mesh_target *tp = &ms->tgts[ms->conn_tgt];
1454 dlog(ms, "cmd_complete fc=%x", mr->fifo_count);
1455 seq = use_active_neg + (ms->n_msgout? SEQ_ATN: 0);
1456 switch (ms->msgphase) {
1459 ms->n_msgin = 0;
1460 ms->msgphase = msg_in;
1465 get_msgin(ms);
1466 n = msgin_length(ms);
1467 if (ms->n_msgin < n) {
1468 out_8(&mr->count_lo, n - ms->n_msgin);
1471 ms->msgphase = msg_none;
1472 handle_msgin(ms);
1473 start_phase(ms);
1502 dlog(ms, "last_mbyte err/exc/fc/cl=%.8x",
1507 ms->last_n_msgout = ms->n_msgout;
1508 ms->n_msgout = 0;
1512 handle_error(ms);
1521 handle_exception(ms);
1528 out_8(&mr->fifo, ms->msgout[ms->n_msgout-1]);
1529 ms->msgphase = msg_out_last;
1532 ms->msgphase = msg_out_xxx;
1537 ms->last_n_msgout = ms->n_msgout;
1538 ms->n_msgout = 0;
1539 ms->msgphase = ms->expect_reply? msg_in: msg_none;
1540 start_phase(ms);
1544 switch (ms->phase) {
1547 dumpslog(ms);
1550 dlog(ms, "Selecting phase at command completion",0);
1551 ms->msgout[0] = IDENTIFY(ALLOW_RESEL(ms->conn_tgt),
1553 ms->n_msgout = 1;
1554 ms->expect_reply = 0;
1555 if (ms->aborting) {
1556 ms->msgout[0] = ABORT;
1557 ms->n_msgout++;
1560 add_sdtr_msg(ms);
1561 ms->expect_reply = 1;
1564 ms->msgphase = msg_out;
1575 dlog(ms, "impatient for req", ms->n_msgout);
1576 ms->msgphase = msg_none;
1583 if (ms->dma_count != 0) {
1584 start_phase(ms);
1600 halt_dma(ms);
1609 ms->msgphase = msg_in;
1612 mesh_done(ms, 1);
1615 ms->current_req = NULL;
1616 ms->phase = idle;
1617 mesh_start(ms);
1622 ++ms->phase;
1623 start_phase(ms);
1635 struct mesh_state *ms;
1640 ms = (struct mesh_state *) cmd->device->host->hostdata;
1642 if (ms->request_q == NULL)
1643 ms->request_q = cmd;
1645 ms->request_qtail->host_scribble = (void *) cmd;
1646 ms->request_qtail = cmd;
1648 if (ms->phase == idle)
1649 mesh_start(ms);
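
Taken together, the matches at 1642–1646 and at 863–866 show pending commands kept on a singly linked list threaded through host_scribble, with request_q and request_qtail as head and tail: the queueing path appends at the tail, while reselected() pushes a displaced command back on at the head. A standalone sketch of that pair of operations, with generic names in place of the scsi_cmnd fields:

#include <stddef.h>

struct cmd {
    int id;
    struct cmd *next;                    /* plays the role of host_scribble */
};

struct cmd_queue {
    struct cmd *head;                    /* request_q in the driver */
    struct cmd *tail;                    /* request_qtail */
};

/* Append at the tail -- what the queueing path at 1642-1646 does. */
static void q_append(struct cmd_queue *q, struct cmd *c)
{
    c->next = NULL;
    if (q->head == NULL)
        q->head = c;
    else
        q->tail->next = c;
    q->tail = c;
}

/* Push back at the head -- what the requeue at 863-866 does. */
static void q_push(struct cmd_queue *q, struct cmd *c)
{
    c->next = q->head;
    if (q->head == NULL)
        q->tail = c;                     /* list was empty: c is also the tail */
    q->head = c;
}
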
1659 static void mesh_interrupt(struct mesh_state *ms)
1661 volatile struct mesh_regs __iomem *mr = ms->mesh;
1665 dlog(ms, "interrupt intr/err/exc/seq=%.8x",
1668 handle_error(ms);
1670 handle_exception(ms);
1673 cmd_complete(ms);
1684 struct mesh_state *ms = (struct mesh_state *) cmd->device->host->hostdata;
1687 mesh_dump_regs(ms);
1688 dumplog(ms, cmd->device->id);
1689 dumpslog(ms);
1701 struct mesh_state *ms = (struct mesh_state *) cmd->device->host->hostdata;
1702 volatile struct mesh_regs __iomem *mr = ms->mesh;
1703 volatile struct dbdma_regs __iomem *md = ms->dma;
1708 spin_lock_irqsave(ms->host->host_lock, flags);
1718 out_8(&mr->source_id, ms->host->this_id);
1719 out_8(&mr->sel_timeout, 25); /* 250ms */
1729 handle_reset(ms);
1731 spin_unlock_irqrestore(ms->host->host_lock, flags);
1735 static void set_mesh_power(struct mesh_state *ms, int state)
1740 pmac_call_feature(PMAC_FTR_MESH_ENABLE, macio_get_of_node(ms->mdev), 0, 1);
1743 pmac_call_feature(PMAC_FTR_MESH_ENABLE, macio_get_of_node(ms->mdev), 0, 0);
1752 struct mesh_state *ms = (struct mesh_state *)macio_get_drvdata(mdev);
1765 scsi_block_requests(ms->host);
1766 spin_lock_irqsave(ms->host->host_lock, flags);
1767 while(ms->phase != idle) {
1768 spin_unlock_irqrestore(ms->host->host_lock, flags);
1770 spin_lock_irqsave(ms->host->host_lock, flags);
1772 ms->phase = sleeping;
1773 spin_unlock_irqrestore(ms->host->host_lock, flags);
1774 disable_irq(ms->meshintr);
1775 set_mesh_power(ms, 0);
1784 struct mesh_state *ms = (struct mesh_state *)macio_get_drvdata(mdev);
1790 set_mesh_power(ms, 1);
1791 mesh_init(ms);
1792 spin_lock_irqsave(ms->host->host_lock, flags);
1793 mesh_start(ms);
1794 spin_unlock_irqrestore(ms->host->host_lock, flags);
1795 enable_irq(ms->meshintr);
1796 scsi_unblock_requests(ms->host);
1812 struct mesh_state *ms = (struct mesh_state *)macio_get_drvdata(mdev);
1817 spin_lock_irqsave(ms->host->host_lock, flags);
1818 mr = ms->mesh;
1825 spin_unlock_irqrestore(ms->host->host_lock, flags);
1849 struct mesh_state *ms;
1888 ms = (struct mesh_state *) mesh_host->hostdata;
1889 macio_set_drvdata(mdev, ms);
1890 ms->host = mesh_host;
1891 ms->mdev = mdev;
1892 ms->pdev = pdev;
1894 ms->mesh = ioremap(macio_resource_start(mdev, 0), 0x1000);
1895 if (ms->mesh == NULL) {
1899 ms->dma = ioremap(macio_resource_start(mdev, 1), 0x1000);
1900 if (ms->dma == NULL) {
1902 iounmap(ms->mesh);
1906 ms->meshintr = macio_irq(mdev, 0);
1907 ms->dmaintr = macio_irq(mdev, 1);
1912 ms->dma_cmd_size = (mesh_host->sg_tablesize + 2) * sizeof(struct dbdma_cmd);
1918 ms->dma_cmd_size,
1924 memset(dma_cmd_space, 0, ms->dma_cmd_size);
1926 ms->dma_cmds = (struct dbdma_cmd *) DBDMA_ALIGN(dma_cmd_space);
1927 ms->dma_cmd_space = dma_cmd_space;
1928 ms->dma_cmd_bus = dma_cmd_bus + ((unsigned long)ms->dma_cmds)
1930 ms->current_req = NULL;
1932 ms->tgts[tgt].sdtr_state = do_sdtr;
1933 ms->tgts[tgt].sync_params = ASYNC_PARAMS;
1934 ms->tgts[tgt].current_req = NULL;
1938 ms->clk_freq = *cfp;
1941 ms->clk_freq = 50000000;
1947 minper = 1000000000 / (ms->clk_freq / 5); /* ns */
1952 set_mesh_power(ms, 1);
1955 mesh_init(ms);
1958 if (request_irq(ms->meshintr, do_mesh_interrupt, 0, "MESH", ms)) {
1959 printk(KERN_ERR "MESH: can't get irq %d\n", ms->meshintr);
1971 free_irq(ms->meshintr, ms);
1977 set_mesh_power(ms, 0);
1978 pci_free_consistent(macio_get_pci_dev(mdev), ms->dma_cmd_size,
1979 ms->dma_cmd_space, ms->dma_cmd_bus);
1981 iounmap(ms->dma);
1982 iounmap(ms->mesh);
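
With the 50 MHz fallback clock seen at 1941, the minimum-period expression matched at 1947 works out to minper = 1000000000 / (50000000 / 5) = 100 ns, i.e. the shortest negotiable synchronous period is five clock periods.
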
1993 struct mesh_state *ms = (struct mesh_state *)macio_get_drvdata(mdev);
1994 struct Scsi_Host *mesh_host = ms->host;
1998 free_irq(ms->meshintr, ms);
2004 set_mesh_power(ms, 0);
2007 iounmap(ms->mesh);
2008 iounmap(ms->dma);
2011 pci_free_consistent(macio_get_pci_dev(mdev), ms->dma_cmd_size,
2012 ms->dma_cmd_space, ms->dma_cmd_bus);