1/*
2 * SCSI low-level driver for the MESH (Macintosh Enhanced SCSI Hardware)
3 * bus adaptor found on Power Macintosh computers.
4 * We assume the MESH is connected to a DBDMA (descriptor-based DMA)
5 * controller.
6 *
7 * Paul Mackerras, August 1996.
8 * Copyright (C) 1996 Paul Mackerras.
9 *
10 * Apr. 21 2002  - BenH		Rework bus reset code for new error handler
11 *                              Add delay after initial bus reset
12 *                              Add module parameters
13 *
14 * Sep. 27 2003  - BenH		Move to new driver model, fix some write posting
15 *				issues
16 * To do:
17 * - handle aborts correctly
18 * - retry arbitration if lost (unless higher levels do this for us)
19 * - power down the chip when no device is detected
20 */
21#include <linux/module.h>
22#include <linux/kernel.h>
23#include <linux/delay.h>
24#include <linux/types.h>
25#include <linux/string.h>
26#include <linux/slab.h>
27#include <linux/blkdev.h>
28#include <linux/proc_fs.h>
29#include <linux/stat.h>
30#include <linux/interrupt.h>
31#include <linux/reboot.h>
32#include <linux/spinlock.h>
33#include <asm/dbdma.h>
34#include <asm/io.h>
35#include <asm/pgtable.h>
36#include <asm/prom.h>
37#include <asm/system.h>
38#include <asm/irq.h>
39#include <asm/hydra.h>
40#include <asm/processor.h>
41#include <asm/machdep.h>
42#include <asm/pmac_feature.h>
43#include <asm/pci-bridge.h>
44#include <asm/macio.h>
45
46#include <scsi/scsi.h>
47#include <scsi/scsi_cmnd.h>
48#include <scsi/scsi_device.h>
49#include <scsi/scsi_host.h>
50
51#include "mesh.h"
52
53#undef KERN_DEBUG
54#define KERN_DEBUG KERN_WARNING
55
56MODULE_AUTHOR("Paul Mackerras (paulus@samba.org)");
57MODULE_DESCRIPTION("PowerMac MESH SCSI driver");
58MODULE_LICENSE("GPL");
59
60static int sync_rate = CONFIG_SCSI_MESH_SYNC_RATE;
61static int sync_targets = 0xff;
62static int resel_targets = 0xff;
63static int debug_targets = 0;	/* print debug for these targets */
64static int init_reset_delay = CONFIG_SCSI_MESH_RESET_DELAY_MS;
65
66module_param(sync_rate, int, 0);
67MODULE_PARM_DESC(sync_rate, "Synchronous rate (0..10, 0=async)");
68module_param(sync_targets, int, 0);
69MODULE_PARM_DESC(sync_targets, "Bitmask of targets allowed to use synchronous transfers");
70module_param(resel_targets, int, 0);
71MODULE_PARM_DESC(resel_targets, "Bitmask of targets allowed to disconnect and reselect");
72module_param(debug_targets, int, 0644);
73MODULE_PARM_DESC(debug_targets, "Bitmask of debugged targets");
74module_param(init_reset_delay, int, 0);
75MODULE_PARM_DESC(init_reset_delay, "Initial bus reset delay (0=no reset)");
76
77static int mesh_sync_period = 100;
78static int mesh_sync_offset = 0;
79static unsigned char use_active_neg = 0;  /* bit mask for SEQ_ACTIVE_NEG if used */
80
81#define ALLOW_SYNC(tgt)		((sync_targets >> (tgt)) & 1)
82#define ALLOW_RESEL(tgt)	((resel_targets >> (tgt)) & 1)
83#define ALLOW_DEBUG(tgt)	((debug_targets >> (tgt)) & 1)
84#define DEBUG_TARGET(cmd)	((cmd) && ALLOW_DEBUG((cmd)->device->id))
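/*
 * Illustration: these module parameters are per-target bitmasks indexed
 * by SCSI ID.  For example, debug_targets=0x06 would enable debugging
 * output for targets 1 and 2 only, while the default of 0xff for
 * sync_targets and resel_targets permits all eight targets.
 */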
85
86#undef MESH_DBG
87#define N_DBG_LOG	50
88#define N_DBG_SLOG	20
89#define NUM_DBG_EVENTS	13
90#undef	DBG_USE_TB		/* bombs on 601 */
91
92struct dbglog {
93	char	*fmt;
94	u32	tb;
95	u8	phase;
96	u8	bs0;
97	u8	bs1;
98	u8	tgt;
99	int	d;
100};
101
102enum mesh_phase {
103	idle,
104	arbitrating,
105	selecting,
106	commanding,
107	dataing,
108	statusing,
109	busfreeing,
110	disconnecting,
111	reselecting,
112	sleeping
113};
114
115enum msg_phase {
116	msg_none,
117	msg_out,
118	msg_out_xxx,
119	msg_out_last,
120	msg_in,
121	msg_in_bad,
122};
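/*
 * msg_out_xxx: we are waiting for the target to assert REQ (or to give
 * us a phase-mismatch exception) before the last message byte can be
 * sent without ATN; msg_out_last: that final byte has been handed to
 * the chip.  See cmd_complete() and phase_mismatch().
 */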
123
124enum sdtr_phase {
125	do_sdtr,
126	sdtr_sent,
127	sdtr_done
128};
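/*
 * Per-target SDTR negotiation state: do_sdtr means we still have to
 * send an SDTR message, sdtr_sent means ours has gone out and we are
 * waiting for the target's reply, sdtr_done means the agreement (or the
 * fallback to async) has been settled.  See add_sdtr_msg(), set_sdtr()
 * and handle_msgin().
 */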
129
130struct mesh_target {
131	enum sdtr_phase sdtr_state;
132	int	sync_params;
133	int	data_goes_out;		/* guess as to data direction */
134	struct scsi_cmnd *current_req;
135	u32	saved_ptr;
136#ifdef MESH_DBG
137	int	log_ix;
138	int	n_log;
139	struct dbglog log[N_DBG_LOG];
140#endif
141};
142
143struct mesh_state {
144	volatile struct	mesh_regs __iomem *mesh;
145	int	meshintr;
146	volatile struct	dbdma_regs __iomem *dma;
147	int	dmaintr;
148	struct	Scsi_Host *host;
149	struct	mesh_state *next;
150	struct scsi_cmnd *request_q;
151	struct scsi_cmnd *request_qtail;
152	enum mesh_phase phase;		/* what we're currently trying to do */
153	enum msg_phase msgphase;
154	int	conn_tgt;		/* target we're connected to */
155	struct scsi_cmnd *current_req;		/* req we're currently working on */
156	int	data_ptr;
157	int	dma_started;
158	int	dma_count;
159	int	stat;
160	int	aborting;
161	int	expect_reply;
162	int	n_msgin;
163	u8	msgin[16];
164	int	n_msgout;
165	int	last_n_msgout;
166	u8	msgout[16];
167	struct dbdma_cmd *dma_cmds;	/* space for dbdma commands, aligned */
168	dma_addr_t dma_cmd_bus;
169	void	*dma_cmd_space;
170	int	dma_cmd_size;
171	int	clk_freq;
172	struct mesh_target tgts[8];
173	struct macio_dev *mdev;
174	struct pci_dev* pdev;
175#ifdef MESH_DBG
176	int	log_ix;
177	int	n_log;
178	struct dbglog log[N_DBG_SLOG];
179#endif
180};
181
182/*
183 * Driver is too messy, we need a few prototypes...
184 */
185static void mesh_done(struct mesh_state *ms, int start_next);
186static void mesh_interrupt(struct mesh_state *ms);
187static void cmd_complete(struct mesh_state *ms);
188static void set_dma_cmds(struct mesh_state *ms, struct scsi_cmnd *cmd);
189static void halt_dma(struct mesh_state *ms);
190static void phase_mismatch(struct mesh_state *ms);
191
192
193/*
194 * Some debugging & logging routines
195 */
196
197#ifdef MESH_DBG
198
199static inline u32 readtb(void)
200{
201	u32 tb;
202
203#ifdef DBG_USE_TB
204	/* Beware: if you enable this, it will crash on 601s. */
205	asm ("mftb %0" : "=r" (tb) : );
206#else
207	tb = 0;
208#endif
209	return tb;
210}
211
212static void dlog(struct mesh_state *ms, char *fmt, int a)
213{
214	struct mesh_target *tp = &ms->tgts[ms->conn_tgt];
215	struct dbglog *tlp, *slp;
216
217	tlp = &tp->log[tp->log_ix];
218	slp = &ms->log[ms->log_ix];
219	tlp->fmt = fmt;
220	tlp->tb = readtb();
221	tlp->phase = (ms->msgphase << 4) + ms->phase;
222	tlp->bs0 = ms->mesh->bus_status0;
223	tlp->bs1 = ms->mesh->bus_status1;
224	tlp->tgt = ms->conn_tgt;
225	tlp->d = a;
226	*slp = *tlp;
227	if (++tp->log_ix >= N_DBG_LOG)
228		tp->log_ix = 0;
229	if (tp->n_log < N_DBG_LOG)
230		++tp->n_log;
231	if (++ms->log_ix >= N_DBG_SLOG)
232		ms->log_ix = 0;
233	if (ms->n_log < N_DBG_SLOG)
234		++ms->n_log;
235}
236
237static void dumplog(struct mesh_state *ms, int t)
238{
239	struct mesh_target *tp = &ms->tgts[t];
240	struct dbglog *lp;
241	int i;
242
243	if (tp->n_log == 0)
244		return;
245	i = tp->log_ix - tp->n_log;
246	if (i < 0)
247		i += N_DBG_LOG;
248	tp->n_log = 0;
249	do {
250		lp = &tp->log[i];
251		printk(KERN_DEBUG "mesh log %d: bs=%.2x%.2x ph=%.2x ",
252		       t, lp->bs1, lp->bs0, lp->phase);
253#ifdef DBG_USE_TB
254		printk("tb=%10u ", lp->tb);
255#endif
256		printk(lp->fmt, lp->d);
257		printk("\n");
258		if (++i >= N_DBG_LOG)
259			i = 0;
260	} while (i != tp->log_ix);
261}
262
263static void dumpslog(struct mesh_state *ms)
264{
265	struct dbglog *lp;
266	int i;
267
268	if (ms->n_log == 0)
269		return;
270	i = ms->log_ix - ms->n_log;
271	if (i < 0)
272		i += N_DBG_SLOG;
273	ms->n_log = 0;
274	do {
275		lp = &ms->log[i];
276		printk(KERN_DEBUG "mesh log: bs=%.2x%.2x ph=%.2x t%d ",
277		       lp->bs1, lp->bs0, lp->phase, lp->tgt);
278#ifdef DBG_USE_TB
279		printk("tb=%10u ", lp->tb);
280#endif
281		printk(lp->fmt, lp->d);
282		printk("\n");
283		if (++i >= N_DBG_SLOG)
284			i = 0;
285	} while (i != ms->log_ix);
286}
287
288#else
289
290static inline void dlog(struct mesh_state *ms, char *fmt, int a)
291{}
292static inline void dumplog(struct mesh_state *ms, int tgt)
293{}
294static inline void dumpslog(struct mesh_state *ms)
295{}
296
297#endif /* MESH_DBG */
298
299#define MKWORD(a, b, c, d)	(((a) << 24) + ((b) << 16) + ((c) << 8) + (d))
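/*
 * For example, MKWORD(0x12, 0x34, 0x56, 0x78) gives 0x12345678; it is
 * used to pack up to four register bytes into the single integer
 * argument recorded by dlog().
 */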
300
301static void
302mesh_dump_regs(struct mesh_state *ms)
303{
304	volatile struct mesh_regs __iomem *mr = ms->mesh;
305	volatile struct dbdma_regs __iomem *md = ms->dma;
306	int t;
307	struct mesh_target *tp;
308
309	printk(KERN_DEBUG "mesh: state at %p, regs at %p, dma at %p\n",
310	       ms, mr, md);
311	printk(KERN_DEBUG "    ct=%4x seq=%2x bs=%4x fc=%2x "
312	       "exc=%2x err=%2x im=%2x int=%2x sp=%2x\n",
313	       (mr->count_hi << 8) + mr->count_lo, mr->sequence,
314	       (mr->bus_status1 << 8) + mr->bus_status0, mr->fifo_count,
315	       mr->exception, mr->error, mr->intr_mask, mr->interrupt,
316	       mr->sync_params);
317	while(in_8(&mr->fifo_count))
318		printk(KERN_DEBUG " fifo data=%.2x\n",in_8(&mr->fifo));
319	printk(KERN_DEBUG "    dma stat=%x cmdptr=%x\n",
320	       in_le32(&md->status), in_le32(&md->cmdptr));
321	printk(KERN_DEBUG "    phase=%d msgphase=%d conn_tgt=%d data_ptr=%d\n",
322	       ms->phase, ms->msgphase, ms->conn_tgt, ms->data_ptr);
323	printk(KERN_DEBUG "    dma_st=%d dma_ct=%d n_msgout=%d\n",
324	       ms->dma_started, ms->dma_count, ms->n_msgout);
325	for (t = 0; t < 8; ++t) {
326		tp = &ms->tgts[t];
327		if (tp->current_req == NULL)
328			continue;
329		printk(KERN_DEBUG "    target %d: req=%p goes_out=%d saved_ptr=%d\n",
330		       t, tp->current_req, tp->data_goes_out, tp->saved_ptr);
331	}
332}
333
334
335/*
336 * Flush write buffers on the bus path to the mesh
337 */
338static inline void mesh_flush_io(volatile struct mesh_regs __iomem *mr)
339{
340	(void)in_8(&mr->mesh_id);
341}
342
343
344/*
345 * Complete a SCSI command
346 */
347static void mesh_completed(struct mesh_state *ms, struct scsi_cmnd *cmd)
348{
349	(*cmd->scsi_done)(cmd);
350}
351
352
353/* Called with the MESH interrupt disabled.  Initializes the chipset
354 * and, if init_reset_delay is set, performs the initial bus reset.
355 * The host lock must not be held since we may sleep (msleep).
356 */
357static void mesh_init(struct mesh_state *ms)
358{
359	volatile struct mesh_regs __iomem *mr = ms->mesh;
360	volatile struct dbdma_regs __iomem *md = ms->dma;
361
362	mesh_flush_io(mr);
363	udelay(100);
364
365	/* Reset controller */
366	out_le32(&md->control, (RUN|PAUSE|FLUSH|WAKE) << 16);	/* stop dma */
367	out_8(&mr->exception, 0xff);	/* clear all exception bits */
368	out_8(&mr->error, 0xff);	/* clear all error bits */
369	out_8(&mr->sequence, SEQ_RESETMESH);
370	mesh_flush_io(mr);
371	udelay(10);
372	out_8(&mr->intr_mask, INT_ERROR | INT_EXCEPTION | INT_CMDDONE);
373	out_8(&mr->source_id, ms->host->this_id);
374	out_8(&mr->sel_timeout, 25);	/* 250ms */
375	out_8(&mr->sync_params, ASYNC_PARAMS);
376
377	if (init_reset_delay) {
378		printk(KERN_INFO "mesh: performing initial bus reset...\n");
379
380		/* Reset bus */
381		out_8(&mr->bus_status1, BS1_RST);	/* assert RST */
382		mesh_flush_io(mr);
383		udelay(30);			/* leave it on for >= 25us */
384		out_8(&mr->bus_status1, 0);	/* negate RST */
385		mesh_flush_io(mr);
386
387		/* Wait for bus to come back */
388		msleep(init_reset_delay);
389	}
390
391	/* Reconfigure controller */
392	out_8(&mr->interrupt, 0xff);	/* clear all interrupt bits */
393	out_8(&mr->sequence, SEQ_FLUSHFIFO);
394	mesh_flush_io(mr);
395	udelay(1);
396	out_8(&mr->sync_params, ASYNC_PARAMS);
397	out_8(&mr->sequence, SEQ_ENBRESEL);
398
399	ms->phase = idle;
400	ms->msgphase = msg_none;
401}
402
403
404static void mesh_start_cmd(struct mesh_state *ms, struct scsi_cmnd *cmd)
405{
406	volatile struct mesh_regs __iomem *mr = ms->mesh;
407	int t, id;
408
409	id = cmd->device->id;
410	ms->current_req = cmd;
411	ms->tgts[id].data_goes_out = cmd->sc_data_direction == DMA_TO_DEVICE;
412	ms->tgts[id].current_req = cmd;
413
414	if (DEBUG_TARGET(cmd)) {
415		int i;
416		printk(KERN_DEBUG "mesh_start: %p ser=%lu tgt=%d cmd=",
417		       cmd, cmd->serial_number, id);
418		for (i = 0; i < cmd->cmd_len; ++i)
419			printk(" %x", cmd->cmnd[i]);
420		printk(" use_sg=%d buffer=%p bufflen=%u\n",
421		       cmd->use_sg, cmd->request_buffer, cmd->request_bufflen);
422	}
423	if (ms->dma_started)
424		panic("mesh: double DMA start !\n");
425
426	ms->phase = arbitrating;
427	ms->msgphase = msg_none;
428	ms->data_ptr = 0;
429	ms->dma_started = 0;
430	ms->n_msgout = 0;
431	ms->last_n_msgout = 0;
432	ms->expect_reply = 0;
433	ms->conn_tgt = id;
434	ms->tgts[id].saved_ptr = 0;
435	ms->stat = DID_OK;
436	ms->aborting = 0;
437#ifdef MESH_DBG
438	ms->tgts[id].n_log = 0;
439	dlog(ms, "start cmd=%x", (int) cmd);
440#endif
441
442	/* Off we go */
443	dlog(ms, "about to arb, intr/exc/err/fc=%.8x",
444	     MKWORD(mr->interrupt, mr->exception, mr->error, mr->fifo_count));
445	out_8(&mr->interrupt, INT_CMDDONE);
446	out_8(&mr->sequence, SEQ_ENBRESEL);
447	mesh_flush_io(mr);
448	udelay(1);
449
450	if (in_8(&mr->bus_status1) & (BS1_BSY | BS1_SEL)) {
451		/*
452		 * Some other device has the bus or is arbitrating for it -
453		 * probably a target which is about to reselect us.
454		 */
455		dlog(ms, "busy b4 arb, intr/exc/err/fc=%.8x",
456		     MKWORD(mr->interrupt, mr->exception,
457			    mr->error, mr->fifo_count));
458		for (t = 100; t > 0; --t) {
459			if ((in_8(&mr->bus_status1) & (BS1_BSY | BS1_SEL)) == 0)
460				break;
461			if (in_8(&mr->interrupt) != 0) {
462				dlog(ms, "intr b4 arb, intr/exc/err/fc=%.8x",
463				     MKWORD(mr->interrupt, mr->exception,
464					    mr->error, mr->fifo_count));
465				mesh_interrupt(ms);
466				if (ms->phase != arbitrating)
467					return;
468			}
469			udelay(1);
470		}
471		if (in_8(&mr->bus_status1) & (BS1_BSY | BS1_SEL)) {
472			ms->stat = DID_BUS_BUSY;
473			ms->phase = idle;
474			mesh_done(ms, 0);
475			return;
476		}
477	}
478
479	/*
480	 * Apparently the mesh has a bug where it will assert both its
481	 * own bit and the target's bit on the bus during arbitration.
482	 */
483	out_8(&mr->dest_id, mr->source_id);
484
485	/*
486	 * There appears to be a race with reselection sometimes,
487	 * where a target reselects us just as we issue the
488	 * arbitrate command.  It seems that then the arbitrate
489	 * command just hangs waiting for the bus to be free
490	 * without giving us a reselection exception.
491	 * The only way I have found to get it to respond correctly
492	 * is this: disable reselection before issuing the arbitrate
493	 * command, then after issuing it, if it looks like a target
494	 * is trying to reselect us, reset the mesh and then enable
495	 * reselection.
496	 */
497	out_8(&mr->sequence, SEQ_DISRESEL);
498	if (in_8(&mr->interrupt) != 0) {
499		dlog(ms, "intr after disresel, intr/exc/err/fc=%.8x",
500		     MKWORD(mr->interrupt, mr->exception,
501			    mr->error, mr->fifo_count));
502		mesh_interrupt(ms);
503		if (ms->phase != arbitrating)
504			return;
505		dlog(ms, "after intr after disresel, intr/exc/err/fc=%.8x",
506		     MKWORD(mr->interrupt, mr->exception,
507			    mr->error, mr->fifo_count));
508	}
509
510	out_8(&mr->sequence, SEQ_ARBITRATE);
511
512	for (t = 230; t > 0; --t) {
513		if (in_8(&mr->interrupt) != 0)
514			break;
515		udelay(1);
516	}
517	dlog(ms, "after arb, intr/exc/err/fc=%.8x",
518	     MKWORD(mr->interrupt, mr->exception, mr->error, mr->fifo_count));
519	if (in_8(&mr->interrupt) == 0 && (in_8(&mr->bus_status1) & BS1_SEL)
520	    && (in_8(&mr->bus_status0) & BS0_IO)) {
521		/* looks like a reselection - try resetting the mesh */
522		dlog(ms, "resel? after arb, intr/exc/err/fc=%.8x",
523		     MKWORD(mr->interrupt, mr->exception, mr->error, mr->fifo_count));
524		out_8(&mr->sequence, SEQ_RESETMESH);
525		mesh_flush_io(mr);
526		udelay(10);
527		out_8(&mr->interrupt, INT_ERROR | INT_EXCEPTION | INT_CMDDONE);
528		out_8(&mr->intr_mask, INT_ERROR | INT_EXCEPTION | INT_CMDDONE);
529		out_8(&mr->sequence, SEQ_ENBRESEL);
530		mesh_flush_io(mr);
531		for (t = 10; t > 0 && in_8(&mr->interrupt) == 0; --t)
532			udelay(1);
533		dlog(ms, "tried reset after arb, intr/exc/err/fc=%.8x",
534		     MKWORD(mr->interrupt, mr->exception, mr->error, mr->fifo_count));
535#ifndef MESH_MULTIPLE_HOSTS
536		if (in_8(&mr->interrupt) == 0 && (in_8(&mr->bus_status1) & BS1_SEL)
537		    && (in_8(&mr->bus_status0) & BS0_IO)) {
538			printk(KERN_ERR "mesh: controller not responding"
539			       " to reselection!\n");
540			/*
541			 * If this is a target reselecting us, and the
542			 * mesh isn't responding, the higher levels of
543			 * the scsi code will eventually time out and
544			 * reset the bus.
545			 */
546		}
547#endif
548	}
549}
550
551/*
552 * Start the next command for a MESH.
553 * Should be called with interrupts disabled.
554 */
555static void mesh_start(struct mesh_state *ms)
556{
557	struct scsi_cmnd *cmd, *prev, *next;
558
559	if (ms->phase != idle || ms->current_req != NULL) {
560		printk(KERN_ERR "inappropriate mesh_start (phase=%d, ms=%p)\n",
561		       ms->phase, ms);
562		return;
563	}
564
565	while (ms->phase == idle) {
566		prev = NULL;
567		for (cmd = ms->request_q; ; cmd = (struct scsi_cmnd *) cmd->host_scribble) {
568			if (cmd == NULL)
569				return;
570			if (ms->tgts[cmd->device->id].current_req == NULL)
571				break;
572			prev = cmd;
573		}
574		next = (struct scsi_cmnd *) cmd->host_scribble;
575		if (prev == NULL)
576			ms->request_q = next;
577		else
578			prev->host_scribble = (void *) next;
579		if (next == NULL)
580			ms->request_qtail = prev;
581
582		mesh_start_cmd(ms, cmd);
583	}
584}
585
586static void mesh_done(struct mesh_state *ms, int start_next)
587{
588	struct scsi_cmnd *cmd;
589	struct mesh_target *tp = &ms->tgts[ms->conn_tgt];
590
591	cmd = ms->current_req;
592	ms->current_req = NULL;
593	tp->current_req = NULL;
594	if (cmd) {
595		cmd->result = (ms->stat << 16) + cmd->SCp.Status;
596		if (ms->stat == DID_OK)
597			cmd->result += (cmd->SCp.Message << 8);
598		if (DEBUG_TARGET(cmd)) {
599			printk(KERN_DEBUG "mesh_done: result = %x, data_ptr=%d, buflen=%d\n",
600			       cmd->result, ms->data_ptr, cmd->request_bufflen);
601			if ((cmd->cmnd[0] == 0 || cmd->cmnd[0] == 0x12 || cmd->cmnd[0] == 3)
602			    && cmd->request_buffer != 0) {
603				unsigned char *b = cmd->request_buffer;
604				printk(KERN_DEBUG "buffer = %x %x %x %x %x %x %x %x\n",
605				       b[0], b[1], b[2], b[3], b[4], b[5], b[6], b[7]);
606			}
607		}
608		cmd->SCp.this_residual -= ms->data_ptr;
609		mesh_completed(ms, cmd);
610	}
611	if (start_next) {
612		out_8(&ms->mesh->sequence, SEQ_ENBRESEL);
613		mesh_flush_io(ms->mesh);
614		udelay(1);
615		ms->phase = idle;
616		mesh_start(ms);
617	}
618}
619
620static inline void add_sdtr_msg(struct mesh_state *ms)
621{
622	int i = ms->n_msgout;
623
624	ms->msgout[i] = EXTENDED_MESSAGE;
625	ms->msgout[i+1] = 3;
626	ms->msgout[i+2] = EXTENDED_SDTR;
627	ms->msgout[i+3] = mesh_sync_period/4;
628	ms->msgout[i+4] = (ALLOW_SYNC(ms->conn_tgt)? mesh_sync_offset: 0);
629	ms->n_msgout = i + 5;
630}
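/*
 * Illustration (assuming the default sync_rate of 5 MB/s, i.e.
 * mesh_sync_period = 200 and mesh_sync_offset = 15): the five bytes
 * queued above are 01 03 01 32 0f, an extended SDTR message with a
 * period factor of 50 (200ns / 4ns) and an offset of 15.
 */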
631
632static void set_sdtr(struct mesh_state *ms, int period, int offset)
633{
634	struct mesh_target *tp = &ms->tgts[ms->conn_tgt];
635	volatile struct mesh_regs __iomem *mr = ms->mesh;
636	int v, tr;
637
638	tp->sdtr_state = sdtr_done;
639	if (offset == 0) {
640		/* asynchronous */
641		if (SYNC_OFF(tp->sync_params))
642			printk(KERN_INFO "mesh: target %d now asynchronous\n",
643			       ms->conn_tgt);
644		tp->sync_params = ASYNC_PARAMS;
645		out_8(&mr->sync_params, ASYNC_PARAMS);
646		return;
647	}
648	/*
649	 * We need to compute ceil(clk_freq * period / 500e6) - 2
650	 * without incurring overflow.
651	 */
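	/*
	 * Worked example: with clk_freq = 50MHz and period = 50 (200ns),
	 * v = 10000 * 50 = 500000, giving v = (500000 + 99999)/100000 - 2
	 * = 3 and tr = ((50000000 / 5) + 199999) / 200000 = 50, which is
	 * reported below as "synchronous at 5.0 MB/s".
	 */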
652	v = (ms->clk_freq / 5000) * period;
653	if (v <= 250000) {
654		/* special case: sync_period == 5 * clk_period */
655		v = 0;
656		/* units of tr are 100kB/s */
657		tr = (ms->clk_freq + 250000) / 500000;
658	} else {
659		/* sync_period == (v + 2) * 2 * clk_period */
660		v = (v + 99999) / 100000 - 2;
661		if (v > 15)
662			v = 15;	/* oops */
663		tr = ((ms->clk_freq / (v + 2)) + 199999) / 200000;
664	}
665	if (offset > 15)
666		offset = 15;	/* can't happen */
667	tp->sync_params = SYNC_PARAMS(offset, v);
668	out_8(&mr->sync_params, tp->sync_params);
669	printk(KERN_INFO "mesh: target %d synchronous at %d.%d MB/s\n",
670	       ms->conn_tgt, tr/10, tr%10);
671}
672
673static void start_phase(struct mesh_state *ms)
674{
675	int i, seq, nb;
676	volatile struct mesh_regs __iomem *mr = ms->mesh;
677	volatile struct dbdma_regs __iomem *md = ms->dma;
678	struct scsi_cmnd *cmd = ms->current_req;
679	struct mesh_target *tp = &ms->tgts[ms->conn_tgt];
680
681	dlog(ms, "start_phase nmo/exc/fc/seq = %.8x",
682	     MKWORD(ms->n_msgout, mr->exception, mr->fifo_count, mr->sequence));
683	out_8(&mr->interrupt, INT_ERROR | INT_EXCEPTION | INT_CMDDONE);
684	seq = use_active_neg + (ms->n_msgout? SEQ_ATN: 0);
685	switch (ms->msgphase) {
686	case msg_none:
687		break;
688
689	case msg_in:
690		out_8(&mr->count_hi, 0);
691		out_8(&mr->count_lo, 1);
692		out_8(&mr->sequence, SEQ_MSGIN + seq);
693		ms->n_msgin = 0;
694		return;
695
696	case msg_out:
697		/*
698		 * To make sure ATN drops before we assert ACK for
699		 * the last byte of the message, we have to do the
700		 * last byte specially.
701		 */
702		if (ms->n_msgout <= 0) {
703			printk(KERN_ERR "mesh: msg_out but n_msgout=%d\n",
704			       ms->n_msgout);
705			mesh_dump_regs(ms);
706			ms->msgphase = msg_none;
707			break;
708		}
709		if (ALLOW_DEBUG(ms->conn_tgt)) {
710			printk(KERN_DEBUG "mesh: sending %d msg bytes:",
711			       ms->n_msgout);
712			for (i = 0; i < ms->n_msgout; ++i)
713				printk(" %x", ms->msgout[i]);
714			printk("\n");
715		}
716		dlog(ms, "msgout msg=%.8x", MKWORD(ms->n_msgout, ms->msgout[0],
717						ms->msgout[1], ms->msgout[2]));
718		out_8(&mr->count_hi, 0);
719		out_8(&mr->sequence, SEQ_FLUSHFIFO);
720		mesh_flush_io(mr);
721		udelay(1);
722		/*
723		 * If ATN is not already asserted, we assert it, then
724		 * issue a SEQ_MSGOUT to get the mesh to drop ACK.
725		 */
726		if ((in_8(&mr->bus_status0) & BS0_ATN) == 0) {
727			dlog(ms, "bus0 was %.2x explicitly asserting ATN", mr->bus_status0);
728			out_8(&mr->bus_status0, BS0_ATN); /* explicit ATN */
729			mesh_flush_io(mr);
730			udelay(1);
731			out_8(&mr->count_lo, 1);
732			out_8(&mr->sequence, SEQ_MSGOUT + seq);
733			out_8(&mr->bus_status0, 0); /* release explicit ATN */
734			dlog(ms, "after explicit ATN bus0=%.2x", mr->bus_status0);
735		}
736		if (ms->n_msgout == 1) {
737			/*
738			 * We can't issue the SEQ_MSGOUT without ATN
739			 * until the target has asserted REQ.  The logic
740			 * in cmd_complete handles both situations:
741			 * REQ already asserted or not.
742			 */
743			cmd_complete(ms);
744		} else {
745			out_8(&mr->count_lo, ms->n_msgout - 1);
746			out_8(&mr->sequence, SEQ_MSGOUT + seq);
747			for (i = 0; i < ms->n_msgout - 1; ++i)
748				out_8(&mr->fifo, ms->msgout[i]);
749		}
750		return;
751
752	default:
753		printk(KERN_ERR "mesh bug: start_phase msgphase=%d\n",
754		       ms->msgphase);
755	}
756
757	switch (ms->phase) {
758	case selecting:
759		out_8(&mr->dest_id, ms->conn_tgt);
760		out_8(&mr->sequence, SEQ_SELECT + SEQ_ATN);
761		break;
762	case commanding:
763		out_8(&mr->sync_params, tp->sync_params);
764		out_8(&mr->count_hi, 0);
765		if (cmd) {
766			out_8(&mr->count_lo, cmd->cmd_len);
767			out_8(&mr->sequence, SEQ_COMMAND + seq);
768			for (i = 0; i < cmd->cmd_len; ++i)
769				out_8(&mr->fifo, cmd->cmnd[i]);
770		} else {
771			out_8(&mr->count_lo, 6);
772			out_8(&mr->sequence, SEQ_COMMAND + seq);
773			for (i = 0; i < 6; ++i)
774				out_8(&mr->fifo, 0);
775		}
776		break;
777	case dataing:
778		/* transfer data, if any */
779		if (!ms->dma_started) {
780			set_dma_cmds(ms, cmd);
781			out_le32(&md->cmdptr, virt_to_phys(ms->dma_cmds));
782			out_le32(&md->control, (RUN << 16) | RUN);
783			ms->dma_started = 1;
784		}
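		/*
		 * The MESH transfer counter is only 16 bits wide, so a
		 * large request is fed to the chip in chunks of at most
		 * 0xfff0 bytes; dma_count holds the remainder and the
		 * dataing case in cmd_complete() starts the next chunk.
		 */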
785		nb = ms->dma_count;
786		if (nb > 0xfff0)
787			nb = 0xfff0;
788		ms->dma_count -= nb;
789		ms->data_ptr += nb;
790		out_8(&mr->count_lo, nb);
791		out_8(&mr->count_hi, nb >> 8);
792		out_8(&mr->sequence, (tp->data_goes_out?
793				SEQ_DATAOUT: SEQ_DATAIN) + SEQ_DMA_MODE + seq);
794		break;
795	case statusing:
796		out_8(&mr->count_hi, 0);
797		out_8(&mr->count_lo, 1);
798		out_8(&mr->sequence, SEQ_STATUS + seq);
799		break;
800	case busfreeing:
801	case disconnecting:
802		out_8(&mr->sequence, SEQ_ENBRESEL);
803		mesh_flush_io(mr);
804		udelay(1);
805		dlog(ms, "enbresel intr/exc/err/fc=%.8x",
806		     MKWORD(mr->interrupt, mr->exception, mr->error,
807			    mr->fifo_count));
808		out_8(&mr->sequence, SEQ_BUSFREE);
809		break;
810	default:
811		printk(KERN_ERR "mesh: start_phase called with phase=%d\n",
812		       ms->phase);
813		dumpslog(ms);
814	}
815
816}
817
818static inline void get_msgin(struct mesh_state *ms)
819{
820	volatile struct mesh_regs __iomem *mr = ms->mesh;
821	int i, n;
822
823	n = mr->fifo_count;
824	if (n != 0) {
825		i = ms->n_msgin;
826		ms->n_msgin = i + n;
827		for (; n > 0; --n)
828			ms->msgin[i++] = in_8(&mr->fifo);
829	}
830}
831
832static inline int msgin_length(struct mesh_state *ms)
833{
834	int b, n;
835
836	n = 1;
837	if (ms->n_msgin > 0) {
838		b = ms->msgin[0];
839		if (b == 1) {
840			/* extended message */
841			n = ms->n_msgin < 2? 2: ms->msgin[1] + 2;
842		} else if (0x20 <= b && b <= 0x2f) {
843			/* 2-byte message */
844			n = 2;
845		}
846	}
847	return n;
848}
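/*
 * Example: an incoming SDTR arrives as the byte sequence 01 03 01
 * <period> <offset>, so once the first two bytes are in the buffer
 * msgin_length() returns msgin[1] + 2 = 5 and we keep reading until
 * all five bytes have been collected.
 */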
849
850static void reselected(struct mesh_state *ms)
851{
852	volatile struct mesh_regs __iomem *mr = ms->mesh;
853	struct scsi_cmnd *cmd;
854	struct mesh_target *tp;
855	int b, t, prev;
856
857	switch (ms->phase) {
858	case idle:
859		break;
860	case arbitrating:
861		if ((cmd = ms->current_req) != NULL) {
862			/* put the command back on the queue */
863			cmd->host_scribble = (void *) ms->request_q;
864			if (ms->request_q == NULL)
865				ms->request_qtail = cmd;
866			ms->request_q = cmd;
867			tp = &ms->tgts[cmd->device->id];
868			tp->current_req = NULL;
869		}
870		break;
871	case busfreeing:
872		ms->phase = reselecting;
873		mesh_done(ms, 0);
874		break;
875	case disconnecting:
876		break;
877	default:
878		printk(KERN_ERR "mesh: reselected in phase %d/%d tgt %d\n",
879		       ms->msgphase, ms->phase, ms->conn_tgt);
880		dumplog(ms, ms->conn_tgt);
881		dumpslog(ms);
882	}
883
884	if (ms->dma_started) {
885		printk(KERN_ERR "mesh: reselected with DMA started !\n");
886		halt_dma(ms);
887	}
888	ms->current_req = NULL;
889	ms->phase = dataing;
890	ms->msgphase = msg_in;
891	ms->n_msgout = 0;
892	ms->last_n_msgout = 0;
893	prev = ms->conn_tgt;
894
895	/*
896	 * We seem to get abortive reselections sometimes.
897	 */
898	while ((in_8(&mr->bus_status1) & BS1_BSY) == 0) {
899		static int mesh_aborted_resels;
900		mesh_aborted_resels++;
901		out_8(&mr->interrupt, INT_ERROR | INT_EXCEPTION | INT_CMDDONE);
902		mesh_flush_io(mr);
903		udelay(1);
904		out_8(&mr->sequence, SEQ_ENBRESEL);
905		mesh_flush_io(mr);
906		udelay(5);
907		dlog(ms, "extra resel err/exc/fc = %.6x",
908		     MKWORD(0, mr->error, mr->exception, mr->fifo_count));
909	}
910	out_8(&mr->interrupt, INT_ERROR | INT_EXCEPTION | INT_CMDDONE);
911       	mesh_flush_io(mr);
912	udelay(1);
913	out_8(&mr->sequence, SEQ_ENBRESEL);
914       	mesh_flush_io(mr);
915	udelay(1);
916	out_8(&mr->sync_params, ASYNC_PARAMS);
917
918	/*
919	 * Find out who reselected us.
920	 */
921	if (in_8(&mr->fifo_count) == 0) {
922		printk(KERN_ERR "mesh: reselection but nothing in fifo?\n");
923		ms->conn_tgt = ms->host->this_id;
924		goto bogus;
925	}
926	/* get the last byte in the fifo */
927	do {
928		b = in_8(&mr->fifo);
929		dlog(ms, "reseldata %x", b);
930	} while (in_8(&mr->fifo_count));
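	/*
	 * The byte left in the fifo is the value of the data bus during
	 * reselection, which should have exactly two bits set: our own ID
	 * and the reselecting target's ID.  For example, with this_id == 7
	 * a reselection by target 3 shows up as 0x88.
	 */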
931	for (t = 0; t < 8; ++t)
932		if ((b & (1 << t)) != 0 && t != ms->host->this_id)
933			break;
934	if (b != (1 << t) + (1 << ms->host->this_id)) {
935		printk(KERN_ERR "mesh: bad reselection data %x\n", b);
936		ms->conn_tgt = ms->host->this_id;
937		goto bogus;
938	}
939
940
941	/*
942	 * Set up to continue with that target's transfer.
943	 */
944	ms->conn_tgt = t;
945	tp = &ms->tgts[t];
946	out_8(&mr->sync_params, tp->sync_params);
947	if (ALLOW_DEBUG(t)) {
948		printk(KERN_DEBUG "mesh: reselected by target %d\n", t);
949		printk(KERN_DEBUG "mesh: saved_ptr=%x goes_out=%d cmd=%p\n",
950		       tp->saved_ptr, tp->data_goes_out, tp->current_req);
951	}
952	ms->current_req = tp->current_req;
953	if (tp->current_req == NULL) {
954		printk(KERN_ERR "mesh: reselected by tgt %d but no cmd!\n", t);
955		goto bogus;
956	}
957	ms->data_ptr = tp->saved_ptr;
958	dlog(ms, "resel prev tgt=%d", prev);
959	dlog(ms, "resel err/exc=%.4x", MKWORD(0, 0, mr->error, mr->exception));
960	start_phase(ms);
961	return;
962
963bogus:
964	dumplog(ms, ms->conn_tgt);
965	dumpslog(ms);
966	ms->data_ptr = 0;
967	ms->aborting = 1;
968	start_phase(ms);
969}
970
971static void do_abort(struct mesh_state *ms)
972{
973	ms->msgout[0] = ABORT;
974	ms->n_msgout = 1;
975	ms->aborting = 1;
976	ms->stat = DID_ABORT;
977	dlog(ms, "abort", 0);
978}
979
980static void handle_reset(struct mesh_state *ms)
981{
982	int tgt;
983	struct mesh_target *tp;
984	struct scsi_cmnd *cmd;
985	volatile struct mesh_regs __iomem *mr = ms->mesh;
986
987	for (tgt = 0; tgt < 8; ++tgt) {
988		tp = &ms->tgts[tgt];
989		if ((cmd = tp->current_req) != NULL) {
990			cmd->result = DID_RESET << 16;
991			tp->current_req = NULL;
992			mesh_completed(ms, cmd);
993		}
994		ms->tgts[tgt].sdtr_state = do_sdtr;
995		ms->tgts[tgt].sync_params = ASYNC_PARAMS;
996	}
997	ms->current_req = NULL;
998	while ((cmd = ms->request_q) != NULL) {
999		ms->request_q = (struct scsi_cmnd *) cmd->host_scribble;
1000		cmd->result = DID_RESET << 16;
1001		mesh_completed(ms, cmd);
1002	}
1003	ms->phase = idle;
1004	ms->msgphase = msg_none;
1005	out_8(&mr->interrupt, INT_ERROR | INT_EXCEPTION | INT_CMDDONE);
1006	out_8(&mr->sequence, SEQ_FLUSHFIFO);
1007       	mesh_flush_io(mr);
1008	udelay(1);
1009	out_8(&mr->sync_params, ASYNC_PARAMS);
1010	out_8(&mr->sequence, SEQ_ENBRESEL);
1011}
1012
1013static irqreturn_t do_mesh_interrupt(int irq, void *dev_id)
1014{
1015	unsigned long flags;
1016	struct mesh_state *ms = dev_id;
1017	struct Scsi_Host *dev = ms->host;
1018
1019	spin_lock_irqsave(dev->host_lock, flags);
1020	mesh_interrupt(ms);
1021	spin_unlock_irqrestore(dev->host_lock, flags);
1022	return IRQ_HANDLED;
1023}
1024
1025static void handle_error(struct mesh_state *ms)
1026{
1027	int err, exc, count;
1028	volatile struct mesh_regs __iomem *mr = ms->mesh;
1029
1030	err = in_8(&mr->error);
1031	exc = in_8(&mr->exception);
1032	out_8(&mr->interrupt, INT_ERROR | INT_EXCEPTION | INT_CMDDONE);
1033	dlog(ms, "error err/exc/fc/cl=%.8x",
1034	     MKWORD(err, exc, mr->fifo_count, mr->count_lo));
1035	if (err & ERR_SCSIRESET) {
1036		/* SCSI bus was reset */
1037		printk(KERN_INFO "mesh: SCSI bus reset detected: "
1038		       "waiting for end...");
1039		while ((in_8(&mr->bus_status1) & BS1_RST) != 0)
1040			udelay(1);
1041		printk("done\n");
1042		handle_reset(ms);
1043		/* request_q is empty, no point in mesh_start() */
1044		return;
1045	}
1046	if (err & ERR_UNEXPDISC) {
1047		/* Unexpected disconnect */
1048		if (exc & EXC_RESELECTED) {
1049			reselected(ms);
1050			return;
1051		}
1052		if (!ms->aborting) {
1053			printk(KERN_WARNING "mesh: target %d aborted\n",
1054			       ms->conn_tgt);
1055			dumplog(ms, ms->conn_tgt);
1056			dumpslog(ms);
1057		}
1058		out_8(&mr->interrupt, INT_CMDDONE);
1059		ms->stat = DID_ABORT;
1060		mesh_done(ms, 1);
1061		return;
1062	}
1063	if (err & ERR_PARITY) {
1064		if (ms->msgphase == msg_in) {
1065			printk(KERN_ERR "mesh: msg parity error, target %d\n",
1066			       ms->conn_tgt);
1067			ms->msgout[0] = MSG_PARITY_ERROR;
1068			ms->n_msgout = 1;
1069			ms->msgphase = msg_in_bad;
1070			cmd_complete(ms);
1071			return;
1072		}
1073		if (ms->stat == DID_OK) {
1074			printk(KERN_ERR "mesh: parity error, target %d\n",
1075			       ms->conn_tgt);
1076			ms->stat = DID_PARITY;
1077		}
1078		count = (mr->count_hi << 8) + mr->count_lo;
1079		if (count == 0) {
1080			cmd_complete(ms);
1081		} else {
1082			/* reissue the data transfer command */
1083			out_8(&mr->sequence, mr->sequence);
1084		}
1085		return;
1086	}
1087	if (err & ERR_SEQERR) {
1088		if (exc & EXC_RESELECTED) {
1089			/* This can happen if we issue a command to
1090			   get the bus just after the target reselects us. */
1091			static int mesh_resel_seqerr;
1092			mesh_resel_seqerr++;
1093			reselected(ms);
1094			return;
1095		}
1096		if (exc == EXC_PHASEMM) {
1097			static int mesh_phasemm_seqerr;
1098			mesh_phasemm_seqerr++;
1099			phase_mismatch(ms);
1100			return;
1101		}
1102		printk(KERN_ERR "mesh: sequence error (err=%x exc=%x)\n",
1103		       err, exc);
1104	} else {
1105		printk(KERN_ERR "mesh: unknown error %x (exc=%x)\n", err, exc);
1106	}
1107	mesh_dump_regs(ms);
1108	dumplog(ms, ms->conn_tgt);
1109	if (ms->phase > selecting && (in_8(&mr->bus_status1) & BS1_BSY)) {
1110		/* try to do what the target wants */
1111		do_abort(ms);
1112		phase_mismatch(ms);
1113		return;
1114	}
1115	ms->stat = DID_ERROR;
1116	mesh_done(ms, 1);
1117}
1118
1119static void handle_exception(struct mesh_state *ms)
1120{
1121	int exc;
1122	volatile struct mesh_regs __iomem *mr = ms->mesh;
1123
1124	exc = in_8(&mr->exception);
1125	out_8(&mr->interrupt, INT_EXCEPTION | INT_CMDDONE);
1126	if (exc & EXC_RESELECTED) {
1127		static int mesh_resel_exc;
1128		mesh_resel_exc++;
1129		reselected(ms);
1130	} else if (exc == EXC_ARBLOST) {
1131		printk(KERN_DEBUG "mesh: lost arbitration\n");
1132		ms->stat = DID_BUS_BUSY;
1133		mesh_done(ms, 1);
1134	} else if (exc == EXC_SELTO) {
1135		/* selection timed out */
1136		ms->stat = DID_BAD_TARGET;
1137		mesh_done(ms, 1);
1138	} else if (exc == EXC_PHASEMM) {
1139		/* target wants to do something different:
1140		   find out what it wants and do it. */
1141		phase_mismatch(ms);
1142	} else {
1143		printk(KERN_ERR "mesh: can't cope with exception %x\n", exc);
1144		mesh_dump_regs(ms);
1145		dumplog(ms, ms->conn_tgt);
1146		do_abort(ms);
1147		phase_mismatch(ms);
1148	}
1149}
1150
1151static void handle_msgin(struct mesh_state *ms)
1152{
1153	int i, code;
1154	struct scsi_cmnd *cmd = ms->current_req;
1155	struct mesh_target *tp = &ms->tgts[ms->conn_tgt];
1156
1157	if (ms->n_msgin == 0)
1158		return;
1159	code = ms->msgin[0];
1160	if (ALLOW_DEBUG(ms->conn_tgt)) {
1161		printk(KERN_DEBUG "got %d message bytes:", ms->n_msgin);
1162		for (i = 0; i < ms->n_msgin; ++i)
1163			printk(" %x", ms->msgin[i]);
1164		printk("\n");
1165	}
1166	dlog(ms, "msgin msg=%.8x",
1167	     MKWORD(ms->n_msgin, code, ms->msgin[1], ms->msgin[2]));
1168
1169	ms->expect_reply = 0;
1170	ms->n_msgout = 0;
1171	if (ms->n_msgin < msgin_length(ms))
1172		goto reject;
1173	if (cmd)
1174		cmd->SCp.Message = code;
1175	switch (code) {
1176	case COMMAND_COMPLETE:
1177		break;
1178	case EXTENDED_MESSAGE:
1179		switch (ms->msgin[2]) {
1180		case EXTENDED_MODIFY_DATA_POINTER:
1181			ms->data_ptr += (ms->msgin[3] << 24) + ms->msgin[6]
1182				+ (ms->msgin[4] << 16) + (ms->msgin[5] << 8);
1183			break;
1184		case EXTENDED_SDTR:
1185			if (tp->sdtr_state != sdtr_sent) {
1186				/* reply with an SDTR */
1187				add_sdtr_msg(ms);
1188				/* limit the period to at least the target's value,
1189				   and the offset to no more than the target's */
1190				if (ms->msgout[3] < ms->msgin[3])
1191					ms->msgout[3] = ms->msgin[3];
1192				if (ms->msgout[4] > ms->msgin[4])
1193					ms->msgout[4] = ms->msgin[4];
1194				set_sdtr(ms, ms->msgout[3], ms->msgout[4]);
1195				ms->msgphase = msg_out;
1196			} else {
1197				set_sdtr(ms, ms->msgin[3], ms->msgin[4]);
1198			}
1199			break;
1200		default:
1201			goto reject;
1202		}
1203		break;
1204	case SAVE_POINTERS:
1205		tp->saved_ptr = ms->data_ptr;
1206		break;
1207	case RESTORE_POINTERS:
1208		ms->data_ptr = tp->saved_ptr;
1209		break;
1210	case DISCONNECT:
1211		ms->phase = disconnecting;
1212		break;
1213	case ABORT:
1214		break;
1215	case MESSAGE_REJECT:
1216		if (tp->sdtr_state == sdtr_sent)
1217			set_sdtr(ms, 0, 0);
1218		break;
1219	case NOP:
1220		break;
1221	default:
1222		if (IDENTIFY_BASE <= code && code <= IDENTIFY_BASE + 7) {
1223			if (cmd == NULL) {
1224				do_abort(ms);
1225				ms->msgphase = msg_out;
1226			} else if (code != cmd->device->lun + IDENTIFY_BASE) {
1227				printk(KERN_WARNING "mesh: lun mismatch "
1228				       "(%d != %d) on reselection from "
1229				       "target %d\n", code - IDENTIFY_BASE,
1230				       cmd->device->lun, ms->conn_tgt);
1231			}
1232			break;
1233		}
1234		goto reject;
1235	}
1236	return;
1237
1238 reject:
1239	printk(KERN_WARNING "mesh: rejecting message from target %d:",
1240	       ms->conn_tgt);
1241	for (i = 0; i < ms->n_msgin; ++i)
1242		printk(" %x", ms->msgin[i]);
1243	printk("\n");
1244	ms->msgout[0] = MESSAGE_REJECT;
1245	ms->n_msgout = 1;
1246	ms->msgphase = msg_out;
1247}
1248
1249/*
1250 * Set up DMA commands for transferring data.
1251 */
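/*
 * The list built here has one INPUT_MORE or OUTPUT_MORE descriptor per
 * scatterlist element still to be transferred (the first one offset by
 * data_ptr when resuming after a disconnect); the last descriptor is
 * then converted to INPUT_LAST/OUTPUT_LAST and followed by a DBDMA_STOP.
 */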
1252static void set_dma_cmds(struct mesh_state *ms, struct scsi_cmnd *cmd)
1253{
1254	int i, dma_cmd, total, off, dtot;
1255	struct scatterlist *scl;
1256	struct dbdma_cmd *dcmds;
1257
1258	dma_cmd = ms->tgts[ms->conn_tgt].data_goes_out?
1259		OUTPUT_MORE: INPUT_MORE;
1260	dcmds = ms->dma_cmds;
1261	dtot = 0;
1262	if (cmd) {
1263		cmd->SCp.this_residual = cmd->request_bufflen;
1264		if (cmd->use_sg > 0) {
1265			int nseg;
1266			total = 0;
1267			scl = (struct scatterlist *) cmd->request_buffer;
1268			off = ms->data_ptr;
1269			nseg = pci_map_sg(ms->pdev, scl, cmd->use_sg,
1270					  cmd->sc_data_direction);
1271			for (i = 0; i <nseg; ++i, ++scl) {
1272				u32 dma_addr = sg_dma_address(scl);
1273				u32 dma_len = sg_dma_len(scl);
1274
1275				total += scl->length;
1276				if (off >= dma_len) {
1277					off -= dma_len;
1278					continue;
1279				}
1280				if (dma_len > 0xffff)
1281					panic("mesh: scatterlist element >= 64k");
1282				st_le16(&dcmds->req_count, dma_len - off);
1283				st_le16(&dcmds->command, dma_cmd);
1284				st_le32(&dcmds->phy_addr, dma_addr + off);
1285				dcmds->xfer_status = 0;
1286				++dcmds;
1287				dtot += dma_len - off;
1288				off = 0;
1289			}
1290		} else if (ms->data_ptr < cmd->request_bufflen) {
1291			dtot = cmd->request_bufflen - ms->data_ptr;
1292			if (dtot > 0xffff)
1293				panic("mesh: transfer size >= 64k");
1294			st_le16(&dcmds->req_count, dtot);
1295			st_le32(&dcmds->phy_addr,
1296				virt_to_phys(cmd->request_buffer) + ms->data_ptr);
1297			dcmds->xfer_status = 0;
1298			++dcmds;
1299		}
1300	}
1301	if (dtot == 0) {
1302		/* Either the target has overrun our buffer,
1303		   or the caller didn't provide a buffer. */
1304		static char mesh_extra_buf[64];
1305
1306		dtot = sizeof(mesh_extra_buf);
1307		st_le16(&dcmds->req_count, dtot);
1308		st_le32(&dcmds->phy_addr, virt_to_phys(mesh_extra_buf));
1309		dcmds->xfer_status = 0;
1310		++dcmds;
1311	}
1312	dma_cmd += OUTPUT_LAST - OUTPUT_MORE;
1313	st_le16(&dcmds[-1].command, dma_cmd);
1314	memset(dcmds, 0, sizeof(*dcmds));
1315	st_le16(&dcmds->command, DBDMA_STOP);
1316	ms->dma_count = dtot;
1317}
1318
1319static void halt_dma(struct mesh_state *ms)
1320{
1321	volatile struct dbdma_regs __iomem *md = ms->dma;
1322	volatile struct mesh_regs __iomem *mr = ms->mesh;
1323	struct scsi_cmnd *cmd = ms->current_req;
1324	int t, nb;
1325
1326	if (!ms->tgts[ms->conn_tgt].data_goes_out) {
1327		/* wait a little while until the fifo drains */
1328		t = 50;
1329		while (t > 0 && in_8(&mr->fifo_count) != 0
1330		       && (in_le32(&md->status) & ACTIVE) != 0) {
1331			--t;
1332			udelay(1);
1333		}
1334	}
1335	out_le32(&md->control, RUN << 16);	/* turn off RUN bit */
1336	nb = (mr->count_hi << 8) + mr->count_lo;
1337	dlog(ms, "halt_dma fc/count=%.6x",
1338	     MKWORD(0, mr->fifo_count, 0, nb));
1339	if (ms->tgts[ms->conn_tgt].data_goes_out)
1340		nb += mr->fifo_count;
1341	/* nb is the number of bytes not yet transferred
1342	   to/from the target. */
1343	ms->data_ptr -= nb;
1344	dlog(ms, "data_ptr %x", ms->data_ptr);
1345	if (ms->data_ptr < 0) {
1346		printk(KERN_ERR "mesh: halt_dma: data_ptr=%d (nb=%d, ms=%p)\n",
1347		       ms->data_ptr, nb, ms);
1348		ms->data_ptr = 0;
1349#ifdef MESH_DBG
1350		dumplog(ms, ms->conn_tgt);
1351		dumpslog(ms);
1352#endif /* MESH_DBG */
1353	} else if (cmd && cmd->request_bufflen != 0 &&
1354		   ms->data_ptr > cmd->request_bufflen) {
1355		printk(KERN_DEBUG "mesh: target %d overrun, "
1356		       "data_ptr=%x total=%x goes_out=%d\n",
1357		       ms->conn_tgt, ms->data_ptr, cmd->request_bufflen,
1358		       ms->tgts[ms->conn_tgt].data_goes_out);
1359	}
1360	if (cmd && cmd->use_sg != 0) {
1361		struct scatterlist *sg;
1362		sg = (struct scatterlist *)cmd->request_buffer;
1363		pci_unmap_sg(ms->pdev, sg, cmd->use_sg, cmd->sc_data_direction);
1364	}
1365	ms->dma_started = 0;
1366}
1367
1368static void phase_mismatch(struct mesh_state *ms)
1369{
1370	volatile struct mesh_regs __iomem *mr = ms->mesh;
1371	int phase;
1372
1373	dlog(ms, "phasemm ch/cl/seq/fc=%.8x",
1374	     MKWORD(mr->count_hi, mr->count_lo, mr->sequence, mr->fifo_count));
1375	phase = in_8(&mr->bus_status0) & BS0_PHASE;
1376	if (ms->msgphase == msg_out_xxx && phase == BP_MSGOUT) {
1377		/* output the last byte of the message, without ATN */
1378		out_8(&mr->count_lo, 1);
1379		out_8(&mr->sequence, SEQ_MSGOUT + use_active_neg);
1380		mesh_flush_io(mr);
1381		udelay(1);
1382		out_8(&mr->fifo, ms->msgout[ms->n_msgout-1]);
1383		ms->msgphase = msg_out_last;
1384		return;
1385	}
1386
1387	if (ms->msgphase == msg_in) {
1388		get_msgin(ms);
1389		if (ms->n_msgin)
1390			handle_msgin(ms);
1391	}
1392
1393	if (ms->dma_started)
1394		halt_dma(ms);
1395	if (mr->fifo_count) {
1396		out_8(&mr->sequence, SEQ_FLUSHFIFO);
1397		mesh_flush_io(mr);
1398		udelay(1);
1399	}
1400
1401	ms->msgphase = msg_none;
1402	switch (phase) {
1403	case BP_DATAIN:
1404		ms->tgts[ms->conn_tgt].data_goes_out = 0;
1405		ms->phase = dataing;
1406		break;
1407	case BP_DATAOUT:
1408		ms->tgts[ms->conn_tgt].data_goes_out = 1;
1409		ms->phase = dataing;
1410		break;
1411	case BP_COMMAND:
1412		ms->phase = commanding;
1413		break;
1414	case BP_STATUS:
1415		ms->phase = statusing;
1416		break;
1417	case BP_MSGIN:
1418		ms->msgphase = msg_in;
1419		ms->n_msgin = 0;
1420		break;
1421	case BP_MSGOUT:
1422		ms->msgphase = msg_out;
1423		if (ms->n_msgout == 0) {
1424			if (ms->aborting) {
1425				do_abort(ms);
1426			} else {
1427				if (ms->last_n_msgout == 0) {
1428					printk(KERN_DEBUG
1429					       "mesh: no msg to repeat\n");
1430					ms->msgout[0] = NOP;
1431					ms->last_n_msgout = 1;
1432				}
1433				ms->n_msgout = ms->last_n_msgout;
1434			}
1435		}
1436		break;
1437	default:
1438		printk(KERN_DEBUG "mesh: unknown scsi phase %x\n", phase);
1439		ms->stat = DID_ERROR;
1440		mesh_done(ms, 1);
1441		return;
1442	}
1443
1444	start_phase(ms);
1445}
1446
1447static void cmd_complete(struct mesh_state *ms)
1448{
1449	volatile struct mesh_regs __iomem *mr = ms->mesh;
1450	struct scsi_cmnd *cmd = ms->current_req;
1451	struct mesh_target *tp = &ms->tgts[ms->conn_tgt];
1452	int seq, n, t;
1453
1454	dlog(ms, "cmd_complete fc=%x", mr->fifo_count);
1455	seq = use_active_neg + (ms->n_msgout? SEQ_ATN: 0);
1456	switch (ms->msgphase) {
1457	case msg_out_xxx:
1458		/* huh?  we expected a phase mismatch */
1459		ms->n_msgin = 0;
1460		ms->msgphase = msg_in;
1461		/* fall through */
1462
1463	case msg_in:
1464		/* should have some message bytes in fifo */
1465		get_msgin(ms);
1466		n = msgin_length(ms);
1467		if (ms->n_msgin < n) {
1468			out_8(&mr->count_lo, n - ms->n_msgin);
1469			out_8(&mr->sequence, SEQ_MSGIN + seq);
1470		} else {
1471			ms->msgphase = msg_none;
1472			handle_msgin(ms);
1473			start_phase(ms);
1474		}
1475		break;
1476
1477	case msg_in_bad:
1478		out_8(&mr->sequence, SEQ_FLUSHFIFO);
1479		mesh_flush_io(mr);
1480		udelay(1);
1481		out_8(&mr->count_lo, 1);
1482		out_8(&mr->sequence, SEQ_MSGIN + SEQ_ATN + use_active_neg);
1483		break;
1484
1485	case msg_out:
1486		/*
1487		 * To get the right timing on ATN wrt ACK, we have
1488		 * to get the MESH to drop ACK, wait until REQ gets
1489		 * asserted, then drop ATN.  To do this we first
1490		 * issue a SEQ_MSGOUT with ATN and wait for REQ,
1491		 * then change the command to a SEQ_MSGOUT w/o ATN.
1492		 * If we don't see REQ in a reasonable time, we
1493		 * change the command to SEQ_MSGIN with ATN,
1494		 * wait for the phase mismatch interrupt, then
1495		 * issue the SEQ_MSGOUT without ATN.
1496		 */
1497		out_8(&mr->count_lo, 1);
1498		out_8(&mr->sequence, SEQ_MSGOUT + use_active_neg + SEQ_ATN);
1499		t = 30;		/* wait up to 30us */
1500		while ((in_8(&mr->bus_status0) & BS0_REQ) == 0 && --t >= 0)
1501			udelay(1);
1502		dlog(ms, "last_mbyte err/exc/fc/cl=%.8x",
1503		     MKWORD(mr->error, mr->exception,
1504			    mr->fifo_count, mr->count_lo));
1505		if (in_8(&mr->interrupt) & (INT_ERROR | INT_EXCEPTION)) {
1506			/* whoops, target didn't do what we expected */
1507			ms->last_n_msgout = ms->n_msgout;
1508			ms->n_msgout = 0;
1509			if (in_8(&mr->interrupt) & INT_ERROR) {
1510				printk(KERN_ERR "mesh: error %x in msg_out\n",
1511				       in_8(&mr->error));
1512				handle_error(ms);
1513				return;
1514			}
1515			if (in_8(&mr->exception) != EXC_PHASEMM)
1516				printk(KERN_ERR "mesh: exc %x in msg_out\n",
1517				       in_8(&mr->exception));
1518			else
1519				printk(KERN_DEBUG "mesh: bs0=%x in msg_out\n",
1520				       in_8(&mr->bus_status0));
1521			handle_exception(ms);
1522			return;
1523		}
1524		if (in_8(&mr->bus_status0) & BS0_REQ) {
1525			out_8(&mr->sequence, SEQ_MSGOUT + use_active_neg);
1526			mesh_flush_io(mr);
1527			udelay(1);
1528			out_8(&mr->fifo, ms->msgout[ms->n_msgout-1]);
1529			ms->msgphase = msg_out_last;
1530		} else {
1531			out_8(&mr->sequence, SEQ_MSGIN + use_active_neg + SEQ_ATN);
1532			ms->msgphase = msg_out_xxx;
1533		}
1534		break;
1535
1536	case msg_out_last:
1537		ms->last_n_msgout = ms->n_msgout;
1538		ms->n_msgout = 0;
1539		ms->msgphase = ms->expect_reply? msg_in: msg_none;
1540		start_phase(ms);
1541		break;
1542
1543	case msg_none:
1544		switch (ms->phase) {
1545		case idle:
1546			printk(KERN_ERR "mesh: interrupt in idle phase?\n");
1547			dumpslog(ms);
1548			return;
1549		case selecting:
1550			dlog(ms, "Selecting phase at command completion", 0);
1551			ms->msgout[0] = IDENTIFY(ALLOW_RESEL(ms->conn_tgt),
1552						 (cmd? cmd->device->lun: 0));
1553			ms->n_msgout = 1;
1554			ms->expect_reply = 0;
1555			if (ms->aborting) {
1556				ms->msgout[0] = ABORT;
1557				ms->n_msgout++;
1558			} else if (tp->sdtr_state == do_sdtr) {
1559				/* add SDTR message */
1560				add_sdtr_msg(ms);
1561				ms->expect_reply = 1;
1562				tp->sdtr_state = sdtr_sent;
1563			}
1564			ms->msgphase = msg_out;
1565			/*
1566			 * We need to wait for REQ before dropping ATN.
1567			 * We wait for at most 30us, then fall back to
1568			 * a scheme where we issue a SEQ_COMMAND with ATN,
1569			 * which will give us a phase mismatch interrupt
1570			 * when REQ does come, and then we send the message.
1571			 */
1572			t = 230;		/* wait up to 230us */
1573			while ((in_8(&mr->bus_status0) & BS0_REQ) == 0) {
1574				if (--t < 0) {
1575					dlog(ms, "impatient for req", ms->n_msgout);
1576					ms->msgphase = msg_none;
1577					break;
1578				}
1579				udelay(1);
1580			}
1581			break;
1582		case dataing:
1583			if (ms->dma_count != 0) {
1584				start_phase(ms);
1585				return;
1586			}
1587			/*
1588			 * We can get a phase mismatch here if the target
1589			 * changes to the status phase, even though we have
1590			 * had a command complete interrupt.  Then, if we
1591			 * issue the SEQ_STATUS command, we'll get a sequence
1592			 * error interrupt.  Which isn't so bad except that
1593			 * occasionally the mesh actually executes the
1594			 * SEQ_STATUS *as well as* giving us the sequence
1595			 * error and phase mismatch exception.
1596			 */
1597			out_8(&mr->sequence, 0);
1598			out_8(&mr->interrupt,
1599			      INT_ERROR | INT_EXCEPTION | INT_CMDDONE);
1600			halt_dma(ms);
1601			break;
1602		case statusing:
1603			if (cmd) {
1604				cmd->SCp.Status = mr->fifo;
1605				if (DEBUG_TARGET(cmd))
1606					printk(KERN_DEBUG "mesh: status is %x\n",
1607					       cmd->SCp.Status);
1608			}
1609			ms->msgphase = msg_in;
1610			break;
1611		case busfreeing:
1612			mesh_done(ms, 1);
1613			return;
1614		case disconnecting:
1615			ms->current_req = NULL;
1616			ms->phase = idle;
1617			mesh_start(ms);
1618			return;
1619		default:
1620			break;
1621		}
1622		++ms->phase;
1623		start_phase(ms);
1624		break;
1625	}
1626}
1627
1628
1629/*
1630 * Called by the midlayer, with the host lock held, to queue a new
1631 * request.
1632 */
1633static int mesh_queue(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *))
1634{
1635	struct mesh_state *ms;
1636
1637	cmd->scsi_done = done;
1638	cmd->host_scribble = NULL;
1639
1640	ms = (struct mesh_state *) cmd->device->host->hostdata;
1641
1642	if (ms->request_q == NULL)
1643		ms->request_q = cmd;
1644	else
1645		ms->request_qtail->host_scribble = (void *) cmd;
1646	ms->request_qtail = cmd;
1647
1648	if (ms->phase == idle)
1649		mesh_start(ms);
1650
1651	return 0;
1652}
1653
1654/*
1655 * Called to handle interrupts, either from the interrupt
1656 * handler (do_mesh_interrupt) or from other functions in
1657 * exceptional circumstances.
1658 */
1659static void mesh_interrupt(struct mesh_state *ms)
1660{
1661	volatile struct mesh_regs __iomem *mr = ms->mesh;
1662	int intr;
1663
1664	while ((intr = in_8(&mr->interrupt)) != 0) {
1665		dlog(ms, "interrupt intr/err/exc/seq=%.8x",
1666		     MKWORD(intr, mr->error, mr->exception, mr->sequence));
1667		if (intr & INT_ERROR) {
1668			handle_error(ms);
1669		} else if (intr & INT_EXCEPTION) {
1670			handle_exception(ms);
1671		} else if (intr & INT_CMDDONE) {
1672			out_8(&mr->interrupt, INT_CMDDONE);
1673			cmd_complete(ms);
1674		}
1675	}
1676}
1677
1678/* To do: here we could at least try to remove the command from the
1679 * queue if it isn't connected yet, and for a pending command, assert
1680 * ATN until the bus gets freed.
1681 */
1682static int mesh_abort(struct scsi_cmnd *cmd)
1683{
1684	struct mesh_state *ms = (struct mesh_state *) cmd->device->host->hostdata;
1685
1686	printk(KERN_DEBUG "mesh_abort(%p)\n", cmd);
1687	mesh_dump_regs(ms);
1688	dumplog(ms, cmd->device->id);
1689	dumpslog(ms);
1690	return FAILED;
1691}
1692
1693/*
1694 * Called by the midlayer to reset the SCSI host and bus;
1695 * we take the host lock ourselves here.
1696 * The midlayer will wait for devices to come back, so we don't need
1697 * to do that ourselves.
1698 */
1699static int mesh_host_reset(struct scsi_cmnd *cmd)
1700{
1701	struct mesh_state *ms = (struct mesh_state *) cmd->device->host->hostdata;
1702	volatile struct mesh_regs __iomem *mr = ms->mesh;
1703	volatile struct dbdma_regs __iomem *md = ms->dma;
1704	unsigned long flags;
1705
1706	printk(KERN_DEBUG "mesh_host_reset\n");
1707
1708	spin_lock_irqsave(ms->host->host_lock, flags);
1709
1710	/* Reset the controller & dbdma channel */
1711	out_le32(&md->control, (RUN|PAUSE|FLUSH|WAKE) << 16);	/* stop dma */
1712	out_8(&mr->exception, 0xff);	/* clear all exception bits */
1713	out_8(&mr->error, 0xff);	/* clear all error bits */
1714	out_8(&mr->sequence, SEQ_RESETMESH);
1715       	mesh_flush_io(mr);
1716	udelay(1);
1717	out_8(&mr->intr_mask, INT_ERROR | INT_EXCEPTION | INT_CMDDONE);
1718	out_8(&mr->source_id, ms->host->this_id);
1719	out_8(&mr->sel_timeout, 25);	/* 250ms */
1720	out_8(&mr->sync_params, ASYNC_PARAMS);
1721
1722	/* Reset the bus */
1723	out_8(&mr->bus_status1, BS1_RST);	/* assert RST */
1724       	mesh_flush_io(mr);
1725	udelay(30);			/* leave it on for >= 25us */
1726	out_8(&mr->bus_status1, 0);	/* negate RST */
1727
1728	/* Complete pending commands */
1729	handle_reset(ms);
1730
1731	spin_unlock_irqrestore(ms->host->host_lock, flags);
1732	return SUCCESS;
1733}
1734
1735static void set_mesh_power(struct mesh_state *ms, int state)
1736{
1737	if (!machine_is(powermac))
1738		return;
1739	if (state) {
1740		pmac_call_feature(PMAC_FTR_MESH_ENABLE, macio_get_of_node(ms->mdev), 0, 1);
1741		msleep(200);
1742	} else {
1743		pmac_call_feature(PMAC_FTR_MESH_ENABLE, macio_get_of_node(ms->mdev), 0, 0);
1744		msleep(10);
1745	}
1746}
1747
1748
1749#ifdef CONFIG_PM
1750static int mesh_suspend(struct macio_dev *mdev, pm_message_t mesg)
1751{
1752	struct mesh_state *ms = (struct mesh_state *)macio_get_drvdata(mdev);
1753	unsigned long flags;
1754
1755	switch (mesg.event) {
1756	case PM_EVENT_SUSPEND:
1757	case PM_EVENT_FREEZE:
1758		break;
1759	default:
1760		return 0;
1761	}
1762	if (mesg.event == mdev->ofdev.dev.power.power_state.event)
1763		return 0;
1764
1765	scsi_block_requests(ms->host);
1766	spin_lock_irqsave(ms->host->host_lock, flags);
1767	while(ms->phase != idle) {
1768		spin_unlock_irqrestore(ms->host->host_lock, flags);
1769		msleep(10);
1770		spin_lock_irqsave(ms->host->host_lock, flags);
1771	}
1772	ms->phase = sleeping;
1773	spin_unlock_irqrestore(ms->host->host_lock, flags);
1774	disable_irq(ms->meshintr);
1775	set_mesh_power(ms, 0);
1776
1777	mdev->ofdev.dev.power.power_state = mesg;
1778
1779	return 0;
1780}
1781
1782static int mesh_resume(struct macio_dev *mdev)
1783{
1784	struct mesh_state *ms = (struct mesh_state *)macio_get_drvdata(mdev);
1785	unsigned long flags;
1786
1787	if (mdev->ofdev.dev.power.power_state.event == PM_EVENT_ON)
1788		return 0;
1789
1790	set_mesh_power(ms, 1);
1791	mesh_init(ms);
1792	spin_lock_irqsave(ms->host->host_lock, flags);
1793	mesh_start(ms);
1794	spin_unlock_irqrestore(ms->host->host_lock, flags);
1795	enable_irq(ms->meshintr);
1796	scsi_unblock_requests(ms->host);
1797
1798	mdev->ofdev.dev.power.power_state.event = PM_EVENT_ON;
1799
1800	return 0;
1801}
1802
1803#endif /* CONFIG_PM */
1804
1805/*
1806 * If we leave drives set for synchronous transfers (especially
1807 * CDROMs), and reboot to MacOS, it gets confused, poor thing.
1808 * So, on reboot we reset the SCSI bus.
1809 */
1810static int mesh_shutdown(struct macio_dev *mdev)
1811{
1812	struct mesh_state *ms = (struct mesh_state *)macio_get_drvdata(mdev);
1813	volatile struct mesh_regs __iomem *mr;
1814	unsigned long flags;
1815
1816       	printk(KERN_INFO "resetting MESH scsi bus(es)\n");
1817	spin_lock_irqsave(ms->host->host_lock, flags);
1818       	mr = ms->mesh;
1819	out_8(&mr->intr_mask, 0);
1820	out_8(&mr->interrupt, INT_ERROR | INT_EXCEPTION | INT_CMDDONE);
1821	out_8(&mr->bus_status1, BS1_RST);
1822	mesh_flush_io(mr);
1823	udelay(30);
1824	out_8(&mr->bus_status1, 0);
1825	spin_unlock_irqrestore(ms->host->host_lock, flags);
1826
1827	return 0;
1828}
1829
1830static struct scsi_host_template mesh_template = {
1831	.proc_name			= "mesh",
1832	.name				= "MESH",
1833	.queuecommand			= mesh_queue,
1834	.eh_abort_handler		= mesh_abort,
1835	.eh_host_reset_handler		= mesh_host_reset,
1836	.can_queue			= 20,
1837	.this_id			= 7,
1838	.sg_tablesize			= SG_ALL,
1839	.cmd_per_lun			= 2,
1840	.use_clustering			= DISABLE_CLUSTERING,
1841};
1842
1843static int mesh_probe(struct macio_dev *mdev, const struct of_device_id *match)
1844{
1845	struct device_node *mesh = macio_get_of_node(mdev);
1846	struct pci_dev* pdev = macio_get_pci_dev(mdev);
1847	int tgt, minper;
1848	const int *cfp;
1849	struct mesh_state *ms;
1850	struct Scsi_Host *mesh_host;
1851	void *dma_cmd_space;
1852	dma_addr_t dma_cmd_bus;
1853
1854	switch (mdev->bus->chip->type) {
1855	case macio_heathrow:
1856	case macio_gatwick:
1857	case macio_paddington:
1858		use_active_neg = 0;
1859		break;
1860	default:
1861		use_active_neg = SEQ_ACTIVE_NEG;
1862	}
1863
1864	if (macio_resource_count(mdev) != 2 || macio_irq_count(mdev) != 2) {
1865       		printk(KERN_ERR "mesh: expected 2 addrs and 2 intrs"
1866	       	       " (got %d,%d)\n", macio_resource_count(mdev),
1867		       macio_irq_count(mdev));
1868		return -ENODEV;
1869	}
1870
1871	if (macio_request_resources(mdev, "mesh") != 0) {
1872		printk(KERN_ERR "mesh: unable to request memory resources\n");
1873		return -EBUSY;
1874	}
1875       	mesh_host = scsi_host_alloc(&mesh_template, sizeof(struct mesh_state));
1876	if (mesh_host == NULL) {
1877		printk(KERN_ERR "mesh: couldn't register host\n");
1878		goto out_release;
1879	}
1880
1881	/* Old junk for root discovery, that will die ultimately */
1882#if !defined(MODULE)
1883       	note_scsi_host(mesh, mesh_host);
1884#endif
1885
1886	mesh_host->base = macio_resource_start(mdev, 0);
1887	mesh_host->irq = macio_irq(mdev, 0);
1888       	ms = (struct mesh_state *) mesh_host->hostdata;
1889	macio_set_drvdata(mdev, ms);
1890	ms->host = mesh_host;
1891	ms->mdev = mdev;
1892	ms->pdev = pdev;
1893
1894	ms->mesh = ioremap(macio_resource_start(mdev, 0), 0x1000);
1895	if (ms->mesh == NULL) {
1896		printk(KERN_ERR "mesh: can't map MESH registers\n");
1897		goto out_free;
1898	}
1899	ms->dma = ioremap(macio_resource_start(mdev, 1), 0x1000);
1900	if (ms->dma == NULL) {
1901		printk(KERN_ERR "mesh: can't map DBDMA registers\n");
1902		iounmap(ms->mesh);
1903		goto out_free;
1904	}
1905
1906       	ms->meshintr = macio_irq(mdev, 0);
1907       	ms->dmaintr = macio_irq(mdev, 1);
1908
1909       	/* Space for dma command list: +1 for stop command,
1910       	 * +1 to allow for aligning.
1911	 */
1912	ms->dma_cmd_size = (mesh_host->sg_tablesize + 2) * sizeof(struct dbdma_cmd);
1913
1914	/* We use the PCI APIs for now until the generic one gets fixed
1915	 * enough or until we get some macio-specific versions
1916	 */
1917	dma_cmd_space = pci_alloc_consistent(macio_get_pci_dev(mdev),
1918					     ms->dma_cmd_size,
1919					     &dma_cmd_bus);
1920	if (dma_cmd_space == NULL) {
1921		printk(KERN_ERR "mesh: can't allocate DMA table\n");
1922		goto out_unmap;
1923	}
1924	memset(dma_cmd_space, 0, ms->dma_cmd_size);
1925
1926	ms->dma_cmds = (struct dbdma_cmd *) DBDMA_ALIGN(dma_cmd_space);
1927       	ms->dma_cmd_space = dma_cmd_space;
1928	ms->dma_cmd_bus = dma_cmd_bus + ((unsigned long)ms->dma_cmds)
1929		- (unsigned long)dma_cmd_space;
1930	ms->current_req = NULL;
1931       	for (tgt = 0; tgt < 8; ++tgt) {
1932	       	ms->tgts[tgt].sdtr_state = do_sdtr;
1933	       	ms->tgts[tgt].sync_params = ASYNC_PARAMS;
1934	       	ms->tgts[tgt].current_req = NULL;
1935       	}
1936
1937	if ((cfp = of_get_property(mesh, "clock-frequency", NULL)))
1938       		ms->clk_freq = *cfp;
1939	else {
1940       		printk(KERN_INFO "mesh: assuming 50MHz clock frequency\n");
1941	       	ms->clk_freq = 50000000;
1942       	}
1943
1944       	/* The maximum sync rate is clock / 5; increase
1945       	 * mesh_sync_period if necessary.
1946	 */
1947	minper = 1000000000 / (ms->clk_freq / 5); /* ns */
1948	if (mesh_sync_period < minper)
1949		mesh_sync_period = minper;
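	/* For example, with the usual 50MHz MESH clock this gives
	 * minper = 1000000000 / (50000000 / 5) = 100ns, i.e. a ceiling
	 * of 10 MB/s on the negotiated rate.
	 */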
1950
1951	/* Power up the chip */
1952	set_mesh_power(ms, 1);
1953
1954	/* Set it up */
1955       	mesh_init(ms);
1956
1957	/* Request interrupt */
1958       	if (request_irq(ms->meshintr, do_mesh_interrupt, 0, "MESH", ms)) {
1959	       	printk(KERN_ERR "MESH: can't get irq %d\n", ms->meshintr);
1960		goto out_shutdown;
1961	}
1962
1963	/* Add scsi host & scan */
1964	if (scsi_add_host(mesh_host, &mdev->ofdev.dev))
1965		goto out_release_irq;
1966	scsi_scan_host(mesh_host);
1967
1968	return 0;
1969
1970 out_release_irq:
1971	free_irq(ms->meshintr, ms);
1972 out_shutdown:
1973	/* Shut down & reset the bus in the error case, since MacOS can be
1974	 * confused at reboot if the bus was already set to synchronous mode
1975	 */
1976	mesh_shutdown(mdev);
1977	set_mesh_power(ms, 0);
1978	pci_free_consistent(macio_get_pci_dev(mdev), ms->dma_cmd_size,
1979			    ms->dma_cmd_space, ms->dma_cmd_bus);
1980 out_unmap:
1981	iounmap(ms->dma);
1982	iounmap(ms->mesh);
1983 out_free:
1984	scsi_host_put(mesh_host);
1985 out_release:
1986	macio_release_resources(mdev);
1987
1988	return -ENODEV;
1989}
1990
1991static int mesh_remove(struct macio_dev *mdev)
1992{
1993	struct mesh_state *ms = (struct mesh_state *)macio_get_drvdata(mdev);
1994	struct Scsi_Host *mesh_host = ms->host;
1995
1996	scsi_remove_host(mesh_host);
1997
1998	free_irq(ms->meshintr, ms);
1999
2000	/* Reset scsi bus */
2001	mesh_shutdown(mdev);
2002
2003	/* Shut down chip & termination */
2004	set_mesh_power(ms, 0);
2005
2006	/* Unmap registers & dma controller */
2007	iounmap(ms->mesh);
2008       	iounmap(ms->dma);
2009
2010	/* Free DMA commands memory */
2011	pci_free_consistent(macio_get_pci_dev(mdev), ms->dma_cmd_size,
2012			    ms->dma_cmd_space, ms->dma_cmd_bus);
2013
2014	/* Release memory resources */
2015	macio_release_resources(mdev);
2016
2017	scsi_host_put(mesh_host);
2018
2019	return 0;
2020}
2021
2022
2023static struct of_device_id mesh_match[] =
2024{
2025	{
2026	.name 		= "mesh",
2027	},
2028	{
2029	.type		= "scsi",
2030	.compatible	= "chrp,mesh0"
2031	},
2032	{},
2033};
2034MODULE_DEVICE_TABLE (of, mesh_match);
2035
2036static struct macio_driver mesh_driver =
2037{
2038	.name 		= "mesh",
2039	.match_table	= mesh_match,
2040	.probe		= mesh_probe,
2041	.remove		= mesh_remove,
2042	.shutdown	= mesh_shutdown,
2043#ifdef CONFIG_PM
2044	.suspend	= mesh_suspend,
2045	.resume		= mesh_resume,
2046#endif
2047};
2048
2049
2050static int __init init_mesh(void)
2051{
2052
2053	/* Calculate sync rate from module parameters */
2054	if (sync_rate > 10)
2055		sync_rate = 10;
2056	if (sync_rate > 0) {
2057		printk(KERN_INFO "mesh: configured for synchronous %d MB/s\n", sync_rate);
2058		mesh_sync_period = 1000 / sync_rate;	/* ns */
2059		mesh_sync_offset = 15;
2060	} else
2061		printk(KERN_INFO "mesh: configured for asynchronous\n");
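	/*
	 * sync_rate is in MB/s, so e.g. sync_rate=10 gives
	 * mesh_sync_period = 100ns (an SDTR period factor of 25, the
	 * FAST-10 limit) and sync_rate=5 gives 200ns; mesh_probe() may
	 * still raise mesh_sync_period if the chip clock cannot go that
	 * fast.
	 */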
2062
2063	return macio_register_driver(&mesh_driver);
2064}
2065
2066static void __exit exit_mesh(void)
2067{
2068	return macio_unregister_driver(&mesh_driver);
2069}
2070
2071module_init(init_mesh);
2072module_exit(exit_mesh);
2073