1/* esp_scsi.c: ESP SCSI driver.
2 *
3 * Copyright (C) 2007 David S. Miller (davem@davemloft.net)
4 */
5
6#include <linux/kernel.h>
7#include <linux/types.h>
8#include <linux/slab.h>
9#include <linux/delay.h>
10#include <linux/list.h>
11#include <linux/completion.h>
12#include <linux/kallsyms.h>
13#include <linux/module.h>
14#include <linux/moduleparam.h>
15#include <linux/init.h>
16#include <linux/irqreturn.h>
17
18#include <asm/irq.h>
19#include <asm/io.h>
20#include <asm/dma.h>
21
22#include <scsi/scsi.h>
23#include <scsi/scsi_host.h>
24#include <scsi/scsi_cmnd.h>
25#include <scsi/scsi_device.h>
26#include <scsi/scsi_tcq.h>
27#include <scsi/scsi_dbg.h>
28#include <scsi/scsi_transport_spi.h>
29
30#include "esp_scsi.h"
31
32#define DRV_MODULE_NAME		"esp"
33#define PFX DRV_MODULE_NAME	": "
34#define DRV_VERSION		"2.000"
35#define DRV_MODULE_RELDATE	"April 19, 2007"
36
/* SCSI bus reset settle time in seconds.  */
static int esp_bus_reset_settle = 3;

/* Bitmask of enabled debug message categories; each esp_log_*()
 * macro below prints only when its category's bit is set here.
 */
static u32 esp_debug;
#define ESP_DEBUG_INTR		0x00000001
#define ESP_DEBUG_SCSICMD	0x00000002
#define ESP_DEBUG_RESET		0x00000004
#define ESP_DEBUG_MSGIN		0x00000008
#define ESP_DEBUG_MSGOUT	0x00000010
#define ESP_DEBUG_CMDDONE	0x00000020
#define ESP_DEBUG_DISCONNECT	0x00000040
#define ESP_DEBUG_DATASTART	0x00000080
#define ESP_DEBUG_DATADONE	0x00000100
#define ESP_DEBUG_RECONNECT	0x00000200
#define ESP_DEBUG_AUTOSENSE	0x00000400

/* One conditional printk wrapper per debug category above.  */
#define esp_log_intr(f, a...) \
do {	if (esp_debug & ESP_DEBUG_INTR) \
		printk(f, ## a); \
} while (0)

#define esp_log_reset(f, a...) \
do {	if (esp_debug & ESP_DEBUG_RESET) \
		printk(f, ## a); \
} while (0)

#define esp_log_msgin(f, a...) \
do {	if (esp_debug & ESP_DEBUG_MSGIN) \
		printk(f, ## a); \
} while (0)

#define esp_log_msgout(f, a...) \
do {	if (esp_debug & ESP_DEBUG_MSGOUT) \
		printk(f, ## a); \
} while (0)

#define esp_log_cmddone(f, a...) \
do {	if (esp_debug & ESP_DEBUG_CMDDONE) \
		printk(f, ## a); \
} while (0)

#define esp_log_disconnect(f, a...) \
do {	if (esp_debug & ESP_DEBUG_DISCONNECT) \
		printk(f, ## a); \
} while (0)

#define esp_log_datastart(f, a...) \
do {	if (esp_debug & ESP_DEBUG_DATASTART) \
		printk(f, ## a); \
} while (0)

#define esp_log_datadone(f, a...) \
do {	if (esp_debug & ESP_DEBUG_DATADONE) \
		printk(f, ## a); \
} while (0)

#define esp_log_reconnect(f, a...) \
do {	if (esp_debug & ESP_DEBUG_RECONNECT) \
		printk(f, ## a); \
} while (0)

#define esp_log_autosense(f, a...) \
do {	if (esp_debug & ESP_DEBUG_AUTOSENSE) \
		printk(f, ## a); \
} while (0)

/* Chip register accessors, dispatched through the per-host ops
 * vector (the bus-specific implementation is supplied elsewhere).
 * Both expect a local variable named 'esp' to be in scope.
 */
#define esp_read8(REG)		esp->ops->esp_read8(esp, REG)
#define esp_write8(VAL,REG)	esp->ops->esp_write8(esp, VAL, REG)
105
106static void esp_log_fill_regs(struct esp *esp,
107			      struct esp_event_ent *p)
108{
109	p->sreg = esp->sreg;
110	p->seqreg = esp->seqreg;
111	p->sreg2 = esp->sreg2;
112	p->ireg = esp->ireg;
113	p->select_state = esp->select_state;
114	p->event = esp->event;
115}
116
117void scsi_esp_cmd(struct esp *esp, u8 val)
118{
119	struct esp_event_ent *p;
120	int idx = esp->esp_event_cur;
121
122	p = &esp->esp_event_log[idx];
123	p->type = ESP_EVENT_TYPE_CMD;
124	p->val = val;
125	esp_log_fill_regs(esp, p);
126
127	esp->esp_event_cur = (idx + 1) & (ESP_EVENT_LOG_SZ - 1);
128
129	esp_write8(val, ESP_CMD);
130}
131EXPORT_SYMBOL(scsi_esp_cmd);
132
133static void esp_event(struct esp *esp, u8 val)
134{
135	struct esp_event_ent *p;
136	int idx = esp->esp_event_cur;
137
138	p = &esp->esp_event_log[idx];
139	p->type = ESP_EVENT_TYPE_EVENT;
140	p->val = val;
141	esp_log_fill_regs(esp, p);
142
143	esp->esp_event_cur = (idx + 1) & (ESP_EVENT_LOG_SZ - 1);
144
145	esp->event = val;
146}
147
148static void esp_dump_cmd_log(struct esp *esp)
149{
150	int idx = esp->esp_event_cur;
151	int stop = idx;
152
153	printk(KERN_INFO PFX "esp%d: Dumping command log\n",
154	       esp->host->unique_id);
155	do {
156		struct esp_event_ent *p = &esp->esp_event_log[idx];
157
158		printk(KERN_INFO PFX "esp%d: ent[%d] %s ",
159		       esp->host->unique_id, idx,
160		       p->type == ESP_EVENT_TYPE_CMD ? "CMD" : "EVENT");
161
162		printk("val[%02x] sreg[%02x] seqreg[%02x] "
163		       "sreg2[%02x] ireg[%02x] ss[%02x] event[%02x]\n",
164		       p->val, p->sreg, p->seqreg,
165		       p->sreg2, p->ireg, p->select_state, p->event);
166
167		idx = (idx + 1) & (ESP_EVENT_LOG_SZ - 1);
168	} while (idx != stop);
169}
170
171static void esp_flush_fifo(struct esp *esp)
172{
173	scsi_esp_cmd(esp, ESP_CMD_FLUSH);
174	if (esp->rev == ESP236) {
175		int lim = 1000;
176
177		while (esp_read8(ESP_FFLAGS) & ESP_FF_FBYTES) {
178			if (--lim == 0) {
179				printk(KERN_ALERT PFX "esp%d: ESP_FF_BYTES "
180				       "will not clear!\n",
181				       esp->host->unique_id);
182				break;
183			}
184			udelay(1);
185		}
186	}
187}
188
189static void hme_read_fifo(struct esp *esp)
190{
191	int fcnt = esp_read8(ESP_FFLAGS) & ESP_FF_FBYTES;
192	int idx = 0;
193
194	while (fcnt--) {
195		esp->fifo[idx++] = esp_read8(ESP_FDATA);
196		esp->fifo[idx++] = esp_read8(ESP_FDATA);
197	}
198	if (esp->sreg2 & ESP_STAT2_F1BYTE) {
199		esp_write8(0, ESP_FDATA);
200		esp->fifo[idx++] = esp_read8(ESP_FDATA);
201		scsi_esp_cmd(esp, ESP_CMD_FLUSH);
202	}
203	esp->fifo_cnt = idx;
204}
205
206static void esp_set_all_config3(struct esp *esp, u8 val)
207{
208	int i;
209
210	for (i = 0; i < ESP_MAX_TARGET; i++)
211		esp->target[i].esp_config3 = val;
212}
213
/* Reset the ESP chip, _not_ the SCSI bus. */
static void esp_reset_esp(struct esp *esp)
{
	u8 family_code, version;

	/* Now reset the ESP chip */
	scsi_esp_cmd(esp, ESP_CMD_RC);
	/* Two DMA NOP commands follow the chip reset -- presumably to
	 * let the chip settle before registers are touched; TODO
	 * confirm against the chip documentation.
	 */
	scsi_esp_cmd(esp, ESP_CMD_NULL | ESP_CMD_DMA);
	scsi_esp_cmd(esp, ESP_CMD_NULL | ESP_CMD_DMA);

	/* Reload the configuration registers */
	esp_write8(esp->cfact, ESP_CFACT);

	/* Zero the cached sync period/offset so that subsequent
	 * esp_write_tgt_sync() calls rewrite the registers as needed.
	 */
	esp->prev_stp = 0;
	esp_write8(esp->prev_stp, ESP_STP);

	esp->prev_soff = 0;
	esp_write8(esp->prev_soff, ESP_SOFF);

	esp_write8(esp->neg_defp, ESP_TIMEO);

	/* This is the only point at which it is reliable to read
	 * the ID-code for a fast ESP chip variants.
	 */
	esp->max_period = ((35 * esp->ccycle) / 1000);
	if (esp->rev == FAST) {
		/* Identify the exact FAST variant from the family code
		 * field (bits 7:3) of the unique ID register.
		 */
		version = esp_read8(ESP_UID);
		family_code = (version & 0xf8) >> 3;
		if (family_code == 0x02)
			esp->rev = FAS236;
		else if (family_code == 0x0a)
			esp->rev = FASHME; /* Version is usually '5'. */
		else
			esp->rev = FAS100A;
		esp->min_period = ((4 * esp->ccycle) / 1000);
	} else {
		esp->min_period = ((5 * esp->ccycle) / 1000);
	}
	/* Round both period bounds up to a multiple of four, then
	 * scale down by four.
	 */
	esp->max_period = (esp->max_period + 3)>>2;
	esp->min_period = (esp->min_period + 3)>>2;

	/* Per-revision configuration register programming. */
	esp_write8(esp->config1, ESP_CFG1);
	switch (esp->rev) {
	case ESP100:
		/* nothing to do */
		break;

	case ESP100A:
		esp_write8(esp->config2, ESP_CFG2);
		break;

	case ESP236:
		/* Slow 236 */
		esp_write8(esp->config2, ESP_CFG2);
		esp->prev_cfg3 = esp->target[0].esp_config3;
		esp_write8(esp->prev_cfg3, ESP_CFG3);
		break;

	case FASHME:
		esp->config2 |= (ESP_CONFIG2_HME32 | ESP_CONFIG2_HMEFENAB);
		/* fallthrough... */

	case FAS236:
		/* Fast 236 or HME */
		esp_write8(esp->config2, ESP_CFG2);
		if (esp->rev == FASHME) {
			u8 cfg3 = esp->target[0].esp_config3;

			cfg3 |= ESP_CONFIG3_FCLOCK | ESP_CONFIG3_OBPUSH;
			if (esp->scsi_id >= 8)
				cfg3 |= ESP_CONFIG3_IDBIT3;
			esp_set_all_config3(esp, cfg3);
		} else {
			u32 cfg3 = esp->target[0].esp_config3;

			cfg3 |= ESP_CONFIG3_FCLK;
			esp_set_all_config3(esp, cfg3);
		}
		esp->prev_cfg3 = esp->target[0].esp_config3;
		esp_write8(esp->prev_cfg3, ESP_CFG3);
		/* Per-variant delay value; differential FAS236 boards
		 * use zero.
		 */
		if (esp->rev == FASHME) {
			esp->radelay = 80;
		} else {
			if (esp->flags & ESP_FLAG_DIFFERENTIAL)
				esp->radelay = 0;
			else
				esp->radelay = 96;
		}
		break;

	case FAS100A:
		/* Fast 100a */
		esp_write8(esp->config2, ESP_CFG2);
		esp_set_all_config3(esp,
				    (esp->target[0].esp_config3 |
				     ESP_CONFIG3_FCLOCK));
		esp->prev_cfg3 = esp->target[0].esp_config3;
		esp_write8(esp->prev_cfg3, ESP_CFG3);
		esp->radelay = 32;
		break;

	default:
		break;
	}

	/* Eat any bitrot in the chip */
	esp_read8(ESP_INTRPT);
	udelay(100);
}
323
324static void esp_map_dma(struct esp *esp, struct scsi_cmnd *cmd)
325{
326	struct esp_cmd_priv *spriv = ESP_CMD_PRIV(cmd);
327	struct scatterlist *sg = cmd->request_buffer;
328	int dir = cmd->sc_data_direction;
329	int total, i;
330
331	if (dir == DMA_NONE)
332		return;
333
334	BUG_ON(cmd->use_sg == 0);
335
336	spriv->u.num_sg = esp->ops->map_sg(esp, sg,
337					   cmd->use_sg, dir);
338	spriv->cur_residue = sg_dma_len(sg);
339	spriv->cur_sg = sg;
340
341	total = 0;
342	for (i = 0; i < spriv->u.num_sg; i++)
343		total += sg_dma_len(&sg[i]);
344	spriv->tot_residue = total;
345}
346
347static dma_addr_t esp_cur_dma_addr(struct esp_cmd_entry *ent,
348				   struct scsi_cmnd *cmd)
349{
350	struct esp_cmd_priv *p = ESP_CMD_PRIV(cmd);
351
352	if (ent->flags & ESP_CMD_FLAG_AUTOSENSE) {
353		return ent->sense_dma +
354			(ent->sense_ptr - cmd->sense_buffer);
355	}
356
357	return sg_dma_address(p->cur_sg) +
358		(sg_dma_len(p->cur_sg) -
359		 p->cur_residue);
360}
361
362static unsigned int esp_cur_dma_len(struct esp_cmd_entry *ent,
363				    struct scsi_cmnd *cmd)
364{
365	struct esp_cmd_priv *p = ESP_CMD_PRIV(cmd);
366
367	if (ent->flags & ESP_CMD_FLAG_AUTOSENSE) {
368		return SCSI_SENSE_BUFFERSIZE -
369			(ent->sense_ptr - cmd->sense_buffer);
370	}
371	return p->cur_residue;
372}
373
/* Account for 'len' bytes having been transferred for this command:
 * advance the autosense pointer, or decrement the scatterlist
 * residues and step to the next segment when the current one is
 * exhausted.
 */
static void esp_advance_dma(struct esp *esp, struct esp_cmd_entry *ent,
			    struct scsi_cmnd *cmd, unsigned int len)
{
	struct esp_cmd_priv *p = ESP_CMD_PRIV(cmd);

	if (ent->flags & ESP_CMD_FLAG_AUTOSENSE) {
		ent->sense_ptr += len;
		return;
	}

	p->cur_residue -= len;
	p->tot_residue -= len;
	/* A negative residue means more was transferred than was
	 * mapped; log the state and clamp both residues to zero.
	 */
	if (p->cur_residue < 0 || p->tot_residue < 0) {
		printk(KERN_ERR PFX "esp%d: Data transfer overflow.\n",
		       esp->host->unique_id);
		printk(KERN_ERR PFX "esp%d: cur_residue[%d] tot_residue[%d] "
		       "len[%u]\n",
		       esp->host->unique_id,
		       p->cur_residue, p->tot_residue, len);
		p->cur_residue = 0;
		p->tot_residue = 0;
	}
	/* Current segment drained but data remains: advance to the
	 * next scatterlist entry.
	 */
	if (!p->cur_residue && p->tot_residue) {
		p->cur_sg++;
		p->cur_residue = sg_dma_len(p->cur_sg);
	}
}
401
402static void esp_unmap_dma(struct esp *esp, struct scsi_cmnd *cmd)
403{
404	struct esp_cmd_priv *spriv = ESP_CMD_PRIV(cmd);
405	int dir = cmd->sc_data_direction;
406
407	if (dir == DMA_NONE)
408		return;
409
410	esp->ops->unmap_sg(esp, cmd->request_buffer,
411			   spriv->u.num_sg, dir);
412}
413
414static void esp_save_pointers(struct esp *esp, struct esp_cmd_entry *ent)
415{
416	struct scsi_cmnd *cmd = ent->cmd;
417	struct esp_cmd_priv *spriv = ESP_CMD_PRIV(cmd);
418
419	if (ent->flags & ESP_CMD_FLAG_AUTOSENSE) {
420		ent->saved_sense_ptr = ent->sense_ptr;
421		return;
422	}
423	ent->saved_cur_residue = spriv->cur_residue;
424	ent->saved_cur_sg = spriv->cur_sg;
425	ent->saved_tot_residue = spriv->tot_residue;
426}
427
428static void esp_restore_pointers(struct esp *esp, struct esp_cmd_entry *ent)
429{
430	struct scsi_cmnd *cmd = ent->cmd;
431	struct esp_cmd_priv *spriv = ESP_CMD_PRIV(cmd);
432
433	if (ent->flags & ESP_CMD_FLAG_AUTOSENSE) {
434		ent->sense_ptr = ent->saved_sense_ptr;
435		return;
436	}
437	spriv->cur_residue = ent->saved_cur_residue;
438	spriv->cur_sg = ent->saved_cur_sg;
439	spriv->tot_residue = ent->saved_tot_residue;
440}
441
442static void esp_check_command_len(struct esp *esp, struct scsi_cmnd *cmd)
443{
444	if (cmd->cmd_len == 6 ||
445	    cmd->cmd_len == 10 ||
446	    cmd->cmd_len == 12) {
447		esp->flags &= ~ESP_FLAG_DOING_SLOWCMD;
448	} else {
449		esp->flags |= ESP_FLAG_DOING_SLOWCMD;
450	}
451}
452
453static void esp_write_tgt_config3(struct esp *esp, int tgt)
454{
455	if (esp->rev > ESP100A) {
456		u8 val = esp->target[tgt].esp_config3;
457
458		if (val != esp->prev_cfg3) {
459			esp->prev_cfg3 = val;
460			esp_write8(val, ESP_CFG3);
461		}
462	}
463}
464
465static void esp_write_tgt_sync(struct esp *esp, int tgt)
466{
467	u8 off = esp->target[tgt].esp_offset;
468	u8 per = esp->target[tgt].esp_period;
469
470	if (off != esp->prev_soff) {
471		esp->prev_soff = off;
472		esp_write8(off, ESP_SOFF);
473	}
474	if (per != esp->prev_stp) {
475		esp->prev_stp = per;
476		esp_write8(per, ESP_STP);
477	}
478}
479
480static u32 esp_dma_length_limit(struct esp *esp, u32 dma_addr, u32 dma_len)
481{
482	if (esp->rev == FASHME) {
483		/* Arbitrary segment boundaries, 24-bit counts.  */
484		if (dma_len > (1U << 24))
485			dma_len = (1U << 24);
486	} else {
487		u32 base, end;
488
489		/* ESP chip limits other variants by 16-bits of transfer
490		 * count.  Actually on FAS100A and FAS236 we could get
491		 * 24-bits of transfer count by enabling ESP_CONFIG2_FENAB
492		 * in the ESP_CFG2 register but that causes other unwanted
493		 * changes so we don't use it currently.
494		 */
495		if (dma_len > (1U << 16))
496			dma_len = (1U << 16);
497
498		/* All of the DMA variants hooked up to these chips
499		 * cannot handle crossing a 24-bit address boundary.
500		 */
501		base = dma_addr & ((1U << 24) - 1U);
502		end = base + dma_len;
503		if (end > (1U << 24))
504			end = (1U <<24);
505		dma_len = end - base;
506	}
507	return dma_len;
508}
509
510static int esp_need_to_nego_wide(struct esp_target_data *tp)
511{
512	struct scsi_target *target = tp->starget;
513
514	return spi_width(target) != tp->nego_goal_width;
515}
516
517static int esp_need_to_nego_sync(struct esp_target_data *tp)
518{
519	struct scsi_target *target = tp->starget;
520
521	/* When offset is zero, period is "don't care".  */
522	if (!spi_offset(target) && !tp->nego_goal_offset)
523		return 0;
524
525	if (spi_offset(target) == tp->nego_goal_offset &&
526	    spi_period(target) == tp->nego_goal_period)
527		return 0;
528
529	return 1;
530}
531
532static int esp_alloc_lun_tag(struct esp_cmd_entry *ent,
533			     struct esp_lun_data *lp)
534{
535	if (!ent->tag[0]) {
536		/* Non-tagged, slot already taken?  */
537		if (lp->non_tagged_cmd)
538			return -EBUSY;
539
540		if (lp->hold) {
541			/* We are being held by active tagged
542			 * commands.
543			 */
544			if (lp->num_tagged)
545				return -EBUSY;
546
547			/* Tagged commands completed, we can unplug
548			 * the queue and run this untagged command.
549			 */
550			lp->hold = 0;
551		} else if (lp->num_tagged) {
552			/* Plug the queue until num_tagged decreases
553			 * to zero in esp_free_lun_tag.
554			 */
555			lp->hold = 1;
556			return -EBUSY;
557		}
558
559		lp->non_tagged_cmd = ent;
560		return 0;
561	} else {
562		/* Tagged command, see if blocked by a
563		 * non-tagged one.
564		 */
565		if (lp->non_tagged_cmd || lp->hold)
566			return -EBUSY;
567	}
568
569	BUG_ON(lp->tagged_cmds[ent->tag[1]]);
570
571	lp->tagged_cmds[ent->tag[1]] = ent;
572	lp->num_tagged++;
573
574	return 0;
575}
576
577static void esp_free_lun_tag(struct esp_cmd_entry *ent,
578			     struct esp_lun_data *lp)
579{
580	if (ent->tag[0]) {
581		BUG_ON(lp->tagged_cmds[ent->tag[1]] != ent);
582		lp->tagged_cmds[ent->tag[1]] = NULL;
583		lp->num_tagged--;
584	} else {
585		BUG_ON(lp->non_tagged_cmd != ent);
586		lp->non_tagged_cmd = NULL;
587	}
588}
589
/* When a contingent allegiance conditon is created, we force feed a
 * REQUEST_SENSE command to the device to fetch the sense data.  I
 * tried many other schemes, relying on the scsi error handling layer
 * to send out the REQUEST_SENSE automatically, but this was difficult
 * to get right especially in the presence of applications like smartd
 * which use SG_IO to send out their own REQUEST_SENSE commands.
 */
static void esp_autosense(struct esp *esp, struct esp_cmd_entry *ent)
{
	struct scsi_cmnd *cmd = ent->cmd;
	struct scsi_device *dev = cmd->device;
	int tgt, lun;
	u8 *p, val;

	tgt = dev->id;
	lun = dev->lun;


	if (!ent->sense_ptr) {
		esp_log_autosense("esp%d: Doing auto-sense for "
				  "tgt[%d] lun[%d]\n",
				  esp->host->unique_id, tgt, lun);

		/* Map the midlayer's sense buffer for the device to
		 * DMA the sense data into.
		 */
		ent->sense_ptr = cmd->sense_buffer;
		ent->sense_dma = esp->ops->map_single(esp,
						      ent->sense_ptr,
						      SCSI_SENSE_BUFFERSIZE,
						      DMA_FROM_DEVICE);
	}
	ent->saved_sense_ptr = ent->sense_ptr;

	esp->active_cmd = ent;

	/* Build IDENTIFY plus a 6-byte REQUEST SENSE CDB in the
	 * command block, to be DMA'd to the chip during selection.
	 */
	p = esp->command_block;
	esp->msg_out_len = 0;

	*p++ = IDENTIFY(0, lun);
	*p++ = REQUEST_SENSE;
	/* For SCSI-2 and older devices, replicate the LUN in the
	 * upper bits of CDB byte 1.
	 */
	*p++ = ((dev->scsi_level <= SCSI_2) ?
		(lun << 5) : 0);
	*p++ = 0;
	*p++ = 0;
	*p++ = SCSI_SENSE_BUFFERSIZE;	/* allocation length */
	*p++ = 0;	/* control byte */

	esp->select_state = ESP_SELECT_BASIC;

	val = tgt;
	if (esp->rev == FASHME)
		val |= ESP_BUSID_RESELID | ESP_BUSID_CTR32BIT;
	esp_write8(val, ESP_BUSID);

	esp_write_tgt_sync(esp, tgt);
	esp_write_tgt_config3(esp, tgt);

	val = (p - esp->command_block);

	/* The fifo is flushed on FASHME before selection starts --
	 * presumably a requirement of that chip variant; TODO confirm.
	 */
	if (esp->rev == FASHME)
		scsi_esp_cmd(esp, ESP_CMD_FLUSH);
	esp->ops->send_dma_cmd(esp, esp->command_block_dma,
			       val, 16, 0, ESP_CMD_DMA | ESP_CMD_SELA);
}
652
653static struct esp_cmd_entry *find_and_prep_issuable_command(struct esp *esp)
654{
655	struct esp_cmd_entry *ent;
656
657	list_for_each_entry(ent, &esp->queued_cmds, list) {
658		struct scsi_cmnd *cmd = ent->cmd;
659		struct scsi_device *dev = cmd->device;
660		struct esp_lun_data *lp = dev->hostdata;
661
662		if (ent->flags & ESP_CMD_FLAG_AUTOSENSE) {
663			ent->tag[0] = 0;
664			ent->tag[1] = 0;
665			return ent;
666		}
667
668		if (!scsi_populate_tag_msg(cmd, &ent->tag[0])) {
669			ent->tag[0] = 0;
670			ent->tag[1] = 0;
671		}
672
673		if (esp_alloc_lun_tag(ent, lp) < 0)
674			continue;
675
676		return ent;
677	}
678
679	return NULL;
680}
681
/* If the chip is idle and a startable command is queued, build its
 * selection sequence in the command block and begin selection.  This
 * is the one place commands move from the queued to the active list.
 */
static void esp_maybe_execute_command(struct esp *esp)
{
	struct esp_target_data *tp;
	struct esp_lun_data *lp;
	struct scsi_device *dev;
	struct scsi_cmnd *cmd;
	struct esp_cmd_entry *ent;
	int tgt, lun, i;
	u32 val, start_cmd;
	u8 *p;

	/* Only one command owns the bus at a time, and nothing new
	 * starts while a reset is pending.
	 */
	if (esp->active_cmd ||
	    (esp->flags & ESP_FLAG_RESETTING))
		return;

	ent = find_and_prep_issuable_command(esp);
	if (!ent)
		return;

	/* Autosense has its own fixed REQUEST SENSE setup path. */
	if (ent->flags & ESP_CMD_FLAG_AUTOSENSE) {
		esp_autosense(esp, ent);
		return;
	}

	cmd = ent->cmd;
	dev = cmd->device;
	tgt = dev->id;
	lun = dev->lun;
	tp = &esp->target[tgt];
	lp = dev->hostdata;

	/* Move the entry from the queued list to the active list. */
	list_del(&ent->list);
	list_add(&ent->list, &esp->active_cmds);

	esp->active_cmd = ent;

	esp_map_dma(esp, cmd);
	esp_save_pointers(esp, ent);

	esp_check_command_len(esp, cmd);

	p = esp->command_block;

	esp->msg_out_len = 0;
	if (tp->flags & ESP_TGT_CHECK_NEGO) {
		/* Need to negotiate.  If the target is broken
		 * go for synchronous transfers and non-wide.
		 */
		if (tp->flags & ESP_TGT_BROKEN) {
			tp->flags &= ~ESP_TGT_DISCONNECT;
			tp->nego_goal_period = 0;
			tp->nego_goal_offset = 0;
			tp->nego_goal_width = 0;
			tp->nego_goal_tags = 0;
		}

		/* If the settings are not changing, skip this.  */
		if (spi_width(tp->starget) == tp->nego_goal_width &&
		    spi_period(tp->starget) == tp->nego_goal_period &&
		    spi_offset(tp->starget) == tp->nego_goal_offset) {
			tp->flags &= ~ESP_TGT_CHECK_NEGO;
			goto build_identify;
		}

		/* Wide negotiation is only attempted on FASHME. */
		if (esp->rev == FASHME && esp_need_to_nego_wide(tp)) {
			esp->msg_out_len =
				spi_populate_width_msg(&esp->msg_out[0],
						       (tp->nego_goal_width ?
							1 : 0));
			tp->flags |= ESP_TGT_NEGO_WIDE;
		} else if (esp_need_to_nego_sync(tp)) {
			esp->msg_out_len =
				spi_populate_sync_msg(&esp->msg_out[0],
						      tp->nego_goal_period,
						      tp->nego_goal_offset);
			tp->flags |= ESP_TGT_NEGO_SYNC;
		} else {
			tp->flags &= ~ESP_TGT_CHECK_NEGO;
		}

		/* Process it like a slow command.  */
		if (tp->flags & (ESP_TGT_NEGO_WIDE | ESP_TGT_NEGO_SYNC))
			esp->flags |= ESP_FLAG_DOING_SLOWCMD;
	}

build_identify:
	/* If we don't have a lun-data struct yet, we're probing
	 * so do not disconnect.  Also, do not disconnect unless
	 * we have a tag on this command.
	 */
	if (lp && (tp->flags & ESP_TGT_DISCONNECT) && ent->tag[0])
		*p++ = IDENTIFY(1, lun);
	else
		*p++ = IDENTIFY(0, lun);

	if (ent->tag[0] && esp->rev == ESP100) {
		/* ESP100 lacks select w/atn3 command, use select
		 * and stop instead.
		 */
		esp->flags |= ESP_FLAG_DOING_SLOWCMD;
	}

	if (!(esp->flags & ESP_FLAG_DOING_SLOWCMD)) {
		/* Fast path: IDENTIFY, optional tag message bytes, and
		 * the whole CDB go to the chip in one DMA block.
		 */
		start_cmd = ESP_CMD_DMA | ESP_CMD_SELA;
		if (ent->tag[0]) {
			*p++ = ent->tag[0];
			*p++ = ent->tag[1];

			start_cmd = ESP_CMD_DMA | ESP_CMD_SA3;
		}

		for (i = 0; i < cmd->cmd_len; i++)
			*p++ = cmd->cmnd[i];

		esp->select_state = ESP_SELECT_BASIC;
	} else {
		/* Slow path: select with ATN and stop.  The message
		 * and CDB bytes are delivered later, byte-wise, from
		 * msg_out[] and cmd_bytes_ptr.
		 */
		esp->cmd_bytes_left = cmd->cmd_len;
		esp->cmd_bytes_ptr = &cmd->cmnd[0];

		if (ent->tag[0]) {
			/* Prepend the two tag message bytes to any
			 * negotiation message already staged.
			 */
			for (i = esp->msg_out_len - 1;
			     i >= 0; i--)
				esp->msg_out[i + 2] = esp->msg_out[i];
			esp->msg_out[0] = ent->tag[0];
			esp->msg_out[1] = ent->tag[1];
			esp->msg_out_len += 2;
		}

		start_cmd = ESP_CMD_DMA | ESP_CMD_SELAS;
		esp->select_state = ESP_SELECT_MSGOUT;
	}
	val = tgt;
	if (esp->rev == FASHME)
		val |= ESP_BUSID_RESELID | ESP_BUSID_CTR32BIT;
	esp_write8(val, ESP_BUSID);

	esp_write_tgt_sync(esp, tgt);
	esp_write_tgt_config3(esp, tgt);

	val = (p - esp->command_block);

	if (esp_debug & ESP_DEBUG_SCSICMD) {
		printk("ESP: tgt[%d] lun[%d] scsi_cmd [ ", tgt, lun);
		for (i = 0; i < cmd->cmd_len; i++)
			printk("%02x ", cmd->cmnd[i]);
		printk("]\n");
	}

	/* The fifo is flushed on FASHME before selection starts --
	 * presumably a requirement of that chip variant; TODO confirm.
	 */
	if (esp->rev == FASHME)
		scsi_esp_cmd(esp, ESP_CMD_FLUSH);
	esp->ops->send_dma_cmd(esp, esp->command_block_dma,
			       val, 16, 0, start_cmd);
}
835
836static struct esp_cmd_entry *esp_get_ent(struct esp *esp)
837{
838	struct list_head *head = &esp->esp_cmd_pool;
839	struct esp_cmd_entry *ret;
840
841	if (list_empty(head)) {
842		ret = kzalloc(sizeof(struct esp_cmd_entry), GFP_ATOMIC);
843	} else {
844		ret = list_entry(head->next, struct esp_cmd_entry, list);
845		list_del(&ret->list);
846		memset(ret, 0, sizeof(*ret));
847	}
848	return ret;
849}
850
851static void esp_put_ent(struct esp *esp, struct esp_cmd_entry *ent)
852{
853	list_add(&ent->list, &esp->esp_cmd_pool);
854}
855
/* Complete a command: tear down its DMA state, release its LUN slot,
 * report the result to the midlayer, recycle the entry, and try to
 * start the next queued command.
 */
static void esp_cmd_is_done(struct esp *esp, struct esp_cmd_entry *ent,
			    struct scsi_cmnd *cmd, unsigned int result)
{
	struct scsi_device *dev = cmd->device;
	int tgt = dev->id;
	int lun = dev->lun;

	esp->active_cmd = NULL;
	esp_unmap_dma(esp, cmd);
	esp_free_lun_tag(ent, dev->hostdata);
	cmd->result = result;

	/* Wake any waiter on this command's completion -- presumably
	 * the error handler; see ent->eh_done usage elsewhere.
	 */
	if (ent->eh_done) {
		complete(ent->eh_done);
		ent->eh_done = NULL;
	}

	if (ent->flags & ESP_CMD_FLAG_AUTOSENSE) {
		esp->ops->unmap_single(esp, ent->sense_dma,
				       SCSI_SENSE_BUFFERSIZE, DMA_FROM_DEVICE);
		ent->sense_ptr = NULL;

		/* Restore the message/status bytes to what we actually
		 * saw originally.  Also, report that we are providing
		 * the sense data.
		 */
		cmd->result = ((DRIVER_SENSE << 24) |
			       (DID_OK << 16) |
			       (COMMAND_COMPLETE << 8) |
			       (SAM_STAT_CHECK_CONDITION << 0));

		ent->flags &= ~ESP_CMD_FLAG_AUTOSENSE;
		if (esp_debug & ESP_DEBUG_AUTOSENSE) {
			int i;

			printk("esp%d: tgt[%d] lun[%d] AUTO SENSE[ ",
			       esp->host->unique_id, tgt, lun);
			/* Dump the first 18 sense bytes only. */
			for (i = 0; i < 18; i++)
				printk("%02x ", cmd->sense_buffer[i]);
			printk("]\n");
		}
	}

	cmd->scsi_done(cmd);

	/* Remove from the active list and recycle the entry. */
	list_del(&ent->list);
	esp_put_ent(esp, ent);

	/* The bus is free now; try to start the next command. */
	esp_maybe_execute_command(esp);
}
906
/* Pack the SCSI status, message, and driver bytes into a scsi_cmnd
 * result word: status in bits 0-7, message byte in bits 8-15, and
 * driver code in bits 16-23.
 */
static unsigned int compose_result(unsigned int status, unsigned int message,
				   unsigned int driver_code)
{
	unsigned int res = driver_code << 16;

	res |= message << 8;
	res |= status;
	return res;
}
912
913static void esp_event_queue_full(struct esp *esp, struct esp_cmd_entry *ent)
914{
915	struct scsi_device *dev = ent->cmd->device;
916	struct esp_lun_data *lp = dev->hostdata;
917
918	scsi_track_queue_full(dev, lp->num_tagged - 1);
919}
920
921static int esp_queuecommand(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *))
922{
923	struct scsi_device *dev = cmd->device;
924	struct esp *esp = host_to_esp(dev->host);
925	struct esp_cmd_priv *spriv;
926	struct esp_cmd_entry *ent;
927
928	ent = esp_get_ent(esp);
929	if (!ent)
930		return SCSI_MLQUEUE_HOST_BUSY;
931
932	ent->cmd = cmd;
933
934	cmd->scsi_done = done;
935
936	spriv = ESP_CMD_PRIV(cmd);
937	spriv->u.dma_addr = ~(dma_addr_t)0x0;
938
939	list_add_tail(&ent->list, &esp->queued_cmds);
940
941	esp_maybe_execute_command(esp);
942
943	return 0;
944}
945
946static int esp_check_gross_error(struct esp *esp)
947{
948	if (esp->sreg & ESP_STAT_SPAM) {
949		/* Gross Error, could be one of:
950		 * - top of fifo overwritten
951		 * - top of command register overwritten
952		 * - DMA programmed with wrong direction
953		 * - improper phase change
954		 */
955		printk(KERN_ERR PFX "esp%d: Gross error sreg[%02x]\n",
956		       esp->host->unique_id, esp->sreg);
957		return 1;
958	}
959	return 0;
960}
961
/* Validate that a pending interrupt is genuine.
 *
 * Returns 0 when the interrupt should be serviced normally, 1 when a
 * SCSI bus reset interrupt was latched despite a clear status INTR
 * bit, and -1 on a spurious interrupt or DMA error (caller should
 * abandon this interrupt).
 */
static int esp_check_spur_intr(struct esp *esp)
{
	switch (esp->rev) {
	case ESP100:
	case ESP100A:
		/* The interrupt pending bit of the status register cannot
		 * be trusted on these revisions.
		 */
		esp->sreg &= ~ESP_STAT_INTR;
		break;

	default:
		if (!(esp->sreg & ESP_STAT_INTR)) {
			/* Status says no interrupt pending; look for a
			 * latched SCSI bus reset in the interrupt reg.
			 */
			esp->ireg = esp_read8(ESP_INTRPT);
			if (esp->ireg & ESP_INTR_SR)
				return 1;

			/* If the DMA is indicating interrupt pending and the
			 * ESP is not, the only possibility is a DMA error.
			 */
			if (!esp->ops->dma_error(esp)) {
				printk(KERN_ERR PFX "esp%d: Spurious irq, "
				       "sreg=%x.\n",
				       esp->host->unique_id, esp->sreg);
				return -1;
			}

			printk(KERN_ERR PFX "esp%d: DMA error\n",
			       esp->host->unique_id);

			return -1;
		}
		break;
	}

	return 0;
}
999
1000static void esp_schedule_reset(struct esp *esp)
1001{
1002	esp_log_reset("ESP: esp_schedule_reset() from %p\n",
1003		      __builtin_return_address(0));
1004	esp->flags |= ESP_FLAG_RESETTING;
1005	esp_event(esp, ESP_EVENT_RESET);
1006}
1007
/* In order to avoid having to add a special half-reconnected state
 * into the driver we just sit here and poll through the rest of
 * the reselection process to get the tag message bytes.
 *
 * Returns the reconnecting command's entry on success, or NULL on
 * any timeout/protocol inconsistency (caller schedules a bus reset).
 */
static struct esp_cmd_entry *esp_reconnect_with_tag(struct esp *esp,
						    struct esp_lun_data *lp)
{
	struct esp_cmd_entry *ent;
	int i;

	if (!lp->num_tagged) {
		printk(KERN_ERR PFX "esp%d: Reconnect w/num_tagged==0\n",
		       esp->host->unique_id);
		return NULL;
	}

	esp_log_reconnect("ESP: reconnect tag, ");

	/* Busy-wait (bounded) for the chip to raise the next IRQ. */
	for (i = 0; i < ESP_QUICKIRQ_LIMIT; i++) {
		if (esp->ops->irq_pending(esp))
			break;
	}
	if (i == ESP_QUICKIRQ_LIMIT) {
		printk(KERN_ERR PFX "esp%d: Reconnect IRQ1 timeout\n",
		       esp->host->unique_id);
		return NULL;
	}

	esp->sreg = esp_read8(ESP_STATUS);
	esp->ireg = esp_read8(ESP_INTRPT);

	esp_log_reconnect("IRQ(%d:%x:%x), ",
			  i, esp->ireg, esp->sreg);

	if (esp->ireg & ESP_INTR_DC) {
		printk(KERN_ERR PFX "esp%d: Reconnect, got disconnect.\n",
		       esp->host->unique_id);
		return NULL;
	}

	/* The target must now be in message-in phase to send the tag. */
	if ((esp->sreg & ESP_STAT_PMASK) != ESP_MIP) {
		printk(KERN_ERR PFX "esp%d: Reconnect, not MIP sreg[%02x].\n",
		       esp->host->unique_id, esp->sreg);
		return NULL;
	}

	/* DMA in the tag bytes... */
	esp->command_block[0] = 0xff;
	esp->command_block[1] = 0xff;
	esp->ops->send_dma_cmd(esp, esp->command_block_dma,
			       2, 2, 1, ESP_CMD_DMA | ESP_CMD_TI);

	/* ACK the message.  */
	scsi_esp_cmd(esp, ESP_CMD_MOK);

	/* Poll (bounded) for the transfer-done interrupt. */
	for (i = 0; i < ESP_RESELECT_TAG_LIMIT; i++) {
		if (esp->ops->irq_pending(esp)) {
			esp->sreg = esp_read8(ESP_STATUS);
			esp->ireg = esp_read8(ESP_INTRPT);
			if (esp->ireg & ESP_INTR_FDONE)
				break;
		}
		udelay(1);
	}
	if (i == ESP_RESELECT_TAG_LIMIT) {
		printk(KERN_ERR PFX "esp%d: Reconnect IRQ2 timeout\n",
		       esp->host->unique_id);
		return NULL;
	}
	esp->ops->dma_drain(esp);
	esp->ops->dma_invalidate(esp);

	esp_log_reconnect("IRQ2(%d:%x:%x) tag[%x:%x]\n",
			  i, esp->ireg, esp->sreg,
			  esp->command_block[0],
			  esp->command_block[1]);

	/* Byte 0 must be a valid tag message type. */
	if (esp->command_block[0] < SIMPLE_QUEUE_TAG ||
	    esp->command_block[0] > ORDERED_QUEUE_TAG) {
		printk(KERN_ERR PFX "esp%d: Reconnect, bad tag "
		       "type %02x.\n",
		       esp->host->unique_id, esp->command_block[0]);
		return NULL;
	}

	/* Byte 1 is the tag value; look up the matching command. */
	ent = lp->tagged_cmds[esp->command_block[1]];
	if (!ent) {
		printk(KERN_ERR PFX "esp%d: Reconnect, no entry for "
		       "tag %02x.\n",
		       esp->host->unique_id, esp->command_block[1]);
		return NULL;
	}

	return ent;
}
1103
/* Handle a reselection by a target: decode the reselecting target
 * and LUN, locate the disconnected command (untagged or tagged), and
 * make it the active command again.  Returns 1 on success; any
 * inconsistency schedules a bus reset and returns 0.
 */
static int esp_reconnect(struct esp *esp)
{
	struct esp_cmd_entry *ent;
	struct esp_target_data *tp;
	struct esp_lun_data *lp;
	struct scsi_device *dev;
	int target, lun;

	BUG_ON(esp->active_cmd);
	if (esp->rev == FASHME) {
		/* FASHME puts the target and lun numbers directly
		 * into the fifo.
		 */
		target = esp->fifo[0];
		lun = esp->fifo[1] & 0x7;
	} else {
		u8 bits = esp_read8(ESP_FDATA);

		/* Older chips put the lun directly into the fifo, but
		 * the target is given as a sample of the arbitration
		 * lines on the bus at reselection time.  So we should
		 * see the ID of the ESP and the one reconnecting target
		 * set in the bitmap.
		 */
		if (!(bits & esp->scsi_id_mask))
			goto do_reset;
		bits &= ~esp->scsi_id_mask;
		/* Exactly one other bit must remain set. */
		if (!bits || (bits & (bits - 1)))
			goto do_reset;

		target = ffs(bits) - 1;
		lun = (esp_read8(ESP_FDATA) & 0x7);

		scsi_esp_cmd(esp, ESP_CMD_FLUSH);
		if (esp->rev == ESP100) {
			u8 ireg = esp_read8(ESP_INTRPT);
			/* This chip has a bug during reselection that can
			 * cause a spurious illegal-command interrupt, which
			 * we simply ACK here.  Another possibility is a bus
			 * reset so we must check for that.
			 */
			if (ireg & ESP_INTR_SR)
				goto do_reset;
		}
		scsi_esp_cmd(esp, ESP_CMD_NULL);
	}

	/* Re-load the chip's per-target transfer settings. */
	esp_write_tgt_sync(esp, target);
	esp_write_tgt_config3(esp, target);

	scsi_esp_cmd(esp, ESP_CMD_MOK);

	if (esp->rev == FASHME)
		esp_write8(target | ESP_BUSID_RESELID | ESP_BUSID_CTR32BIT,
			   ESP_BUSID);

	tp = &esp->target[target];
	dev = __scsi_device_lookup_by_target(tp->starget, lun);
	if (!dev) {
		printk(KERN_ERR PFX "esp%d: Reconnect, no lp "
		       "tgt[%u] lun[%u]\n",
		       esp->host->unique_id, target, lun);
		goto do_reset;
	}
	lp = dev->hostdata;

	/* An untagged command, if any, reconnects directly; otherwise
	 * poll through the tag message to identify which tagged
	 * command this is.
	 */
	ent = lp->non_tagged_cmd;
	if (!ent) {
		ent = esp_reconnect_with_tag(esp, lp);
		if (!ent)
			goto do_reset;
	}

	esp->active_cmd = ent;

	/* The command was marked for abort while disconnected; raise
	 * ATN so we can send ABORT TASK SET.
	 */
	if (ent->flags & ESP_CMD_FLAG_ABORT) {
		esp->msg_out[0] = ABORT_TASK_SET;
		esp->msg_out_len = 1;
		scsi_esp_cmd(esp, ESP_CMD_SATN);
	}

	esp_event(esp, ESP_EVENT_CHECK_PHASE);
	esp_restore_pointers(esp, ent);
	esp->flags |= ESP_FLAG_QUICKIRQ_CHECK;
	return 1;

do_reset:
	esp_schedule_reset(esp);
	return 0;
}
1194
/* Wind down a selection attempt after the chip has interrupted.
 *
 * Returns 1 when this interrupt needs no further event processing,
 * 0 when the caller should continue into esp_process_event() (or when
 * a reset has been scheduled).  Expects esp->active_cmd to be the
 * command that was being selected.
 */
static int esp_finish_select(struct esp *esp)
{
	struct esp_cmd_entry *ent;
	struct scsi_cmnd *cmd;
	u8 orig_select_state;

	/* NOTE(review): orig_select_state is captured but not consulted
	 * anywhere in this function -- possibly vestigial; confirm.
	 */
	orig_select_state = esp->select_state;

	/* No longer selecting.  */
	esp->select_state = ESP_SELECT_NONE;

	/* Latch the valid sequence-step bits for later diagnostics.  */
	esp->seqreg = esp_read8(ESP_SSTEP) & ESP_STEP_VBITS;
	ent = esp->active_cmd;
	cmd = ent->cmd;

	if (esp->ops->dma_error(esp)) {
		/* If we see a DMA error during or as a result of selection,
		 * all bets are off.
		 */
		esp_schedule_reset(esp);
		esp_cmd_is_done(esp, ent, cmd, (DID_ERROR << 16));
		return 0;
	}

	esp->ops->dma_invalidate(esp);

	/* Reselected by another target while we were trying to select:
	 * our selection lost arbitration.
	 */
	if (esp->ireg == (ESP_INTR_RSEL | ESP_INTR_FDONE)) {
		struct esp_target_data *tp = &esp->target[cmd->device->id];

		/* Carefully back out of the selection attempt.  Release
		 * resources (such as DMA mapping & TAG) and reset state (such
		 * as message out and command delivery variables).
		 */
		if (!(ent->flags & ESP_CMD_FLAG_AUTOSENSE)) {
			esp_unmap_dma(esp, cmd);
			esp_free_lun_tag(ent, cmd->device->hostdata);
			tp->flags &= ~(ESP_TGT_NEGO_SYNC | ESP_TGT_NEGO_WIDE);
			esp->flags &= ~ESP_FLAG_DOING_SLOWCMD;
			esp->cmd_bytes_ptr = NULL;
			esp->cmd_bytes_left = 0;
		} else {
			/* Autosense in flight: only the sense buffer mapping
			 * needs undoing.
			 */
			esp->ops->unmap_single(esp, ent->sense_dma,
					       SCSI_SENSE_BUFFERSIZE,
					       DMA_FROM_DEVICE);
			ent->sense_ptr = NULL;
		}

		/* Now that the state is unwound properly, put back onto
		 * the issue queue.  This command is no longer active.
		 */
		list_del(&ent->list);
		list_add(&ent->list, &esp->queued_cmds);
		esp->active_cmd = NULL;

		/* Return value ignored by caller, it directly invokes
		 * esp_reconnect().
		 */
		return 0;
	}

	if (esp->ireg == ESP_INTR_DC) {
		struct scsi_device *dev = cmd->device;

		/* Disconnect.  Make sure we re-negotiate sync and
		 * wide parameters if this target starts responding
		 * again in the future.
		 */
		esp->target[dev->id].flags |= ESP_TGT_CHECK_NEGO;

		scsi_esp_cmd(esp, ESP_CMD_ESEL);
		esp_cmd_is_done(esp, ent, cmd, (DID_BAD_TARGET << 16));
		return 1;
	}

	if (esp->ireg == (ESP_INTR_FDONE | ESP_INTR_BSERV)) {
		/* Selection successful.  On pre-FAST chips we have
		 * to do a NOP and possibly clean out the FIFO.
		 */
		if (esp->rev <= ESP236) {
			int fcnt = esp_read8(ESP_FFLAGS) & ESP_FF_FBYTES;

			scsi_esp_cmd(esp, ESP_CMD_NULL);

			/* Only flush when the fifo is empty and we are not
			 * mid synchronous data-in (fifo may hold live data).
			 */
			if (!fcnt &&
			    (!esp->prev_soff ||
			     ((esp->sreg & ESP_STAT_PMASK) != ESP_DIP)))
				esp_flush_fifo(esp);
		}

		/* If we are doing a slow command, negotiation, etc.
		 * we'll do the right thing as we transition to the
		 * next phase.
		 */
		esp_event(esp, ESP_EVENT_CHECK_PHASE);
		return 0;
	}

	printk("ESP: Unexpected selection completion ireg[%x].\n",
	       esp->ireg);
	esp_schedule_reset(esp);
	return 0;
}
1297
/* Compute how many bytes of the current DMA window actually made it
 * across the bus, from the transfer counter and fifo residue.
 *
 * Returns the byte count, or -1 when the ESP100 spurious-byte chip bug
 * is detected (caller resets and disables sync for this target).
 * May flush the chip fifo as a side effect.
 */
static int esp_data_bytes_sent(struct esp *esp, struct esp_cmd_entry *ent,
			       struct scsi_cmnd *cmd)
{
	int fifo_cnt, ecount, bytes_sent, flush_fifo;

	fifo_cnt = esp_read8(ESP_FFLAGS) & ESP_FF_FBYTES;
	/* In wide mode each fifo slot holds two bytes.  */
	if (esp->prev_cfg3 & ESP_CONFIG3_EWIDE)
		fifo_cnt <<= 1;

	/* Transfer counter reads zero once it has expired (ESP_STAT_TCNT
	 * set), so only read the residue when the count did not complete.
	 */
	ecount = 0;
	if (!(esp->sreg & ESP_STAT_TCNT)) {
		ecount = ((unsigned int)esp_read8(ESP_TCLOW) |
			  (((unsigned int)esp_read8(ESP_TCMED)) << 8));
		/* FASHME has a third (high) transfer-count byte.  */
		if (esp->rev == FASHME)
			ecount |= ((unsigned int)esp_read8(FAS_RLO)) << 16;
	}

	bytes_sent = esp->data_dma_len;
	bytes_sent -= ecount;

	/* On data-out, bytes still sitting in the fifo never reached
	 * the target.
	 */
	if (!(ent->flags & ESP_CMD_FLAG_WRITE))
		bytes_sent -= fifo_cnt;

	flush_fifo = 0;
	if (!esp->prev_soff) {
		/* Synchronous data transfer, always flush fifo. */
		flush_fifo = 1;
	} else {
		if (esp->rev == ESP100) {
			u32 fflags, phase;

			/* ESP100 has a chip bug where in the synchronous data
			 * phase it can mistake a final long REQ pulse from the
			 * target as an extra data byte.  Fun.
			 *
			 * To detect this case we resample the status register
			 * and fifo flags.  If we're still in a data phase and
			 * we see spurious chunks in the fifo, we return error
			 * to the caller which should reset and set things up
			 * such that we only try future transfers to this
			 * target in synchronous mode.
			 */
			esp->sreg = esp_read8(ESP_STATUS);
			phase = esp->sreg & ESP_STAT_PMASK;
			fflags = esp_read8(ESP_FFLAGS);

			if ((phase == ESP_DOP &&
			     (fflags & ESP_FF_ONOTZERO)) ||
			    (phase == ESP_DIP &&
			     (fflags & ESP_FF_FBYTES)))
				return -1;
		}
		if (!(ent->flags & ESP_CMD_FLAG_WRITE))
			flush_fifo = 1;
	}

	if (flush_fifo)
		esp_flush_fifo(esp);

	return bytes_sent;
}
1359
/* Commit a negotiated (or cleared) sync agreement for target @tp:
 * publish it to the SPI transport layer, program the chip's offset,
 * period and fast-SCSI config bits, and cache the written values in
 * esp->prev_* so later code can avoid redundant register writes.
 *
 * @scsi_period/@scsi_offset are the SDTR wire values; @esp_stp and
 * @esp_soff are the corresponding chip register encodings.  A zero
 * @esp_soff programs asynchronous transfers.
 */
static void esp_setsync(struct esp *esp, struct esp_target_data *tp,
			u8 scsi_period, u8 scsi_offset,
			u8 esp_stp, u8 esp_soff)
{
	spi_period(tp->starget) = scsi_period;
	spi_offset(tp->starget) = scsi_offset;
	spi_width(tp->starget) = (tp->flags & ESP_TGT_WIDE) ? 1 : 0;

	if (esp_soff) {
		esp_stp &= 0x1f;
		/* Fold in the chip's REQ/ACK assertion delay bits.  */
		esp_soff |= esp->radelay;
		if (esp->rev >= FAS236) {
			u8 bit = ESP_CONFIG3_FSCSI;
			if (esp->rev >= FAS100A)
				bit = ESP_CONFIG3_FAST;

			/* period < 50 (i.e. < 200ns) means fast SCSI;
			 * FASHME wants the radelay bits cleared there.
			 */
			if (scsi_period < 50) {
				if (esp->rev == FASHME)
					esp_soff &= ~esp->radelay;
				tp->esp_config3 |= bit;
			} else {
				tp->esp_config3 &= ~bit;
			}
			esp->prev_cfg3 = tp->esp_config3;
			esp_write8(esp->prev_cfg3, ESP_CFG3);
		}
	}

	tp->esp_period = esp->prev_stp = esp_stp;
	tp->esp_offset = esp->prev_soff = esp_soff;

	esp_write8(esp_soff, ESP_SOFF);
	esp_write8(esp_stp, ESP_STP);

	/* Sync negotiation is settled for this target.  */
	tp->flags &= ~(ESP_TGT_NEGO_SYNC | ESP_TGT_CHECK_NEGO);

	spi_display_xfer_agreement(tp->starget);
}
1398
/* Handle MESSAGE REJECT from the target.  If we were negotiating wide
 * transfers, fall back to narrow (and possibly start sync negotiation
 * instead).  If we were negotiating sync, fall back to async.  Any
 * other rejection aborts the task set.
 */
static void esp_msgin_reject(struct esp *esp)
{
	struct esp_cmd_entry *ent = esp->active_cmd;
	struct scsi_cmnd *cmd = ent->cmd;
	struct esp_target_data *tp;
	int tgt;

	tgt = cmd->device->id;
	tp = &esp->target[tgt];

	if (tp->flags & ESP_TGT_NEGO_WIDE) {
		/* Target refused WDTR: drop to narrow transfers.  */
		tp->flags &= ~(ESP_TGT_NEGO_WIDE | ESP_TGT_WIDE);

		if (!esp_need_to_nego_sync(tp)) {
			tp->flags &= ~ESP_TGT_CHECK_NEGO;
			scsi_esp_cmd(esp, ESP_CMD_RATN);
		} else {
			/* Still need a sync agreement: queue an SDTR and
			 * assert ATN to send it.
			 */
			esp->msg_out_len =
				spi_populate_sync_msg(&esp->msg_out[0],
						      tp->nego_goal_period,
						      tp->nego_goal_offset);
			tp->flags |= ESP_TGT_NEGO_SYNC;
			scsi_esp_cmd(esp, ESP_CMD_SATN);
		}
		return;
	}

	if (tp->flags & ESP_TGT_NEGO_SYNC) {
		/* Target refused SDTR: revert to asynchronous.  */
		tp->flags &= ~(ESP_TGT_NEGO_SYNC | ESP_TGT_CHECK_NEGO);
		tp->esp_period = 0;
		tp->esp_offset = 0;
		esp_setsync(esp, tp, 0, 0, 0, 0);
		scsi_esp_cmd(esp, ESP_CMD_RATN);
		return;
	}

	/* Unexpected reject: give up on this task set.  */
	esp->msg_out[0] = ABORT_TASK_SET;
	esp->msg_out_len = 1;
	scsi_esp_cmd(esp, ESP_CMD_SATN);
}
1439
/* Process an incoming SDTR extended message (period in msg_in[3],
 * offset in msg_in[4]) and either commit the agreement via
 * esp_setsync(), reject it, or counter-propose async.
 */
static void esp_msgin_sdtr(struct esp *esp, struct esp_target_data *tp)
{
	u8 period = esp->msg_in[3];
	u8 offset = esp->msg_in[4];
	u8 stp;

	/* SDTR we never asked for is a protocol violation.  */
	if (!(tp->flags & ESP_TGT_NEGO_SYNC))
		goto do_reject;

	/* Chip supports a maximum sync offset of 15.  */
	if (offset > 15)
		goto do_reject;

	if (offset) {
		int rounded_up, one_clock;

		if (period > esp->max_period) {
			/* Too slow for us: counter with an async SDTR.  */
			period = offset = 0;
			goto do_sdtr;
		}
		if (period < esp->min_period)
			goto do_reject;

		/* Convert the SDTR period (units of 4ns) into chip
		 * transfer-period clocks, rounding up.  ccycle is in
		 * picoseconds per clock.
		 */
		one_clock = esp->ccycle / 1000;
		rounded_up = (period << 2);
		rounded_up = (rounded_up + one_clock - 1) / one_clock;
		stp = rounded_up;
		if (stp && esp->rev >= FAS236) {
			/* NOTE(review): presumably adjusts for the FAS
			 * fast-SCSI STP register encoding -- confirm
			 * against the chip documentation.
			 */
			if (stp >= 50)
				stp--;
		}
	} else {
		/* Zero offset means asynchronous.  */
		stp = 0;
	}

	esp_setsync(esp, tp, period, offset, stp, offset);
	return;

do_reject:
	esp->msg_out[0] = MESSAGE_REJECT;
	esp->msg_out_len = 1;
	scsi_esp_cmd(esp, ESP_CMD_SATN);
	return;

do_sdtr:
	/* Send our own SDTR counter-proposal.  */
	tp->nego_goal_period = period;
	tp->nego_goal_offset = offset;
	esp->msg_out_len =
		spi_populate_sync_msg(&esp->msg_out[0],
				      tp->nego_goal_period,
				      tp->nego_goal_offset);
	scsi_esp_cmd(esp, ESP_CMD_SATN);
}
1492
/* Process an incoming WDTR extended message (transfer width exponent
 * in msg_in[3]: 8 << n bits).  Only the FASHME chip supports wide
 * transfers here; commit the width, then proceed to sync negotiation
 * if still needed.
 */
static void esp_msgin_wdtr(struct esp *esp, struct esp_target_data *tp)
{
	int size = 8 << esp->msg_in[3];
	u8 cfg3;

	/* Only FASHME is wide-capable in this driver.  */
	if (esp->rev != FASHME)
		goto do_reject;

	if (size != 8 && size != 16)
		goto do_reject;

	/* WDTR we never asked for is a protocol violation.  */
	if (!(tp->flags & ESP_TGT_NEGO_WIDE))
		goto do_reject;

	cfg3 = tp->esp_config3;
	if (size == 16) {
		tp->flags |= ESP_TGT_WIDE;
		cfg3 |= ESP_CONFIG3_EWIDE;
	} else {
		tp->flags &= ~ESP_TGT_WIDE;
		cfg3 &= ~ESP_CONFIG3_EWIDE;
	}
	tp->esp_config3 = cfg3;
	esp->prev_cfg3 = cfg3;
	esp_write8(cfg3, ESP_CFG3);

	tp->flags &= ~ESP_TGT_NEGO_WIDE;

	/* Width change invalidates any previous sync agreement.  */
	spi_period(tp->starget) = 0;
	spi_offset(tp->starget) = 0;
	if (!esp_need_to_nego_sync(tp)) {
		tp->flags &= ~ESP_TGT_CHECK_NEGO;
		scsi_esp_cmd(esp, ESP_CMD_RATN);
	} else {
		/* Follow up with an SDTR of our own.  */
		esp->msg_out_len =
			spi_populate_sync_msg(&esp->msg_out[0],
					      tp->nego_goal_period,
					      tp->nego_goal_offset);
		tp->flags |= ESP_TGT_NEGO_SYNC;
		scsi_esp_cmd(esp, ESP_CMD_SATN);
	}
	return;

do_reject:
	esp->msg_out[0] = MESSAGE_REJECT;
	esp->msg_out_len = 1;
	scsi_esp_cmd(esp, ESP_CMD_SATN);
}
1541
1542static void esp_msgin_extended(struct esp *esp)
1543{
1544	struct esp_cmd_entry *ent = esp->active_cmd;
1545	struct scsi_cmnd *cmd = ent->cmd;
1546	struct esp_target_data *tp;
1547	int tgt = cmd->device->id;
1548
1549	tp = &esp->target[tgt];
1550	if (esp->msg_in[2] == EXTENDED_SDTR) {
1551		esp_msgin_sdtr(esp, tp);
1552		return;
1553	}
1554	if (esp->msg_in[2] == EXTENDED_WDTR) {
1555		esp_msgin_wdtr(esp, tp);
1556		return;
1557	}
1558
1559	printk("ESP: Unexpected extended msg type %x\n",
1560	       esp->msg_in[2]);
1561
1562	esp->msg_out[0] = ABORT_TASK_SET;
1563	esp->msg_out_len = 1;
1564	scsi_esp_cmd(esp, ESP_CMD_SATN);
1565}
1566
/* Analyze msgin bytes received from target so far.  Return non-zero
 * if there are more bytes needed to complete the message.
 */
static int esp_msgin_process(struct esp *esp)
{
	u8 msg0 = esp->msg_in[0];
	int len = esp->msg_in_len;

	if (msg0 & 0x80) {
		/* Identify */
		printk("ESP: Unexpected msgin identify\n");
		return 0;
	}

	switch (msg0) {
	case EXTENDED_MESSAGE:
		/* Byte 1 carries the remaining message length; keep
		 * collecting until the whole message has arrived.
		 */
		if (len == 1)
			return 1;
		if (len < esp->msg_in[1] + 2)
			return 1;
		esp_msgin_extended(esp);
		return 0;

	case IGNORE_WIDE_RESIDUE: {
		struct esp_cmd_entry *ent;
		struct esp_cmd_priv *spriv;
		/* Two-byte message: wait for the residue count byte.  */
		if (len == 1)
			return 1;

		/* Only a one-byte residue is meaningful on a 16-bit bus. */
		if (esp->msg_in[1] != 1)
			goto do_reject;

		ent = esp->active_cmd;
		spriv = ESP_CMD_PRIV(ent->cmd);

		/* Undo the single bogus byte: either step back into the
		 * previous scatterlist entry or bump the current residue.
		 */
		if (spriv->cur_residue == sg_dma_len(spriv->cur_sg)) {
			spriv->cur_sg--;
			spriv->cur_residue = 1;
		} else
			spriv->cur_residue++;
		spriv->tot_residue++;
		return 0;
	}
	case NOP:
		return 0;
	case RESTORE_POINTERS:
		esp_restore_pointers(esp, esp->active_cmd);
		return 0;
	case SAVE_POINTERS:
		esp_save_pointers(esp, esp->active_cmd);
		return 0;

	case COMMAND_COMPLETE:
	case DISCONNECT: {
		struct esp_cmd_entry *ent = esp->active_cmd;

		/* Record the message; the FREE_BUS event decides whether
		 * to complete the command or just let the target go.
		 */
		ent->message = msg0;
		esp_event(esp, ESP_EVENT_FREE_BUS);
		esp->flags |= ESP_FLAG_QUICKIRQ_CHECK;
		return 0;
	}
	case MESSAGE_REJECT:
		esp_msgin_reject(esp);
		return 0;

	default:
	do_reject:
		esp->msg_out[0] = MESSAGE_REJECT;
		esp->msg_out_len = 1;
		scsi_esp_cmd(esp, ESP_CMD_SATN);
		return 0;
	}
}
1640
/* Core driver state machine: advance one step based on esp->event and
 * the status/interrupt registers sampled by __esp_interrupt().
 *
 * Returns 1 when interrupt servicing may stop, 0 when processing must
 * cease because a chip/bus reset has been scheduled.  Phase changes
 * loop back via 'again' rather than waiting for another interrupt.
 */
static int esp_process_event(struct esp *esp)
{
	int write;

again:
	write = 0;
	switch (esp->event) {
	case ESP_EVENT_CHECK_PHASE:
		/* Map the sampled SCSI bus phase to the next event.  */
		switch (esp->sreg & ESP_STAT_PMASK) {
		case ESP_DOP:
			esp_event(esp, ESP_EVENT_DATA_OUT);
			break;
		case ESP_DIP:
			esp_event(esp, ESP_EVENT_DATA_IN);
			break;
		case ESP_STATP:
			/* Kick off the status/message-in sequence; the
			 * completion interrupt drives ESP_EVENT_STATUS.
			 */
			esp_flush_fifo(esp);
			scsi_esp_cmd(esp, ESP_CMD_ICCSEQ);
			esp_event(esp, ESP_EVENT_STATUS);
			esp->flags |= ESP_FLAG_QUICKIRQ_CHECK;
			return 1;

		case ESP_MOP:
			esp_event(esp, ESP_EVENT_MSGOUT);
			break;

		case ESP_MIP:
			esp_event(esp, ESP_EVENT_MSGIN);
			break;

		case ESP_CMDP:
			esp_event(esp, ESP_EVENT_CMD_START);
			break;

		default:
			printk("ESP: Unexpected phase, sreg=%02x\n",
			       esp->sreg);
			esp_schedule_reset(esp);
			return 0;
		}
		goto again;
		break;

	case ESP_EVENT_DATA_IN:
		write = 1;
		/* fallthru */

	case ESP_EVENT_DATA_OUT: {
		struct esp_cmd_entry *ent = esp->active_cmd;
		struct scsi_cmnd *cmd = ent->cmd;
		dma_addr_t dma_addr = esp_cur_dma_addr(ent, cmd);
		unsigned int dma_len = esp_cur_dma_len(ent, cmd);

		if (esp->rev == ESP100)
			scsi_esp_cmd(esp, ESP_CMD_NULL);

		if (write)
			ent->flags |= ESP_CMD_FLAG_WRITE;
		else
			ent->flags &= ~ESP_CMD_FLAG_WRITE;

		/* Clamp to what the DMA engine can move in one go.  */
		dma_len = esp_dma_length_limit(esp, dma_addr, dma_len);
		esp->data_dma_len = dma_len;

		if (!dma_len) {
			printk(KERN_ERR PFX "esp%d: DMA length is zero!\n",
			       esp->host->unique_id);
			printk(KERN_ERR PFX "esp%d: cur adr[%08llx] len[%08x]\n",
			       esp->host->unique_id,
			       (unsigned long long)esp_cur_dma_addr(ent, cmd),
			       esp_cur_dma_len(ent, cmd));
			esp_schedule_reset(esp);
			return 0;
		}

		esp_log_datastart("ESP: start data addr[%08llx] len[%u] "
				  "write(%d)\n",
				  (unsigned long long)dma_addr, dma_len, write);

		esp->ops->send_dma_cmd(esp, dma_addr, dma_len, dma_len,
				       write, ESP_CMD_DMA | ESP_CMD_TI);
		esp_event(esp, ESP_EVENT_DATA_DONE);
		break;
	}
	case ESP_EVENT_DATA_DONE: {
		struct esp_cmd_entry *ent = esp->active_cmd;
		struct scsi_cmnd *cmd = ent->cmd;
		int bytes_sent;

		if (esp->ops->dma_error(esp)) {
			printk("ESP: data done, DMA error, resetting\n");
			esp_schedule_reset(esp);
			return 0;
		}

		if (ent->flags & ESP_CMD_FLAG_WRITE) {
			/* Ensure all DMA'd data has landed in memory
			 * before we account for it.
			 */
			esp->ops->dma_drain(esp);
		}
		esp->ops->dma_invalidate(esp);

		if (esp->ireg != ESP_INTR_BSERV) {
			/* We should always see exactly a bus-service
			 * interrupt at the end of a successful transfer.
			 */
			printk("ESP: data done, not BSERV, resetting\n");
			esp_schedule_reset(esp);
			return 0;
		}

		bytes_sent = esp_data_bytes_sent(esp, ent, cmd);

		esp_log_datadone("ESP: data done flgs[%x] sent[%d]\n",
				 ent->flags, bytes_sent);

		/* Negative return flags the ESP100 sync-data chip bug.  */
		if (bytes_sent < 0) {
			esp_schedule_reset(esp);
			return 0;
		}

		esp_advance_dma(esp, ent, cmd, bytes_sent);
		esp_event(esp, ESP_EVENT_CHECK_PHASE);
		goto again;
		break;
	}

	case ESP_EVENT_STATUS: {
		struct esp_cmd_entry *ent = esp->active_cmd;

		if (esp->ireg & ESP_INTR_FDONE) {
			/* ICCSEQ completed: fifo holds status byte then
			 * the completion message byte.
			 */
			ent->status = esp_read8(ESP_FDATA);
			ent->message = esp_read8(ESP_FDATA);
			scsi_esp_cmd(esp, ESP_CMD_MOK);
		} else if (esp->ireg == ESP_INTR_BSERV) {
			/* Message byte not delivered yet; fetch it via
			 * the MSGIN path.
			 */
			ent->status = esp_read8(ESP_FDATA);
			ent->message = 0xff;
			esp_event(esp, ESP_EVENT_MSGIN);
			return 0;
		}

		if (ent->message != COMMAND_COMPLETE) {
			printk("ESP: Unexpected message %x in status\n",
			       ent->message);
			esp_schedule_reset(esp);
			return 0;
		}

		esp_event(esp, ESP_EVENT_FREE_BUS);
		esp->flags |= ESP_FLAG_QUICKIRQ_CHECK;
		break;
	}
	case ESP_EVENT_FREE_BUS: {
		struct esp_cmd_entry *ent = esp->active_cmd;
		struct scsi_cmnd *cmd = ent->cmd;

		if (ent->message == COMMAND_COMPLETE ||
		    ent->message == DISCONNECT)
			scsi_esp_cmd(esp, ESP_CMD_ESEL);

		if (ent->message == COMMAND_COMPLETE) {
			esp_log_cmddone("ESP: Command done status[%x] "
					"message[%x]\n",
					ent->status, ent->message);
			if (ent->status == SAM_STAT_TASK_SET_FULL)
				esp_event_queue_full(esp, ent);

			/* CHECK CONDITION triggers one autosense pass;
			 * the flag prevents recursing on its result.
			 */
			if (ent->status == SAM_STAT_CHECK_CONDITION &&
			    !(ent->flags & ESP_CMD_FLAG_AUTOSENSE)) {
				ent->flags |= ESP_CMD_FLAG_AUTOSENSE;
				esp_autosense(esp, ent);
			} else {
				esp_cmd_is_done(esp, ent, cmd,
						compose_result(ent->status,
							       ent->message,
							       DID_OK));
			}
		} else if (ent->message == DISCONNECT) {
			esp_log_disconnect("ESP: Disconnecting tgt[%d] "
					   "tag[%x:%x]\n",
					   cmd->device->id,
					   ent->tag[0], ent->tag[1]);

			/* Target released the bus; try to start another
			 * queued command meanwhile.
			 */
			esp->active_cmd = NULL;
			esp_maybe_execute_command(esp);
		} else {
			printk("ESP: Unexpected message %x in freebus\n",
			       ent->message);
			esp_schedule_reset(esp);
			return 0;
		}
		if (esp->active_cmd)
			esp->flags |= ESP_FLAG_QUICKIRQ_CHECK;
		break;
	}
	case ESP_EVENT_MSGOUT: {
		scsi_esp_cmd(esp, ESP_CMD_FLUSH);

		if (esp_debug & ESP_DEBUG_MSGOUT) {
			int i;
			printk("ESP: Sending message [ ");
			for (i = 0; i < esp->msg_out_len; i++)
				printk("%02x ", esp->msg_out[i]);
			printk("]\n");
		}

		if (esp->rev == FASHME) {
			int i;

			/* Always use the fifo.  */
			for (i = 0; i < esp->msg_out_len; i++) {
				esp_write8(esp->msg_out[i], ESP_FDATA);
				esp_write8(0, ESP_FDATA);
			}
			scsi_esp_cmd(esp, ESP_CMD_TI);
		} else {
			if (esp->msg_out_len == 1) {
				esp_write8(esp->msg_out[0], ESP_FDATA);
				scsi_esp_cmd(esp, ESP_CMD_TI);
			} else {
				/* Use DMA. */
				memcpy(esp->command_block,
				       esp->msg_out,
				       esp->msg_out_len);

				esp->ops->send_dma_cmd(esp,
						       esp->command_block_dma,
						       esp->msg_out_len,
						       esp->msg_out_len,
						       0,
						       ESP_CMD_DMA|ESP_CMD_TI);
			}
		}
		esp_event(esp, ESP_EVENT_MSGOUT_DONE);
		break;
	}
	case ESP_EVENT_MSGOUT_DONE:
		if (esp->rev == FASHME) {
			scsi_esp_cmd(esp, ESP_CMD_FLUSH);
		} else {
			/* Multi-byte message-out went via DMA above.  */
			if (esp->msg_out_len > 1)
				esp->ops->dma_invalidate(esp);
		}

		if (!(esp->ireg & ESP_INTR_DC)) {
			if (esp->rev != FASHME)
				scsi_esp_cmd(esp, ESP_CMD_NULL);
		}
		esp_event(esp, ESP_EVENT_CHECK_PHASE);
		goto again;
	case ESP_EVENT_MSGIN:
		if (esp->ireg & ESP_INTR_BSERV) {
			/* Target has a message byte ready: clean the fifo
			 * and clock it in with a transfer-info command.
			 */
			if (esp->rev == FASHME) {
				if (!(esp_read8(ESP_STATUS2) &
				      ESP_STAT2_FEMPTY))
					scsi_esp_cmd(esp, ESP_CMD_FLUSH);
			} else {
				scsi_esp_cmd(esp, ESP_CMD_FLUSH);
				if (esp->rev == ESP100)
					scsi_esp_cmd(esp, ESP_CMD_NULL);
			}
			scsi_esp_cmd(esp, ESP_CMD_TI);
			esp->flags |= ESP_FLAG_QUICKIRQ_CHECK;
			return 1;
		}
		if (esp->ireg & ESP_INTR_FDONE) {
			u8 val;

			/* FASHME fifo contents were already drained into
			 * esp->fifo by hme_read_fifo().
			 */
			if (esp->rev == FASHME)
				val = esp->fifo[0];
			else
				val = esp_read8(ESP_FDATA);
			esp->msg_in[esp->msg_in_len++] = val;

			esp_log_msgin("ESP: Got msgin byte %x\n", val);

			/* Non-zero means more bytes of this message are
			 * still expected.
			 */
			if (!esp_msgin_process(esp))
				esp->msg_in_len = 0;

			if (esp->rev == FASHME)
				scsi_esp_cmd(esp, ESP_CMD_FLUSH);

			scsi_esp_cmd(esp, ESP_CMD_MOK);

			if (esp->event != ESP_EVENT_FREE_BUS)
				esp_event(esp, ESP_EVENT_CHECK_PHASE);
		} else {
			printk("ESP: MSGIN neither BSERV not FDON, resetting");
			esp_schedule_reset(esp);
			return 0;
		}
		break;
	case ESP_EVENT_CMD_START:
		memcpy(esp->command_block, esp->cmd_bytes_ptr,
		       esp->cmd_bytes_left);
		if (esp->rev == FASHME)
			scsi_esp_cmd(esp, ESP_CMD_FLUSH);
		esp->ops->send_dma_cmd(esp, esp->command_block_dma,
				       esp->cmd_bytes_left, 16, 0,
				       ESP_CMD_DMA | ESP_CMD_TI);
		esp_event(esp, ESP_EVENT_CMD_DONE);
		esp->flags |= ESP_FLAG_QUICKIRQ_CHECK;
		break;
	case ESP_EVENT_CMD_DONE:
		esp->ops->dma_invalidate(esp);
		if (esp->ireg & ESP_INTR_BSERV) {
			esp_event(esp, ESP_EVENT_CHECK_PHASE);
			goto again;
		}
		esp_schedule_reset(esp);
		return 0;
		break;

	case ESP_EVENT_RESET:
		scsi_esp_cmd(esp, ESP_CMD_RS);
		break;

	default:
		printk("ESP: Unexpected event %x, resetting\n",
		       esp->event);
		esp_schedule_reset(esp);
		return 0;
		break;
	}
	return 1;
}
1966
1967static void esp_reset_cleanup_one(struct esp *esp, struct esp_cmd_entry *ent)
1968{
1969	struct scsi_cmnd *cmd = ent->cmd;
1970
1971	esp_unmap_dma(esp, cmd);
1972	esp_free_lun_tag(ent, cmd->device->hostdata);
1973	cmd->result = DID_RESET << 16;
1974
1975	if (ent->flags & ESP_CMD_FLAG_AUTOSENSE) {
1976		esp->ops->unmap_single(esp, ent->sense_dma,
1977				       SCSI_SENSE_BUFFERSIZE, DMA_FROM_DEVICE);
1978		ent->sense_ptr = NULL;
1979	}
1980
1981	cmd->scsi_done(cmd);
1982	list_del(&ent->list);
1983	esp_put_ent(esp, ent);
1984}
1985
1986static void esp_clear_hold(struct scsi_device *dev, void *data)
1987{
1988	struct esp_lun_data *lp = dev->hostdata;
1989
1990	BUG_ON(lp->num_tagged);
1991	lp->hold = 0;
1992}
1993
/* Abort every queued and active command with DID_RESET after a SCSI
 * bus reset, and reset all per-target negotiation state so sync/wide
 * agreements are re-negotiated from scratch.
 */
static void esp_reset_cleanup(struct esp *esp)
{
	struct esp_cmd_entry *ent, *tmp;
	int i;

	/* Commands not yet started can be failed directly.  */
	list_for_each_entry_safe(ent, tmp, &esp->queued_cmds, list) {
		struct scsi_cmnd *cmd = ent->cmd;

		list_del(&ent->list);
		cmd->result = DID_RESET << 16;
		cmd->scsi_done(cmd);
		esp_put_ent(esp, ent);
	}

	/* In-flight commands also need their DMA/tag state unwound.  */
	list_for_each_entry_safe(ent, tmp, &esp->active_cmds, list) {
		if (ent == esp->active_cmd)
			esp->active_cmd = NULL;
		esp_reset_cleanup_one(esp, ent);
	}

	BUG_ON(esp->active_cmd != NULL);

	/* Force renegotiation of sync/wide transfers.  */
	for (i = 0; i < ESP_MAX_TARGET; i++) {
		struct esp_target_data *tp = &esp->target[i];

		tp->esp_period = 0;
		tp->esp_offset = 0;
		tp->esp_config3 &= ~(ESP_CONFIG3_EWIDE |
				     ESP_CONFIG3_FSCSI |
				     ESP_CONFIG3_FAST);
		tp->flags &= ~ESP_TGT_WIDE;
		tp->flags |= ESP_TGT_CHECK_NEGO;

		/* Reopen every LUN's issue gate.  */
		if (tp->starget)
			starget_for_each_device(tp->starget, NULL,
						esp_clear_hold);
	}
	esp->flags &= ~ESP_FLAG_RESETTING;
}
2034
/* Runs under host->lock.
 *
 * One pass of interrupt handling: sample the status and interrupt
 * registers, finish any pending reset, then dispatch to selection
 * completion, reconnect handling, or the event state machine.
 */
static void __esp_interrupt(struct esp *esp)
{
	int finish_reset, intr_done;
	u8 phase;

	esp->sreg = esp_read8(ESP_STATUS);

	if (esp->flags & ESP_FLAG_RESETTING) {
		finish_reset = 1;
	} else {
		if (esp_check_gross_error(esp))
			return;

		/* Negative means "spurious, already handled" -- bail.  */
		finish_reset = esp_check_spur_intr(esp);
		if (finish_reset < 0)
			return;
	}

	esp->ireg = esp_read8(ESP_INTRPT);

	/* A bus-reset interrupt also forces cleanup.  */
	if (esp->ireg & ESP_INTR_SR)
		finish_reset = 1;

	if (finish_reset) {
		esp_reset_cleanup(esp);
		/* Wake up an error-handler thread waiting on the reset.  */
		if (esp->eh_reset) {
			complete(esp->eh_reset);
			esp->eh_reset = NULL;
		}
		return;
	}

	phase = (esp->sreg & ESP_STAT_PMASK);
	if (esp->rev == FASHME) {
		/* FASHME fifo contents can be lost by later commands,
		 * so drain them into esp->fifo[] up front when we are
		 * not mid data phase (or on reselection).
		 */
		if (((phase != ESP_DIP && phase != ESP_DOP) &&
		     esp->select_state == ESP_SELECT_NONE &&
		     esp->event != ESP_EVENT_STATUS &&
		     esp->event != ESP_EVENT_DATA_DONE) ||
		    (esp->ireg & ESP_INTR_RSEL)) {
			esp->sreg2 = esp_read8(ESP_STATUS2);
			if (!(esp->sreg2 & ESP_STAT2_FEMPTY) ||
			    (esp->sreg2 & ESP_STAT2_F1BYTE))
				hme_read_fifo(esp);
		}
	}

	esp_log_intr("ESP: intr sreg[%02x] seqreg[%02x] "
		     "sreg2[%02x] ireg[%02x]\n",
		     esp->sreg, esp->seqreg, esp->sreg2, esp->ireg);

	intr_done = 0;

	if (esp->ireg & (ESP_INTR_S | ESP_INTR_SATN | ESP_INTR_IC)) {
		printk("ESP: unexpected IREG %02x\n", esp->ireg);
		if (esp->ireg & ESP_INTR_IC)
			esp_dump_cmd_log(esp);

		esp_schedule_reset(esp);
	} else {
		if (!(esp->ireg & ESP_INTR_RSEL)) {
			/* Some combination of FDONE, BSERV, DC.  */
			if (esp->select_state != ESP_SELECT_NONE)
				intr_done = esp_finish_select(esp);
		} else if (esp->ireg & ESP_INTR_RSEL) {
			/* Reselection; back out any in-progress select
			 * first, then bind to the reconnecting command.
			 */
			if (esp->active_cmd)
				(void) esp_finish_select(esp);
			intr_done = esp_reconnect(esp);
		}
	}
	/* Run the state machine until it says servicing is complete.  */
	while (!intr_done)
		intr_done = esp_process_event(esp);
}
2108
/* Shared IRQ handler entry point for all ESP frontends.
 *
 * Takes the host lock and services the chip; when the state machine
 * requests a quick re-check (ESP_FLAG_QUICKIRQ_CHECK) we poll briefly
 * for the next interrupt instead of taking another IRQ round trip.
 */
irqreturn_t scsi_esp_intr(int irq, void *dev_id)
{
	struct esp *esp = dev_id;
	unsigned long flags;
	irqreturn_t ret;

	spin_lock_irqsave(esp->host->host_lock, flags);
	ret = IRQ_NONE;
	if (esp->ops->irq_pending(esp)) {
		ret = IRQ_HANDLED;
		for (;;) {
			int i;

			__esp_interrupt(esp);
			if (!(esp->flags & ESP_FLAG_QUICKIRQ_CHECK))
				break;
			esp->flags &= ~ESP_FLAG_QUICKIRQ_CHECK;

			/* Short poll for the follow-up interrupt.  */
			for (i = 0; i < ESP_QUICKIRQ_LIMIT; i++) {
				if (esp->ops->irq_pending(esp))
					break;
			}
			/* Nothing arrived in time; wait for a real IRQ.  */
			if (i == ESP_QUICKIRQ_LIMIT)
				break;
		}
	}
	spin_unlock_irqrestore(esp->host->host_lock, flags);

	return ret;
}
EXPORT_SYMBOL(scsi_esp_intr);
2140
/* Probe which ESP/FAS chip variant is present by writing the CFG2 and
 * CFG3 registers and checking whether the values read back.  Sets
 * esp->rev and leaves config1/2/3 shadow state initialized.
 */
static void __devinit esp_get_revision(struct esp *esp)
{
	u8 val;

	esp->config1 = (ESP_CONFIG1_PENABLE | (esp->scsi_id & 7));
	esp->config2 = (ESP_CONFIG2_SCSI2ENAB | ESP_CONFIG2_REGPARITY);
	esp_write8(esp->config2, ESP_CFG2);

	val = esp_read8(ESP_CFG2);
	val &= ~ESP_CONFIG2_MAGIC;
	if (val != (ESP_CONFIG2_SCSI2ENAB | ESP_CONFIG2_REGPARITY)) {
		/* If what we write to cfg2 does not come back, cfg2 is not
		 * implemented, therefore this must be a plain esp100.
		 */
		esp->rev = ESP100;
	} else {
		esp->config2 = 0;
		esp_set_all_config3(esp, 5);
		esp->prev_cfg3 = 5;
		esp_write8(esp->config2, ESP_CFG2);
		/* Write 0 then 5 so a readback of 5 proves the register
		 * actually latches what we write.
		 */
		esp_write8(0, ESP_CFG3);
		esp_write8(esp->prev_cfg3, ESP_CFG3);

		val = esp_read8(ESP_CFG3);
		if (val != 5) {
			/* The cfg2 register is implemented, however
			 * cfg3 is not, must be esp100a.
			 */
			esp->rev = ESP100A;
		} else {
			esp_set_all_config3(esp, 0);
			esp->prev_cfg3 = 0;
			esp_write8(esp->prev_cfg3, ESP_CFG3);

			/* All of cfg{1,2,3} implemented, must be one of
			 * the fas variants, figure out which one.
			 */
			if (esp->cfact == 0 || esp->cfact > ESP_CCF_F5) {
				esp->rev = FAST;
				esp->sync_defp = SYNC_DEFP_FAST;
			} else {
				esp->rev = ESP236;
			}
			esp->config2 = 0;
			esp_write8(esp->config2, ESP_CFG2);
		}
	}
}
2189
2190static void __devinit esp_init_swstate(struct esp *esp)
2191{
2192	int i;
2193
2194	INIT_LIST_HEAD(&esp->queued_cmds);
2195	INIT_LIST_HEAD(&esp->active_cmds);
2196	INIT_LIST_HEAD(&esp->esp_cmd_pool);
2197
2198	/* Start with a clear state, domain validation (via ->slave_configure,
2199	 * spi_dv_device()) will attempt to enable SYNC, WIDE, and tagged
2200	 * commands.
2201	 */
2202	for (i = 0 ; i < ESP_MAX_TARGET; i++) {
2203		esp->target[i].flags = 0;
2204		esp->target[i].nego_goal_period = 0;
2205		esp->target[i].nego_goal_offset = 0;
2206		esp->target[i].nego_goal_width = 0;
2207		esp->target[i].nego_goal_tags = 0;
2208	}
2209}
2210
/* This places the ESP into a known state at boot time: reset the DMA
 * engine and the chip, then pulse a SCSI bus reset with the chip's
 * reset-interrupt reporting suppressed so it does not fire an IRQ at
 * itself, and finally clear any latched interrupt state.
 */
static void esp_bootup_reset(struct esp *esp)
{
	u8 val;

	/* Reset the DMA */
	esp->ops->reset_dma(esp);

	/* Reset the ESP */
	esp_reset_esp(esp);

	/* Reset the SCSI bus, but tell ESP not to generate an irq */
	val = esp_read8(ESP_CFG1);
	val |= ESP_CONFIG1_SRRDISAB;
	esp_write8(val, ESP_CFG1);

	scsi_esp_cmd(esp, ESP_CMD_RS);
	udelay(400);

	/* Restore the normal config1 (re-enables reset reporting).  */
	esp_write8(esp->config1, ESP_CFG1);

	/* Eat any bitrot in the chip and we are done... */
	esp_read8(ESP_INTRPT);
}
2235
static void __devinit esp_set_clock_params(struct esp *esp)
{
	int fmhz;
	u8 ccf;

	/* This is getting messy but it has to be done correctly or else
	 * you get weird behavior all over the place.  We are trying to
	 * basically figure out three pieces of information.
	 *
	 * a) Clock Conversion Factor
	 *
	 *    This is a representation of the input crystal clock frequency
	 *    going into the ESP on this machine.  Any operation whose timing
	 *    is longer than 400ns depends on this value being correct.  For
	 *    example, you'll get blips for arbitration/selection during high
	 *    load or with multiple targets if this is not set correctly.
	 *
	 * b) Selection Time-Out
	 *
	 *    The ESP isn't very bright and will arbitrate for the bus and try
	 *    to select a target forever if you let it.  This value tells the
	 *    ESP when it has taken too long to negotiate and that it should
	 *    interrupt the CPU so we can see what happened.  The value is
	 *    computed as follows (from NCR/Symbios chip docs).
	 *
	 *          (Time Out Period) *  (Input Clock)
	 *    STO = ----------------------------------
	 *          (8192) * (Clock Conversion Factor)
	 *
	 *    We use a time out period of 250ms (ESP_BUS_TIMEOUT).
	 *
	 * c) Empirical constants for synchronous offset and transfer period
	 *    register values
	 *
	 *    This entails the smallest and largest sync period we could ever
	 *    handle on this ESP.
	 */
	fmhz = esp->cfreq;

	/* CCF = ceil(frequency-in-MHz / 5); a value of 1 is encoded as 2. */
	ccf = ((fmhz / 1000000) + 4) / 5;
	if (ccf == 1)
		ccf = 2;

	/* If we can't find anything reasonable, just assume 20MHZ.
	 * This is the clock frequency of the older sun4c's where I've
	 * been unable to find the clock-frequency PROM property.  All
	 * other machines provide useful values it seems.
	 */
	if (fmhz <= 5000000 || ccf < 1 || ccf > 8) {
		fmhz = 20000000;
		ccf = 4;
	}

	/* The chip encodes CCF 8 as register value 0.  */
	esp->cfact = (ccf == 8 ? 0 : ccf);
	esp->cfreq = fmhz;
	esp->ccycle = ESP_MHZ_TO_CYCLE(fmhz);
	esp->ctick = ESP_TICK(ccf, esp->ccycle);
	esp->neg_defp = ESP_NEG_DEFP(fmhz, ccf);
	esp->sync_defp = SYNC_DEFP_SLOW;
}
2296
/* Human-readable chip names; the order must match the revision
 * constants (ESP100 .. FASHME) used for esp->rev, which index this
 * table in scsi_esp_register().
 */
static const char *esp_chip_names[] = {
	"ESP100",
	"ESP100A",
	"ESP236",
	"FAS236",
	"FAS100A",
	"FAST",
	"FASHME",
};

/* SPI transport template shared by every registered ESP host.  */
static struct scsi_transport_template *esp_transport_template;
2308
2309int __devinit scsi_esp_register(struct esp *esp, struct device *dev)
2310{
2311	static int instance;
2312	int err;
2313
2314	esp->host->transportt = esp_transport_template;
2315	esp->host->max_lun = ESP_MAX_LUN;
2316	esp->host->cmd_per_lun = 2;
2317
2318	esp_set_clock_params(esp);
2319
2320	esp_get_revision(esp);
2321
2322	esp_init_swstate(esp);
2323
2324	esp_bootup_reset(esp);
2325
2326	printk(KERN_INFO PFX "esp%u, regs[%1p:%1p] irq[%u]\n",
2327	       esp->host->unique_id, esp->regs, esp->dma_regs,
2328	       esp->host->irq);
2329	printk(KERN_INFO PFX "esp%u is a %s, %u MHz (ccf=%u), SCSI ID %u\n",
2330	       esp->host->unique_id, esp_chip_names[esp->rev],
2331	       esp->cfreq / 1000000, esp->cfact, esp->scsi_id);
2332
2333	/* Let the SCSI bus reset settle. */
2334	ssleep(esp_bus_reset_settle);
2335
2336	err = scsi_add_host(esp->host, dev);
2337	if (err)
2338		return err;
2339
2340	esp->host->unique_id = instance++;
2341
2342	scsi_scan_host(esp->host);
2343
2344	return 0;
2345}
2346EXPORT_SYMBOL(scsi_esp_register);
2347
2348void __devexit scsi_esp_unregister(struct esp *esp)
2349{
2350	scsi_remove_host(esp->host);
2351}
2352EXPORT_SYMBOL(scsi_esp_unregister);
2353
2354static int esp_slave_alloc(struct scsi_device *dev)
2355{
2356	struct esp *esp = host_to_esp(dev->host);
2357	struct esp_target_data *tp = &esp->target[dev->id];
2358	struct esp_lun_data *lp;
2359
2360	lp = kzalloc(sizeof(*lp), GFP_KERNEL);
2361	if (!lp)
2362		return -ENOMEM;
2363	dev->hostdata = lp;
2364
2365	tp->starget = dev->sdev_target;
2366
2367	spi_min_period(tp->starget) = esp->min_period;
2368	spi_max_offset(tp->starget) = 15;
2369
2370	if (esp->flags & ESP_FLAG_WIDE_CAPABLE)
2371		spi_max_width(tp->starget) = 1;
2372	else
2373		spi_max_width(tp->starget) = 0;
2374
2375	return 0;
2376}
2377
2378static int esp_slave_configure(struct scsi_device *dev)
2379{
2380	struct esp *esp = host_to_esp(dev->host);
2381	struct esp_target_data *tp = &esp->target[dev->id];
2382	int goal_tags, queue_depth;
2383
2384	goal_tags = 0;
2385
2386	if (dev->tagged_supported) {
2387		goal_tags = ESP_DEFAULT_TAGS;
2388
2389		if (goal_tags > ESP_MAX_TAG)
2390			goal_tags = ESP_MAX_TAG;
2391	}
2392
2393	queue_depth = goal_tags;
2394	if (queue_depth < dev->host->cmd_per_lun)
2395		queue_depth = dev->host->cmd_per_lun;
2396
2397	if (goal_tags) {
2398		scsi_set_tag_type(dev, MSG_ORDERED_TAG);
2399		scsi_activate_tcq(dev, queue_depth);
2400	} else {
2401		scsi_deactivate_tcq(dev, queue_depth);
2402	}
2403	tp->flags |= ESP_TGT_DISCONNECT;
2404
2405	if (!spi_initial_dv(dev->sdev_target))
2406		spi_dv_device(dev);
2407
2408	return 0;
2409}
2410
2411static void esp_slave_destroy(struct scsi_device *dev)
2412{
2413	struct esp_lun_data *lp = dev->hostdata;
2414
2415	kfree(lp);
2416	dev->hostdata = NULL;
2417}
2418
/* Error-handler abort: try to abort @cmd without resetting the bus.
 *
 * Three cases, decided under the host lock:
 *   1) @cmd is still on esp->queued_cmds (never issued): unlink it,
 *      complete it with DID_ABORT, return SUCCESS.
 *   2) @cmd is the currently active command: queue an ABORT_TASK_SET
 *      message, assert ATN, and wait up to 5s for the interrupt path
 *      to signal ent->eh_done.
 *   3) @cmd is disconnected: give up (FAILED) so the mid-layer
 *      escalates to a bus or host reset.
 */
static int esp_eh_abort_handler(struct scsi_cmnd *cmd)
{
	struct esp *esp = host_to_esp(cmd->device->host);
	struct esp_cmd_entry *ent, *tmp;
	struct completion eh_done;
	unsigned long flags;

	/* First dump the driver's view of all outstanding commands as
	 * a debugging aid; the lock is dropped and re-taken afterwards.
	 */
	spin_lock_irqsave(esp->host->host_lock, flags);
	printk(KERN_ERR PFX "esp%d: Aborting command [%p:%02x]\n",
	       esp->host->unique_id, cmd, cmd->cmnd[0]);
	ent = esp->active_cmd;
	if (ent)
		printk(KERN_ERR PFX "esp%d: Current command [%p:%02x]\n",
		       esp->host->unique_id, ent->cmd, ent->cmd->cmnd[0]);
	list_for_each_entry(ent, &esp->queued_cmds, list) {
		printk(KERN_ERR PFX "esp%d: Queued command [%p:%02x]\n",
		       esp->host->unique_id, ent->cmd, ent->cmd->cmnd[0]);
	}
	list_for_each_entry(ent, &esp->active_cmds, list) {
		printk(KERN_ERR PFX "esp%d: Active command [%p:%02x]\n",
		       esp->host->unique_id, ent->cmd, ent->cmd->cmnd[0]);
	}
	esp_dump_cmd_log(esp);
	spin_unlock_irqrestore(esp->host->host_lock, flags);

	spin_lock_irqsave(esp->host->host_lock, flags);

	/* Case 1: is the command still sitting on the queued list? */
	ent = NULL;
	list_for_each_entry(tmp, &esp->queued_cmds, list) {
		if (tmp->cmd == cmd) {
			ent = tmp;
			break;
		}
	}

	if (ent) {
		/* Easiest case, we didn't even issue the command
		 * yet so it is trivial to abort.
		 */
		list_del(&ent->list);

		cmd->result = DID_ABORT << 16;
		cmd->scsi_done(cmd);

		esp_put_ent(esp, ent);

		goto out_success;
	}

	init_completion(&eh_done);

	/* Case 2: command is currently active on the bus. */
	ent = esp->active_cmd;
	if (ent && ent->cmd == cmd) {
		/* Command is the currently active command on
		 * the bus.  If we already have an output message
		 * pending, no dice.
		 */
		if (esp->msg_out_len)
			goto out_failure;

		/* Send out an abort, encouraging the target to
		 * go to MSGOUT phase by asserting ATN.
		 */
		esp->msg_out[0] = ABORT_TASK_SET;
		esp->msg_out_len = 1;
		ent->eh_done = &eh_done;

		scsi_esp_cmd(esp, ESP_CMD_SATN);
	} else {
		/* Case 3: the command is disconnected.  This is not easy
		 * to abort.  For now we fail and let the scsi error
		 * handling layer go try a scsi bus reset or host
		 * reset.
		 *
		 * What we could do is put together a scsi command
		 * solely for the purpose of sending an abort message
		 * to the target.  Coming up with all the code to
		 * cook up scsi commands, special case them everywhere,
		 * etc. is for questionable gain and it would be better
		 * if the generic scsi error handling layer could do at
		 * least some of that for us.
		 *
		 * Anyways this is an area for potential future improvement
		 * in this driver.
		 */
		goto out_failure;
	}

	spin_unlock_irqrestore(esp->host->host_lock, flags);

	/* On timeout, detach the on-stack completion under the lock so
	 * a late interrupt cannot complete a stale pointer.
	 */
	if (!wait_for_completion_timeout(&eh_done, 5 * HZ)) {
		spin_lock_irqsave(esp->host->host_lock, flags);
		ent->eh_done = NULL;
		spin_unlock_irqrestore(esp->host->host_lock, flags);

		return FAILED;
	}

	return SUCCESS;

out_success:
	spin_unlock_irqrestore(esp->host->host_lock, flags);
	return SUCCESS;

out_failure:
	spin_unlock_irqrestore(esp->host->host_lock, flags);
	return FAILED;
}
2527
2528static int esp_eh_bus_reset_handler(struct scsi_cmnd *cmd)
2529{
2530	struct esp *esp = host_to_esp(cmd->device->host);
2531	struct completion eh_reset;
2532	unsigned long flags;
2533
2534	init_completion(&eh_reset);
2535
2536	spin_lock_irqsave(esp->host->host_lock, flags);
2537
2538	esp->eh_reset = &eh_reset;
2539
2540	esp->flags |= ESP_FLAG_RESETTING;
2541	scsi_esp_cmd(esp, ESP_CMD_RS);
2542
2543	spin_unlock_irqrestore(esp->host->host_lock, flags);
2544
2545	ssleep(esp_bus_reset_settle);
2546
2547	if (!wait_for_completion_timeout(&eh_reset, 5 * HZ)) {
2548		spin_lock_irqsave(esp->host->host_lock, flags);
2549		esp->eh_reset = NULL;
2550		spin_unlock_irqrestore(esp->host->host_lock, flags);
2551
2552		return FAILED;
2553	}
2554
2555	return SUCCESS;
2556}
2557
/* All bets are off, reset the entire device.  */
static int esp_eh_host_reset_handler(struct scsi_cmnd *cmd)
{
	struct esp *esp = host_to_esp(cmd->device->host);
	unsigned long flags;

	/* Full chip reset plus software-state cleanup, both done
	 * atomically under the host lock.
	 */
	spin_lock_irqsave(esp->host->host_lock, flags);
	esp_bootup_reset(esp);
	esp_reset_cleanup(esp);
	spin_unlock_irqrestore(esp->host->host_lock, flags);

	/* Let the bus settle before the mid-layer retries anything. */
	ssleep(esp_bus_reset_settle);

	return SUCCESS;
}
2573
/* Mid-layer .info hook: short driver identification string. */
static const char *esp_info(struct Scsi_Host *host)
{
	static const char name[] = "esp";

	return name;
}
2578
/* Host template used by the bus-specific front ends that call
 * scsi_esp_register().  skip_settle_delay is set because
 * scsi_esp_register() already sleeps esp_bus_reset_settle seconds
 * after the bootup reset.
 */
struct scsi_host_template scsi_esp_template = {
	.module			= THIS_MODULE,
	.name			= "esp",
	.info			= esp_info,
	.queuecommand		= esp_queuecommand,
	.slave_alloc		= esp_slave_alloc,
	.slave_configure	= esp_slave_configure,
	.slave_destroy		= esp_slave_destroy,
	.eh_abort_handler	= esp_eh_abort_handler,
	.eh_bus_reset_handler	= esp_eh_bus_reset_handler,
	.eh_host_reset_handler	= esp_eh_host_reset_handler,
	.can_queue		= 7,
	.this_id		= 7,
	.sg_tablesize		= SG_ALL,
	.use_clustering		= ENABLE_CLUSTERING,
	.max_sectors		= 0xffff,
	.skip_settle_delay	= 1,
};
EXPORT_SYMBOL(scsi_esp_template);
2598
2599static void esp_get_signalling(struct Scsi_Host *host)
2600{
2601	struct esp *esp = host_to_esp(host);
2602	enum spi_signal_type type;
2603
2604	if (esp->flags & ESP_FLAG_DIFFERENTIAL)
2605		type = SPI_SIGNAL_HVD;
2606	else
2607		type = SPI_SIGNAL_SE;
2608
2609	spi_signalling(host) = type;
2610}
2611
2612static void esp_set_offset(struct scsi_target *target, int offset)
2613{
2614	struct Scsi_Host *host = dev_to_shost(target->dev.parent);
2615	struct esp *esp = host_to_esp(host);
2616	struct esp_target_data *tp = &esp->target[target->id];
2617
2618	tp->nego_goal_offset = offset;
2619	tp->flags |= ESP_TGT_CHECK_NEGO;
2620}
2621
2622static void esp_set_period(struct scsi_target *target, int period)
2623{
2624	struct Scsi_Host *host = dev_to_shost(target->dev.parent);
2625	struct esp *esp = host_to_esp(host);
2626	struct esp_target_data *tp = &esp->target[target->id];
2627
2628	tp->nego_goal_period = period;
2629	tp->flags |= ESP_TGT_CHECK_NEGO;
2630}
2631
2632static void esp_set_width(struct scsi_target *target, int width)
2633{
2634	struct Scsi_Host *host = dev_to_shost(target->dev.parent);
2635	struct esp *esp = host_to_esp(host);
2636	struct esp_target_data *tp = &esp->target[target->id];
2637
2638	tp->nego_goal_width = (width ? 1 : 0);
2639	tp->flags |= ESP_TGT_CHECK_NEGO;
2640}
2641
/* SPI transport attribute hooks.  The setters only record negotiation
 * goals and raise ESP_TGT_CHECK_NEGO; the flag is presumably consumed
 * by the command issue path elsewhere in this driver.
 */
static struct spi_function_template esp_transport_ops = {
	.set_offset		= esp_set_offset,
	.show_offset		= 1,
	.set_period		= esp_set_period,
	.show_period		= 1,
	.set_width		= esp_set_width,
	.show_width		= 1,
	.get_signalling		= esp_get_signalling,
};
2651
2652static int __init esp_init(void)
2653{
2654	BUILD_BUG_ON(sizeof(struct scsi_pointer) <
2655		     sizeof(struct esp_cmd_priv));
2656
2657	esp_transport_template = spi_attach_transport(&esp_transport_ops);
2658	if (!esp_transport_template)
2659		return -ENODEV;
2660
2661	return 0;
2662}
2663
/* Module exit: release the SPI transport class attached in esp_init(). */
static void __exit esp_exit(void)
{
	spi_release_transport(esp_transport_template);
}
2668
/* Standard module metadata. */
MODULE_DESCRIPTION("ESP SCSI driver core");
MODULE_AUTHOR("David S. Miller (davem@davemloft.net)");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);

/* Seconds to sleep after a bus reset before talking to devices. */
module_param(esp_bus_reset_settle, int, 0);
MODULE_PARM_DESC(esp_bus_reset_settle,
		 "ESP scsi bus reset delay in seconds");

/* Bitmask selecting which esp_log_*() categories are printed
 * (see the ESP_DEBUG_* flags at the top of this file).
 */
module_param(esp_debug, int, 0);
MODULE_PARM_DESC(esp_debug,
"ESP bitmapped debugging message enable value:\n"
"	0x00000001	Log interrupt events\n"
"	0x00000002	Log scsi commands\n"
"	0x00000004	Log resets\n"
"	0x00000008	Log message in events\n"
"	0x00000010	Log message out events\n"
"	0x00000020	Log command completion\n"
"	0x00000040	Log disconnects\n"
"	0x00000080	Log data start\n"
"	0x00000100	Log data done\n"
"	0x00000200	Log reconnects\n"
"	0x00000400	Log auto-sense data\n"
);

module_init(esp_init);
module_exit(esp_exit);
2696