• Home
  • History
  • Annotate
  • Line#
  • Navigate
  • Raw
  • Download
  • only in /asuswrt-rt-n18u-9.0.0.4.380.2695/release/src-rt-6.x.4708/linux/linux-2.6/drivers/scsi/
1/* esp_scsi.c: ESP SCSI driver.
2 *
3 * Copyright (C) 2007 David S. Miller (davem@davemloft.net)
4 */
5
6#include <linux/kernel.h>
7#include <linux/types.h>
8#include <linux/slab.h>
9#include <linux/delay.h>
10#include <linux/list.h>
11#include <linux/completion.h>
12#include <linux/kallsyms.h>
13#include <linux/module.h>
14#include <linux/moduleparam.h>
15#include <linux/init.h>
16#include <linux/irqreturn.h>
17
18#include <asm/irq.h>
19#include <asm/io.h>
20#include <asm/dma.h>
21
22#include <scsi/scsi.h>
23#include <scsi/scsi_host.h>
24#include <scsi/scsi_cmnd.h>
25#include <scsi/scsi_device.h>
26#include <scsi/scsi_tcq.h>
27#include <scsi/scsi_dbg.h>
28#include <scsi/scsi_transport_spi.h>
29
30#include "esp_scsi.h"
31
32#define DRV_MODULE_NAME		"esp"
33#define PFX DRV_MODULE_NAME	": "
34#define DRV_VERSION		"2.000"
35#define DRV_MODULE_RELDATE	"April 19, 2007"
36
/* SCSI bus reset settle time in seconds.  */
static int esp_bus_reset_settle = 3;

/* Debug message-class bitmask; setting a bit enables the matching
 * esp_log_* macro below (typically controlled via the "esp_debug"
 * module parameter).
 */
static u32 esp_debug;
#define ESP_DEBUG_INTR		0x00000001
#define ESP_DEBUG_SCSICMD	0x00000002
#define ESP_DEBUG_RESET		0x00000004
#define ESP_DEBUG_MSGIN		0x00000008
#define ESP_DEBUG_MSGOUT	0x00000010
#define ESP_DEBUG_CMDDONE	0x00000020
#define ESP_DEBUG_DISCONNECT	0x00000040
#define ESP_DEBUG_DATASTART	0x00000080
#define ESP_DEBUG_DATADONE	0x00000100
#define ESP_DEBUG_RECONNECT	0x00000200
#define ESP_DEBUG_AUTOSENSE	0x00000400
52
/* Conditional logging helpers: each esp_log_* macro emits its printk
 * only when the corresponding ESP_DEBUG_* bit is set in esp_debug.
 */
#define esp_log_intr(f, a...) \
do {	if (esp_debug & ESP_DEBUG_INTR) \
		printk(f, ## a); \
} while (0)

#define esp_log_reset(f, a...) \
do {	if (esp_debug & ESP_DEBUG_RESET) \
		printk(f, ## a); \
} while (0)

#define esp_log_msgin(f, a...) \
do {	if (esp_debug & ESP_DEBUG_MSGIN) \
		printk(f, ## a); \
} while (0)

#define esp_log_msgout(f, a...) \
do {	if (esp_debug & ESP_DEBUG_MSGOUT) \
		printk(f, ## a); \
} while (0)

#define esp_log_cmddone(f, a...) \
do {	if (esp_debug & ESP_DEBUG_CMDDONE) \
		printk(f, ## a); \
} while (0)

#define esp_log_disconnect(f, a...) \
do {	if (esp_debug & ESP_DEBUG_DISCONNECT) \
		printk(f, ## a); \
} while (0)

#define esp_log_datastart(f, a...) \
do {	if (esp_debug & ESP_DEBUG_DATASTART) \
		printk(f, ## a); \
} while (0)

#define esp_log_datadone(f, a...) \
do {	if (esp_debug & ESP_DEBUG_DATADONE) \
		printk(f, ## a); \
} while (0)

#define esp_log_reconnect(f, a...) \
do {	if (esp_debug & ESP_DEBUG_RECONNECT) \
		printk(f, ## a); \
} while (0)

#define esp_log_autosense(f, a...) \
do {	if (esp_debug & ESP_DEBUG_AUTOSENSE) \
		printk(f, ## a); \
} while (0)

/* Chip register accessors indirect through the bus-glue driver's ops;
 * both expect a local variable named "esp" to be in scope at the
 * expansion site.
 */
#define esp_read8(REG)		esp->ops->esp_read8(esp, REG)
#define esp_write8(VAL,REG)	esp->ops->esp_write8(esp, VAL, REG)
105
106static void esp_log_fill_regs(struct esp *esp,
107			      struct esp_event_ent *p)
108{
109	p->sreg = esp->sreg;
110	p->seqreg = esp->seqreg;
111	p->sreg2 = esp->sreg2;
112	p->ireg = esp->ireg;
113	p->select_state = esp->select_state;
114	p->event = esp->event;
115}
116
117void scsi_esp_cmd(struct esp *esp, u8 val)
118{
119	struct esp_event_ent *p;
120	int idx = esp->esp_event_cur;
121
122	p = &esp->esp_event_log[idx];
123	p->type = ESP_EVENT_TYPE_CMD;
124	p->val = val;
125	esp_log_fill_regs(esp, p);
126
127	esp->esp_event_cur = (idx + 1) & (ESP_EVENT_LOG_SZ - 1);
128
129	esp_write8(val, ESP_CMD);
130}
131EXPORT_SYMBOL(scsi_esp_cmd);
132
133static void esp_event(struct esp *esp, u8 val)
134{
135	struct esp_event_ent *p;
136	int idx = esp->esp_event_cur;
137
138	p = &esp->esp_event_log[idx];
139	p->type = ESP_EVENT_TYPE_EVENT;
140	p->val = val;
141	esp_log_fill_regs(esp, p);
142
143	esp->esp_event_cur = (idx + 1) & (ESP_EVENT_LOG_SZ - 1);
144
145	esp->event = val;
146}
147
148static void esp_dump_cmd_log(struct esp *esp)
149{
150	int idx = esp->esp_event_cur;
151	int stop = idx;
152
153	printk(KERN_INFO PFX "esp%d: Dumping command log\n",
154	       esp->host->unique_id);
155	do {
156		struct esp_event_ent *p = &esp->esp_event_log[idx];
157
158		printk(KERN_INFO PFX "esp%d: ent[%d] %s ",
159		       esp->host->unique_id, idx,
160		       p->type == ESP_EVENT_TYPE_CMD ? "CMD" : "EVENT");
161
162		printk("val[%02x] sreg[%02x] seqreg[%02x] "
163		       "sreg2[%02x] ireg[%02x] ss[%02x] event[%02x]\n",
164		       p->val, p->sreg, p->seqreg,
165		       p->sreg2, p->ireg, p->select_state, p->event);
166
167		idx = (idx + 1) & (ESP_EVENT_LOG_SZ - 1);
168	} while (idx != stop);
169}
170
171static void esp_flush_fifo(struct esp *esp)
172{
173	scsi_esp_cmd(esp, ESP_CMD_FLUSH);
174	if (esp->rev == ESP236) {
175		int lim = 1000;
176
177		while (esp_read8(ESP_FFLAGS) & ESP_FF_FBYTES) {
178			if (--lim == 0) {
179				printk(KERN_ALERT PFX "esp%d: ESP_FF_BYTES "
180				       "will not clear!\n",
181				       esp->host->unique_id);
182				break;
183			}
184			udelay(1);
185		}
186	}
187}
188
189static void hme_read_fifo(struct esp *esp)
190{
191	int fcnt = esp_read8(ESP_FFLAGS) & ESP_FF_FBYTES;
192	int idx = 0;
193
194	while (fcnt--) {
195		esp->fifo[idx++] = esp_read8(ESP_FDATA);
196		esp->fifo[idx++] = esp_read8(ESP_FDATA);
197	}
198	if (esp->sreg2 & ESP_STAT2_F1BYTE) {
199		esp_write8(0, ESP_FDATA);
200		esp->fifo[idx++] = esp_read8(ESP_FDATA);
201		scsi_esp_cmd(esp, ESP_CMD_FLUSH);
202	}
203	esp->fifo_cnt = idx;
204}
205
206static void esp_set_all_config3(struct esp *esp, u8 val)
207{
208	int i;
209
210	for (i = 0; i < ESP_MAX_TARGET; i++)
211		esp->target[i].esp_config3 = val;
212}
213
/* Reset the ESP chip, _not_ the SCSI bus.
 *
 * Performs the chip-reset + NOP sequence, probes the exact chip
 * revision (for FAST parts), derives min/max sync periods from the
 * clock cycle time, and reprograms the configuration registers for
 * the detected revision.  The ordering of register accesses here is
 * dictated by the hardware; do not reorder.
 */
static void esp_reset_esp(struct esp *esp)
{
	u8 family_code, version;

	/* Now reset the ESP chip */
	scsi_esp_cmd(esp, ESP_CMD_RC);
	scsi_esp_cmd(esp, ESP_CMD_NULL | ESP_CMD_DMA);
	if (esp->rev == FAST)
		esp_write8(ESP_CONFIG2_FENAB, ESP_CFG2);
	scsi_esp_cmd(esp, ESP_CMD_NULL | ESP_CMD_DMA);

	/* This is the only point at which it is reliable to read
	 * the ID-code for a fast ESP chip variants.
	 */
	esp->max_period = ((35 * esp->ccycle) / 1000);
	if (esp->rev == FAST) {
		version = esp_read8(ESP_UID);
		family_code = (version & 0xf8) >> 3;
		if (family_code == 0x02)
			esp->rev = FAS236;
		else if (family_code == 0x0a)
			esp->rev = FASHME; /* Version is usually '5'. */
		else
			esp->rev = FAS100A;
		esp->min_period = ((4 * esp->ccycle) / 1000);
	} else {
		esp->min_period = ((5 * esp->ccycle) / 1000);
	}
	/* Round periods up to units of 4 nanoseconds. */
	esp->max_period = (esp->max_period + 3)>>2;
	esp->min_period = (esp->min_period + 3)>>2;

	esp_write8(esp->config1, ESP_CFG1);
	switch (esp->rev) {
	case ESP100:
		/* nothing to do */
		break;

	case ESP100A:
		esp_write8(esp->config2, ESP_CFG2);
		break;

	case ESP236:
		/* Slow 236 */
		esp_write8(esp->config2, ESP_CFG2);
		esp->prev_cfg3 = esp->target[0].esp_config3;
		esp_write8(esp->prev_cfg3, ESP_CFG3);
		break;

	case FASHME:
		esp->config2 |= (ESP_CONFIG2_HME32 | ESP_CONFIG2_HMEFENAB);
		/* fallthrough... */

	case FAS236:
		/* Fast 236 or HME */
		esp_write8(esp->config2, ESP_CFG2);
		if (esp->rev == FASHME) {
			u8 cfg3 = esp->target[0].esp_config3;

			cfg3 |= ESP_CONFIG3_FCLOCK | ESP_CONFIG3_OBPUSH;
			if (esp->scsi_id >= 8)
				cfg3 |= ESP_CONFIG3_IDBIT3;
			esp_set_all_config3(esp, cfg3);
		} else {
			u32 cfg3 = esp->target[0].esp_config3;

			cfg3 |= ESP_CONFIG3_FCLK;
			esp_set_all_config3(esp, cfg3);
		}
		esp->prev_cfg3 = esp->target[0].esp_config3;
		esp_write8(esp->prev_cfg3, ESP_CFG3);
		if (esp->rev == FASHME) {
			esp->radelay = 80;
		} else {
			if (esp->flags & ESP_FLAG_DIFFERENTIAL)
				esp->radelay = 0;
			else
				esp->radelay = 96;
		}
		break;

	case FAS100A:
		/* Fast 100a */
		esp_write8(esp->config2, ESP_CFG2);
		esp_set_all_config3(esp,
				    (esp->target[0].esp_config3 |
				     ESP_CONFIG3_FCLOCK));
		esp->prev_cfg3 = esp->target[0].esp_config3;
		esp_write8(esp->prev_cfg3, ESP_CFG3);
		esp->radelay = 32;
		break;

	default:
		break;
	}

	/* Reload the configuration registers */
	esp_write8(esp->cfact, ESP_CFACT);

	esp->prev_stp = 0;
	esp_write8(esp->prev_stp, ESP_STP);

	esp->prev_soff = 0;
	esp_write8(esp->prev_soff, ESP_SOFF);

	esp_write8(esp->neg_defp, ESP_TIMEO);

	/* Eat any bitrot in the chip */
	esp_read8(ESP_INTRPT);
	udelay(100);
}
325
326static void esp_map_dma(struct esp *esp, struct scsi_cmnd *cmd)
327{
328	struct esp_cmd_priv *spriv = ESP_CMD_PRIV(cmd);
329	struct scatterlist *sg = scsi_sglist(cmd);
330	int dir = cmd->sc_data_direction;
331	int total, i;
332
333	if (dir == DMA_NONE)
334		return;
335
336	spriv->u.num_sg = esp->ops->map_sg(esp, sg, scsi_sg_count(cmd), dir);
337	spriv->cur_residue = sg_dma_len(sg);
338	spriv->cur_sg = sg;
339
340	total = 0;
341	for (i = 0; i < spriv->u.num_sg; i++)
342		total += sg_dma_len(&sg[i]);
343	spriv->tot_residue = total;
344}
345
346static dma_addr_t esp_cur_dma_addr(struct esp_cmd_entry *ent,
347				   struct scsi_cmnd *cmd)
348{
349	struct esp_cmd_priv *p = ESP_CMD_PRIV(cmd);
350
351	if (ent->flags & ESP_CMD_FLAG_AUTOSENSE) {
352		return ent->sense_dma +
353			(ent->sense_ptr - cmd->sense_buffer);
354	}
355
356	return sg_dma_address(p->cur_sg) +
357		(sg_dma_len(p->cur_sg) -
358		 p->cur_residue);
359}
360
361static unsigned int esp_cur_dma_len(struct esp_cmd_entry *ent,
362				    struct scsi_cmnd *cmd)
363{
364	struct esp_cmd_priv *p = ESP_CMD_PRIV(cmd);
365
366	if (ent->flags & ESP_CMD_FLAG_AUTOSENSE) {
367		return SCSI_SENSE_BUFFERSIZE -
368			(ent->sense_ptr - cmd->sense_buffer);
369	}
370	return p->cur_residue;
371}
372
/* Account for "len" bytes just transferred: advance the sense pointer
 * (autosense) or decrement the segment/total residues, stepping to
 * the next scatterlist segment when the current one is exhausted.
 * A negative residue indicates a chip/DMA accounting bug; clamp to
 * zero and log rather than corrupting memory.
 */
static void esp_advance_dma(struct esp *esp, struct esp_cmd_entry *ent,
			    struct scsi_cmnd *cmd, unsigned int len)
{
	struct esp_cmd_priv *p = ESP_CMD_PRIV(cmd);

	if (ent->flags & ESP_CMD_FLAG_AUTOSENSE) {
		ent->sense_ptr += len;
		return;
	}

	p->cur_residue -= len;
	p->tot_residue -= len;
	if (p->cur_residue < 0 || p->tot_residue < 0) {
		printk(KERN_ERR PFX "esp%d: Data transfer overflow.\n",
		       esp->host->unique_id);
		printk(KERN_ERR PFX "esp%d: cur_residue[%d] tot_residue[%d] "
		       "len[%u]\n",
		       esp->host->unique_id,
		       p->cur_residue, p->tot_residue, len);
		p->cur_residue = 0;
		p->tot_residue = 0;
	}
	/* Segment fully consumed but more data remains: move on. */
	if (!p->cur_residue && p->tot_residue) {
		p->cur_sg++;
		p->cur_residue = sg_dma_len(p->cur_sg);
	}
}
400
401static void esp_unmap_dma(struct esp *esp, struct scsi_cmnd *cmd)
402{
403	struct esp_cmd_priv *spriv = ESP_CMD_PRIV(cmd);
404	int dir = cmd->sc_data_direction;
405
406	if (dir == DMA_NONE)
407		return;
408
409	esp->ops->unmap_sg(esp, scsi_sglist(cmd), spriv->u.num_sg, dir);
410}
411
412static void esp_save_pointers(struct esp *esp, struct esp_cmd_entry *ent)
413{
414	struct scsi_cmnd *cmd = ent->cmd;
415	struct esp_cmd_priv *spriv = ESP_CMD_PRIV(cmd);
416
417	if (ent->flags & ESP_CMD_FLAG_AUTOSENSE) {
418		ent->saved_sense_ptr = ent->sense_ptr;
419		return;
420	}
421	ent->saved_cur_residue = spriv->cur_residue;
422	ent->saved_cur_sg = spriv->cur_sg;
423	ent->saved_tot_residue = spriv->tot_residue;
424}
425
426static void esp_restore_pointers(struct esp *esp, struct esp_cmd_entry *ent)
427{
428	struct scsi_cmnd *cmd = ent->cmd;
429	struct esp_cmd_priv *spriv = ESP_CMD_PRIV(cmd);
430
431	if (ent->flags & ESP_CMD_FLAG_AUTOSENSE) {
432		ent->sense_ptr = ent->saved_sense_ptr;
433		return;
434	}
435	spriv->cur_residue = ent->saved_cur_residue;
436	spriv->cur_sg = ent->saved_cur_sg;
437	spriv->tot_residue = ent->saved_tot_residue;
438}
439
440static void esp_check_command_len(struct esp *esp, struct scsi_cmnd *cmd)
441{
442	if (cmd->cmd_len == 6 ||
443	    cmd->cmd_len == 10 ||
444	    cmd->cmd_len == 12) {
445		esp->flags &= ~ESP_FLAG_DOING_SLOWCMD;
446	} else {
447		esp->flags |= ESP_FLAG_DOING_SLOWCMD;
448	}
449}
450
451static void esp_write_tgt_config3(struct esp *esp, int tgt)
452{
453	if (esp->rev > ESP100A) {
454		u8 val = esp->target[tgt].esp_config3;
455
456		if (val != esp->prev_cfg3) {
457			esp->prev_cfg3 = val;
458			esp_write8(val, ESP_CFG3);
459		}
460	}
461}
462
463static void esp_write_tgt_sync(struct esp *esp, int tgt)
464{
465	u8 off = esp->target[tgt].esp_offset;
466	u8 per = esp->target[tgt].esp_period;
467
468	if (off != esp->prev_soff) {
469		esp->prev_soff = off;
470		esp_write8(off, ESP_SOFF);
471	}
472	if (per != esp->prev_stp) {
473		esp->prev_stp = per;
474		esp_write8(per, ESP_STP);
475	}
476}
477
478static u32 esp_dma_length_limit(struct esp *esp, u32 dma_addr, u32 dma_len)
479{
480	if (esp->rev == FASHME) {
481		/* Arbitrary segment boundaries, 24-bit counts.  */
482		if (dma_len > (1U << 24))
483			dma_len = (1U << 24);
484	} else {
485		u32 base, end;
486
487		/* ESP chip limits other variants by 16-bits of transfer
488		 * count.  Actually on FAS100A and FAS236 we could get
489		 * 24-bits of transfer count by enabling ESP_CONFIG2_FENAB
490		 * in the ESP_CFG2 register but that causes other unwanted
491		 * changes so we don't use it currently.
492		 */
493		if (dma_len > (1U << 16))
494			dma_len = (1U << 16);
495
496		/* All of the DMA variants hooked up to these chips
497		 * cannot handle crossing a 24-bit address boundary.
498		 */
499		base = dma_addr & ((1U << 24) - 1U);
500		end = base + dma_len;
501		if (end > (1U << 24))
502			end = (1U <<24);
503		dma_len = end - base;
504	}
505	return dma_len;
506}
507
508static int esp_need_to_nego_wide(struct esp_target_data *tp)
509{
510	struct scsi_target *target = tp->starget;
511
512	return spi_width(target) != tp->nego_goal_width;
513}
514
515static int esp_need_to_nego_sync(struct esp_target_data *tp)
516{
517	struct scsi_target *target = tp->starget;
518
519	/* When offset is zero, period is "don't care".  */
520	if (!spi_offset(target) && !tp->nego_goal_offset)
521		return 0;
522
523	if (spi_offset(target) == tp->nego_goal_offset &&
524	    spi_period(target) == tp->nego_goal_period)
525		return 0;
526
527	return 1;
528}
529
530static int esp_alloc_lun_tag(struct esp_cmd_entry *ent,
531			     struct esp_lun_data *lp)
532{
533	if (!ent->tag[0]) {
534		/* Non-tagged, slot already taken?  */
535		if (lp->non_tagged_cmd)
536			return -EBUSY;
537
538		if (lp->hold) {
539			/* We are being held by active tagged
540			 * commands.
541			 */
542			if (lp->num_tagged)
543				return -EBUSY;
544
545			/* Tagged commands completed, we can unplug
546			 * the queue and run this untagged command.
547			 */
548			lp->hold = 0;
549		} else if (lp->num_tagged) {
550			/* Plug the queue until num_tagged decreases
551			 * to zero in esp_free_lun_tag.
552			 */
553			lp->hold = 1;
554			return -EBUSY;
555		}
556
557		lp->non_tagged_cmd = ent;
558		return 0;
559	} else {
560		/* Tagged command, see if blocked by a
561		 * non-tagged one.
562		 */
563		if (lp->non_tagged_cmd || lp->hold)
564			return -EBUSY;
565	}
566
567	BUG_ON(lp->tagged_cmds[ent->tag[1]]);
568
569	lp->tagged_cmds[ent->tag[1]] = ent;
570	lp->num_tagged++;
571
572	return 0;
573}
574
575static void esp_free_lun_tag(struct esp_cmd_entry *ent,
576			     struct esp_lun_data *lp)
577{
578	if (ent->tag[0]) {
579		BUG_ON(lp->tagged_cmds[ent->tag[1]] != ent);
580		lp->tagged_cmds[ent->tag[1]] = NULL;
581		lp->num_tagged--;
582	} else {
583		BUG_ON(lp->non_tagged_cmd != ent);
584		lp->non_tagged_cmd = NULL;
585	}
586}
587
/* When a contingent allegiance condition is created, we force feed a
 * REQUEST_SENSE command to the device to fetch the sense data.  I
 * tried many other schemes, relying on the scsi error handling layer
 * to send out the REQUEST_SENSE automatically, but this was difficult
 * to get right especially in the presence of applications like smartd
 * which use SG_IO to send out their own REQUEST_SENSE commands.
 */
/* Issue a REQUEST_SENSE to the device that just raised a check
 * condition: map the midlayer sense buffer for DMA (once), build the
 * IDENTIFY + CDB bytes in the command block, and start a DMA'd
 * selection.  The completion path detects ESP_CMD_FLAG_AUTOSENSE and
 * merges the sense data back into the original command's result.
 */
static void esp_autosense(struct esp *esp, struct esp_cmd_entry *ent)
{
	struct scsi_cmnd *cmd = ent->cmd;
	struct scsi_device *dev = cmd->device;
	int tgt, lun;
	u8 *p, val;

	tgt = dev->id;
	lun = dev->lun;


	if (!ent->sense_ptr) {
		esp_log_autosense("esp%d: Doing auto-sense for "
				  "tgt[%d] lun[%d]\n",
				  esp->host->unique_id, tgt, lun);

		ent->sense_ptr = cmd->sense_buffer;
		ent->sense_dma = esp->ops->map_single(esp,
						      ent->sense_ptr,
						      SCSI_SENSE_BUFFERSIZE,
						      DMA_FROM_DEVICE);
	}
	ent->saved_sense_ptr = ent->sense_ptr;

	esp->active_cmd = ent;

	p = esp->command_block;
	esp->msg_out_len = 0;

	*p++ = IDENTIFY(0, lun);
	*p++ = REQUEST_SENSE;
	/* SCSI-2 and older want the LUN repeated in CDB byte 1. */
	*p++ = ((dev->scsi_level <= SCSI_2) ?
		(lun << 5) : 0);
	*p++ = 0;
	*p++ = 0;
	*p++ = SCSI_SENSE_BUFFERSIZE;
	*p++ = 0;

	esp->select_state = ESP_SELECT_BASIC;

	val = tgt;
	if (esp->rev == FASHME)
		val |= ESP_BUSID_RESELID | ESP_BUSID_CTR32BIT;
	esp_write8(val, ESP_BUSID);

	esp_write_tgt_sync(esp, tgt);
	esp_write_tgt_config3(esp, tgt);

	val = (p - esp->command_block);

	/* FASHME wants a clean fifo before a DMA'd selection. */
	if (esp->rev == FASHME)
		scsi_esp_cmd(esp, ESP_CMD_FLUSH);
	esp->ops->send_dma_cmd(esp, esp->command_block_dma,
			       val, 16, 0, ESP_CMD_DMA | ESP_CMD_SELA);
}
650
651static struct esp_cmd_entry *find_and_prep_issuable_command(struct esp *esp)
652{
653	struct esp_cmd_entry *ent;
654
655	list_for_each_entry(ent, &esp->queued_cmds, list) {
656		struct scsi_cmnd *cmd = ent->cmd;
657		struct scsi_device *dev = cmd->device;
658		struct esp_lun_data *lp = dev->hostdata;
659
660		if (ent->flags & ESP_CMD_FLAG_AUTOSENSE) {
661			ent->tag[0] = 0;
662			ent->tag[1] = 0;
663			return ent;
664		}
665
666		if (!scsi_populate_tag_msg(cmd, &ent->tag[0])) {
667			ent->tag[0] = 0;
668			ent->tag[1] = 0;
669		}
670
671		if (esp_alloc_lun_tag(ent, lp) < 0)
672			continue;
673
674		return ent;
675	}
676
677	return NULL;
678}
679
/* Start the next issuable queued command on the chip, if any: builds
 * the IDENTIFY / tag / CDB bytes in esp->command_block and kicks off
 * a DMA'd selection.  No-op while a command is already active or a
 * bus reset is pending.  Commands needing negotiation messages or
 * non-standard CDB lengths take the "slow command" path, where the
 * CDB is delivered byte-at-a-time after a select-with-stop.
 */
static void esp_maybe_execute_command(struct esp *esp)
{
	struct esp_target_data *tp;
	struct esp_lun_data *lp;
	struct scsi_device *dev;
	struct scsi_cmnd *cmd;
	struct esp_cmd_entry *ent;
	int tgt, lun, i;
	u32 val, start_cmd;
	u8 *p;

	if (esp->active_cmd ||
	    (esp->flags & ESP_FLAG_RESETTING))
		return;

	ent = find_and_prep_issuable_command(esp);
	if (!ent)
		return;

	/* Autosense uses its own fixed REQUEST_SENSE setup path. */
	if (ent->flags & ESP_CMD_FLAG_AUTOSENSE) {
		esp_autosense(esp, ent);
		return;
	}

	cmd = ent->cmd;
	dev = cmd->device;
	tgt = dev->id;
	lun = dev->lun;
	tp = &esp->target[tgt];
	lp = dev->hostdata;

	/* Move the entry from the queued list to the active list. */
	list_del(&ent->list);
	list_add(&ent->list, &esp->active_cmds);

	esp->active_cmd = ent;

	esp_map_dma(esp, cmd);
	esp_save_pointers(esp, ent);

	esp_check_command_len(esp, cmd);

	p = esp->command_block;

	esp->msg_out_len = 0;
	if (tp->flags & ESP_TGT_CHECK_NEGO) {
		/* Need to negotiate.  If the target is broken
		 * go for synchronous transfers and non-wide.
		 */
		if (tp->flags & ESP_TGT_BROKEN) {
			tp->flags &= ~ESP_TGT_DISCONNECT;
			tp->nego_goal_period = 0;
			tp->nego_goal_offset = 0;
			tp->nego_goal_width = 0;
			tp->nego_goal_tags = 0;
		}

		/* If the settings are not changing, skip this.  */
		if (spi_width(tp->starget) == tp->nego_goal_width &&
		    spi_period(tp->starget) == tp->nego_goal_period &&
		    spi_offset(tp->starget) == tp->nego_goal_offset) {
			tp->flags &= ~ESP_TGT_CHECK_NEGO;
			goto build_identify;
		}

		if (esp->rev == FASHME && esp_need_to_nego_wide(tp)) {
			esp->msg_out_len =
				spi_populate_width_msg(&esp->msg_out[0],
						       (tp->nego_goal_width ?
							1 : 0));
			tp->flags |= ESP_TGT_NEGO_WIDE;
		} else if (esp_need_to_nego_sync(tp)) {
			esp->msg_out_len =
				spi_populate_sync_msg(&esp->msg_out[0],
						      tp->nego_goal_period,
						      tp->nego_goal_offset);
			tp->flags |= ESP_TGT_NEGO_SYNC;
		} else {
			tp->flags &= ~ESP_TGT_CHECK_NEGO;
		}

		/* Process it like a slow command.  */
		if (tp->flags & (ESP_TGT_NEGO_WIDE | ESP_TGT_NEGO_SYNC))
			esp->flags |= ESP_FLAG_DOING_SLOWCMD;
	}

build_identify:
	/* If we don't have a lun-data struct yet, we're probing
	 * so do not disconnect.  Also, do not disconnect unless
	 * we have a tag on this command.
	 */
	if (lp && (tp->flags & ESP_TGT_DISCONNECT) && ent->tag[0])
		*p++ = IDENTIFY(1, lun);
	else
		*p++ = IDENTIFY(0, lun);

	if (ent->tag[0] && esp->rev == ESP100) {
		/* ESP100 lacks select w/atn3 command, use select
		 * and stop instead.
		 */
		esp->flags |= ESP_FLAG_DOING_SLOWCMD;
	}

	if (!(esp->flags & ESP_FLAG_DOING_SLOWCMD)) {
		start_cmd = ESP_CMD_DMA | ESP_CMD_SELA;
		if (ent->tag[0]) {
			*p++ = ent->tag[0];
			*p++ = ent->tag[1];

			start_cmd = ESP_CMD_DMA | ESP_CMD_SA3;
		}

		for (i = 0; i < cmd->cmd_len; i++)
			*p++ = cmd->cmnd[i];

		esp->select_state = ESP_SELECT_BASIC;
	} else {
		esp->cmd_bytes_left = cmd->cmd_len;
		esp->cmd_bytes_ptr = &cmd->cmnd[0];

		if (ent->tag[0]) {
			/* Tag bytes must precede any negotiation
			 * message already staged in msg_out[].
			 */
			for (i = esp->msg_out_len - 1;
			     i >= 0; i--)
				esp->msg_out[i + 2] = esp->msg_out[i];
			esp->msg_out[0] = ent->tag[0];
			esp->msg_out[1] = ent->tag[1];
			esp->msg_out_len += 2;
		}

		start_cmd = ESP_CMD_DMA | ESP_CMD_SELAS;
		esp->select_state = ESP_SELECT_MSGOUT;
	}
	val = tgt;
	if (esp->rev == FASHME)
		val |= ESP_BUSID_RESELID | ESP_BUSID_CTR32BIT;
	esp_write8(val, ESP_BUSID);

	esp_write_tgt_sync(esp, tgt);
	esp_write_tgt_config3(esp, tgt);

	val = (p - esp->command_block);

	if (esp_debug & ESP_DEBUG_SCSICMD) {
		printk("ESP: tgt[%d] lun[%d] scsi_cmd [ ", tgt, lun);
		for (i = 0; i < cmd->cmd_len; i++)
			printk("%02x ", cmd->cmnd[i]);
		printk("]\n");
	}

	/* FASHME wants a clean fifo before a DMA'd selection. */
	if (esp->rev == FASHME)
		scsi_esp_cmd(esp, ESP_CMD_FLUSH);
	esp->ops->send_dma_cmd(esp, esp->command_block_dma,
			       val, 16, 0, start_cmd);
}
833
834static struct esp_cmd_entry *esp_get_ent(struct esp *esp)
835{
836	struct list_head *head = &esp->esp_cmd_pool;
837	struct esp_cmd_entry *ret;
838
839	if (list_empty(head)) {
840		ret = kzalloc(sizeof(struct esp_cmd_entry), GFP_ATOMIC);
841	} else {
842		ret = list_entry(head->next, struct esp_cmd_entry, list);
843		list_del(&ret->list);
844		memset(ret, 0, sizeof(*ret));
845	}
846	return ret;
847}
848
/* Return a command entry to the free pool for reuse by esp_get_ent(). */
static void esp_put_ent(struct esp *esp, struct esp_cmd_entry *ent)
{
	list_add(&ent->list, &esp->esp_cmd_pool);
}
853
/* Finish a command: release DMA and tag resources, translate
 * autosense completions back into CHECK_CONDITION results with sense
 * data attached, notify the midlayer, recycle the entry, and try to
 * start the next queued command.
 */
static void esp_cmd_is_done(struct esp *esp, struct esp_cmd_entry *ent,
			    struct scsi_cmnd *cmd, unsigned int result)
{
	struct scsi_device *dev = cmd->device;
	int tgt = dev->id;
	int lun = dev->lun;

	esp->active_cmd = NULL;
	esp_unmap_dma(esp, cmd);
	esp_free_lun_tag(ent, dev->hostdata);
	cmd->result = result;

	/* Wake up an error-handler thread waiting on this command. */
	if (ent->eh_done) {
		complete(ent->eh_done);
		ent->eh_done = NULL;
	}

	if (ent->flags & ESP_CMD_FLAG_AUTOSENSE) {
		esp->ops->unmap_single(esp, ent->sense_dma,
				       SCSI_SENSE_BUFFERSIZE, DMA_FROM_DEVICE);
		ent->sense_ptr = NULL;

		/* Restore the message/status bytes to what we actually
		 * saw originally.  Also, report that we are providing
		 * the sense data.
		 */
		cmd->result = ((DRIVER_SENSE << 24) |
			       (DID_OK << 16) |
			       (COMMAND_COMPLETE << 8) |
			       (SAM_STAT_CHECK_CONDITION << 0));

		ent->flags &= ~ESP_CMD_FLAG_AUTOSENSE;
		if (esp_debug & ESP_DEBUG_AUTOSENSE) {
			int i;

			printk("esp%d: tgt[%d] lun[%d] AUTO SENSE[ ",
			       esp->host->unique_id, tgt, lun);
			for (i = 0; i < 18; i++)
				printk("%02x ", cmd->sense_buffer[i]);
			printk("]\n");
		}
	}

	cmd->scsi_done(cmd);

	list_del(&ent->list);
	esp_put_ent(esp, ent);

	esp_maybe_execute_command(esp);
}
904
/* Pack a scsi_cmnd result word: SCSI status byte in bits 0-7, the
 * message byte in bits 8-15 and the driver code in bits 16-23.
 */
static unsigned int compose_result(unsigned int status, unsigned int message,
				   unsigned int driver_code)
{
	unsigned int res = driver_code << 16;

	res |= message << 8;
	res |= status;
	return res;
}
910
/* Target returned QUEUE FULL: have the midlayer track it and shrink
 * the device queue depth to the currently outstanding tagged count
 * minus the command that just bounced.
 */
static void esp_event_queue_full(struct esp *esp, struct esp_cmd_entry *ent)
{
	struct scsi_device *dev = ent->cmd->device;
	struct esp_lun_data *lp = dev->hostdata;

	scsi_track_queue_full(dev, lp->num_tagged - 1);
}
918
919static int esp_queuecommand(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *))
920{
921	struct scsi_device *dev = cmd->device;
922	struct esp *esp = shost_priv(dev->host);
923	struct esp_cmd_priv *spriv;
924	struct esp_cmd_entry *ent;
925
926	ent = esp_get_ent(esp);
927	if (!ent)
928		return SCSI_MLQUEUE_HOST_BUSY;
929
930	ent->cmd = cmd;
931
932	cmd->scsi_done = done;
933
934	spriv = ESP_CMD_PRIV(cmd);
935	spriv->u.dma_addr = ~(dma_addr_t)0x0;
936
937	list_add_tail(&ent->list, &esp->queued_cmds);
938
939	esp_maybe_execute_command(esp);
940
941	return 0;
942}
943
944static int esp_check_gross_error(struct esp *esp)
945{
946	if (esp->sreg & ESP_STAT_SPAM) {
947		/* Gross Error, could be one of:
948		 * - top of fifo overwritten
949		 * - top of command register overwritten
950		 * - DMA programmed with wrong direction
951		 * - improper phase change
952		 */
953		printk(KERN_ERR PFX "esp%d: Gross error sreg[%02x]\n",
954		       esp->host->unique_id, esp->sreg);
955		return 1;
956	}
957	return 0;
958}
959
/* Classify a possibly-spurious interrupt.  Returns 0 when the
 * interrupt appears genuine, 1 when it was a SCSI bus reset (INTRPT
 * consumed here), and -1 for a spurious IRQ or DMA error, in which
 * case the caller should abandon interrupt processing.
 */
static int esp_check_spur_intr(struct esp *esp)
{
	switch (esp->rev) {
	case ESP100:
	case ESP100A:
		/* The interrupt pending bit of the status register cannot
		 * be trusted on these revisions.
		 */
		esp->sreg &= ~ESP_STAT_INTR;
		break;

	default:
		if (!(esp->sreg & ESP_STAT_INTR)) {
			esp->ireg = esp_read8(ESP_INTRPT);
			if (esp->ireg & ESP_INTR_SR)
				return 1;

			/* If the DMA is indicating interrupt pending and the
			 * ESP is not, the only possibility is a DMA error.
			 */
			if (!esp->ops->dma_error(esp)) {
				printk(KERN_ERR PFX "esp%d: Spurious irq, "
				       "sreg=%02x.\n",
				       esp->host->unique_id, esp->sreg);
				return -1;
			}

			printk(KERN_ERR PFX "esp%d: DMA error\n",
			       esp->host->unique_id);

			return -1;
		}
		break;
	}

	return 0;
}
997
/* Flag the driver for a SCSI bus reset: sets ESP_FLAG_RESETTING (which
 * also blocks new command issue) and queues the RESET event for the
 * driver state machine to act on.
 */
static void esp_schedule_reset(struct esp *esp)
{
	esp_log_reset("ESP: esp_schedule_reset() from %p\n",
		      __builtin_return_address(0));
	esp->flags |= ESP_FLAG_RESETTING;
	esp_event(esp, ESP_EVENT_RESET);
}
1005
1006/* In order to avoid having to add a special half-reconnected state
1007 * into the driver we just sit here and poll through the rest of
1008 * the reselection process to get the tag message bytes.
1009 */
/* Poll through the remainder of a tagged reselection to obtain the
 * two tag message bytes, then look up and return the matching command
 * entry.  Returns NULL on any timeout, disconnect, bad phase or
 * unknown tag, in which case the caller schedules a bus reset.
 */
static struct esp_cmd_entry *esp_reconnect_with_tag(struct esp *esp,
						    struct esp_lun_data *lp)
{
	struct esp_cmd_entry *ent;
	int i;

	if (!lp->num_tagged) {
		printk(KERN_ERR PFX "esp%d: Reconnect w/num_tagged==0\n",
		       esp->host->unique_id);
		return NULL;
	}

	esp_log_reconnect("ESP: reconnect tag, ");

	/* Busy-poll for the interrupt signalling message-in phase. */
	for (i = 0; i < ESP_QUICKIRQ_LIMIT; i++) {
		if (esp->ops->irq_pending(esp))
			break;
	}
	if (i == ESP_QUICKIRQ_LIMIT) {
		printk(KERN_ERR PFX "esp%d: Reconnect IRQ1 timeout\n",
		       esp->host->unique_id);
		return NULL;
	}

	esp->sreg = esp_read8(ESP_STATUS);
	esp->ireg = esp_read8(ESP_INTRPT);

	esp_log_reconnect("IRQ(%d:%x:%x), ",
			  i, esp->ireg, esp->sreg);

	if (esp->ireg & ESP_INTR_DC) {
		printk(KERN_ERR PFX "esp%d: Reconnect, got disconnect.\n",
		       esp->host->unique_id);
		return NULL;
	}

	/* We must now be in message-in phase to read the tag bytes. */
	if ((esp->sreg & ESP_STAT_PMASK) != ESP_MIP) {
		printk(KERN_ERR PFX "esp%d: Reconnect, not MIP sreg[%02x].\n",
		       esp->host->unique_id, esp->sreg);
		return NULL;
	}

	/* DMA in the tag bytes... */
	esp->command_block[0] = 0xff;
	esp->command_block[1] = 0xff;
	esp->ops->send_dma_cmd(esp, esp->command_block_dma,
			       2, 2, 1, ESP_CMD_DMA | ESP_CMD_TI);

	/* ACK the message.  */
	scsi_esp_cmd(esp, ESP_CMD_MOK);

	/* Wait for the transfer-done interrupt of the 2-byte DMA. */
	for (i = 0; i < ESP_RESELECT_TAG_LIMIT; i++) {
		if (esp->ops->irq_pending(esp)) {
			esp->sreg = esp_read8(ESP_STATUS);
			esp->ireg = esp_read8(ESP_INTRPT);
			if (esp->ireg & ESP_INTR_FDONE)
				break;
		}
		udelay(1);
	}
	if (i == ESP_RESELECT_TAG_LIMIT) {
		printk(KERN_ERR PFX "esp%d: Reconnect IRQ2 timeout\n",
		       esp->host->unique_id);
		return NULL;
	}
	esp->ops->dma_drain(esp);
	esp->ops->dma_invalidate(esp);

	esp_log_reconnect("IRQ2(%d:%x:%x) tag[%x:%x]\n",
			  i, esp->ireg, esp->sreg,
			  esp->command_block[0],
			  esp->command_block[1]);

	/* First byte must be a valid tag message type. */
	if (esp->command_block[0] < SIMPLE_QUEUE_TAG ||
	    esp->command_block[0] > ORDERED_QUEUE_TAG) {
		printk(KERN_ERR PFX "esp%d: Reconnect, bad tag "
		       "type %02x.\n",
		       esp->host->unique_id, esp->command_block[0]);
		return NULL;
	}

	/* Second byte is the tag number: find the owning command. */
	ent = lp->tagged_cmds[esp->command_block[1]];
	if (!ent) {
		printk(KERN_ERR PFX "esp%d: Reconnect, no entry for "
		       "tag %02x.\n",
		       esp->host->unique_id, esp->command_block[1]);
		return NULL;
	}

	return ent;
}
1101
/* Handle a reselection by a disconnected target: decode the target
 * and lun, find the command entry (via the tag protocol when needed),
 * make it the active command and restore its data pointers.  Returns
 * 1 on success; on any inconsistency schedules a bus reset and
 * returns 0.
 */
static int esp_reconnect(struct esp *esp)
{
	struct esp_cmd_entry *ent;
	struct esp_target_data *tp;
	struct esp_lun_data *lp;
	struct scsi_device *dev;
	int target, lun;

	BUG_ON(esp->active_cmd);
	if (esp->rev == FASHME) {
		/* FASHME puts the target and lun numbers directly
		 * into the fifo.
		 */
		target = esp->fifo[0];
		lun = esp->fifo[1] & 0x7;
	} else {
		u8 bits = esp_read8(ESP_FDATA);

		/* Older chips put the lun directly into the fifo, but
		 * the target is given as a sample of the arbitration
		 * lines on the bus at reselection time.  So we should
		 * see the ID of the ESP and the one reconnecting target
		 * set in the bitmap.
		 */
		if (!(bits & esp->scsi_id_mask))
			goto do_reset;
		bits &= ~esp->scsi_id_mask;
		/* Exactly one other bit may be set (the target's ID). */
		if (!bits || (bits & (bits - 1)))
			goto do_reset;

		target = ffs(bits) - 1;
		lun = (esp_read8(ESP_FDATA) & 0x7);

		scsi_esp_cmd(esp, ESP_CMD_FLUSH);
		if (esp->rev == ESP100) {
			u8 ireg = esp_read8(ESP_INTRPT);
			/* This chip has a bug during reselection that can
			 * cause a spurious illegal-command interrupt, which
			 * we simply ACK here.  Another possibility is a bus
			 * reset so we must check for that.
			 */
			if (ireg & ESP_INTR_SR)
				goto do_reset;
		}
		scsi_esp_cmd(esp, ESP_CMD_NULL);
	}

	/* Reprogram sync/config registers for the reconnecting target. */
	esp_write_tgt_sync(esp, target);
	esp_write_tgt_config3(esp, target);

	scsi_esp_cmd(esp, ESP_CMD_MOK);

	if (esp->rev == FASHME)
		esp_write8(target | ESP_BUSID_RESELID | ESP_BUSID_CTR32BIT,
			   ESP_BUSID);

	tp = &esp->target[target];
	dev = __scsi_device_lookup_by_target(tp->starget, lun);
	if (!dev) {
		printk(KERN_ERR PFX "esp%d: Reconnect, no lp "
		       "tgt[%u] lun[%u]\n",
		       esp->host->unique_id, target, lun);
		goto do_reset;
	}
	lp = dev->hostdata;

	/* An untagged command in flight reconnects without tag bytes;
	 * otherwise complete the tagged reselection protocol.
	 */
	ent = lp->non_tagged_cmd;
	if (!ent) {
		ent = esp_reconnect_with_tag(esp, lp);
		if (!ent)
			goto do_reset;
	}

	esp->active_cmd = ent;

	/* A pending abort for this command goes out now that the
	 * target is connected again.
	 */
	if (ent->flags & ESP_CMD_FLAG_ABORT) {
		esp->msg_out[0] = ABORT_TASK_SET;
		esp->msg_out_len = 1;
		scsi_esp_cmd(esp, ESP_CMD_SATN);
	}

	esp_event(esp, ESP_EVENT_CHECK_PHASE);
	esp_restore_pointers(esp, ent);
	esp->flags |= ESP_FLAG_QUICKIRQ_CHECK;
	return 1;

do_reset:
	esp_schedule_reset(esp);
	return 0;
}
1192
1193static int esp_finish_select(struct esp *esp)
1194{
1195	struct esp_cmd_entry *ent;
1196	struct scsi_cmnd *cmd;
1197	u8 orig_select_state;
1198
1199	orig_select_state = esp->select_state;
1200
1201	/* No longer selecting.  */
1202	esp->select_state = ESP_SELECT_NONE;
1203
1204	esp->seqreg = esp_read8(ESP_SSTEP) & ESP_STEP_VBITS;
1205	ent = esp->active_cmd;
1206	cmd = ent->cmd;
1207
1208	if (esp->ops->dma_error(esp)) {
1209		/* If we see a DMA error during or as a result of selection,
1210		 * all bets are off.
1211		 */
1212		esp_schedule_reset(esp);
1213		esp_cmd_is_done(esp, ent, cmd, (DID_ERROR << 16));
1214		return 0;
1215	}
1216
1217	esp->ops->dma_invalidate(esp);
1218
1219	if (esp->ireg == (ESP_INTR_RSEL | ESP_INTR_FDONE)) {
1220		struct esp_target_data *tp = &esp->target[cmd->device->id];
1221
1222		/* Carefully back out of the selection attempt.  Release
1223		 * resources (such as DMA mapping & TAG) and reset state (such
1224		 * as message out and command delivery variables).
1225		 */
1226		if (!(ent->flags & ESP_CMD_FLAG_AUTOSENSE)) {
1227			esp_unmap_dma(esp, cmd);
1228			esp_free_lun_tag(ent, cmd->device->hostdata);
1229			tp->flags &= ~(ESP_TGT_NEGO_SYNC | ESP_TGT_NEGO_WIDE);
1230			esp->flags &= ~ESP_FLAG_DOING_SLOWCMD;
1231			esp->cmd_bytes_ptr = NULL;
1232			esp->cmd_bytes_left = 0;
1233		} else {
1234			esp->ops->unmap_single(esp, ent->sense_dma,
1235					       SCSI_SENSE_BUFFERSIZE,
1236					       DMA_FROM_DEVICE);
1237			ent->sense_ptr = NULL;
1238		}
1239
1240		/* Now that the state is unwound properly, put back onto
1241		 * the issue queue.  This command is no longer active.
1242		 */
1243		list_del(&ent->list);
1244		list_add(&ent->list, &esp->queued_cmds);
1245		esp->active_cmd = NULL;
1246
1247		/* Return value ignored by caller, it directly invokes
1248		 * esp_reconnect().
1249		 */
1250		return 0;
1251	}
1252
1253	if (esp->ireg == ESP_INTR_DC) {
1254		struct scsi_device *dev = cmd->device;
1255
1256		/* Disconnect.  Make sure we re-negotiate sync and
1257		 * wide parameters if this target starts responding
1258		 * again in the future.
1259		 */
1260		esp->target[dev->id].flags |= ESP_TGT_CHECK_NEGO;
1261
1262		scsi_esp_cmd(esp, ESP_CMD_ESEL);
1263		esp_cmd_is_done(esp, ent, cmd, (DID_BAD_TARGET << 16));
1264		return 1;
1265	}
1266
1267	if (esp->ireg == (ESP_INTR_FDONE | ESP_INTR_BSERV)) {
1268		/* Selection successful.  On pre-FAST chips we have
1269		 * to do a NOP and possibly clean out the FIFO.
1270		 */
1271		if (esp->rev <= ESP236) {
1272			int fcnt = esp_read8(ESP_FFLAGS) & ESP_FF_FBYTES;
1273
1274			scsi_esp_cmd(esp, ESP_CMD_NULL);
1275
1276			if (!fcnt &&
1277			    (!esp->prev_soff ||
1278			     ((esp->sreg & ESP_STAT_PMASK) != ESP_DIP)))
1279				esp_flush_fifo(esp);
1280		}
1281
1282		/* If we are doing a slow command, negotiation, etc.
1283		 * we'll do the right thing as we transition to the
1284		 * next phase.
1285		 */
1286		esp_event(esp, ESP_EVENT_CHECK_PHASE);
1287		return 0;
1288	}
1289
1290	printk("ESP: Unexpected selection completion ireg[%x].\n",
1291	       esp->ireg);
1292	esp_schedule_reset(esp);
1293	return 0;
1294}
1295
/* Work out how many bytes of the current data transfer actually made
 * it across the SCSI bus, by subtracting the untransferred residue
 * (transfer-counter registers plus bytes stranded in the fifo) from
 * esp->data_dma_len.  Returns the byte count, or -1 if the ESP100
 * spurious-byte chip bug was detected (caller must reset).
 */
static int esp_data_bytes_sent(struct esp *esp, struct esp_cmd_entry *ent,
			       struct scsi_cmnd *cmd)
{
	int fifo_cnt, ecount, bytes_sent, flush_fifo;

	fifo_cnt = esp_read8(ESP_FFLAGS) & ESP_FF_FBYTES;
	/* In wide mode each fifo entry holds two bytes. */
	if (esp->prev_cfg3 & ESP_CONFIG3_EWIDE)
		fifo_cnt <<= 1;

	ecount = 0;
	if (!(esp->sreg & ESP_STAT_TCNT)) {
		/* Transfer counter did not reach zero; read back the
		 * residue (16-bit, or 24-bit on FASHME).
		 */
		ecount = ((unsigned int)esp_read8(ESP_TCLOW) |
			  (((unsigned int)esp_read8(ESP_TCMED)) << 8));
		if (esp->rev == FASHME)
			ecount |= ((unsigned int)esp_read8(FAS_RLO)) << 16;
	}

	bytes_sent = esp->data_dma_len;
	bytes_sent -= ecount;

	/* For data-out (ESP_CMD_FLAG_WRITE means data-in, see the
	 * DATA_IN event handler), bytes still sitting in the fifo
	 * never reached the target either.
	 */
	if (!(ent->flags & ESP_CMD_FLAG_WRITE))
		bytes_sent -= fifo_cnt;

	flush_fifo = 0;
	if (!esp->prev_soff) {
		/* No sync offset programmed (async transfer);
		 * always flush fifo.
		 */
		flush_fifo = 1;
	} else {
		if (esp->rev == ESP100) {
			u32 fflags, phase;

			/* ESP100 has a chip bug where in the synchronous data
			 * phase it can mistake a final long REQ pulse from the
			 * target as an extra data byte.  Fun.
			 *
			 * To detect this case we resample the status register
			 * and fifo flags.  If we're still in a data phase and
			 * we see spurious chunks in the fifo, we return error
			 * to the caller which should reset and set things up
			 * such that we only try future transfers to this
			 * target in synchronous mode.
			 */
			esp->sreg = esp_read8(ESP_STATUS);
			phase = esp->sreg & ESP_STAT_PMASK;
			fflags = esp_read8(ESP_FFLAGS);

			if ((phase == ESP_DOP &&
			     (fflags & ESP_FF_ONOTZERO)) ||
			    (phase == ESP_DIP &&
			     (fflags & ESP_FF_FBYTES)))
				return -1;
		}
		/* Sync data-out: drop whatever the target did not take. */
		if (!(ent->flags & ESP_CMD_FLAG_WRITE))
			flush_fifo = 1;
	}

	if (flush_fifo)
		esp_flush_fifo(esp);

	return bytes_sent;
}
1357
/* Commit a negotiated transfer agreement for target @tp: update the
 * SPI transport attributes, program the chip's sync offset/period
 * registers (and the fast-SCSI CONFIG3 bit on FAS chips), and cache
 * the values so they can be restored on reconnect.
 */
static void esp_setsync(struct esp *esp, struct esp_target_data *tp,
			u8 scsi_period, u8 scsi_offset,
			u8 esp_stp, u8 esp_soff)
{
	spi_period(tp->starget) = scsi_period;
	spi_offset(tp->starget) = scsi_offset;
	spi_width(tp->starget) = (tp->flags & ESP_TGT_WIDE) ? 1 : 0;

	if (esp_soff) {
		esp_stp &= 0x1f;
		/* Fold the chip's REQ/ACK delay setting into the offset
		 * register value.
		 */
		esp_soff |= esp->radelay;
		if (esp->rev >= FAS236) {
			u8 bit = ESP_CONFIG3_FSCSI;
			if (esp->rev >= FAS100A)
				bit = ESP_CONFIG3_FAST;

			/* Periods below 50 get the fast-SCSI config bit;
			 * FASHME additionally drops the REQ/ACK delay.
			 */
			if (scsi_period < 50) {
				if (esp->rev == FASHME)
					esp_soff &= ~esp->radelay;
				tp->esp_config3 |= bit;
			} else {
				tp->esp_config3 &= ~bit;
			}
			esp->prev_cfg3 = tp->esp_config3;
			esp_write8(esp->prev_cfg3, ESP_CFG3);
		}
	}

	/* Cache per-target and last-programmed values so reconnect can
	 * reprogram the chip without renegotiating.
	 */
	tp->esp_period = esp->prev_stp = esp_stp;
	tp->esp_offset = esp->prev_soff = esp_soff;

	esp_write8(esp_soff, ESP_SOFF);
	esp_write8(esp_stp, ESP_STP);

	/* Negotiation for this target is now complete. */
	tp->flags &= ~(ESP_TGT_NEGO_SYNC | ESP_TGT_CHECK_NEGO);

	spi_display_xfer_agreement(tp->starget);
}
1396
/* Handle a MESSAGE REJECT from the target.  If we were negotiating
 * wide transfers, fall back to narrow (and start sync negotiation if
 * still needed); if we were negotiating sync, fall back to async.
 * A reject of anything else gets the task set aborted.
 */
static void esp_msgin_reject(struct esp *esp)
{
	struct esp_cmd_entry *ent = esp->active_cmd;
	struct scsi_cmnd *cmd = ent->cmd;
	struct esp_target_data *tp;
	int tgt;

	tgt = cmd->device->id;
	tp = &esp->target[tgt];

	if (tp->flags & ESP_TGT_NEGO_WIDE) {
		/* WDTR rejected: give up on wide for this target. */
		tp->flags &= ~(ESP_TGT_NEGO_WIDE | ESP_TGT_WIDE);

		if (!esp_need_to_nego_sync(tp)) {
			tp->flags &= ~ESP_TGT_CHECK_NEGO;
			scsi_esp_cmd(esp, ESP_CMD_RATN);
		} else {
			/* Continue negotiation with an SDTR message. */
			esp->msg_out_len =
				spi_populate_sync_msg(&esp->msg_out[0],
						      tp->nego_goal_period,
						      tp->nego_goal_offset);
			tp->flags |= ESP_TGT_NEGO_SYNC;
			scsi_esp_cmd(esp, ESP_CMD_SATN);
		}
		return;
	}

	if (tp->flags & ESP_TGT_NEGO_SYNC) {
		/* SDTR rejected: fall back to async transfers. */
		tp->flags &= ~(ESP_TGT_NEGO_SYNC | ESP_TGT_CHECK_NEGO);
		tp->esp_period = 0;
		tp->esp_offset = 0;
		esp_setsync(esp, tp, 0, 0, 0, 0);
		scsi_esp_cmd(esp, ESP_CMD_RATN);
		return;
	}

	/* Unexpected reject: abort the task set. */
	esp->msg_out[0] = ABORT_TASK_SET;
	esp->msg_out_len = 1;
	scsi_esp_cmd(esp, ESP_CMD_SATN);
}
1437
/* Handle an incoming SDTR (synchronous data transfer request).
 * Validates the offered period/offset against chip limits and either
 * programs the agreement via esp_setsync(), counter-offers async via
 * a new SDTR, or rejects the message outright.
 */
static void esp_msgin_sdtr(struct esp *esp, struct esp_target_data *tp)
{
	u8 period = esp->msg_in[3];
	u8 offset = esp->msg_in[4];
	u8 stp;

	/* Only accept an SDTR we initiated. */
	if (!(tp->flags & ESP_TGT_NEGO_SYNC))
		goto do_reject;

	/* Maximum REQ/ACK offset we support is 15. */
	if (offset > 15)
		goto do_reject;

	if (offset) {
		int one_clock;

		if (period > esp->max_period) {
			/* Too slow for us: counter-offer async. */
			period = offset = 0;
			goto do_sdtr;
		}
		if (period < esp->min_period)
			goto do_reject;

		/* Convert the SDTR period into an ESP_STP register
		 * value: clock cycles per transfer period, rounded up.
		 */
		one_clock = esp->ccycle / 1000;
		stp = DIV_ROUND_UP(period << 2, one_clock);
		if (stp && esp->rev >= FAS236) {
			/* NOTE(review): FAS parts appear to need the STP
			 * value reduced by one when >= 50 — confirm against
			 * the FAS236 data sheet.
			 */
			if (stp >= 50)
				stp--;
		}
	} else {
		/* Zero offset means async. */
		stp = 0;
	}

	esp_setsync(esp, tp, period, offset, stp, offset);
	return;

do_reject:
	esp->msg_out[0] = MESSAGE_REJECT;
	esp->msg_out_len = 1;
	scsi_esp_cmd(esp, ESP_CMD_SATN);
	return;

do_sdtr:
	tp->nego_goal_period = period;
	tp->nego_goal_offset = offset;
	esp->msg_out_len =
		spi_populate_sync_msg(&esp->msg_out[0],
				      tp->nego_goal_period,
				      tp->nego_goal_offset);
	scsi_esp_cmd(esp, ESP_CMD_SATN);
}
1488
/* Handle an incoming WDTR (wide data transfer request).  Only the
 * FASHME supports wide transfers; accept 8 or 16 bit widths, program
 * CONFIG3 accordingly, then either finish negotiation or move on to
 * sync negotiation.
 */
static void esp_msgin_wdtr(struct esp *esp, struct esp_target_data *tp)
{
	int size = 8 << esp->msg_in[3];
	u8 cfg3;

	/* Only the FASHME can do wide transfers. */
	if (esp->rev != FASHME)
		goto do_reject;

	if (size != 8 && size != 16)
		goto do_reject;

	/* Only accept a WDTR we initiated. */
	if (!(tp->flags & ESP_TGT_NEGO_WIDE))
		goto do_reject;

	cfg3 = tp->esp_config3;
	if (size == 16) {
		tp->flags |= ESP_TGT_WIDE;
		cfg3 |= ESP_CONFIG3_EWIDE;
	} else {
		tp->flags &= ~ESP_TGT_WIDE;
		cfg3 &= ~ESP_CONFIG3_EWIDE;
	}
	tp->esp_config3 = cfg3;
	esp->prev_cfg3 = cfg3;
	esp_write8(cfg3, ESP_CFG3);

	tp->flags &= ~ESP_TGT_NEGO_WIDE;

	/* Width is settled; clear the transport's sync parameters and
	 * negotiate sync next if we still want it.
	 */
	spi_period(tp->starget) = 0;
	spi_offset(tp->starget) = 0;
	if (!esp_need_to_nego_sync(tp)) {
		tp->flags &= ~ESP_TGT_CHECK_NEGO;
		scsi_esp_cmd(esp, ESP_CMD_RATN);
	} else {
		esp->msg_out_len =
			spi_populate_sync_msg(&esp->msg_out[0],
					      tp->nego_goal_period,
					      tp->nego_goal_offset);
		tp->flags |= ESP_TGT_NEGO_SYNC;
		scsi_esp_cmd(esp, ESP_CMD_SATN);
	}
	return;

do_reject:
	esp->msg_out[0] = MESSAGE_REJECT;
	esp->msg_out_len = 1;
	scsi_esp_cmd(esp, ESP_CMD_SATN);
}
1537
1538static void esp_msgin_extended(struct esp *esp)
1539{
1540	struct esp_cmd_entry *ent = esp->active_cmd;
1541	struct scsi_cmnd *cmd = ent->cmd;
1542	struct esp_target_data *tp;
1543	int tgt = cmd->device->id;
1544
1545	tp = &esp->target[tgt];
1546	if (esp->msg_in[2] == EXTENDED_SDTR) {
1547		esp_msgin_sdtr(esp, tp);
1548		return;
1549	}
1550	if (esp->msg_in[2] == EXTENDED_WDTR) {
1551		esp_msgin_wdtr(esp, tp);
1552		return;
1553	}
1554
1555	printk("ESP: Unexpected extended msg type %x\n",
1556	       esp->msg_in[2]);
1557
1558	esp->msg_out[0] = ABORT_TASK_SET;
1559	esp->msg_out_len = 1;
1560	scsi_esp_cmd(esp, ESP_CMD_SATN);
1561}
1562
/* Analyze msgin bytes received from target so far.  Return non-zero
 * if there are more bytes needed to complete the message.
 */
static int esp_msgin_process(struct esp *esp)
{
	u8 msg0 = esp->msg_in[0];
	int len = esp->msg_in_len;

	if (msg0 & 0x80) {
		/* Identify */
		printk("ESP: Unexpected msgin identify\n");
		return 0;
	}

	switch (msg0) {
	case EXTENDED_MESSAGE:
		/* Need the length byte (msg_in[1]) plus that many
		 * payload bytes before the message can be parsed.
		 */
		if (len == 1)
			return 1;
		if (len < esp->msg_in[1] + 2)
			return 1;
		esp_msgin_extended(esp);
		return 0;

	case IGNORE_WIDE_RESIDUE: {
		struct esp_cmd_entry *ent;
		struct esp_cmd_priv *spriv;
		/* Still waiting for the residue-count byte. */
		if (len == 1)
			return 1;

		/* Only a one-byte residue is meaningful here. */
		if (esp->msg_in[1] != 1)
			goto do_reject;

		ent = esp->active_cmd;
		spriv = ESP_CMD_PRIV(ent->cmd);

		/* Give back one byte of residue.  If nothing of the
		 * current sg entry has been consumed yet, the extra
		 * byte belongs to the end of the previous entry.
		 */
		if (spriv->cur_residue == sg_dma_len(spriv->cur_sg)) {
			spriv->cur_sg--;
			spriv->cur_residue = 1;
		} else
			spriv->cur_residue++;
		spriv->tot_residue++;
		return 0;
	}
	case NOP:
		return 0;
	case RESTORE_POINTERS:
		esp_restore_pointers(esp, esp->active_cmd);
		return 0;
	case SAVE_POINTERS:
		esp_save_pointers(esp, esp->active_cmd);
		return 0;

	case COMMAND_COMPLETE:
	case DISCONNECT: {
		struct esp_cmd_entry *ent = esp->active_cmd;

		/* Record the message; the FREE_BUS event handler acts
		 * on it once the target releases the bus.
		 */
		ent->message = msg0;
		esp_event(esp, ESP_EVENT_FREE_BUS);
		esp->flags |= ESP_FLAG_QUICKIRQ_CHECK;
		return 0;
	}
	case MESSAGE_REJECT:
		esp_msgin_reject(esp);
		return 0;

	default:
	do_reject:
		esp->msg_out[0] = MESSAGE_REJECT;
		esp->msg_out_len = 1;
		scsi_esp_cmd(esp, ESP_CMD_SATN);
		return 0;
	}
}
1636
1637static int esp_process_event(struct esp *esp)
1638{
1639	int write;
1640
1641again:
1642	write = 0;
1643	switch (esp->event) {
1644	case ESP_EVENT_CHECK_PHASE:
1645		switch (esp->sreg & ESP_STAT_PMASK) {
1646		case ESP_DOP:
1647			esp_event(esp, ESP_EVENT_DATA_OUT);
1648			break;
1649		case ESP_DIP:
1650			esp_event(esp, ESP_EVENT_DATA_IN);
1651			break;
1652		case ESP_STATP:
1653			esp_flush_fifo(esp);
1654			scsi_esp_cmd(esp, ESP_CMD_ICCSEQ);
1655			esp_event(esp, ESP_EVENT_STATUS);
1656			esp->flags |= ESP_FLAG_QUICKIRQ_CHECK;
1657			return 1;
1658
1659		case ESP_MOP:
1660			esp_event(esp, ESP_EVENT_MSGOUT);
1661			break;
1662
1663		case ESP_MIP:
1664			esp_event(esp, ESP_EVENT_MSGIN);
1665			break;
1666
1667		case ESP_CMDP:
1668			esp_event(esp, ESP_EVENT_CMD_START);
1669			break;
1670
1671		default:
1672			printk("ESP: Unexpected phase, sreg=%02x\n",
1673			       esp->sreg);
1674			esp_schedule_reset(esp);
1675			return 0;
1676		}
1677		goto again;
1678		break;
1679
1680	case ESP_EVENT_DATA_IN:
1681		write = 1;
1682		/* fallthru */
1683
1684	case ESP_EVENT_DATA_OUT: {
1685		struct esp_cmd_entry *ent = esp->active_cmd;
1686		struct scsi_cmnd *cmd = ent->cmd;
1687		dma_addr_t dma_addr = esp_cur_dma_addr(ent, cmd);
1688		unsigned int dma_len = esp_cur_dma_len(ent, cmd);
1689
1690		if (esp->rev == ESP100)
1691			scsi_esp_cmd(esp, ESP_CMD_NULL);
1692
1693		if (write)
1694			ent->flags |= ESP_CMD_FLAG_WRITE;
1695		else
1696			ent->flags &= ~ESP_CMD_FLAG_WRITE;
1697
1698		if (esp->ops->dma_length_limit)
1699			dma_len = esp->ops->dma_length_limit(esp, dma_addr,
1700							     dma_len);
1701		else
1702			dma_len = esp_dma_length_limit(esp, dma_addr, dma_len);
1703
1704		esp->data_dma_len = dma_len;
1705
1706		if (!dma_len) {
1707			printk(KERN_ERR PFX "esp%d: DMA length is zero!\n",
1708			       esp->host->unique_id);
1709			printk(KERN_ERR PFX "esp%d: cur adr[%08llx] len[%08x]\n",
1710			       esp->host->unique_id,
1711			       (unsigned long long)esp_cur_dma_addr(ent, cmd),
1712			       esp_cur_dma_len(ent, cmd));
1713			esp_schedule_reset(esp);
1714			return 0;
1715		}
1716
1717		esp_log_datastart("ESP: start data addr[%08llx] len[%u] "
1718				  "write(%d)\n",
1719				  (unsigned long long)dma_addr, dma_len, write);
1720
1721		esp->ops->send_dma_cmd(esp, dma_addr, dma_len, dma_len,
1722				       write, ESP_CMD_DMA | ESP_CMD_TI);
1723		esp_event(esp, ESP_EVENT_DATA_DONE);
1724		break;
1725	}
1726	case ESP_EVENT_DATA_DONE: {
1727		struct esp_cmd_entry *ent = esp->active_cmd;
1728		struct scsi_cmnd *cmd = ent->cmd;
1729		int bytes_sent;
1730
1731		if (esp->ops->dma_error(esp)) {
1732			printk("ESP: data done, DMA error, resetting\n");
1733			esp_schedule_reset(esp);
1734			return 0;
1735		}
1736
1737		if (ent->flags & ESP_CMD_FLAG_WRITE) {
1738
1739			esp->ops->dma_drain(esp);
1740		}
1741		esp->ops->dma_invalidate(esp);
1742
1743		if (esp->ireg != ESP_INTR_BSERV) {
1744			/* We should always see exactly a bus-service
1745			 * interrupt at the end of a successful transfer.
1746			 */
1747			printk("ESP: data done, not BSERV, resetting\n");
1748			esp_schedule_reset(esp);
1749			return 0;
1750		}
1751
1752		bytes_sent = esp_data_bytes_sent(esp, ent, cmd);
1753
1754		esp_log_datadone("ESP: data done flgs[%x] sent[%d]\n",
1755				 ent->flags, bytes_sent);
1756
1757		if (bytes_sent < 0) {
1758			esp_schedule_reset(esp);
1759			return 0;
1760		}
1761
1762		esp_advance_dma(esp, ent, cmd, bytes_sent);
1763		esp_event(esp, ESP_EVENT_CHECK_PHASE);
1764		goto again;
1765	}
1766
1767	case ESP_EVENT_STATUS: {
1768		struct esp_cmd_entry *ent = esp->active_cmd;
1769
1770		if (esp->ireg & ESP_INTR_FDONE) {
1771			ent->status = esp_read8(ESP_FDATA);
1772			ent->message = esp_read8(ESP_FDATA);
1773			scsi_esp_cmd(esp, ESP_CMD_MOK);
1774		} else if (esp->ireg == ESP_INTR_BSERV) {
1775			ent->status = esp_read8(ESP_FDATA);
1776			ent->message = 0xff;
1777			esp_event(esp, ESP_EVENT_MSGIN);
1778			return 0;
1779		}
1780
1781		if (ent->message != COMMAND_COMPLETE) {
1782			printk("ESP: Unexpected message %x in status\n",
1783			       ent->message);
1784			esp_schedule_reset(esp);
1785			return 0;
1786		}
1787
1788		esp_event(esp, ESP_EVENT_FREE_BUS);
1789		esp->flags |= ESP_FLAG_QUICKIRQ_CHECK;
1790		break;
1791	}
1792	case ESP_EVENT_FREE_BUS: {
1793		struct esp_cmd_entry *ent = esp->active_cmd;
1794		struct scsi_cmnd *cmd = ent->cmd;
1795
1796		if (ent->message == COMMAND_COMPLETE ||
1797		    ent->message == DISCONNECT)
1798			scsi_esp_cmd(esp, ESP_CMD_ESEL);
1799
1800		if (ent->message == COMMAND_COMPLETE) {
1801			esp_log_cmddone("ESP: Command done status[%x] "
1802					"message[%x]\n",
1803					ent->status, ent->message);
1804			if (ent->status == SAM_STAT_TASK_SET_FULL)
1805				esp_event_queue_full(esp, ent);
1806
1807			if (ent->status == SAM_STAT_CHECK_CONDITION &&
1808			    !(ent->flags & ESP_CMD_FLAG_AUTOSENSE)) {
1809				ent->flags |= ESP_CMD_FLAG_AUTOSENSE;
1810				esp_autosense(esp, ent);
1811			} else {
1812				esp_cmd_is_done(esp, ent, cmd,
1813						compose_result(ent->status,
1814							       ent->message,
1815							       DID_OK));
1816			}
1817		} else if (ent->message == DISCONNECT) {
1818			esp_log_disconnect("ESP: Disconnecting tgt[%d] "
1819					   "tag[%x:%x]\n",
1820					   cmd->device->id,
1821					   ent->tag[0], ent->tag[1]);
1822
1823			esp->active_cmd = NULL;
1824			esp_maybe_execute_command(esp);
1825		} else {
1826			printk("ESP: Unexpected message %x in freebus\n",
1827			       ent->message);
1828			esp_schedule_reset(esp);
1829			return 0;
1830		}
1831		if (esp->active_cmd)
1832			esp->flags |= ESP_FLAG_QUICKIRQ_CHECK;
1833		break;
1834	}
1835	case ESP_EVENT_MSGOUT: {
1836		scsi_esp_cmd(esp, ESP_CMD_FLUSH);
1837
1838		if (esp_debug & ESP_DEBUG_MSGOUT) {
1839			int i;
1840			printk("ESP: Sending message [ ");
1841			for (i = 0; i < esp->msg_out_len; i++)
1842				printk("%02x ", esp->msg_out[i]);
1843			printk("]\n");
1844		}
1845
1846		if (esp->rev == FASHME) {
1847			int i;
1848
1849			/* Always use the fifo.  */
1850			for (i = 0; i < esp->msg_out_len; i++) {
1851				esp_write8(esp->msg_out[i], ESP_FDATA);
1852				esp_write8(0, ESP_FDATA);
1853			}
1854			scsi_esp_cmd(esp, ESP_CMD_TI);
1855		} else {
1856			if (esp->msg_out_len == 1) {
1857				esp_write8(esp->msg_out[0], ESP_FDATA);
1858				scsi_esp_cmd(esp, ESP_CMD_TI);
1859			} else {
1860				/* Use DMA. */
1861				memcpy(esp->command_block,
1862				       esp->msg_out,
1863				       esp->msg_out_len);
1864
1865				esp->ops->send_dma_cmd(esp,
1866						       esp->command_block_dma,
1867						       esp->msg_out_len,
1868						       esp->msg_out_len,
1869						       0,
1870						       ESP_CMD_DMA|ESP_CMD_TI);
1871			}
1872		}
1873		esp_event(esp, ESP_EVENT_MSGOUT_DONE);
1874		break;
1875	}
1876	case ESP_EVENT_MSGOUT_DONE:
1877		if (esp->rev == FASHME) {
1878			scsi_esp_cmd(esp, ESP_CMD_FLUSH);
1879		} else {
1880			if (esp->msg_out_len > 1)
1881				esp->ops->dma_invalidate(esp);
1882		}
1883
1884		if (!(esp->ireg & ESP_INTR_DC)) {
1885			if (esp->rev != FASHME)
1886				scsi_esp_cmd(esp, ESP_CMD_NULL);
1887		}
1888		esp_event(esp, ESP_EVENT_CHECK_PHASE);
1889		goto again;
1890	case ESP_EVENT_MSGIN:
1891		if (esp->ireg & ESP_INTR_BSERV) {
1892			if (esp->rev == FASHME) {
1893				if (!(esp_read8(ESP_STATUS2) &
1894				      ESP_STAT2_FEMPTY))
1895					scsi_esp_cmd(esp, ESP_CMD_FLUSH);
1896			} else {
1897				scsi_esp_cmd(esp, ESP_CMD_FLUSH);
1898				if (esp->rev == ESP100)
1899					scsi_esp_cmd(esp, ESP_CMD_NULL);
1900			}
1901			scsi_esp_cmd(esp, ESP_CMD_TI);
1902			esp->flags |= ESP_FLAG_QUICKIRQ_CHECK;
1903			return 1;
1904		}
1905		if (esp->ireg & ESP_INTR_FDONE) {
1906			u8 val;
1907
1908			if (esp->rev == FASHME)
1909				val = esp->fifo[0];
1910			else
1911				val = esp_read8(ESP_FDATA);
1912			esp->msg_in[esp->msg_in_len++] = val;
1913
1914			esp_log_msgin("ESP: Got msgin byte %x\n", val);
1915
1916			if (!esp_msgin_process(esp))
1917				esp->msg_in_len = 0;
1918
1919			if (esp->rev == FASHME)
1920				scsi_esp_cmd(esp, ESP_CMD_FLUSH);
1921
1922			scsi_esp_cmd(esp, ESP_CMD_MOK);
1923
1924			if (esp->event != ESP_EVENT_FREE_BUS)
1925				esp_event(esp, ESP_EVENT_CHECK_PHASE);
1926		} else {
1927			printk("ESP: MSGIN neither BSERV not FDON, resetting");
1928			esp_schedule_reset(esp);
1929			return 0;
1930		}
1931		break;
1932	case ESP_EVENT_CMD_START:
1933		memcpy(esp->command_block, esp->cmd_bytes_ptr,
1934		       esp->cmd_bytes_left);
1935		if (esp->rev == FASHME)
1936			scsi_esp_cmd(esp, ESP_CMD_FLUSH);
1937		esp->ops->send_dma_cmd(esp, esp->command_block_dma,
1938				       esp->cmd_bytes_left, 16, 0,
1939				       ESP_CMD_DMA | ESP_CMD_TI);
1940		esp_event(esp, ESP_EVENT_CMD_DONE);
1941		esp->flags |= ESP_FLAG_QUICKIRQ_CHECK;
1942		break;
1943	case ESP_EVENT_CMD_DONE:
1944		esp->ops->dma_invalidate(esp);
1945		if (esp->ireg & ESP_INTR_BSERV) {
1946			esp_event(esp, ESP_EVENT_CHECK_PHASE);
1947			goto again;
1948		}
1949		esp_schedule_reset(esp);
1950		return 0;
1951		break;
1952
1953	case ESP_EVENT_RESET:
1954		scsi_esp_cmd(esp, ESP_CMD_RS);
1955		break;
1956
1957	default:
1958		printk("ESP: Unexpected event %x, resetting\n",
1959		       esp->event);
1960		esp_schedule_reset(esp);
1961		return 0;
1962		break;
1963	}
1964	return 1;
1965}
1966
/* Complete one started command with DID_RESET: release its DMA
 * mapping and LUN tag (and the auto-sense buffer mapping if sense
 * collection was in flight), notify the midlayer, and return the
 * entry to the free pool.
 */
static void esp_reset_cleanup_one(struct esp *esp, struct esp_cmd_entry *ent)
{
	struct scsi_cmnd *cmd = ent->cmd;

	esp_unmap_dma(esp, cmd);
	esp_free_lun_tag(ent, cmd->device->hostdata);
	cmd->result = DID_RESET << 16;

	if (ent->flags & ESP_CMD_FLAG_AUTOSENSE) {
		esp->ops->unmap_single(esp, ent->sense_dma,
				       SCSI_SENSE_BUFFERSIZE, DMA_FROM_DEVICE);
		ent->sense_ptr = NULL;
	}

	cmd->scsi_done(cmd);
	list_del(&ent->list);
	esp_put_ent(esp, ent);
}
1985
1986static void esp_clear_hold(struct scsi_device *dev, void *data)
1987{
1988	struct esp_lun_data *lp = dev->hostdata;
1989
1990	BUG_ON(lp->num_tagged);
1991	lp->hold = 0;
1992}
1993
/* Tear down all driver state after a SCSI bus reset: fail every
 * queued and active command with DID_RESET, force renegotiation of
 * sync/wide parameters for all targets, and drop per-LUN holds.
 */
static void esp_reset_cleanup(struct esp *esp)
{
	struct esp_cmd_entry *ent, *tmp;
	int i;

	list_for_each_entry_safe(ent, tmp, &esp->queued_cmds, list) {
		struct scsi_cmnd *cmd = ent->cmd;

		/* Never started, so there is no DMA mapping or tag
		 * to release — just complete it.
		 */
		list_del(&ent->list);
		cmd->result = DID_RESET << 16;
		cmd->scsi_done(cmd);
		esp_put_ent(esp, ent);
	}

	list_for_each_entry_safe(ent, tmp, &esp->active_cmds, list) {
		if (ent == esp->active_cmd)
			esp->active_cmd = NULL;
		esp_reset_cleanup_one(esp, ent);
	}

	BUG_ON(esp->active_cmd != NULL);

	/* Force renegotiation of sync/wide transfers.  */
	for (i = 0; i < ESP_MAX_TARGET; i++) {
		struct esp_target_data *tp = &esp->target[i];

		tp->esp_period = 0;
		tp->esp_offset = 0;
		tp->esp_config3 &= ~(ESP_CONFIG3_EWIDE |
				     ESP_CONFIG3_FSCSI |
				     ESP_CONFIG3_FAST);
		tp->flags &= ~ESP_TGT_WIDE;
		tp->flags |= ESP_TGT_CHECK_NEGO;

		if (tp->starget)
			__starget_for_each_device(tp->starget, NULL,
						  esp_clear_hold);
	}
	esp->flags &= ~ESP_FLAG_RESETTING;
}
2034
/* Runs under host->lock.  Main interrupt service: sample the status
 * and interrupt registers, finish any pending bus reset, latch the
 * FASHME fifo when needed, dispatch selection-completion/reselection
 * handling, then run the event state machine until it is done.
 */
static void __esp_interrupt(struct esp *esp)
{
	int finish_reset, intr_done;
	u8 phase;

	esp->sreg = esp_read8(ESP_STATUS);

	if (esp->flags & ESP_FLAG_RESETTING) {
		finish_reset = 1;
	} else {
		if (esp_check_gross_error(esp))
			return;

		/* Negative return means a spurious interrupt that was
		 * already dealt with.
		 */
		finish_reset = esp_check_spur_intr(esp);
		if (finish_reset < 0)
			return;
	}

	esp->ireg = esp_read8(ESP_INTRPT);

	/* A SCSI bus reset interrupt also finishes any reset in
	 * progress.
	 */
	if (esp->ireg & ESP_INTR_SR)
		finish_reset = 1;

	if (finish_reset) {
		esp_reset_cleanup(esp);
		if (esp->eh_reset) {
			/* Wake up whoever is waiting for the reset. */
			complete(esp->eh_reset);
			esp->eh_reset = NULL;
		}
		return;
	}

	phase = (esp->sreg & ESP_STAT_PMASK);
	if (esp->rev == FASHME) {
		/* Latch fifo contents into esp->fifo[] (see
		 * hme_read_fifo) before further register accesses can
		 * disturb them.
		 */
		if (((phase != ESP_DIP && phase != ESP_DOP) &&
		     esp->select_state == ESP_SELECT_NONE &&
		     esp->event != ESP_EVENT_STATUS &&
		     esp->event != ESP_EVENT_DATA_DONE) ||
		    (esp->ireg & ESP_INTR_RSEL)) {
			esp->sreg2 = esp_read8(ESP_STATUS2);
			if (!(esp->sreg2 & ESP_STAT2_FEMPTY) ||
			    (esp->sreg2 & ESP_STAT2_F1BYTE))
				hme_read_fifo(esp);
		}
	}

	esp_log_intr("ESP: intr sreg[%02x] seqreg[%02x] "
		     "sreg2[%02x] ireg[%02x]\n",
		     esp->sreg, esp->seqreg, esp->sreg2, esp->ireg);

	intr_done = 0;

	if (esp->ireg & (ESP_INTR_S | ESP_INTR_SATN | ESP_INTR_IC)) {
		printk("ESP: unexpected IREG %02x\n", esp->ireg);
		if (esp->ireg & ESP_INTR_IC)
			esp_dump_cmd_log(esp);

		esp_schedule_reset(esp);
	} else {
		if (!(esp->ireg & ESP_INTR_RSEL)) {
			/* Some combination of FDONE, BSERV, DC.  */
			if (esp->select_state != ESP_SELECT_NONE)
				intr_done = esp_finish_select(esp);
		} else if (esp->ireg & ESP_INTR_RSEL) {
			/* Reselection: back out of any in-progress
			 * selection first, then resume the
			 * reconnecting command.
			 */
			if (esp->active_cmd)
				(void) esp_finish_select(esp);
			intr_done = esp_reconnect(esp);
		}
	}
	while (!intr_done)
		intr_done = esp_process_event(esp);
}
2108
/* Shared IRQ handler entry point for all ESP front-end drivers.
 * Services chip interrupts under the host lock; after each pass, if
 * the state machine set ESP_FLAG_QUICKIRQ_CHECK, briefly polls (up
 * to ESP_QUICKIRQ_LIMIT checks) for an immediately following
 * interrupt to avoid the overhead of another hardware trap.
 */
irqreturn_t scsi_esp_intr(int irq, void *dev_id)
{
	struct esp *esp = dev_id;
	unsigned long flags;
	irqreturn_t ret;

	spin_lock_irqsave(esp->host->host_lock, flags);
	ret = IRQ_NONE;
	if (esp->ops->irq_pending(esp)) {
		ret = IRQ_HANDLED;
		for (;;) {
			int i;

			__esp_interrupt(esp);
			if (!(esp->flags & ESP_FLAG_QUICKIRQ_CHECK))
				break;
			esp->flags &= ~ESP_FLAG_QUICKIRQ_CHECK;

			/* Short poll for a quickly following interrupt. */
			for (i = 0; i < ESP_QUICKIRQ_LIMIT; i++) {
				if (esp->ops->irq_pending(esp))
					break;
			}
			if (i == ESP_QUICKIRQ_LIMIT)
				break;
		}
	}
	spin_unlock_irqrestore(esp->host->host_lock, flags);

	return ret;
}
EXPORT_SYMBOL(scsi_esp_intr);
2140
/* Determine which ESP/FAS chip variant we are driving by probing
 * which config registers are actually implemented: no CFG2 means a
 * plain ESP100, CFG2 without CFG3 means ESP100A, all three present
 * means a FAS variant selected via the clock conversion factor.
 * Sets esp->rev (and the default sync period for FAST parts).
 */
static void esp_get_revision(struct esp *esp)
{
	u8 val;

	esp->config1 = (ESP_CONFIG1_PENABLE | (esp->scsi_id & 7));
	esp->config2 = (ESP_CONFIG2_SCSI2ENAB | ESP_CONFIG2_REGPARITY);
	esp_write8(esp->config2, ESP_CFG2);

	val = esp_read8(ESP_CFG2);
	val &= ~ESP_CONFIG2_MAGIC;
	if (val != (ESP_CONFIG2_SCSI2ENAB | ESP_CONFIG2_REGPARITY)) {
		/* If what we write to cfg2 does not come back, cfg2 is not
		 * implemented, therefore this must be a plain esp100.
		 */
		esp->rev = ESP100;
	} else {
		/* Probe cfg3 by writing a test pattern and reading it
		 * back.
		 */
		esp->config2 = 0;
		esp_set_all_config3(esp, 5);
		esp->prev_cfg3 = 5;
		esp_write8(esp->config2, ESP_CFG2);
		esp_write8(0, ESP_CFG3);
		esp_write8(esp->prev_cfg3, ESP_CFG3);

		val = esp_read8(ESP_CFG3);
		if (val != 5) {
			/* The cfg2 register is implemented, however
			 * cfg3 is not, must be esp100a.
			 */
			esp->rev = ESP100A;
		} else {
			esp_set_all_config3(esp, 0);
			esp->prev_cfg3 = 0;
			esp_write8(esp->prev_cfg3, ESP_CFG3);

			/* All of cfg{1,2,3} implemented, must be one of
			 * the fas variants, figure out which one.
			 */
			if (esp->cfact == 0 || esp->cfact > ESP_CCF_F5) {
				esp->rev = FAST;
				esp->sync_defp = SYNC_DEFP_FAST;
			} else {
				esp->rev = ESP236;
			}
			esp->config2 = 0;
			esp_write8(esp->config2, ESP_CFG2);
		}
	}
}
2189
2190static void esp_init_swstate(struct esp *esp)
2191{
2192	int i;
2193
2194	INIT_LIST_HEAD(&esp->queued_cmds);
2195	INIT_LIST_HEAD(&esp->active_cmds);
2196	INIT_LIST_HEAD(&esp->esp_cmd_pool);
2197
2198	/* Start with a clear state, domain validation (via ->slave_configure,
2199	 * spi_dv_device()) will attempt to enable SYNC, WIDE, and tagged
2200	 * commands.
2201	 */
2202	for (i = 0 ; i < ESP_MAX_TARGET; i++) {
2203		esp->target[i].flags = 0;
2204		esp->target[i].nego_goal_period = 0;
2205		esp->target[i].nego_goal_offset = 0;
2206		esp->target[i].nego_goal_width = 0;
2207		esp->target[i].nego_goal_tags = 0;
2208	}
2209}
2210
/* This places the ESP into a known state at boot time: reset the DMA
 * engine and the chip, then pulse a SCSI bus reset with the reset
 * interrupt suppressed so it does not fire before we are ready.
 */
static void esp_bootup_reset(struct esp *esp)
{
	u8 val;

	/* Reset the DMA */
	esp->ops->reset_dma(esp);

	/* Reset the ESP */
	esp_reset_esp(esp);

	/* Reset the SCSI bus, but tell ESP not to generate an irq */
	val = esp_read8(ESP_CFG1);
	val |= ESP_CONFIG1_SRRDISAB;
	esp_write8(val, ESP_CFG1);

	scsi_esp_cmd(esp, ESP_CMD_RS);
	udelay(400);

	/* Restore the desired config1 (without SRRDISAB). */
	esp_write8(esp->config1, ESP_CFG1);

	/* Eat any bitrot in the chip and we are done... */
	esp_read8(ESP_INTRPT);
}
2235
/* Derive the chip timing parameters from the input clock frequency. */
static void esp_set_clock_params(struct esp *esp)
{
	int fhz;
	u8 ccf;

	/* This is getting messy but it has to be done correctly or else
	 * you get weird behavior all over the place.  We are trying to
	 * basically figure out three pieces of information.
	 *
	 * a) Clock Conversion Factor
	 *
	 *    This is a representation of the input crystal clock frequency
	 *    going into the ESP on this machine.  Any operation whose timing
	 *    is longer than 400ns depends on this value being correct.  For
	 *    example, you'll get blips for arbitration/selection during high
	 *    load or with multiple targets if this is not set correctly.
	 *
	 * b) Selection Time-Out
	 *
	 *    The ESP isn't very bright and will arbitrate for the bus and try
	 *    to select a target forever if you let it.  This value tells the
	 *    ESP when it has taken too long to negotiate and that it should
	 *    interrupt the CPU so we can see what happened.  The value is
	 *    computed as follows (from NCR/Symbios chip docs).
	 *
	 *          (Time Out Period) *  (Input Clock)
	 *    STO = ----------------------------------
	 *          (8192) * (Clock Conversion Factor)
	 *
	 *    We use a time out period of 250ms (ESP_BUS_TIMEOUT).
	 *
	 * c) Empirical constants for synchronous offset and transfer period
	 *    register values
	 *
	 *    This entails the smallest and largest sync period we could ever
	 *    handle on this ESP.
	 */
	fhz = esp->cfreq;

	/* ccf rounds the clock up to the next multiple of 5 MHz;
	 * a result of 1 is bumped to 2.
	 */
	ccf = ((fhz / 1000000) + 4) / 5;
	if (ccf == 1)
		ccf = 2;

	/* If we can't find anything reasonable, just assume 20MHZ.
	 * This is the clock frequency of the older sun4c's where I've
	 * been unable to find the clock-frequency PROM property.  All
	 * other machines provide useful values it seems.
	 */
	if (fhz <= 5000000 || ccf < 1 || ccf > 8) {
		fhz = 20000000;
		ccf = 4;
	}

	/* A ccf of 8 is encoded as 0 in the register. */
	esp->cfact = (ccf == 8 ? 0 : ccf);
	esp->cfreq = fhz;
	esp->ccycle = ESP_HZ_TO_CYCLE(fhz);
	esp->ctick = ESP_TICK(ccf, esp->ccycle);
	esp->neg_defp = ESP_NEG_DEFP(fhz, ccf);
	esp->sync_defp = SYNC_DEFP_SLOW;
}
2296
/* Human-readable chip names, indexed by esp->rev (as reported in
 * scsi_esp_register()).
 */
static const char *esp_chip_names[] = {
	"ESP100",
	"ESP100A",
	"ESP236",
	"FAS236",
	"FAS100A",
	"FAST",
	"FASHME",
};

/* SPI transport template shared by all registered ESP hosts. */
static struct scsi_transport_template *esp_transport_template;
2308
/* Register a fully set-up ESP host with the SCSI midlayer: derive
 * clock parameters, probe the chip revision, initialize software
 * state, reset chip and bus, then add and scan the host.  Returns 0
 * on success or the scsi_add_host() error code.
 */
int scsi_esp_register(struct esp *esp, struct device *dev)
{
	/* Sequential id handed out to each registered host. */
	static int instance;
	int err;

	esp->host->transportt = esp_transport_template;
	esp->host->max_lun = ESP_MAX_LUN;
	esp->host->cmd_per_lun = 2;
	esp->host->unique_id = instance;

	esp_set_clock_params(esp);

	esp_get_revision(esp);

	esp_init_swstate(esp);

	esp_bootup_reset(esp);

	printk(KERN_INFO PFX "esp%u, regs[%1p:%1p] irq[%u]\n",
	       esp->host->unique_id, esp->regs, esp->dma_regs,
	       esp->host->irq);
	printk(KERN_INFO PFX "esp%u is a %s, %u MHz (ccf=%u), SCSI ID %u\n",
	       esp->host->unique_id, esp_chip_names[esp->rev],
	       esp->cfreq / 1000000, esp->cfact, esp->scsi_id);

	/* Let the SCSI bus reset settle. */
	ssleep(esp_bus_reset_settle);

	err = scsi_add_host(esp->host, dev);
	if (err)
		return err;

	/* Only advance the instance id once registration succeeds. */
	instance++;

	scsi_scan_host(esp->host);

	return 0;
}
EXPORT_SYMBOL(scsi_esp_register);
2348
/* Tear down one ESP host: detach it from the SCSI midlayer.  The
 * bus-glue caller remains responsible for freeing esp->host and any
 * DMA/register resources it mapped.
 */
void scsi_esp_unregister(struct esp *esp)
{
	scsi_remove_host(esp->host);
}
EXPORT_SYMBOL(scsi_esp_unregister);
2354
2355static int esp_target_alloc(struct scsi_target *starget)
2356{
2357	struct esp *esp = shost_priv(dev_to_shost(&starget->dev));
2358	struct esp_target_data *tp = &esp->target[starget->id];
2359
2360	tp->starget = starget;
2361
2362	return 0;
2363}
2364
2365static void esp_target_destroy(struct scsi_target *starget)
2366{
2367	struct esp *esp = shost_priv(dev_to_shost(&starget->dev));
2368	struct esp_target_data *tp = &esp->target[starget->id];
2369
2370	tp->starget = NULL;
2371}
2372
2373static int esp_slave_alloc(struct scsi_device *dev)
2374{
2375	struct esp *esp = shost_priv(dev->host);
2376	struct esp_target_data *tp = &esp->target[dev->id];
2377	struct esp_lun_data *lp;
2378
2379	lp = kzalloc(sizeof(*lp), GFP_KERNEL);
2380	if (!lp)
2381		return -ENOMEM;
2382	dev->hostdata = lp;
2383
2384	spi_min_period(tp->starget) = esp->min_period;
2385	spi_max_offset(tp->starget) = 15;
2386
2387	if (esp->flags & ESP_FLAG_WIDE_CAPABLE)
2388		spi_max_width(tp->starget) = 1;
2389	else
2390		spi_max_width(tp->starget) = 0;
2391
2392	return 0;
2393}
2394
2395static int esp_slave_configure(struct scsi_device *dev)
2396{
2397	struct esp *esp = shost_priv(dev->host);
2398	struct esp_target_data *tp = &esp->target[dev->id];
2399	int goal_tags, queue_depth;
2400
2401	goal_tags = 0;
2402
2403	if (dev->tagged_supported) {
2404		goal_tags = ESP_DEFAULT_TAGS;
2405
2406		if (goal_tags > ESP_MAX_TAG)
2407			goal_tags = ESP_MAX_TAG;
2408	}
2409
2410	queue_depth = goal_tags;
2411	if (queue_depth < dev->host->cmd_per_lun)
2412		queue_depth = dev->host->cmd_per_lun;
2413
2414	if (goal_tags) {
2415		scsi_set_tag_type(dev, MSG_ORDERED_TAG);
2416		scsi_activate_tcq(dev, queue_depth);
2417	} else {
2418		scsi_deactivate_tcq(dev, queue_depth);
2419	}
2420	tp->flags |= ESP_TGT_DISCONNECT;
2421
2422	if (!spi_initial_dv(dev->sdev_target))
2423		spi_dv_device(dev);
2424
2425	return 0;
2426}
2427
2428static void esp_slave_destroy(struct scsi_device *dev)
2429{
2430	struct esp_lun_data *lp = dev->hostdata;
2431
2432	kfree(lp);
2433	dev->hostdata = NULL;
2434}
2435
/* Error-handler strategy routine: abort one command.
 *
 * Three cases:
 *  1) cmd is still on esp->queued_cmds (never issued): unlink it and
 *     complete it with DID_ABORT -- trivially SUCCESS.
 *  2) cmd is the currently active command on the bus: queue a one-byte
 *     ABORT_TASK_SET message and assert ATN, then wait (lock dropped)
 *     up to 5 seconds for the message-out path to signal ent->eh_done.
 *  3) cmd is disconnected: we have no way to chase it, return FAILED
 *     and let the midlayer escalate to bus/host reset.
 *
 * Returns SUCCESS or FAILED per the SCSI EH contract.
 */
static int esp_eh_abort_handler(struct scsi_cmnd *cmd)
{
	struct esp *esp = shost_priv(cmd->device->host);
	struct esp_cmd_entry *ent, *tmp;
	struct completion eh_done;
	unsigned long flags;

	/* First pass: dump driver state (active, queued and issued
	 * commands plus the command log) to the console for debugging.
	 */
	spin_lock_irqsave(esp->host->host_lock, flags);
	printk(KERN_ERR PFX "esp%d: Aborting command [%p:%02x]\n",
	       esp->host->unique_id, cmd, cmd->cmnd[0]);
	ent = esp->active_cmd;
	if (ent)
		printk(KERN_ERR PFX "esp%d: Current command [%p:%02x]\n",
		       esp->host->unique_id, ent->cmd, ent->cmd->cmnd[0]);
	list_for_each_entry(ent, &esp->queued_cmds, list) {
		printk(KERN_ERR PFX "esp%d: Queued command [%p:%02x]\n",
		       esp->host->unique_id, ent->cmd, ent->cmd->cmnd[0]);
	}
	list_for_each_entry(ent, &esp->active_cmds, list) {
		printk(KERN_ERR PFX "esp%d: Active command [%p:%02x]\n",
		       esp->host->unique_id, ent->cmd, ent->cmd->cmnd[0]);
	}
	esp_dump_cmd_log(esp);
	spin_unlock_irqrestore(esp->host->host_lock, flags);

	/* Second pass: actually try to abort.  The lock is retaken so
	 * the list walk below sees a consistent snapshot.
	 */
	spin_lock_irqsave(esp->host->host_lock, flags);

	ent = NULL;
	list_for_each_entry(tmp, &esp->queued_cmds, list) {
		if (tmp->cmd == cmd) {
			ent = tmp;
			break;
		}
	}

	if (ent) {
		/* Easiest case, we didn't even issue the command
		 * yet so it is trivial to abort.
		 */
		list_del(&ent->list);

		cmd->result = DID_ABORT << 16;
		cmd->scsi_done(cmd);

		esp_put_ent(esp, ent);

		goto out_success;
	}

	init_completion(&eh_done);

	ent = esp->active_cmd;
	if (ent && ent->cmd == cmd) {
		/* Command is the currently active command on
		 * the bus.  If we already have an output message
		 * pending, no dice.
		 */
		if (esp->msg_out_len)
			goto out_failure;

		/* Send out an abort, encouraging the target to
		 * go to MSGOUT phase by asserting ATN.
		 */
		esp->msg_out[0] = ABORT_TASK_SET;
		esp->msg_out_len = 1;
		ent->eh_done = &eh_done;

		scsi_esp_cmd(esp, ESP_CMD_SATN);
	} else {
		/* The command is disconnected.  This is not easy to
		 * abort.  For now we fail and let the scsi error
		 * handling layer go try a scsi bus reset or host
		 * reset.
		 *
		 * What we could do is put together a scsi command
		 * solely for the purpose of sending an abort message
		 * to the target.  Coming up with all the code to
		 * cook up scsi commands, special case them everywhere,
		 * etc. is for questionable gain and it would be better
		 * if the generic scsi error handling layer could do at
		 * least some of that for us.
		 *
		 * Anyways this is an area for potential future improvement
		 * in this driver.
		 */
		goto out_failure;
	}

	spin_unlock_irqrestore(esp->host->host_lock, flags);

	if (!wait_for_completion_timeout(&eh_done, 5 * HZ)) {
		/* Timed out: detach the on-stack completion under the
		 * lock so the IRQ path cannot later signal a stale
		 * pointer.  NOTE(review): ent is assumed still valid
		 * here because eh_done was not completed -- confirm
		 * against the command completion path.
		 */
		spin_lock_irqsave(esp->host->host_lock, flags);
		ent->eh_done = NULL;
		spin_unlock_irqrestore(esp->host->host_lock, flags);

		return FAILED;
	}

	return SUCCESS;

out_success:
	spin_unlock_irqrestore(esp->host->host_lock, flags);
	return SUCCESS;

out_failure:
	spin_unlock_irqrestore(esp->host->host_lock, flags);
	return FAILED;
}
2544
/* Error-handler strategy routine: reset the SCSI bus.
 *
 * Issues an ESP_CMD_RS chip command under the host lock, then drops
 * the lock and waits for esp->eh_reset to be completed (presumably by
 * the reset-interrupt path -- confirm against the IRQ handler).
 * ESP_FLAG_RESETTING marks the reset as in flight for other code.
 *
 * Returns SUCCESS once the completion fires, FAILED after a 5 second
 * timeout.
 */
static int esp_eh_bus_reset_handler(struct scsi_cmnd *cmd)
{
	struct esp *esp = shost_priv(cmd->device->host);
	struct completion eh_reset;
	unsigned long flags;

	init_completion(&eh_reset);

	spin_lock_irqsave(esp->host->host_lock, flags);

	esp->eh_reset = &eh_reset;

	esp->flags |= ESP_FLAG_RESETTING;
	scsi_esp_cmd(esp, ESP_CMD_RS);

	spin_unlock_irqrestore(esp->host->host_lock, flags);

	/* Give devices time to settle after the bus reset. */
	ssleep(esp_bus_reset_settle);

	if (!wait_for_completion_timeout(&eh_reset, 5 * HZ)) {
		/* Timed out: detach the on-stack completion under the
		 * lock so it cannot be signalled after we return.
		 */
		spin_lock_irqsave(esp->host->host_lock, flags);
		esp->eh_reset = NULL;
		spin_unlock_irqrestore(esp->host->host_lock, flags);

		return FAILED;
	}

	return SUCCESS;
}
2574
/* All bets are off, reset the entire device.  */
static int esp_eh_host_reset_handler(struct scsi_cmnd *cmd)
{
	struct esp *esp = shost_priv(cmd->device->host);
	unsigned long flags;

	/* Under the host lock: redo the power-on style chip reset and
	 * then clean up all driver command state.
	 */
	spin_lock_irqsave(esp->host->host_lock, flags);
	esp_bootup_reset(esp);
	esp_reset_cleanup(esp);
	spin_unlock_irqrestore(esp->host->host_lock, flags);

	/* The bootup reset presumably also resets the SCSI bus (same
	 * settle delay as the bus-reset path) -- TODO confirm.
	 */
	ssleep(esp_bus_reset_settle);

	return SUCCESS;
}
2590
/* Midlayer .info hook: return a short constant driver identifier. */
static const char *esp_info(struct Scsi_Host *host)
{
	static const char info_str[] = "esp";

	return info_str;
}
2595
/* SCSI host template shared by all ESP bus-glue drivers.  Exported so
 * the glue modules can reference it when allocating their Scsi_Host.
 */
struct scsi_host_template scsi_esp_template = {
	.module			= THIS_MODULE,
	.name			= "esp",
	.info			= esp_info,
	.queuecommand		= esp_queuecommand,
	.target_alloc		= esp_target_alloc,
	.target_destroy		= esp_target_destroy,
	.slave_alloc		= esp_slave_alloc,
	.slave_configure	= esp_slave_configure,
	.slave_destroy		= esp_slave_destroy,
	.eh_abort_handler	= esp_eh_abort_handler,
	.eh_bus_reset_handler	= esp_eh_bus_reset_handler,
	.eh_host_reset_handler	= esp_eh_host_reset_handler,
	.can_queue		= 7,
	.this_id		= 7,	/* default initiator SCSI ID */
	.sg_tablesize		= SG_ALL,
	.use_clustering		= ENABLE_CLUSTERING,
	.max_sectors		= 0xffff,
	/* We already ssleep() after resets ourselves; tell the midlayer
	 * not to add its own settle delay.
	 */
	.skip_settle_delay	= 1,
};
EXPORT_SYMBOL(scsi_esp_template);
2617
2618static void esp_get_signalling(struct Scsi_Host *host)
2619{
2620	struct esp *esp = shost_priv(host);
2621	enum spi_signal_type type;
2622
2623	if (esp->flags & ESP_FLAG_DIFFERENTIAL)
2624		type = SPI_SIGNAL_HVD;
2625	else
2626		type = SPI_SIGNAL_SE;
2627
2628	spi_signalling(host) = type;
2629}
2630
2631static void esp_set_offset(struct scsi_target *target, int offset)
2632{
2633	struct Scsi_Host *host = dev_to_shost(target->dev.parent);
2634	struct esp *esp = shost_priv(host);
2635	struct esp_target_data *tp = &esp->target[target->id];
2636
2637	if (esp->flags & ESP_FLAG_DISABLE_SYNC)
2638		tp->nego_goal_offset = 0;
2639	else
2640		tp->nego_goal_offset = offset;
2641	tp->flags |= ESP_TGT_CHECK_NEGO;
2642}
2643
2644static void esp_set_period(struct scsi_target *target, int period)
2645{
2646	struct Scsi_Host *host = dev_to_shost(target->dev.parent);
2647	struct esp *esp = shost_priv(host);
2648	struct esp_target_data *tp = &esp->target[target->id];
2649
2650	tp->nego_goal_period = period;
2651	tp->flags |= ESP_TGT_CHECK_NEGO;
2652}
2653
2654static void esp_set_width(struct scsi_target *target, int width)
2655{
2656	struct Scsi_Host *host = dev_to_shost(target->dev.parent);
2657	struct esp *esp = shost_priv(host);
2658	struct esp_target_data *tp = &esp->target[target->id];
2659
2660	tp->nego_goal_width = (width ? 1 : 0);
2661	tp->flags |= ESP_TGT_CHECK_NEGO;
2662}
2663
/* SPI transport attribute callbacks; the show_* flags expose the
 * corresponding sysfs attributes read-only views as well.
 */
static struct spi_function_template esp_transport_ops = {
	.set_offset		= esp_set_offset,
	.show_offset		= 1,
	.set_period		= esp_set_period,
	.show_period		= 1,
	.set_width		= esp_set_width,
	.show_width		= 1,
	.get_signalling		= esp_get_signalling,
};
2673
2674static int __init esp_init(void)
2675{
2676	BUILD_BUG_ON(sizeof(struct scsi_pointer) <
2677		     sizeof(struct esp_cmd_priv));
2678
2679	esp_transport_template = spi_attach_transport(&esp_transport_ops);
2680	if (!esp_transport_template)
2681		return -ENODEV;
2682
2683	return 0;
2684}
2685
/* Module exit: release the SPI transport template taken in esp_init(). */
static void __exit esp_exit(void)
{
	spi_release_transport(esp_transport_template);
}
2690
MODULE_DESCRIPTION("ESP SCSI driver core");
MODULE_AUTHOR("David S. Miller (davem@davemloft.net)");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);

/* Seconds to wait after a SCSI bus reset before talking to targets;
 * consumed via ssleep() in the register/reset paths (default 3).
 */
module_param(esp_bus_reset_settle, int, 0);
MODULE_PARM_DESC(esp_bus_reset_settle,
		 "ESP scsi bus reset delay in seconds");

/* Bitmask enabling the esp_log_*() debug printks; bit meanings match
 * the ESP_DEBUG_* defines at the top of this file.
 */
module_param(esp_debug, int, 0);
MODULE_PARM_DESC(esp_debug,
"ESP bitmapped debugging message enable value:\n"
"	0x00000001	Log interrupt events\n"
"	0x00000002	Log scsi commands\n"
"	0x00000004	Log resets\n"
"	0x00000008	Log message in events\n"
"	0x00000010	Log message out events\n"
"	0x00000020	Log command completion\n"
"	0x00000040	Log disconnects\n"
"	0x00000080	Log data start\n"
"	0x00000100	Log data done\n"
"	0x00000200	Log reconnects\n"
"	0x00000400	Log auto-sense data\n"
);

module_init(esp_init);
module_exit(esp_exit);
2718