esiop.c revision 1.8
1/*	$NetBSD: esiop.c,v 1.8 2002/04/24 09:43:14 bouyer Exp $	*/
2
3/*
4 * Copyright (c) 2002 Manuel Bouyer.
5 *
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions
8 * are met:
9 * 1. Redistributions of source code must retain the above copyright
10 *    notice, this list of conditions and the following disclaimer.
11 * 2. Redistributions in binary form must reproduce the above copyright
12 *    notice, this list of conditions and the following disclaimer in the
13 *    documentation and/or other materials provided with the distribution.
14 * 3. All advertising materials mentioning features or use of this software
15 *    must display the following acknowledgement:
16 *	This product includes software developed by Manuel Bouyer.
17 * 4. The name of the author may not be used to endorse or promote products
18 *    derived from this software without specific prior written permission.
19 *
20 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
21 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
22 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
23 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
24 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
25 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
26 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
27 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
28 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
29 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
30 *
31 */
32
33/* SYM53c7/8xx PCI-SCSI I/O Processors driver */
34
35#include <sys/cdefs.h>
36__KERNEL_RCSID(0, "$NetBSD: esiop.c,v 1.8 2002/04/24 09:43:14 bouyer Exp $");
37
38#include <sys/param.h>
39#include <sys/systm.h>
40#include <sys/device.h>
41#include <sys/malloc.h>
42#include <sys/buf.h>
43#include <sys/kernel.h>
44
45#include <uvm/uvm_extern.h>
46
47#include <machine/endian.h>
48#include <machine/bus.h>
49
50#include <dev/microcode/siop/esiop.out>
51
52#include <dev/scsipi/scsi_all.h>
53#include <dev/scsipi/scsi_message.h>
54#include <dev/scsipi/scsipi_all.h>
55
56#include <dev/scsipi/scsiconf.h>
57
58#include <dev/ic/siopreg.h>
59#include <dev/ic/siopvar_common.h>
60#include <dev/ic/esiopvar.h>
61
62#include "opt_siop.h"
63
64#ifndef DEBUG
65#undef DEBUG
66#endif
67#undef SIOP_DEBUG
68#undef SIOP_DEBUG_DR
69#undef SIOP_DEBUG_INTR
70#undef SIOP_DEBUG_SCHED
71#undef DUMP_SCRIPT
72
73#define SIOP_STATS
74
75#ifndef SIOP_DEFAULT_TARGET
76#define SIOP_DEFAULT_TARGET 7
77#endif
78
79/* number of cmd descriptors per block */
80#define SIOP_NCMDPB (PAGE_SIZE / sizeof(struct esiop_xfer))
81
82void	esiop_reset __P((struct esiop_softc *));
83void	esiop_checkdone __P((struct esiop_softc *));
84void	esiop_handle_reset __P((struct esiop_softc *));
85void	esiop_scsicmd_end __P((struct esiop_cmd *));
86void	esiop_unqueue __P((struct esiop_softc *, int, int));
87int	esiop_handle_qtag_reject __P((struct esiop_cmd *));
88static void	esiop_start __P((struct esiop_softc *, struct esiop_cmd *));
89void 	esiop_timeout __P((void *));
90int	esiop_scsicmd __P((struct scsipi_xfer *));
91void	esiop_scsipi_request __P((struct scsipi_channel *,
92			scsipi_adapter_req_t, void *));
93void	esiop_dump_script __P((struct esiop_softc *));
94void	esiop_morecbd __P((struct esiop_softc *));
95void	esiop_moretagtbl __P((struct esiop_softc *));
96void	siop_add_reselsw __P((struct esiop_softc *, int));
97struct esiop_cmd * esiop_cmd_find __P((struct esiop_softc *, int, u_int32_t));
98void	esiop_target_register __P((struct esiop_softc *, u_int32_t));
99
100static int nintr = 0;
101
102#ifdef SIOP_STATS
103static int esiop_stat_intr = 0;
104static int esiop_stat_intr_shortxfer = 0;
105static int esiop_stat_intr_sdp = 0;
106static int esiop_stat_intr_done = 0;
107static int esiop_stat_intr_xferdisc = 0;
108static int esiop_stat_intr_lunresel = 0;
109static int esiop_stat_intr_qfull = 0;
110void esiop_printstats __P((void));
111#define INCSTAT(x) x++
112#else
113#define INCSTAT(x)
114#endif
115
116static __inline__ void esiop_script_sync __P((struct esiop_softc *, int));
117static __inline__ void
118esiop_script_sync(sc, ops)
119	struct esiop_softc *sc;
120	int ops;
121{
122	if ((sc->sc_c.features & SF_CHIP_RAM) == 0)
123		bus_dmamap_sync(sc->sc_c.sc_dmat, sc->sc_c.sc_scriptdma, 0,
124		    PAGE_SIZE, ops);
125}
126
127static __inline__ u_int32_t esiop_script_read __P((struct esiop_softc *, u_int));
128static __inline__ u_int32_t
129esiop_script_read(sc, offset)
130	struct esiop_softc *sc;
131	u_int offset;
132{
133	if (sc->sc_c.features & SF_CHIP_RAM) {
134		return bus_space_read_4(sc->sc_c.sc_ramt, sc->sc_c.sc_ramh,
135		    offset * 4);
136	} else {
137		return le32toh(sc->sc_c.sc_script[offset]);
138	}
139}
140
141static __inline__ void esiop_script_write __P((struct esiop_softc *, u_int,
142	u_int32_t));
143static __inline__ void
144esiop_script_write(sc, offset, val)
145	struct esiop_softc *sc;
146	u_int offset;
147	u_int32_t val;
148{
149	if (sc->sc_c.features & SF_CHIP_RAM) {
150		bus_space_write_4(sc->sc_c.sc_ramt, sc->sc_c.sc_ramh,
151		    offset * 4, val);
152	} else {
153		sc->sc_c.sc_script[offset] = htole32(val);
154	}
155}
156
157void
158esiop_attach(sc)
159	struct esiop_softc *sc;
160{
161	if (siop_common_attach(&sc->sc_c) != 0 )
162		return;
163
164	TAILQ_INIT(&sc->free_list);
165	TAILQ_INIT(&sc->cmds);
166	TAILQ_INIT(&sc->free_tagtbl);
167	TAILQ_INIT(&sc->tag_tblblk);
168	sc->sc_currschedslot = 0;
169#ifdef SIOP_DEBUG
170	printf("%s: script size = %d, PHY addr=0x%x, VIRT=%p\n",
171	    sc->sc_c.sc_dev.dv_xname, (int)sizeof(esiop_script),
172	    (u_int32_t)sc->sc_c.sc_scriptaddr, sc->sc_c.sc_script);
173#endif
174
175	sc->sc_c.sc_adapt.adapt_max_periph = ESIOP_NTAG;
176	sc->sc_c.sc_adapt.adapt_request = esiop_scsipi_request;
177
178	/* Do a bus reset, so that devices fall back to narrow/async */
179	siop_resetbus(&sc->sc_c);
180	/*
181	 * siop_reset() will reset the chip, thus clearing pending interrupts
182	 */
183	esiop_reset(sc);
184#ifdef DUMP_SCRIPT
185	esiop_dump_script(sc);
186#endif
187
188	config_found((struct device*)sc, &sc->sc_c.sc_chan, scsiprint);
189}
190
191void
192esiop_reset(sc)
193	struct esiop_softc *sc;
194{
195	int i, j;
196	u_int32_t addr;
197	u_int32_t msgin_addr;
198
199	siop_common_reset(&sc->sc_c);
200
201	/*
202	 * we copy the script at the beggining of RAM. Then there is 8 bytes
203	 * for messages in.
204	 */
205	sc->sc_free_offset = sizeof(esiop_script) / sizeof(esiop_script[0]);
206	msgin_addr =
207	    sc->sc_free_offset * sizeof(u_int32_t) + sc->sc_c.sc_scriptaddr;
208	sc->sc_free_offset += 2;
209	/* then we have the scheduler ring */
210	sc->sc_shedoffset = sc->sc_free_offset;
211	sc->sc_free_offset += A_ncmd_slots * CMD_SLOTSIZE;
212	/* then the targets DSA table */
213	sc->sc_target_table_offset = sc->sc_free_offset;
214	sc->sc_free_offset += sc->sc_c.sc_chan.chan_ntargets;
215	/* copy and patch the script */
216	if (sc->sc_c.features & SF_CHIP_RAM) {
217		bus_space_write_region_4(sc->sc_c.sc_ramt, sc->sc_c.sc_ramh, 0,
218		    esiop_script,
219		    sizeof(esiop_script) / sizeof(esiop_script[0]));
220		for (j = 0; j <
221		    (sizeof(E_tlq_offset_Used) / sizeof(E_tlq_offset_Used[0]));
222		    j++) {
223			bus_space_write_4(sc->sc_c.sc_ramt, sc->sc_c.sc_ramh,
224			    E_tlq_offset_Used[j] * 4,
225			    sizeof(struct siop_common_xfer));
226		}
227		for (j = 0; j <
228		    (sizeof(E_abs_msgin2_Used) / sizeof(E_abs_msgin2_Used[0]));
229		    j++) {
230			bus_space_write_4(sc->sc_c.sc_ramt, sc->sc_c.sc_ramh,
231			    E_abs_msgin2_Used[j] * 4, msgin_addr);
232		}
233
234		if (sc->sc_c.features & SF_CHIP_LED0) {
235			bus_space_write_region_4(sc->sc_c.sc_ramt,
236			    sc->sc_c.sc_ramh,
237			    Ent_led_on1, esiop_led_on,
238			    sizeof(esiop_led_on) / sizeof(esiop_led_on[0]));
239			bus_space_write_region_4(sc->sc_c.sc_ramt,
240			    sc->sc_c.sc_ramh,
241			    Ent_led_on2, esiop_led_on,
242			    sizeof(esiop_led_on) / sizeof(esiop_led_on[0]));
243			bus_space_write_region_4(sc->sc_c.sc_ramt,
244			    sc->sc_c.sc_ramh,
245			    Ent_led_off, esiop_led_off,
246			    sizeof(esiop_led_off) / sizeof(esiop_led_off[0]));
247		}
248	} else {
249		for (j = 0;
250		    j < (sizeof(esiop_script) / sizeof(esiop_script[0])); j++) {
251			sc->sc_c.sc_script[j] = htole32(esiop_script[j]);
252		}
253		for (j = 0; j <
254		    (sizeof(E_tlq_offset_Used) / sizeof(E_tlq_offset_Used[0]));
255		    j++) {
256			sc->sc_c.sc_script[E_tlq_offset_Used[j]] =
257			    htole32(sizeof(struct siop_common_xfer));
258		}
259		for (j = 0; j <
260		    (sizeof(E_abs_msgin2_Used) / sizeof(E_abs_msgin2_Used[0]));
261		    j++) {
262			sc->sc_c.sc_script[E_abs_msgin2_Used[j]] =
263			    htole32(msgin_addr);
264		}
265
266		if (sc->sc_c.features & SF_CHIP_LED0) {
267			for (j = 0; j < (sizeof(esiop_led_on) /
268			    sizeof(esiop_led_on[0])); j++)
269				sc->sc_c.sc_script[
270				    Ent_led_on1 / sizeof(esiop_led_on[0]) + j
271				    ] = htole32(esiop_led_on[j]);
272			for (j = 0; j < (sizeof(esiop_led_on) /
273			    sizeof(esiop_led_on[0])); j++)
274				sc->sc_c.sc_script[
275				    Ent_led_on2 / sizeof(esiop_led_on[0]) + j
276				    ] = htole32(esiop_led_on[j]);
277			for (j = 0; j < (sizeof(esiop_led_off) /
278			    sizeof(esiop_led_off[0])); j++)
279				sc->sc_c.sc_script[
280				   Ent_led_off / sizeof(esiop_led_off[0]) + j
281				   ] = htole32(esiop_led_off[j]);
282		}
283	}
284	/* get base of scheduler ring */
285	addr = sc->sc_c.sc_scriptaddr + sc->sc_shedoffset * sizeof(u_int32_t);
286	/* init scheduler */
287	for (i = 0; i < A_ncmd_slots; i++) {
288		esiop_script_write(sc,
289		    sc->sc_shedoffset + i * CMD_SLOTSIZE, A_f_cmd_free);
290	}
291	sc->sc_currschedslot = 0;
292	bus_space_write_1(sc->sc_c.sc_rt, sc->sc_c.sc_rh, SIOP_SCRATCHE, 0);
293	bus_space_write_4(sc->sc_c.sc_rt, sc->sc_c.sc_rh, SIOP_SCRATCHD, addr);
294	/*
295	 * 0x78000000 is a 'move data8 to reg'. data8 is the second
296	 * octet, reg offset is the third.
297	 */
298	esiop_script_write(sc, Ent_cmdr0 / 4,
299	    0x78640000 | ((addr & 0x000000ff) <<  8));
300	esiop_script_write(sc, Ent_cmdr1 / 4,
301	    0x78650000 | ((addr & 0x0000ff00)      ));
302	esiop_script_write(sc, Ent_cmdr2 / 4,
303	    0x78660000 | ((addr & 0x00ff0000) >>  8));
304	esiop_script_write(sc, Ent_cmdr3 / 4,
305	    0x78670000 | ((addr & 0xff000000) >> 16));
306	/* set flags */
307	bus_space_write_4(sc->sc_c.sc_rt, sc->sc_c.sc_rh, SIOP_SCRATCHC, 0);
308	/* write pointer of base of target DSA table */
309	addr = (sc->sc_target_table_offset * sizeof(u_int32_t)) +
310	    sc->sc_c.sc_scriptaddr;
311	esiop_script_write(sc, (Ent_load_targtable / 4) + 0,
312	    esiop_script_read(sc,(Ent_load_targtable / 4) + 0) |
313	    ((addr & 0x000000ff) <<  8));
314	esiop_script_write(sc, (Ent_load_targtable / 4) + 2,
315	    esiop_script_read(sc,(Ent_load_targtable / 4) + 2) |
316	    ((addr & 0x0000ff00)      ));
317	esiop_script_write(sc, (Ent_load_targtable / 4) + 4,
318	    esiop_script_read(sc,(Ent_load_targtable / 4) + 4) |
319	    ((addr & 0x00ff0000) >>  8));
320	esiop_script_write(sc, (Ent_load_targtable / 4) + 6,
321	    esiop_script_read(sc,(Ent_load_targtable / 4) + 6) |
322	    ((addr & 0xff000000) >> 16));
323#ifdef SIOP_DEBUG
324	printf("%s: target table offset %d free offset %d\n",
325	    sc->sc_c.sc_dev.dv_xname, sc->sc_target_table_offset,
326	    sc->sc_free_offset);
327#endif
328
329	/* register existing targets */
330	for (i = 0; i < sc->sc_c.sc_chan.chan_ntargets; i++) {
331		if (sc->sc_c.targets[i])
332			esiop_target_register(sc, i);
333	}
334	/* start script */
335	if ((sc->sc_c.features & SF_CHIP_RAM) == 0) {
336		bus_dmamap_sync(sc->sc_c.sc_dmat, sc->sc_c.sc_scriptdma, 0,
337		    PAGE_SIZE, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
338	}
339	bus_space_write_4(sc->sc_c.sc_rt, sc->sc_c.sc_rh, SIOP_DSP,
340	    sc->sc_c.sc_scriptaddr + Ent_reselect);
341}
342
343#if 0
344#define CALL_SCRIPT(ent) do {\
345	printf ("start script DSA 0x%lx DSP 0x%lx\n", \
346	    esiop_cmd->cmd_c.dsa, \
347	    sc->sc_c.sc_scriptaddr + ent); \
348bus_space_write_4(sc->sc_c.sc_rt, sc->sc_c.sc_rh, SIOP_DSP, sc->sc_c.sc_scriptaddr + ent); \
349} while (0)
350#else
351#define CALL_SCRIPT(ent) do {\
352bus_space_write_4(sc->sc_c.sc_rt, sc->sc_c.sc_rh, SIOP_DSP, sc->sc_c.sc_scriptaddr + ent); \
353} while (0)
354#endif
355
356int
357esiop_intr(v)
358	void *v;
359{
360	struct esiop_softc *sc = v;
361	struct esiop_target *esiop_target;
362	struct esiop_cmd *esiop_cmd;
363	struct esiop_lun *esiop_lun;
364	struct scsipi_xfer *xs;
365	int istat, sist, sstat1, dstat;
366	u_int32_t irqcode;
367	int need_reset = 0;
368	int offset, target, lun, tag;
369	u_int32_t tflags;
370	u_int32_t addr;
371	int freetarget = 0;
372	int restart = 0;
373	int slot;
374	int retval = 0;
375
376again:
377	istat = bus_space_read_1(sc->sc_c.sc_rt, sc->sc_c.sc_rh, SIOP_ISTAT);
378	if ((istat & (ISTAT_INTF | ISTAT_DIP | ISTAT_SIP)) == 0) {
379		if (istat & ISTAT_SEM) {
380			bus_space_write_1(sc->sc_c.sc_rt, sc->sc_c.sc_rh,
381			    SIOP_ISTAT, (istat & ~ISTAT_SEM));
382			esiop_checkdone(sc);
383		}
384		return retval;
385	}
386	retval = 1;
387	nintr++;
388	if (nintr > 100) {
389		panic("esiop: intr loop");
390	}
391	INCSTAT(esiop_stat_intr);
392	if (istat & ISTAT_INTF) {
393		bus_space_write_1(sc->sc_c.sc_rt, sc->sc_c.sc_rh,
394		    SIOP_ISTAT, ISTAT_INTF);
395		esiop_checkdone(sc);
396		goto again;
397	}
398	/* get CMD from T/L/Q */
399	tflags = bus_space_read_4(sc->sc_c.sc_rt, sc->sc_c.sc_rh,
400	    SIOP_SCRATCHC);
401#ifdef SIOP_DEBUG_INTR
402		printf("interrupt, istat=0x%x tflags=0x%x "
403		    "DSA=0x%x DSP=0x%lx\n", istat, tflags,
404		    bus_space_read_4(sc->sc_c.sc_rt, sc->sc_c.sc_rh, SIOP_DSA),
405		    (u_long)(bus_space_read_4(sc->sc_c.sc_rt, sc->sc_c.sc_rh,
406			SIOP_DSP) -
407		    sc->sc_c.sc_scriptaddr));
408#endif
409	target = (tflags & A_f_c_target) ? ((tflags >> 8) & 0xff) : -1;
410	if (target > sc->sc_c.sc_chan.chan_ntargets) target = -1;
411	lun = (tflags & A_f_c_lun) ? ((tflags >> 16) & 0xff) : -1;
412	if (lun > sc->sc_c.sc_chan.chan_nluns) lun = -1;
413	tag = (tflags & A_f_c_tag) ? ((tflags >> 24) & 0xff) : -1;
414
415	if (target >= 0 && lun >= 0) {
416		esiop_target = (struct esiop_target *)sc->sc_c.targets[target];
417		if (esiop_target == NULL) {
418			printf("esiop_target (target %d) not valid\n", target);
419			goto none;
420		}
421		esiop_lun = esiop_target->esiop_lun[lun];
422		if (esiop_lun == NULL) {
423			printf("esiop_lun (target %d lun %d) not valid\n",
424			    target, lun);
425			goto none;
426		}
427		esiop_cmd =
428		    (tag >= 0) ? esiop_lun->tactive[tag] : esiop_lun->active;
429		if (esiop_cmd == NULL) {
430			printf("esiop_cmd (target %d lun %d tag %d) not valid\n",
431			    target, lun, tag);
432			goto none;
433		}
434		xs = esiop_cmd->cmd_c.xs;
435#ifdef DIAGNOSTIC
436		if (esiop_cmd->cmd_c.status != CMDST_ACTIVE) {
437 			printf("esiop_cmd (target %d lun %d) "
438			    "not active (%d)\n", target, lun,
439			    esiop_cmd->cmd_c.status);
440			goto none;
441		}
442#endif
443		esiop_table_sync(esiop_cmd,
444		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
445	} else {
446none:
447		xs = NULL;
448		esiop_target = NULL;
449		esiop_lun = NULL;
450		esiop_cmd = NULL;
451	}
452	if (istat & ISTAT_DIP) {
453		dstat = bus_space_read_1(sc->sc_c.sc_rt, sc->sc_c.sc_rh,
454		    SIOP_DSTAT);
455		if (dstat & DSTAT_SSI) {
456			printf("single step dsp 0x%08x dsa 0x08%x\n",
457			    (int)(bus_space_read_4(sc->sc_c.sc_rt,
458			    sc->sc_c.sc_rh, SIOP_DSP) -
459			    sc->sc_c.sc_scriptaddr),
460			    bus_space_read_4(sc->sc_c.sc_rt, sc->sc_c.sc_rh,
461				SIOP_DSA));
462			if ((dstat & ~(DSTAT_DFE | DSTAT_SSI)) == 0 &&
463			    (istat & ISTAT_SIP) == 0) {
464				bus_space_write_1(sc->sc_c.sc_rt,
465				    sc->sc_c.sc_rh, SIOP_DCNTL,
466				    bus_space_read_1(sc->sc_c.sc_rt,
467				    sc->sc_c.sc_rh, SIOP_DCNTL) | DCNTL_STD);
468			}
469			return 1;
470		}
471		if (dstat & ~(DSTAT_SIR | DSTAT_DFE | DSTAT_SSI)) {
472		printf("%s: DMA IRQ:", sc->sc_c.sc_dev.dv_xname);
473		if (dstat & DSTAT_IID)
474			printf(" Illegal instruction");
475		if (dstat & DSTAT_ABRT)
476			printf(" abort");
477		if (dstat & DSTAT_BF)
478			printf(" bus fault");
479		if (dstat & DSTAT_MDPE)
480			printf(" parity");
481		if (dstat & DSTAT_DFE)
482			printf(" dma fifo empty");
483		printf(", DSP=0x%x DSA=0x%x: ",
484		    (int)(bus_space_read_4(sc->sc_c.sc_rt, sc->sc_c.sc_rh,
485			SIOP_DSP) - sc->sc_c.sc_scriptaddr),
486		    bus_space_read_4(sc->sc_c.sc_rt, sc->sc_c.sc_rh, SIOP_DSA));
487		if (esiop_cmd)
488			printf("last msg_in=0x%x status=0x%x\n",
489			    esiop_cmd->cmd_tables->msg_in[0],
490			    le32toh(esiop_cmd->cmd_tables->status));
491		else
492			printf(" current T/L/Q invalid\n");
493		need_reset = 1;
494		}
495	}
496	if (istat & ISTAT_SIP) {
497		if (istat & ISTAT_DIP)
498			delay(10);
499		/*
500		 * Can't read sist0 & sist1 independantly, or we have to
501		 * insert delay
502		 */
503		sist = bus_space_read_2(sc->sc_c.sc_rt, sc->sc_c.sc_rh,
504		    SIOP_SIST0);
505		sstat1 = bus_space_read_1(sc->sc_c.sc_rt, sc->sc_c.sc_rh,
506		    SIOP_SSTAT1);
507#ifdef SIOP_DEBUG_INTR
508		printf("scsi interrupt, sist=0x%x sstat1=0x%x "
509		    "DSA=0x%x DSP=0x%lx\n", sist, sstat1,
510		    bus_space_read_4(sc->sc_c.sc_rt, sc->sc_c.sc_rh, SIOP_DSA),
511		    (u_long)(bus_space_read_4(sc->sc_c.sc_rt, sc->sc_c.sc_rh,
512			SIOP_DSP) -
513		    sc->sc_c.sc_scriptaddr));
514#endif
515		if (sist & SIST0_RST) {
516			esiop_handle_reset(sc);
517			/* no table to flush here */
518			return 1;
519		}
520		if (sist & SIST0_SGE) {
521			if (esiop_cmd)
522				scsipi_printaddr(xs->xs_periph);
523			else
524				printf("%s:", sc->sc_c.sc_dev.dv_xname);
525			printf("scsi gross error\n");
526			if (esiop_target)
527				esiop_target->target_c.flags &= ~TARF_DT;
528			goto reset;
529		}
530		if ((sist & SIST0_MA) && need_reset == 0) {
531			if (esiop_cmd) {
532				int scratchc0;
533				dstat = bus_space_read_1(sc->sc_c.sc_rt,
534				    sc->sc_c.sc_rh, SIOP_DSTAT);
535				/*
536				 * first restore DSA, in case we were in a S/G
537				 * operation.
538				 */
539				bus_space_write_4(sc->sc_c.sc_rt,
540				    sc->sc_c.sc_rh,
541				    SIOP_DSA, esiop_cmd->cmd_c.dsa);
542				scratchc0 = bus_space_read_1(sc->sc_c.sc_rt,
543				    sc->sc_c.sc_rh, SIOP_SCRATCHC);
544				switch (sstat1 & SSTAT1_PHASE_MASK) {
545				case SSTAT1_PHASE_STATUS:
546				/*
547				 * previous phase may be aborted for any reason
548				 * ( for example, the target has less data to
549				 * transfer than requested). Just go to status
550				 * and the command should terminate.
551				 */
552					INCSTAT(esiop_stat_intr_shortxfer);
553					if ((dstat & DSTAT_DFE) == 0)
554						siop_clearfifo(&sc->sc_c);
555					/* no table to flush here */
556					CALL_SCRIPT(Ent_status);
557					return 1;
558				case SSTAT1_PHASE_MSGIN:
559					/*
560					 * target may be ready to disconnect
561					 * Save data pointers just in case.
562					 */
563					INCSTAT(esiop_stat_intr_xferdisc);
564					if (scratchc0 & A_f_c_data)
565						siop_sdp(&esiop_cmd->cmd_c);
566					else if ((dstat & DSTAT_DFE) == 0)
567						siop_clearfifo(&sc->sc_c);
568					bus_space_write_1(sc->sc_c.sc_rt,
569					    sc->sc_c.sc_rh, SIOP_SCRATCHC,
570					    scratchc0 & ~A_f_c_data);
571					esiop_table_sync(esiop_cmd,
572					    BUS_DMASYNC_PREREAD |
573					    BUS_DMASYNC_PREWRITE);
574					CALL_SCRIPT(Ent_msgin);
575					return 1;
576				}
577				printf("%s: unexpected phase mismatch %d\n",
578				    sc->sc_c.sc_dev.dv_xname,
579				    sstat1 & SSTAT1_PHASE_MASK);
580			} else {
581				printf("%s: phase mismatch without command\n",
582				    sc->sc_c.sc_dev.dv_xname);
583			}
584			need_reset = 1;
585		}
586		if (sist & SIST0_PAR) {
587			/* parity error, reset */
588			if (esiop_cmd)
589				scsipi_printaddr(xs->xs_periph);
590			else
591				printf("%s:", sc->sc_c.sc_dev.dv_xname);
592			printf("parity error\n");
593			if (esiop_target)
594				esiop_target->target_c.flags &= ~TARF_DT;
595			goto reset;
596		}
597		if ((sist & (SIST1_STO << 8)) && need_reset == 0) {
598			/* selection time out, assume there's no device here */
599			/*
600			 * SCRATCHC has not been loaded yet, we have to find
601			 * params by ourselve. scratchE0 should point to
602			 * the slot.
603			 */
604			slot = bus_space_read_1(sc->sc_c.sc_rt,
605			    sc->sc_c.sc_rh, SIOP_SCRATCHE);
606			esiop_script_sync(sc,
607			    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
608			target = bus_space_read_1(sc->sc_c.sc_rt,
609			    sc->sc_c.sc_rh, SIOP_SDID);
610			esiop_cmd = esiop_cmd_find(sc, target,
611			    esiop_script_read(sc,
612			    sc->sc_shedoffset + slot * CMD_SLOTSIZE) & ~0x3);
613			/*
614			 * mark this slot as free, and advance to next slot
615			 */
616			esiop_script_write(sc,
617			    sc->sc_shedoffset + slot * CMD_SLOTSIZE,
618			    A_f_cmd_free);
619			addr = bus_space_read_4(sc->sc_c.sc_rt,
620				    sc->sc_c.sc_rh, SIOP_SCRATCHD);
621			if (slot < (A_ncmd_slots - 1)) {
622				bus_space_write_1(sc->sc_c.sc_rt,
623				    sc->sc_c.sc_rh, SIOP_SCRATCHE, slot + 1);
624				addr = addr + sizeof(struct esiop_slot);
625			} else {
626				bus_space_write_1(sc->sc_c.sc_rt,
627				    sc->sc_c.sc_rh, SIOP_SCRATCHE, 0);
628				addr = sc->sc_c.sc_scriptaddr +
629				    sc->sc_shedoffset * sizeof(u_int32_t);
630			}
631			bus_space_write_4(sc->sc_c.sc_rt, sc->sc_c.sc_rh,
632			    SIOP_SCRATCHD, addr);
633			esiop_script_sync(sc,
634			    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
635			if (esiop_cmd) {
636				xs = esiop_cmd->cmd_c.xs;
637				esiop_target = (struct esiop_target *)
638				    esiop_cmd->cmd_c.siop_target;
639				lun = xs->xs_periph->periph_lun;
640				tag = esiop_cmd->cmd_c.tag;
641				esiop_lun = esiop_target->esiop_lun[lun];
642				esiop_cmd->cmd_c.status = CMDST_DONE;
643				xs->error = XS_SELTIMEOUT;
644				freetarget = 1;
645				goto end;
646			} else {
647				printf("%s: selection timeout without "
648				    "command, target %d (sdid 0x%x), "
649				    "slot %d\n",
650				    sc->sc_c.sc_dev.dv_xname, target,
651				    bus_space_read_1(sc->sc_c.sc_rt,
652				    sc->sc_c.sc_rh, SIOP_SDID), slot);
653				need_reset = 1;
654			}
655		}
656		if (sist & SIST0_UDC) {
657			/*
658			 * unexpected disconnect. Usually the target signals
659			 * a fatal condition this way. Attempt to get sense.
660			 */
661			 if (esiop_cmd) {
662				esiop_cmd->cmd_tables->status =
663				    htole32(SCSI_CHECK);
664				goto end;
665			}
666			printf("%s: unexpected disconnect without "
667			    "command\n", sc->sc_c.sc_dev.dv_xname);
668			goto reset;
669		}
670		if (sist & (SIST1_SBMC << 8)) {
671			/* SCSI bus mode change */
672			if (siop_modechange(&sc->sc_c) == 0 || need_reset == 1)
673				goto reset;
674			if ((istat & ISTAT_DIP) && (dstat & DSTAT_SIR)) {
675				/*
676				 * we have a script interrupt, it will
677				 * restart the script.
678				 */
679				goto scintr;
680			}
681			/*
682			 * else we have to restart it ourselve, at the
683			 * interrupted instruction.
684			 */
685			bus_space_write_4(sc->sc_c.sc_rt, sc->sc_c.sc_rh,
686			    SIOP_DSP,
687			    bus_space_read_4(sc->sc_c.sc_rt, sc->sc_c.sc_rh,
688			    SIOP_DSP) - 8);
689			return 1;
690		}
691		/* Else it's an unhandled exeption (for now). */
692		printf("%s: unhandled scsi interrupt, sist=0x%x sstat1=0x%x "
693		    "DSA=0x%x DSP=0x%x\n", sc->sc_c.sc_dev.dv_xname, sist,
694		    bus_space_read_1(sc->sc_c.sc_rt, sc->sc_c.sc_rh,
695			SIOP_SSTAT1),
696		    bus_space_read_4(sc->sc_c.sc_rt, sc->sc_c.sc_rh, SIOP_DSA),
697		    (int)(bus_space_read_4(sc->sc_c.sc_rt, sc->sc_c.sc_rh,
698			SIOP_DSP) - sc->sc_c.sc_scriptaddr));
699		if (esiop_cmd) {
700			esiop_cmd->cmd_c.status = CMDST_DONE;
701			xs->error = XS_SELTIMEOUT;
702			goto end;
703		}
704		need_reset = 1;
705	}
706	if (need_reset) {
707reset:
708		/* fatal error, reset the bus */
709		siop_resetbus(&sc->sc_c);
710		/* no table to flush here */
711		return 1;
712	}
713
714scintr:
715	if ((istat & ISTAT_DIP) && (dstat & DSTAT_SIR)) { /* script interrupt */
716		irqcode = bus_space_read_4(sc->sc_c.sc_rt, sc->sc_c.sc_rh,
717		    SIOP_DSPS);
718#ifdef SIOP_DEBUG_INTR
719		printf("script interrupt 0x%x\n", irqcode);
720#endif
721		/*
722		 * no command, or an inactive command is only valid for a
723		 * reselect interrupt
724		 */
725		if ((irqcode & 0x80) == 0) {
726			if (esiop_cmd == NULL) {
727				printf(
728			"%s: script interrupt (0x%x) with invalid DSA !!!\n",
729				    sc->sc_c.sc_dev.dv_xname, irqcode);
730				goto reset;
731			}
732			if (esiop_cmd->cmd_c.status != CMDST_ACTIVE) {
733				printf("%s: command with invalid status "
734				    "(IRQ code 0x%x current status %d) !\n",
735				    sc->sc_c.sc_dev.dv_xname,
736				    irqcode, esiop_cmd->cmd_c.status);
737				xs = NULL;
738			}
739		}
740		switch(irqcode) {
741		case A_int_err:
742			printf("error, DSP=0x%x\n",
743			    (int)(bus_space_read_4(sc->sc_c.sc_rt,
744			    sc->sc_c.sc_rh, SIOP_DSP) - sc->sc_c.sc_scriptaddr));
745			if (xs) {
746				xs->error = XS_SELTIMEOUT;
747				goto end;
748			} else {
749				goto reset;
750			}
751		case A_int_msgin:
752		{
753			int msgin = bus_space_read_1(sc->sc_c.sc_rt,
754			    sc->sc_c.sc_rh, SIOP_SFBR);
755			if (msgin == MSG_MESSAGE_REJECT) {
756				int msg, extmsg;
757				if (esiop_cmd->cmd_tables->msg_out[0] & 0x80) {
758					/*
759					 * message was part of a identify +
760					 * something else. Identify shoudl't
761					 * have been rejected.
762					 */
763					msg =
764					    esiop_cmd->cmd_tables->msg_out[1];
765					extmsg =
766					    esiop_cmd->cmd_tables->msg_out[3];
767				} else {
768					msg =
769					    esiop_cmd->cmd_tables->msg_out[0];
770					extmsg =
771					    esiop_cmd->cmd_tables->msg_out[2];
772				}
773				if (msg == MSG_MESSAGE_REJECT) {
774					/* MSG_REJECT  for a MSG_REJECT  !*/
775					if (xs)
776						scsipi_printaddr(xs->xs_periph);
777					else
778						printf("%s: ",
779						   sc->sc_c.sc_dev.dv_xname);
780					printf("our reject message was "
781					    "rejected\n");
782					goto reset;
783				}
784				if (msg == MSG_EXTENDED &&
785				    extmsg == MSG_EXT_WDTR) {
786					/* WDTR rejected, initiate sync */
787					if ((esiop_target->target_c.flags &
788					   TARF_SYNC) == 0) {
789						esiop_target->target_c.status =
790						    TARST_OK;
791						siop_update_xfer_mode(&sc->sc_c,
792						    target);
793						/* no table to flush here */
794						CALL_SCRIPT(Ent_msgin_ack);
795						return 1;
796					}
797					esiop_target->target_c.status =
798					    TARST_SYNC_NEG;
799					siop_sdtr_msg(&esiop_cmd->cmd_c, 0,
800					    sc->sc_c.st_minsync,
801					    sc->sc_c.maxoff);
802					esiop_table_sync(esiop_cmd,
803					    BUS_DMASYNC_PREREAD |
804					    BUS_DMASYNC_PREWRITE);
805					CALL_SCRIPT(Ent_send_msgout);
806					return 1;
807				} else if (msg == MSG_EXTENDED &&
808				    extmsg == MSG_EXT_SDTR) {
809					/* sync rejected */
810					esiop_target->target_c.offset = 0;
811					esiop_target->target_c.period = 0;
812					esiop_target->target_c.status =
813					    TARST_OK;
814					siop_update_xfer_mode(&sc->sc_c,
815					    target);
816					/* no table to flush here */
817					CALL_SCRIPT(Ent_msgin_ack);
818					return 1;
819				} else if (msg == MSG_EXTENDED &&
820				    extmsg == MSG_EXT_PPR) {
821					/* PPR rejected */
822					esiop_target->target_c.offset = 0;
823					esiop_target->target_c.period = 0;
824					esiop_target->target_c.status =
825					    TARST_OK;
826					siop_update_xfer_mode(&sc->sc_c,
827					    target);
828					/* no table to flush here */
829					CALL_SCRIPT(Ent_msgin_ack);
830					return 1;
831				} else if (msg == MSG_SIMPLE_Q_TAG ||
832				    msg == MSG_HEAD_OF_Q_TAG ||
833				    msg == MSG_ORDERED_Q_TAG) {
834					if (esiop_handle_qtag_reject(
835					    esiop_cmd) == -1)
836						goto reset;
837					CALL_SCRIPT(Ent_msgin_ack);
838					return 1;
839				}
840				if (xs)
841					scsipi_printaddr(xs->xs_periph);
842				else
843					printf("%s: ",
844					    sc->sc_c.sc_dev.dv_xname);
845				if (msg == MSG_EXTENDED) {
846					printf("scsi message reject, extended "
847					    "message sent was 0x%x\n", extmsg);
848				} else {
849					printf("scsi message reject, message "
850					    "sent was 0x%x\n", msg);
851				}
852				/* no table to flush here */
853				CALL_SCRIPT(Ent_msgin_ack);
854				return 1;
855			}
856			if (xs)
857				scsipi_printaddr(xs->xs_periph);
858			else
859				printf("%s: ", sc->sc_c.sc_dev.dv_xname);
860			printf("unhandled message 0x%x\n",
861			    esiop_cmd->cmd_tables->msg_in[0]);
862			esiop_cmd->cmd_tables->msg_out[0] = MSG_MESSAGE_REJECT;
863			esiop_cmd->cmd_tables->t_msgout.count= htole32(1);
864			esiop_table_sync(esiop_cmd,
865			    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
866			CALL_SCRIPT(Ent_send_msgout);
867			return 1;
868		}
869		case A_int_extmsgin:
870#ifdef SIOP_DEBUG_INTR
871			printf("extended message: msg 0x%x len %d\n",
872			    esiop_cmd->cmd_tables->msg_in[2],
873			    esiop_cmd->cmd_tables->msg_in[1]);
874#endif
875			if (esiop_cmd->cmd_tables->msg_in[1] >
876			    sizeof(esiop_cmd->cmd_tables->msg_in) - 2)
877				printf("%s: extended message too big (%d)\n",
878				    sc->sc_c.sc_dev.dv_xname,
879				    esiop_cmd->cmd_tables->msg_in[1]);
880			esiop_cmd->cmd_tables->t_extmsgdata.count =
881			    htole32(esiop_cmd->cmd_tables->msg_in[1] - 1);
882			esiop_table_sync(esiop_cmd,
883			    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
884			CALL_SCRIPT(Ent_get_extmsgdata);
885			return 1;
886		case A_int_extmsgdata:
887#ifdef SIOP_DEBUG_INTR
888			{
889			int i;
890			printf("extended message: 0x%x, data:",
891			    esiop_cmd->cmd_tables->msg_in[2]);
892			for (i = 3; i < 2 + esiop_cmd->cmd_tables->msg_in[1];
893			    i++)
894				printf(" 0x%x",
895				    esiop_cmd->cmd_tables->msg_in[i]);
896			printf("\n");
897			}
898#endif
899			if (esiop_cmd->cmd_tables->msg_in[2] == MSG_EXT_PPR) {
900				switch (siop_ppr_neg(&esiop_cmd->cmd_c)) {
901				case SIOP_NEG_MSGOUT:
902					esiop_table_sync(esiop_cmd,
903					    BUS_DMASYNC_PREREAD |
904					    BUS_DMASYNC_PREWRITE);
905					CALL_SCRIPT(Ent_send_msgout);
906					return(1);
907				case SIOP_NEG_ACK:
908					CALL_SCRIPT(Ent_msgin_ack);
909					return(1);
910				default:
911					panic("invalid retval from "
912					    "siop_wdtr_neg()");
913				}
914				return(1);
915			}
916			if (esiop_cmd->cmd_tables->msg_in[2] == MSG_EXT_WDTR) {
917				switch (siop_wdtr_neg(&esiop_cmd->cmd_c)) {
918				case SIOP_NEG_MSGOUT:
919					esiop_table_sync(esiop_cmd,
920					    BUS_DMASYNC_PREREAD |
921					    BUS_DMASYNC_PREWRITE);
922					CALL_SCRIPT(Ent_send_msgout);
923					return(1);
924				case SIOP_NEG_ACK:
925					CALL_SCRIPT(Ent_msgin_ack);
926					return(1);
927				default:
928					panic("invalid retval from "
929					    "siop_wdtr_neg()");
930				}
931				return(1);
932			}
933			if (esiop_cmd->cmd_tables->msg_in[2] == MSG_EXT_SDTR) {
934				switch (siop_sdtr_neg(&esiop_cmd->cmd_c)) {
935				case SIOP_NEG_MSGOUT:
936					esiop_table_sync(esiop_cmd,
937					    BUS_DMASYNC_PREREAD |
938					    BUS_DMASYNC_PREWRITE);
939					CALL_SCRIPT(Ent_send_msgout);
940					return(1);
941				case SIOP_NEG_ACK:
942					CALL_SCRIPT(Ent_msgin_ack);
943					return(1);
944				default:
945					panic("invalid retval from "
946					    "siop_wdtr_neg()");
947				}
948				return(1);
949			}
950			/* send a message reject */
951			esiop_cmd->cmd_tables->msg_out[0] = MSG_MESSAGE_REJECT;
952			esiop_cmd->cmd_tables->t_msgout.count = htole32(1);
953			esiop_table_sync(esiop_cmd,
954			    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
955			CALL_SCRIPT(Ent_send_msgout);
956			return 1;
957		case A_int_disc:
958			INCSTAT(esiop_stat_intr_sdp);
959			offset = bus_space_read_1(sc->sc_c.sc_rt,
960			    sc->sc_c.sc_rh, SIOP_SCRATCHA + 1);
961#ifdef SIOP_DEBUG_DR
962			printf("disconnect offset %d\n", offset);
963#endif
964			if (offset > SIOP_NSG) {
965				printf("%s: bad offset for disconnect (%d)\n",
966				    sc->sc_c.sc_dev.dv_xname, offset);
967				goto reset;
968			}
969			/*
970			 * offset == SIOP_NSG may be a valid condition if
971			 * we get a sdp when the xfer is done.
972			 * Don't call memmove in this case.
973			 */
974			if (offset < SIOP_NSG) {
975				memmove(&esiop_cmd->cmd_tables->data[0],
976				    &esiop_cmd->cmd_tables->data[offset],
977				    (SIOP_NSG - offset) * sizeof(scr_table_t));
978				esiop_table_sync(esiop_cmd,
979				    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
980			}
981			CALL_SCRIPT(Ent_script_sched);
982			return 1;
983		case A_int_resfail:
984			printf("reselect failed\n");
985			CALL_SCRIPT(Ent_script_sched);
986			return  1;
987		case A_int_done:
988			if (xs == NULL) {
989				printf("%s: done without command\n",
990				    sc->sc_c.sc_dev.dv_xname);
991				CALL_SCRIPT(Ent_script_sched);
992				return 1;
993			}
994#ifdef SIOP_DEBUG_INTR
995			printf("done, DSA=0x%lx target id 0x%x last msg "
996			    "in=0x%x status=0x%x\n", (u_long)esiop_cmd->cmd_c.dsa,
997			    le32toh(esiop_cmd->cmd_tables->id),
998			    esiop_cmd->cmd_tables->msg_in[0],
999			    le32toh(esiop_cmd->cmd_tables->status));
1000#endif
1001			INCSTAT(esiop_stat_intr_done);
1002			esiop_cmd->cmd_c.status = CMDST_DONE;
1003			goto end;
1004		default:
1005			printf("unknown irqcode %x\n", irqcode);
1006			if (xs) {
1007				xs->error = XS_SELTIMEOUT;
1008				goto end;
1009			}
1010			goto reset;
1011		}
1012		return 1;
1013	}
1014	/* We just should't get there */
1015	panic("siop_intr: I shouldn't be there !");
1016
1017end:
1018	/*
1019	 * restart the script now if command completed properly
1020	 * Otherwise wait for siop_scsicmd_end(), we may need to cleanup the
1021	 * queue
1022	 */
1023	xs->status = le32toh(esiop_cmd->cmd_tables->status);
1024#ifdef SIOP_DEBUG_INTR
1025	printf("esiop_intr end: status %d\n", xs->status);
1026#endif
1027	if (xs->status == SCSI_OK)
1028		CALL_SCRIPT(Ent_script_sched);
1029	else
1030		restart = 1;
1031	if (tag >= 0)
1032		esiop_lun->tactive[tag] = NULL;
1033	else
1034		esiop_lun->active = NULL;
1035	esiop_scsicmd_end(esiop_cmd);
1036	if (freetarget && esiop_target->target_c.status == TARST_PROBING)
1037		esiop_del_dev(sc, target, lun);
1038	if (restart)
1039		CALL_SCRIPT(Ent_script_sched);
1040	if (sc->sc_flags & SCF_CHAN_NOSLOT) {
1041		/* a command terminated, so we have free slots now */
1042		sc->sc_flags &= ~SCF_CHAN_NOSLOT;
1043		scsipi_channel_thaw(&sc->sc_c.sc_chan, 1);
1044	}
1045
1046	return retval;
1047}
1048
1049void
1050esiop_scsicmd_end(esiop_cmd)
1051	struct esiop_cmd *esiop_cmd;
1052{
1053	struct scsipi_xfer *xs = esiop_cmd->cmd_c.xs;
1054	struct esiop_softc *sc = (struct esiop_softc *)esiop_cmd->cmd_c.siop_sc;
1055
1056	switch(xs->status) {
1057	case SCSI_OK:
1058		xs->error = XS_NOERROR;
1059		break;
1060	case SCSI_BUSY:
1061		xs->error = XS_BUSY;
1062		break;
1063	case SCSI_CHECK:
1064		xs->error = XS_BUSY;
1065		/* remove commands in the queue and scheduler */
1066		esiop_unqueue(sc, xs->xs_periph->periph_target,
1067		    xs->xs_periph->periph_lun);
1068		break;
1069	case SCSI_QUEUE_FULL:
1070		INCSTAT(esiop_stat_intr_qfull);
1071#ifdef SIOP_DEBUG
1072		printf("%s:%d:%d: queue full (tag %d)\n",
1073		    sc->sc_c.sc_dev.dv_xname,
1074		    xs->xs_periph->periph_target,
1075		    xs->xs_periph->periph_lun, esiop_cmd->cmd_c.tag);
1076#endif
1077		xs->error = XS_BUSY;
1078		break;
1079	case SCSI_SIOP_NOCHECK:
1080		/*
1081		 * don't check status, xs->error is already valid
1082		 */
1083		break;
1084	case SCSI_SIOP_NOSTATUS:
1085		/*
1086		 * the status byte was not updated, cmd was
1087		 * aborted
1088		 */
1089		xs->error = XS_SELTIMEOUT;
1090		break;
1091	default:
1092		xs->error = XS_DRIVER_STUFFUP;
1093	}
1094	if (xs->xs_control & (XS_CTL_DATA_IN | XS_CTL_DATA_OUT)) {
1095		bus_dmamap_sync(sc->sc_c.sc_dmat,
1096		    esiop_cmd->cmd_c.dmamap_data, 0,
1097		    esiop_cmd->cmd_c.dmamap_data->dm_mapsize,
1098		    (xs->xs_control & XS_CTL_DATA_IN) ?
1099		    BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE);
1100		bus_dmamap_unload(sc->sc_c.sc_dmat,
1101		    esiop_cmd->cmd_c.dmamap_data);
1102	}
1103	bus_dmamap_unload(sc->sc_c.sc_dmat, esiop_cmd->cmd_c.dmamap_cmd);
1104	callout_stop(&esiop_cmd->cmd_c.xs->xs_callout);
1105	esiop_cmd->cmd_c.status = CMDST_FREE;
1106	TAILQ_INSERT_TAIL(&sc->free_list, esiop_cmd, next);
1107	xs->resid = 0;
1108	scsipi_done (xs);
1109}
1110
1111void
1112esiop_checkdone(sc)
1113	struct esiop_softc *sc;
1114{
1115	int target, lun, tag;
1116	struct esiop_target *esiop_target;
1117	struct esiop_lun *esiop_lun;
1118	struct esiop_cmd *esiop_cmd;
1119	int status;
1120
1121	for (target = 0; target < sc->sc_c.sc_chan.chan_ntargets; target++) {
1122		esiop_target = (struct esiop_target *)sc->sc_c.targets[target];
1123		if (esiop_target == NULL)
1124			continue;
1125		for (lun = 0; lun < sc->sc_c.sc_chan.chan_nluns; lun++) {
1126			esiop_lun = esiop_target->esiop_lun[lun];
1127			if (esiop_lun == NULL)
1128				continue;
1129			esiop_cmd = esiop_lun->active;
1130			if (esiop_cmd) {
1131				esiop_table_sync(esiop_cmd,
1132				    BUS_DMASYNC_POSTREAD |
1133				    BUS_DMASYNC_POSTWRITE);
1134				status = le32toh(esiop_cmd->cmd_tables->status);
1135				if (status == SCSI_OK) {
1136					/* Ok, this command has been handled */
1137					esiop_cmd->cmd_c.xs->status = status;
1138					esiop_lun->active = NULL;
1139					esiop_scsicmd_end(esiop_cmd);
1140				}
1141			}
1142			for (tag = 0; tag < ESIOP_NTAG; tag++) {
1143				esiop_cmd = esiop_lun->tactive[tag];
1144				if (esiop_cmd == NULL)
1145					continue;
1146				esiop_table_sync(esiop_cmd,
1147				    BUS_DMASYNC_POSTREAD |
1148				    BUS_DMASYNC_POSTWRITE);
1149				status = le32toh(esiop_cmd->cmd_tables->status);
1150				if (status == SCSI_OK) {
1151					/* Ok, this command has been handled */
1152					esiop_cmd->cmd_c.xs->status = status;
1153					esiop_lun->tactive[tag] = NULL;
1154					esiop_scsicmd_end(esiop_cmd);
1155				}
1156			}
1157		}
1158	}
1159}
1160
1161void
1162esiop_unqueue(sc, target, lun)
1163	struct esiop_softc *sc;
1164	int target;
1165	int lun;
1166{
1167 	int slot, tag;
1168	u_int32_t slotdsa;
1169	struct esiop_cmd *esiop_cmd;
1170	struct esiop_lun *esiop_lun =
1171	    ((struct esiop_target *)sc->sc_c.targets[target])->esiop_lun[lun];
1172
1173	/* first make sure to read valid data */
1174	esiop_script_sync(sc, BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
1175
1176	for (tag = 0; tag < ESIOP_NTAG; tag++) {
1177		/* look for commands in the scheduler, not yet started */
1178		if (esiop_lun->tactive[tag] == NULL)
1179			continue;
1180		esiop_cmd = esiop_lun->tactive[tag];
1181		for (slot = 0; slot < A_ncmd_slots; slot++) {
1182			slotdsa = esiop_script_read(sc,
1183			    sc->sc_shedoffset + slot * CMD_SLOTSIZE);
1184			if (slotdsa & A_f_cmd_free)
1185				continue;
1186			if ((slotdsa & ~A_f_cmd_free) == esiop_cmd->cmd_c.dsa)
1187				break;
1188		}
1189		if (slot >  ESIOP_NTAG)
1190			continue; /* didn't find it */
1191		/* Mark this slot as ignore */
1192		esiop_script_write(sc, sc->sc_shedoffset + slot * CMD_SLOTSIZE,
1193		    esiop_cmd->cmd_c.dsa | A_f_cmd_ignore);
1194		/* ask to requeue */
1195		esiop_cmd->cmd_c.xs->error = XS_REQUEUE;
1196		esiop_cmd->cmd_c.xs->status = SCSI_SIOP_NOCHECK;
1197		esiop_lun->tactive[tag] = NULL;
1198		esiop_scsicmd_end(esiop_cmd);
1199	}
1200}
1201
1202/*
1203 * handle a rejected queue tag message: the command will run untagged,
1204 * has to adjust the reselect script.
1205 */
1206
1207
1208int
1209esiop_handle_qtag_reject(esiop_cmd)
1210	struct esiop_cmd *esiop_cmd;
1211{
1212	struct esiop_softc *sc = (struct esiop_softc *)esiop_cmd->cmd_c.siop_sc;
1213	int target = esiop_cmd->cmd_c.xs->xs_periph->periph_target;
1214	int lun = esiop_cmd->cmd_c.xs->xs_periph->periph_lun;
1215	int tag = esiop_cmd->cmd_tables->msg_out[2];
1216	struct esiop_target *esiop_target =
1217	    (struct esiop_target*)sc->sc_c.targets[target];
1218	struct esiop_lun *esiop_lun = esiop_target->esiop_lun[lun];
1219
1220#ifdef SIOP_DEBUG
1221	printf("%s:%d:%d: tag message %d (%d) rejected (status %d)\n",
1222	    sc->sc_c.sc_dev.dv_xname, target, lun, tag, esiop_cmd->cmd_c.tag,
1223	    esiop_cmd->cmd_c.status);
1224#endif
1225
1226	if (esiop_lun->active != NULL) {
1227		printf("%s: untagged command already running for target %d "
1228		    "lun %d (status %d)\n", sc->sc_c.sc_dev.dv_xname,
1229		    target, lun, esiop_lun->active->cmd_c.status);
1230		return -1;
1231	}
1232	/* clear tag slot */
1233	esiop_lun->tactive[tag] = NULL;
1234	/* add command to non-tagged slot */
1235	esiop_lun->active = esiop_cmd;
1236	esiop_cmd->cmd_c.flags &= ~CMDFL_TAG;
1237	esiop_cmd->cmd_c.tag = -1;
1238	/* update DSA table */
1239	esiop_script_write(sc, esiop_target->lun_table_offset + lun + 2,
1240	    esiop_cmd->cmd_c.dsa);
1241	esiop_lun->lun_flags &= ~LUNF_TAGTABLE;
1242	esiop_script_sync(sc, BUS_DMASYNC_PREREAD |  BUS_DMASYNC_PREWRITE);
1243	return 0;
1244}
1245
1246/*
1247 * handle a bus reset: reset chip, unqueue all active commands, free all
1248 * target struct and report loosage to upper layer.
1249 * As the upper layer may requeue immediatly we have to first store
1250 * all active commands in a temporary queue.
1251 */
1252void
1253esiop_handle_reset(sc)
1254	struct esiop_softc *sc;
1255{
1256	struct esiop_cmd *esiop_cmd;
1257	struct esiop_lun *esiop_lun;
1258	int target, lun, tag;
1259	/*
1260	 * scsi bus reset. reset the chip and restart
1261	 * the queue. Need to clean up all active commands
1262	 */
1263	printf("%s: scsi bus reset\n", sc->sc_c.sc_dev.dv_xname);
1264	/* stop, reset and restart the chip */
1265	esiop_reset(sc);
1266	if (sc->sc_flags & SCF_CHAN_NOSLOT) {
1267		/* chip has been reset, all slots are free now */
1268		sc->sc_flags &= ~SCF_CHAN_NOSLOT;
1269		scsipi_channel_thaw(&sc->sc_c.sc_chan, 1);
1270	}
1271	/*
1272	 * Process all commands: first commmands completes, then commands
1273	 * being executed
1274	 */
1275	esiop_checkdone(sc);
1276	for (target = 0; target < sc->sc_c.sc_chan.chan_ntargets;
1277	    target++) {
1278		struct esiop_target *esiop_target =
1279		    (struct esiop_target *)sc->sc_c.targets[target];
1280		if (esiop_target == NULL)
1281			continue;
1282		for (lun = 0; lun < 8; lun++) {
1283			esiop_lun = esiop_target->esiop_lun[lun];
1284			if (esiop_lun == NULL)
1285				continue;
1286			for (tag = -1; tag <
1287			    ((sc->sc_c.targets[target]->flags & TARF_TAG) ?
1288			    ESIOP_NTAG : 0);
1289			    tag++) {
1290				if (tag >= 0)
1291					esiop_cmd = esiop_lun->tactive[tag];
1292				else
1293					esiop_cmd = esiop_lun->active;
1294				if (esiop_cmd == NULL)
1295					continue;
1296				scsipi_printaddr(esiop_cmd->cmd_c.xs->xs_periph);
1297				printf("command with tag id %d reset\n", tag);
1298				esiop_cmd->cmd_c.xs->error =
1299				    (esiop_cmd->cmd_c.flags & CMDFL_TIMEOUT) ?
1300		    		    XS_TIMEOUT : XS_RESET;
1301				esiop_cmd->cmd_c.xs->status = SCSI_SIOP_NOCHECK;
1302				if (tag >= 0)
1303					esiop_lun->tactive[tag] = NULL;
1304				else
1305					esiop_lun->active = NULL;
1306				esiop_cmd->cmd_c.status = CMDST_DONE;
1307				esiop_scsicmd_end(esiop_cmd);
1308			}
1309		}
1310		sc->sc_c.targets[target]->status = TARST_ASYNC;
1311		sc->sc_c.targets[target]->flags &= ~(TARF_ISWIDE | TARF_ISDT);
1312		sc->sc_c.targets[target]->period =
1313		    sc->sc_c.targets[target]->offset = 0;
1314		siop_update_xfer_mode(&sc->sc_c, target);
1315	}
1316
1317	scsipi_async_event(&sc->sc_c.sc_chan, ASYNC_EVENT_RESET, NULL);
1318}
1319
1320void
1321esiop_scsipi_request(chan, req, arg)
1322	struct scsipi_channel *chan;
1323	scsipi_adapter_req_t req;
1324	void *arg;
1325{
1326	struct scsipi_xfer *xs;
1327	struct scsipi_periph *periph;
1328	struct esiop_softc *sc = (void *)chan->chan_adapter->adapt_dev;
1329	struct esiop_cmd *esiop_cmd;
1330	struct esiop_target *esiop_target;
1331	int s, error, i;
1332	int target;
1333	int lun;
1334
1335	switch (req) {
1336	case ADAPTER_REQ_RUN_XFER:
1337		xs = arg;
1338		periph = xs->xs_periph;
1339		target = periph->periph_target;
1340		lun = periph->periph_lun;
1341
1342		s = splbio();
1343#ifdef SIOP_DEBUG_SCHED
1344		printf("starting cmd for %d:%d\n", target, lun);
1345#endif
1346		esiop_cmd = TAILQ_FIRST(&sc->free_list);
1347		if (esiop_cmd == NULL) {
1348			xs->error = XS_RESOURCE_SHORTAGE;
1349			scsipi_done(xs);
1350			splx(s);
1351			return;
1352		}
1353		TAILQ_REMOVE(&sc->free_list, esiop_cmd, next);
1354#ifdef DIAGNOSTIC
1355		if (esiop_cmd->cmd_c.status != CMDST_FREE)
1356			panic("siop_scsicmd: new cmd not free");
1357#endif
1358		esiop_target = (struct esiop_target*)sc->sc_c.targets[target];
1359		if (esiop_target == NULL) {
1360#ifdef SIOP_DEBUG
1361			printf("%s: alloc siop_target for target %d\n",
1362				sc->sc_c.sc_dev.dv_xname, target);
1363#endif
1364			sc->sc_c.targets[target] =
1365			    malloc(sizeof(struct esiop_target),
1366				M_DEVBUF, M_NOWAIT | M_ZERO);
1367			if (sc->sc_c.targets[target] == NULL) {
1368				printf("%s: can't malloc memory for "
1369				    "target %d\n", sc->sc_c.sc_dev.dv_xname,
1370				    target);
1371				xs->error = XS_RESOURCE_SHORTAGE;
1372				scsipi_done(xs);
1373				splx(s);
1374				return;
1375			}
1376			esiop_target =
1377			    (struct esiop_target*)sc->sc_c.targets[target];
1378			esiop_target->target_c.status = TARST_PROBING;
1379			esiop_target->target_c.flags = 0;
1380			esiop_target->target_c.id =
1381			    sc->sc_c.clock_div << 24; /* scntl3 */
1382			esiop_target->target_c.id |=  target << 16; /* id */
1383			/* esiop_target->target_c.id |= 0x0 << 8; scxfer is 0 */
1384
1385			for (i=0; i < 8; i++)
1386				esiop_target->esiop_lun[i] = NULL;
1387			esiop_target_register(sc, target);
1388		}
1389		if (esiop_target->esiop_lun[lun] == NULL) {
1390			esiop_target->esiop_lun[lun] =
1391			    malloc(sizeof(struct esiop_lun), M_DEVBUF,
1392			    M_NOWAIT|M_ZERO);
1393			if (esiop_target->esiop_lun[lun] == NULL) {
1394				printf("%s: can't alloc esiop_lun for "
1395				    "target %d lun %d\n",
1396				    sc->sc_c.sc_dev.dv_xname, target, lun);
1397				xs->error = XS_RESOURCE_SHORTAGE;
1398				scsipi_done(xs);
1399				splx(s);
1400				return;
1401			}
1402		}
1403		esiop_cmd->cmd_c.siop_target = sc->sc_c.targets[target];
1404		esiop_cmd->cmd_c.xs = xs;
1405		esiop_cmd->cmd_c.flags = 0;
1406		esiop_cmd->cmd_c.status = CMDST_READY;
1407
1408		/* load the DMA maps */
1409		error = bus_dmamap_load(sc->sc_c.sc_dmat,
1410		    esiop_cmd->cmd_c.dmamap_cmd,
1411		    xs->cmd, xs->cmdlen, NULL, BUS_DMA_NOWAIT);
1412		if (error) {
1413			printf("%s: unable to load cmd DMA map: %d\n",
1414			    sc->sc_c.sc_dev.dv_xname, error);
1415			xs->error = XS_DRIVER_STUFFUP;
1416			scsipi_done(xs);
1417			splx(s);
1418			return;
1419		}
1420		if (xs->xs_control & (XS_CTL_DATA_IN | XS_CTL_DATA_OUT)) {
1421			error = bus_dmamap_load(sc->sc_c.sc_dmat,
1422			    esiop_cmd->cmd_c.dmamap_data, xs->data, xs->datalen,
1423			    NULL, BUS_DMA_NOWAIT | BUS_DMA_STREAMING |
1424			    ((xs->xs_control & XS_CTL_DATA_IN) ?
1425			     BUS_DMA_READ : BUS_DMA_WRITE));
1426			if (error) {
1427				printf("%s: unable to load cmd DMA map: %d",
1428				    sc->sc_c.sc_dev.dv_xname, error);
1429				xs->error = XS_DRIVER_STUFFUP;
1430				scsipi_done(xs);
1431				bus_dmamap_unload(sc->sc_c.sc_dmat,
1432				    esiop_cmd->cmd_c.dmamap_cmd);
1433				splx(s);
1434				return;
1435			}
1436			bus_dmamap_sync(sc->sc_c.sc_dmat,
1437			    esiop_cmd->cmd_c.dmamap_data, 0,
1438			    esiop_cmd->cmd_c.dmamap_data->dm_mapsize,
1439			    (xs->xs_control & XS_CTL_DATA_IN) ?
1440			    BUS_DMASYNC_PREREAD : BUS_DMASYNC_PREWRITE);
1441		}
1442		bus_dmamap_sync(sc->sc_c.sc_dmat, esiop_cmd->cmd_c.dmamap_cmd,
1443		    0, esiop_cmd->cmd_c.dmamap_cmd->dm_mapsize,
1444		    BUS_DMASYNC_PREWRITE);
1445
1446		if (xs->xs_tag_type)
1447			esiop_cmd->cmd_c.tag = xs->xs_tag_id;
1448		else
1449			esiop_cmd->cmd_c.tag = -1;
1450		siop_setuptables(&esiop_cmd->cmd_c);
1451		((struct esiop_xfer *)esiop_cmd->cmd_tables)->tlq =
1452		    htole32(A_f_c_target | A_f_c_lun);
1453		((struct esiop_xfer *)esiop_cmd->cmd_tables)->tlq |=
1454		    htole32((target << 8) | (lun << 16));
1455		if (esiop_cmd->cmd_c.flags & CMDFL_TAG) {
1456			((struct esiop_xfer *)esiop_cmd->cmd_tables)->tlq |=
1457			    htole32(A_f_c_tag);
1458			((struct esiop_xfer *)esiop_cmd->cmd_tables)->tlq |=
1459			    htole32(esiop_cmd->cmd_c.tag << 24);
1460		}
1461
1462		esiop_table_sync(esiop_cmd,
1463		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1464		esiop_start(sc, esiop_cmd);
1465		if (xs->xs_control & XS_CTL_POLL) {
1466			/* poll for command completion */
1467			while ((xs->xs_status & XS_STS_DONE) == 0) {
1468				delay(1000);
1469				esiop_intr(sc);
1470			}
1471		}
1472		splx(s);
1473		return;
1474
1475	case ADAPTER_REQ_GROW_RESOURCES:
1476#ifdef SIOP_DEBUG
1477		printf("%s grow resources (%d)\n", sc->sc_c.sc_dev.dv_xname,
1478		    sc->sc_c.sc_adapt.adapt_openings);
1479#endif
1480		esiop_morecbd(sc);
1481		return;
1482
1483	case ADAPTER_REQ_SET_XFER_MODE:
1484	{
1485		struct scsipi_xfer_mode *xm = arg;
1486		if (sc->sc_c.targets[xm->xm_target] == NULL)
1487			return;
1488		s = splbio();
1489		if (xm->xm_mode & PERIPH_CAP_TQING)
1490			sc->sc_c.targets[xm->xm_target]->flags |= TARF_TAG;
1491		if ((xm->xm_mode & PERIPH_CAP_WIDE16) &&
1492		    (sc->sc_c.features & SF_BUS_WIDE))
1493			sc->sc_c.targets[xm->xm_target]->flags |= TARF_WIDE;
1494		if (xm->xm_mode & PERIPH_CAP_SYNC)
1495			sc->sc_c.targets[xm->xm_target]->flags |= TARF_SYNC;
1496		if ((xm->xm_mode & PERIPH_CAP_DT) &&
1497		    (sc->sc_c.features & SF_CHIP_DT))
1498			sc->sc_c.targets[xm->xm_target]->flags |= TARF_DT;
1499		if ((xm->xm_mode &
1500		    (PERIPH_CAP_SYNC | PERIPH_CAP_WIDE16 | PERIPH_CAP_DT)) ||
1501		    sc->sc_c.targets[xm->xm_target]->status == TARST_PROBING)
1502			sc->sc_c.targets[xm->xm_target]->status = TARST_ASYNC;
1503
1504		for (lun = 0; lun < sc->sc_c.sc_chan.chan_nluns; lun++) {
1505			if (sc->sc_c.sc_chan.chan_periphs[xm->xm_target][lun])
1506				/* allocate a lun sw entry for this device */
1507				esiop_add_dev(sc, xm->xm_target, lun);
1508		}
1509		splx(s);
1510	}
1511	}
1512}
1513
1514static void
1515esiop_start(sc, esiop_cmd)
1516	struct esiop_softc *sc;
1517	struct esiop_cmd *esiop_cmd;
1518{
1519	struct esiop_lun *esiop_lun;
1520	struct esiop_target *esiop_target;
1521	int timeout;
1522	int target, lun, slot;
1523
1524	nintr = 0;
1525
1526	/*
1527	 * first make sure to read valid data
1528	 */
1529	esiop_script_sync(sc, BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
1530
1531	/*
1532	 * We use a circular queue here. sc->sc_currschedslot points to a
1533	 * free slot, unless we have filled the queue. Check this.
1534	 */
1535	slot = sc->sc_currschedslot;
1536	if ((esiop_script_read(sc, sc->sc_shedoffset + slot * CMD_SLOTSIZE) &
1537	    A_f_cmd_free) == 0) {
1538		/*
1539		 * no more free slot, no need to continue. freeze the queue
1540		 * and requeue this command.
1541		 */
1542		scsipi_channel_freeze(&sc->sc_c.sc_chan, 1);
1543		sc->sc_flags |= SCF_CHAN_NOSLOT;
1544		esiop_cmd->cmd_c.xs->error = XS_REQUEUE;
1545		esiop_cmd->cmd_c.xs->status = SCSI_SIOP_NOCHECK;
1546		esiop_scsicmd_end(esiop_cmd);
1547		return;
1548	}
1549	/* OK, we can use this slot */
1550
1551	target = esiop_cmd->cmd_c.xs->xs_periph->periph_target;
1552	lun = esiop_cmd->cmd_c.xs->xs_periph->periph_lun;
1553	esiop_target = (struct esiop_target*)sc->sc_c.targets[target];
1554	esiop_lun = esiop_target->esiop_lun[lun];
1555	/* if non-tagged command active, panic: this shouldn't happen */
1556	if (esiop_lun->active != NULL) {
1557		panic("esiop_start: tagged cmd while untagged running");
1558	}
1559#ifdef DIAGNOSTIC
1560	/* sanity check the tag if needed */
1561	if (esiop_cmd->cmd_c.flags & CMDFL_TAG) {
1562		if (esiop_lun->tactive[esiop_cmd->cmd_c.tag] != NULL)
1563			panic("esiop_start: tag not free");
1564		if (esiop_cmd->cmd_c.tag >= ESIOP_NTAG ||
1565		    esiop_cmd->cmd_c.tag < 0) {
1566			scsipi_printaddr(esiop_cmd->cmd_c.xs->xs_periph);
1567			printf(": tag id %d\n", esiop_cmd->cmd_c.tag);
1568			panic("esiop_start: invalid tag id");
1569		}
1570	}
1571#endif
1572#ifdef SIOP_DEBUG_SCHED
1573	printf("using slot %d for DSA 0x%lx\n", slot,
1574	    (u_long)esiop_cmd->cmd_c.dsa);
1575#endif
1576	/* mark command as active */
1577	if (esiop_cmd->cmd_c.status == CMDST_READY)
1578		esiop_cmd->cmd_c.status = CMDST_ACTIVE;
1579	else
1580		panic("esiop_start: bad status");
1581	if (esiop_cmd->cmd_c.flags & CMDFL_TAG) {
1582		esiop_lun->tactive[esiop_cmd->cmd_c.tag] = esiop_cmd;
1583		/* DSA table for reselect */
1584		if ((esiop_lun->lun_flags & LUNF_TAGTABLE) == 0) {
1585			esiop_script_write(sc,
1586			    esiop_target->lun_table_offset + lun + 2,
1587			    esiop_lun->lun_tagtbl->tbl_dsa);
1588			esiop_lun->lun_flags |= LUNF_TAGTABLE;
1589		}
1590		esiop_lun->lun_tagtbl->tbl[esiop_cmd->cmd_c.tag] =
1591		    htole32(esiop_cmd->cmd_c.dsa);
1592		bus_dmamap_sync(sc->sc_c.sc_dmat,
1593		    esiop_lun->lun_tagtbl->tblblk->blkmap,
1594		    esiop_lun->lun_tagtbl->tbl_offset,
1595		    sizeof(u_int32_t) * ESIOP_NTAG, BUS_DMASYNC_PREWRITE);
1596	} else {
1597		esiop_lun->active = esiop_cmd;
1598		/* DSA table for reselect */
1599		esiop_script_write(sc, esiop_target->lun_table_offset + lun + 2,
1600		    esiop_cmd->cmd_c.dsa);
1601		esiop_lun->lun_flags &= ~LUNF_TAGTABLE;
1602
1603	}
1604	/* scheduler slot: DSA */
1605	esiop_script_write(sc, sc->sc_shedoffset + slot * CMD_SLOTSIZE,
1606	    esiop_cmd->cmd_c.dsa);
1607	/* handle timeout */
1608	if ((esiop_cmd->cmd_c.xs->xs_control & XS_CTL_POLL) == 0) {
1609		/* start exire timer */
1610		timeout = mstohz(esiop_cmd->cmd_c.xs->timeout);
1611		if (timeout == 0)
1612			timeout = 1;
1613		callout_reset( &esiop_cmd->cmd_c.xs->xs_callout,
1614		    timeout, esiop_timeout, esiop_cmd);
1615	}
1616	/* make sure SCRIPT processor will read valid data */
1617	esiop_script_sync(sc, BUS_DMASYNC_PREREAD |  BUS_DMASYNC_PREWRITE);
1618	/* Signal script it has some work to do */
1619	bus_space_write_1(sc->sc_c.sc_rt, sc->sc_c.sc_rh,
1620	    SIOP_ISTAT, ISTAT_SIGP);
1621	/* update the current slot, and wait for IRQ */
1622	sc->sc_currschedslot++;
1623	if (sc->sc_currschedslot >= A_ncmd_slots)
1624		sc->sc_currschedslot = 0;
1625	return;
1626}
1627
1628void
1629esiop_timeout(v)
1630	void *v;
1631{
1632	struct esiop_cmd *esiop_cmd = v;
1633	struct esiop_softc *sc =
1634	    (struct esiop_softc *)esiop_cmd->cmd_c.siop_sc;
1635	int s;
1636
1637	scsipi_printaddr(esiop_cmd->cmd_c.xs->xs_periph);
1638	printf("command timeout\n");
1639
1640	s = splbio();
1641	/* reset the scsi bus */
1642	siop_resetbus(&sc->sc_c);
1643
1644	/* deactivate callout */
1645	callout_stop(&esiop_cmd->cmd_c.xs->xs_callout);
1646	/*
1647	 * mark command has being timed out and just return;
1648	 * the bus reset will generate an interrupt,
1649	 * it will be handled in siop_intr()
1650	 */
1651	esiop_cmd->cmd_c.flags |= CMDFL_TIMEOUT;
1652	splx(s);
1653	return;
1654
1655}
1656
1657void
1658esiop_dump_script(sc)
1659	struct esiop_softc *sc;
1660{
1661	int i;
1662	for (i = 0; i < PAGE_SIZE / 4; i += 2) {
1663		printf("0x%04x: 0x%08x 0x%08x", i * 4,
1664		    le32toh(sc->sc_c.sc_script[i]),
1665		    le32toh(sc->sc_c.sc_script[i+1]));
1666		if ((le32toh(sc->sc_c.sc_script[i]) & 0xe0000000) ==
1667		    0xc0000000) {
1668			i++;
1669			printf(" 0x%08x", le32toh(sc->sc_c.sc_script[i+1]));
1670		}
1671		printf("\n");
1672	}
1673}
1674
1675void
1676esiop_morecbd(sc)
1677	struct esiop_softc *sc;
1678{
1679	int error, i, s;
1680	bus_dma_segment_t seg;
1681	int rseg;
1682	struct esiop_cbd *newcbd;
1683	struct esiop_xfer *xfer;
1684	bus_addr_t dsa;
1685
1686	/* allocate a new list head */
1687	newcbd = malloc(sizeof(struct esiop_cbd), M_DEVBUF, M_NOWAIT|M_ZERO);
1688	if (newcbd == NULL) {
1689		printf("%s: can't allocate memory for command descriptors "
1690		    "head\n", sc->sc_c.sc_dev.dv_xname);
1691		return;
1692	}
1693
1694	/* allocate cmd list */
1695	newcbd->cmds = malloc(sizeof(struct esiop_cmd) * SIOP_NCMDPB,
1696	    M_DEVBUF, M_NOWAIT|M_ZERO);
1697	if (newcbd->cmds == NULL) {
1698		printf("%s: can't allocate memory for command descriptors\n",
1699		    sc->sc_c.sc_dev.dv_xname);
1700		goto bad3;
1701	}
1702	error = bus_dmamem_alloc(sc->sc_c.sc_dmat, PAGE_SIZE, PAGE_SIZE, 0,
1703	    &seg, 1, &rseg, BUS_DMA_NOWAIT);
1704	if (error) {
1705		printf("%s: unable to allocate cbd DMA memory, error = %d\n",
1706		    sc->sc_c.sc_dev.dv_xname, error);
1707		goto bad2;
1708	}
1709	error = bus_dmamem_map(sc->sc_c.sc_dmat, &seg, rseg, PAGE_SIZE,
1710	    (caddr_t *)&newcbd->xfers, BUS_DMA_NOWAIT|BUS_DMA_COHERENT);
1711	if (error) {
1712		printf("%s: unable to map cbd DMA memory, error = %d\n",
1713		    sc->sc_c.sc_dev.dv_xname, error);
1714		goto bad2;
1715	}
1716	error = bus_dmamap_create(sc->sc_c.sc_dmat, PAGE_SIZE, 1, PAGE_SIZE, 0,
1717	    BUS_DMA_NOWAIT, &newcbd->xferdma);
1718	if (error) {
1719		printf("%s: unable to create cbd DMA map, error = %d\n",
1720		    sc->sc_c.sc_dev.dv_xname, error);
1721		goto bad1;
1722	}
1723	error = bus_dmamap_load(sc->sc_c.sc_dmat, newcbd->xferdma,
1724	    newcbd->xfers, PAGE_SIZE, NULL, BUS_DMA_NOWAIT);
1725	if (error) {
1726		printf("%s: unable to load cbd DMA map, error = %d\n",
1727		    sc->sc_c.sc_dev.dv_xname, error);
1728		goto bad0;
1729	}
1730#ifdef DEBUG
1731	printf("%s: alloc newcdb at PHY addr 0x%lx\n", sc->sc_c.sc_dev.dv_xname,
1732	    (unsigned long)newcbd->xferdma->dm_segs[0].ds_addr);
1733#endif
1734	for (i = 0; i < SIOP_NCMDPB; i++) {
1735		error = bus_dmamap_create(sc->sc_c.sc_dmat, MAXPHYS, SIOP_NSG,
1736		    MAXPHYS, 0, BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW,
1737		    &newcbd->cmds[i].cmd_c.dmamap_data);
1738		if (error) {
1739			printf("%s: unable to create data DMA map for cbd: "
1740			    "error %d\n",
1741			    sc->sc_c.sc_dev.dv_xname, error);
1742			goto bad0;
1743		}
1744		error = bus_dmamap_create(sc->sc_c.sc_dmat,
1745		    sizeof(struct scsipi_generic), 1,
1746		    sizeof(struct scsipi_generic), 0,
1747		    BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW,
1748		    &newcbd->cmds[i].cmd_c.dmamap_cmd);
1749		if (error) {
1750			printf("%s: unable to create cmd DMA map for cbd %d\n",
1751			    sc->sc_c.sc_dev.dv_xname, error);
1752			goto bad0;
1753		}
1754		newcbd->cmds[i].cmd_c.siop_sc = &sc->sc_c;
1755		newcbd->cmds[i].esiop_cbdp = newcbd;
1756		xfer = &newcbd->xfers[i];
1757		newcbd->cmds[i].cmd_tables = (struct siop_common_xfer *)xfer;
1758		memset(newcbd->cmds[i].cmd_tables, 0,
1759		    sizeof(struct esiop_xfer));
1760		dsa = newcbd->xferdma->dm_segs[0].ds_addr +
1761		    i * sizeof(struct esiop_xfer);
1762		newcbd->cmds[i].cmd_c.dsa = dsa;
1763		newcbd->cmds[i].cmd_c.status = CMDST_FREE;
1764		xfer->siop_tables.t_msgout.count= htole32(1);
1765		xfer->siop_tables.t_msgout.addr = htole32(dsa);
1766		xfer->siop_tables.t_msgin.count= htole32(1);
1767		xfer->siop_tables.t_msgin.addr = htole32(dsa +
1768			offsetof(struct siop_common_xfer, msg_in));
1769		xfer->siop_tables.t_extmsgin.count= htole32(2);
1770		xfer->siop_tables.t_extmsgin.addr = htole32(dsa +
1771			offsetof(struct siop_common_xfer, msg_in) + 1);
1772		xfer->siop_tables.t_extmsgdata.addr = htole32(dsa +
1773			offsetof(struct siop_common_xfer, msg_in) + 3);
1774		xfer->siop_tables.t_status.count= htole32(1);
1775		xfer->siop_tables.t_status.addr = htole32(dsa +
1776			offsetof(struct siop_common_xfer, status));
1777
1778		s = splbio();
1779		TAILQ_INSERT_TAIL(&sc->free_list, &newcbd->cmds[i], next);
1780		splx(s);
1781#ifdef SIOP_DEBUG
1782		printf("tables[%d]: in=0x%x out=0x%x status=0x%x\n", i,
1783		    le32toh(newcbd->cmds[i].cmd_tables->t_msgin.addr),
1784		    le32toh(newcbd->cmds[i].cmd_tables->t_msgout.addr),
1785		    le32toh(newcbd->cmds[i].cmd_tables->t_status.addr));
1786#endif
1787	}
1788	s = splbio();
1789	TAILQ_INSERT_TAIL(&sc->cmds, newcbd, next);
1790	sc->sc_c.sc_adapt.adapt_openings += SIOP_NCMDPB;
1791	splx(s);
1792	return;
1793bad0:
1794	bus_dmamap_unload(sc->sc_c.sc_dmat, newcbd->xferdma);
1795	bus_dmamap_destroy(sc->sc_c.sc_dmat, newcbd->xferdma);
1796bad1:
1797	bus_dmamem_free(sc->sc_c.sc_dmat, &seg, rseg);
1798bad2:
1799	free(newcbd->cmds, M_DEVBUF);
1800bad3:
1801	free(newcbd, M_DEVBUF);
1802	return;
1803}
1804
1805void
1806esiop_moretagtbl(sc)
1807	struct esiop_softc *sc;
1808{
1809	int error, i, j, s;
1810	bus_dma_segment_t seg;
1811	int rseg;
1812	struct esiop_dsatblblk *newtblblk;
1813	struct esiop_dsatbl *newtbls;
1814	u_int32_t *tbls;
1815
1816	/* allocate a new list head */
1817	newtblblk = malloc(sizeof(struct esiop_dsatblblk),
1818	    M_DEVBUF, M_NOWAIT|M_ZERO);
1819	if (newtblblk == NULL) {
1820		printf("%s: can't allocate memory for tag DSA table block\n",
1821		    sc->sc_c.sc_dev.dv_xname);
1822		return;
1823	}
1824
1825	/* allocate tbl list */
1826	newtbls = malloc(sizeof(struct esiop_dsatbl) * ESIOP_NTPB,
1827	    M_DEVBUF, M_NOWAIT|M_ZERO);
1828	if (newtbls == NULL) {
1829		printf("%s: can't allocate memory for command descriptors\n",
1830		    sc->sc_c.sc_dev.dv_xname);
1831		goto bad3;
1832	}
1833	error = bus_dmamem_alloc(sc->sc_c.sc_dmat, PAGE_SIZE, PAGE_SIZE, 0,
1834	    &seg, 1, &rseg, BUS_DMA_NOWAIT);
1835	if (error) {
1836		printf("%s: unable to allocate tbl DMA memory, error = %d\n",
1837		    sc->sc_c.sc_dev.dv_xname, error);
1838		goto bad2;
1839	}
1840	error = bus_dmamem_map(sc->sc_c.sc_dmat, &seg, rseg, PAGE_SIZE,
1841	    (caddr_t *)&tbls, BUS_DMA_NOWAIT|BUS_DMA_COHERENT);
1842	if (error) {
1843		printf("%s: unable to map tbls DMA memory, error = %d\n",
1844		    sc->sc_c.sc_dev.dv_xname, error);
1845		goto bad2;
1846	}
1847	error = bus_dmamap_create(sc->sc_c.sc_dmat, PAGE_SIZE, 1, PAGE_SIZE, 0,
1848	    BUS_DMA_NOWAIT, &newtblblk->blkmap);
1849	if (error) {
1850		printf("%s: unable to create tbl DMA map, error = %d\n",
1851		    sc->sc_c.sc_dev.dv_xname, error);
1852		goto bad1;
1853	}
1854	error = bus_dmamap_load(sc->sc_c.sc_dmat, newtblblk->blkmap,
1855	    tbls, PAGE_SIZE, NULL, BUS_DMA_NOWAIT);
1856	if (error) {
1857		printf("%s: unable to load tbl DMA map, error = %d\n",
1858		    sc->sc_c.sc_dev.dv_xname, error);
1859		goto bad0;
1860	}
1861#ifdef DEBUG
1862	printf("%s: alloc new tag DSA table at PHY addr 0x%lx\n",
1863	    sc->sc_c.sc_dev.dv_xname,
1864	    (unsigned long)newtblblk->blkmap->dm_segs[0].ds_addr);
1865#endif
1866	for (i = 0; i < ESIOP_NTPB; i++) {
1867		newtbls[i].tblblk = newtblblk;
1868		newtbls[i].tbl = &tbls[i * ESIOP_NTAG];
1869		newtbls[i].tbl_offset = i * ESIOP_NTAG * sizeof(u_int32_t);
1870		newtbls[i].tbl_dsa = newtblblk->blkmap->dm_segs[0].ds_addr +
1871		    newtbls[i].tbl_offset;
1872		for (j = 0; j < ESIOP_NTAG; j++)
1873			newtbls[i].tbl[j] = j;
1874		s = splbio();
1875		TAILQ_INSERT_TAIL(&sc->free_tagtbl, &newtbls[i], next);
1876		splx(s);
1877	}
1878	s = splbio();
1879	TAILQ_INSERT_TAIL(&sc->tag_tblblk, newtblblk, next);
1880	splx(s);
1881	return;
1882bad0:
1883	bus_dmamap_unload(sc->sc_c.sc_dmat, newtblblk->blkmap);
1884	bus_dmamap_destroy(sc->sc_c.sc_dmat, newtblblk->blkmap);
1885bad1:
1886	bus_dmamem_free(sc->sc_c.sc_dmat, &seg, rseg);
1887bad2:
1888	free(newtbls, M_DEVBUF);
1889bad3:
1890	free(newtblblk, M_DEVBUF);
1891	return;
1892}
1893
1894void
1895esiop_add_dev(sc, target, lun)
1896	struct esiop_softc *sc;
1897	int target;
1898	int lun;
1899{
1900	struct esiop_target *esiop_target =
1901	    (struct esiop_target *)sc->sc_c.targets[target];
1902	struct esiop_lun *esiop_lun = esiop_target->esiop_lun[lun];
1903
1904	if (esiop_target->target_c.flags & TARF_TAG) {
1905		/* we need a tag DSA table */
1906		esiop_lun->lun_tagtbl= TAILQ_FIRST(&sc->free_tagtbl);
1907		if (esiop_lun->lun_tagtbl == NULL) {
1908			esiop_moretagtbl(sc);
1909			esiop_lun->lun_tagtbl= TAILQ_FIRST(&sc->free_tagtbl);
1910			if (esiop_lun->lun_tagtbl == NULL) {
1911				/* no resources, run untagged */
1912				esiop_target->target_c.flags &= ~TARF_TAG;
1913				return;
1914			}
1915		}
1916		TAILQ_REMOVE(&sc->free_tagtbl, esiop_lun->lun_tagtbl, next);
1917
1918	}
1919}
1920
1921void
1922esiop_del_dev(sc, target, lun)
1923	struct esiop_softc *sc;
1924	int target;
1925	int lun;
1926{
1927	struct esiop_target *esiop_target;
1928#ifdef SIOP_DEBUG
1929		printf("%s:%d:%d: free lun sw entry\n",
1930		    sc->sc_c.sc_dev.dv_xname, target, lun);
1931#endif
1932	if (sc->sc_c.targets[target] == NULL)
1933		return;
1934	esiop_target = (struct esiop_target *)sc->sc_c.targets[target];
1935	free(esiop_target->esiop_lun[lun], M_DEVBUF);
1936	esiop_target->esiop_lun[lun] = NULL;
1937}
1938
1939struct esiop_cmd *
1940esiop_cmd_find(sc, target, dsa)
1941	struct esiop_softc *sc;
1942	int target;
1943	u_int32_t dsa;
1944{
1945	int lun, tag;
1946	struct esiop_cmd *cmd;
1947	struct esiop_lun *esiop_lun;
1948	struct esiop_target *esiop_target =
1949	    (struct esiop_target *)sc->sc_c.targets[target];
1950
1951	if (esiop_target == NULL)
1952		return NULL;
1953
1954	for (lun = 0; lun < sc->sc_c.sc_chan.chan_nluns; lun++) {
1955		esiop_lun = esiop_target->esiop_lun[lun];
1956		if (esiop_lun == NULL)
1957			continue;
1958		cmd = esiop_lun->active;
1959		if (cmd && cmd->cmd_c.dsa == dsa)
1960			return cmd;
1961		if (esiop_target->target_c.flags & TARF_TAG) {
1962			for (tag = 0; tag < ESIOP_NTAG; tag++) {
1963				cmd = esiop_lun->tactive[tag];
1964				if (cmd && cmd->cmd_c.dsa == dsa)
1965					return cmd;
1966			}
1967		}
1968	}
1969	return NULL;
1970}
1971
1972void
1973esiop_target_register(sc, target)
1974	struct esiop_softc *sc;
1975	u_int32_t target;
1976{
1977	struct esiop_target *esiop_target =
1978	    (struct esiop_target *)sc->sc_c.targets[target];
1979
1980	/* get a DSA table for this target */
1981	esiop_target->lun_table_offset = sc->sc_free_offset;
1982	sc->sc_free_offset += sc->sc_c.sc_chan.chan_nluns + 2;
1983#ifdef SIOP_DEBUG
1984	printf("%s: lun table for target %d offset %d free offset %d\n",
1985	    sc->sc_c.sc_dev.dv_xname, target, esiop_target->lun_table_offset,
1986	    sc->sc_free_offset);
1987#endif
1988	/* first 32 bytes are ID (for select) */
1989	esiop_script_write(sc, esiop_target->lun_table_offset,
1990	    esiop_target->target_c.id);
1991	/* Record this table in the target DSA table */
1992	esiop_script_write(sc,
1993	    sc->sc_target_table_offset + target,
1994	    (esiop_target->lun_table_offset * sizeof(u_int32_t)) +
1995	    sc->sc_c.sc_scriptaddr);
1996	esiop_script_sync(sc,
1997	    BUS_DMASYNC_PREREAD |  BUS_DMASYNC_PREWRITE);
1998}
1999
2000#ifdef SIOP_STATS
2001void
2002esiop_printstats()
2003{
2004	printf("esiop_stat_intr %d\n", esiop_stat_intr);
2005	printf("esiop_stat_intr_shortxfer %d\n", esiop_stat_intr_shortxfer);
2006	printf("esiop_stat_intr_xferdisc %d\n", esiop_stat_intr_xferdisc);
2007	printf("esiop_stat_intr_sdp %d\n", esiop_stat_intr_sdp);
2008	printf("esiop_stat_intr_done %d\n", esiop_stat_intr_done);
2009	printf("esiop_stat_intr_lunresel %d\n", esiop_stat_intr_lunresel);
2010	printf("esiop_stat_intr_qfull %d\n", esiop_stat_intr_qfull);
2011}
2012#endif
2013