1/*	$NetBSD: esiop.c,v 1.9 2002/04/25 19:34:02 bouyer Exp $	*/
2
3/*
4 * Copyright (c) 2002 Manuel Bouyer.
5 *
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions
8 * are met:
9 * 1. Redistributions of source code must retain the above copyright
10 *    notice, this list of conditions and the following disclaimer.
11 * 2. Redistributions in binary form must reproduce the above copyright
12 *    notice, this list of conditions and the following disclaimer in the
13 *    documentation and/or other materials provided with the distribution.
14 * 3. All advertising materials mentioning features or use of this software
15 *    must display the following acknowledgement:
16 *	This product includes software developed by Manuel Bouyer.
17 * 4. The name of the author may not be used to endorse or promote products
18 *    derived from this software without specific prior written permission.
19 *
20 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
21 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
22 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
23 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
24 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
25 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
26 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
27 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
28 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
29 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
30 *
31 */
32
33/* SYM53c7/8xx PCI-SCSI I/O Processors driver */
34
35#include <sys/cdefs.h>
36__KERNEL_RCSID(0, "$NetBSD: esiop.c,v 1.9 2002/04/25 19:34:02 bouyer Exp $");
37
38#include <sys/param.h>
39#include <sys/systm.h>
40#include <sys/device.h>
41#include <sys/malloc.h>
42#include <sys/buf.h>
43#include <sys/kernel.h>
44
45#include <uvm/uvm_extern.h>
46
47#include <machine/endian.h>
48#include <machine/bus.h>
49
50#include <dev/microcode/siop/esiop.out>
51
52#include <dev/scsipi/scsi_all.h>
53#include <dev/scsipi/scsi_message.h>
54#include <dev/scsipi/scsipi_all.h>
55
56#include <dev/scsipi/scsiconf.h>
57
58#include <dev/ic/siopreg.h>
59#include <dev/ic/siopvar_common.h>
60#include <dev/ic/esiopvar.h>
61
62#include "opt_siop.h"
63
64#ifndef DEBUG
65#undef DEBUG
66#endif
67#undef SIOP_DEBUG
68#undef SIOP_DEBUG_DR
69#undef SIOP_DEBUG_INTR
70#undef SIOP_DEBUG_SCHED
71#undef DUMP_SCRIPT
72
73#define SIOP_STATS
74
75#ifndef SIOP_DEFAULT_TARGET
76#define SIOP_DEFAULT_TARGET 7
77#endif
78
79/* number of cmd descriptors per block */
80#define SIOP_NCMDPB (PAGE_SIZE / sizeof(struct esiop_xfer))
81
82void	esiop_reset __P((struct esiop_softc *));
83void	esiop_checkdone __P((struct esiop_softc *));
84void	esiop_handle_reset __P((struct esiop_softc *));
85void	esiop_scsicmd_end __P((struct esiop_cmd *));
86void	esiop_unqueue __P((struct esiop_softc *, int, int));
87int	esiop_handle_qtag_reject __P((struct esiop_cmd *));
88static void	esiop_start __P((struct esiop_softc *, struct esiop_cmd *));
89void 	esiop_timeout __P((void *));
90int	esiop_scsicmd __P((struct scsipi_xfer *));
91void	esiop_scsipi_request __P((struct scsipi_channel *,
92			scsipi_adapter_req_t, void *));
93void	esiop_dump_script __P((struct esiop_softc *));
94void	esiop_morecbd __P((struct esiop_softc *));
95void	esiop_moretagtbl __P((struct esiop_softc *));
96void	siop_add_reselsw __P((struct esiop_softc *, int));
97void	esiop_target_register __P((struct esiop_softc *, u_int32_t));
98
99void    esiop_update_scntl3 __P((struct esiop_softc *,
100			struct siop_common_target *));
101
102#ifdef SIOP_STATS
103static int esiop_stat_intr = 0;
104static int esiop_stat_intr_shortxfer = 0;
105static int esiop_stat_intr_sdp = 0;
106static int esiop_stat_intr_done = 0;
107static int esiop_stat_intr_xferdisc = 0;
108static int esiop_stat_intr_lunresel = 0;
109static int esiop_stat_intr_qfull = 0;
110void esiop_printstats __P((void));
111#define INCSTAT(x) x++
112#else
113#define INCSTAT(x)
114#endif
115
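/*
 * Script access helpers.  The SCRIPTS code either runs from the chip's
 * on-board RAM (SF_CHIP_RAM), accessed through bus_space, or from a page
 * of host memory mapped by sc_scriptdma; in the latter case the copy is
 * kept in the little-endian byte order the chip expects, hence the
 * htole32()/le32toh() conversions and the explicit bus_dmamap_sync()s.
 */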
116static __inline__ void esiop_script_sync __P((struct esiop_softc *, int));
117static __inline__ void
118esiop_script_sync(sc, ops)
119	struct esiop_softc *sc;
120	int ops;
121{
122	if ((sc->sc_c.features & SF_CHIP_RAM) == 0)
123		bus_dmamap_sync(sc->sc_c.sc_dmat, sc->sc_c.sc_scriptdma, 0,
124		    PAGE_SIZE, ops);
125}
126
127static __inline__ u_int32_t esiop_script_read __P((struct esiop_softc *, u_int));
128static __inline__ u_int32_t
129esiop_script_read(sc, offset)
130	struct esiop_softc *sc;
131	u_int offset;
132{
133	if (sc->sc_c.features & SF_CHIP_RAM) {
134		return bus_space_read_4(sc->sc_c.sc_ramt, sc->sc_c.sc_ramh,
135		    offset * 4);
136	} else {
137		return le32toh(sc->sc_c.sc_script[offset]);
138	}
139}
140
141static __inline__ void esiop_script_write __P((struct esiop_softc *, u_int,
142	u_int32_t));
143static __inline__ void
144esiop_script_write(sc, offset, val)
145	struct esiop_softc *sc;
146	u_int offset;
147	u_int32_t val;
148{
149	if (sc->sc_c.features & SF_CHIP_RAM) {
150		bus_space_write_4(sc->sc_c.sc_ramt, sc->sc_c.sc_ramh,
151		    offset * 4, val);
152	} else {
153		sc->sc_c.sc_script[offset] = htole32(val);
154	}
155}
156
157void
158esiop_attach(sc)
159	struct esiop_softc *sc;
160{
161	if (siop_common_attach(&sc->sc_c) != 0 )
162		return;
163
164	TAILQ_INIT(&sc->free_list);
165	TAILQ_INIT(&sc->cmds);
166	TAILQ_INIT(&sc->free_tagtbl);
167	TAILQ_INIT(&sc->tag_tblblk);
168	sc->sc_currschedslot = 0;
169#ifdef SIOP_DEBUG
170	printf("%s: script size = %d, PHY addr=0x%x, VIRT=%p\n",
171	    sc->sc_c.sc_dev.dv_xname, (int)sizeof(esiop_script),
172	    (u_int32_t)sc->sc_c.sc_scriptaddr, sc->sc_c.sc_script);
173#endif
174
175	sc->sc_c.sc_adapt.adapt_max_periph = ESIOP_NTAG;
176	sc->sc_c.sc_adapt.adapt_request = esiop_scsipi_request;
177
178	/* Do a bus reset, so that devices fall back to narrow/async */
179	siop_resetbus(&sc->sc_c);
180	/*
	 * esiop_reset() will reset the chip, thus clearing pending interrupts
182	 */
183	esiop_reset(sc);
184#ifdef DUMP_SCRIPT
185	esiop_dump_script(sc);
186#endif
187
188	config_found((struct device*)sc, &sc->sc_c.sc_chan, scsiprint);
189}
190
191void
192esiop_reset(sc)
193	struct esiop_softc *sc;
194{
195	int i, j;
196	u_int32_t addr;
197	u_int32_t msgin_addr;
198
199	siop_common_reset(&sc->sc_c);
200
201	/*
	 * We copy the script at the beginning of RAM; it is followed by 8
	 * bytes reserved for incoming messages.
204	 */
205	sc->sc_free_offset = sizeof(esiop_script) / sizeof(esiop_script[0]);
206	msgin_addr =
207	    sc->sc_free_offset * sizeof(u_int32_t) + sc->sc_c.sc_scriptaddr;
208	sc->sc_free_offset += 2;
209	/* then we have the scheduler ring */
210	sc->sc_shedoffset = sc->sc_free_offset;
211	sc->sc_free_offset += A_ncmd_slots * CMD_SLOTSIZE;
212	/* then the targets DSA table */
213	sc->sc_target_table_offset = sc->sc_free_offset;
214	sc->sc_free_offset += sc->sc_c.sc_chan.chan_ntargets;
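	/*
	 * Resulting layout of the script memory (on-chip RAM or the DMA'd
	 * page), in 32-bit words: the SCRIPTS code, then the 2 words
	 * reserved above for incoming messages, then the scheduler ring
	 * (A_ncmd_slots slots of CMD_SLOTSIZE words each), then the
	 * per-target DSA table (one 32-bit entry per target);
	 * sc_free_offset is the first unused word.
	 */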
215	/* copy and patch the script */
216	if (sc->sc_c.features & SF_CHIP_RAM) {
217		bus_space_write_region_4(sc->sc_c.sc_ramt, sc->sc_c.sc_ramh, 0,
218		    esiop_script,
219		    sizeof(esiop_script) / sizeof(esiop_script[0]));
220		for (j = 0; j <
221		    (sizeof(E_tlq_offset_Used) / sizeof(E_tlq_offset_Used[0]));
222		    j++) {
223			bus_space_write_4(sc->sc_c.sc_ramt, sc->sc_c.sc_ramh,
224			    E_tlq_offset_Used[j] * 4,
225			    sizeof(struct siop_common_xfer));
226		}
227		for (j = 0; j <
228		    (sizeof(E_abs_msgin2_Used) / sizeof(E_abs_msgin2_Used[0]));
229		    j++) {
230			bus_space_write_4(sc->sc_c.sc_ramt, sc->sc_c.sc_ramh,
231			    E_abs_msgin2_Used[j] * 4, msgin_addr);
232		}
233
234		if (sc->sc_c.features & SF_CHIP_LED0) {
235			bus_space_write_region_4(sc->sc_c.sc_ramt,
236			    sc->sc_c.sc_ramh,
237			    Ent_led_on1, esiop_led_on,
238			    sizeof(esiop_led_on) / sizeof(esiop_led_on[0]));
239			bus_space_write_region_4(sc->sc_c.sc_ramt,
240			    sc->sc_c.sc_ramh,
241			    Ent_led_on2, esiop_led_on,
242			    sizeof(esiop_led_on) / sizeof(esiop_led_on[0]));
243			bus_space_write_region_4(sc->sc_c.sc_ramt,
244			    sc->sc_c.sc_ramh,
245			    Ent_led_off, esiop_led_off,
246			    sizeof(esiop_led_off) / sizeof(esiop_led_off[0]));
247		}
248	} else {
249		for (j = 0;
250		    j < (sizeof(esiop_script) / sizeof(esiop_script[0])); j++) {
251			sc->sc_c.sc_script[j] = htole32(esiop_script[j]);
252		}
253		for (j = 0; j <
254		    (sizeof(E_tlq_offset_Used) / sizeof(E_tlq_offset_Used[0]));
255		    j++) {
256			sc->sc_c.sc_script[E_tlq_offset_Used[j]] =
257			    htole32(sizeof(struct siop_common_xfer));
258		}
259		for (j = 0; j <
260		    (sizeof(E_abs_msgin2_Used) / sizeof(E_abs_msgin2_Used[0]));
261		    j++) {
262			sc->sc_c.sc_script[E_abs_msgin2_Used[j]] =
263			    htole32(msgin_addr);
264		}
265
266		if (sc->sc_c.features & SF_CHIP_LED0) {
267			for (j = 0; j < (sizeof(esiop_led_on) /
268			    sizeof(esiop_led_on[0])); j++)
269				sc->sc_c.sc_script[
270				    Ent_led_on1 / sizeof(esiop_led_on[0]) + j
271				    ] = htole32(esiop_led_on[j]);
272			for (j = 0; j < (sizeof(esiop_led_on) /
273			    sizeof(esiop_led_on[0])); j++)
274				sc->sc_c.sc_script[
275				    Ent_led_on2 / sizeof(esiop_led_on[0]) + j
276				    ] = htole32(esiop_led_on[j]);
277			for (j = 0; j < (sizeof(esiop_led_off) /
278			    sizeof(esiop_led_off[0])); j++)
279				sc->sc_c.sc_script[
280				   Ent_led_off / sizeof(esiop_led_off[0]) + j
281				   ] = htole32(esiop_led_off[j]);
282		}
283	}
284	/* get base of scheduler ring */
285	addr = sc->sc_c.sc_scriptaddr + sc->sc_shedoffset * sizeof(u_int32_t);
286	/* init scheduler */
287	for (i = 0; i < A_ncmd_slots; i++) {
288		esiop_script_write(sc,
289		    sc->sc_shedoffset + i * CMD_SLOTSIZE, A_f_cmd_free);
290	}
291	sc->sc_currschedslot = 0;
292	bus_space_write_1(sc->sc_c.sc_rt, sc->sc_c.sc_rh, SIOP_SCRATCHE, 0);
293	bus_space_write_4(sc->sc_c.sc_rt, sc->sc_c.sc_rh, SIOP_SCRATCHD, addr);
294	/*
295	 * 0x78000000 is a 'move data8 to reg'. data8 is the second
296	 * octet, reg offset is the third.
297	 */
298	esiop_script_write(sc, Ent_cmdr0 / 4,
299	    0x78640000 | ((addr & 0x000000ff) <<  8));
300	esiop_script_write(sc, Ent_cmdr1 / 4,
301	    0x78650000 | ((addr & 0x0000ff00)      ));
302	esiop_script_write(sc, Ent_cmdr2 / 4,
303	    0x78660000 | ((addr & 0x00ff0000) >>  8));
304	esiop_script_write(sc, Ent_cmdr3 / 4,
305	    0x78670000 | ((addr & 0xff000000) >> 16));
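	/*
	 * Illustration: with a ring base of 0xAABBCCDD the four words
	 * patched above become 0x7864DD00, 0x7865CC00, 0x7866BB00 and
	 * 0x7867AA00, i.e. one 'move data8 to reg' per address byte,
	 * targeting the four consecutive chip registers 0x64-0x67.
	 */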
306	/* set flags */
307	bus_space_write_4(sc->sc_c.sc_rt, sc->sc_c.sc_rh, SIOP_SCRATCHC, 0);
308	/* write pointer of base of target DSA table */
309	addr = (sc->sc_target_table_offset * sizeof(u_int32_t)) +
310	    sc->sc_c.sc_scriptaddr;
311	esiop_script_write(sc, (Ent_load_targtable / 4) + 0,
312	    esiop_script_read(sc,(Ent_load_targtable / 4) + 0) |
313	    ((addr & 0x000000ff) <<  8));
314	esiop_script_write(sc, (Ent_load_targtable / 4) + 2,
315	    esiop_script_read(sc,(Ent_load_targtable / 4) + 2) |
316	    ((addr & 0x0000ff00)      ));
317	esiop_script_write(sc, (Ent_load_targtable / 4) + 4,
318	    esiop_script_read(sc,(Ent_load_targtable / 4) + 4) |
319	    ((addr & 0x00ff0000) >>  8));
320	esiop_script_write(sc, (Ent_load_targtable / 4) + 6,
321	    esiop_script_read(sc,(Ent_load_targtable / 4) + 6) |
322	    ((addr & 0xff000000) >> 16));
323#ifdef SIOP_DEBUG
324	printf("%s: target table offset %d free offset %d\n",
325	    sc->sc_c.sc_dev.dv_xname, sc->sc_target_table_offset,
326	    sc->sc_free_offset);
327#endif
328
329	/* register existing targets */
330	for (i = 0; i < sc->sc_c.sc_chan.chan_ntargets; i++) {
331		if (sc->sc_c.targets[i])
332			esiop_target_register(sc, i);
333	}
334	/* start script */
335	if ((sc->sc_c.features & SF_CHIP_RAM) == 0) {
336		bus_dmamap_sync(sc->sc_c.sc_dmat, sc->sc_c.sc_scriptdma, 0,
337		    PAGE_SIZE, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
338	}
339	bus_space_write_4(sc->sc_c.sc_rt, sc->sc_c.sc_rh, SIOP_DSP,
340	    sc->sc_c.sc_scriptaddr + Ent_reselect);
341}
342
343#if 0
344#define CALL_SCRIPT(ent) do {\
345	printf ("start script DSA 0x%lx DSP 0x%lx\n", \
346	    esiop_cmd->cmd_c.dsa, \
347	    sc->sc_c.sc_scriptaddr + ent); \
348bus_space_write_4(sc->sc_c.sc_rt, sc->sc_c.sc_rh, SIOP_DSP, sc->sc_c.sc_scriptaddr + ent); \
349} while (0)
350#else
351#define CALL_SCRIPT(ent) do {\
352bus_space_write_4(sc->sc_c.sc_rt, sc->sc_c.sc_rh, SIOP_DSP, sc->sc_c.sc_scriptaddr + ent); \
353} while (0)
354#endif
355
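/*
 * Interrupt handler.  ISTAT_INTF is the script's way of signaling that it
 * completed one or more commands on its own; those are collected by
 * esiop_checkdone().  ISTAT_DIP and ISTAT_SIP report DMA and SCSI
 * exceptions, decoded from DSTAT/SIST below; script-generated interrupts
 * (DSTAT_SIR) carry a code in DSPS telling us what the script needs.
 */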
356int
357esiop_intr(v)
358	void *v;
359{
360	struct esiop_softc *sc = v;
361	struct esiop_target *esiop_target;
362	struct esiop_cmd *esiop_cmd;
363	struct esiop_lun *esiop_lun;
364	struct scsipi_xfer *xs;
365	int istat, sist, sstat1, dstat;
366	u_int32_t irqcode;
367	int need_reset = 0;
368	int offset, target, lun, tag;
369	u_int32_t tflags;
370	u_int32_t addr;
371	int freetarget = 0;
372	int slot;
373	int retval = 0;
374
375again:
376	istat = bus_space_read_1(sc->sc_c.sc_rt, sc->sc_c.sc_rh, SIOP_ISTAT);
377	if ((istat & (ISTAT_INTF | ISTAT_DIP | ISTAT_SIP)) == 0) {
378		return retval;
379	}
380	retval = 1;
381	INCSTAT(esiop_stat_intr);
382	if (istat & ISTAT_INTF) {
383		bus_space_write_1(sc->sc_c.sc_rt, sc->sc_c.sc_rh,
384		    SIOP_ISTAT, ISTAT_INTF);
385		esiop_checkdone(sc);
386		if (sc->sc_flags & SCF_CHAN_NOSLOT) {
387			/*
			 * at least one command terminated,
			 * so we should have free slots now
390			 */
391			sc->sc_flags &= ~SCF_CHAN_NOSLOT;
392			scsipi_channel_thaw(&sc->sc_c.sc_chan, 1);
393		}
394		goto again;
395	}
396
397	if ((istat &(ISTAT_DIP | ISTAT_SIP | ISTAT_ABRT)) ==
398	    (ISTAT_DIP | ISTAT_ABRT)) {
399		bus_space_write_1(sc->sc_c.sc_rt, sc->sc_c.sc_rh,
400		    SIOP_ISTAT, 0);
401	}
402
403	/* get CMD from T/L/Q */
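	/*
	 * SCRATCHC layout as used by this driver (it mirrors the tlq word
	 * set up in esiop_scsipi_request()): as the decoding below implies,
	 * the A_f_c_* flag bits occupy byte 0, the target is in byte 1, the
	 * lun in byte 2 and the tag in byte 3; each field is only
	 * meaningful when its flag is set.
	 */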
404	tflags = bus_space_read_4(sc->sc_c.sc_rt, sc->sc_c.sc_rh,
405	    SIOP_SCRATCHC);
406#ifdef SIOP_DEBUG_INTR
407		printf("interrupt, istat=0x%x tflags=0x%x "
408		    "DSA=0x%x DSP=0x%lx\n", istat, tflags,
409		    bus_space_read_4(sc->sc_c.sc_rt, sc->sc_c.sc_rh, SIOP_DSA),
410		    (u_long)(bus_space_read_4(sc->sc_c.sc_rt, sc->sc_c.sc_rh,
411			SIOP_DSP) -
412		    sc->sc_c.sc_scriptaddr));
413#endif
414	target = (tflags & A_f_c_target) ? ((tflags >> 8) & 0xff) : -1;
415	if (target > sc->sc_c.sc_chan.chan_ntargets) target = -1;
416	lun = (tflags & A_f_c_lun) ? ((tflags >> 16) & 0xff) : -1;
417	if (lun > sc->sc_c.sc_chan.chan_nluns) lun = -1;
418	tag = (tflags & A_f_c_tag) ? ((tflags >> 24) & 0xff) : -1;
419
420	if (target >= 0 && lun >= 0) {
421		esiop_target = (struct esiop_target *)sc->sc_c.targets[target];
422		if (esiop_target == NULL) {
423			printf("esiop_target (target %d) not valid\n", target);
424			goto none;
425		}
426		esiop_lun = esiop_target->esiop_lun[lun];
427		if (esiop_lun == NULL) {
428			printf("esiop_lun (target %d lun %d) not valid\n",
429			    target, lun);
430			goto none;
431		}
432		esiop_cmd =
433		    (tag >= 0) ? esiop_lun->tactive[tag] : esiop_lun->active;
434		if (esiop_cmd == NULL) {
435			printf("esiop_cmd (target %d lun %d tag %d) not valid\n",
436			    target, lun, tag);
437			goto none;
438		}
439		xs = esiop_cmd->cmd_c.xs;
440#ifdef DIAGNOSTIC
441		if (esiop_cmd->cmd_c.status != CMDST_ACTIVE) {
442 			printf("esiop_cmd (target %d lun %d) "
443			    "not active (%d)\n", target, lun,
444			    esiop_cmd->cmd_c.status);
445			goto none;
446		}
447#endif
448		esiop_table_sync(esiop_cmd,
449		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
450	} else {
451none:
452		xs = NULL;
453		esiop_target = NULL;
454		esiop_lun = NULL;
455		esiop_cmd = NULL;
456	}
457	if (istat & ISTAT_DIP) {
458		dstat = bus_space_read_1(sc->sc_c.sc_rt, sc->sc_c.sc_rh,
459		    SIOP_DSTAT);
460		if (dstat & DSTAT_SSI) {
			printf("single step dsp 0x%08x dsa 0x%08x\n",
462			    (int)(bus_space_read_4(sc->sc_c.sc_rt,
463			    sc->sc_c.sc_rh, SIOP_DSP) -
464			    sc->sc_c.sc_scriptaddr),
465			    bus_space_read_4(sc->sc_c.sc_rt, sc->sc_c.sc_rh,
466				SIOP_DSA));
467			if ((dstat & ~(DSTAT_DFE | DSTAT_SSI)) == 0 &&
468			    (istat & ISTAT_SIP) == 0) {
469				bus_space_write_1(sc->sc_c.sc_rt,
470				    sc->sc_c.sc_rh, SIOP_DCNTL,
471				    bus_space_read_1(sc->sc_c.sc_rt,
472				    sc->sc_c.sc_rh, SIOP_DCNTL) | DCNTL_STD);
473			}
474			return 1;
475		}
476		if (dstat & ~(DSTAT_SIR | DSTAT_DFE | DSTAT_SSI)) {
477		printf("%s: DMA IRQ:", sc->sc_c.sc_dev.dv_xname);
478		if (dstat & DSTAT_IID)
479			printf(" Illegal instruction");
480		if (dstat & DSTAT_ABRT)
481			printf(" abort");
482		if (dstat & DSTAT_BF)
483			printf(" bus fault");
484		if (dstat & DSTAT_MDPE)
485			printf(" parity");
486		if (dstat & DSTAT_DFE)
487			printf(" dma fifo empty");
488		else
489			siop_clearfifo(&sc->sc_c);
490		printf(", DSP=0x%x DSA=0x%x: ",
491		    (int)(bus_space_read_4(sc->sc_c.sc_rt, sc->sc_c.sc_rh,
492			SIOP_DSP) - sc->sc_c.sc_scriptaddr),
493		    bus_space_read_4(sc->sc_c.sc_rt, sc->sc_c.sc_rh, SIOP_DSA));
494		if (esiop_cmd)
495			printf("T/L/Q=%d/%d/%d last msg_in=0x%x status=0x%x\n",
496			    target, lun, tag, esiop_cmd->cmd_tables->msg_in[0],
497			    le32toh(esiop_cmd->cmd_tables->status));
498		else
499			printf(" current T/L/Q invalid\n");
500		need_reset = 1;
501		}
502	}
503	if (istat & ISTAT_SIP) {
504		if (istat & ISTAT_DIP)
505			delay(10);
506		/*
		 * Can't read sist0 & sist1 independently, or we have to
		 * insert a delay
509		 */
510		sist = bus_space_read_2(sc->sc_c.sc_rt, sc->sc_c.sc_rh,
511		    SIOP_SIST0);
512		sstat1 = bus_space_read_1(sc->sc_c.sc_rt, sc->sc_c.sc_rh,
513		    SIOP_SSTAT1);
514#ifdef SIOP_DEBUG_INTR
515		printf("scsi interrupt, sist=0x%x sstat1=0x%x "
516		    "DSA=0x%x DSP=0x%lx\n", sist, sstat1,
517		    bus_space_read_4(sc->sc_c.sc_rt, sc->sc_c.sc_rh, SIOP_DSA),
518		    (u_long)(bus_space_read_4(sc->sc_c.sc_rt, sc->sc_c.sc_rh,
519			SIOP_DSP) -
520		    sc->sc_c.sc_scriptaddr));
521#endif
522		if (sist & SIST0_RST) {
523			esiop_handle_reset(sc);
524			/* no table to flush here */
525			return 1;
526		}
527		if (sist & SIST0_SGE) {
528			if (esiop_cmd)
529				scsipi_printaddr(xs->xs_periph);
530			else
531				printf("%s:", sc->sc_c.sc_dev.dv_xname);
532			printf("scsi gross error\n");
533			if (esiop_target)
534				esiop_target->target_c.flags &= ~TARF_DT;
535			goto reset;
536		}
537		if ((sist & SIST0_MA) && need_reset == 0) {
538			if (esiop_cmd) {
539				int scratchc0;
540				dstat = bus_space_read_1(sc->sc_c.sc_rt,
541				    sc->sc_c.sc_rh, SIOP_DSTAT);
542				/*
543				 * first restore DSA, in case we were in a S/G
544				 * operation.
545				 */
546				bus_space_write_4(sc->sc_c.sc_rt,
547				    sc->sc_c.sc_rh,
548				    SIOP_DSA, esiop_cmd->cmd_c.dsa);
549				scratchc0 = bus_space_read_1(sc->sc_c.sc_rt,
550				    sc->sc_c.sc_rh, SIOP_SCRATCHC);
551				switch (sstat1 & SSTAT1_PHASE_MASK) {
552				case SSTAT1_PHASE_STATUS:
553				/*
554				 * previous phase may be aborted for any reason
				 * (for example, the target has less data to
556				 * transfer than requested). Just go to status
557				 * and the command should terminate.
558				 */
559					INCSTAT(esiop_stat_intr_shortxfer);
560					if ((dstat & DSTAT_DFE) == 0)
561						siop_clearfifo(&sc->sc_c);
562					/* no table to flush here */
563					CALL_SCRIPT(Ent_status);
564					return 1;
565				case SSTAT1_PHASE_MSGIN:
566					/*
					 * target may be ready to disconnect.
568					 * Save data pointers just in case.
569					 */
570					INCSTAT(esiop_stat_intr_xferdisc);
571					if (scratchc0 & A_f_c_data)
572						siop_sdp(&esiop_cmd->cmd_c);
573					else if ((dstat & DSTAT_DFE) == 0)
574						siop_clearfifo(&sc->sc_c);
575					bus_space_write_1(sc->sc_c.sc_rt,
576					    sc->sc_c.sc_rh, SIOP_SCRATCHC,
577					    scratchc0 & ~A_f_c_data);
578					esiop_table_sync(esiop_cmd,
579					    BUS_DMASYNC_PREREAD |
580					    BUS_DMASYNC_PREWRITE);
581					CALL_SCRIPT(Ent_msgin);
582					return 1;
583				}
584				printf("%s: unexpected phase mismatch %d\n",
585				    sc->sc_c.sc_dev.dv_xname,
586				    sstat1 & SSTAT1_PHASE_MASK);
587			} else {
588				printf("%s: phase mismatch without command\n",
589				    sc->sc_c.sc_dev.dv_xname);
590			}
591			need_reset = 1;
592		}
593		if (sist & SIST0_PAR) {
594			/* parity error, reset */
595			if (esiop_cmd)
596				scsipi_printaddr(xs->xs_periph);
597			else
598				printf("%s:", sc->sc_c.sc_dev.dv_xname);
599			printf("parity error\n");
600			if (esiop_target)
601				esiop_target->target_c.flags &= ~TARF_DT;
602			goto reset;
603		}
604		if ((sist & (SIST1_STO << 8)) && need_reset == 0) {
605			/*
			 * selection timeout, assume there's no device here.
			 * We also have to update the ring pointer ourselves.
608			 */
609			slot = bus_space_read_1(sc->sc_c.sc_rt,
610			    sc->sc_c.sc_rh, SIOP_SCRATCHE);
611			esiop_script_sync(sc,
612			    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
613#ifdef SIOP_DEBUG_SCHED
614			printf("sel timeout target %d, slot %d\n", target, slot);
615#endif
616			/*
617			 * mark this slot as free, and advance to next slot
618			 */
619			esiop_script_write(sc,
620			    sc->sc_shedoffset + slot * CMD_SLOTSIZE,
621			    A_f_cmd_free);
622			addr = bus_space_read_4(sc->sc_c.sc_rt,
623				    sc->sc_c.sc_rh, SIOP_SCRATCHD);
624			if (slot < (A_ncmd_slots - 1)) {
625				bus_space_write_1(sc->sc_c.sc_rt,
626				    sc->sc_c.sc_rh, SIOP_SCRATCHE, slot + 1);
627				addr = addr + sizeof(struct esiop_slot);
628			} else {
629				bus_space_write_1(sc->sc_c.sc_rt,
630				    sc->sc_c.sc_rh, SIOP_SCRATCHE, 0);
631				addr = sc->sc_c.sc_scriptaddr +
632				    sc->sc_shedoffset * sizeof(u_int32_t);
633			}
634			bus_space_write_4(sc->sc_c.sc_rt, sc->sc_c.sc_rh,
635			    SIOP_SCRATCHD, addr);
636			esiop_script_sync(sc,
637			    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
638			if (esiop_cmd) {
639				esiop_cmd->cmd_c.status = CMDST_DONE;
640				xs->error = XS_SELTIMEOUT;
641				freetarget = 1;
642				goto end;
643			} else {
644				printf("%s: selection timeout without "
645				    "command, target %d (sdid 0x%x), "
646				    "slot %d\n",
647				    sc->sc_c.sc_dev.dv_xname, target,
648				    bus_space_read_1(sc->sc_c.sc_rt,
649				    sc->sc_c.sc_rh, SIOP_SDID), slot);
650				need_reset = 1;
651			}
652		}
653		if (sist & SIST0_UDC) {
654			/*
655			 * unexpected disconnect. Usually the target signals
656			 * a fatal condition this way. Attempt to get sense.
657			 */
658			 if (esiop_cmd) {
659				esiop_cmd->cmd_tables->status =
660				    htole32(SCSI_CHECK);
661				goto end;
662			}
663			printf("%s: unexpected disconnect without "
664			    "command\n", sc->sc_c.sc_dev.dv_xname);
665			goto reset;
666		}
667		if (sist & (SIST1_SBMC << 8)) {
668			/* SCSI bus mode change */
669			if (siop_modechange(&sc->sc_c) == 0 || need_reset == 1)
670				goto reset;
671			if ((istat & ISTAT_DIP) && (dstat & DSTAT_SIR)) {
672				/*
673				 * we have a script interrupt, it will
674				 * restart the script.
675				 */
676				goto scintr;
677			}
678			/*
			 * else we have to restart it ourselves, at the
680			 * interrupted instruction.
681			 */
682			bus_space_write_4(sc->sc_c.sc_rt, sc->sc_c.sc_rh,
683			    SIOP_DSP,
684			    bus_space_read_4(sc->sc_c.sc_rt, sc->sc_c.sc_rh,
685			    SIOP_DSP) - 8);
686			return 1;
687		}
		/* Else it's an unhandled exception (for now). */
689		printf("%s: unhandled scsi interrupt, sist=0x%x sstat1=0x%x "
690		    "DSA=0x%x DSP=0x%x\n", sc->sc_c.sc_dev.dv_xname, sist,
691		    bus_space_read_1(sc->sc_c.sc_rt, sc->sc_c.sc_rh,
692			SIOP_SSTAT1),
693		    bus_space_read_4(sc->sc_c.sc_rt, sc->sc_c.sc_rh, SIOP_DSA),
694		    (int)(bus_space_read_4(sc->sc_c.sc_rt, sc->sc_c.sc_rh,
695			SIOP_DSP) - sc->sc_c.sc_scriptaddr));
696		if (esiop_cmd) {
697			esiop_cmd->cmd_c.status = CMDST_DONE;
698			xs->error = XS_SELTIMEOUT;
699			goto end;
700		}
701		need_reset = 1;
702	}
703	if (need_reset) {
704reset:
705		/* fatal error, reset the bus */
706		siop_resetbus(&sc->sc_c);
707		/* no table to flush here */
708		return 1;
709	}
710
711scintr:
712	if ((istat & ISTAT_DIP) && (dstat & DSTAT_SIR)) { /* script interrupt */
713		irqcode = bus_space_read_4(sc->sc_c.sc_rt, sc->sc_c.sc_rh,
714		    SIOP_DSPS);
715#ifdef SIOP_DEBUG_INTR
716		printf("script interrupt 0x%x\n", irqcode);
717#endif
718		/*
		 * no command, or an inactive command, is only valid for a
		 * reselect interrupt
721		 */
722		if ((irqcode & 0x80) == 0) {
723			if (esiop_cmd == NULL) {
724				printf(
725			"%s: script interrupt (0x%x) with invalid DSA !!!\n",
726				    sc->sc_c.sc_dev.dv_xname, irqcode);
727				goto reset;
728			}
729			if (esiop_cmd->cmd_c.status != CMDST_ACTIVE) {
730				printf("%s: command with invalid status "
731				    "(IRQ code 0x%x current status %d) !\n",
732				    sc->sc_c.sc_dev.dv_xname,
733				    irqcode, esiop_cmd->cmd_c.status);
734				xs = NULL;
735			}
736		}
737		switch(irqcode) {
738		case A_int_err:
739			printf("error, DSP=0x%x\n",
740			    (int)(bus_space_read_4(sc->sc_c.sc_rt,
741			    sc->sc_c.sc_rh, SIOP_DSP) - sc->sc_c.sc_scriptaddr));
742			if (xs) {
743				xs->error = XS_SELTIMEOUT;
744				goto end;
745			} else {
746				goto reset;
747			}
748		case A_int_msgin:
749		{
750			int msgin = bus_space_read_1(sc->sc_c.sc_rt,
751			    sc->sc_c.sc_rh, SIOP_SFBR);
752			if (msgin == MSG_MESSAGE_REJECT) {
753				int msg, extmsg;
754				if (esiop_cmd->cmd_tables->msg_out[0] & 0x80) {
755					/*
					 * message was part of an identify +
					 * something else. Identify shouldn't
758					 * have been rejected.
759					 */
760					msg =
761					    esiop_cmd->cmd_tables->msg_out[1];
762					extmsg =
763					    esiop_cmd->cmd_tables->msg_out[3];
764				} else {
765					msg =
766					    esiop_cmd->cmd_tables->msg_out[0];
767					extmsg =
768					    esiop_cmd->cmd_tables->msg_out[2];
769				}
770				if (msg == MSG_MESSAGE_REJECT) {
					/* MSG_REJECT for a MSG_REJECT! */
772					if (xs)
773						scsipi_printaddr(xs->xs_periph);
774					else
775						printf("%s: ",
776						   sc->sc_c.sc_dev.dv_xname);
777					printf("our reject message was "
778					    "rejected\n");
779					goto reset;
780				}
781				if (msg == MSG_EXTENDED &&
782				    extmsg == MSG_EXT_WDTR) {
783					/* WDTR rejected, initiate sync */
784					if ((esiop_target->target_c.flags &
785					   TARF_SYNC) == 0) {
786						esiop_target->target_c.status =
787						    TARST_OK;
788						siop_update_xfer_mode(&sc->sc_c,
789						    target);
790						/* no table to flush here */
791						CALL_SCRIPT(Ent_msgin_ack);
792						return 1;
793					}
794					esiop_target->target_c.status =
795					    TARST_SYNC_NEG;
796					siop_sdtr_msg(&esiop_cmd->cmd_c, 0,
797					    sc->sc_c.st_minsync,
798					    sc->sc_c.maxoff);
799					esiop_table_sync(esiop_cmd,
800					    BUS_DMASYNC_PREREAD |
801					    BUS_DMASYNC_PREWRITE);
802					CALL_SCRIPT(Ent_send_msgout);
803					return 1;
804				} else if (msg == MSG_EXTENDED &&
805				    extmsg == MSG_EXT_SDTR) {
806					/* sync rejected */
807					esiop_target->target_c.offset = 0;
808					esiop_target->target_c.period = 0;
809					esiop_target->target_c.status =
810					    TARST_OK;
811					siop_update_xfer_mode(&sc->sc_c,
812					    target);
813					/* no table to flush here */
814					CALL_SCRIPT(Ent_msgin_ack);
815					return 1;
816				} else if (msg == MSG_EXTENDED &&
817				    extmsg == MSG_EXT_PPR) {
818					/* PPR rejected */
819					esiop_target->target_c.offset = 0;
820					esiop_target->target_c.period = 0;
821					esiop_target->target_c.status =
822					    TARST_OK;
823					siop_update_xfer_mode(&sc->sc_c,
824					    target);
825					/* no table to flush here */
826					CALL_SCRIPT(Ent_msgin_ack);
827					return 1;
828				} else if (msg == MSG_SIMPLE_Q_TAG ||
829				    msg == MSG_HEAD_OF_Q_TAG ||
830				    msg == MSG_ORDERED_Q_TAG) {
831					if (esiop_handle_qtag_reject(
832					    esiop_cmd) == -1)
833						goto reset;
834					CALL_SCRIPT(Ent_msgin_ack);
835					return 1;
836				}
837				if (xs)
838					scsipi_printaddr(xs->xs_periph);
839				else
840					printf("%s: ",
841					    sc->sc_c.sc_dev.dv_xname);
842				if (msg == MSG_EXTENDED) {
843					printf("scsi message reject, extended "
844					    "message sent was 0x%x\n", extmsg);
845				} else {
846					printf("scsi message reject, message "
847					    "sent was 0x%x\n", msg);
848				}
849				/* no table to flush here */
850				CALL_SCRIPT(Ent_msgin_ack);
851				return 1;
852			}
853			if (xs)
854				scsipi_printaddr(xs->xs_periph);
855			else
856				printf("%s: ", sc->sc_c.sc_dev.dv_xname);
857			printf("unhandled message 0x%x\n",
858			    esiop_cmd->cmd_tables->msg_in[0]);
859			esiop_cmd->cmd_tables->msg_out[0] = MSG_MESSAGE_REJECT;
860			esiop_cmd->cmd_tables->t_msgout.count= htole32(1);
861			esiop_table_sync(esiop_cmd,
862			    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
863			CALL_SCRIPT(Ent_send_msgout);
864			return 1;
865		}
866		case A_int_extmsgin:
867#ifdef SIOP_DEBUG_INTR
868			printf("extended message: msg 0x%x len %d\n",
869			    esiop_cmd->cmd_tables->msg_in[2],
870			    esiop_cmd->cmd_tables->msg_in[1]);
871#endif
872			if (esiop_cmd->cmd_tables->msg_in[1] >
873			    sizeof(esiop_cmd->cmd_tables->msg_in) - 2)
874				printf("%s: extended message too big (%d)\n",
875				    sc->sc_c.sc_dev.dv_xname,
876				    esiop_cmd->cmd_tables->msg_in[1]);
877			esiop_cmd->cmd_tables->t_extmsgdata.count =
878			    htole32(esiop_cmd->cmd_tables->msg_in[1] - 1);
879			esiop_table_sync(esiop_cmd,
880			    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
881			CALL_SCRIPT(Ent_get_extmsgdata);
882			return 1;
883		case A_int_extmsgdata:
884#ifdef SIOP_DEBUG_INTR
885			{
886			int i;
887			printf("extended message: 0x%x, data:",
888			    esiop_cmd->cmd_tables->msg_in[2]);
889			for (i = 3; i < 2 + esiop_cmd->cmd_tables->msg_in[1];
890			    i++)
891				printf(" 0x%x",
892				    esiop_cmd->cmd_tables->msg_in[i]);
893			printf("\n");
894			}
895#endif
896			if (esiop_cmd->cmd_tables->msg_in[2] == MSG_EXT_PPR) {
897				switch (siop_ppr_neg(&esiop_cmd->cmd_c)) {
898				case SIOP_NEG_MSGOUT:
899					esiop_update_scntl3(sc,
900					    esiop_cmd->cmd_c.siop_target);
901					esiop_table_sync(esiop_cmd,
902					    BUS_DMASYNC_PREREAD |
903					    BUS_DMASYNC_PREWRITE);
904					CALL_SCRIPT(Ent_send_msgout);
905					return 1;
906				case SIOP_NEG_ACK:
907					esiop_update_scntl3(sc,
908					    esiop_cmd->cmd_c.siop_target);
909					CALL_SCRIPT(Ent_msgin_ack);
910					return 1;
911				default:
912					panic("invalid retval from "
					    "siop_ppr_neg()");
914				}
915				return 1;
916			}
917			if (esiop_cmd->cmd_tables->msg_in[2] == MSG_EXT_WDTR) {
918				switch (siop_wdtr_neg(&esiop_cmd->cmd_c)) {
919				case SIOP_NEG_MSGOUT:
920					esiop_update_scntl3(sc,
921					    esiop_cmd->cmd_c.siop_target);
922					esiop_table_sync(esiop_cmd,
923					    BUS_DMASYNC_PREREAD |
924					    BUS_DMASYNC_PREWRITE);
925					CALL_SCRIPT(Ent_send_msgout);
926					return 1;
927				case SIOP_NEG_ACK:
928					esiop_update_scntl3(sc,
929					    esiop_cmd->cmd_c.siop_target);
930					CALL_SCRIPT(Ent_msgin_ack);
931					return 1;
932				default:
933					panic("invalid retval from "
934					    "siop_wdtr_neg()");
935				}
936				return 1;
937			}
938			if (esiop_cmd->cmd_tables->msg_in[2] == MSG_EXT_SDTR) {
939				switch (siop_sdtr_neg(&esiop_cmd->cmd_c)) {
940				case SIOP_NEG_MSGOUT:
941					esiop_update_scntl3(sc,
942					    esiop_cmd->cmd_c.siop_target);
943					esiop_table_sync(esiop_cmd,
944					    BUS_DMASYNC_PREREAD |
945					    BUS_DMASYNC_PREWRITE);
946					CALL_SCRIPT(Ent_send_msgout);
947					return 1;
948				case SIOP_NEG_ACK:
949					esiop_update_scntl3(sc,
950					    esiop_cmd->cmd_c.siop_target);
951					CALL_SCRIPT(Ent_msgin_ack);
952					return 1;
953				default:
954					panic("invalid retval from "
					    "siop_sdtr_neg()");
956				}
957				return 1;
958			}
959			/* send a message reject */
960			esiop_cmd->cmd_tables->msg_out[0] = MSG_MESSAGE_REJECT;
961			esiop_cmd->cmd_tables->t_msgout.count = htole32(1);
962			esiop_table_sync(esiop_cmd,
963			    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
964			CALL_SCRIPT(Ent_send_msgout);
965			return 1;
966		case A_int_disc:
967			INCSTAT(esiop_stat_intr_sdp);
968			offset = bus_space_read_1(sc->sc_c.sc_rt,
969			    sc->sc_c.sc_rh, SIOP_SCRATCHA + 1);
970#ifdef SIOP_DEBUG_DR
971			printf("disconnect offset %d\n", offset);
972#endif
973			if (offset > SIOP_NSG) {
974				printf("%s: bad offset for disconnect (%d)\n",
975				    sc->sc_c.sc_dev.dv_xname, offset);
976				goto reset;
977			}
978			/*
979			 * offset == SIOP_NSG may be a valid condition if
			 * we get an sdp when the xfer is done.
981			 * Don't call memmove in this case.
982			 */
983			if (offset < SIOP_NSG) {
984				memmove(&esiop_cmd->cmd_tables->data[0],
985				    &esiop_cmd->cmd_tables->data[offset],
986				    (SIOP_NSG - offset) * sizeof(scr_table_t));
987				esiop_table_sync(esiop_cmd,
988				    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
989			}
990			CALL_SCRIPT(Ent_script_sched);
991			return 1;
992		case A_int_resfail:
993			printf("reselect failed\n");
994			CALL_SCRIPT(Ent_script_sched);
995			return 1;
996		case A_int_done:
997			if (xs == NULL) {
998				printf("%s: done without command\n",
999				    sc->sc_c.sc_dev.dv_xname);
1000				CALL_SCRIPT(Ent_script_sched);
1001				return 1;
1002			}
1003#ifdef SIOP_DEBUG_INTR
1004			printf("done, DSA=0x%lx target id 0x%x last msg "
1005			    "in=0x%x status=0x%x\n", (u_long)esiop_cmd->cmd_c.dsa,
1006			    le32toh(esiop_cmd->cmd_tables->id),
1007			    esiop_cmd->cmd_tables->msg_in[0],
1008			    le32toh(esiop_cmd->cmd_tables->status));
1009#endif
1010			INCSTAT(esiop_stat_intr_done);
1011			esiop_cmd->cmd_c.status = CMDST_DONE;
1012			goto end;
1013		default:
1014			printf("unknown irqcode %x\n", irqcode);
1015			if (xs) {
1016				xs->error = XS_SELTIMEOUT;
1017				goto end;
1018			}
1019			goto reset;
1020		}
1021		return 1;
1022	}
	/* We just shouldn't get here */
	panic("esiop_intr: I shouldn't be there!");
1025
1026end:
1027	/*
	 * Restart the script now if the command completed properly.
	 * Otherwise wait for esiop_scsicmd_end(); we may need to clean up
	 * the queue.
1031	 */
1032	xs->status = le32toh(esiop_cmd->cmd_tables->status);
1033#ifdef SIOP_DEBUG_INTR
1034	printf("esiop_intr end: status %d\n", xs->status);
1035#endif
1036	if (tag >= 0)
1037		esiop_lun->tactive[tag] = NULL;
1038	else
1039		esiop_lun->active = NULL;
1040	esiop_scsicmd_end(esiop_cmd);
1041	if (freetarget && esiop_target->target_c.status == TARST_PROBING)
1042		esiop_del_dev(sc, target, lun);
1043	CALL_SCRIPT(Ent_script_sched);
1044	if (sc->sc_flags & SCF_CHAN_NOSLOT) {
1045		/* a command terminated, so we have free slots now */
1046		sc->sc_flags &= ~SCF_CHAN_NOSLOT;
1047		scsipi_channel_thaw(&sc->sc_c.sc_chan, 1);
1048	}
1049
1050	return 1;
1051}
1052
1053void
1054esiop_scsicmd_end(esiop_cmd)
1055	struct esiop_cmd *esiop_cmd;
1056{
1057	struct scsipi_xfer *xs = esiop_cmd->cmd_c.xs;
1058	struct esiop_softc *sc = (struct esiop_softc *)esiop_cmd->cmd_c.siop_sc;
1059
1060	switch(xs->status) {
1061	case SCSI_OK:
1062		xs->error = XS_NOERROR;
1063		break;
1064	case SCSI_BUSY:
1065		xs->error = XS_BUSY;
1066		break;
1067	case SCSI_CHECK:
1068		xs->error = XS_BUSY;
1069		/* remove commands in the queue and scheduler */
1070		esiop_unqueue(sc, xs->xs_periph->periph_target,
1071		    xs->xs_periph->periph_lun);
1072		break;
1073	case SCSI_QUEUE_FULL:
1074		INCSTAT(esiop_stat_intr_qfull);
1075#ifdef SIOP_DEBUG
1076		printf("%s:%d:%d: queue full (tag %d)\n",
1077		    sc->sc_c.sc_dev.dv_xname,
1078		    xs->xs_periph->periph_target,
1079		    xs->xs_periph->periph_lun, esiop_cmd->cmd_c.tag);
1080#endif
1081		xs->error = XS_BUSY;
1082		break;
1083	case SCSI_SIOP_NOCHECK:
1084		/*
1085		 * don't check status, xs->error is already valid
1086		 */
1087		break;
1088	case SCSI_SIOP_NOSTATUS:
1089		/*
1090		 * the status byte was not updated, cmd was
1091		 * aborted
1092		 */
1093		xs->error = XS_SELTIMEOUT;
1094		break;
1095	default:
1096		xs->error = XS_DRIVER_STUFFUP;
1097	}
1098	if (xs->xs_control & (XS_CTL_DATA_IN | XS_CTL_DATA_OUT)) {
1099		bus_dmamap_sync(sc->sc_c.sc_dmat,
1100		    esiop_cmd->cmd_c.dmamap_data, 0,
1101		    esiop_cmd->cmd_c.dmamap_data->dm_mapsize,
1102		    (xs->xs_control & XS_CTL_DATA_IN) ?
1103		    BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE);
1104		bus_dmamap_unload(sc->sc_c.sc_dmat,
1105		    esiop_cmd->cmd_c.dmamap_data);
1106	}
1107	bus_dmamap_unload(sc->sc_c.sc_dmat, esiop_cmd->cmd_c.dmamap_cmd);
1108	callout_stop(&esiop_cmd->cmd_c.xs->xs_callout);
1109	esiop_cmd->cmd_c.status = CMDST_FREE;
1110	TAILQ_INSERT_TAIL(&sc->free_list, esiop_cmd, next);
1111	xs->resid = 0;
1112	scsipi_done (xs);
1113}
1114
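/*
 * Scan every active command and complete those whose status word in the
 * xfer table has been set to SCSI_OK by the script; this is how commands
 * finished entirely by the script (signaled only via ISTAT_INTF, without
 * a per-command interrupt) get collected.
 */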
1115void
1116esiop_checkdone(sc)
1117	struct esiop_softc *sc;
1118{
1119	int target, lun, tag;
1120	struct esiop_target *esiop_target;
1121	struct esiop_lun *esiop_lun;
1122	struct esiop_cmd *esiop_cmd;
1123	int status;
1124
1125	for (target = 0; target < sc->sc_c.sc_chan.chan_ntargets; target++) {
1126		esiop_target = (struct esiop_target *)sc->sc_c.targets[target];
1127		if (esiop_target == NULL)
1128			continue;
1129		for (lun = 0; lun < sc->sc_c.sc_chan.chan_nluns; lun++) {
1130			esiop_lun = esiop_target->esiop_lun[lun];
1131			if (esiop_lun == NULL)
1132				continue;
1133			esiop_cmd = esiop_lun->active;
1134			if (esiop_cmd) {
1135				esiop_table_sync(esiop_cmd,
1136				    BUS_DMASYNC_POSTREAD |
1137				    BUS_DMASYNC_POSTWRITE);
1138				status = le32toh(esiop_cmd->cmd_tables->status);
1139				if (status == SCSI_OK) {
1140					/* Ok, this command has been handled */
1141					esiop_cmd->cmd_c.xs->status = status;
1142					esiop_lun->active = NULL;
1143					esiop_scsicmd_end(esiop_cmd);
1144				}
1145			}
1146			for (tag = 0; tag < ESIOP_NTAG; tag++) {
1147				esiop_cmd = esiop_lun->tactive[tag];
1148				if (esiop_cmd == NULL)
1149					continue;
1150				esiop_table_sync(esiop_cmd,
1151				    BUS_DMASYNC_POSTREAD |
1152				    BUS_DMASYNC_POSTWRITE);
1153				status = le32toh(esiop_cmd->cmd_tables->status);
1154				if (status == SCSI_OK) {
1155					/* Ok, this command has been handled */
1156					esiop_cmd->cmd_c.xs->status = status;
1157					esiop_lun->tactive[tag] = NULL;
1158					esiop_scsicmd_end(esiop_cmd);
1159				}
1160			}
1161		}
1162	}
1163}
1164
1165void
1166esiop_unqueue(sc, target, lun)
1167	struct esiop_softc *sc;
1168	int target;
1169	int lun;
1170{
1171 	int slot, tag;
1172	u_int32_t slotdsa;
1173	struct esiop_cmd *esiop_cmd;
1174	struct esiop_lun *esiop_lun =
1175	    ((struct esiop_target *)sc->sc_c.targets[target])->esiop_lun[lun];
1176
1177	/* first make sure to read valid data */
1178	esiop_script_sync(sc, BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
1179
1180	for (tag = 0; tag < ESIOP_NTAG; tag++) {
1181		/* look for commands in the scheduler, not yet started */
1182		if (esiop_lun->tactive[tag] == NULL)
1183			continue;
1184		esiop_cmd = esiop_lun->tactive[tag];
1185		for (slot = 0; slot < A_ncmd_slots; slot++) {
1186			slotdsa = esiop_script_read(sc,
1187			    sc->sc_shedoffset + slot * CMD_SLOTSIZE);
1188			/* if the slot has any flag, it won't match the DSA */
1189			if (slotdsa == esiop_cmd->cmd_c.dsa) { /* found it */
1190				/* Mark this slot as ignore */
1191				esiop_script_write(sc,
1192				    sc->sc_shedoffset + slot * CMD_SLOTSIZE,
1193				    esiop_cmd->cmd_c.dsa | A_f_cmd_ignore);
1194				/* ask to requeue */
1195				esiop_cmd->cmd_c.xs->error = XS_REQUEUE;
1196				esiop_cmd->cmd_c.xs->status = SCSI_SIOP_NOCHECK;
1197				esiop_lun->tactive[tag] = NULL;
1198				esiop_scsicmd_end(esiop_cmd);
1199				break;
1200			}
1201		}
1202	}
1203	esiop_script_sync(sc, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1204}
1205
1206/*
 * handle a rejected queue tag message: the command will run untagged,
 * so we have to adjust the reselect script.
1209 */
1210
1211
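/*
 * Roughly: clear the tag slot, make the command the lun's single untagged
 * command, and rewrite the lun's entry in the target's DSA table so the
 * reselect code can find it without a tag.
 */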
1212int
1213esiop_handle_qtag_reject(esiop_cmd)
1214	struct esiop_cmd *esiop_cmd;
1215{
1216	struct esiop_softc *sc = (struct esiop_softc *)esiop_cmd->cmd_c.siop_sc;
1217	int target = esiop_cmd->cmd_c.xs->xs_periph->periph_target;
1218	int lun = esiop_cmd->cmd_c.xs->xs_periph->periph_lun;
1219	int tag = esiop_cmd->cmd_tables->msg_out[2];
1220	struct esiop_target *esiop_target =
1221	    (struct esiop_target*)sc->sc_c.targets[target];
1222	struct esiop_lun *esiop_lun = esiop_target->esiop_lun[lun];
1223
1224#ifdef SIOP_DEBUG
1225	printf("%s:%d:%d: tag message %d (%d) rejected (status %d)\n",
1226	    sc->sc_c.sc_dev.dv_xname, target, lun, tag, esiop_cmd->cmd_c.tag,
1227	    esiop_cmd->cmd_c.status);
1228#endif
1229
1230	if (esiop_lun->active != NULL) {
1231		printf("%s: untagged command already running for target %d "
1232		    "lun %d (status %d)\n", sc->sc_c.sc_dev.dv_xname,
1233		    target, lun, esiop_lun->active->cmd_c.status);
1234		return -1;
1235	}
1236	/* clear tag slot */
1237	esiop_lun->tactive[tag] = NULL;
1238	/* add command to non-tagged slot */
1239	esiop_lun->active = esiop_cmd;
1240	esiop_cmd->cmd_c.flags &= ~CMDFL_TAG;
1241	esiop_cmd->cmd_c.tag = -1;
1242	/* update DSA table */
1243	esiop_script_write(sc, esiop_target->lun_table_offset +
1244	    lun * 2 + A_target_luntbl / sizeof(u_int32_t),
1245	    esiop_cmd->cmd_c.dsa);
1246	esiop_script_sync(sc, BUS_DMASYNC_PREREAD |  BUS_DMASYNC_PREWRITE);
1247	return 0;
1248}
1249
1250/*
1251 * handle a bus reset: reset chip, unqueue all active commands, free all
 * target structs and report the loss to the upper layer.
 * As the upper layer may requeue immediately we have to first store
1254 * all active commands in a temporary queue.
1255 */
1256void
1257esiop_handle_reset(sc)
1258	struct esiop_softc *sc;
1259{
1260	struct esiop_cmd *esiop_cmd;
1261	struct esiop_lun *esiop_lun;
1262	int target, lun, tag;
1263	/*
1264	 * scsi bus reset. reset the chip and restart
1265	 * the queue. Need to clean up all active commands
1266	 */
1267	printf("%s: scsi bus reset\n", sc->sc_c.sc_dev.dv_xname);
1268	/* stop, reset and restart the chip */
1269	esiop_reset(sc);
1270
1271	if (sc->sc_flags & SCF_CHAN_NOSLOT) {
1272		/* chip has been reset, all slots are free now */
1273		sc->sc_flags &= ~SCF_CHAN_NOSLOT;
1274		scsipi_channel_thaw(&sc->sc_c.sc_chan, 1);
1275	}
1276	/*
	 * Process all commands: first the completed commands, then the
	 * commands still being executed.
1279	 */
1280	esiop_checkdone(sc);
1281	for (target = 0; target < sc->sc_c.sc_chan.chan_ntargets;
1282	    target++) {
1283		struct esiop_target *esiop_target =
1284		    (struct esiop_target *)sc->sc_c.targets[target];
1285		if (esiop_target == NULL)
1286			continue;
1287		for (lun = 0; lun < 8; lun++) {
1288			esiop_lun = esiop_target->esiop_lun[lun];
1289			if (esiop_lun == NULL)
1290				continue;
1291			for (tag = -1; tag <
1292			    ((sc->sc_c.targets[target]->flags & TARF_TAG) ?
1293			    ESIOP_NTAG : 0);
1294			    tag++) {
1295				if (tag >= 0)
1296					esiop_cmd = esiop_lun->tactive[tag];
1297				else
1298					esiop_cmd = esiop_lun->active;
1299				if (esiop_cmd == NULL)
1300					continue;
1301				scsipi_printaddr(esiop_cmd->cmd_c.xs->xs_periph);
1302				printf("command with tag id %d reset\n", tag);
1303				esiop_cmd->cmd_c.xs->error =
1304				    (esiop_cmd->cmd_c.flags & CMDFL_TIMEOUT) ?
1305		    		    XS_TIMEOUT : XS_RESET;
1306				esiop_cmd->cmd_c.xs->status = SCSI_SIOP_NOCHECK;
1307				if (tag >= 0)
1308					esiop_lun->tactive[tag] = NULL;
1309				else
1310					esiop_lun->active = NULL;
1311				esiop_cmd->cmd_c.status = CMDST_DONE;
1312				esiop_scsicmd_end(esiop_cmd);
1313			}
1314		}
1315		sc->sc_c.targets[target]->status = TARST_ASYNC;
1316		sc->sc_c.targets[target]->flags &= ~(TARF_ISWIDE | TARF_ISDT);
1317		sc->sc_c.targets[target]->period =
1318		    sc->sc_c.targets[target]->offset = 0;
1319		siop_update_xfer_mode(&sc->sc_c, target);
1320	}
1321
1322	scsipi_async_event(&sc->sc_c.sc_chan, ASYNC_EVENT_RESET, NULL);
1323}
1324
1325void
1326esiop_scsipi_request(chan, req, arg)
1327	struct scsipi_channel *chan;
1328	scsipi_adapter_req_t req;
1329	void *arg;
1330{
1331	struct scsipi_xfer *xs;
1332	struct scsipi_periph *periph;
1333	struct esiop_softc *sc = (void *)chan->chan_adapter->adapt_dev;
1334	struct esiop_cmd *esiop_cmd;
1335	struct esiop_target *esiop_target;
1336	int s, error, i;
1337	int target;
1338	int lun;
1339
1340	switch (req) {
1341	case ADAPTER_REQ_RUN_XFER:
1342		xs = arg;
1343		periph = xs->xs_periph;
1344		target = periph->periph_target;
1345		lun = periph->periph_lun;
1346
1347		s = splbio();
1348#ifdef SIOP_DEBUG_SCHED
1349		printf("starting cmd for %d:%d tag %d(%d)\n", target, lun,
1350		    xs->xs_tag_type, xs->xs_tag_id);
1351#endif
1352		esiop_cmd = TAILQ_FIRST(&sc->free_list);
1353		if (esiop_cmd == NULL) {
1354			xs->error = XS_RESOURCE_SHORTAGE;
1355			scsipi_done(xs);
1356			splx(s);
1357			return;
1358		}
1359		TAILQ_REMOVE(&sc->free_list, esiop_cmd, next);
1360#ifdef DIAGNOSTIC
1361		if (esiop_cmd->cmd_c.status != CMDST_FREE)
1362			panic("siop_scsicmd: new cmd not free");
1363#endif
1364		esiop_target = (struct esiop_target*)sc->sc_c.targets[target];
1365		if (esiop_target == NULL) {
1366#ifdef SIOP_DEBUG
1367			printf("%s: alloc siop_target for target %d\n",
1368				sc->sc_c.sc_dev.dv_xname, target);
1369#endif
1370			sc->sc_c.targets[target] =
1371			    malloc(sizeof(struct esiop_target),
1372				M_DEVBUF, M_NOWAIT | M_ZERO);
1373			if (sc->sc_c.targets[target] == NULL) {
1374				printf("%s: can't malloc memory for "
1375				    "target %d\n", sc->sc_c.sc_dev.dv_xname,
1376				    target);
1377				xs->error = XS_RESOURCE_SHORTAGE;
1378				scsipi_done(xs);
1379				splx(s);
1380				return;
1381			}
1382			esiop_target =
1383			    (struct esiop_target*)sc->sc_c.targets[target];
1384			esiop_target->target_c.status = TARST_PROBING;
1385			esiop_target->target_c.flags = 0;
1386			esiop_target->target_c.id =
1387			    sc->sc_c.clock_div << 24; /* scntl3 */
1388			esiop_target->target_c.id |=  target << 16; /* id */
1389			/* esiop_target->target_c.id |= 0x0 << 8; scxfer is 0 */
1390
1391			for (i=0; i < 8; i++)
1392				esiop_target->esiop_lun[i] = NULL;
1393			esiop_target_register(sc, target);
1394		}
1395		if (esiop_target->esiop_lun[lun] == NULL) {
1396			esiop_target->esiop_lun[lun] =
1397			    malloc(sizeof(struct esiop_lun), M_DEVBUF,
1398			    M_NOWAIT|M_ZERO);
1399			if (esiop_target->esiop_lun[lun] == NULL) {
1400				printf("%s: can't alloc esiop_lun for "
1401				    "target %d lun %d\n",
1402				    sc->sc_c.sc_dev.dv_xname, target, lun);
1403				xs->error = XS_RESOURCE_SHORTAGE;
1404				scsipi_done(xs);
1405				splx(s);
1406				return;
1407			}
1408		}
1409		esiop_cmd->cmd_c.siop_target = sc->sc_c.targets[target];
1410		esiop_cmd->cmd_c.xs = xs;
1411		esiop_cmd->cmd_c.flags = 0;
1412		esiop_cmd->cmd_c.status = CMDST_READY;
1413
1414		/* load the DMA maps */
1415		error = bus_dmamap_load(sc->sc_c.sc_dmat,
1416		    esiop_cmd->cmd_c.dmamap_cmd,
1417		    xs->cmd, xs->cmdlen, NULL, BUS_DMA_NOWAIT);
1418		if (error) {
1419			printf("%s: unable to load cmd DMA map: %d\n",
1420			    sc->sc_c.sc_dev.dv_xname, error);
1421			xs->error = XS_DRIVER_STUFFUP;
1422			scsipi_done(xs);
1423			splx(s);
1424			return;
1425		}
1426		if (xs->xs_control & (XS_CTL_DATA_IN | XS_CTL_DATA_OUT)) {
1427			error = bus_dmamap_load(sc->sc_c.sc_dmat,
1428			    esiop_cmd->cmd_c.dmamap_data, xs->data, xs->datalen,
1429			    NULL, BUS_DMA_NOWAIT | BUS_DMA_STREAMING |
1430			    ((xs->xs_control & XS_CTL_DATA_IN) ?
1431			     BUS_DMA_READ : BUS_DMA_WRITE));
1432			if (error) {
				printf("%s: unable to load data DMA map: %d\n",
1434				    sc->sc_c.sc_dev.dv_xname, error);
1435				xs->error = XS_DRIVER_STUFFUP;
1436				scsipi_done(xs);
1437				bus_dmamap_unload(sc->sc_c.sc_dmat,
1438				    esiop_cmd->cmd_c.dmamap_cmd);
1439				splx(s);
1440				return;
1441			}
1442			bus_dmamap_sync(sc->sc_c.sc_dmat,
1443			    esiop_cmd->cmd_c.dmamap_data, 0,
1444			    esiop_cmd->cmd_c.dmamap_data->dm_mapsize,
1445			    (xs->xs_control & XS_CTL_DATA_IN) ?
1446			    BUS_DMASYNC_PREREAD : BUS_DMASYNC_PREWRITE);
1447		}
1448		bus_dmamap_sync(sc->sc_c.sc_dmat, esiop_cmd->cmd_c.dmamap_cmd,
1449		    0, esiop_cmd->cmd_c.dmamap_cmd->dm_mapsize,
1450		    BUS_DMASYNC_PREWRITE);
1451
1452		if (xs->xs_tag_type)
1453			esiop_cmd->cmd_c.tag = xs->xs_tag_id;
1454		else
1455			esiop_cmd->cmd_c.tag = -1;
1456		siop_setuptables(&esiop_cmd->cmd_c);
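		/*
		 * The tlq word uses the same layout esiop_intr() decodes
		 * from SCRATCHC (flags in byte 0, target/lun/tag in bytes
		 * 1-3); the script presumably copies it there when the
		 * command is selected or reselected.
		 */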
1457		((struct esiop_xfer *)esiop_cmd->cmd_tables)->tlq =
1458		    htole32(A_f_c_target | A_f_c_lun);
1459		((struct esiop_xfer *)esiop_cmd->cmd_tables)->tlq |=
1460		    htole32((target << 8) | (lun << 16));
1461		if (esiop_cmd->cmd_c.flags & CMDFL_TAG) {
1462			((struct esiop_xfer *)esiop_cmd->cmd_tables)->tlq |=
1463			    htole32(A_f_c_tag);
1464			((struct esiop_xfer *)esiop_cmd->cmd_tables)->tlq |=
1465			    htole32(esiop_cmd->cmd_c.tag << 24);
1466		}
1467
1468		esiop_table_sync(esiop_cmd,
1469		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1470		esiop_start(sc, esiop_cmd);
1471		if (xs->xs_control & XS_CTL_POLL) {
1472			/* poll for command completion */
1473			while ((xs->xs_status & XS_STS_DONE) == 0) {
1474				delay(1000);
1475				esiop_intr(sc);
1476			}
1477		}
1478		splx(s);
1479		return;
1480
1481	case ADAPTER_REQ_GROW_RESOURCES:
1482#ifdef SIOP_DEBUG
1483		printf("%s grow resources (%d)\n", sc->sc_c.sc_dev.dv_xname,
1484		    sc->sc_c.sc_adapt.adapt_openings);
1485#endif
1486		esiop_morecbd(sc);
1487		return;
1488
1489	case ADAPTER_REQ_SET_XFER_MODE:
1490	{
1491		struct scsipi_xfer_mode *xm = arg;
1492		if (sc->sc_c.targets[xm->xm_target] == NULL)
1493			return;
1494		s = splbio();
1495		if ((xm->xm_mode & PERIPH_CAP_TQING) &&
1496		    (sc->sc_c.targets[xm->xm_target]->flags & TARF_TAG) == 0) {
1497			sc->sc_c.targets[xm->xm_target]->flags |= TARF_TAG;
1498			/* allocate tag tables for this device */
1499			for (lun = 0;
1500			    lun < sc->sc_c.sc_chan.chan_nluns; lun++) {
1501				if (sc->sc_c.sc_chan.chan_periphs[
1502				    xm->xm_target][lun])
1503					esiop_add_dev(sc, xm->xm_target, lun);
1504			}
1505		}
1506		if ((xm->xm_mode & PERIPH_CAP_WIDE16) &&
1507		    (sc->sc_c.features & SF_BUS_WIDE))
1508			sc->sc_c.targets[xm->xm_target]->flags |= TARF_WIDE;
1509		if (xm->xm_mode & PERIPH_CAP_SYNC)
1510			sc->sc_c.targets[xm->xm_target]->flags |= TARF_SYNC;
1511		if ((xm->xm_mode & PERIPH_CAP_DT) &&
1512		    (sc->sc_c.features & SF_CHIP_DT))
1513			sc->sc_c.targets[xm->xm_target]->flags |= TARF_DT;
1514		if ((xm->xm_mode &
1515		    (PERIPH_CAP_SYNC | PERIPH_CAP_WIDE16 | PERIPH_CAP_DT)) ||
1516		    sc->sc_c.targets[xm->xm_target]->status == TARST_PROBING)
1517			sc->sc_c.targets[xm->xm_target]->status = TARST_ASYNC;
1518
1519		splx(s);
1520	}
1521	}
1522}
1523
1524static void
1525esiop_start(sc, esiop_cmd)
1526	struct esiop_softc *sc;
1527	struct esiop_cmd *esiop_cmd;
1528{
1529	struct esiop_lun *esiop_lun;
1530	struct esiop_target *esiop_target;
1531	int timeout;
1532	int target, lun, slot;
1533
1534	/*
1535	 * first make sure to read valid data
1536	 */
1537	esiop_script_sync(sc, BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
1538
1539	/*
1540	 * We use a circular queue here. sc->sc_currschedslot points to a
1541	 * free slot, unless we have filled the queue. Check this.
1542	 */
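	/*
	 * Each ring slot holds either the DSA of a queued command or a flag
	 * value: A_f_cmd_free marks an empty slot, and A_f_cmd_ignore or'ed
	 * into the DSA marks a cancelled one (see esiop_unqueue()).
	 */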
1543	slot = sc->sc_currschedslot;
1544	if ((esiop_script_read(sc, sc->sc_shedoffset + slot * CMD_SLOTSIZE) &
1545	    A_f_cmd_free) == 0) {
1546		/*
		 * no more free slots, no need to continue. freeze the queue
1548		 * and requeue this command.
1549		 */
1550		scsipi_channel_freeze(&sc->sc_c.sc_chan, 1);
1551		sc->sc_flags |= SCF_CHAN_NOSLOT;
1552		esiop_cmd->cmd_c.xs->error = XS_REQUEUE;
1553		esiop_cmd->cmd_c.xs->status = SCSI_SIOP_NOCHECK;
1554		esiop_scsicmd_end(esiop_cmd);
1555		return;
1556	}
1557	/* OK, we can use this slot */
1558
1559	target = esiop_cmd->cmd_c.xs->xs_periph->periph_target;
1560	lun = esiop_cmd->cmd_c.xs->xs_periph->periph_lun;
1561	esiop_target = (struct esiop_target*)sc->sc_c.targets[target];
1562	esiop_lun = esiop_target->esiop_lun[lun];
1563	/* if non-tagged command active, panic: this shouldn't happen */
1564	if (esiop_lun->active != NULL) {
1565		panic("esiop_start: tagged cmd while untagged running");
1566	}
1567#ifdef DIAGNOSTIC
1568	/* sanity check the tag if needed */
1569	if (esiop_cmd->cmd_c.flags & CMDFL_TAG) {
1570		if (esiop_lun->tactive[esiop_cmd->cmd_c.tag] != NULL)
1571			panic("esiop_start: tag not free");
1572		if (esiop_cmd->cmd_c.tag >= ESIOP_NTAG ||
1573		    esiop_cmd->cmd_c.tag < 0) {
1574			scsipi_printaddr(esiop_cmd->cmd_c.xs->xs_periph);
1575			printf(": tag id %d\n", esiop_cmd->cmd_c.tag);
1576			panic("esiop_start: invalid tag id");
1577		}
1578	}
1579#endif
1580#ifdef SIOP_DEBUG_SCHED
1581	printf("using slot %d for DSA 0x%lx\n", slot,
1582	    (u_long)esiop_cmd->cmd_c.dsa);
1583#endif
1584	/* mark command as active */
1585	if (esiop_cmd->cmd_c.status == CMDST_READY)
1586		esiop_cmd->cmd_c.status = CMDST_ACTIVE;
1587	else
1588		panic("esiop_start: bad status");
1589	/* DSA table for reselect */
1590	if (esiop_cmd->cmd_c.flags & CMDFL_TAG) {
1591		esiop_lun->tactive[esiop_cmd->cmd_c.tag] = esiop_cmd;
1592		/* DSA table for reselect */
1593		esiop_lun->lun_tagtbl->tbl[esiop_cmd->cmd_c.tag] =
1594		    htole32(esiop_cmd->cmd_c.dsa);
1595		bus_dmamap_sync(sc->sc_c.sc_dmat,
1596		    esiop_lun->lun_tagtbl->tblblk->blkmap,
1597		    esiop_lun->lun_tagtbl->tbl_offset,
1598		    sizeof(u_int32_t) * ESIOP_NTAG, BUS_DMASYNC_PREWRITE);
1599	} else {
1600		esiop_lun->active = esiop_cmd;
1601		esiop_script_write(sc,
1602		    esiop_target->lun_table_offset +
1603		    lun * 2 + A_target_luntbl / sizeof(u_int32_t),
1604		    esiop_cmd->cmd_c.dsa);
1605	}
1606	/* scheduler slot: DSA */
1607	esiop_script_write(sc, sc->sc_shedoffset + slot * CMD_SLOTSIZE,
1608	    esiop_cmd->cmd_c.dsa);
1609	/* make sure SCRIPT processor will read valid data */
1610	esiop_script_sync(sc, BUS_DMASYNC_PREREAD |  BUS_DMASYNC_PREWRITE);
1611	/* handle timeout */
1612	if ((esiop_cmd->cmd_c.xs->xs_control & XS_CTL_POLL) == 0) {
		/* start expire timer */
1614		timeout = mstohz(esiop_cmd->cmd_c.xs->timeout);
1615		if (timeout == 0)
1616			timeout = 1;
1617		callout_reset( &esiop_cmd->cmd_c.xs->xs_callout,
1618		    timeout, esiop_timeout, esiop_cmd);
1619	}
1620	/* Signal script it has some work to do */
1621	bus_space_write_1(sc->sc_c.sc_rt, sc->sc_c.sc_rh,
1622	    SIOP_ISTAT, ISTAT_SIGP);
1623	/* update the current slot, and wait for IRQ */
1624	sc->sc_currschedslot++;
1625	if (sc->sc_currschedslot >= A_ncmd_slots)
1626		sc->sc_currschedslot = 0;
1627	return;
1628}
1629
1630void
1631esiop_timeout(v)
1632	void *v;
1633{
1634	struct esiop_cmd *esiop_cmd = v;
1635	struct esiop_softc *sc =
1636	    (struct esiop_softc *)esiop_cmd->cmd_c.siop_sc;
1637	int s;
1638#ifdef SIOP_DEBUG
1639	int slot, slotdsa;
1640#endif
1641
1642	s = splbio();
1643	esiop_table_sync(esiop_cmd,
1644	    BUS_DMASYNC_POSTREAD |
1645	    BUS_DMASYNC_POSTWRITE);
1646	scsipi_printaddr(esiop_cmd->cmd_c.xs->xs_periph);
1647#ifdef SIOP_DEBUG
1648	printf("command timeout (status %d)\n", le32toh(esiop_cmd->cmd_tables->status));
1649
1650	esiop_script_sync(sc, BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
1651	for (slot = 0; slot < A_ncmd_slots; slot++) {
1652		slotdsa = esiop_script_read(sc,
1653		    sc->sc_shedoffset + slot * CMD_SLOTSIZE);
1654		if ((slotdsa & 0x01) == 0)
1655			printf("slot %d not free (0x%x)\n", slot, slotdsa);
1656	}
1657	printf("istat 0x%x ", bus_space_read_1(sc->sc_c.sc_rt, sc->sc_c.sc_rh, SIOP_ISTAT));
1658	printf("DSP 0x%lx DSA 0x%x\n",
1659	    (u_long)(bus_space_read_4(sc->sc_c.sc_rt, sc->sc_c.sc_rh, SIOP_DSP) - sc->sc_c.sc_scriptaddr),
1660	    bus_space_read_4(sc->sc_c.sc_rt, sc->sc_c.sc_rh, SIOP_DSA));
1661	bus_space_read_1(sc->sc_c.sc_rt, sc->sc_c.sc_rh, SIOP_CTEST2);
1662	printf("istat 0x%x\n", bus_space_read_1(sc->sc_c.sc_rt, sc->sc_c.sc_rh, SIOP_ISTAT));
1663#else
1664	printf("command timeout\n");
1665#endif
1666	/* reset the scsi bus */
1667	siop_resetbus(&sc->sc_c);
1668
1669	/* deactivate callout */
1670	callout_stop(&esiop_cmd->cmd_c.xs->xs_callout);
1671	/*
	 * mark command as having timed out and just return;
	 * the bus reset will generate an interrupt,
	 * it will be handled in esiop_intr()
1675	 */
1676	esiop_cmd->cmd_c.flags |= CMDFL_TIMEOUT;
1677	splx(s);
1678	return;
1679
1680}
1681
1682void
1683esiop_dump_script(sc)
1684	struct esiop_softc *sc;
1685{
1686	int i;
1687	for (i = 0; i < PAGE_SIZE / 4; i += 2) {
1688		printf("0x%04x: 0x%08x 0x%08x", i * 4,
1689		    le32toh(sc->sc_c.sc_script[i]),
1690		    le32toh(sc->sc_c.sc_script[i+1]));
1691		if ((le32toh(sc->sc_c.sc_script[i]) & 0xe0000000) ==
1692		    0xc0000000) {
1693			i++;
1694			printf(" 0x%08x", le32toh(sc->sc_c.sc_script[i+1]));
1695		}
1696		printf("\n");
1697	}
1698}
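/*
 * esiop_dump_script() prints the script two 32-bit words per
 * instruction.  Instructions whose top three DCMD bits are 110
 * ((opcode & 0xe0000000) == 0xc0000000) are memory-to-memory moves
 * and carry a third word, hence the extra print and the extra i++.
 */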
1699
1700void
1701esiop_morecbd(sc)
1702	struct esiop_softc *sc;
1703{
1704	int error, i, s;
1705	bus_dma_segment_t seg;
1706	int rseg;
1707	struct esiop_cbd *newcbd;
1708	struct esiop_xfer *xfer;
1709	bus_addr_t dsa;
1710
1711	/* allocate a new list head */
1712	newcbd = malloc(sizeof(struct esiop_cbd), M_DEVBUF, M_NOWAIT|M_ZERO);
1713	if (newcbd == NULL) {
1714		printf("%s: can't allocate memory for command descriptors "
1715		    "head\n", sc->sc_c.sc_dev.dv_xname);
1716		return;
1717	}
1718
1719	/* allocate cmd list */
1720	newcbd->cmds = malloc(sizeof(struct esiop_cmd) * SIOP_NCMDPB,
1721	    M_DEVBUF, M_NOWAIT|M_ZERO);
1722	if (newcbd->cmds == NULL) {
1723		printf("%s: can't allocate memory for command descriptors\n",
1724		    sc->sc_c.sc_dev.dv_xname);
1725		goto bad3;
1726	}
1727	error = bus_dmamem_alloc(sc->sc_c.sc_dmat, PAGE_SIZE, PAGE_SIZE, 0,
1728	    &seg, 1, &rseg, BUS_DMA_NOWAIT);
1729	if (error) {
1730		printf("%s: unable to allocate cbd DMA memory, error = %d\n",
1731		    sc->sc_c.sc_dev.dv_xname, error);
1732		goto bad2;
1733	}
1734	error = bus_dmamem_map(sc->sc_c.sc_dmat, &seg, rseg, PAGE_SIZE,
1735	    (caddr_t *)&newcbd->xfers, BUS_DMA_NOWAIT|BUS_DMA_COHERENT);
1736	if (error) {
1737		printf("%s: unable to map cbd DMA memory, error = %d\n",
1738		    sc->sc_c.sc_dev.dv_xname, error);
1739		goto bad1;
1740	}
1741	error = bus_dmamap_create(sc->sc_c.sc_dmat, PAGE_SIZE, 1, PAGE_SIZE, 0,
1742	    BUS_DMA_NOWAIT, &newcbd->xferdma);
1743	if (error) {
1744		printf("%s: unable to create cbd DMA map, error = %d\n",
1745		    sc->sc_c.sc_dev.dv_xname, error);
1746		goto bad1;
1747	}
1748	error = bus_dmamap_load(sc->sc_c.sc_dmat, newcbd->xferdma,
1749	    newcbd->xfers, PAGE_SIZE, NULL, BUS_DMA_NOWAIT);
1750	if (error) {
1751		printf("%s: unable to load cbd DMA map, error = %d\n",
1752		    sc->sc_c.sc_dev.dv_xname, error);
1753		goto bad0;
1754	}
1755#ifdef DEBUG
1756	printf("%s: alloc newcbd at PHY addr 0x%lx\n", sc->sc_c.sc_dev.dv_xname,
1757	    (unsigned long)newcbd->xferdma->dm_segs[0].ds_addr);
1758#endif
1759	for (i = 0; i < SIOP_NCMDPB; i++) {
1760		error = bus_dmamap_create(sc->sc_c.sc_dmat, MAXPHYS, SIOP_NSG,
1761		    MAXPHYS, 0, BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW,
1762		    &newcbd->cmds[i].cmd_c.dmamap_data);
1763		if (error) {
1764			printf("%s: unable to create data DMA map for cbd: "
1765			    "error %d\n",
1766			    sc->sc_c.sc_dev.dv_xname, error);
1767			goto bad0;
1768		}
1769		error = bus_dmamap_create(sc->sc_c.sc_dmat,
1770		    sizeof(struct scsipi_generic), 1,
1771		    sizeof(struct scsipi_generic), 0,
1772		    BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW,
1773		    &newcbd->cmds[i].cmd_c.dmamap_cmd);
1774		if (error) {
1775			printf("%s: unable to create cmd DMA map for cbd, error = %d\n",
1776			    sc->sc_c.sc_dev.dv_xname, error);
1777			goto bad0;
1778		}
1779		newcbd->cmds[i].cmd_c.siop_sc = &sc->sc_c;
1780		newcbd->cmds[i].esiop_cbdp = newcbd;
1781		xfer = &newcbd->xfers[i];
1782		newcbd->cmds[i].cmd_tables = (struct siop_common_xfer *)xfer;
1783		memset(newcbd->cmds[i].cmd_tables, 0,
1784		    sizeof(struct esiop_xfer));
1785		dsa = newcbd->xferdma->dm_segs[0].ds_addr +
1786		    i * sizeof(struct esiop_xfer);
1787		newcbd->cmds[i].cmd_c.dsa = dsa;
1788		newcbd->cmds[i].cmd_c.status = CMDST_FREE;
1789		xfer->siop_tables.t_msgout.count = htole32(1);
1790		xfer->siop_tables.t_msgout.addr = htole32(dsa);
1791		xfer->siop_tables.t_msgin.count = htole32(1);
1792		xfer->siop_tables.t_msgin.addr = htole32(dsa +
1793			offsetof(struct siop_common_xfer, msg_in));
1794		xfer->siop_tables.t_extmsgin.count = htole32(2);
1795		xfer->siop_tables.t_extmsgin.addr = htole32(dsa +
1796			offsetof(struct siop_common_xfer, msg_in) + 1);
1797		xfer->siop_tables.t_extmsgdata.addr = htole32(dsa +
1798			offsetof(struct siop_common_xfer, msg_in) + 3);
1799		xfer->siop_tables.t_status.count = htole32(1);
1800		xfer->siop_tables.t_status.addr = htole32(dsa +
1801			offsetof(struct siop_common_xfer, status));
1802
1803		s = splbio();
1804		TAILQ_INSERT_TAIL(&sc->free_list, &newcbd->cmds[i], next);
1805		splx(s);
1806#ifdef SIOP_DEBUG
1807		printf("tables[%d]: in=0x%x out=0x%x status=0x%x\n", i,
1808		    le32toh(newcbd->cmds[i].cmd_tables->t_msgin.addr),
1809		    le32toh(newcbd->cmds[i].cmd_tables->t_msgout.addr),
1810		    le32toh(newcbd->cmds[i].cmd_tables->t_status.addr));
1811#endif
1812	}
1813	s = splbio();
1814	TAILQ_INSERT_TAIL(&sc->cmds, newcbd, next);
1815	sc->sc_c.sc_adapt.adapt_openings += SIOP_NCMDPB;
1816	splx(s);
1817	return;
1818bad0:
1819	bus_dmamap_unload(sc->sc_c.sc_dmat, newcbd->xferdma);
1820	bus_dmamap_destroy(sc->sc_c.sc_dmat, newcbd->xferdma);
1821bad1:
1822	bus_dmamem_free(sc->sc_c.sc_dmat, &seg, rseg);
1823bad2:
1824	free(newcbd->cmds, M_DEVBUF);
1825bad3:
1826	free(newcbd, M_DEVBUF);
1827	return;
1828}
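/*
 * Command descriptors are grown one DMA-safe page at a time: a page
 * holds SIOP_NCMDPB struct esiop_xfer, and the DSA of descriptor i is
 * simply
 *
 *	dsa = newcbd->xferdma->dm_segs[0].ds_addr +
 *	    i * sizeof(struct esiop_xfer);
 *
 * The msg_in/msg_out/status pointers of every table are precomputed
 * here, so the I/O path mostly just fills in the command and data
 * pointers.  A minimal sketch of a caller (hypothetical, error
 * handling and locking omitted):
 *
 *	if (TAILQ_FIRST(&sc->free_list) == NULL)
 *		esiop_morecbd(sc);
 *	esiop_cmd = TAILQ_FIRST(&sc->free_list);
 *	if (esiop_cmd != NULL)
 *		TAILQ_REMOVE(&sc->free_list, esiop_cmd, next);
 */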
1829
1830void
1831esiop_moretagtbl(sc)
1832	struct esiop_softc *sc;
1833{
1834	int error, i, j, s;
1835	bus_dma_segment_t seg;
1836	int rseg;
1837	struct esiop_dsatblblk *newtblblk;
1838	struct esiop_dsatbl *newtbls;
1839	u_int32_t *tbls;
1840
1841	/* allocate a new list head */
1842	newtblblk = malloc(sizeof(struct esiop_dsatblblk),
1843	    M_DEVBUF, M_NOWAIT|M_ZERO);
1844	if (newtblblk == NULL) {
1845		printf("%s: can't allocate memory for tag DSA table block\n",
1846		    sc->sc_c.sc_dev.dv_xname);
1847		return;
1848	}
1849
1850	/* allocate tbl list */
1851	newtbls = malloc(sizeof(struct esiop_dsatbl) * ESIOP_NTPB,
1852	    M_DEVBUF, M_NOWAIT|M_ZERO);
1853	if (newtbls == NULL) {
1854		printf("%s: can't allocate memory for tag DSA tables\n",
1855		    sc->sc_c.sc_dev.dv_xname);
1856		goto bad3;
1857	}
1858	error = bus_dmamem_alloc(sc->sc_c.sc_dmat, PAGE_SIZE, PAGE_SIZE, 0,
1859	    &seg, 1, &rseg, BUS_DMA_NOWAIT);
1860	if (error) {
1861		printf("%s: unable to allocate tbl DMA memory, error = %d\n",
1862		    sc->sc_c.sc_dev.dv_xname, error);
1863		goto bad2;
1864	}
1865	error = bus_dmamem_map(sc->sc_c.sc_dmat, &seg, rseg, PAGE_SIZE,
1866	    (caddr_t *)&tbls, BUS_DMA_NOWAIT|BUS_DMA_COHERENT);
1867	if (error) {
1868		printf("%s: unable to map tbls DMA memory, error = %d\n",
1869		    sc->sc_c.sc_dev.dv_xname, error);
1870		goto bad1;
1871	}
1872	error = bus_dmamap_create(sc->sc_c.sc_dmat, PAGE_SIZE, 1, PAGE_SIZE, 0,
1873	    BUS_DMA_NOWAIT, &newtblblk->blkmap);
1874	if (error) {
1875		printf("%s: unable to create tbl DMA map, error = %d\n",
1876		    sc->sc_c.sc_dev.dv_xname, error);
1877		goto bad1;
1878	}
1879	error = bus_dmamap_load(sc->sc_c.sc_dmat, newtblblk->blkmap,
1880	    tbls, PAGE_SIZE, NULL, BUS_DMA_NOWAIT);
1881	if (error) {
1882		printf("%s: unable to load tbl DMA map, error = %d\n",
1883		    sc->sc_c.sc_dev.dv_xname, error);
1884		goto bad0;
1885	}
1886#ifdef DEBUG
1887	printf("%s: alloc new tag DSA table at PHY addr 0x%lx\n",
1888	    sc->sc_c.sc_dev.dv_xname,
1889	    (unsigned long)newtblblk->blkmap->dm_segs[0].ds_addr);
1890#endif
1891	for (i = 0; i < ESIOP_NTPB; i++) {
1892		newtbls[i].tblblk = newtblblk;
1893		newtbls[i].tbl = &tbls[i * ESIOP_NTAG];
1894		newtbls[i].tbl_offset = i * ESIOP_NTAG * sizeof(u_int32_t);
1895		newtbls[i].tbl_dsa = newtblblk->blkmap->dm_segs[0].ds_addr +
1896		    newtbls[i].tbl_offset;
1897		for (j = 0; j < ESIOP_NTAG; j++)
1898			newtbls[i].tbl[j] = j;
1899		s = splbio();
1900		TAILQ_INSERT_TAIL(&sc->free_tagtbl, &newtbls[i], next);
1901		splx(s);
1902	}
1903	s = splbio();
1904	TAILQ_INSERT_TAIL(&sc->tag_tblblk, newtblblk, next);
1905	splx(s);
1906	return;
1907bad0:
1908	bus_dmamap_unload(sc->sc_c.sc_dmat, newtblblk->blkmap);
1909	bus_dmamap_destroy(sc->sc_c.sc_dmat, newtblblk->blkmap);
1910bad1:
1911	bus_dmamem_free(sc->sc_c.sc_dmat, &seg, rseg);
1912bad2:
1913	free(newtbls, M_DEVBUF);
1914bad3:
1915	free(newtblblk, M_DEVBUF);
1916	return;
1917}
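/*
 * Tag DSA tables use the same page-at-a-time scheme: a page is split
 * into ESIOP_NTPB tables of ESIOP_NTAG 32-bit entries each.  tbl_dsa
 * is the physical address the SCRIPT dereferences on a tagged
 * reselection, and tbl_offset lets esiop_start() sync only the table
 * it touched.
 */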
1918
1919void
1920esiop_update_scntl3(sc, _siop_target)
1921	struct esiop_softc *sc;
1922	struct siop_common_target *_siop_target;
1923{
1924	struct esiop_target *esiop_target = (struct esiop_target *)_siop_target;
1925	esiop_script_write(sc, esiop_target->lun_table_offset,
1926	    esiop_target->target_c.id);
1927}
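/*
 * The first word of a target's LUN table is its "id" word, which
 * packs the SCSI ID together with the negotiated SCNTL3/SXFER values
 * used when selecting the target, so a change in the negotiated
 * parameters only requires rewriting that single word.
 */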
1928
1929void
1930esiop_add_dev(sc, target, lun)
1931	struct esiop_softc *sc;
1932	int target;
1933	int lun;
1934{
1935	struct esiop_target *esiop_target =
1936	    (struct esiop_target *)sc->sc_c.targets[target];
1937	struct esiop_lun *esiop_lun = esiop_target->esiop_lun[lun];
1938
1939	/* we need a tag DSA table */
1940	esiop_lun->lun_tagtbl = TAILQ_FIRST(&sc->free_tagtbl);
1941	if (esiop_lun->lun_tagtbl == NULL) {
1942		esiop_moretagtbl(sc);
1943		esiop_lun->lun_tagtbl = TAILQ_FIRST(&sc->free_tagtbl);
1944		if (esiop_lun->lun_tagtbl == NULL) {
1945			/* no resources, run untagged */
1946			esiop_target->target_c.flags &= ~TARF_TAG;
1947			return;
1948		}
1949	}
1950	TAILQ_REMOVE(&sc->free_tagtbl, esiop_lun->lun_tagtbl, next);
1951	/* Update LUN DSA table */
1952	esiop_script_write(sc, esiop_target->lun_table_offset +
1953	   lun * 2 + A_target_luntbl_tag / sizeof(u_int32_t),
1954	    esiop_lun->lun_tagtbl->tbl_dsa);
1955	esiop_script_sync(sc, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1956}
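/*
 * esiop_add_dev() is where a LUN gets its tag DSA table: one is taken
 * from the free list (growing the list with esiop_moretagtbl() if
 * needed) and its physical address is stored in the LUN's
 * A_target_luntbl_tag entry.  If no table can be obtained, TARF_TAG
 * is cleared and the target simply runs untagged.
 */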
1957
1958void
1959esiop_del_dev(sc, target, lun)
1960	struct esiop_softc *sc;
1961	int target;
1962	int lun;
1963{
1964	struct esiop_target *esiop_target;
1965#ifdef SIOP_DEBUG
1966	printf("%s:%d:%d: free lun sw entry\n",
1967	    sc->sc_c.sc_dev.dv_xname, target, lun);
1968#endif
1969	if (sc->sc_c.targets[target] == NULL)
1970		return;
1971	esiop_target = (struct esiop_target *)sc->sc_c.targets[target];
1972	free(esiop_target->esiop_lun[lun], M_DEVBUF);
1973	esiop_target->esiop_lun[lun] = NULL;
1974}
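/*
 * esiop_del_dev() only drops the software-side per-LUN state; the
 * script-visible table entries written by esiop_add_dev() and
 * esiop_target_register() are left in place.
 */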
1975
1976void
1977esiop_target_register(sc, target)
1978	struct esiop_softc *sc;
1979	u_int32_t target;
1980{
1981	struct esiop_target *esiop_target =
1982	    (struct esiop_target *)sc->sc_c.targets[target];
1983	struct esiop_lun *esiop_lun;
1984	int lun;
1985
1986	/* get a DSA table for this target */
1987	esiop_target->lun_table_offset = sc->sc_free_offset;
1988	sc->sc_free_offset += sc->sc_c.sc_chan.chan_nluns * 2 + 2;
1989#ifdef SIOP_DEBUG
1990	printf("%s: lun table for target %d offset %d free offset %d\n",
1991	    sc->sc_c.sc_dev.dv_xname, target, esiop_target->lun_table_offset,
1992	    sc->sc_free_offset);
1993#endif
1994	/* first 32 bits are the ID (for select) */
1995	esiop_script_write(sc, esiop_target->lun_table_offset,
1996	    esiop_target->target_c.id);
1997	/* Record this table in the target DSA table */
1998	esiop_script_write(sc,
1999	    sc->sc_target_table_offset + target,
2000	    (esiop_target->lun_table_offset * sizeof(u_int32_t)) +
2001	    sc->sc_c.sc_scriptaddr);
2002	/* if we have a tag table, register it */
2003	for (lun = 0; lun < sc->sc_c.sc_chan.chan_nluns; lun++) {
2004		esiop_lun = esiop_target->esiop_lun[lun];
2005		if (esiop_lun == NULL)
2006			continue;
2007		if (esiop_lun->lun_tagtbl)
2008			esiop_script_write(sc, esiop_target->lun_table_offset +
2009			   lun * 2 + A_target_luntbl_tag / sizeof(u_int32_t),
2010			    esiop_lun->lun_tagtbl->tbl_dsa);
2011	}
2012	esiop_script_sync(sc,
2013	    BUS_DMASYNC_PREREAD |  BUS_DMASYNC_PREWRITE);
2014}
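/*
 * Resulting per-target LUN table layout (offsets in 32-bit words;
 * A_target_luntbl == 8 and A_target_luntbl_tag == 12 are assumed here
 * as byte offsets, consistent with the arithmetic above, but the
 * authoritative values come from the assembled script in esiop.out):
 *
 *	word 0			id word (select ID and timing)
 *	word 1			not written here
 *	word 2 + lun * 2	DSA of the active untagged command
 *	word 3 + lun * 2	physical address of the LUN's tag DSA table
 *
 * The target's entry in the global target table is the physical
 * address of word 0, i.e. lun_table_offset * 4 + sc_scriptaddr.
 */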
2015
2016#ifdef SIOP_STATS
2017void
2018esiop_printstats()
2019{
2020	printf("esiop_stat_intr %d\n", esiop_stat_intr);
2021	printf("esiop_stat_intr_shortxfer %d\n", esiop_stat_intr_shortxfer);
2022	printf("esiop_stat_intr_xferdisc %d\n", esiop_stat_intr_xferdisc);
2023	printf("esiop_stat_intr_sdp %d\n", esiop_stat_intr_sdp);
2024	printf("esiop_stat_intr_done %d\n", esiop_stat_intr_done);
2025	printf("esiop_stat_intr_lunresel %d\n", esiop_stat_intr_lunresel);
2026	printf("esiop_stat_intr_qfull %d\n", esiop_stat_intr_qfull);
2027}
2028#endif
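/*
 * The esiop_stat_* counters above exist only when SIOP_STATS is
 * defined; they are presumably bumped from the interrupt path and
 * esiop_printstats() just dumps them.
 */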
2029