1/*	$NetBSD: esiop.c,v 1.21 2003/08/04 19:50:11 bouyer Exp $	*/
2
3/*
4 * Copyright (c) 2002 Manuel Bouyer.
5 *
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions
8 * are met:
9 * 1. Redistributions of source code must retain the above copyright
10 *    notice, this list of conditions and the following disclaimer.
11 * 2. Redistributions in binary form must reproduce the above copyright
12 *    notice, this list of conditions and the following disclaimer in the
13 *    documentation and/or other materials provided with the distribution.
14 * 3. All advertising materials mentioning features or use of this software
15 *    must display the following acknowledgement:
16 *	This product includes software developed by Manuel Bouyer.
17 * 4. The name of the author may not be used to endorse or promote products
18 *    derived from this software without specific prior written permission.
19 *
20 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
21 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
22 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
23 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
24 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
25 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
26 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
27 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
28 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
29 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
30 *
31 */
32
33/* SYM53c7/8xx PCI-SCSI I/O Processors driver */
34
35#include <sys/cdefs.h>
36__KERNEL_RCSID(0, "$NetBSD: esiop.c,v 1.21 2003/08/04 19:50:11 bouyer Exp $");
37
38#include <sys/param.h>
39#include <sys/systm.h>
40#include <sys/device.h>
41#include <sys/malloc.h>
42#include <sys/buf.h>
43#include <sys/kernel.h>
44
45#include <uvm/uvm_extern.h>
46
47#include <machine/endian.h>
48#include <machine/bus.h>
49
50#include <dev/microcode/siop/esiop.out>
51
52#include <dev/scsipi/scsi_all.h>
53#include <dev/scsipi/scsi_message.h>
54#include <dev/scsipi/scsipi_all.h>
55
56#include <dev/scsipi/scsiconf.h>
57
58#include <dev/ic/siopreg.h>
59#include <dev/ic/siopvar_common.h>
60#include <dev/ic/esiopvar.h>
61
62#include "opt_siop.h"
63
64#ifndef DEBUG
65#undef DEBUG
66#endif
67#undef SIOP_DEBUG
68#undef SIOP_DEBUG_DR
69#undef SIOP_DEBUG_INTR
70#undef SIOP_DEBUG_SCHED
71#undef DUMP_SCRIPT
72
73#define SIOP_STATS
74
75#ifndef SIOP_DEFAULT_TARGET
76#define SIOP_DEFAULT_TARGET 7
77#endif
78
79/* number of cmd descriptors per block */
80#define SIOP_NCMDPB (PAGE_SIZE / sizeof(struct esiop_xfer))
81
82void	esiop_reset __P((struct esiop_softc *));
83void	esiop_checkdone __P((struct esiop_softc *));
84void	esiop_handle_reset __P((struct esiop_softc *));
85void	esiop_scsicmd_end __P((struct esiop_cmd *));
86void	esiop_unqueue __P((struct esiop_softc *, int, int));
87int	esiop_handle_qtag_reject __P((struct esiop_cmd *));
88static void	esiop_start __P((struct esiop_softc *, struct esiop_cmd *));
89void 	esiop_timeout __P((void *));
90void	esiop_scsipi_request __P((struct scsipi_channel *,
91			scsipi_adapter_req_t, void *));
92void	esiop_dump_script __P((struct esiop_softc *));
93void	esiop_morecbd __P((struct esiop_softc *));
94void	esiop_moretagtbl __P((struct esiop_softc *));
95void	siop_add_reselsw __P((struct esiop_softc *, int));
96void	esiop_target_register __P((struct esiop_softc *, u_int32_t));
97
98void    esiop_update_scntl3 __P((struct esiop_softc *,
99			struct siop_common_target *));
100
101#ifdef SIOP_STATS
102static int esiop_stat_intr = 0;
103static int esiop_stat_intr_shortxfer = 0;
104static int esiop_stat_intr_sdp = 0;
105static int esiop_stat_intr_done = 0;
106static int esiop_stat_intr_xferdisc = 0;
107static int esiop_stat_intr_lunresel = 0;
108static int esiop_stat_intr_qfull = 0;
109void esiop_printstats __P((void));
110#define INCSTAT(x) x++
111#else
112#define INCSTAT(x)
113#endif
114
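/*
 * The SCRIPTS program lives either in the chip's on-board RAM (SF_CHIP_RAM)
 * or in a host-memory DMA buffer; the helpers below hide that difference
 * and take care of byte order for the host-memory case.
 */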
115static __inline__ void esiop_script_sync __P((struct esiop_softc *, int));
116static __inline__ void
117esiop_script_sync(sc, ops)
118	struct esiop_softc *sc;
119	int ops;
120{
121	if ((sc->sc_c.features & SF_CHIP_RAM) == 0)
122		bus_dmamap_sync(sc->sc_c.sc_dmat, sc->sc_c.sc_scriptdma, 0,
123		    PAGE_SIZE, ops);
124}
125
126static __inline__ u_int32_t esiop_script_read __P((struct esiop_softc *, u_int));
127static __inline__ u_int32_t
128esiop_script_read(sc, offset)
129	struct esiop_softc *sc;
130	u_int offset;
131{
132	if (sc->sc_c.features & SF_CHIP_RAM) {
133		return bus_space_read_4(sc->sc_c.sc_ramt, sc->sc_c.sc_ramh,
134		    offset * 4);
135	} else {
136		return le32toh(sc->sc_c.sc_script[offset]);
137	}
138}
139
140static __inline__ void esiop_script_write __P((struct esiop_softc *, u_int,
141	u_int32_t));
142static __inline__ void
143esiop_script_write(sc, offset, val)
144	struct esiop_softc *sc;
145	u_int offset;
146	u_int32_t val;
147{
148	if (sc->sc_c.features & SF_CHIP_RAM) {
149		bus_space_write_4(sc->sc_c.sc_ramt, sc->sc_c.sc_ramh,
150		    offset * 4, val);
151	} else {
152		sc->sc_c.sc_script[offset] = htole32(val);
153	}
154}
155
156void
157esiop_attach(sc)
158	struct esiop_softc *sc;
159{
160	struct esiop_dsatbl *tagtbl_donering;
161
162	if (siop_common_attach(&sc->sc_c) != 0 )
163		return;
164
165	TAILQ_INIT(&sc->free_list);
166	TAILQ_INIT(&sc->cmds);
167	TAILQ_INIT(&sc->free_tagtbl);
168	TAILQ_INIT(&sc->tag_tblblk);
169	sc->sc_currschedslot = 0;
170#ifdef SIOP_DEBUG
171	aprint_debug("%s: script size = %d, PHY addr=0x%x, VIRT=%p\n",
172	    sc->sc_c.sc_dev.dv_xname, (int)sizeof(esiop_script),
173	    (u_int32_t)sc->sc_c.sc_scriptaddr, sc->sc_c.sc_script);
174#endif
175
176	sc->sc_c.sc_adapt.adapt_max_periph = ESIOP_NTAG;
177	sc->sc_c.sc_adapt.adapt_request = esiop_scsipi_request;
178
179	/*
180	 * get space for the CMD done slots. For this we use a tag table entry:
181	 * it's the same size and saves us from wasting 3/4 of a page
182	 */
183#ifdef DIAGNOSTIC
184	if (ESIOP_NTAG != A_ndone_slots) {
185		aprint_error("%s: size of tag DSA table different from the done"
186		    " ring\n", sc->sc_c.sc_dev.dv_xname);
187		return;
188	}
189#endif
190	esiop_moretagtbl(sc);
191	tagtbl_donering = TAILQ_FIRST(&sc->free_tagtbl);
192	if (tagtbl_donering == NULL) {
193		aprint_error("%s: no memory for command done ring\n",
194		    sc->sc_c.sc_dev.dv_xname);
195		return;
196	}
197	TAILQ_REMOVE(&sc->free_tagtbl, tagtbl_donering, next);
198	sc->sc_done_map = tagtbl_donering->tblblk->blkmap;
199	sc->sc_done_offset = tagtbl_donering->tbl_offset;
200	sc->sc_done_slot = &tagtbl_donering->tbl[0];
201
202	/* Do a bus reset, so that devices fall back to narrow/async */
203	siop_resetbus(&sc->sc_c);
204	/*
205	 * esiop_reset() will reset the chip, thus clearing pending interrupts
206	 */
207	esiop_reset(sc);
208#ifdef DUMP_SCRIPT
209	esiop_dump_script(sc);
210#endif
211
212	config_found((struct device*)sc, &sc->sc_c.sc_chan, scsiprint);
213}
214
215void
216esiop_reset(sc)
217	struct esiop_softc *sc;
218{
219	int i, j;
220	u_int32_t addr;
221	u_int32_t msgin_addr, sem_addr;
222
223	siop_common_reset(&sc->sc_c);
224
225	/*
226	 * we copy the script at the beginning of RAM. Then there are 4 bytes
227	 * for the message-in buffer, and 4 bytes for the semaphore
228	 */
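	/*
	 * Resulting layout of the script area, as built by the offset
	 * arithmetic below (all offsets in 32-bit words):
	 *	script code (esiop_script)
	 *	message-in buffer		1 word
	 *	semaphore			1 word
	 *	scheduler ring			A_ncmd_slots * CMD_SLOTSIZE words
	 *	target DSA table		chan_ntargets words
	 */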
229	sc->sc_free_offset = sizeof(esiop_script) / sizeof(esiop_script[0]);
230	msgin_addr =
231	    sc->sc_free_offset * sizeof(u_int32_t) + sc->sc_c.sc_scriptaddr;
232	sc->sc_free_offset += 1;
233	sc->sc_semoffset = sc->sc_free_offset;
234	sem_addr =
235	    sc->sc_semoffset * sizeof(u_int32_t) + sc->sc_c.sc_scriptaddr;
236	sc->sc_free_offset += 1;
237	/* then we have the scheduler ring */
238	sc->sc_shedoffset = sc->sc_free_offset;
239	sc->sc_free_offset += A_ncmd_slots * CMD_SLOTSIZE;
240	/* then the targets DSA table */
241	sc->sc_target_table_offset = sc->sc_free_offset;
242	sc->sc_free_offset += sc->sc_c.sc_chan.chan_ntargets;
243	/* copy and patch the script */
244	if (sc->sc_c.features & SF_CHIP_RAM) {
245		bus_space_write_region_4(sc->sc_c.sc_ramt, sc->sc_c.sc_ramh, 0,
246		    esiop_script,
247		    sizeof(esiop_script) / sizeof(esiop_script[0]));
248		for (j = 0; j <
249		    (sizeof(E_tlq_offset_Used) / sizeof(E_tlq_offset_Used[0]));
250		    j++) {
251			bus_space_write_4(sc->sc_c.sc_ramt, sc->sc_c.sc_ramh,
252			    E_tlq_offset_Used[j] * 4,
253			    sizeof(struct siop_common_xfer));
254		}
255		for (j = 0; j <
256		    (sizeof(E_abs_msgin2_Used) / sizeof(E_abs_msgin2_Used[0]));
257		    j++) {
258			bus_space_write_4(sc->sc_c.sc_ramt, sc->sc_c.sc_ramh,
259			    E_abs_msgin2_Used[j] * 4, msgin_addr);
260		}
261		for (j = 0; j <
262		    (sizeof(E_abs_sem_Used) / sizeof(E_abs_sem_Used[0]));
263		    j++) {
264			bus_space_write_4(sc->sc_c.sc_ramt, sc->sc_c.sc_ramh,
265			    E_abs_sem_Used[j] * 4, sem_addr);
266		}
267
268		if (sc->sc_c.features & SF_CHIP_LED0) {
269			bus_space_write_region_4(sc->sc_c.sc_ramt,
270			    sc->sc_c.sc_ramh,
271			    Ent_led_on1, esiop_led_on,
272			    sizeof(esiop_led_on) / sizeof(esiop_led_on[0]));
273			bus_space_write_region_4(sc->sc_c.sc_ramt,
274			    sc->sc_c.sc_ramh,
275			    Ent_led_on2, esiop_led_on,
276			    sizeof(esiop_led_on) / sizeof(esiop_led_on[0]));
277			bus_space_write_region_4(sc->sc_c.sc_ramt,
278			    sc->sc_c.sc_ramh,
279			    Ent_led_off, esiop_led_off,
280			    sizeof(esiop_led_off) / sizeof(esiop_led_off[0]));
281		}
282	} else {
283		for (j = 0;
284		    j < (sizeof(esiop_script) / sizeof(esiop_script[0])); j++) {
285			sc->sc_c.sc_script[j] = htole32(esiop_script[j]);
286		}
287		for (j = 0; j <
288		    (sizeof(E_tlq_offset_Used) / sizeof(E_tlq_offset_Used[0]));
289		    j++) {
290			sc->sc_c.sc_script[E_tlq_offset_Used[j]] =
291			    htole32(sizeof(struct siop_common_xfer));
292		}
293		for (j = 0; j <
294		    (sizeof(E_abs_msgin2_Used) / sizeof(E_abs_msgin2_Used[0]));
295		    j++) {
296			sc->sc_c.sc_script[E_abs_msgin2_Used[j]] =
297			    htole32(msgin_addr);
298		}
299		for (j = 0; j <
300		    (sizeof(E_abs_sem_Used) / sizeof(E_abs_sem_Used[0]));
301		    j++) {
302			sc->sc_c.sc_script[E_abs_sem_Used[j]] =
303			    htole32(sem_addr);
304		}
305
306		if (sc->sc_c.features & SF_CHIP_LED0) {
307			for (j = 0; j < (sizeof(esiop_led_on) /
308			    sizeof(esiop_led_on[0])); j++)
309				sc->sc_c.sc_script[
310				    Ent_led_on1 / sizeof(esiop_led_on[0]) + j
311				    ] = htole32(esiop_led_on[j]);
312			for (j = 0; j < (sizeof(esiop_led_on) /
313			    sizeof(esiop_led_on[0])); j++)
314				sc->sc_c.sc_script[
315				    Ent_led_on2 / sizeof(esiop_led_on[0]) + j
316				    ] = htole32(esiop_led_on[j]);
317			for (j = 0; j < (sizeof(esiop_led_off) /
318			    sizeof(esiop_led_off[0])); j++)
319				sc->sc_c.sc_script[
320				   Ent_led_off / sizeof(esiop_led_off[0]) + j
321				   ] = htole32(esiop_led_off[j]);
322		}
323	}
324	/* get base of scheduler ring */
325	addr = sc->sc_c.sc_scriptaddr + sc->sc_shedoffset * sizeof(u_int32_t);
326	/* init scheduler */
327	for (i = 0; i < A_ncmd_slots; i++) {
328		esiop_script_write(sc,
329		    sc->sc_shedoffset + i * CMD_SLOTSIZE, A_f_cmd_free);
330	}
331	sc->sc_currschedslot = 0;
332	bus_space_write_1(sc->sc_c.sc_rt, sc->sc_c.sc_rh, SIOP_SCRATCHE, 0);
333	bus_space_write_4(sc->sc_c.sc_rt, sc->sc_c.sc_rh, SIOP_SCRATCHD, addr);
334	/*
335	 * 0x78000000 is a 'move data8 to reg' instruction; data8 is the
336	 * second octet, the register offset the third.
337	 */
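	/*
	 * Worked example (illustrative, not from the original source): with
	 * addr == 0x12345678 the four patched instructions become
	 * 0x78647800, 0x78655600, 0x78663400 and 0x78671200, i.e. "move
	 * 0x78/0x56/0x34/0x12 to registers 0x64-0x67" -- the four byte lanes
	 * of SCRATCHD, which the 32-bit write just above also initializes.
	 */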
338	esiop_script_write(sc, Ent_cmdr0 / 4,
339	    0x78640000 | ((addr & 0x000000ff) <<  8));
340	esiop_script_write(sc, Ent_cmdr1 / 4,
341	    0x78650000 | ((addr & 0x0000ff00)      ));
342	esiop_script_write(sc, Ent_cmdr2 / 4,
343	    0x78660000 | ((addr & 0x00ff0000) >>  8));
344	esiop_script_write(sc, Ent_cmdr3 / 4,
345	    0x78670000 | ((addr & 0xff000000) >> 16));
346	/* done ring */
347	for (i = 0; i < A_ndone_slots; i++)
348		sc->sc_done_slot[i] = 0;
349	bus_dmamap_sync(sc->sc_c.sc_dmat, sc->sc_done_map,
350	    sc->sc_done_offset, A_ndone_slots * sizeof(u_int32_t),
351	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
352	addr = sc->sc_done_map->dm_segs[0].ds_addr + sc->sc_done_offset;
353	sc->sc_currdoneslot = 0;
354	bus_space_write_1(sc->sc_c.sc_rt, sc->sc_c.sc_rh, SIOP_SCRATCHE + 2, 0);
355	bus_space_write_4(sc->sc_c.sc_rt, sc->sc_c.sc_rh, SIOP_SCRATCHF, addr);
356	esiop_script_write(sc, Ent_doner0 / 4,
357	    0x786c0000 | ((addr & 0x000000ff) <<  8));
358	esiop_script_write(sc, Ent_doner1 / 4,
359	    0x786d0000 | ((addr & 0x0000ff00)      ));
360	esiop_script_write(sc, Ent_doner2 / 4,
361	    0x786e0000 | ((addr & 0x00ff0000) >>  8));
362	esiop_script_write(sc, Ent_doner3 / 4,
363	    0x786f0000 | ((addr & 0xff000000) >> 16));
364
365	/* set flags */
366	bus_space_write_4(sc->sc_c.sc_rt, sc->sc_c.sc_rh, SIOP_SCRATCHC, 0);
367	/* write pointer of base of target DSA table */
368	addr = (sc->sc_target_table_offset * sizeof(u_int32_t)) +
369	    sc->sc_c.sc_scriptaddr;
370	esiop_script_write(sc, (Ent_load_targtable / 4) + 0,
371	    esiop_script_read(sc,(Ent_load_targtable / 4) + 0) |
372	    ((addr & 0x000000ff) <<  8));
373	esiop_script_write(sc, (Ent_load_targtable / 4) + 2,
374	    esiop_script_read(sc,(Ent_load_targtable / 4) + 2) |
375	    ((addr & 0x0000ff00)      ));
376	esiop_script_write(sc, (Ent_load_targtable / 4) + 4,
377	    esiop_script_read(sc,(Ent_load_targtable / 4) + 4) |
378	    ((addr & 0x00ff0000) >>  8));
379	esiop_script_write(sc, (Ent_load_targtable / 4) + 6,
380	    esiop_script_read(sc,(Ent_load_targtable / 4) + 6) |
381	    ((addr & 0xff000000) >> 16));
382#ifdef SIOP_DEBUG
383	printf("%s: target table offset %d free offset %d\n",
384	    sc->sc_c.sc_dev.dv_xname, sc->sc_target_table_offset,
385	    sc->sc_free_offset);
386#endif
387
388	/* register existing targets */
389	for (i = 0; i < sc->sc_c.sc_chan.chan_ntargets; i++) {
390		if (sc->sc_c.targets[i])
391			esiop_target_register(sc, i);
392	}
393	/* start script */
394	if ((sc->sc_c.features & SF_CHIP_RAM) == 0) {
395		bus_dmamap_sync(sc->sc_c.sc_dmat, sc->sc_c.sc_scriptdma, 0,
396		    PAGE_SIZE, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
397	}
398	bus_space_write_4(sc->sc_c.sc_rt, sc->sc_c.sc_rh, SIOP_DSP,
399	    sc->sc_c.sc_scriptaddr + Ent_reselect);
400}
401
402#if 0
403#define CALL_SCRIPT(ent) do {\
404	printf ("start script DSA 0x%lx DSP 0x%lx\n", \
405	    esiop_cmd->cmd_c.dsa, \
406	    sc->sc_c.sc_scriptaddr + ent); \
407bus_space_write_4(sc->sc_c.sc_rt, sc->sc_c.sc_rh, SIOP_DSP, sc->sc_c.sc_scriptaddr + ent); \
408} while (0)
409#else
410#define CALL_SCRIPT(ent) do {\
411bus_space_write_4(sc->sc_c.sc_rt, sc->sc_c.sc_rh, SIOP_DSP, sc->sc_c.sc_scriptaddr + ent); \
412} while (0)
413#endif
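/*
 * CALL_SCRIPT(ent) (re)starts the SCRIPTS processor: writing the physical
 * address of a script entry point to the DSP register makes the chip start
 * fetching instructions from there.
 */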
414
415int
416esiop_intr(v)
417	void *v;
418{
419	struct esiop_softc *sc = v;
420	struct esiop_target *esiop_target;
421	struct esiop_cmd *esiop_cmd;
422	struct esiop_lun *esiop_lun;
423	struct scsipi_xfer *xs;
424	int istat, sist, sstat1, dstat;
425	u_int32_t irqcode;
426	int need_reset = 0;
427	int offset, target, lun, tag;
428	u_int32_t tflags;
429	u_int32_t addr;
430	int freetarget = 0;
431	int slot;
432	int retval = 0;
433
434again:
435	istat = bus_space_read_1(sc->sc_c.sc_rt, sc->sc_c.sc_rh, SIOP_ISTAT);
436	if ((istat & (ISTAT_INTF | ISTAT_DIP | ISTAT_SIP)) == 0) {
437		return retval;
438	}
439	retval = 1;
440	INCSTAT(esiop_stat_intr);
441	esiop_checkdone(sc);
442	if (istat & ISTAT_INTF) {
443		bus_space_write_1(sc->sc_c.sc_rt, sc->sc_c.sc_rh,
444		    SIOP_ISTAT, ISTAT_INTF);
445		goto again;
446	}
447
448	if ((istat &(ISTAT_DIP | ISTAT_SIP | ISTAT_ABRT)) ==
449	    (ISTAT_DIP | ISTAT_ABRT)) {
450		/* clear abort */
451		bus_space_write_1(sc->sc_c.sc_rt, sc->sc_c.sc_rh,
452		    SIOP_ISTAT, 0);
453	}
454
455	/* get CMD from T/L/Q */
456	tflags = bus_space_read_4(sc->sc_c.sc_rt, sc->sc_c.sc_rh,
457	    SIOP_SCRATCHC);
458#ifdef SIOP_DEBUG_INTR
459		printf("interrupt, istat=0x%x tflags=0x%x "
460		    "DSA=0x%x DSP=0x%lx\n", istat, tflags,
461		    bus_space_read_4(sc->sc_c.sc_rt, sc->sc_c.sc_rh, SIOP_DSA),
462		    (u_long)(bus_space_read_4(sc->sc_c.sc_rt, sc->sc_c.sc_rh,
463			SIOP_DSP) -
464		    sc->sc_c.sc_scriptaddr));
465#endif
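	/*
	 * SCRATCHC holds the T/L/Q word of the current command (built as the
	 * 'tlq' field in esiop_scsipi_request()): the A_f_c_* bits flag which
	 * fields are valid, the target id is in bits 8-15, the lun in bits
	 * 16-23 and the tag in bits 24-31.
	 */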
466	target = (tflags & A_f_c_target) ? ((tflags >> 8) & 0xff) : -1;
467	if (target > sc->sc_c.sc_chan.chan_ntargets) target = -1;
468	lun = (tflags & A_f_c_lun) ? ((tflags >> 16) & 0xff) : -1;
469	if (lun > sc->sc_c.sc_chan.chan_nluns) lun = -1;
470	tag = (tflags & A_f_c_tag) ? ((tflags >> 24) & 0xff) : -1;
471
472	if (target >= 0 && lun >= 0) {
473		esiop_target = (struct esiop_target *)sc->sc_c.targets[target];
474		if (esiop_target == NULL) {
475			printf("esiop_target (target %d) not valid\n", target);
476			goto none;
477		}
478		esiop_lun = esiop_target->esiop_lun[lun];
479		if (esiop_lun == NULL) {
480			printf("esiop_lun (target %d lun %d) not valid\n",
481			    target, lun);
482			goto none;
483		}
484		esiop_cmd =
485		    (tag >= 0) ? esiop_lun->tactive[tag] : esiop_lun->active;
486		if (esiop_cmd == NULL) {
487			printf("esiop_cmd (target %d lun %d tag %d) not valid\n",
488			    target, lun, tag);
489			goto none;
490		}
491		xs = esiop_cmd->cmd_c.xs;
492#ifdef DIAGNOSTIC
493		if (esiop_cmd->cmd_c.status != CMDST_ACTIVE) {
494 			printf("esiop_cmd (target %d lun %d) "
495			    "not active (%d)\n", target, lun,
496			    esiop_cmd->cmd_c.status);
497			goto none;
498		}
499#endif
500		esiop_table_sync(esiop_cmd,
501		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
502	} else {
503none:
504		xs = NULL;
505		esiop_target = NULL;
506		esiop_lun = NULL;
507		esiop_cmd = NULL;
508	}
509	if (istat & ISTAT_DIP) {
510		dstat = bus_space_read_1(sc->sc_c.sc_rt, sc->sc_c.sc_rh,
511		    SIOP_DSTAT);
512		if (dstat & DSTAT_ABRT) {
513			/* was probably generated by a bus reset IOCTL */
514			if ((dstat & DSTAT_DFE) == 0)
515				siop_clearfifo(&sc->sc_c);
516			goto reset;
517		}
518		if (dstat & DSTAT_SSI) {
519			printf("single step dsp 0x%08x dsa 0x08%x\n",
520			    (int)(bus_space_read_4(sc->sc_c.sc_rt,
521			    sc->sc_c.sc_rh, SIOP_DSP) -
522			    sc->sc_c.sc_scriptaddr),
523			    bus_space_read_4(sc->sc_c.sc_rt, sc->sc_c.sc_rh,
524				SIOP_DSA));
525			if ((dstat & ~(DSTAT_DFE | DSTAT_SSI)) == 0 &&
526			    (istat & ISTAT_SIP) == 0) {
527				bus_space_write_1(sc->sc_c.sc_rt,
528				    sc->sc_c.sc_rh, SIOP_DCNTL,
529				    bus_space_read_1(sc->sc_c.sc_rt,
530				    sc->sc_c.sc_rh, SIOP_DCNTL) | DCNTL_STD);
531			}
532			return 1;
533		}
534
535		if (dstat & ~(DSTAT_SIR | DSTAT_DFE | DSTAT_SSI)) {
536		printf("%s: DMA IRQ:", sc->sc_c.sc_dev.dv_xname);
537		if (dstat & DSTAT_IID)
538			printf(" Illegal instruction");
539		if (dstat & DSTAT_BF)
540			printf(" bus fault");
541		if (dstat & DSTAT_MDPE)
542			printf(" parity");
543		if (dstat & DSTAT_DFE)
544			printf(" DMA fifo empty");
545		else
546			siop_clearfifo(&sc->sc_c);
547		printf(", DSP=0x%x DSA=0x%x: ",
548		    (int)(bus_space_read_4(sc->sc_c.sc_rt, sc->sc_c.sc_rh,
549			SIOP_DSP) - sc->sc_c.sc_scriptaddr),
550		    bus_space_read_4(sc->sc_c.sc_rt, sc->sc_c.sc_rh, SIOP_DSA));
551		if (esiop_cmd)
552			printf("T/L/Q=%d/%d/%d last msg_in=0x%x status=0x%x\n",
553			    target, lun, tag, esiop_cmd->cmd_tables->msg_in[0],
554			    le32toh(esiop_cmd->cmd_tables->status));
555		else
556			printf(" current T/L/Q invalid\n");
557		need_reset = 1;
558		}
559	}
560	if (istat & ISTAT_SIP) {
561		if (istat & ISTAT_DIP)
562			delay(10);
563		/*
564		 * Can't read sist0 & sist1 independently, or we would have
565		 * to insert a delay between the two reads
566		 */
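		/*
		 * (the 16-bit read below returns SIST0 in the low byte and
		 * SIST1 in the high byte, hence the (SIST1_xxx << 8) tests
		 * further down)
		 */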
567		sist = bus_space_read_2(sc->sc_c.sc_rt, sc->sc_c.sc_rh,
568		    SIOP_SIST0);
569		sstat1 = bus_space_read_1(sc->sc_c.sc_rt, sc->sc_c.sc_rh,
570		    SIOP_SSTAT1);
571#ifdef SIOP_DEBUG_INTR
572		printf("scsi interrupt, sist=0x%x sstat1=0x%x "
573		    "DSA=0x%x DSP=0x%lx\n", sist, sstat1,
574		    bus_space_read_4(sc->sc_c.sc_rt, sc->sc_c.sc_rh, SIOP_DSA),
575		    (u_long)(bus_space_read_4(sc->sc_c.sc_rt, sc->sc_c.sc_rh,
576			SIOP_DSP) -
577		    sc->sc_c.sc_scriptaddr));
578#endif
579		if (sist & SIST0_RST) {
580			esiop_handle_reset(sc);
581			/* no table to flush here */
582			return 1;
583		}
584		if (sist & SIST0_SGE) {
585			if (esiop_cmd)
586				scsipi_printaddr(xs->xs_periph);
587			else
588				printf("%s:", sc->sc_c.sc_dev.dv_xname);
589			printf("scsi gross error\n");
590			if (esiop_target)
591				esiop_target->target_c.flags &= ~TARF_DT;
592#ifdef DEBUG
593			printf("DSA=0x%x DSP=0x%lx\n",
594			    bus_space_read_4(sc->sc_c.sc_rt, sc->sc_c.sc_rh, SIOP_DSA),
595			    (u_long)(bus_space_read_4(sc->sc_c.sc_rt, sc->sc_c.sc_rh,
596				SIOP_DSP) -
597			    sc->sc_c.sc_scriptaddr));
598			printf("SDID 0x%x SCNTL3 0x%x SXFER 0x%x SCNTL4 0x%x\n",
599			    bus_space_read_1(sc->sc_c.sc_rt, sc->sc_c.sc_rh, SIOP_SDID),
600			     bus_space_read_1(sc->sc_c.sc_rt, sc->sc_c.sc_rh, SIOP_SCNTL3),
601			     bus_space_read_1(sc->sc_c.sc_rt, sc->sc_c.sc_rh, SIOP_SXFER),
602			     bus_space_read_1(sc->sc_c.sc_rt, sc->sc_c.sc_rh, SIOP_SCNTL4));
603
604#endif
605			goto reset;
606		}
607		if ((sist & SIST0_MA) && need_reset == 0) {
608			if (esiop_cmd) {
609				int scratchc0;
610				dstat = bus_space_read_1(sc->sc_c.sc_rt,
611				    sc->sc_c.sc_rh, SIOP_DSTAT);
612				/*
613				 * first restore DSA, in case we were in an S/G
614				 * operation.
615				 */
616				bus_space_write_4(sc->sc_c.sc_rt,
617				    sc->sc_c.sc_rh,
618				    SIOP_DSA, esiop_cmd->cmd_c.dsa);
619				scratchc0 = bus_space_read_1(sc->sc_c.sc_rt,
620				    sc->sc_c.sc_rh, SIOP_SCRATCHC);
621				switch (sstat1 & SSTAT1_PHASE_MASK) {
622				case SSTAT1_PHASE_STATUS:
623				/*
624				 * the previous phase may have been aborted for
625				 * any reason (for example, the target has less
626				 * data to transfer than requested). Just go to status
627				 * and the command should terminate.
628				 */
629					INCSTAT(esiop_stat_intr_shortxfer);
630					if ((dstat & DSTAT_DFE) == 0)
631						siop_clearfifo(&sc->sc_c);
632					/* no table to flush here */
633					CALL_SCRIPT(Ent_status);
634					return 1;
635				case SSTAT1_PHASE_MSGIN:
636					/*
637					 * target may be ready to disconnect.
638					 * Save data pointers just in case.
639					 */
640					INCSTAT(esiop_stat_intr_xferdisc);
641					if (scratchc0 & A_f_c_data)
642						siop_sdp(&esiop_cmd->cmd_c);
643					else if ((dstat & DSTAT_DFE) == 0)
644						siop_clearfifo(&sc->sc_c);
645					bus_space_write_1(sc->sc_c.sc_rt,
646					    sc->sc_c.sc_rh, SIOP_SCRATCHC,
647					    scratchc0 & ~A_f_c_data);
648					esiop_table_sync(esiop_cmd,
649					    BUS_DMASYNC_PREREAD |
650					    BUS_DMASYNC_PREWRITE);
651					CALL_SCRIPT(Ent_msgin);
652					return 1;
653				}
654				printf("%s: unexpected phase mismatch %d\n",
655				    sc->sc_c.sc_dev.dv_xname,
656				    sstat1 & SSTAT1_PHASE_MASK);
657			} else {
658				printf("%s: phase mismatch without command\n",
659				    sc->sc_c.sc_dev.dv_xname);
660			}
661			need_reset = 1;
662		}
663		if (sist & SIST0_PAR) {
664			/* parity error, reset */
665			if (esiop_cmd)
666				scsipi_printaddr(xs->xs_periph);
667			else
668				printf("%s:", sc->sc_c.sc_dev.dv_xname);
669			printf("parity error\n");
670			if (esiop_target)
671				esiop_target->target_c.flags &= ~TARF_DT;
672			goto reset;
673		}
674		if ((sist & (SIST1_STO << 8)) && need_reset == 0) {
675			/*
676			 * selection timeout: assume there's no device here.
677			 * We also have to update the ring pointer ourselves
678			 */
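			/*
			 * SCRATCHE holds the index of the scheduler slot being
			 * started and SCRATCHD its bus address; since the
			 * script won't consume this slot we free it and
			 * advance both by hand, wrapping at the end of the
			 * ring.
			 */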
679			slot = bus_space_read_1(sc->sc_c.sc_rt,
680			    sc->sc_c.sc_rh, SIOP_SCRATCHE);
681			esiop_script_sync(sc,
682			    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
683#ifdef SIOP_DEBUG_SCHED
684			printf("sel timeout target %d, slot %d\n", target, slot);
685#endif
686			/*
687			 * mark this slot as free, and advance to next slot
688			 */
689			esiop_script_write(sc,
690			    sc->sc_shedoffset + slot * CMD_SLOTSIZE,
691			    A_f_cmd_free);
692			addr = bus_space_read_4(sc->sc_c.sc_rt,
693				    sc->sc_c.sc_rh, SIOP_SCRATCHD);
694			if (slot < (A_ncmd_slots - 1)) {
695				bus_space_write_1(sc->sc_c.sc_rt,
696				    sc->sc_c.sc_rh, SIOP_SCRATCHE, slot + 1);
697				addr = addr + sizeof(struct esiop_slot);
698			} else {
699				bus_space_write_1(sc->sc_c.sc_rt,
700				    sc->sc_c.sc_rh, SIOP_SCRATCHE, 0);
701				addr = sc->sc_c.sc_scriptaddr +
702				    sc->sc_shedoffset * sizeof(u_int32_t);
703			}
704			bus_space_write_4(sc->sc_c.sc_rt, sc->sc_c.sc_rh,
705			    SIOP_SCRATCHD, addr);
706			esiop_script_sync(sc,
707			    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
708			if (esiop_cmd) {
709				esiop_cmd->cmd_c.status = CMDST_DONE;
710				xs->error = XS_SELTIMEOUT;
711				freetarget = 1;
712				goto end;
713			} else {
714				printf("%s: selection timeout without "
715				    "command, target %d (sdid 0x%x), "
716				    "slot %d\n",
717				    sc->sc_c.sc_dev.dv_xname, target,
718				    bus_space_read_1(sc->sc_c.sc_rt,
719				    sc->sc_c.sc_rh, SIOP_SDID), slot);
720				need_reset = 1;
721			}
722		}
723		if (sist & SIST0_UDC) {
724			/*
725			 * unexpected disconnect. Usually the target signals
726			 * a fatal condition this way. Attempt to get sense.
727			 */
728			 if (esiop_cmd) {
729				esiop_cmd->cmd_tables->status =
730				    htole32(SCSI_CHECK);
731				goto end;
732			}
733			printf("%s: unexpected disconnect without "
734			    "command\n", sc->sc_c.sc_dev.dv_xname);
735			goto reset;
736		}
737		if (sist & (SIST1_SBMC << 8)) {
738			/* SCSI bus mode change */
739			if (siop_modechange(&sc->sc_c) == 0 || need_reset == 1)
740				goto reset;
741			if ((istat & ISTAT_DIP) && (dstat & DSTAT_SIR)) {
742				/*
743				 * we have a script interrupt, it will
744				 * restart the script.
745				 */
746				goto scintr;
747			}
748			/*
749			 * else we have to restart it ourselves, at the
750			 * interrupted instruction.
751			 */
752			bus_space_write_4(sc->sc_c.sc_rt, sc->sc_c.sc_rh,
753			    SIOP_DSP,
754			    bus_space_read_4(sc->sc_c.sc_rt, sc->sc_c.sc_rh,
755			    SIOP_DSP) - 8);
756			return 1;
757		}
758		/* Else it's an unhandled exception (for now). */
759		printf("%s: unhandled scsi interrupt, sist=0x%x sstat1=0x%x "
760		    "DSA=0x%x DSP=0x%x\n", sc->sc_c.sc_dev.dv_xname, sist,
761		    bus_space_read_1(sc->sc_c.sc_rt, sc->sc_c.sc_rh,
762			SIOP_SSTAT1),
763		    bus_space_read_4(sc->sc_c.sc_rt, sc->sc_c.sc_rh, SIOP_DSA),
764		    (int)(bus_space_read_4(sc->sc_c.sc_rt, sc->sc_c.sc_rh,
765			SIOP_DSP) - sc->sc_c.sc_scriptaddr));
766		if (esiop_cmd) {
767			esiop_cmd->cmd_c.status = CMDST_DONE;
768			xs->error = XS_SELTIMEOUT;
769			goto end;
770		}
771		need_reset = 1;
772	}
773	if (need_reset) {
774reset:
775		/* fatal error, reset the bus */
776		siop_resetbus(&sc->sc_c);
777		/* no table to flush here */
778		return 1;
779	}
780
781scintr:
782	if ((istat & ISTAT_DIP) && (dstat & DSTAT_SIR)) { /* script interrupt */
783		irqcode = bus_space_read_4(sc->sc_c.sc_rt, sc->sc_c.sc_rh,
784		    SIOP_DSPS);
785#ifdef SIOP_DEBUG_INTR
786		printf("script interrupt 0x%x\n", irqcode);
787#endif
788		/*
789		 * a missing or inactive command is only acceptable for a
790		 * reselect interrupt
791		 */
792		if ((irqcode & 0x80) == 0) {
793			if (esiop_cmd == NULL) {
794				printf(
795			"%s: script interrupt (0x%x) with invalid DSA !!!\n",
796				    sc->sc_c.sc_dev.dv_xname, irqcode);
797				goto reset;
798			}
799			if (esiop_cmd->cmd_c.status != CMDST_ACTIVE) {
800				printf("%s: command with invalid status "
801				    "(IRQ code 0x%x current status %d) !\n",
802				    sc->sc_c.sc_dev.dv_xname,
803				    irqcode, esiop_cmd->cmd_c.status);
804				xs = NULL;
805			}
806		}
807		switch(irqcode) {
808		case A_int_err:
809			printf("error, DSP=0x%x\n",
810			    (int)(bus_space_read_4(sc->sc_c.sc_rt,
811			    sc->sc_c.sc_rh, SIOP_DSP) - sc->sc_c.sc_scriptaddr));
812			if (xs) {
813				xs->error = XS_SELTIMEOUT;
814				goto end;
815			} else {
816				goto reset;
817			}
818		case A_int_msgin:
819		{
820			int msgin = bus_space_read_1(sc->sc_c.sc_rt,
821			    sc->sc_c.sc_rh, SIOP_SFBR);
822			if (msgin == MSG_MESSAGE_REJECT) {
823				int msg, extmsg;
824				if (esiop_cmd->cmd_tables->msg_out[0] & 0x80) {
825					/*
826					 * message was part of a identify +
827					 * something else. Identify shouldn't
828					 * have been rejected.
829					 */
830					msg =
831					    esiop_cmd->cmd_tables->msg_out[1];
832					extmsg =
833					    esiop_cmd->cmd_tables->msg_out[3];
834				} else {
835					msg =
836					    esiop_cmd->cmd_tables->msg_out[0];
837					extmsg =
838					    esiop_cmd->cmd_tables->msg_out[2];
839				}
840				if (msg == MSG_MESSAGE_REJECT) {
841					/* MSG_REJECT for a MSG_REJECT! */
842					if (xs)
843						scsipi_printaddr(xs->xs_periph);
844					else
845						printf("%s: ",
846						   sc->sc_c.sc_dev.dv_xname);
847					printf("our reject message was "
848					    "rejected\n");
849					goto reset;
850				}
851				if (msg == MSG_EXTENDED &&
852				    extmsg == MSG_EXT_WDTR) {
853					/* WDTR rejected, initiate sync */
854					if ((esiop_target->target_c.flags &
855					   TARF_SYNC) == 0) {
856						esiop_target->target_c.status =
857						    TARST_OK;
858						siop_update_xfer_mode(&sc->sc_c,
859						    target);
860						/* no table to flush here */
861						CALL_SCRIPT(Ent_msgin_ack);
862						return 1;
863					}
864					esiop_target->target_c.status =
865					    TARST_SYNC_NEG;
866					siop_sdtr_msg(&esiop_cmd->cmd_c, 0,
867					    sc->sc_c.st_minsync,
868					    sc->sc_c.maxoff);
869					esiop_table_sync(esiop_cmd,
870					    BUS_DMASYNC_PREREAD |
871					    BUS_DMASYNC_PREWRITE);
872					CALL_SCRIPT(Ent_send_msgout);
873					return 1;
874				} else if (msg == MSG_EXTENDED &&
875				    extmsg == MSG_EXT_SDTR) {
876					/* sync rejected */
877					esiop_target->target_c.offset = 0;
878					esiop_target->target_c.period = 0;
879					esiop_target->target_c.status =
880					    TARST_OK;
881					siop_update_xfer_mode(&sc->sc_c,
882					    target);
883					/* no table to flush here */
884					CALL_SCRIPT(Ent_msgin_ack);
885					return 1;
886				} else if (msg == MSG_EXTENDED &&
887				    extmsg == MSG_EXT_PPR) {
888					/* PPR rejected */
889					esiop_target->target_c.offset = 0;
890					esiop_target->target_c.period = 0;
891					esiop_target->target_c.status =
892					    TARST_OK;
893					siop_update_xfer_mode(&sc->sc_c,
894					    target);
895					/* no table to flush here */
896					CALL_SCRIPT(Ent_msgin_ack);
897					return 1;
898				} else if (msg == MSG_SIMPLE_Q_TAG ||
899				    msg == MSG_HEAD_OF_Q_TAG ||
900				    msg == MSG_ORDERED_Q_TAG) {
901					if (esiop_handle_qtag_reject(
902					    esiop_cmd) == -1)
903						goto reset;
904					CALL_SCRIPT(Ent_msgin_ack);
905					return 1;
906				}
907				if (xs)
908					scsipi_printaddr(xs->xs_periph);
909				else
910					printf("%s: ",
911					    sc->sc_c.sc_dev.dv_xname);
912				if (msg == MSG_EXTENDED) {
913					printf("scsi message reject, extended "
914					    "message sent was 0x%x\n", extmsg);
915				} else {
916					printf("scsi message reject, message "
917					    "sent was 0x%x\n", msg);
918				}
919				/* no table to flush here */
920				CALL_SCRIPT(Ent_msgin_ack);
921				return 1;
922			}
923			if (xs)
924				scsipi_printaddr(xs->xs_periph);
925			else
926				printf("%s: ", sc->sc_c.sc_dev.dv_xname);
927			printf("unhandled message 0x%x\n",
928			    esiop_cmd->cmd_tables->msg_in[0]);
929			esiop_cmd->cmd_tables->msg_out[0] = MSG_MESSAGE_REJECT;
930			esiop_cmd->cmd_tables->t_msgout.count= htole32(1);
931			esiop_table_sync(esiop_cmd,
932			    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
933			CALL_SCRIPT(Ent_send_msgout);
934			return 1;
935		}
936		case A_int_extmsgin:
937#ifdef SIOP_DEBUG_INTR
938			printf("extended message: msg 0x%x len %d\n",
939			    esiop_cmd->cmd_tables->msg_in[2],
940			    esiop_cmd->cmd_tables->msg_in[1]);
941#endif
942			if (esiop_cmd->cmd_tables->msg_in[1] >
943			    sizeof(esiop_cmd->cmd_tables->msg_in) - 2)
944				printf("%s: extended message too big (%d)\n",
945				    sc->sc_c.sc_dev.dv_xname,
946				    esiop_cmd->cmd_tables->msg_in[1]);
947			esiop_cmd->cmd_tables->t_extmsgdata.count =
948			    htole32(esiop_cmd->cmd_tables->msg_in[1] - 1);
949			esiop_table_sync(esiop_cmd,
950			    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
951			CALL_SCRIPT(Ent_get_extmsgdata);
952			return 1;
953		case A_int_extmsgdata:
954#ifdef SIOP_DEBUG_INTR
955			{
956			int i;
957			printf("extended message: 0x%x, data:",
958			    esiop_cmd->cmd_tables->msg_in[2]);
959			for (i = 3; i < 2 + esiop_cmd->cmd_tables->msg_in[1];
960			    i++)
961				printf(" 0x%x",
962				    esiop_cmd->cmd_tables->msg_in[i]);
963			printf("\n");
964			}
965#endif
966			if (esiop_cmd->cmd_tables->msg_in[2] == MSG_EXT_PPR) {
967				switch (siop_ppr_neg(&esiop_cmd->cmd_c)) {
968				case SIOP_NEG_MSGOUT:
969					esiop_update_scntl3(sc,
970					    esiop_cmd->cmd_c.siop_target);
971					esiop_table_sync(esiop_cmd,
972					    BUS_DMASYNC_PREREAD |
973					    BUS_DMASYNC_PREWRITE);
974					CALL_SCRIPT(Ent_send_msgout);
975					return 1;
976				case SIOP_NEG_ACK:
977					esiop_update_scntl3(sc,
978					    esiop_cmd->cmd_c.siop_target);
979					CALL_SCRIPT(Ent_msgin_ack);
980					return 1;
981				default:
982					panic("invalid retval from "
983					    "siop_wdtr_neg()");
984				}
985				return 1;
986			}
987			if (esiop_cmd->cmd_tables->msg_in[2] == MSG_EXT_WDTR) {
988				switch (siop_wdtr_neg(&esiop_cmd->cmd_c)) {
989				case SIOP_NEG_MSGOUT:
990					esiop_update_scntl3(sc,
991					    esiop_cmd->cmd_c.siop_target);
992					esiop_table_sync(esiop_cmd,
993					    BUS_DMASYNC_PREREAD |
994					    BUS_DMASYNC_PREWRITE);
995					CALL_SCRIPT(Ent_send_msgout);
996					return 1;
997				case SIOP_NEG_ACK:
998					esiop_update_scntl3(sc,
999					    esiop_cmd->cmd_c.siop_target);
1000					CALL_SCRIPT(Ent_msgin_ack);
1001					return 1;
1002				default:
1003					panic("invalid retval from "
1004					    "siop_wdtr_neg()");
1005				}
1006				return 1;
1007			}
1008			if (esiop_cmd->cmd_tables->msg_in[2] == MSG_EXT_SDTR) {
1009				switch (siop_sdtr_neg(&esiop_cmd->cmd_c)) {
1010				case SIOP_NEG_MSGOUT:
1011					esiop_update_scntl3(sc,
1012					    esiop_cmd->cmd_c.siop_target);
1013					esiop_table_sync(esiop_cmd,
1014					    BUS_DMASYNC_PREREAD |
1015					    BUS_DMASYNC_PREWRITE);
1016					CALL_SCRIPT(Ent_send_msgout);
1017					return 1;
1018				case SIOP_NEG_ACK:
1019					esiop_update_scntl3(sc,
1020					    esiop_cmd->cmd_c.siop_target);
1021					CALL_SCRIPT(Ent_msgin_ack);
1022					return 1;
1023				default:
1024					panic("invalid retval from "
1025					    "siop_wdtr_neg()");
1026				}
1027				return 1;
1028			}
1029			/* send a message reject */
1030			esiop_cmd->cmd_tables->msg_out[0] = MSG_MESSAGE_REJECT;
1031			esiop_cmd->cmd_tables->t_msgout.count = htole32(1);
1032			esiop_table_sync(esiop_cmd,
1033			    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1034			CALL_SCRIPT(Ent_send_msgout);
1035			return 1;
1036		case A_int_disc:
1037			INCSTAT(esiop_stat_intr_sdp);
1038			offset = bus_space_read_1(sc->sc_c.sc_rt,
1039			    sc->sc_c.sc_rh, SIOP_SCRATCHA + 1);
1040#ifdef SIOP_DEBUG_DR
1041			printf("disconnect offset %d\n", offset);
1042#endif
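			/*
			 * the script saved in SCRATCHA1 the index of the first
			 * data table entry that still has data to transfer;
			 * shifting the remaining entries down to data[0] lets
			 * the data phase restart at the head of the table on
			 * reselection.
			 */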
1043			if (offset > SIOP_NSG) {
1044				printf("%s: bad offset for disconnect (%d)\n",
1045				    sc->sc_c.sc_dev.dv_xname, offset);
1046				goto reset;
1047			}
1048			/*
1049			 * offset == SIOP_NSG may be a valid condition if
1050			 * we get an sdp when the xfer is done.
1051			 * Don't call memmove in this case.
1052			 */
1053			if (offset < SIOP_NSG) {
1054				memmove(&esiop_cmd->cmd_tables->data[0],
1055				    &esiop_cmd->cmd_tables->data[offset],
1056				    (SIOP_NSG - offset) * sizeof(scr_table_t));
1057				esiop_table_sync(esiop_cmd,
1058				    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1059			}
1060			CALL_SCRIPT(Ent_script_sched);
1061			return 1;
1062		case A_int_resfail:
1063			printf("reselect failed\n");
1064			CALL_SCRIPT(Ent_script_sched);
1065			return 1;
1066		case A_int_done:
1067			if (xs == NULL) {
1068				printf("%s: done without command\n",
1069				    sc->sc_c.sc_dev.dv_xname);
1070				CALL_SCRIPT(Ent_script_sched);
1071				return 1;
1072			}
1073#ifdef SIOP_DEBUG_INTR
1074			printf("done, DSA=0x%lx target id 0x%x last msg "
1075			    "in=0x%x status=0x%x\n", (u_long)esiop_cmd->cmd_c.dsa,
1076			    le32toh(esiop_cmd->cmd_tables->id),
1077			    esiop_cmd->cmd_tables->msg_in[0],
1078			    le32toh(esiop_cmd->cmd_tables->status));
1079#endif
1080			INCSTAT(esiop_stat_intr_done);
1081			esiop_cmd->cmd_c.status = CMDST_DONE;
1082			goto end;
1083		default:
1084			printf("unknown irqcode %x\n", irqcode);
1085			if (xs) {
1086				xs->error = XS_SELTIMEOUT;
1087				goto end;
1088			}
1089			goto reset;
1090		}
1091		return 1;
1092	}
1093	/* We just shouldn't get here */
1094	panic("siop_intr: I shouldn't be there !");
1095
1096end:
1097	/*
1098	 * restart the script now if the command completed properly.
1099	 * Otherwise wait for esiop_scsicmd_end(); we may need to clean up the
1100	 * queue
1101	 */
1102	xs->status = le32toh(esiop_cmd->cmd_tables->status);
1103#ifdef SIOP_DEBUG_INTR
1104	printf("esiop_intr end: status %d\n", xs->status);
1105#endif
1106	if (tag >= 0)
1107		esiop_lun->tactive[tag] = NULL;
1108	else
1109		esiop_lun->active = NULL;
1110	esiop_scsicmd_end(esiop_cmd);
1111	if (freetarget && esiop_target->target_c.status == TARST_PROBING)
1112		esiop_del_dev(sc, target, lun);
1113	CALL_SCRIPT(Ent_script_sched);
1114	return 1;
1115}
1116
1117void
1118esiop_scsicmd_end(esiop_cmd)
1119	struct esiop_cmd *esiop_cmd;
1120{
1121	struct scsipi_xfer *xs = esiop_cmd->cmd_c.xs;
1122	struct esiop_softc *sc = (struct esiop_softc *)esiop_cmd->cmd_c.siop_sc;
1123
1124	switch(xs->status) {
1125	case SCSI_OK:
1126		xs->error = XS_NOERROR;
1127		break;
1128	case SCSI_BUSY:
1129		xs->error = XS_BUSY;
1130		break;
1131	case SCSI_CHECK:
1132		xs->error = XS_BUSY;
1133		/* remove commands in the queue and scheduler */
1134		esiop_unqueue(sc, xs->xs_periph->periph_target,
1135		    xs->xs_periph->periph_lun);
1136		break;
1137	case SCSI_QUEUE_FULL:
1138		INCSTAT(esiop_stat_intr_qfull);
1139#ifdef SIOP_DEBUG
1140		printf("%s:%d:%d: queue full (tag %d)\n",
1141		    sc->sc_c.sc_dev.dv_xname,
1142		    xs->xs_periph->periph_target,
1143		    xs->xs_periph->periph_lun, esiop_cmd->cmd_c.tag);
1144#endif
1145		xs->error = XS_BUSY;
1146		break;
1147	case SCSI_SIOP_NOCHECK:
1148		/*
1149		 * don't check status, xs->error is already valid
1150		 */
1151		break;
1152	case SCSI_SIOP_NOSTATUS:
1153		/*
1154		 * the status byte was not updated, cmd was
1155		 * aborted
1156		 */
1157		xs->error = XS_SELTIMEOUT;
1158		break;
1159	default:
1160		scsipi_printaddr(xs->xs_periph);
1161		printf("invalid status code %d\n", xs->status);
1162		xs->error = XS_DRIVER_STUFFUP;
1163	}
1164	if (xs->xs_control & (XS_CTL_DATA_IN | XS_CTL_DATA_OUT)) {
1165		bus_dmamap_sync(sc->sc_c.sc_dmat,
1166		    esiop_cmd->cmd_c.dmamap_data, 0,
1167		    esiop_cmd->cmd_c.dmamap_data->dm_mapsize,
1168		    (xs->xs_control & XS_CTL_DATA_IN) ?
1169		    BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE);
1170		bus_dmamap_unload(sc->sc_c.sc_dmat,
1171		    esiop_cmd->cmd_c.dmamap_data);
1172	}
1173	bus_dmamap_unload(sc->sc_c.sc_dmat, esiop_cmd->cmd_c.dmamap_cmd);
1174	callout_stop(&esiop_cmd->cmd_c.xs->xs_callout);
1175	esiop_cmd->cmd_c.status = CMDST_FREE;
1176	TAILQ_INSERT_TAIL(&sc->free_list, esiop_cmd, next);
1177	xs->resid = 0;
1178	scsipi_done (xs);
1179}
1180
1181void
1182esiop_checkdone(sc)
1183	struct esiop_softc *sc;
1184{
1185	int target, lun, tag;
1186	struct esiop_target *esiop_target;
1187	struct esiop_lun *esiop_lun;
1188	struct esiop_cmd *esiop_cmd;
1189	u_int32_t slot;
1190	int needsync = 0;
1191	int status;
1192	u_int32_t sem;
1193
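	/*
	 * The script posts the T/L/Q word of each completed command into the
	 * sc_done_slot ring and sets A_sem_done in the in-script semaphore;
	 * scan the ring from sc_currdoneslot until a zero entry is found and
	 * finish the corresponding commands.
	 */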
1194	esiop_script_sync(sc, BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
1195	sem = esiop_script_read(sc, sc->sc_semoffset);
1196	esiop_script_write(sc, sc->sc_semoffset, sem & ~A_sem_done);
1197	if ((sc->sc_flags & SCF_CHAN_NOSLOT) && (sem & A_sem_start)) {
1198		/*
1199		 * at least one command has been started,
1200		 * so we should have free slots now
1201		 */
1202		sc->sc_flags &= ~SCF_CHAN_NOSLOT;
1203		scsipi_channel_thaw(&sc->sc_c.sc_chan, 1);
1204	}
1205	esiop_script_sync(sc, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1206
1207	if ((sem & A_sem_done) == 0) {
1208		/* no pending done command */
1209		return;
1210	}
1211
1212	bus_dmamap_sync(sc->sc_c.sc_dmat, sc->sc_done_map,
1213	    sc->sc_done_offset, A_ndone_slots * sizeof(u_int32_t),
1214	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
1215next:
1216	if (sc->sc_done_slot[sc->sc_currdoneslot] == 0) {
1217		if (needsync)
1218			bus_dmamap_sync(sc->sc_c.sc_dmat, sc->sc_done_map,
1219			    sc->sc_done_offset,
1220			    A_ndone_slots * sizeof(u_int32_t),
1221			    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1222		return;
1223	}
1224
1225	needsync = 1;
1226
1227	slot = htole32(sc->sc_done_slot[sc->sc_currdoneslot]);
1228	sc->sc_done_slot[sc->sc_currdoneslot] = 0;
1229	sc->sc_currdoneslot += 1;
1230	if (sc->sc_currdoneslot == A_ndone_slots)
1231		sc->sc_currdoneslot = 0;
1232
1233	target =  (slot & A_f_c_target) ? (slot >> 8) & 0xff : -1;
1234	lun =  (slot & A_f_c_lun) ? (slot >> 16) & 0xff : -1;
1235	tag =  (slot & A_f_c_tag) ? (slot >> 24) & 0xff : -1;
1236
1237	esiop_target = (target >= 0) ?
1238	    (struct esiop_target *)sc->sc_c.targets[target] : NULL;
1239	if (esiop_target == NULL) {
1240		printf("esiop_target (target %d) not valid\n", target);
1241		goto next;
1242	}
1243	esiop_lun = (lun >= 0) ? esiop_target->esiop_lun[lun] : NULL;
1244	if (esiop_lun == NULL) {
1245		printf("esiop_lun (target %d lun %d) not valid\n",
1246		    target, lun);
1247		goto next;
1248	}
1249	esiop_cmd = (tag >= 0) ? esiop_lun->tactive[tag] : esiop_lun->active;
1250	if (esiop_cmd == NULL) {
1251		printf("esiop_cmd (target %d lun %d tag %d) not valid\n",
1252		    target, lun, tag);
1253		goto next;
1254	}
1255
1256	esiop_table_sync(esiop_cmd,
1257		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
1258	status = le32toh(esiop_cmd->cmd_tables->status);
1259#ifdef DIAGNOSTIC
1260	if (status != SCSI_OK) {
1261		printf("command for T/L/Q %d/%d/%d status %d\n",
1262		    target, lun, tag, status);
1263		goto next;
1264	}
1265
1266#endif
1267	/* Ok, this command has been handled */
1268	esiop_cmd->cmd_c.xs->status = status;
1269	if (tag >= 0)
1270		esiop_lun->tactive[tag] = NULL;
1271	else
1272		esiop_lun->active = NULL;
1273	esiop_scsicmd_end(esiop_cmd);
1274	goto next;
1275}
1276
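/*
 * Remove commands for the given target/lun that are still sitting in the
 * scheduler ring (not yet started by the script): mark their slots with
 * A_f_cmd_ignore and complete them with XS_REQUEUE so the upper layer will
 * resubmit them later.
 */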
1277void
1278esiop_unqueue(sc, target, lun)
1279	struct esiop_softc *sc;
1280	int target;
1281	int lun;
1282{
1283 	int slot, tag;
1284	u_int32_t slotdsa;
1285	struct esiop_cmd *esiop_cmd;
1286	struct esiop_lun *esiop_lun =
1287	    ((struct esiop_target *)sc->sc_c.targets[target])->esiop_lun[lun];
1288
1289	/* first make sure to read valid data */
1290	esiop_script_sync(sc, BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
1291
1292	for (tag = 0; tag < ESIOP_NTAG; tag++) {
1293		/* look for commands in the scheduler, not yet started */
1294		if (esiop_lun->tactive[tag] == NULL)
1295			continue;
1296		esiop_cmd = esiop_lun->tactive[tag];
1297		for (slot = 0; slot < A_ncmd_slots; slot++) {
1298			slotdsa = esiop_script_read(sc,
1299			    sc->sc_shedoffset + slot * CMD_SLOTSIZE);
1300			/* if the slot has any flag, it won't match the DSA */
1301			if (slotdsa == esiop_cmd->cmd_c.dsa) { /* found it */
1302				/* Mark this slot as ignore */
1303				esiop_script_write(sc,
1304				    sc->sc_shedoffset + slot * CMD_SLOTSIZE,
1305				    esiop_cmd->cmd_c.dsa | A_f_cmd_ignore);
1306				/* ask to requeue */
1307				esiop_cmd->cmd_c.xs->error = XS_REQUEUE;
1308				esiop_cmd->cmd_c.xs->status = SCSI_SIOP_NOCHECK;
1309				esiop_lun->tactive[tag] = NULL;
1310				esiop_scsicmd_end(esiop_cmd);
1311				break;
1312			}
1313		}
1314	}
1315	esiop_script_sync(sc, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1316}
1317
1318/*
1319 * handle a rejected queue tag message: the command will run untagged,
1320 * so we have to adjust the reselect script.
1321 */
1322
1323
1324int
1325esiop_handle_qtag_reject(esiop_cmd)
1326	struct esiop_cmd *esiop_cmd;
1327{
1328	struct esiop_softc *sc = (struct esiop_softc *)esiop_cmd->cmd_c.siop_sc;
1329	int target = esiop_cmd->cmd_c.xs->xs_periph->periph_target;
1330	int lun = esiop_cmd->cmd_c.xs->xs_periph->periph_lun;
1331	int tag = esiop_cmd->cmd_tables->msg_out[2];
1332	struct esiop_target *esiop_target =
1333	    (struct esiop_target*)sc->sc_c.targets[target];
1334	struct esiop_lun *esiop_lun = esiop_target->esiop_lun[lun];
1335
1336#ifdef SIOP_DEBUG
1337	printf("%s:%d:%d: tag message %d (%d) rejected (status %d)\n",
1338	    sc->sc_c.sc_dev.dv_xname, target, lun, tag, esiop_cmd->cmd_c.tag,
1339	    esiop_cmd->cmd_c.status);
1340#endif
1341
1342	if (esiop_lun->active != NULL) {
1343		printf("%s: untagged command already running for target %d "
1344		    "lun %d (status %d)\n", sc->sc_c.sc_dev.dv_xname,
1345		    target, lun, esiop_lun->active->cmd_c.status);
1346		return -1;
1347	}
1348	/* clear tag slot */
1349	esiop_lun->tactive[tag] = NULL;
1350	/* add command to non-tagged slot */
1351	esiop_lun->active = esiop_cmd;
1352	esiop_cmd->cmd_c.flags &= ~CMDFL_TAG;
1353	esiop_cmd->cmd_c.tag = -1;
1354	/* update DSA table */
1355	esiop_script_write(sc, esiop_target->lun_table_offset +
1356	    lun * 2 + A_target_luntbl / sizeof(u_int32_t),
1357	    esiop_cmd->cmd_c.dsa);
1358	esiop_script_sync(sc, BUS_DMASYNC_PREREAD |  BUS_DMASYNC_PREWRITE);
1359	return 0;
1360}
1361
1362/*
1363 * handle a bus reset: reset the chip, unqueue all active commands, free all
1364 * target structs and report the loss to the upper layer.
1365 * As the upper layer may requeue immediately we have to first store
1366 * all active commands in a temporary queue.
1367 */
1368void
1369esiop_handle_reset(sc)
1370	struct esiop_softc *sc;
1371{
1372	struct esiop_cmd *esiop_cmd;
1373	struct esiop_lun *esiop_lun;
1374	int target, lun, tag;
1375	/*
1376	 * scsi bus reset. reset the chip and restart
1377	 * the queue. Need to clean up all active commands
1378	 */
1379	printf("%s: scsi bus reset\n", sc->sc_c.sc_dev.dv_xname);
1380	/* stop, reset and restart the chip */
1381	esiop_reset(sc);
1382
1383	if (sc->sc_flags & SCF_CHAN_NOSLOT) {
1384		/* chip has been reset, all slots are free now */
1385		sc->sc_flags &= ~SCF_CHAN_NOSLOT;
1386		scsipi_channel_thaw(&sc->sc_c.sc_chan, 1);
1387	}
1388	/*
1389	 * Process all commands: first the completed commands, then the
1390	 * commands still being executed
1391	 */
1392	esiop_checkdone(sc);
1393	for (target = 0; target < sc->sc_c.sc_chan.chan_ntargets;
1394	    target++) {
1395		struct esiop_target *esiop_target =
1396		    (struct esiop_target *)sc->sc_c.targets[target];
1397		if (esiop_target == NULL)
1398			continue;
1399		for (lun = 0; lun < 8; lun++) {
1400			esiop_lun = esiop_target->esiop_lun[lun];
1401			if (esiop_lun == NULL)
1402				continue;
1403			for (tag = -1; tag <
1404			    ((sc->sc_c.targets[target]->flags & TARF_TAG) ?
1405			    ESIOP_NTAG : 0);
1406			    tag++) {
1407				if (tag >= 0)
1408					esiop_cmd = esiop_lun->tactive[tag];
1409				else
1410					esiop_cmd = esiop_lun->active;
1411				if (esiop_cmd == NULL)
1412					continue;
1413				scsipi_printaddr(esiop_cmd->cmd_c.xs->xs_periph);
1414				printf("command with tag id %d reset\n", tag);
1415				esiop_cmd->cmd_c.xs->error =
1416				    (esiop_cmd->cmd_c.flags & CMDFL_TIMEOUT) ?
1417				    XS_TIMEOUT : XS_RESET;
1418				esiop_cmd->cmd_c.xs->status = SCSI_SIOP_NOCHECK;
1419				if (tag >= 0)
1420					esiop_lun->tactive[tag] = NULL;
1421				else
1422					esiop_lun->active = NULL;
1423				esiop_cmd->cmd_c.status = CMDST_DONE;
1424				esiop_scsicmd_end(esiop_cmd);
1425			}
1426		}
1427		sc->sc_c.targets[target]->status = TARST_ASYNC;
1428		sc->sc_c.targets[target]->flags &= ~(TARF_ISWIDE | TARF_ISDT);
1429		sc->sc_c.targets[target]->period =
1430		    sc->sc_c.targets[target]->offset = 0;
1431		siop_update_xfer_mode(&sc->sc_c, target);
1432	}
1433
1434	scsipi_async_event(&sc->sc_c.sc_chan, ASYNC_EVENT_RESET, NULL);
1435}
1436
1437void
1438esiop_scsipi_request(chan, req, arg)
1439	struct scsipi_channel *chan;
1440	scsipi_adapter_req_t req;
1441	void *arg;
1442{
1443	struct scsipi_xfer *xs;
1444	struct scsipi_periph *periph;
1445	struct esiop_softc *sc = (void *)chan->chan_adapter->adapt_dev;
1446	struct esiop_cmd *esiop_cmd;
1447	struct esiop_target *esiop_target;
1448	int s, error, i;
1449	int target;
1450	int lun;
1451
1452	switch (req) {
1453	case ADAPTER_REQ_RUN_XFER:
1454		xs = arg;
1455		periph = xs->xs_periph;
1456		target = periph->periph_target;
1457		lun = periph->periph_lun;
1458
1459		s = splbio();
1460		/*
1461		 * first check if there are pending completed commands.
1462		 * this can free us some resources (in the rings for example).
1463		 * the SCF_CHAN_ADAPTREQ flag guards against recursing here.
1464		 */
1465		if ((sc->sc_flags & SCF_CHAN_ADAPTREQ) == 0) {
1466			sc->sc_flags |= SCF_CHAN_ADAPTREQ;
1467			esiop_checkdone(sc);
1468			sc->sc_flags &= ~SCF_CHAN_ADAPTREQ;
1469		}
1470#ifdef SIOP_DEBUG_SCHED
1471		printf("starting cmd for %d:%d tag %d(%d)\n", target, lun,
1472		    xs->xs_tag_type, xs->xs_tag_id);
1473#endif
1474		esiop_cmd = TAILQ_FIRST(&sc->free_list);
1475		if (esiop_cmd == NULL) {
1476			xs->error = XS_RESOURCE_SHORTAGE;
1477			scsipi_done(xs);
1478			splx(s);
1479			return;
1480		}
1481		TAILQ_REMOVE(&sc->free_list, esiop_cmd, next);
1482#ifdef DIAGNOSTIC
1483		if (esiop_cmd->cmd_c.status != CMDST_FREE)
1484			panic("siop_scsicmd: new cmd not free");
1485#endif
1486		esiop_target = (struct esiop_target*)sc->sc_c.targets[target];
1487		if (esiop_target == NULL) {
1488#ifdef SIOP_DEBUG
1489			printf("%s: alloc siop_target for target %d\n",
1490				sc->sc_c.sc_dev.dv_xname, target);
1491#endif
1492			sc->sc_c.targets[target] =
1493			    malloc(sizeof(struct esiop_target),
1494				M_DEVBUF, M_NOWAIT | M_ZERO);
1495			if (sc->sc_c.targets[target] == NULL) {
1496				printf("%s: can't malloc memory for "
1497				    "target %d\n", sc->sc_c.sc_dev.dv_xname,
1498				    target);
1499				xs->error = XS_RESOURCE_SHORTAGE;
1500				scsipi_done(xs);
1501				splx(s);
1502				return;
1503			}
1504			esiop_target =
1505			    (struct esiop_target*)sc->sc_c.targets[target];
1506			esiop_target->target_c.status = TARST_PROBING;
1507			esiop_target->target_c.flags = 0;
1508			esiop_target->target_c.id =
1509			    sc->sc_c.clock_div << 24; /* scntl3 */
1510			esiop_target->target_c.id |=  target << 16; /* id */
1511			/* esiop_target->target_c.id |= 0x0 << 8; scxfer is 0 */
1512
1513			for (i=0; i < 8; i++)
1514				esiop_target->esiop_lun[i] = NULL;
1515			esiop_target_register(sc, target);
1516		}
1517		if (esiop_target->esiop_lun[lun] == NULL) {
1518			esiop_target->esiop_lun[lun] =
1519			    malloc(sizeof(struct esiop_lun), M_DEVBUF,
1520			    M_NOWAIT|M_ZERO);
1521			if (esiop_target->esiop_lun[lun] == NULL) {
1522				printf("%s: can't alloc esiop_lun for "
1523				    "target %d lun %d\n",
1524				    sc->sc_c.sc_dev.dv_xname, target, lun);
1525				xs->error = XS_RESOURCE_SHORTAGE;
1526				scsipi_done(xs);
1527				splx(s);
1528				return;
1529			}
1530		}
1531		esiop_cmd->cmd_c.siop_target = sc->sc_c.targets[target];
1532		esiop_cmd->cmd_c.xs = xs;
1533		esiop_cmd->cmd_c.flags = 0;
1534		esiop_cmd->cmd_c.status = CMDST_READY;
1535
1536		/* load the DMA maps */
1537		error = bus_dmamap_load(sc->sc_c.sc_dmat,
1538		    esiop_cmd->cmd_c.dmamap_cmd,
1539		    xs->cmd, xs->cmdlen, NULL, BUS_DMA_NOWAIT);
1540		if (error) {
1541			printf("%s: unable to load cmd DMA map: %d\n",
1542			    sc->sc_c.sc_dev.dv_xname, error);
1543			xs->error = XS_DRIVER_STUFFUP;
1544			scsipi_done(xs);
1545			splx(s);
1546			return;
1547		}
1548		if (xs->xs_control & (XS_CTL_DATA_IN | XS_CTL_DATA_OUT)) {
1549			error = bus_dmamap_load(sc->sc_c.sc_dmat,
1550			    esiop_cmd->cmd_c.dmamap_data, xs->data, xs->datalen,
1551			    NULL, BUS_DMA_NOWAIT | BUS_DMA_STREAMING |
1552			    ((xs->xs_control & XS_CTL_DATA_IN) ?
1553			     BUS_DMA_READ : BUS_DMA_WRITE));
1554			if (error) {
1555				printf("%s: unable to load cmd DMA map: %d",
1556				    sc->sc_c.sc_dev.dv_xname, error);
1557				xs->error = XS_DRIVER_STUFFUP;
1558				scsipi_done(xs);
1559				bus_dmamap_unload(sc->sc_c.sc_dmat,
1560				    esiop_cmd->cmd_c.dmamap_cmd);
1561				splx(s);
1562				return;
1563			}
1564			bus_dmamap_sync(sc->sc_c.sc_dmat,
1565			    esiop_cmd->cmd_c.dmamap_data, 0,
1566			    esiop_cmd->cmd_c.dmamap_data->dm_mapsize,
1567			    (xs->xs_control & XS_CTL_DATA_IN) ?
1568			    BUS_DMASYNC_PREREAD : BUS_DMASYNC_PREWRITE);
1569		}
1570		bus_dmamap_sync(sc->sc_c.sc_dmat, esiop_cmd->cmd_c.dmamap_cmd,
1571		    0, esiop_cmd->cmd_c.dmamap_cmd->dm_mapsize,
1572		    BUS_DMASYNC_PREWRITE);
1573
1574		if (xs->xs_tag_type)
1575			esiop_cmd->cmd_c.tag = xs->xs_tag_id;
1576		else
1577			esiop_cmd->cmd_c.tag = -1;
1578		siop_setuptables(&esiop_cmd->cmd_c);
1579		((struct esiop_xfer *)esiop_cmd->cmd_tables)->tlq =
1580		    htole32(A_f_c_target | A_f_c_lun);
1581		((struct esiop_xfer *)esiop_cmd->cmd_tables)->tlq |=
1582		    htole32((target << 8) | (lun << 16));
1583		if (esiop_cmd->cmd_c.flags & CMDFL_TAG) {
1584			((struct esiop_xfer *)esiop_cmd->cmd_tables)->tlq |=
1585			    htole32(A_f_c_tag);
1586			((struct esiop_xfer *)esiop_cmd->cmd_tables)->tlq |=
1587			    htole32(esiop_cmd->cmd_c.tag << 24);
1588		}
1589
1590		esiop_table_sync(esiop_cmd,
1591		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1592		esiop_start(sc, esiop_cmd);
1593		if (xs->xs_control & XS_CTL_POLL) {
1594			/* poll for command completion */
1595			while ((xs->xs_status & XS_STS_DONE) == 0) {
1596				delay(1000);
1597				esiop_intr(sc);
1598			}
1599		}
1600		splx(s);
1601		return;
1602
1603	case ADAPTER_REQ_GROW_RESOURCES:
1604#ifdef SIOP_DEBUG
1605		printf("%s grow resources (%d)\n", sc->sc_c.sc_dev.dv_xname,
1606		    sc->sc_c.sc_adapt.adapt_openings);
1607#endif
1608		esiop_morecbd(sc);
1609		return;
1610
1611	case ADAPTER_REQ_SET_XFER_MODE:
1612	{
1613		struct scsipi_xfer_mode *xm = arg;
1614		if (sc->sc_c.targets[xm->xm_target] == NULL)
1615			return;
1616		s = splbio();
1617		if ((xm->xm_mode & PERIPH_CAP_TQING) &&
1618		    (sc->sc_c.targets[xm->xm_target]->flags & TARF_TAG) == 0) {
1619			sc->sc_c.targets[xm->xm_target]->flags |= TARF_TAG;
1620			/* allocate tag tables for this device */
1621			for (lun = 0;
1622			    lun < sc->sc_c.sc_chan.chan_nluns; lun++) {
1623				if (scsipi_lookup_periph(chan,
1624				    xm->xm_target, lun) != NULL)
1625					esiop_add_dev(sc, xm->xm_target, lun);
1626			}
1627		}
1628		if ((xm->xm_mode & PERIPH_CAP_WIDE16) &&
1629		    (sc->sc_c.features & SF_BUS_WIDE))
1630			sc->sc_c.targets[xm->xm_target]->flags |= TARF_WIDE;
1631		if (xm->xm_mode & PERIPH_CAP_SYNC)
1632			sc->sc_c.targets[xm->xm_target]->flags |= TARF_SYNC;
1633		if ((xm->xm_mode & PERIPH_CAP_DT) &&
1634		    (sc->sc_c.features & SF_CHIP_DT))
1635			sc->sc_c.targets[xm->xm_target]->flags |= TARF_DT;
1636		if ((xm->xm_mode &
1637		    (PERIPH_CAP_SYNC | PERIPH_CAP_WIDE16 | PERIPH_CAP_DT)) ||
1638		    sc->sc_c.targets[xm->xm_target]->status == TARST_PROBING)
1639			sc->sc_c.targets[xm->xm_target]->status = TARST_ASYNC;
1640
1641		splx(s);
1642	}
1643	}
1644}
1645
1646static void
1647esiop_start(sc, esiop_cmd)
1648	struct esiop_softc *sc;
1649	struct esiop_cmd *esiop_cmd;
1650{
1651	struct esiop_lun *esiop_lun;
1652	struct esiop_target *esiop_target;
1653	int timeout;
1654	int target, lun, slot;
1655
1656	/*
1657	 * first make sure to read valid data
1658	 */
1659	esiop_script_sync(sc, BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
1660
1661	/*
1662	 * We use a circular queue here. sc->sc_currschedslot points to a
1663	 * free slot, unless we have filled the queue. Check this.
1664	 */
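	/*
	 * A free slot contains A_f_cmd_free; a busy slot contains the
	 * command's DSA (possibly with A_f_cmd_ignore set by esiop_unqueue()).
	 */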
1665	slot = sc->sc_currschedslot;
1666	if ((esiop_script_read(sc, sc->sc_shedoffset + slot * CMD_SLOTSIZE) &
1667	    A_f_cmd_free) == 0) {
1668		/*
1669		 * no more free slots, no need to continue. freeze the queue
1670		 * and requeue this command.
1671		 */
1672		scsipi_channel_freeze(&sc->sc_c.sc_chan, 1);
1673		sc->sc_flags |= SCF_CHAN_NOSLOT;
1674		esiop_script_sync(sc,
1675		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
1676		esiop_script_write(sc, sc->sc_semoffset,
1677		    esiop_script_read(sc, sc->sc_semoffset) & ~A_sem_start);
1678		esiop_script_sync(sc,
1679		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
1680		esiop_cmd->cmd_c.xs->error = XS_REQUEUE;
1681		esiop_cmd->cmd_c.xs->status = SCSI_SIOP_NOCHECK;
1682		esiop_scsicmd_end(esiop_cmd);
1683		return;
1684	}
1685	/* OK, we can use this slot */
1686
1687	target = esiop_cmd->cmd_c.xs->xs_periph->periph_target;
1688	lun = esiop_cmd->cmd_c.xs->xs_periph->periph_lun;
1689	esiop_target = (struct esiop_target*)sc->sc_c.targets[target];
1690	esiop_lun = esiop_target->esiop_lun[lun];
1691	/* if non-tagged command active, panic: this shouldn't happen */
1692	if (esiop_lun->active != NULL) {
1693		panic("esiop_start: tagged cmd while untagged running");
1694	}
1695#ifdef DIAGNOSTIC
1696	/* sanity check the tag if needed */
1697	if (esiop_cmd->cmd_c.flags & CMDFL_TAG) {
1698		if (esiop_lun->tactive[esiop_cmd->cmd_c.tag] != NULL)
1699			panic("esiop_start: tag not free");
1700		if (esiop_cmd->cmd_c.tag >= ESIOP_NTAG ||
1701		    esiop_cmd->cmd_c.tag < 0) {
1702			scsipi_printaddr(esiop_cmd->cmd_c.xs->xs_periph);
1703			printf(": tag id %d\n", esiop_cmd->cmd_c.tag);
1704			panic("esiop_start: invalid tag id");
1705		}
1706	}
1707#endif
1708#ifdef SIOP_DEBUG_SCHED
1709	printf("using slot %d for DSA 0x%lx\n", slot,
1710	    (u_long)esiop_cmd->cmd_c.dsa);
1711#endif
1712	/* mark command as active */
1713	if (esiop_cmd->cmd_c.status == CMDST_READY)
1714		esiop_cmd->cmd_c.status = CMDST_ACTIVE;
1715	else
1716		panic("esiop_start: bad status");
1717	/* DSA table for reselect */
1718	if (esiop_cmd->cmd_c.flags & CMDFL_TAG) {
1719		esiop_lun->tactive[esiop_cmd->cmd_c.tag] = esiop_cmd;
1720		/* enter the DSA in this LUN's tag table, indexed by tag */
1721		esiop_lun->lun_tagtbl->tbl[esiop_cmd->cmd_c.tag] =
1722		    htole32(esiop_cmd->cmd_c.dsa);
1723		bus_dmamap_sync(sc->sc_c.sc_dmat,
1724		    esiop_lun->lun_tagtbl->tblblk->blkmap,
1725		    esiop_lun->lun_tagtbl->tbl_offset,
1726		    sizeof(u_int32_t) * ESIOP_NTAG, BUS_DMASYNC_PREWRITE);
1727	} else {
1728		esiop_lun->active = esiop_cmd;
1729		esiop_script_write(sc,
1730		    esiop_target->lun_table_offset +
1731		    lun * 2 + A_target_luntbl / sizeof(u_int32_t),
1732		    esiop_cmd->cmd_c.dsa);
1733	}
1734	/* scheduler slot: DSA */
1735	esiop_script_write(sc, sc->sc_shedoffset + slot * CMD_SLOTSIZE,
1736	    esiop_cmd->cmd_c.dsa);
1737	/* make sure SCRIPT processor will read valid data */
1738	esiop_script_sync(sc, BUS_DMASYNC_PREREAD |  BUS_DMASYNC_PREWRITE);
1739	/* handle timeout */
1740	if ((esiop_cmd->cmd_c.xs->xs_control & XS_CTL_POLL) == 0) {
1741		/* start expire timer */
1742		timeout = mstohz(esiop_cmd->cmd_c.xs->timeout);
1743		if (timeout == 0)
1744			timeout = 1;
1745		callout_reset(&esiop_cmd->cmd_c.xs->xs_callout,
1746		    timeout, esiop_timeout, esiop_cmd);
1747	}
1748	/* Signal script it has some work to do */
1749	bus_space_write_1(sc->sc_c.sc_rt, sc->sc_c.sc_rh,
1750	    SIOP_ISTAT, ISTAT_SIGP);
1751	/* update the current slot, and wait for IRQ */
1752	sc->sc_currschedslot++;
1753	if (sc->sc_currschedslot >= A_ncmd_slots)
1754		sc->sc_currschedslot = 0;
1755	return;
1756}
1757
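/*
 * Per-command timeout handler: print some state, reset the SCSI bus and
 * flag the command as timed out; the cleanup itself is done from the
 * interrupt generated by the bus reset.
 */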
1758void
1759esiop_timeout(v)
1760	void *v;
1761{
1762	struct esiop_cmd *esiop_cmd = v;
1763	struct esiop_softc *sc =
1764	    (struct esiop_softc *)esiop_cmd->cmd_c.siop_sc;
1765	int s;
1766#ifdef SIOP_DEBUG
1767	int slot, slotdsa;
1768#endif
1769
1770	s = splbio();
1771	esiop_table_sync(esiop_cmd,
1772	    BUS_DMASYNC_POSTREAD |
1773	    BUS_DMASYNC_POSTWRITE);
1774	scsipi_printaddr(esiop_cmd->cmd_c.xs->xs_periph);
1775#ifdef SIOP_DEBUG
1776	printf("command timeout (status %d)\n", le32toh(esiop_cmd->cmd_tables->status));
1777
1778	esiop_script_sync(sc, BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
1779	for (slot = 0; slot < A_ncmd_slots; slot++) {
1780		slotdsa = esiop_script_read(sc,
1781		    sc->sc_shedoffset + slot * CMD_SLOTSIZE);
1782		if ((slotdsa & 0x01) == 0)
1783			printf("slot %d not free (0x%x)\n", slot, slotdsa);
1784	}
1785	printf("istat 0x%x ", bus_space_read_1(sc->sc_c.sc_rt, sc->sc_c.sc_rh, SIOP_ISTAT));
1786	printf("DSP 0x%lx DSA 0x%x\n",
1787	    (u_long)(bus_space_read_4(sc->sc_c.sc_rt, sc->sc_c.sc_rh, SIOP_DSP) - sc->sc_c.sc_scriptaddr),
1788	    bus_space_read_4(sc->sc_c.sc_rt, sc->sc_c.sc_rh, SIOP_DSA));
1789	bus_space_read_1(sc->sc_c.sc_rt, sc->sc_c.sc_rh, SIOP_CTEST2);
1790	printf("istat 0x%x\n", bus_space_read_1(sc->sc_c.sc_rt, sc->sc_c.sc_rh, SIOP_ISTAT));
1791#else
1792	printf("command timeout\n");
1793#endif
1794	/* reset the scsi bus */
1795	siop_resetbus(&sc->sc_c);
1796
1797	/* deactivate callout */
1798	callout_stop(&esiop_cmd->cmd_c.xs->xs_callout);
1799	/*
1800	 * mark the command as having timed out and just return;
1801	 * the bus reset will generate an interrupt,
1802	 * which will be handled in esiop_intr()
1803	 */
1804	esiop_cmd->cmd_c.flags |= CMDFL_TIMEOUT;
1805	splx(s);
1806	return;
1807
1808}
1809
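/*
 * Dump the first page of the script: two 32-bit words per instruction,
 * plus a third word for memory-to-memory move instructions
 * (opcode bits 0xc0000000).
 */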
1810void
1811esiop_dump_script(sc)
1812	struct esiop_softc *sc;
1813{
1814	int i;
1815	for (i = 0; i < PAGE_SIZE / 4; i += 2) {
1816		printf("0x%04x: 0x%08x 0x%08x", i * 4,
1817		    le32toh(sc->sc_c.sc_script[i]),
1818		    le32toh(sc->sc_c.sc_script[i+1]));
1819		if ((le32toh(sc->sc_c.sc_script[i]) & 0xe0000000) ==
1820		    0xc0000000) {
1821			i++;
1822			printf(" 0x%08x", le32toh(sc->sc_c.sc_script[i+1]));
1823		}
1824		printf("\n");
1825	}
1826}
1827
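/*
 * Grow the command descriptor pool: allocate a DMA-safe page of
 * esiop_xfer tables and the per-command DMA maps, preset the fixed
 * message/status table entries, queue the new commands on the free
 * list and credit SIOP_NCMDPB more openings to the adapter.
 */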
1828void
1829esiop_morecbd(sc)
1830	struct esiop_softc *sc;
1831{
1832	int error, i, s;
1833	bus_dma_segment_t seg;
1834	int rseg;
1835	struct esiop_cbd *newcbd;
1836	struct esiop_xfer *xfer;
1837	bus_addr_t dsa;
1838
1839	/* allocate a new list head */
1840	newcbd = malloc(sizeof(struct esiop_cbd), M_DEVBUF, M_NOWAIT|M_ZERO);
1841	if (newcbd == NULL) {
1842		printf("%s: can't allocate memory for command descriptors "
1843		    "head\n", sc->sc_c.sc_dev.dv_xname);
1844		return;
1845	}
1846
1847	/* allocate cmd list */
1848	newcbd->cmds = malloc(sizeof(struct esiop_cmd) * SIOP_NCMDPB,
1849	    M_DEVBUF, M_NOWAIT|M_ZERO);
1850	if (newcbd->cmds == NULL) {
1851		printf("%s: can't allocate memory for command descriptors\n",
1852		    sc->sc_c.sc_dev.dv_xname);
1853		goto bad3;
1854	}
1855	error = bus_dmamem_alloc(sc->sc_c.sc_dmat, PAGE_SIZE, PAGE_SIZE, 0,
1856	    &seg, 1, &rseg, BUS_DMA_NOWAIT);
1857	if (error) {
1858		printf("%s: unable to allocate cbd DMA memory, error = %d\n",
1859		    sc->sc_c.sc_dev.dv_xname, error);
1860		goto bad2;
1861	}
1862	error = bus_dmamem_map(sc->sc_c.sc_dmat, &seg, rseg, PAGE_SIZE,
1863	    (caddr_t *)&newcbd->xfers, BUS_DMA_NOWAIT|BUS_DMA_COHERENT);
1864	if (error) {
1865		printf("%s: unable to map cbd DMA memory, error = %d\n",
1866		    sc->sc_c.sc_dev.dv_xname, error);
1867		goto bad1;
1868	}
1869	error = bus_dmamap_create(sc->sc_c.sc_dmat, PAGE_SIZE, 1, PAGE_SIZE, 0,
1870	    BUS_DMA_NOWAIT, &newcbd->xferdma);
1871	if (error) {
1872		printf("%s: unable to create cbd DMA map, error = %d\n",
1873		    sc->sc_c.sc_dev.dv_xname, error);
1874		goto bad1;
1875	}
1876	error = bus_dmamap_load(sc->sc_c.sc_dmat, newcbd->xferdma,
1877	    newcbd->xfers, PAGE_SIZE, NULL, BUS_DMA_NOWAIT);
1878	if (error) {
1879		printf("%s: unable to load cbd DMA map, error = %d\n",
1880		    sc->sc_c.sc_dev.dv_xname, error);
1881		goto bad0;
1882	}
1883#ifdef DEBUG
1884	printf("%s: alloc newcbd at PHY addr 0x%lx\n", sc->sc_c.sc_dev.dv_xname,
1885	    (unsigned long)newcbd->xferdma->dm_segs[0].ds_addr);
1886#endif
1887	for (i = 0; i < SIOP_NCMDPB; i++) {
1888		error = bus_dmamap_create(sc->sc_c.sc_dmat, MAXPHYS, SIOP_NSG,
1889		    MAXPHYS, 0, BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW,
1890		    &newcbd->cmds[i].cmd_c.dmamap_data);
1891		if (error) {
1892			printf("%s: unable to create data DMA map for cbd: "
1893			    "error %d\n",
1894			    sc->sc_c.sc_dev.dv_xname, error);
1895			goto bad0;
1896		}
1897		error = bus_dmamap_create(sc->sc_c.sc_dmat,
1898		    sizeof(struct scsipi_generic), 1,
1899		    sizeof(struct scsipi_generic), 0,
1900		    BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW,
1901		    &newcbd->cmds[i].cmd_c.dmamap_cmd);
1902		if (error) {
1903			printf("%s: unable to create cmd DMA map, error = %d\n",
1904			    sc->sc_c.sc_dev.dv_xname, error);
1905			goto bad0;
1906		}
1907		newcbd->cmds[i].cmd_c.siop_sc = &sc->sc_c;
1908		newcbd->cmds[i].esiop_cbdp = newcbd;
1909		xfer = &newcbd->xfers[i];
1910		newcbd->cmds[i].cmd_tables = (struct siop_common_xfer *)xfer;
1911		memset(newcbd->cmds[i].cmd_tables, 0,
1912		    sizeof(struct esiop_xfer));
1913		dsa = newcbd->xferdma->dm_segs[0].ds_addr +
1914		    i * sizeof(struct esiop_xfer);
1915		newcbd->cmds[i].cmd_c.dsa = dsa;
1916		newcbd->cmds[i].cmd_c.status = CMDST_FREE;
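		/*
		 * preset the message and status table entries: they all
		 * point into this command's siop_common_xfer (via its DSA),
		 * so the script can DMA messages and status without
		 * further address setup.
		 */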
1917		xfer->siop_tables.t_msgout.count = htole32(1);
1918		xfer->siop_tables.t_msgout.addr = htole32(dsa);
1919		xfer->siop_tables.t_msgin.count = htole32(1);
1920		xfer->siop_tables.t_msgin.addr = htole32(dsa +
1921			offsetof(struct siop_common_xfer, msg_in));
1922		xfer->siop_tables.t_extmsgin.count = htole32(2);
1923		xfer->siop_tables.t_extmsgin.addr = htole32(dsa +
1924			offsetof(struct siop_common_xfer, msg_in) + 1);
1925		xfer->siop_tables.t_extmsgdata.addr = htole32(dsa +
1926			offsetof(struct siop_common_xfer, msg_in) + 3);
1927		xfer->siop_tables.t_status.count = htole32(1);
1928		xfer->siop_tables.t_status.addr = htole32(dsa +
1929			offsetof(struct siop_common_xfer, status));
1930
1931		s = splbio();
1932		TAILQ_INSERT_TAIL(&sc->free_list, &newcbd->cmds[i], next);
1933		splx(s);
1934#ifdef SIOP_DEBUG
1935		printf("tables[%d]: in=0x%x out=0x%x status=0x%x\n", i,
1936		    le32toh(newcbd->cmds[i].cmd_tables->t_msgin.addr),
1937		    le32toh(newcbd->cmds[i].cmd_tables->t_msgout.addr),
1938		    le32toh(newcbd->cmds[i].cmd_tables->t_status.addr));
1939#endif
1940	}
1941	s = splbio();
1942	TAILQ_INSERT_TAIL(&sc->cmds, newcbd, next);
1943	sc->sc_c.sc_adapt.adapt_openings += SIOP_NCMDPB;
1944	splx(s);
1945	return;
1946bad0:
1947	bus_dmamap_unload(sc->sc_c.sc_dmat, newcbd->xferdma);
1948	bus_dmamap_destroy(sc->sc_c.sc_dmat, newcbd->xferdma);
1949bad1:
1950	bus_dmamem_free(sc->sc_c.sc_dmat, &seg, rseg);
1951bad2:
1952	free(newcbd->cmds, M_DEVBUF);
1953bad3:
1954	free(newcbd, M_DEVBUF);
1955	return;
1956}
1957
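/*
 * Grow the pool of tag DSA tables: allocate a DMA-safe page, carve it
 * into ESIOP_NTPB tables of ESIOP_NTAG 32-bit entries and queue them on
 * free_tagtbl, from which esiop_add_dev() assigns one per LUN.
 */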
1958void
1959esiop_moretagtbl(sc)
1960	struct esiop_softc *sc;
1961{
1962	int error, i, j, s;
1963	bus_dma_segment_t seg;
1964	int rseg;
1965	struct esiop_dsatblblk *newtblblk;
1966	struct esiop_dsatbl *newtbls;
1967	u_int32_t *tbls;
1968
1969	/* allocate a new list head */
1970	newtblblk = malloc(sizeof(struct esiop_dsatblblk),
1971	    M_DEVBUF, M_NOWAIT|M_ZERO);
1972	if (newtblblk == NULL) {
1973		printf("%s: can't allocate memory for tag DSA table block\n",
1974		    sc->sc_c.sc_dev.dv_xname);
1975		return;
1976	}
1977
1978	/* allocate tbl list */
1979	newtbls = malloc(sizeof(struct esiop_dsatbl) * ESIOP_NTPB,
1980	    M_DEVBUF, M_NOWAIT|M_ZERO);
1981	if (newtbls == NULL) {
1982		printf("%s: can't allocate memory for command descriptors\n",
1983		printf("%s: can't allocate memory for tag DSA tables\n",
1984		goto bad3;
1985	}
1986	error = bus_dmamem_alloc(sc->sc_c.sc_dmat, PAGE_SIZE, PAGE_SIZE, 0,
1987	    &seg, 1, &rseg, BUS_DMA_NOWAIT);
1988	if (error) {
1989		printf("%s: unable to allocate tbl DMA memory, error = %d\n",
1990		    sc->sc_c.sc_dev.dv_xname, error);
1991		goto bad2;
1992	}
1993	error = bus_dmamem_map(sc->sc_c.sc_dmat, &seg, rseg, PAGE_SIZE,
1994	    (void *)&tbls, BUS_DMA_NOWAIT|BUS_DMA_COHERENT);
1995	if (error) {
1996		printf("%s: unable to map tbls DMA memory, error = %d\n",
1997		    sc->sc_c.sc_dev.dv_xname, error);
1998		goto bad1;
1999	}
2000	error = bus_dmamap_create(sc->sc_c.sc_dmat, PAGE_SIZE, 1, PAGE_SIZE, 0,
2001	    BUS_DMA_NOWAIT, &newtblblk->blkmap);
2002	if (error) {
2003		printf("%s: unable to create tbl DMA map, error = %d\n",
2004		    sc->sc_c.sc_dev.dv_xname, error);
2005		goto bad1;
2006	}
2007	error = bus_dmamap_load(sc->sc_c.sc_dmat, newtblblk->blkmap,
2008	    tbls, PAGE_SIZE, NULL, BUS_DMA_NOWAIT);
2009	if (error) {
2010		printf("%s: unable to load tbl DMA map, error = %d\n",
2011		    sc->sc_c.sc_dev.dv_xname, error);
2012		goto bad0;
2013	}
2014#ifdef DEBUG
2015	printf("%s: alloc new tag DSA table at PHY addr 0x%lx\n",
2016	    sc->sc_c.sc_dev.dv_xname,
2017	    (unsigned long)newtblblk->blkmap->dm_segs[0].ds_addr);
2018#endif
2019	for (i = 0; i < ESIOP_NTPB; i++) {
2020		newtbls[i].tblblk = newtblblk;
2021		newtbls[i].tbl = &tbls[i * ESIOP_NTAG];
2022		newtbls[i].tbl_offset = i * ESIOP_NTAG * sizeof(u_int32_t);
2023		newtbls[i].tbl_dsa = newtblblk->blkmap->dm_segs[0].ds_addr +
2024		    newtbls[i].tbl_offset;
2025		for (j = 0; j < ESIOP_NTAG; j++)
2026			newtbls[i].tbl[j] = j;
2027		s = splbio();
2028		TAILQ_INSERT_TAIL(&sc->free_tagtbl, &newtbls[i], next);
2029		splx(s);
2030	}
2031	s = splbio();
2032	TAILQ_INSERT_TAIL(&sc->tag_tblblk, newtblblk, next);
2033	splx(s);
2034	return;
2035bad0:
2036	bus_dmamap_unload(sc->sc_c.sc_dmat, newtblblk->blkmap);
2037	bus_dmamap_destroy(sc->sc_c.sc_dmat, newtblblk->blkmap);
2038bad1:
2039	bus_dmamem_free(sc->sc_c.sc_dmat, &seg, rseg);
2040bad2:
2041	free(newtbls, M_DEVBUF);
2042bad3:
2043	free(newtblblk, M_DEVBUF);
2044	return;
2045}
2046
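/*
 * The first word of a target's LUN table holds the target's id word
 * (which, as set up by the common siop code, also carries the
 * scntl3/sxfer timing values); rewrite it so the script uses the newly
 * negotiated parameters on the next (re)select.
 */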
2047void
2048esiop_update_scntl3(sc, _siop_target)
2049	struct esiop_softc *sc;
2050	struct siop_common_target *_siop_target;
2051{
2052	struct esiop_target *esiop_target = (struct esiop_target *)_siop_target;
2053	esiop_script_write(sc, esiop_target->lun_table_offset,
2054	    esiop_target->target_c.id);
2055	esiop_script_sync(sc, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2056}
2057
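/*
 * Enable tagged queuing for a LUN: attach a free tag DSA table
 * (allocating more if needed) and publish its bus address in the
 * target's LUN table for the script.
 */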
2058void
2059esiop_add_dev(sc, target, lun)
2060	struct esiop_softc *sc;
2061	int target;
2062	int lun;
2063{
2064	struct esiop_target *esiop_target =
2065	    (struct esiop_target *)sc->sc_c.targets[target];
2066	struct esiop_lun *esiop_lun = esiop_target->esiop_lun[lun];
2067
2068	/* we need a tag DSA table */
2069	esiop_lun->lun_tagtbl = TAILQ_FIRST(&sc->free_tagtbl);
2070	if (esiop_lun->lun_tagtbl == NULL) {
2071		esiop_moretagtbl(sc);
2072		esiop_lun->lun_tagtbl = TAILQ_FIRST(&sc->free_tagtbl);
2073		if (esiop_lun->lun_tagtbl == NULL) {
2074			/* no resources, run untagged */
2075			esiop_target->target_c.flags &= ~TARF_TAG;
2076			return;
2077		}
2078	}
2079	TAILQ_REMOVE(&sc->free_tagtbl, esiop_lun->lun_tagtbl, next);
2080	/* Update LUN DSA table */
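	/*
	 * each LUN has two entries in the target's LUN table: the DSA of
	 * the untagged command (A_target_luntbl) and the bus address of
	 * its tag DSA table (A_target_luntbl_tag), hence the lun * 2
	 * indexing below.
	 */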
2081	esiop_script_write(sc, esiop_target->lun_table_offset +
2082	   lun * 2 + A_target_luntbl_tag / sizeof(u_int32_t),
2083	    esiop_lun->lun_tagtbl->tbl_dsa);
2084	esiop_script_sync(sc, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2085}
2086
2087void
2088esiop_del_dev(sc, target, lun)
2089	struct esiop_softc *sc;
2090	int target;
2091	int lun;
2092{
2093	struct esiop_target *esiop_target;
2094#ifdef SIOP_DEBUG
2095		printf("%s:%d:%d: free lun sw entry\n",
2096		    sc->sc_c.sc_dev.dv_xname, target, lun);
2097#endif
2098	if (sc->sc_c.targets[target] == NULL)
2099		return;
2100	esiop_target = (struct esiop_target *)sc->sc_c.targets[target];
2101	free(esiop_target->esiop_lun[lun], M_DEVBUF);
2102	esiop_target->esiop_lun[lun] = NULL;
2103}
2104
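/*
 * Allocate this target's LUN table in script memory and record its bus
 * address in the global target DSA table; also re-register any tag DSA
 * tables already attached to its LUNs.
 */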
2105void
2106esiop_target_register(sc, target)
2107	struct esiop_softc *sc;
2108	u_int32_t target;
2109{
2110	struct esiop_target *esiop_target =
2111	    (struct esiop_target *)sc->sc_c.targets[target];
2112	struct esiop_lun *esiop_lun;
2113	int lun;
2114
2115	/* get a DSA table for this target */
2116	esiop_target->lun_table_offset = sc->sc_free_offset;
2117	sc->sc_free_offset += sc->sc_c.sc_chan.chan_nluns * 2 + 2;
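	/*
	 * the table starts with the target's id word, followed by two
	 * entries per LUN: the untagged command DSA and the tag DSA
	 * table pointer.
	 */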
2118#ifdef SIOP_DEBUG
2119	printf("%s: lun table for target %d offset %d free offset %d\n",
2120	    sc->sc_c.sc_dev.dv_xname, target, esiop_target->lun_table_offset,
2121	    sc->sc_free_offset);
2122#endif
2123	/* the first 32-bit word is the ID (for select) */
2124	esiop_script_write(sc, esiop_target->lun_table_offset,
2125	    esiop_target->target_c.id);
2126	/* Record this table in the target DSA table */
2127	esiop_script_write(sc,
2128	    sc->sc_target_table_offset + target,
2129	    (esiop_target->lun_table_offset * sizeof(u_int32_t)) +
2130	    sc->sc_c.sc_scriptaddr);
2131	/* if we have a tag table, register it */
2132	for (lun = 0; lun < sc->sc_c.sc_chan.chan_nluns; lun++) {
2133		esiop_lun = esiop_target->esiop_lun[lun];
2134		if (esiop_lun == NULL)
2135			continue;
2136		if (esiop_lun->lun_tagtbl)
2137			esiop_script_write(sc, esiop_target->lun_table_offset +
2138			   lun * 2 + A_target_luntbl_tag / sizeof(u_int32_t),
2139			    esiop_lun->lun_tagtbl->tbl_dsa);
2140	}
2141	esiop_script_sync(sc,
2142	    BUS_DMASYNC_PREREAD |  BUS_DMASYNC_PREWRITE);
2143}
2144
2145#ifdef SIOP_STATS
2146void
2147esiop_printstats()
2148{
2149	printf("esiop_stat_intr %d\n", esiop_stat_intr);
2150	printf("esiop_stat_intr_shortxfer %d\n", esiop_stat_intr_shortxfer);
2151	printf("esiop_stat_intr_xferdisc %d\n", esiop_stat_intr_xferdisc);
2152	printf("esiop_stat_intr_sdp %d\n", esiop_stat_intr_sdp);
2153	printf("esiop_stat_intr_done %d\n", esiop_stat_intr_done);
2154	printf("esiop_stat_intr_lunresel %d\n", esiop_stat_intr_lunresel);
2155	printf("esiop_stat_intr_qfull %d\n", esiop_stat_intr_qfull);
2156}
2157#endif
2158