1/*	$NetBSD$	*/
2
3/*
4 * Copyright (c) 2002 Manuel Bouyer.
5 *
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions
8 * are met:
9 * 1. Redistributions of source code must retain the above copyright
10 *    notice, this list of conditions and the following disclaimer.
11 * 2. Redistributions in binary form must reproduce the above copyright
12 *    notice, this list of conditions and the following disclaimer in the
13 *    documentation and/or other materials provided with the distribution.
14 *
15 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
16 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
17 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
18 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
19 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
20 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
21 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
22 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
23 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
24 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
25 *
26 */
27
28/* SYM53c7/8xx PCI-SCSI I/O Processors driver */
29
30#include <sys/cdefs.h>
31__KERNEL_RCSID(0, "$NetBSD$");
32
33#include <sys/param.h>
34#include <sys/systm.h>
35#include <sys/device.h>
36#include <sys/malloc.h>
37#include <sys/buf.h>
38#include <sys/kernel.h>
39
40#include <machine/endian.h>
41#include <sys/bus.h>
42
43#include <dev/microcode/siop/esiop.out>
44
45#include <dev/scsipi/scsi_all.h>
46#include <dev/scsipi/scsi_message.h>
47#include <dev/scsipi/scsipi_all.h>
48
49#include <dev/scsipi/scsiconf.h>
50
51#include <dev/ic/siopreg.h>
52#include <dev/ic/siopvar_common.h>
53#include <dev/ic/esiopvar.h>
54
55#include "opt_siop.h"
56
57/*
58#define SIOP_DEBUG
59#define SIOP_DEBUG_DR
60#define SIOP_DEBUG_INTR
61#define SIOP_DEBUG_SCHED
62#define SIOP_DUMP_SCRIPT
63*/
64
65#define SIOP_STATS
66
67#ifndef SIOP_DEFAULT_TARGET
68#define SIOP_DEFAULT_TARGET 7
69#endif
70
71/* number of cmd descriptors per block */
72#define SIOP_NCMDPB (PAGE_SIZE / sizeof(struct esiop_xfer))
73
74void	esiop_reset(struct esiop_softc *);
75void	esiop_checkdone(struct esiop_softc *);
76void	esiop_handle_reset(struct esiop_softc *);
77void	esiop_scsicmd_end(struct esiop_cmd *, int);
78void	esiop_unqueue(struct esiop_softc *, int, int);
79int	esiop_handle_qtag_reject(struct esiop_cmd *);
80static void	esiop_start(struct esiop_softc *, struct esiop_cmd *);
81void	esiop_timeout(void *);
82void	esiop_scsipi_request(struct scsipi_channel *,
83			scsipi_adapter_req_t, void *);
84void	esiop_dump_script(struct esiop_softc *);
85void	esiop_morecbd(struct esiop_softc *);
86void	esiop_moretagtbl(struct esiop_softc *);
87void	siop_add_reselsw(struct esiop_softc *, int);
88void	esiop_target_register(struct esiop_softc *, uint32_t);
89
90void    esiop_update_scntl3(struct esiop_softc *, struct siop_common_target *);
91
92#ifdef SIOP_STATS
93static int esiop_stat_intr = 0;
94static int esiop_stat_intr_shortxfer = 0;
95static int esiop_stat_intr_sdp = 0;
96static int esiop_stat_intr_done = 0;
97static int esiop_stat_intr_xferdisc = 0;
98static int esiop_stat_intr_lunresel = 0;
99static int esiop_stat_intr_qfull = 0;
100void esiop_printstats(void);
101#define INCSTAT(x) x++
102#else
103#define INCSTAT(x)
104#endif
105
106static inline void esiop_script_sync(struct esiop_softc *, int);
107static inline void
108esiop_script_sync(struct esiop_softc *sc, int ops)
109{
110
111	if ((sc->sc_c.features & SF_CHIP_RAM) == 0)
112		bus_dmamap_sync(sc->sc_c.sc_dmat, sc->sc_c.sc_scriptdma, 0,
113		    PAGE_SIZE, ops);
114}
115
116static inline uint32_t esiop_script_read(struct esiop_softc *, u_int);
117static inline uint32_t
118esiop_script_read(struct esiop_softc *sc, u_int offset)
119{
120
121	if (sc->sc_c.features & SF_CHIP_RAM) {
122		return bus_space_read_4(sc->sc_c.sc_ramt, sc->sc_c.sc_ramh,
123		    offset * 4);
124	} else {
125		return le32toh(sc->sc_c.sc_script[offset]);
126	}
127}
128
129static inline void esiop_script_write(struct esiop_softc *, u_int,
130	uint32_t);
131static inline void
132esiop_script_write(struct esiop_softc *sc, u_int offset, uint32_t val)
133{
134
135	if (sc->sc_c.features & SF_CHIP_RAM) {
136		bus_space_write_4(sc->sc_c.sc_ramt, sc->sc_c.sc_ramh,
137		    offset * 4, val);
138	} else {
139		sc->sc_c.sc_script[offset] = htole32(val);
140	}
141}
142
143void
144esiop_attach(struct esiop_softc *sc)
145{
146	struct esiop_dsatbl *tagtbl_donering;
147
148	if (siop_common_attach(&sc->sc_c) != 0 )
149		return;
150
151	TAILQ_INIT(&sc->free_list);
152	TAILQ_INIT(&sc->cmds);
153	TAILQ_INIT(&sc->free_tagtbl);
154	TAILQ_INIT(&sc->tag_tblblk);
155	sc->sc_currschedslot = 0;
156#ifdef SIOP_DEBUG
157	aprint_debug_dev(sc->sc_c.sc_dev,
158	    "script size = %d, PHY addr=0x%x, VIRT=%p\n",
159	    (int)sizeof(esiop_script),
160	    (uint32_t)sc->sc_c.sc_scriptaddr, sc->sc_c.sc_script);
161#endif
162
163	sc->sc_c.sc_adapt.adapt_max_periph = ESIOP_NTAG;
164	sc->sc_c.sc_adapt.adapt_request = esiop_scsipi_request;
165
166	/*
167	 * get space for the CMD done slot. For this we use a tag table entry.
168	 * It's the same size and allows us to not waste 3/4 of a page
169	 */
170#ifdef DIAGNOSTIC
171	if (ESIOP_NTAG != A_ndone_slots) {
172		aprint_error_dev(sc->sc_c.sc_dev,
173		     "size of tag DSA table different from the done ring\n");
174		return;
175	}
176#endif
177	esiop_moretagtbl(sc);
178	tagtbl_donering = TAILQ_FIRST(&sc->free_tagtbl);
179	if (tagtbl_donering == NULL) {
180		aprint_error_dev(sc->sc_c.sc_dev,
181		    "no memory for command done ring\n");
182		return;
183	}
184	TAILQ_REMOVE(&sc->free_tagtbl, tagtbl_donering, next);
185	sc->sc_done_map = tagtbl_donering->tblblk->blkmap;
186	sc->sc_done_offset = tagtbl_donering->tbl_offset;
187	sc->sc_done_slot = &tagtbl_donering->tbl[0];
188
189	/* Do a bus reset, so that devices fall back to narrow/async */
190	siop_resetbus(&sc->sc_c);
191	/*
192	 * siop_reset() will reset the chip, thus clearing pending interrupts
193	 */
194	esiop_reset(sc);
195#ifdef SIOP_DUMP_SCRIPT
196	esiop_dump_script(sc);
197#endif
198
199	config_found(sc->sc_c.sc_dev, &sc->sc_c.sc_chan, scsiprint);
200}
201
202void
203esiop_reset(struct esiop_softc *sc)
204{
205	int i, j;
206	uint32_t addr;
207	uint32_t msgin_addr, sem_addr;
208
209	siop_common_reset(&sc->sc_c);
210
211	/*
212	 * we copy the script at the beggining of RAM. Then there is 4 bytes
213	 * for messages in, and 4 bytes for semaphore
214	 */
215	sc->sc_free_offset = __arraycount(esiop_script);
216	msgin_addr =
217	    sc->sc_free_offset * sizeof(uint32_t) + sc->sc_c.sc_scriptaddr;
218	sc->sc_free_offset += 1;
219	sc->sc_semoffset = sc->sc_free_offset;
220	sem_addr =
221	    sc->sc_semoffset * sizeof(uint32_t) + sc->sc_c.sc_scriptaddr;
222	sc->sc_free_offset += 1;
223	/* then we have the scheduler ring */
224	sc->sc_shedoffset = sc->sc_free_offset;
225	sc->sc_free_offset += A_ncmd_slots * CMD_SLOTSIZE;
226	/* then the targets DSA table */
227	sc->sc_target_table_offset = sc->sc_free_offset;
228	sc->sc_free_offset += sc->sc_c.sc_chan.chan_ntargets;
229	/* copy and patch the script */
230	if (sc->sc_c.features & SF_CHIP_RAM) {
231		bus_space_write_region_4(sc->sc_c.sc_ramt, sc->sc_c.sc_ramh, 0,
232		    esiop_script,
233		    __arraycount(esiop_script));
234		for (j = 0; j < __arraycount(E_tlq_offset_Used); j++) {
235			bus_space_write_4(sc->sc_c.sc_ramt, sc->sc_c.sc_ramh,
236			    E_tlq_offset_Used[j] * 4,
237			    sizeof(struct siop_common_xfer));
238		}
239		for (j = 0; j < __arraycount(E_saved_offset_offset_Used); j++) {
240			bus_space_write_4(sc->sc_c.sc_ramt, sc->sc_c.sc_ramh,
241			    E_saved_offset_offset_Used[j] * 4,
242			    sizeof(struct siop_common_xfer) + 4);
243		}
244		for (j = 0; j < __arraycount(E_abs_msgin2_Used); j++) {
245			bus_space_write_4(sc->sc_c.sc_ramt, sc->sc_c.sc_ramh,
246			    E_abs_msgin2_Used[j] * 4, msgin_addr);
247		}
248		for (j = 0; j < __arraycount(E_abs_sem_Used); j++) {
249			bus_space_write_4(sc->sc_c.sc_ramt, sc->sc_c.sc_ramh,
250			    E_abs_sem_Used[j] * 4, sem_addr);
251		}
252
253		if (sc->sc_c.features & SF_CHIP_LED0) {
254			bus_space_write_region_4(sc->sc_c.sc_ramt,
255			    sc->sc_c.sc_ramh,
256			    Ent_led_on1, esiop_led_on,
257			    __arraycount(esiop_led_on));
258			bus_space_write_region_4(sc->sc_c.sc_ramt,
259			    sc->sc_c.sc_ramh,
260			    Ent_led_on2, esiop_led_on,
261			    __arraycount(esiop_led_on));
262			bus_space_write_region_4(sc->sc_c.sc_ramt,
263			    sc->sc_c.sc_ramh,
264			    Ent_led_off, esiop_led_off,
265			    __arraycount(esiop_led_off));
266		}
267	} else {
268		for (j = 0; j < __arraycount(esiop_script); j++) {
269			sc->sc_c.sc_script[j] = htole32(esiop_script[j]);
270		}
271		for (j = 0; j < __arraycount(E_tlq_offset_Used); j++) {
272			sc->sc_c.sc_script[E_tlq_offset_Used[j]] =
273			    htole32(sizeof(struct siop_common_xfer));
274		}
275		for (j = 0; j < __arraycount(E_saved_offset_offset_Used); j++) {
276			sc->sc_c.sc_script[E_saved_offset_offset_Used[j]] =
277			    htole32(sizeof(struct siop_common_xfer) + 4);
278		}
279		for (j = 0; j < __arraycount(E_abs_msgin2_Used); j++) {
280			sc->sc_c.sc_script[E_abs_msgin2_Used[j]] =
281			    htole32(msgin_addr);
282		}
283		for (j = 0; j < __arraycount(E_abs_sem_Used); j++) {
284			sc->sc_c.sc_script[E_abs_sem_Used[j]] =
285			    htole32(sem_addr);
286		}
287
288		if (sc->sc_c.features & SF_CHIP_LED0) {
289			for (j = 0; j < __arraycount(esiop_led_on); j++)
290				sc->sc_c.sc_script[
291				    Ent_led_on1 / sizeof(esiop_led_on[0]) + j
292				    ] = htole32(esiop_led_on[j]);
293			for (j = 0; j < __arraycount(esiop_led_on); j++)
294				sc->sc_c.sc_script[
295				    Ent_led_on2 / sizeof(esiop_led_on[0]) + j
296				    ] = htole32(esiop_led_on[j]);
297			for (j = 0; j < __arraycount(esiop_led_off); j++)
298				sc->sc_c.sc_script[
299				    Ent_led_off / sizeof(esiop_led_off[0]) + j
300				    ] = htole32(esiop_led_off[j]);
301		}
302	}
303	/* get base of scheduler ring */
304	addr = sc->sc_c.sc_scriptaddr + sc->sc_shedoffset * sizeof(uint32_t);
305	/* init scheduler */
306	for (i = 0; i < A_ncmd_slots; i++) {
307		esiop_script_write(sc,
308		    sc->sc_shedoffset + i * CMD_SLOTSIZE, A_f_cmd_free);
309	}
310	sc->sc_currschedslot = 0;
311	bus_space_write_1(sc->sc_c.sc_rt, sc->sc_c.sc_rh, SIOP_SCRATCHE, 0);
312	bus_space_write_4(sc->sc_c.sc_rt, sc->sc_c.sc_rh, SIOP_SCRATCHD, addr);
313	/*
314	 * 0x78000000 is a 'move data8 to reg'. data8 is the second
315	 * octet, reg offset is the third.
316	 */
317	esiop_script_write(sc, Ent_cmdr0 / 4,
318	    0x78640000 | ((addr & 0x000000ff) <<  8));
319	esiop_script_write(sc, Ent_cmdr1 / 4,
320	    0x78650000 | ((addr & 0x0000ff00)      ));
321	esiop_script_write(sc, Ent_cmdr2 / 4,
322	    0x78660000 | ((addr & 0x00ff0000) >>  8));
323	esiop_script_write(sc, Ent_cmdr3 / 4,
324	    0x78670000 | ((addr & 0xff000000) >> 16));
325	/* done ring */
326	for (i = 0; i < A_ndone_slots; i++)
327		sc->sc_done_slot[i] = 0;
328	bus_dmamap_sync(sc->sc_c.sc_dmat, sc->sc_done_map,
329	    sc->sc_done_offset, A_ndone_slots * sizeof(uint32_t),
330	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
331	addr = sc->sc_done_map->dm_segs[0].ds_addr + sc->sc_done_offset;
332	sc->sc_currdoneslot = 0;
333	bus_space_write_1(sc->sc_c.sc_rt, sc->sc_c.sc_rh, SIOP_SCRATCHE + 2, 0);
334	bus_space_write_4(sc->sc_c.sc_rt, sc->sc_c.sc_rh, SIOP_SCRATCHF, addr);
335	esiop_script_write(sc, Ent_doner0 / 4,
336	    0x786c0000 | ((addr & 0x000000ff) <<  8));
337	esiop_script_write(sc, Ent_doner1 / 4,
338	    0x786d0000 | ((addr & 0x0000ff00)      ));
339	esiop_script_write(sc, Ent_doner2 / 4,
340	    0x786e0000 | ((addr & 0x00ff0000) >>  8));
341	esiop_script_write(sc, Ent_doner3 / 4,
342	    0x786f0000 | ((addr & 0xff000000) >> 16));
343
344	/* set flags */
345	bus_space_write_4(sc->sc_c.sc_rt, sc->sc_c.sc_rh, SIOP_SCRATCHC, 0);
346	/* write pointer of base of target DSA table */
347	addr = (sc->sc_target_table_offset * sizeof(uint32_t)) +
348	    sc->sc_c.sc_scriptaddr;
349	esiop_script_write(sc, (Ent_load_targtable / 4) + 0,
350	    esiop_script_read(sc,(Ent_load_targtable / 4) + 0) |
351	    ((addr & 0x000000ff) <<  8));
352	esiop_script_write(sc, (Ent_load_targtable / 4) + 2,
353	    esiop_script_read(sc,(Ent_load_targtable / 4) + 2) |
354	    ((addr & 0x0000ff00)      ));
355	esiop_script_write(sc, (Ent_load_targtable / 4) + 4,
356	    esiop_script_read(sc,(Ent_load_targtable / 4) + 4) |
357	    ((addr & 0x00ff0000) >>  8));
358	esiop_script_write(sc, (Ent_load_targtable / 4) + 6,
359	    esiop_script_read(sc,(Ent_load_targtable / 4) + 6) |
360	    ((addr & 0xff000000) >> 16));
361#ifdef SIOP_DEBUG
362	printf("%s: target table offset %d free offset %d\n",
363	    device_xname(sc->sc_c.sc_dev), sc->sc_target_table_offset,
364	    sc->sc_free_offset);
365#endif
366
367	/* register existing targets */
368	for (i = 0; i < sc->sc_c.sc_chan.chan_ntargets; i++) {
369		if (sc->sc_c.targets[i])
370			esiop_target_register(sc, i);
371	}
372	/* start script */
373	if ((sc->sc_c.features & SF_CHIP_RAM) == 0) {
374		bus_dmamap_sync(sc->sc_c.sc_dmat, sc->sc_c.sc_scriptdma, 0,
375		    PAGE_SIZE, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
376	}
377	bus_space_write_4(sc->sc_c.sc_rt, sc->sc_c.sc_rh, SIOP_DSP,
378	    sc->sc_c.sc_scriptaddr + Ent_reselect);
379}
380
381#if 0
382#define CALL_SCRIPT(ent) do {						\
383	printf ("start script DSA 0x%lx DSP 0x%lx\n",			\
384	    esiop_cmd->cmd_c.dsa,					\
385	    sc->sc_c.sc_scriptaddr + ent);				\
386	bus_space_write_4(sc->sc_c.sc_rt, sc->sc_c.sc_rh,		\
387	    SIOP_DSP, sc->sc_c.sc_scriptaddr + ent);			\
388} while (/* CONSTCOND */0)
389#else
390#define CALL_SCRIPT(ent) do {						\
391	bus_space_write_4(sc->sc_c.sc_rt, sc->sc_c.sc_rh,		\
392	    SIOP_DSP, sc->sc_c.sc_scriptaddr + ent);			\
393} while (/* CONSTCOND */0)
394#endif
395
396int
397esiop_intr(void *v)
398{
399	struct esiop_softc *sc = v;
400	struct esiop_target *esiop_target;
401	struct esiop_cmd *esiop_cmd;
402	struct esiop_lun *esiop_lun;
403	struct scsipi_xfer *xs;
404	int istat, sist, sstat1, dstat = 0; /* XXX: gcc */
405	uint32_t irqcode;
406	int need_reset = 0;
407	int offset, target, lun, tag;
408	uint32_t tflags;
409	uint32_t addr;
410	int freetarget = 0;
411	int slot;
412	int retval = 0;
413
414again:
415	istat = bus_space_read_1(sc->sc_c.sc_rt, sc->sc_c.sc_rh, SIOP_ISTAT);
416	if ((istat & (ISTAT_INTF | ISTAT_DIP | ISTAT_SIP)) == 0) {
417		return retval;
418	}
419	retval = 1;
420	INCSTAT(esiop_stat_intr);
421	esiop_checkdone(sc);
422	if (istat & ISTAT_INTF) {
423		bus_space_write_1(sc->sc_c.sc_rt, sc->sc_c.sc_rh,
424		    SIOP_ISTAT, ISTAT_INTF);
425		goto again;
426	}
427
428	if ((istat &(ISTAT_DIP | ISTAT_SIP | ISTAT_ABRT)) ==
429	    (ISTAT_DIP | ISTAT_ABRT)) {
430		/* clear abort */
431		bus_space_write_1(sc->sc_c.sc_rt, sc->sc_c.sc_rh,
432		    SIOP_ISTAT, 0);
433	}
434
435	/* get CMD from T/L/Q */
436	tflags = bus_space_read_4(sc->sc_c.sc_rt, sc->sc_c.sc_rh,
437	    SIOP_SCRATCHC);
438#ifdef SIOP_DEBUG_INTR
439		printf("interrupt, istat=0x%x tflags=0x%x "
440		    "DSA=0x%x DSP=0x%lx\n", istat, tflags,
441		    bus_space_read_4(sc->sc_c.sc_rt, sc->sc_c.sc_rh, SIOP_DSA),
442		    (u_long)(bus_space_read_4(sc->sc_c.sc_rt, sc->sc_c.sc_rh,
443			SIOP_DSP) -
444		    sc->sc_c.sc_scriptaddr));
445#endif
446	target = (tflags & A_f_c_target) ? ((tflags >> 8) & 0xff) : -1;
447	if (target > sc->sc_c.sc_chan.chan_ntargets) target = -1;
448	lun = (tflags & A_f_c_lun) ? ((tflags >> 16) & 0xff) : -1;
449	if (lun > sc->sc_c.sc_chan.chan_nluns) lun = -1;
450	tag = (tflags & A_f_c_tag) ? ((tflags >> 24) & 0xff) : -1;
451
452	if (target >= 0 && lun >= 0) {
453		esiop_target = (struct esiop_target *)sc->sc_c.targets[target];
454		if (esiop_target == NULL) {
455			printf("esiop_target (target %d) not valid\n", target);
456			goto none;
457		}
458		esiop_lun = esiop_target->esiop_lun[lun];
459		if (esiop_lun == NULL) {
460			printf("esiop_lun (target %d lun %d) not valid\n",
461			    target, lun);
462			goto none;
463		}
464		esiop_cmd =
465		    (tag >= 0) ? esiop_lun->tactive[tag] : esiop_lun->active;
466		if (esiop_cmd == NULL) {
467			printf("esiop_cmd (target %d lun %d tag %d)"
468			    " not valid\n",
469			    target, lun, tag);
470			goto none;
471		}
472		xs = esiop_cmd->cmd_c.xs;
473#ifdef DIAGNOSTIC
474		if (esiop_cmd->cmd_c.status != CMDST_ACTIVE) {
475			printf("esiop_cmd (target %d lun %d) "
476			    "not active (%d)\n", target, lun,
477			    esiop_cmd->cmd_c.status);
478			goto none;
479		}
480#endif
481		esiop_table_sync(esiop_cmd,
482		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
483	} else {
484none:
485		xs = NULL;
486		esiop_target = NULL;
487		esiop_lun = NULL;
488		esiop_cmd = NULL;
489	}
490	if (istat & ISTAT_DIP) {
491		dstat = bus_space_read_1(sc->sc_c.sc_rt, sc->sc_c.sc_rh,
492		    SIOP_DSTAT);
493		if (dstat & DSTAT_ABRT) {
494			/* was probably generated by a bus reset IOCTL */
495			if ((dstat & DSTAT_DFE) == 0)
496				siop_clearfifo(&sc->sc_c);
497			goto reset;
498		}
499		if (dstat & DSTAT_SSI) {
500			printf("single step dsp 0x%08x dsa 0x08%x\n",
501			    (int)(bus_space_read_4(sc->sc_c.sc_rt,
502			    sc->sc_c.sc_rh, SIOP_DSP) -
503			    sc->sc_c.sc_scriptaddr),
504			    bus_space_read_4(sc->sc_c.sc_rt, sc->sc_c.sc_rh,
505				SIOP_DSA));
506			if ((dstat & ~(DSTAT_DFE | DSTAT_SSI)) == 0 &&
507			    (istat & ISTAT_SIP) == 0) {
508				bus_space_write_1(sc->sc_c.sc_rt,
509				    sc->sc_c.sc_rh, SIOP_DCNTL,
510				    bus_space_read_1(sc->sc_c.sc_rt,
511				    sc->sc_c.sc_rh, SIOP_DCNTL) | DCNTL_STD);
512			}
513			return 1;
514		}
515
516		if (dstat & ~(DSTAT_SIR | DSTAT_DFE | DSTAT_SSI)) {
517		printf("%s: DMA IRQ:", device_xname(sc->sc_c.sc_dev));
518		if (dstat & DSTAT_IID)
519			printf(" Illegal instruction");
520		if (dstat & DSTAT_BF)
521			printf(" bus fault");
522		if (dstat & DSTAT_MDPE)
523			printf(" parity");
524		if (dstat & DSTAT_DFE)
525			printf(" DMA fifo empty");
526		else
527			siop_clearfifo(&sc->sc_c);
528		printf(", DSP=0x%x DSA=0x%x: ",
529		    (int)(bus_space_read_4(sc->sc_c.sc_rt, sc->sc_c.sc_rh,
530			SIOP_DSP) - sc->sc_c.sc_scriptaddr),
531		    bus_space_read_4(sc->sc_c.sc_rt, sc->sc_c.sc_rh, SIOP_DSA));
532		if (esiop_cmd)
533			printf("T/L/Q=%d/%d/%d last msg_in=0x%x status=0x%x\n",
534			    target, lun, tag, esiop_cmd->cmd_tables->msg_in[0],
535			    le32toh(esiop_cmd->cmd_tables->status));
536		else
537			printf(" current T/L/Q invalid\n");
538		need_reset = 1;
539		}
540	}
541	if (istat & ISTAT_SIP) {
542		if (istat & ISTAT_DIP)
543			delay(10);
544		/*
545		 * Can't read sist0 & sist1 independently, or we have to
546		 * insert delay
547		 */
548		sist = bus_space_read_2(sc->sc_c.sc_rt, sc->sc_c.sc_rh,
549		    SIOP_SIST0);
550		sstat1 = bus_space_read_1(sc->sc_c.sc_rt, sc->sc_c.sc_rh,
551		    SIOP_SSTAT1);
552#ifdef SIOP_DEBUG_INTR
553		printf("scsi interrupt, sist=0x%x sstat1=0x%x "
554		    "DSA=0x%x DSP=0x%lx\n", sist, sstat1,
555		    bus_space_read_4(sc->sc_c.sc_rt, sc->sc_c.sc_rh, SIOP_DSA),
556		    (u_long)(bus_space_read_4(sc->sc_c.sc_rt, sc->sc_c.sc_rh,
557			SIOP_DSP) -
558		    sc->sc_c.sc_scriptaddr));
559#endif
560		if (sist & SIST0_RST) {
561			esiop_handle_reset(sc);
562			/* no table to flush here */
563			return 1;
564		}
565		if (sist & SIST0_SGE) {
566			if (esiop_cmd)
567				scsipi_printaddr(xs->xs_periph);
568			else
569				printf("%s:", device_xname(sc->sc_c.sc_dev));
570			printf("scsi gross error\n");
571			if (esiop_target)
572				esiop_target->target_c.flags &= ~TARF_DT;
573#ifdef SIOP_DEBUG
574			printf("DSA=0x%x DSP=0x%lx\n",
575			    bus_space_read_4(sc->sc_c.sc_rt, sc->sc_c.sc_rh,
576			    SIOP_DSA),
577			    (u_long)(bus_space_read_4(sc->sc_c.sc_rt,
578				sc->sc_c.sc_rh, SIOP_DSP) -
579			    sc->sc_c.sc_scriptaddr));
580			printf("SDID 0x%x SCNTL3 0x%x SXFER 0x%x SCNTL4 0x%x\n",
581			    bus_space_read_1(sc->sc_c.sc_rt, sc->sc_c.sc_rh,
582			    SIOP_SDID),
583			    bus_space_read_1(sc->sc_c.sc_rt, sc->sc_c.sc_rh,
584			    SIOP_SCNTL3),
585			    bus_space_read_1(sc->sc_c.sc_rt, sc->sc_c.sc_rh,
586			    SIOP_SXFER),
587			    bus_space_read_1(sc->sc_c.sc_rt, sc->sc_c.sc_rh,
588			    SIOP_SCNTL4));
589
590#endif
591			goto reset;
592		}
593		if ((sist & SIST0_MA) && need_reset == 0) {
594			if (esiop_cmd) {
595				int scratchc0;
596				dstat = bus_space_read_1(sc->sc_c.sc_rt,
597				    sc->sc_c.sc_rh, SIOP_DSTAT);
598				/*
599				 * first restore DSA, in case we were in a S/G
600				 * operation.
601				 */
602				bus_space_write_4(sc->sc_c.sc_rt,
603				    sc->sc_c.sc_rh,
604				    SIOP_DSA, esiop_cmd->cmd_c.dsa);
605				scratchc0 = bus_space_read_1(sc->sc_c.sc_rt,
606				    sc->sc_c.sc_rh, SIOP_SCRATCHC);
607				switch (sstat1 & SSTAT1_PHASE_MASK) {
608				case SSTAT1_PHASE_STATUS:
609				/*
610				 * previous phase may be aborted for any reason
611				 * ( for example, the target has less data to
612				 * transfer than requested). Compute resid and
613				 * just go to status, the command should
614				 * terminate.
615				 */
616					INCSTAT(esiop_stat_intr_shortxfer);
617					if (scratchc0 & A_f_c_data)
618						siop_ma(&esiop_cmd->cmd_c);
619					else if ((dstat & DSTAT_DFE) == 0)
620						siop_clearfifo(&sc->sc_c);
621					CALL_SCRIPT(Ent_status);
622					return 1;
623				case SSTAT1_PHASE_MSGIN:
624				/*
625				 * target may be ready to disconnect
626				 * Compute resid which would be used later
627				 * if a save data pointer is needed.
628				 */
629					INCSTAT(esiop_stat_intr_xferdisc);
630					if (scratchc0 & A_f_c_data)
631						siop_ma(&esiop_cmd->cmd_c);
632					else if ((dstat & DSTAT_DFE) == 0)
633						siop_clearfifo(&sc->sc_c);
634					bus_space_write_1(sc->sc_c.sc_rt,
635					    sc->sc_c.sc_rh, SIOP_SCRATCHC,
636					    scratchc0 & ~A_f_c_data);
637					CALL_SCRIPT(Ent_msgin);
638					return 1;
639				}
640				aprint_error_dev(sc->sc_c.sc_dev,
641				    "unexpected phase mismatch %d\n",
642				    sstat1 & SSTAT1_PHASE_MASK);
643			} else {
644				aprint_error_dev(sc->sc_c.sc_dev,
645				    "phase mismatch without command\n");
646			}
647			need_reset = 1;
648		}
649		if (sist & SIST0_PAR) {
650			/* parity error, reset */
651			if (esiop_cmd)
652				scsipi_printaddr(xs->xs_periph);
653			else
654				printf("%s:", device_xname(sc->sc_c.sc_dev));
655			printf("parity error\n");
656			if (esiop_target)
657				esiop_target->target_c.flags &= ~TARF_DT;
658			goto reset;
659		}
660		if ((sist & (SIST1_STO << 8)) && need_reset == 0) {
661			/*
662			 * selection time out, assume there's no device here
663			 * We also have to update the ring pointer ourselve
664			 */
665			slot = bus_space_read_1(sc->sc_c.sc_rt,
666			    sc->sc_c.sc_rh, SIOP_SCRATCHE);
667			esiop_script_sync(sc,
668			    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
669#ifdef SIOP_DEBUG_SCHED
670			printf("sel timeout target %d, slot %d\n",
671			    target, slot);
672#endif
673			/*
674			 * mark this slot as free, and advance to next slot
675			 */
676			esiop_script_write(sc,
677			    sc->sc_shedoffset + slot * CMD_SLOTSIZE,
678			    A_f_cmd_free);
679			addr = bus_space_read_4(sc->sc_c.sc_rt,
680				    sc->sc_c.sc_rh, SIOP_SCRATCHD);
681			if (slot < (A_ncmd_slots - 1)) {
682				bus_space_write_1(sc->sc_c.sc_rt,
683				    sc->sc_c.sc_rh, SIOP_SCRATCHE, slot + 1);
684				addr = addr + sizeof(struct esiop_slot);
685			} else {
686				bus_space_write_1(sc->sc_c.sc_rt,
687				    sc->sc_c.sc_rh, SIOP_SCRATCHE, 0);
688				addr = sc->sc_c.sc_scriptaddr +
689				    sc->sc_shedoffset * sizeof(uint32_t);
690			}
691			bus_space_write_4(sc->sc_c.sc_rt, sc->sc_c.sc_rh,
692			    SIOP_SCRATCHD, addr);
693			esiop_script_sync(sc,
694			    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
695			if (esiop_cmd) {
696				esiop_cmd->cmd_c.status = CMDST_DONE;
697				xs->error = XS_SELTIMEOUT;
698				freetarget = 1;
699				goto end;
700			} else {
701				printf("%s: selection timeout without "
702				    "command, target %d (sdid 0x%x), "
703				    "slot %d\n",
704				    device_xname(sc->sc_c.sc_dev), target,
705				    bus_space_read_1(sc->sc_c.sc_rt,
706				    sc->sc_c.sc_rh, SIOP_SDID), slot);
707				need_reset = 1;
708			}
709		}
710		if (sist & SIST0_UDC) {
711			/*
712			 * unexpected disconnect. Usually the target signals
713			 * a fatal condition this way. Attempt to get sense.
714			 */
715			 if (esiop_cmd) {
716				esiop_cmd->cmd_tables->status =
717				    htole32(SCSI_CHECK);
718				goto end;
719			}
720			aprint_error_dev(sc->sc_c.sc_dev,
721			    "unexpected disconnect without command\n");
722			goto reset;
723		}
724		if (sist & (SIST1_SBMC << 8)) {
725			/* SCSI bus mode change */
726			if (siop_modechange(&sc->sc_c) == 0 || need_reset == 1)
727				goto reset;
728			if ((istat & ISTAT_DIP) && (dstat & DSTAT_SIR)) {
729				/*
730				 * we have a script interrupt, it will
731				 * restart the script.
732				 */
733				goto scintr;
734			}
735			/*
736			 * else we have to restart it ourselve, at the
737			 * interrupted instruction.
738			 */
739			bus_space_write_4(sc->sc_c.sc_rt, sc->sc_c.sc_rh,
740			    SIOP_DSP,
741			    bus_space_read_4(sc->sc_c.sc_rt, sc->sc_c.sc_rh,
742			    SIOP_DSP) - 8);
743			return 1;
744		}
745		/* Else it's an unhandled exception (for now). */
746		aprint_error_dev(sc->sc_c.sc_dev,
747		    "unhandled scsi interrupt, sist=0x%x sstat1=0x%x "
748		    "DSA=0x%x DSP=0x%x\n", sist,
749		    bus_space_read_1(sc->sc_c.sc_rt, sc->sc_c.sc_rh,
750			SIOP_SSTAT1),
751		    bus_space_read_4(sc->sc_c.sc_rt, sc->sc_c.sc_rh, SIOP_DSA),
752		    (int)(bus_space_read_4(sc->sc_c.sc_rt, sc->sc_c.sc_rh,
753			SIOP_DSP) - sc->sc_c.sc_scriptaddr));
754		if (esiop_cmd) {
755			esiop_cmd->cmd_c.status = CMDST_DONE;
756			xs->error = XS_SELTIMEOUT;
757			goto end;
758		}
759		need_reset = 1;
760	}
761	if (need_reset) {
762reset:
763		/* fatal error, reset the bus */
764		siop_resetbus(&sc->sc_c);
765		/* no table to flush here */
766		return 1;
767	}
768
769scintr:
770	if ((istat & ISTAT_DIP) && (dstat & DSTAT_SIR)) { /* script interrupt */
771		irqcode = bus_space_read_4(sc->sc_c.sc_rt, sc->sc_c.sc_rh,
772		    SIOP_DSPS);
773#ifdef SIOP_DEBUG_INTR
774		printf("script interrupt 0x%x\n", irqcode);
775#endif
776		/*
777		 * no command, or an inactive command is only valid for a
778		 * reselect interrupt
779		 */
780		if ((irqcode & 0x80) == 0) {
781			if (esiop_cmd == NULL) {
782				aprint_error_dev(sc->sc_c.sc_dev,
783			"script interrupt (0x%x) with invalid DSA !!!\n",
784				    irqcode);
785				goto reset;
786			}
787			if (esiop_cmd->cmd_c.status != CMDST_ACTIVE) {
788				aprint_error_dev(sc->sc_c.sc_dev,
789				    "command with invalid status "
790				    "(IRQ code 0x%x current status %d) !\n",
791				    irqcode, esiop_cmd->cmd_c.status);
792				xs = NULL;
793			}
794		}
795		switch(irqcode) {
796		case A_int_err:
797			printf("error, DSP=0x%x\n",
798			    (int)(bus_space_read_4(sc->sc_c.sc_rt,
799			    sc->sc_c.sc_rh, SIOP_DSP) -
800			    sc->sc_c.sc_scriptaddr));
801			if (xs) {
802				xs->error = XS_SELTIMEOUT;
803				goto end;
804			} else {
805				goto reset;
806			}
807		case A_int_msgin:
808		{
809			int msgin = bus_space_read_1(sc->sc_c.sc_rt,
810			    sc->sc_c.sc_rh, SIOP_SFBR);
811			if (msgin == MSG_MESSAGE_REJECT) {
812				int msg, extmsg;
813				if (esiop_cmd->cmd_tables->msg_out[0] & 0x80) {
814					/*
815					 * message was part of a identify +
816					 * something else. Identify shouldn't
817					 * have been rejected.
818					 */
819					msg =
820					    esiop_cmd->cmd_tables->msg_out[1];
821					extmsg =
822					    esiop_cmd->cmd_tables->msg_out[3];
823				} else {
824					msg =
825					    esiop_cmd->cmd_tables->msg_out[0];
826					extmsg =
827					    esiop_cmd->cmd_tables->msg_out[2];
828				}
829				if (msg == MSG_MESSAGE_REJECT) {
830					/* MSG_REJECT  for a MSG_REJECT  !*/
831					if (xs)
832						scsipi_printaddr(xs->xs_periph);
833					else
834						printf("%s: ", device_xname(
835						    sc->sc_c.sc_dev));
836					printf("our reject message was "
837					    "rejected\n");
838					goto reset;
839				}
840				if (msg == MSG_EXTENDED &&
841				    extmsg == MSG_EXT_WDTR) {
842					/* WDTR rejected, initiate sync */
843					if ((esiop_target->target_c.flags &
844					   TARF_SYNC) == 0) {
845						esiop_target->target_c.status =
846						    TARST_OK;
847						siop_update_xfer_mode(&sc->sc_c,
848						    target);
849						/* no table to flush here */
850						CALL_SCRIPT(Ent_msgin_ack);
851						return 1;
852					}
853					esiop_target->target_c.status =
854					    TARST_SYNC_NEG;
855					siop_sdtr_msg(&esiop_cmd->cmd_c, 0,
856					    sc->sc_c.st_minsync,
857					    sc->sc_c.maxoff);
858					esiop_table_sync(esiop_cmd,
859					    BUS_DMASYNC_PREREAD |
860					    BUS_DMASYNC_PREWRITE);
861					CALL_SCRIPT(Ent_send_msgout);
862					return 1;
863				} else if (msg == MSG_EXTENDED &&
864				    extmsg == MSG_EXT_SDTR) {
865					/* sync rejected */
866					esiop_target->target_c.offset = 0;
867					esiop_target->target_c.period = 0;
868					esiop_target->target_c.status =
869					    TARST_OK;
870					siop_update_xfer_mode(&sc->sc_c,
871					    target);
872					/* no table to flush here */
873					CALL_SCRIPT(Ent_msgin_ack);
874					return 1;
875				} else if (msg == MSG_EXTENDED &&
876				    extmsg == MSG_EXT_PPR) {
877					/* PPR rejected */
878					esiop_target->target_c.offset = 0;
879					esiop_target->target_c.period = 0;
880					esiop_target->target_c.status =
881					    TARST_OK;
882					siop_update_xfer_mode(&sc->sc_c,
883					    target);
884					/* no table to flush here */
885					CALL_SCRIPT(Ent_msgin_ack);
886					return 1;
887				} else if (msg == MSG_SIMPLE_Q_TAG ||
888				    msg == MSG_HEAD_OF_Q_TAG ||
889				    msg == MSG_ORDERED_Q_TAG) {
890					if (esiop_handle_qtag_reject(
891					    esiop_cmd) == -1)
892						goto reset;
893					CALL_SCRIPT(Ent_msgin_ack);
894					return 1;
895				}
896				if (xs)
897					scsipi_printaddr(xs->xs_periph);
898				else
899					printf("%s: ",
900					    device_xname(sc->sc_c.sc_dev));
901				if (msg == MSG_EXTENDED) {
902					printf("scsi message reject, extended "
903					    "message sent was 0x%x\n", extmsg);
904				} else {
905					printf("scsi message reject, message "
906					    "sent was 0x%x\n", msg);
907				}
908				/* no table to flush here */
909				CALL_SCRIPT(Ent_msgin_ack);
910				return 1;
911			}
912			if (msgin == MSG_IGN_WIDE_RESIDUE) {
913			/* use the extmsgdata table to get the second byte */
914				esiop_cmd->cmd_tables->t_extmsgdata.count =
915				    htole32(1);
916				esiop_table_sync(esiop_cmd,
917				    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
918				CALL_SCRIPT(Ent_get_extmsgdata);
919				return 1;
920			}
921			if (xs)
922				scsipi_printaddr(xs->xs_periph);
923			else
924				printf("%s: ", device_xname(sc->sc_c.sc_dev));
925			printf("unhandled message 0x%x\n", msgin);
926			esiop_cmd->cmd_tables->msg_out[0] = MSG_MESSAGE_REJECT;
927			esiop_cmd->cmd_tables->t_msgout.count= htole32(1);
928			esiop_table_sync(esiop_cmd,
929			    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
930			CALL_SCRIPT(Ent_send_msgout);
931			return 1;
932		}
933		case A_int_extmsgin:
934#ifdef SIOP_DEBUG_INTR
935			printf("extended message: msg 0x%x len %d\n",
936			    esiop_cmd->cmd_tables->msg_in[2],
937			    esiop_cmd->cmd_tables->msg_in[1]);
938#endif
939			if (esiop_cmd->cmd_tables->msg_in[1] >
940			    sizeof(esiop_cmd->cmd_tables->msg_in) - 2)
941				aprint_error_dev(sc->sc_c.sc_dev,
942				    "extended message too big (%d)\n",
943				    esiop_cmd->cmd_tables->msg_in[1]);
944			esiop_cmd->cmd_tables->t_extmsgdata.count =
945			    htole32(esiop_cmd->cmd_tables->msg_in[1] - 1);
946			esiop_table_sync(esiop_cmd,
947			    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
948			CALL_SCRIPT(Ent_get_extmsgdata);
949			return 1;
950		case A_int_extmsgdata:
951#ifdef SIOP_DEBUG_INTR
952			{
953			int i;
954			printf("extended message: 0x%x, data:",
955			    esiop_cmd->cmd_tables->msg_in[2]);
956			for (i = 3; i < 2 + esiop_cmd->cmd_tables->msg_in[1];
957			    i++)
958				printf(" 0x%x",
959				    esiop_cmd->cmd_tables->msg_in[i]);
960			printf("\n");
961			}
962#endif
963			if (esiop_cmd->cmd_tables->msg_in[0] ==
964			    MSG_IGN_WIDE_RESIDUE) {
965			/* we got the second byte of MSG_IGN_WIDE_RESIDUE */
966				if (esiop_cmd->cmd_tables->msg_in[3] != 1)
967					printf("MSG_IGN_WIDE_RESIDUE: "
968					     "bad len %d\n",
969					     esiop_cmd->cmd_tables->msg_in[3]);
970				switch (siop_iwr(&esiop_cmd->cmd_c)) {
971				case SIOP_NEG_MSGOUT:
972					esiop_table_sync(esiop_cmd,
973					    BUS_DMASYNC_PREREAD |
974					    BUS_DMASYNC_PREWRITE);
975					CALL_SCRIPT(Ent_send_msgout);
976					return 1;
977				case SIOP_NEG_ACK:
978					CALL_SCRIPT(Ent_msgin_ack);
979					return 1;
980				default:
981					panic("invalid retval from "
982					    "siop_iwr()");
983				}
984				return 1;
985			}
986			if (esiop_cmd->cmd_tables->msg_in[2] == MSG_EXT_PPR) {
987				switch (siop_ppr_neg(&esiop_cmd->cmd_c)) {
988				case SIOP_NEG_MSGOUT:
989					esiop_update_scntl3(sc,
990					    esiop_cmd->cmd_c.siop_target);
991					esiop_table_sync(esiop_cmd,
992					    BUS_DMASYNC_PREREAD |
993					    BUS_DMASYNC_PREWRITE);
994					CALL_SCRIPT(Ent_send_msgout);
995					return 1;
996				case SIOP_NEG_ACK:
997					esiop_update_scntl3(sc,
998					    esiop_cmd->cmd_c.siop_target);
999					CALL_SCRIPT(Ent_msgin_ack);
1000					return 1;
1001				default:
1002					panic("invalid retval from "
1003					    "siop_wdtr_neg()");
1004				}
1005				return 1;
1006			}
1007			if (esiop_cmd->cmd_tables->msg_in[2] == MSG_EXT_WDTR) {
1008				switch (siop_wdtr_neg(&esiop_cmd->cmd_c)) {
1009				case SIOP_NEG_MSGOUT:
1010					esiop_update_scntl3(sc,
1011					    esiop_cmd->cmd_c.siop_target);
1012					esiop_table_sync(esiop_cmd,
1013					    BUS_DMASYNC_PREREAD |
1014					    BUS_DMASYNC_PREWRITE);
1015					CALL_SCRIPT(Ent_send_msgout);
1016					return 1;
1017				case SIOP_NEG_ACK:
1018					esiop_update_scntl3(sc,
1019					    esiop_cmd->cmd_c.siop_target);
1020					CALL_SCRIPT(Ent_msgin_ack);
1021					return 1;
1022				default:
1023					panic("invalid retval from "
1024					    "siop_wdtr_neg()");
1025				}
1026				return 1;
1027			}
1028			if (esiop_cmd->cmd_tables->msg_in[2] == MSG_EXT_SDTR) {
1029				switch (siop_sdtr_neg(&esiop_cmd->cmd_c)) {
1030				case SIOP_NEG_MSGOUT:
1031					esiop_update_scntl3(sc,
1032					    esiop_cmd->cmd_c.siop_target);
1033					esiop_table_sync(esiop_cmd,
1034					    BUS_DMASYNC_PREREAD |
1035					    BUS_DMASYNC_PREWRITE);
1036					CALL_SCRIPT(Ent_send_msgout);
1037					return 1;
1038				case SIOP_NEG_ACK:
1039					esiop_update_scntl3(sc,
1040					    esiop_cmd->cmd_c.siop_target);
1041					CALL_SCRIPT(Ent_msgin_ack);
1042					return 1;
1043				default:
1044					panic("invalid retval from "
1045					    "siop_wdtr_neg()");
1046				}
1047				return 1;
1048			}
1049			/* send a message reject */
1050			esiop_cmd->cmd_tables->msg_out[0] = MSG_MESSAGE_REJECT;
1051			esiop_cmd->cmd_tables->t_msgout.count = htole32(1);
1052			esiop_table_sync(esiop_cmd,
1053			    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1054			CALL_SCRIPT(Ent_send_msgout);
1055			return 1;
1056		case A_int_disc:
1057			INCSTAT(esiop_stat_intr_sdp);
1058			offset = bus_space_read_1(sc->sc_c.sc_rt,
1059			    sc->sc_c.sc_rh, SIOP_SCRATCHA + 1);
1060#ifdef SIOP_DEBUG_DR
1061			printf("disconnect offset %d\n", offset);
1062#endif
1063			siop_sdp(&esiop_cmd->cmd_c, offset);
1064			/* we start again with no offset */
1065			ESIOP_XFER(esiop_cmd, saved_offset) =
1066			    htole32(SIOP_NOOFFSET);
1067			esiop_table_sync(esiop_cmd,
1068			    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1069			CALL_SCRIPT(Ent_script_sched);
1070			return 1;
1071		case A_int_resfail:
1072			printf("reselect failed\n");
1073			CALL_SCRIPT(Ent_script_sched);
1074			return 1;
1075		case A_int_done:
1076			if (xs == NULL) {
1077				printf("%s: done without command\n",
1078				    device_xname(sc->sc_c.sc_dev));
1079				CALL_SCRIPT(Ent_script_sched);
1080				return 1;
1081			}
1082#ifdef SIOP_DEBUG_INTR
1083			printf("done, DSA=0x%lx target id 0x%x last msg "
1084			    "in=0x%x status=0x%x\n",
1085			    (u_long)esiop_cmd->cmd_c.dsa,
1086			    le32toh(esiop_cmd->cmd_tables->id),
1087			    esiop_cmd->cmd_tables->msg_in[0],
1088			    le32toh(esiop_cmd->cmd_tables->status));
1089#endif
1090			INCSTAT(esiop_stat_intr_done);
1091			esiop_cmd->cmd_c.status = CMDST_DONE;
1092			goto end;
1093		default:
1094			printf("unknown irqcode %x\n", irqcode);
1095			if (xs) {
1096				xs->error = XS_SELTIMEOUT;
1097				goto end;
1098			}
1099			goto reset;
1100		}
1101		return 1;
1102	}
1103	/* We just should't get there */
1104	panic("siop_intr: I shouldn't be there !");
1105
1106end:
1107	/*
1108	 * restart the script now if command completed properly
1109	 * Otherwise wait for siop_scsicmd_end(), we may need to cleanup the
1110	 * queue
1111	 */
1112	xs->status = le32toh(esiop_cmd->cmd_tables->status);
1113#ifdef SIOP_DEBUG_INTR
1114	printf("esiop_intr end: status %d\n", xs->status);
1115#endif
1116	if (tag >= 0)
1117		esiop_lun->tactive[tag] = NULL;
1118	else
1119		esiop_lun->active = NULL;
1120	offset = bus_space_read_1(sc->sc_c.sc_rt, sc->sc_c.sc_rh,
1121	    SIOP_SCRATCHA + 1);
1122	/*
1123	 * if we got a disconnect between the last data phase
1124	 * and the status phase, offset will be 0. In this
1125	 * case, cmd_tables->saved_offset will have the proper value
1126	 * if it got updated by the controller
1127	 */
1128	if (offset == 0 &&
1129	    ESIOP_XFER(esiop_cmd, saved_offset) != htole32(SIOP_NOOFFSET))
1130		offset =
1131		    (le32toh(ESIOP_XFER(esiop_cmd, saved_offset)) >> 8) & 0xff;
1132
1133	esiop_scsicmd_end(esiop_cmd, offset);
1134	if (freetarget && esiop_target->target_c.status == TARST_PROBING)
1135		esiop_del_dev(sc, target, lun);
1136	CALL_SCRIPT(Ent_script_sched);
1137	return 1;
1138}
1139
1140void
1141esiop_scsicmd_end(struct esiop_cmd *esiop_cmd, int offset)
1142{
1143	struct scsipi_xfer *xs = esiop_cmd->cmd_c.xs;
1144	struct esiop_softc *sc = (struct esiop_softc *)esiop_cmd->cmd_c.siop_sc;
1145
1146	siop_update_resid(&esiop_cmd->cmd_c, offset);
1147
1148	switch(xs->status) {
1149	case SCSI_OK:
1150		xs->error = XS_NOERROR;
1151		break;
1152	case SCSI_BUSY:
1153		xs->error = XS_BUSY;
1154		break;
1155	case SCSI_CHECK:
1156		xs->error = XS_BUSY;
1157		/* remove commands in the queue and scheduler */
1158		esiop_unqueue(sc, xs->xs_periph->periph_target,
1159		    xs->xs_periph->periph_lun);
1160		break;
1161	case SCSI_QUEUE_FULL:
1162		INCSTAT(esiop_stat_intr_qfull);
1163#ifdef SIOP_DEBUG
1164		printf("%s:%d:%d: queue full (tag %d)\n",
1165		    device_xname(sc->sc_c.sc_dev),
1166		    xs->xs_periph->periph_target,
1167		    xs->xs_periph->periph_lun, esiop_cmd->cmd_c.tag);
1168#endif
1169		xs->error = XS_BUSY;
1170		break;
1171	case SCSI_SIOP_NOCHECK:
1172		/*
1173		 * don't check status, xs->error is already valid
1174		 */
1175		break;
1176	case SCSI_SIOP_NOSTATUS:
1177		/*
1178		 * the status byte was not updated, cmd was
1179		 * aborted
1180		 */
1181		xs->error = XS_SELTIMEOUT;
1182		break;
1183	default:
1184		scsipi_printaddr(xs->xs_periph);
1185		printf("invalid status code %d\n", xs->status);
1186		xs->error = XS_DRIVER_STUFFUP;
1187	}
1188	if (xs->xs_control & (XS_CTL_DATA_IN | XS_CTL_DATA_OUT)) {
1189		bus_dmamap_sync(sc->sc_c.sc_dmat,
1190		    esiop_cmd->cmd_c.dmamap_data, 0,
1191		    esiop_cmd->cmd_c.dmamap_data->dm_mapsize,
1192		    (xs->xs_control & XS_CTL_DATA_IN) ?
1193		    BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE);
1194		bus_dmamap_unload(sc->sc_c.sc_dmat,
1195		    esiop_cmd->cmd_c.dmamap_data);
1196	}
1197	bus_dmamap_unload(sc->sc_c.sc_dmat, esiop_cmd->cmd_c.dmamap_cmd);
1198	if ((xs->xs_control & XS_CTL_POLL) == 0)
1199		callout_stop(&xs->xs_callout);
1200	esiop_cmd->cmd_c.status = CMDST_FREE;
1201	TAILQ_INSERT_TAIL(&sc->free_list, esiop_cmd, next);
1202#if 0
1203	if (xs->resid != 0)
1204		printf("resid %d datalen %d\n", xs->resid, xs->datalen);
1205#endif
1206	scsipi_done (xs);
1207}
1208
1209void
1210esiop_checkdone(struct esiop_softc *sc)
1211{
1212	int target, lun, tag;
1213	struct esiop_target *esiop_target;
1214	struct esiop_lun *esiop_lun;
1215	struct esiop_cmd *esiop_cmd;
1216	uint32_t slot;
1217	int needsync = 0;
1218	int status;
1219	uint32_t sem, offset;
1220
1221	esiop_script_sync(sc, BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
1222	sem = esiop_script_read(sc, sc->sc_semoffset);
1223	esiop_script_write(sc, sc->sc_semoffset, sem & ~A_sem_done);
1224	if ((sc->sc_flags & SCF_CHAN_NOSLOT) && (sem & A_sem_start)) {
1225		/*
1226		 * at last one command have been started,
1227		 * so we should have free slots now
1228		 */
1229		sc->sc_flags &= ~SCF_CHAN_NOSLOT;
1230		scsipi_channel_thaw(&sc->sc_c.sc_chan, 1);
1231	}
1232	esiop_script_sync(sc, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1233
1234	if ((sem & A_sem_done) == 0) {
1235		/* no pending done command */
1236		return;
1237	}
1238
1239	bus_dmamap_sync(sc->sc_c.sc_dmat, sc->sc_done_map,
1240	    sc->sc_done_offset, A_ndone_slots * sizeof(uint32_t),
1241	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
1242next:
1243	if (sc->sc_done_slot[sc->sc_currdoneslot] == 0) {
1244		if (needsync)
1245			bus_dmamap_sync(sc->sc_c.sc_dmat, sc->sc_done_map,
1246			    sc->sc_done_offset,
1247			    A_ndone_slots * sizeof(uint32_t),
1248			    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1249		return;
1250	}
1251
1252	needsync = 1;
1253
1254	slot = htole32(sc->sc_done_slot[sc->sc_currdoneslot]);
1255	sc->sc_done_slot[sc->sc_currdoneslot] = 0;
1256	sc->sc_currdoneslot += 1;
1257	if (sc->sc_currdoneslot == A_ndone_slots)
1258		sc->sc_currdoneslot = 0;
1259
1260	target =  (slot & A_f_c_target) ? (slot >> 8) & 0xff : -1;
1261	lun =  (slot & A_f_c_lun) ? (slot >> 16) & 0xff : -1;
1262	tag =  (slot & A_f_c_tag) ? (slot >> 24) & 0xff : -1;
1263
1264	esiop_target = (target >= 0) ?
1265	    (struct esiop_target *)sc->sc_c.targets[target] : NULL;
1266	if (esiop_target == NULL) {
1267		printf("esiop_target (target %d) not valid\n", target);
1268		goto next;
1269	}
1270	esiop_lun = (lun >= 0) ? esiop_target->esiop_lun[lun] : NULL;
1271	if (esiop_lun == NULL) {
1272		printf("esiop_lun (target %d lun %d) not valid\n",
1273		    target, lun);
1274		goto next;
1275	}
1276	esiop_cmd = (tag >= 0) ? esiop_lun->tactive[tag] : esiop_lun->active;
1277	if (esiop_cmd == NULL) {
1278		printf("esiop_cmd (target %d lun %d tag %d) not valid\n",
1279		    target, lun, tag);
1280			goto next;
1281	}
1282
1283	esiop_table_sync(esiop_cmd,
1284	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
1285	status = le32toh(esiop_cmd->cmd_tables->status);
1286#ifdef DIAGNOSTIC
1287	if (status != SCSI_OK) {
1288		printf("command for T/L/Q %d/%d/%d status %d\n",
1289		    target, lun, tag, status);
1290		goto next;
1291	}
1292
1293#endif
1294	/* Ok, this command has been handled */
1295	esiop_cmd->cmd_c.xs->status = status;
1296	if (tag >= 0)
1297		esiop_lun->tactive[tag] = NULL;
1298	else
1299		esiop_lun->active = NULL;
1300	/*
1301	 * scratcha was eventually saved in saved_offset by script.
1302	 * fetch offset from it
1303	 */
1304	offset = 0;
1305	if (ESIOP_XFER(esiop_cmd, saved_offset) != htole32(SIOP_NOOFFSET))
1306		offset =
1307		    (le32toh(ESIOP_XFER(esiop_cmd, saved_offset)) >> 8) & 0xff;
1308	esiop_scsicmd_end(esiop_cmd, offset);
1309	goto next;
1310}
1311
1312void
1313esiop_unqueue(struct esiop_softc *sc, int target, int lun)
1314{
1315	int slot, tag;
1316	uint32_t slotdsa;
1317	struct esiop_cmd *esiop_cmd;
1318	struct esiop_lun *esiop_lun =
1319	    ((struct esiop_target *)sc->sc_c.targets[target])->esiop_lun[lun];
1320
1321	/* first make sure to read valid data */
1322	esiop_script_sync(sc, BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
1323
1324	for (tag = 0; tag < ESIOP_NTAG; tag++) {
1325		/* look for commands in the scheduler, not yet started */
1326		if (esiop_lun->tactive[tag] == NULL)
1327			continue;
1328		esiop_cmd = esiop_lun->tactive[tag];
1329		for (slot = 0; slot < A_ncmd_slots; slot++) {
1330			slotdsa = esiop_script_read(sc,
1331			    sc->sc_shedoffset + slot * CMD_SLOTSIZE);
1332			/* if the slot has any flag, it won't match the DSA */
1333			if (slotdsa == esiop_cmd->cmd_c.dsa) { /* found it */
1334				/* Mark this slot as ignore */
1335				esiop_script_write(sc,
1336				    sc->sc_shedoffset + slot * CMD_SLOTSIZE,
1337				    esiop_cmd->cmd_c.dsa | A_f_cmd_ignore);
1338				/* ask to requeue */
1339				esiop_cmd->cmd_c.xs->error = XS_REQUEUE;
1340				esiop_cmd->cmd_c.xs->status = SCSI_SIOP_NOCHECK;
1341				esiop_lun->tactive[tag] = NULL;
1342				esiop_scsicmd_end(esiop_cmd, 0);
1343				break;
1344			}
1345		}
1346	}
1347	esiop_script_sync(sc, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1348}
1349
1350/*
1351 * handle a rejected queue tag message: the command will run untagged,
1352 * has to adjust the reselect script.
1353 */
1354
1355
1356int
1357esiop_handle_qtag_reject(struct esiop_cmd *esiop_cmd)
1358{
1359	struct esiop_softc *sc = (struct esiop_softc *)esiop_cmd->cmd_c.siop_sc;
1360	int target = esiop_cmd->cmd_c.xs->xs_periph->periph_target;
1361	int lun = esiop_cmd->cmd_c.xs->xs_periph->periph_lun;
1362	int tag = esiop_cmd->cmd_tables->msg_out[2];
1363	struct esiop_target *esiop_target =
1364	    (struct esiop_target*)sc->sc_c.targets[target];
1365	struct esiop_lun *esiop_lun = esiop_target->esiop_lun[lun];
1366
1367#ifdef SIOP_DEBUG
1368	printf("%s:%d:%d: tag message %d (%d) rejected (status %d)\n",
1369	    device_xname(sc->sc_c.sc_dev), target, lun, tag,
1370	    esiop_cmd->cmd_c.tag, esiop_cmd->cmd_c.status);
1371#endif
1372
1373	if (esiop_lun->active != NULL) {
1374		aprint_error_dev(sc->sc_c.sc_dev,
1375		    "untagged command already running for target %d "
1376		    "lun %d (status %d)\n",
1377		    target, lun, esiop_lun->active->cmd_c.status);
1378		return -1;
1379	}
1380	/* clear tag slot */
1381	esiop_lun->tactive[tag] = NULL;
1382	/* add command to non-tagged slot */
1383	esiop_lun->active = esiop_cmd;
1384	esiop_cmd->cmd_c.flags &= ~CMDFL_TAG;
1385	esiop_cmd->cmd_c.tag = -1;
1386	/* update DSA table */
1387	esiop_script_write(sc, esiop_target->lun_table_offset +
1388	    lun * 2 + A_target_luntbl / sizeof(uint32_t),
1389	    esiop_cmd->cmd_c.dsa);
1390	esiop_script_sync(sc, BUS_DMASYNC_PREREAD |  BUS_DMASYNC_PREWRITE);
1391	return 0;
1392}
1393
1394/*
1395 * handle a bus reset: reset chip, unqueue all active commands, free all
1396 * target struct and report lossage to upper layer.
1397 * As the upper layer may requeue immediatly we have to first store
1398 * all active commands in a temporary queue.
1399 */
1400void
1401esiop_handle_reset(struct esiop_softc *sc)
1402{
1403	struct esiop_cmd *esiop_cmd;
1404	struct esiop_lun *esiop_lun;
1405	int target, lun, tag;
1406	/*
1407	 * scsi bus reset. reset the chip and restart
1408	 * the queue. Need to clean up all active commands
1409	 */
1410	printf("%s: scsi bus reset\n", device_xname(sc->sc_c.sc_dev));
1411	/* stop, reset and restart the chip */
1412	esiop_reset(sc);
1413
1414	if (sc->sc_flags & SCF_CHAN_NOSLOT) {
1415		/* chip has been reset, all slots are free now */
1416		sc->sc_flags &= ~SCF_CHAN_NOSLOT;
1417		scsipi_channel_thaw(&sc->sc_c.sc_chan, 1);
1418	}
1419	/*
1420	 * Process all commands: first commands completes, then commands
1421	 * being executed
1422	 */
1423	esiop_checkdone(sc);
1424	for (target = 0; target < sc->sc_c.sc_chan.chan_ntargets; target++) {
1425		struct esiop_target *esiop_target =
1426		    (struct esiop_target *)sc->sc_c.targets[target];
1427		if (esiop_target == NULL)
1428			continue;
1429		for (lun = 0; lun < 8; lun++) {
1430			esiop_lun = esiop_target->esiop_lun[lun];
1431			if (esiop_lun == NULL)
1432				continue;
1433			for (tag = -1; tag <
1434			    ((sc->sc_c.targets[target]->flags & TARF_TAG) ?
1435			    ESIOP_NTAG : 0);
1436			    tag++) {
1437				if (tag >= 0)
1438					esiop_cmd = esiop_lun->tactive[tag];
1439				else
1440					esiop_cmd = esiop_lun->active;
1441				if (esiop_cmd == NULL)
1442					continue;
1443				scsipi_printaddr(
1444				    esiop_cmd->cmd_c.xs->xs_periph);
1445				printf("command with tag id %d reset\n", tag);
1446				esiop_cmd->cmd_c.xs->error =
1447				    (esiop_cmd->cmd_c.flags & CMDFL_TIMEOUT) ?
1448				    XS_TIMEOUT : XS_RESET;
1449				esiop_cmd->cmd_c.xs->status = SCSI_SIOP_NOCHECK;
1450				if (tag >= 0)
1451					esiop_lun->tactive[tag] = NULL;
1452				else
1453					esiop_lun->active = NULL;
1454				esiop_cmd->cmd_c.status = CMDST_DONE;
1455				esiop_scsicmd_end(esiop_cmd, 0);
1456			}
1457		}
1458		sc->sc_c.targets[target]->status = TARST_ASYNC;
1459		sc->sc_c.targets[target]->flags &= ~(TARF_ISWIDE | TARF_ISDT);
1460		sc->sc_c.targets[target]->period =
1461		    sc->sc_c.targets[target]->offset = 0;
1462		siop_update_xfer_mode(&sc->sc_c, target);
1463	}
1464
1465	scsipi_async_event(&sc->sc_c.sc_chan, ASYNC_EVENT_RESET, NULL);
1466}
1467
1468void
1469esiop_scsipi_request(struct scsipi_channel *chan, scsipi_adapter_req_t req,
1470    void *arg)
1471{
1472	struct scsipi_xfer *xs;
1473	struct scsipi_periph *periph;
1474	struct esiop_softc *sc = device_private(chan->chan_adapter->adapt_dev);
1475	struct esiop_cmd *esiop_cmd;
1476	struct esiop_target *esiop_target;
1477	int s, error, i;
1478	int target;
1479	int lun;
1480
1481	switch (req) {
1482	case ADAPTER_REQ_RUN_XFER:
1483		xs = arg;
1484		periph = xs->xs_periph;
1485		target = periph->periph_target;
1486		lun = periph->periph_lun;
1487
1488		s = splbio();
1489		/*
1490		 * first check if there are pending complete commands.
1491		 * this can free us some resources (in the rings for example).
1492		 * we have to lock it to avoid recursion.
1493		 */
1494		if ((sc->sc_flags & SCF_CHAN_ADAPTREQ) == 0) {
1495			sc->sc_flags |= SCF_CHAN_ADAPTREQ;
1496			esiop_checkdone(sc);
1497			sc->sc_flags &= ~SCF_CHAN_ADAPTREQ;
1498		}
1499#ifdef SIOP_DEBUG_SCHED
1500		printf("starting cmd for %d:%d tag %d(%d)\n", target, lun,
1501		    xs->xs_tag_type, xs->xs_tag_id);
1502#endif
1503		esiop_cmd = TAILQ_FIRST(&sc->free_list);
1504		if (esiop_cmd == NULL) {
1505			xs->error = XS_RESOURCE_SHORTAGE;
1506			scsipi_done(xs);
1507			splx(s);
1508			return;
1509		}
1510		TAILQ_REMOVE(&sc->free_list, esiop_cmd, next);
1511#ifdef DIAGNOSTIC
1512		if (esiop_cmd->cmd_c.status != CMDST_FREE)
1513			panic("siop_scsicmd: new cmd not free");
1514#endif
1515		esiop_target = (struct esiop_target*)sc->sc_c.targets[target];
1516		if (esiop_target == NULL) {
1517#ifdef SIOP_DEBUG
1518			printf("%s: alloc siop_target for target %d\n",
1519				device_xname(sc->sc_c.sc_dev), target);
1520#endif
1521			sc->sc_c.targets[target] =
1522			    malloc(sizeof(struct esiop_target),
1523				M_DEVBUF, M_NOWAIT | M_ZERO);
1524			if (sc->sc_c.targets[target] == NULL) {
1525				aprint_error_dev(sc->sc_c.sc_dev,
1526				    "can't malloc memory for "
1527				    "target %d\n",
1528				    target);
1529				xs->error = XS_RESOURCE_SHORTAGE;
1530				scsipi_done(xs);
1531				TAILQ_INSERT_TAIL(&sc->free_list,
1532				    esiop_cmd, next);
1533				splx(s);
1534				return;
1535			}
1536			esiop_target =
1537			    (struct esiop_target*)sc->sc_c.targets[target];
1538			esiop_target->target_c.status = TARST_PROBING;
1539			esiop_target->target_c.flags = 0;
1540			esiop_target->target_c.id =
1541			    sc->sc_c.clock_div << 24; /* scntl3 */
1542			esiop_target->target_c.id |=  target << 16; /* id */
1543			/* esiop_target->target_c.id |= 0x0 << 8; scxfer is 0 */
1544
1545			for (i=0; i < 8; i++)
1546				esiop_target->esiop_lun[i] = NULL;
1547			esiop_target_register(sc, target);
1548		}
1549		if (esiop_target->esiop_lun[lun] == NULL) {
1550			esiop_target->esiop_lun[lun] =
1551			    malloc(sizeof(struct esiop_lun), M_DEVBUF,
1552			    M_NOWAIT|M_ZERO);
1553			if (esiop_target->esiop_lun[lun] == NULL) {
1554				aprint_error_dev(sc->sc_c.sc_dev,
1555				    "can't alloc esiop_lun for "
1556				    "target %d lun %d\n",
1557				    target, lun);
1558				xs->error = XS_RESOURCE_SHORTAGE;
1559				scsipi_done(xs);
1560				TAILQ_INSERT_TAIL(&sc->free_list,
1561				    esiop_cmd, next);
1562				splx(s);
1563				return;
1564			}
1565		}
1566		esiop_cmd->cmd_c.siop_target = sc->sc_c.targets[target];
1567		esiop_cmd->cmd_c.xs = xs;
1568		esiop_cmd->cmd_c.flags = 0;
1569		esiop_cmd->cmd_c.status = CMDST_READY;
1570
1571		/* load the DMA maps */
1572		error = bus_dmamap_load(sc->sc_c.sc_dmat,
1573		    esiop_cmd->cmd_c.dmamap_cmd,
1574		    xs->cmd, xs->cmdlen, NULL, BUS_DMA_NOWAIT);
1575		if (error) {
1576			aprint_error_dev(sc->sc_c.sc_dev,
1577			    "unable to load cmd DMA map: %d\n",
1578			    error);
1579			xs->error = (error == EAGAIN) ?
1580			    XS_RESOURCE_SHORTAGE : XS_DRIVER_STUFFUP;
1581			scsipi_done(xs);
1582			esiop_cmd->cmd_c.status = CMDST_FREE;
1583			TAILQ_INSERT_TAIL(&sc->free_list, esiop_cmd, next);
1584			splx(s);
1585			return;
1586		}
1587		if (xs->xs_control & (XS_CTL_DATA_IN | XS_CTL_DATA_OUT)) {
1588			error = bus_dmamap_load(sc->sc_c.sc_dmat,
1589			    esiop_cmd->cmd_c.dmamap_data, xs->data, xs->datalen,
1590			    NULL, BUS_DMA_NOWAIT | BUS_DMA_STREAMING |
1591			    ((xs->xs_control & XS_CTL_DATA_IN) ?
1592			     BUS_DMA_READ : BUS_DMA_WRITE));
1593			if (error) {
1594				aprint_error_dev(sc->sc_c.sc_dev,
1595				    "unable to load data DMA map: %d\n",
1596				    error);
1597				xs->error = (error == EAGAIN) ?
1598				    XS_RESOURCE_SHORTAGE : XS_DRIVER_STUFFUP;
1599				scsipi_done(xs);
1600				bus_dmamap_unload(sc->sc_c.sc_dmat,
1601				    esiop_cmd->cmd_c.dmamap_cmd);
1602				esiop_cmd->cmd_c.status = CMDST_FREE;
1603				TAILQ_INSERT_TAIL(&sc->free_list,
1604				    esiop_cmd, next);
1605				splx(s);
1606				return;
1607			}
1608			bus_dmamap_sync(sc->sc_c.sc_dmat,
1609			    esiop_cmd->cmd_c.dmamap_data, 0,
1610			    esiop_cmd->cmd_c.dmamap_data->dm_mapsize,
1611			    (xs->xs_control & XS_CTL_DATA_IN) ?
1612			    BUS_DMASYNC_PREREAD : BUS_DMASYNC_PREWRITE);
1613		}
1614		bus_dmamap_sync(sc->sc_c.sc_dmat, esiop_cmd->cmd_c.dmamap_cmd,
1615		    0, esiop_cmd->cmd_c.dmamap_cmd->dm_mapsize,
1616		    BUS_DMASYNC_PREWRITE);
1617
1618		if (xs->xs_tag_type)
1619			esiop_cmd->cmd_c.tag = xs->xs_tag_id;
1620		else
1621			esiop_cmd->cmd_c.tag = -1;
1622		siop_setuptables(&esiop_cmd->cmd_c);
1623		ESIOP_XFER(esiop_cmd, saved_offset) = htole32(SIOP_NOOFFSET);
1624		ESIOP_XFER(esiop_cmd, tlq) = htole32(A_f_c_target | A_f_c_lun);
1625		ESIOP_XFER(esiop_cmd, tlq) |=
1626		    htole32((target << 8) | (lun << 16));
1627		if (esiop_cmd->cmd_c.flags & CMDFL_TAG) {
1628			ESIOP_XFER(esiop_cmd, tlq) |= htole32(A_f_c_tag);
1629			ESIOP_XFER(esiop_cmd, tlq) |=
1630			    htole32(esiop_cmd->cmd_c.tag << 24);
1631		}
1632
1633		esiop_table_sync(esiop_cmd,
1634		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1635		esiop_start(sc, esiop_cmd);
1636		if (xs->xs_control & XS_CTL_POLL) {
1637			/* poll for command completion */
1638			while ((xs->xs_status & XS_STS_DONE) == 0) {
1639				delay(1000);
1640				esiop_intr(sc);
1641			}
1642		}
1643		splx(s);
1644		return;
1645
1646	case ADAPTER_REQ_GROW_RESOURCES:
1647#ifdef SIOP_DEBUG
1648		printf("%s grow resources (%d)\n",
1649		    device_xname(sc->sc_c.sc_dev),
1650		    sc->sc_c.sc_adapt.adapt_openings);
1651#endif
1652		esiop_morecbd(sc);
1653		return;
1654
1655	case ADAPTER_REQ_SET_XFER_MODE:
1656	    {
1657		struct scsipi_xfer_mode *xm = arg;
1658		if (sc->sc_c.targets[xm->xm_target] == NULL)
1659			return;
1660		s = splbio();
1661		if (xm->xm_mode & PERIPH_CAP_TQING) {
1662			sc->sc_c.targets[xm->xm_target]->flags |= TARF_TAG;
1663			/* allocate tag tables for this device */
1664			for (lun = 0;
1665			    lun < sc->sc_c.sc_chan.chan_nluns; lun++) {
1666				if (scsipi_lookup_periph(chan,
1667				    xm->xm_target, lun) != NULL)
1668					esiop_add_dev(sc, xm->xm_target, lun);
1669			}
1670		}
1671		if ((xm->xm_mode & PERIPH_CAP_WIDE16) &&
1672		    (sc->sc_c.features & SF_BUS_WIDE))
1673			sc->sc_c.targets[xm->xm_target]->flags |= TARF_WIDE;
1674		if (xm->xm_mode & PERIPH_CAP_SYNC)
1675			sc->sc_c.targets[xm->xm_target]->flags |= TARF_SYNC;
1676		if ((xm->xm_mode & PERIPH_CAP_DT) &&
1677		    (sc->sc_c.features & SF_CHIP_DT))
1678			sc->sc_c.targets[xm->xm_target]->flags |= TARF_DT;
1679		if ((xm->xm_mode &
1680		    (PERIPH_CAP_SYNC | PERIPH_CAP_WIDE16 | PERIPH_CAP_DT)) ||
1681		    sc->sc_c.targets[xm->xm_target]->status == TARST_PROBING)
1682			sc->sc_c.targets[xm->xm_target]->status = TARST_ASYNC;
1683
1684		splx(s);
1685	    }
1686	}
1687}
1688
1689static void
1690esiop_start(struct esiop_softc *sc, struct esiop_cmd *esiop_cmd)
1691{
1692	struct esiop_lun *esiop_lun;
1693	struct esiop_target *esiop_target;
1694	int timeout;
1695	int target, lun, slot;
1696
1697	/*
1698	 * first make sure to read valid data
1699	 */
1700	esiop_script_sync(sc, BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
1701
1702	/*
1703	 * We use a circular queue here. sc->sc_currschedslot points to a
1704	 * free slot, unless we have filled the queue. Check this.
1705	 */
1706	slot = sc->sc_currschedslot;
1707	if ((esiop_script_read(sc, sc->sc_shedoffset + slot * CMD_SLOTSIZE) &
1708	    A_f_cmd_free) == 0) {
1709		/*
1710		 * no more free slot, no need to continue. freeze the queue
1711		 * and requeue this command.
1712		 */
1713		scsipi_channel_freeze(&sc->sc_c.sc_chan, 1);
1714		sc->sc_flags |= SCF_CHAN_NOSLOT;
1715		esiop_script_write(sc, sc->sc_semoffset,
1716		    esiop_script_read(sc, sc->sc_semoffset) & ~A_sem_start);
1717		esiop_script_sync(sc,
1718		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1719		esiop_cmd->cmd_c.xs->error = XS_REQUEUE;
1720		esiop_cmd->cmd_c.xs->status = SCSI_SIOP_NOCHECK;
1721		esiop_scsicmd_end(esiop_cmd, 0);
1722		return;
1723	}
1724	/* OK, we can use this slot */
1725
1726	target = esiop_cmd->cmd_c.xs->xs_periph->periph_target;
1727	lun = esiop_cmd->cmd_c.xs->xs_periph->periph_lun;
1728	esiop_target = (struct esiop_target*)sc->sc_c.targets[target];
1729	esiop_lun = esiop_target->esiop_lun[lun];
1730	/* if non-tagged command active, panic: this shouldn't happen */
1731	if (esiop_lun->active != NULL) {
1732		panic("esiop_start: tagged cmd while untagged running");
1733	}
1734#ifdef DIAGNOSTIC
1735	/* sanity check the tag if needed */
1736	if (esiop_cmd->cmd_c.flags & CMDFL_TAG) {
1737		if (esiop_cmd->cmd_c.tag >= ESIOP_NTAG ||
1738		    esiop_cmd->cmd_c.tag < 0) {
1739			scsipi_printaddr(esiop_cmd->cmd_c.xs->xs_periph);
1740			printf(": tag id %d\n", esiop_cmd->cmd_c.tag);
1741			panic("esiop_start: invalid tag id");
1742		}
1743		if (esiop_lun->tactive[esiop_cmd->cmd_c.tag] != NULL)
1744			panic("esiop_start: tag not free");
1745	}
1746#endif
1747#ifdef SIOP_DEBUG_SCHED
1748	printf("using slot %d for DSA 0x%lx\n", slot,
1749	    (u_long)esiop_cmd->cmd_c.dsa);
1750#endif
1751	/* mark command as active */
1752	if (esiop_cmd->cmd_c.status == CMDST_READY)
1753		esiop_cmd->cmd_c.status = CMDST_ACTIVE;
1754	else
1755		panic("esiop_start: bad status");
1756	/* DSA table for reselect */
1757	if (esiop_cmd->cmd_c.flags & CMDFL_TAG) {
1758		esiop_lun->tactive[esiop_cmd->cmd_c.tag] = esiop_cmd;
1759		/* DSA table for reselect */
1760		esiop_lun->lun_tagtbl->tbl[esiop_cmd->cmd_c.tag] =
1761		    htole32(esiop_cmd->cmd_c.dsa);
1762		bus_dmamap_sync(sc->sc_c.sc_dmat,
1763		    esiop_lun->lun_tagtbl->tblblk->blkmap,
1764		    esiop_lun->lun_tagtbl->tbl_offset,
1765		    sizeof(uint32_t) * ESIOP_NTAG, BUS_DMASYNC_PREWRITE);
1766	} else {
1767		esiop_lun->active = esiop_cmd;
1768		esiop_script_write(sc,
1769		    esiop_target->lun_table_offset +
1770		    lun * 2 + A_target_luntbl / sizeof(uint32_t),
1771		    esiop_cmd->cmd_c.dsa);
1772	}
1773	/* scheduler slot: DSA */
1774	esiop_script_write(sc, sc->sc_shedoffset + slot * CMD_SLOTSIZE,
1775	    esiop_cmd->cmd_c.dsa);
1776	/* make sure SCRIPT processor will read valid data */
1777	esiop_script_sync(sc, BUS_DMASYNC_PREREAD |  BUS_DMASYNC_PREWRITE);
1778	/* handle timeout */
1779	if ((esiop_cmd->cmd_c.xs->xs_control & XS_CTL_POLL) == 0) {
1780		/* start exire timer */
1781		timeout = mstohz(esiop_cmd->cmd_c.xs->timeout);
1782		if (timeout == 0)
1783			timeout = 1;
1784		callout_reset( &esiop_cmd->cmd_c.xs->xs_callout,
1785		    timeout, esiop_timeout, esiop_cmd);
1786	}
1787	/* Signal script it has some work to do */
1788	bus_space_write_1(sc->sc_c.sc_rt, sc->sc_c.sc_rh,
1789	    SIOP_ISTAT, ISTAT_SIGP);
1790	/* update the current slot, and wait for IRQ */
1791	sc->sc_currschedslot++;
1792	if (sc->sc_currschedslot >= A_ncmd_slots)
1793		sc->sc_currschedslot = 0;
1794}
1795
1796void
1797esiop_timeout(void *v)
1798{
1799	struct esiop_cmd *esiop_cmd = v;
1800	struct esiop_softc *sc =
1801	    (struct esiop_softc *)esiop_cmd->cmd_c.siop_sc;
1802	int s;
1803#ifdef SIOP_DEBUG
1804	int slot, slotdsa;
1805#endif
1806
1807	s = splbio();
1808	esiop_table_sync(esiop_cmd,
1809	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
1810	scsipi_printaddr(esiop_cmd->cmd_c.xs->xs_periph);
1811#ifdef SIOP_DEBUG
1812	printf("command timeout (status %d)\n",
1813	    le32toh(esiop_cmd->cmd_tables->status));
1814
1815	esiop_script_sync(sc, BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
1816	for (slot = 0; slot < A_ncmd_slots; slot++) {
1817		slotdsa = esiop_script_read(sc,
1818		    sc->sc_shedoffset + slot * CMD_SLOTSIZE);
1819		if ((slotdsa & 0x01) == 0)
1820			printf("slot %d not free (0x%x)\n", slot, slotdsa);
1821	}
1822	printf("istat 0x%x ",
1823	    bus_space_read_1(sc->sc_c.sc_rt, sc->sc_c.sc_rh, SIOP_ISTAT));
1824	printf("DSP 0x%lx DSA 0x%x\n",
1825	    (u_long)(bus_space_read_4(sc->sc_c.sc_rt, sc->sc_c.sc_rh, SIOP_DSP)
1826	    - sc->sc_c.sc_scriptaddr),
1827	    bus_space_read_4(sc->sc_c.sc_rt, sc->sc_c.sc_rh, SIOP_DSA));
1828	(void)bus_space_read_1(sc->sc_c.sc_rt, sc->sc_c.sc_rh, SIOP_CTEST2);
1829	printf("istat 0x%x\n",
1830	    bus_space_read_1(sc->sc_c.sc_rt, sc->sc_c.sc_rh, SIOP_ISTAT));
1831#else
1832	printf("command timeout, CDB: ");
1833	scsipi_print_cdb(esiop_cmd->cmd_c.xs->cmd);
1834	printf("\n");
1835#endif
1836	/* reset the scsi bus */
1837	siop_resetbus(&sc->sc_c);
1838
1839	/* deactivate callout */
1840	callout_stop(&esiop_cmd->cmd_c.xs->xs_callout);
1841	/*
1842	 * mark command has being timed out and just return;
1843	 * the bus reset will generate an interrupt,
1844	 * it will be handled in siop_intr()
1845	 */
1846	esiop_cmd->cmd_c.flags |= CMDFL_TIMEOUT;
1847	splx(s);
1848}
1849
1850void
1851esiop_dump_script(struct esiop_softc *sc)
1852{
1853	int i;
1854
1855	for (i = 0; i < PAGE_SIZE / 4; i += 2) {
1856		printf("0x%04x: 0x%08x 0x%08x", i * 4,
1857		    esiop_script_read(sc, i),
1858		    esiop_script_read(sc, i + 1));
1859		if ((esiop_script_read(sc, i) & 0xe0000000) == 0xc0000000) {
1860			i++;
1861			printf(" 0x%08x", esiop_script_read(sc, i + 1));
1862		}
1863		printf("\n");
1864	}
1865}
1866
1867void
1868esiop_morecbd(struct esiop_softc *sc)
1869{
1870	int error, i, s;
1871	bus_dma_segment_t seg;
1872	int rseg;
1873	struct esiop_cbd *newcbd;
1874	struct esiop_xfer *xfer;
1875	bus_addr_t dsa;
1876
1877	/* allocate a new list head */
1878	newcbd = malloc(sizeof(struct esiop_cbd), M_DEVBUF, M_NOWAIT|M_ZERO);
1879	if (newcbd == NULL) {
1880		aprint_error_dev(sc->sc_c.sc_dev,
1881		    "can't allocate memory for command descriptors "
1882		    "head\n");
1883		return;
1884	}
1885
1886	/* allocate cmd list */
1887	newcbd->cmds = malloc(sizeof(struct esiop_cmd) * SIOP_NCMDPB,
1888	    M_DEVBUF, M_NOWAIT|M_ZERO);
1889	if (newcbd->cmds == NULL) {
1890		aprint_error_dev(sc->sc_c.sc_dev,
1891		    "can't allocate memory for command descriptors\n");
1892		goto bad3;
1893	}
1894	error = bus_dmamem_alloc(sc->sc_c.sc_dmat, PAGE_SIZE, PAGE_SIZE, 0,
1895	    &seg, 1, &rseg, BUS_DMA_NOWAIT);
1896	if (error) {
1897		aprint_error_dev(sc->sc_c.sc_dev,
1898		    "unable to allocate cbd DMA memory, error = %d\n",
1899		    error);
1900		goto bad2;
1901	}
1902	error = bus_dmamem_map(sc->sc_c.sc_dmat, &seg, rseg, PAGE_SIZE,
1903	    (void **)&newcbd->xfers, BUS_DMA_NOWAIT|BUS_DMA_COHERENT);
1904	if (error) {
1905		aprint_error_dev(sc->sc_c.sc_dev,
1906		    "unable to map cbd DMA memory, error = %d\n",
1907		    error);
1908		goto bad2;
1909	}
1910	error = bus_dmamap_create(sc->sc_c.sc_dmat, PAGE_SIZE, 1, PAGE_SIZE, 0,
1911	    BUS_DMA_NOWAIT, &newcbd->xferdma);
1912	if (error) {
1913		aprint_error_dev(sc->sc_c.sc_dev,
1914		    "unable to create cbd DMA map, error = %d\n", error);
1915		goto bad1;
1916	}
1917	error = bus_dmamap_load(sc->sc_c.sc_dmat, newcbd->xferdma,
1918	    newcbd->xfers, PAGE_SIZE, NULL, BUS_DMA_NOWAIT);
1919	if (error) {
1920		aprint_error_dev(sc->sc_c.sc_dev,
1921		    "unable to load cbd DMA map, error = %d\n", error);
1922		goto bad0;
1923	}
1924#ifdef SIOP_DEBUG
1925	aprint_debug_dev(sc->sc_c.sc_dev, "alloc newcdb at PHY addr 0x%lx\n",
1926	    (unsigned long)newcbd->xferdma->dm_segs[0].ds_addr);
1927#endif
1928	for (i = 0; i < SIOP_NCMDPB; i++) {
1929		error = bus_dmamap_create(sc->sc_c.sc_dmat, MAXPHYS, SIOP_NSG,
1930		    MAXPHYS, 0, BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW,
1931		    &newcbd->cmds[i].cmd_c.dmamap_data);
1932		if (error) {
1933			aprint_error_dev(sc->sc_c.sc_dev,
1934			    "unable to create data DMA map for cbd: "
1935			    "error %d\n", error);
1936			goto bad0;
1937		}
1938		error = bus_dmamap_create(sc->sc_c.sc_dmat,
1939		    sizeof(struct scsipi_generic), 1,
1940		    sizeof(struct scsipi_generic), 0,
1941		    BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW,
1942		    &newcbd->cmds[i].cmd_c.dmamap_cmd);
1943		if (error) {
1944			aprint_error_dev(sc->sc_c.sc_dev,
1945			    "unable to create cmd DMA map for cbd %d\n", error);
1946			goto bad0;
1947		}
1948		newcbd->cmds[i].cmd_c.siop_sc = &sc->sc_c;
1949		newcbd->cmds[i].esiop_cbdp = newcbd;
1950		xfer = &newcbd->xfers[i];
1951		newcbd->cmds[i].cmd_tables = (struct siop_common_xfer *)xfer;
1952		memset(newcbd->cmds[i].cmd_tables, 0,
1953		    sizeof(struct esiop_xfer));
1954		dsa = newcbd->xferdma->dm_segs[0].ds_addr +
1955		    i * sizeof(struct esiop_xfer);
1956		newcbd->cmds[i].cmd_c.dsa = dsa;
1957		newcbd->cmds[i].cmd_c.status = CMDST_FREE;
1958		xfer->siop_tables.t_msgout.count= htole32(1);
1959		xfer->siop_tables.t_msgout.addr = htole32(dsa);
1960		xfer->siop_tables.t_msgin.count= htole32(1);
1961		xfer->siop_tables.t_msgin.addr = htole32(dsa +
1962		    offsetof(struct siop_common_xfer, msg_in));
1963		xfer->siop_tables.t_extmsgin.count= htole32(2);
1964		xfer->siop_tables.t_extmsgin.addr = htole32(dsa +
1965		    offsetof(struct siop_common_xfer, msg_in) + 1);
1966		xfer->siop_tables.t_extmsgdata.addr = htole32(dsa +
1967		    offsetof(struct siop_common_xfer, msg_in) + 3);
1968		xfer->siop_tables.t_status.count= htole32(1);
1969		xfer->siop_tables.t_status.addr = htole32(dsa +
1970		    offsetof(struct siop_common_xfer, status));
1971
1972		s = splbio();
1973		TAILQ_INSERT_TAIL(&sc->free_list, &newcbd->cmds[i], next);
1974		splx(s);
1975#ifdef SIOP_DEBUG
1976		printf("tables[%d]: in=0x%x out=0x%x status=0x%x\n", i,
1977		    le32toh(newcbd->cmds[i].cmd_tables->t_msgin.addr),
1978		    le32toh(newcbd->cmds[i].cmd_tables->t_msgout.addr),
1979		    le32toh(newcbd->cmds[i].cmd_tables->t_status.addr));
1980#endif
1981	}
1982	s = splbio();
1983	TAILQ_INSERT_TAIL(&sc->cmds, newcbd, next);
1984	sc->sc_c.sc_adapt.adapt_openings += SIOP_NCMDPB;
1985	splx(s);
1986	return;
1987bad0:
1988	bus_dmamap_unload(sc->sc_c.sc_dmat, newcbd->xferdma);
1989	bus_dmamap_destroy(sc->sc_c.sc_dmat, newcbd->xferdma);
1990bad1:
1991	bus_dmamem_free(sc->sc_c.sc_dmat, &seg, rseg);
1992bad2:
1993	free(newcbd->cmds, M_DEVBUF);
1994bad3:
1995	free(newcbd, M_DEVBUF);
1996}
1997
1998void
1999esiop_moretagtbl(struct esiop_softc *sc)
2000{
2001	int error, i, j, s;
2002	bus_dma_segment_t seg;
2003	int rseg;
2004	struct esiop_dsatblblk *newtblblk;
2005	struct esiop_dsatbl *newtbls;
2006	uint32_t *tbls;
2007
2008	/* allocate a new list head */
2009	newtblblk = malloc(sizeof(struct esiop_dsatblblk),
2010	    M_DEVBUF, M_NOWAIT|M_ZERO);
2011	if (newtblblk == NULL) {
2012		aprint_error_dev(sc->sc_c.sc_dev,
2013		    "can't allocate memory for tag DSA table block\n");
2014		return;
2015	}
2016
2017	/* allocate tbl list */
2018	newtbls = malloc(sizeof(struct esiop_dsatbl) * ESIOP_NTPB,
2019	    M_DEVBUF, M_NOWAIT|M_ZERO);
2020	if (newtbls == NULL) {
2021		aprint_error_dev(sc->sc_c.sc_dev,
2022		    "can't allocate memory for command descriptors\n");
2023		goto bad3;
2024	}
2025	error = bus_dmamem_alloc(sc->sc_c.sc_dmat, PAGE_SIZE, PAGE_SIZE, 0,
2026	    &seg, 1, &rseg, BUS_DMA_NOWAIT);
2027	if (error) {
2028		aprint_error_dev(sc->sc_c.sc_dev,
2029		    "unable to allocate tbl DMA memory, error = %d\n", error);
2030		goto bad2;
2031	}
2032	error = bus_dmamem_map(sc->sc_c.sc_dmat, &seg, rseg, PAGE_SIZE,
2033	    (void *)&tbls, BUS_DMA_NOWAIT|BUS_DMA_COHERENT);
2034	if (error) {
2035		aprint_error_dev(sc->sc_c.sc_dev,
2036		    "unable to map tbls DMA memory, error = %d\n", error);
2037		goto bad2;
2038	}
2039	error = bus_dmamap_create(sc->sc_c.sc_dmat, PAGE_SIZE, 1, PAGE_SIZE, 0,
2040	    BUS_DMA_NOWAIT, &newtblblk->blkmap);
2041	if (error) {
2042		aprint_error_dev(sc->sc_c.sc_dev,
2043		    "unable to create tbl DMA map, error = %d\n", error);
2044		goto bad1;
2045	}
2046	error = bus_dmamap_load(sc->sc_c.sc_dmat, newtblblk->blkmap,
2047	    tbls, PAGE_SIZE, NULL, BUS_DMA_NOWAIT);
2048	if (error) {
2049		aprint_error_dev(sc->sc_c.sc_dev,
2050		    "unable to load tbl DMA map, error = %d\n", error);
2051		goto bad0;
2052	}
2053#ifdef SIOP_DEBUG
2054	printf("%s: alloc new tag DSA table at PHY addr 0x%lx\n",
2055	    device_xname(sc->sc_c.sc_dev),
2056	    (unsigned long)newtblblk->blkmap->dm_segs[0].ds_addr);
2057#endif
2058	for (i = 0; i < ESIOP_NTPB; i++) {
2059		newtbls[i].tblblk = newtblblk;
2060		newtbls[i].tbl = &tbls[i * ESIOP_NTAG];
2061		newtbls[i].tbl_offset = i * ESIOP_NTAG * sizeof(uint32_t);
2062		newtbls[i].tbl_dsa = newtblblk->blkmap->dm_segs[0].ds_addr +
2063		    newtbls[i].tbl_offset;
2064		for (j = 0; j < ESIOP_NTAG; j++)
2065			newtbls[i].tbl[j] = j;
2066		s = splbio();
2067		TAILQ_INSERT_TAIL(&sc->free_tagtbl, &newtbls[i], next);
2068		splx(s);
2069	}
2070	s = splbio();
2071	TAILQ_INSERT_TAIL(&sc->tag_tblblk, newtblblk, next);
2072	splx(s);
2073	return;
2074bad0:
2075	bus_dmamap_unload(sc->sc_c.sc_dmat, newtblblk->blkmap);
2076	bus_dmamap_destroy(sc->sc_c.sc_dmat, newtblblk->blkmap);
2077bad1:
2078	bus_dmamem_free(sc->sc_c.sc_dmat, &seg, rseg);
2079bad2:
2080	free(newtbls, M_DEVBUF);
2081bad3:
2082	free(newtblblk, M_DEVBUF);
2083}
2084
2085void
2086esiop_update_scntl3(struct esiop_softc *sc,
2087    struct siop_common_target *_siop_target)
2088{
2089	struct esiop_target *esiop_target = (struct esiop_target *)_siop_target;
2090
2091	esiop_script_write(sc, esiop_target->lun_table_offset,
2092	    esiop_target->target_c.id);
2093	esiop_script_sync(sc, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2094}
2095
2096void
2097esiop_add_dev(struct esiop_softc *sc, int target, int lun)
2098{
2099	struct esiop_target *esiop_target =
2100	    (struct esiop_target *)sc->sc_c.targets[target];
2101	struct esiop_lun *esiop_lun = esiop_target->esiop_lun[lun];
2102
2103	if (esiop_lun->lun_tagtbl != NULL)
2104		return; /* already allocated */
2105
2106	/* we need a tag DSA table */
2107	esiop_lun->lun_tagtbl= TAILQ_FIRST(&sc->free_tagtbl);
2108	if (esiop_lun->lun_tagtbl == NULL) {
2109		esiop_moretagtbl(sc);
2110		esiop_lun->lun_tagtbl= TAILQ_FIRST(&sc->free_tagtbl);
2111		if (esiop_lun->lun_tagtbl == NULL) {
2112			/* no resources, run untagged */
2113			esiop_target->target_c.flags &= ~TARF_TAG;
2114			return;
2115		}
2116	}
2117	TAILQ_REMOVE(&sc->free_tagtbl, esiop_lun->lun_tagtbl, next);
2118	/* Update LUN DSA table */
2119	esiop_script_write(sc, esiop_target->lun_table_offset +
2120	   lun * 2 + A_target_luntbl_tag / sizeof(uint32_t),
2121	    esiop_lun->lun_tagtbl->tbl_dsa);
2122	esiop_script_sync(sc, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2123}
2124
2125void
2126esiop_del_dev(struct esiop_softc *sc, int target, int lun)
2127{
2128	struct esiop_target *esiop_target;
2129
2130#ifdef SIOP_DEBUG
2131		printf("%s:%d:%d: free lun sw entry\n",
2132		    device_xname(sc->sc_c.sc_dev), target, lun);
2133#endif
2134	if (sc->sc_c.targets[target] == NULL)
2135		return;
2136	esiop_target = (struct esiop_target *)sc->sc_c.targets[target];
2137	free(esiop_target->esiop_lun[lun], M_DEVBUF);
2138	esiop_target->esiop_lun[lun] = NULL;
2139}
2140
2141void
2142esiop_target_register(struct esiop_softc *sc, uint32_t target)
2143{
2144	struct esiop_target *esiop_target =
2145	    (struct esiop_target *)sc->sc_c.targets[target];
2146	struct esiop_lun *esiop_lun;
2147	int lun;
2148
2149	/* get a DSA table for this target */
2150	esiop_target->lun_table_offset = sc->sc_free_offset;
2151	sc->sc_free_offset += sc->sc_c.sc_chan.chan_nluns * 2 + 2;
2152#ifdef SIOP_DEBUG
2153	printf("%s: lun table for target %d offset %d free offset %d\n",
2154	    device_xname(sc->sc_c.sc_dev), target,
2155	    esiop_target->lun_table_offset,
2156	    sc->sc_free_offset);
2157#endif
2158	/* first 32 bytes are ID (for select) */
2159	esiop_script_write(sc, esiop_target->lun_table_offset,
2160	    esiop_target->target_c.id);
2161	/* Record this table in the target DSA table */
2162	esiop_script_write(sc,
2163	    sc->sc_target_table_offset + target,
2164	    (esiop_target->lun_table_offset * sizeof(uint32_t)) +
2165	    sc->sc_c.sc_scriptaddr);
2166	/* if we have a tag table, register it */
2167	for (lun = 0; lun < sc->sc_c.sc_chan.chan_nluns; lun++) {
2168		esiop_lun = esiop_target->esiop_lun[lun];
2169		if (esiop_lun == NULL)
2170			continue;
2171		if (esiop_lun->lun_tagtbl)
2172			esiop_script_write(sc, esiop_target->lun_table_offset +
2173			   lun * 2 + A_target_luntbl_tag / sizeof(uint32_t),
2174			    esiop_lun->lun_tagtbl->tbl_dsa);
2175	}
2176	esiop_script_sync(sc, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2177}
2178
2179#ifdef SIOP_STATS
2180void
2181esiop_printstats(void)
2182{
2183
2184	printf("esiop_stat_intr %d\n", esiop_stat_intr);
2185	printf("esiop_stat_intr_shortxfer %d\n", esiop_stat_intr_shortxfer);
2186	printf("esiop_stat_intr_xferdisc %d\n", esiop_stat_intr_xferdisc);
2187	printf("esiop_stat_intr_sdp %d\n", esiop_stat_intr_sdp);
2188	printf("esiop_stat_intr_done %d\n", esiop_stat_intr_done);
2189	printf("esiop_stat_intr_lunresel %d\n", esiop_stat_intr_lunresel);
2190	printf("esiop_stat_intr_qfull %d\n", esiop_stat_intr_qfull);
2191}
2192#endif
2193