1/*	$NetBSD: siop.c,v 1.56 2002/04/23 10:38:37 bouyer Exp $	*/
2
3/*
4 * Copyright (c) 2000 Manuel Bouyer.
5 *
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions
8 * are met:
9 * 1. Redistributions of source code must retain the above copyright
10 *    notice, this list of conditions and the following disclaimer.
11 * 2. Redistributions in binary form must reproduce the above copyright
12 *    notice, this list of conditions and the following disclaimer in the
13 *    documentation and/or other materials provided with the distribution.
14 * 3. All advertising materials mentioning features or use of this software
15 *    must display the following acknowledgement:
16 *	This product includes software developed by Manuel Bouyer
17 * 4. The name of the author may not be used to endorse or promote products
18 *    derived from this software without specific prior written permission.
19 *
20 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
21 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
22 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
23 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
24 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
25 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
26 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
27 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
28 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
29 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
30 *
31 */
32
33/* SYM53c7/8xx PCI-SCSI I/O Processors driver */
34
35#include <sys/cdefs.h>
36__KERNEL_RCSID(0, "$NetBSD: siop.c,v 1.56 2002/04/23 10:38:37 bouyer Exp $");
37
38#include <sys/param.h>
39#include <sys/systm.h>
40#include <sys/device.h>
41#include <sys/malloc.h>
42#include <sys/buf.h>
43#include <sys/kernel.h>
44
45#include <uvm/uvm_extern.h>
46
47#include <machine/endian.h>
48#include <machine/bus.h>
49
50#include <dev/microcode/siop/siop.out>
51
52#include <dev/scsipi/scsi_all.h>
53#include <dev/scsipi/scsi_message.h>
54#include <dev/scsipi/scsipi_all.h>
55
56#include <dev/scsipi/scsiconf.h>
57
58#include <dev/ic/siopreg.h>
59#include <dev/ic/siopvar_common.h>
60#include <dev/ic/siopvar.h>
61
62#include "opt_siop.h"
63
64#ifndef DEBUG
65#undef DEBUG
66#endif
67#undef SIOP_DEBUG
68#undef SIOP_DEBUG_DR
69#undef SIOP_DEBUG_INTR
70#undef SIOP_DEBUG_SCHED
71#undef DUMP_SCRIPT
72
73#define SIOP_STATS
74
75#ifndef SIOP_DEFAULT_TARGET
76#define SIOP_DEFAULT_TARGET 7
77#endif
78
79/* number of cmd descriptors per block */
80#define SIOP_NCMDPB (PAGE_SIZE / sizeof(struct siop_xfer))
81
82/* Number of scheduler slots (needs to match script) */
83#define SIOP_NSLOTS 40
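/*
 * Each scheduler slot is a two-word SCRIPTS instruction at
 * Ent_script_sched_slot0 + slot * 8.  0x80000000 (JUMP foo, IF FALSE,
 * i.e. never taken) marks a free slot; siop_start() patches it to
 * 0x80080000 so the jump to the command's ldsa_select code is taken.
 */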
84
85void	siop_reset __P((struct siop_softc *));
86void	siop_handle_reset __P((struct siop_softc *));
87int	siop_handle_qtag_reject __P((struct siop_cmd *));
88void	siop_scsicmd_end __P((struct siop_cmd *));
89void	siop_unqueue __P((struct siop_softc *, int, int));
90static void	siop_start __P((struct siop_softc *, struct siop_cmd *));
91void 	siop_timeout __P((void *));
92int	siop_scsicmd __P((struct scsipi_xfer *));
93void	siop_scsipi_request __P((struct scsipi_channel *,
94			scsipi_adapter_req_t, void *));
95void	siop_dump_script __P((struct siop_softc *));
96void	siop_morecbd __P((struct siop_softc *));
97struct siop_lunsw *siop_get_lunsw __P((struct siop_softc *));
98void	siop_add_reselsw __P((struct siop_softc *, int));
99void	siop_update_scntl3 __P((struct siop_softc *,
100			struct siop_common_target *));
101
102#ifdef SIOP_STATS
103static int siop_stat_intr = 0;
104static int siop_stat_intr_shortxfer = 0;
105static int siop_stat_intr_sdp = 0;
106static int siop_stat_intr_done = 0;
107static int siop_stat_intr_xferdisc = 0;
108static int siop_stat_intr_lunresel = 0;
109static int siop_stat_intr_qfull = 0;
110void siop_printstats __P((void));
111#define INCSTAT(x) x++
112#else
113#define INCSTAT(x)
114#endif
115
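/*
 * The SCRIPTS program either runs from on-chip RAM (SF_CHIP_RAM) or from
 * a DMA-able copy in host memory; the inline helpers below hide that
 * difference for syncs, reads and writes of the script area.
 */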
116static __inline__ void siop_script_sync __P((struct siop_softc *, int));
117static __inline__ void
118siop_script_sync(sc, ops)
119	struct siop_softc *sc;
120	int ops;
121{
122	if ((sc->sc_c.features & SF_CHIP_RAM) == 0)
123		bus_dmamap_sync(sc->sc_c.sc_dmat, sc->sc_c.sc_scriptdma, 0,
124		    PAGE_SIZE, ops);
125}
126
127static __inline__ u_int32_t siop_script_read __P((struct siop_softc *, u_int));
128static __inline__ u_int32_t
129siop_script_read(sc, offset)
130	struct siop_softc *sc;
131	u_int offset;
132{
133	if (sc->sc_c.features & SF_CHIP_RAM) {
134		return bus_space_read_4(sc->sc_c.sc_ramt, sc->sc_c.sc_ramh,
135		    offset * 4);
136	} else {
137		return le32toh(sc->sc_c.sc_script[offset]);
138	}
139}
140
141static __inline__ void siop_script_write __P((struct siop_softc *, u_int,
142	u_int32_t));
143static __inline__ void
144siop_script_write(sc, offset, val)
145	struct siop_softc *sc;
146	u_int offset;
147	u_int32_t val;
148{
149	if (sc->sc_c.features & SF_CHIP_RAM) {
150		bus_space_write_4(sc->sc_c.sc_ramt, sc->sc_c.sc_ramh,
151		    offset * 4, val);
152	} else {
153		sc->sc_c.sc_script[offset] = htole32(val);
154	}
155}
156
157void
158siop_attach(sc)
159	struct siop_softc *sc;
160{
161	int error, i;
162	bus_dma_segment_t seg;
163	int rseg;
164
165	/*
166	 * Allocate DMA-safe memory for the script and map it.
167	 */
168	if ((sc->sc_c.features & SF_CHIP_RAM) == 0) {
169		error = bus_dmamem_alloc(sc->sc_c.sc_dmat, PAGE_SIZE,
170		    PAGE_SIZE, 0, &seg, 1, &rseg, BUS_DMA_NOWAIT);
171		if (error) {
172			printf("%s: unable to allocate script DMA memory, "
173			    "error = %d\n", sc->sc_c.sc_dev.dv_xname, error);
174			return;
175		}
176		error = bus_dmamem_map(sc->sc_c.sc_dmat, &seg, rseg, PAGE_SIZE,
177		    (caddr_t *)&sc->sc_c.sc_script,
178		    BUS_DMA_NOWAIT|BUS_DMA_COHERENT);
179		if (error) {
180			printf("%s: unable to map script DMA memory, "
181			    "error = %d\n", sc->sc_c.sc_dev.dv_xname, error);
182			return;
183		}
184		error = bus_dmamap_create(sc->sc_c.sc_dmat, PAGE_SIZE, 1,
185		    PAGE_SIZE, 0, BUS_DMA_NOWAIT, &sc->sc_c.sc_scriptdma);
186		if (error) {
187			printf("%s: unable to create script DMA map, "
188			    "error = %d\n", sc->sc_c.sc_dev.dv_xname, error);
189			return;
190		}
191		error = bus_dmamap_load(sc->sc_c.sc_dmat, sc->sc_c.sc_scriptdma,
192		    sc->sc_c.sc_script, PAGE_SIZE, NULL, BUS_DMA_NOWAIT);
193		if (error) {
194			printf("%s: unable to load script DMA map, "
195			    "error = %d\n", sc->sc_c.sc_dev.dv_xname, error);
196			return;
197		}
198		sc->sc_c.sc_scriptaddr =
199		    sc->sc_c.sc_scriptdma->dm_segs[0].ds_addr;
200		sc->sc_c.ram_size = PAGE_SIZE;
201	}
202	TAILQ_INIT(&sc->free_list);
203	TAILQ_INIT(&sc->cmds);
204	TAILQ_INIT(&sc->lunsw_list);
205	sc->sc_currschedslot = 0;
206#ifdef SIOP_DEBUG
207	printf("%s: script size = %d, PHY addr=0x%x, VIRT=%p\n",
208	    sc->sc_c.sc_dev.dv_xname, (int)sizeof(siop_script),
209	    (u_int32_t)sc->sc_c.sc_scriptaddr, sc->sc_c.sc_script);
210#endif
211
212	sc->sc_c.sc_adapt.adapt_dev = &sc->sc_c.sc_dev;
213	sc->sc_c.sc_adapt.adapt_nchannels = 1;
214	sc->sc_c.sc_adapt.adapt_openings = 0;
215	sc->sc_c.sc_adapt.adapt_max_periph = SIOP_NTAG - 1;
216	sc->sc_c.sc_adapt.adapt_ioctl = siop_ioctl;
217	sc->sc_c.sc_adapt.adapt_minphys = minphys;
218	sc->sc_c.sc_adapt.adapt_request = siop_scsipi_request;
219
220	memset(&sc->sc_c.sc_chan, 0, sizeof(sc->sc_c.sc_chan));
221	sc->sc_c.sc_chan.chan_adapter = &sc->sc_c.sc_adapt;
222	sc->sc_c.sc_chan.chan_bustype = &scsi_bustype;
223	sc->sc_c.sc_chan.chan_channel = 0;
224	sc->sc_c.sc_chan.chan_flags = SCSIPI_CHAN_CANGROW;
225	sc->sc_c.sc_chan.chan_ntargets =
226	    (sc->sc_c.features & SF_BUS_WIDE) ? 16 : 8;
227	sc->sc_c.sc_chan.chan_nluns = 8;
228	sc->sc_c.sc_chan.chan_id =
229	    bus_space_read_1(sc->sc_c.sc_rt, sc->sc_c.sc_rh, SIOP_SCID);
230	if (sc->sc_c.sc_chan.chan_id == 0 ||
231	    sc->sc_c.sc_chan.chan_id >= sc->sc_c.sc_chan.chan_ntargets)
232		sc->sc_c.sc_chan.chan_id = SIOP_DEFAULT_TARGET;
233
234	for (i = 0; i < 16; i++)
235		sc->sc_c.targets[i] = NULL;
236
237	/* find min/max sync period for this chip */
238	sc->sc_c.maxsync = 0;
239	sc->sc_c.minsync = 255;
240	for (i = 0; i < sizeof(scf_period) / sizeof(scf_period[0]); i++) {
241		if (sc->sc_c.clock_period != scf_period[i].clock)
242			continue;
243		if (sc->sc_c.maxsync < scf_period[i].period)
244			sc->sc_c.maxsync = scf_period[i].period;
245		if (sc->sc_c.minsync > scf_period[i].period)
246			sc->sc_c.minsync = scf_period[i].period;
247	}
248	if (sc->sc_c.maxsync == 255 || sc->sc_c.minsync == 0)
249		panic("siop: can't find my sync parameters\n");
250	/* Do a bus reset, so that devices fall back to narrow/async */
251	siop_resetbus(&sc->sc_c);
252	/*
253	 * siop_reset() will reset the chip, thus clearing pending interrupts
254	 */
255	siop_reset(sc);
256#ifdef DUMP_SCRIPT
257	siop_dump_script(sc);
258#endif
259
260	config_found((struct device*)sc, &sc->sc_c.sc_chan, scsiprint);
261}
262
263void
264siop_reset(sc)
265	struct siop_softc *sc;
266{
267	int i, j;
268	struct siop_lunsw *lunsw;
269
270	siop_common_reset(&sc->sc_c);
271
272	/* copy and patch the script */
273	if (sc->sc_c.features & SF_CHIP_RAM) {
274		bus_space_write_region_4(sc->sc_c.sc_ramt, sc->sc_c.sc_ramh, 0,
275		    siop_script, sizeof(siop_script) / sizeof(siop_script[0]));
276		for (j = 0; j <
277		    (sizeof(E_abs_msgin_Used) / sizeof(E_abs_msgin_Used[0]));
278		    j++) {
279			bus_space_write_4(sc->sc_c.sc_ramt, sc->sc_c.sc_ramh,
280			    E_abs_msgin_Used[j] * 4,
281			    sc->sc_c.sc_scriptaddr + Ent_msgin_space);
282		}
283		if (sc->sc_c.features & SF_CHIP_LED0) {
284			bus_space_write_region_4(sc->sc_c.sc_ramt,
285			    sc->sc_c.sc_ramh,
286			    Ent_led_on1, siop_led_on,
287			    sizeof(siop_led_on) / sizeof(siop_led_on[0]));
288			bus_space_write_region_4(sc->sc_c.sc_ramt,
289			    sc->sc_c.sc_ramh,
290			    Ent_led_on2, siop_led_on,
291			    sizeof(siop_led_on) / sizeof(siop_led_on[0]));
292			bus_space_write_region_4(sc->sc_c.sc_ramt,
293			    sc->sc_c.sc_ramh,
294			    Ent_led_off, siop_led_off,
295			    sizeof(siop_led_off) / sizeof(siop_led_off[0]));
296		}
297	} else {
298		for (j = 0;
299		    j < (sizeof(siop_script) / sizeof(siop_script[0])); j++) {
300			sc->sc_c.sc_script[j] = htole32(siop_script[j]);
301		}
302		for (j = 0; j <
303		    (sizeof(E_abs_msgin_Used) / sizeof(E_abs_msgin_Used[0]));
304		    j++) {
305			sc->sc_c.sc_script[E_abs_msgin_Used[j]] =
306			    htole32(sc->sc_c.sc_scriptaddr + Ent_msgin_space);
307		}
308		if (sc->sc_c.features & SF_CHIP_LED0) {
309			for (j = 0; j < (sizeof(siop_led_on) /
310			    sizeof(siop_led_on[0])); j++)
311				sc->sc_c.sc_script[
312				    Ent_led_on1 / sizeof(siop_led_on[0]) + j
313				    ] = htole32(siop_led_on[j]);
314			for (j = 0; j < (sizeof(siop_led_on) /
315			    sizeof(siop_led_on[0])); j++)
316				sc->sc_c.sc_script[
317				    Ent_led_on2 / sizeof(siop_led_on[0]) + j
318				    ] = htole32(siop_led_on[j]);
319			for (j = 0; j < (sizeof(siop_led_off) /
320			    sizeof(siop_led_off[0])); j++)
321				sc->sc_c.sc_script[
322				   Ent_led_off / sizeof(siop_led_off[0]) + j
323				   ] = htole32(siop_led_off[j]);
324		}
325	}
326	sc->script_free_lo = sizeof(siop_script) / sizeof(siop_script[0]);
327	sc->script_free_hi = sc->sc_c.ram_size / 4;
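	/*
	 * Script RAM between script_free_lo and script_free_hi (counted in
	 * 32-bit words) is what siop_get_lunsw() hands out for the lun
	 * switch entries.
	 */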
328
329	/* free used and unused lun switches */
330	while((lunsw = TAILQ_FIRST(&sc->lunsw_list)) != NULL) {
331#ifdef SIOP_DEBUG
332		printf("%s: free lunsw at offset %d\n",
333				sc->sc_c.sc_dev.dv_xname, lunsw->lunsw_off);
334#endif
335		TAILQ_REMOVE(&sc->lunsw_list, lunsw, next);
336		free(lunsw, M_DEVBUF);
337	}
338	TAILQ_INIT(&sc->lunsw_list);
339	/* restore reselect switch */
340	for (i = 0; i < sc->sc_c.sc_chan.chan_ntargets; i++) {
341		struct siop_target *target;
342		if (sc->sc_c.targets[i] == NULL)
343			continue;
344#ifdef SIOP_DEBUG
345		printf("%s: restore sw for target %d\n",
346				sc->sc_c.sc_dev.dv_xname, i);
347#endif
348		target = (struct siop_target *)sc->sc_c.targets[i];
349		free(target->lunsw, M_DEVBUF);
350		target->lunsw = siop_get_lunsw(sc);
351		if (target->lunsw == NULL) {
352			printf("%s: can't alloc lunsw for target %d\n",
353			    sc->sc_c.sc_dev.dv_xname, i);
354			break;
355		}
356		siop_add_reselsw(sc, i);
357	}
358
359	/* start script */
360	if ((sc->sc_c.features & SF_CHIP_RAM) == 0) {
361		bus_dmamap_sync(sc->sc_c.sc_dmat, sc->sc_c.sc_scriptdma, 0,
362		    PAGE_SIZE, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
363	}
364	bus_space_write_4(sc->sc_c.sc_rt, sc->sc_c.sc_rh, SIOP_DSP,
365	    sc->sc_c.sc_scriptaddr + Ent_reselect);
366}
367
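/*
 * CALL_SCRIPT(ent) (re)starts the SCRIPTS processor at entry point 'ent'
 * (a byte offset from the start of the script) by writing the DSP
 * register; the "#if 0" variant also prints the DSA and DSP being used.
 */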
368#if 0
369#define CALL_SCRIPT(ent) do {\
370	printf ("start script DSA 0x%lx DSP 0x%lx\n", \
371	    siop_cmd->cmd_c.dsa, \
372	    sc->sc_c.sc_scriptaddr + ent); \
373bus_space_write_4(sc->sc_c.sc_rt, sc->sc_c.sc_rh, SIOP_DSP, sc->sc_c.sc_scriptaddr + ent); \
374} while (0)
375#else
376#define CALL_SCRIPT(ent) do {\
377bus_space_write_4(sc->sc_c.sc_rt, sc->sc_c.sc_rh, SIOP_DSP, sc->sc_c.sc_scriptaddr + ent); \
378} while (0)
379#endif
380
381int
382siop_intr(v)
383	void *v;
384{
385	struct siop_softc *sc = v;
386	struct siop_target *siop_target;
387	struct siop_cmd *siop_cmd;
388	struct siop_lun *siop_lun;
389	struct scsipi_xfer *xs;
390	int istat, sist, sstat1, dstat;
391	u_int32_t irqcode;
392	int need_reset = 0;
393	int offset, target, lun, tag;
394	bus_addr_t dsa;
395	struct siop_cbd *cbdp;
396	int freetarget = 0;
397	int restart = 0;
398
399	istat = bus_space_read_1(sc->sc_c.sc_rt, sc->sc_c.sc_rh, SIOP_ISTAT);
400	if ((istat & (ISTAT_INTF | ISTAT_DIP | ISTAT_SIP)) == 0)
401		return 0;
402	INCSTAT(siop_stat_intr);
403	if (istat & ISTAT_INTF) {
404		printf("INTRF\n");
405		bus_space_write_1(sc->sc_c.sc_rt, sc->sc_c.sc_rh,
406		    SIOP_ISTAT, ISTAT_INTF);
407	}
408	/* use DSA to find the current siop_cmd */
409	dsa = bus_space_read_4(sc->sc_c.sc_rt, sc->sc_c.sc_rh, SIOP_DSA);
410	for (cbdp = TAILQ_FIRST(&sc->cmds); cbdp != NULL;
411	    cbdp = TAILQ_NEXT(cbdp, next)) {
412		if (dsa >= cbdp->xferdma->dm_segs[0].ds_addr &&
413	    	    dsa < cbdp->xferdma->dm_segs[0].ds_addr + PAGE_SIZE) {
414			dsa -= cbdp->xferdma->dm_segs[0].ds_addr;
415			siop_cmd = &cbdp->cmds[dsa / sizeof(struct siop_xfer)];
416			siop_table_sync(siop_cmd,
417			    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
418			break;
419		}
420	}
421	if (cbdp == NULL) {
422		siop_cmd = NULL;
423	}
424	if (siop_cmd) {
425		xs = siop_cmd->cmd_c.xs;
426		siop_target = (struct siop_target *)siop_cmd->cmd_c.siop_target;
427		target = siop_cmd->cmd_c.xs->xs_periph->periph_target;
428		lun = siop_cmd->cmd_c.xs->xs_periph->periph_lun;
429		tag = siop_cmd->cmd_c.tag;
430		siop_lun = siop_target->siop_lun[lun];
431#ifdef DIAGNOSTIC
432		if (siop_cmd->cmd_c.status != CMDST_ACTIVE) {
433 			printf("siop_cmd (lun %d) for DSA 0x%x "
434			    "not active (%d)\n", lun, (u_int)dsa,
435			    siop_cmd->cmd_c.status);
436			xs = NULL;
437			siop_target = NULL;
438			target = -1;
439			lun = -1;
440			tag = -1;
441			siop_lun = NULL;
442			siop_cmd = NULL;
443		} else if (siop_lun->siop_tag[tag].active != siop_cmd) {
444			printf("siop_cmd (lun %d tag %d) not in siop_lun "
445			    "active (%p != %p)\n", lun, tag, siop_cmd,
446			    siop_lun->siop_tag[tag].active);
447		}
448#endif
449	} else {
450		xs = NULL;
451		siop_target = NULL;
452		target = -1;
453		lun = -1;
454		tag = -1;
455		siop_lun = NULL;
456	}
457	if (istat & ISTAT_DIP) {
458		dstat = bus_space_read_1(sc->sc_c.sc_rt, sc->sc_c.sc_rh,
459		    SIOP_DSTAT);
460		if (dstat & DSTAT_SSI) {
461			printf("single step dsp 0x%08x dsa 0x%08x\n",
462			    (int)(bus_space_read_4(sc->sc_c.sc_rt,
463			    sc->sc_c.sc_rh, SIOP_DSP) -
464			    sc->sc_c.sc_scriptaddr),
465			    bus_space_read_4(sc->sc_c.sc_rt, sc->sc_c.sc_rh,
466				SIOP_DSA));
467			if ((dstat & ~(DSTAT_DFE | DSTAT_SSI)) == 0 &&
468			    (istat & ISTAT_SIP) == 0) {
469				bus_space_write_1(sc->sc_c.sc_rt,
470				    sc->sc_c.sc_rh, SIOP_DCNTL,
471				    bus_space_read_1(sc->sc_c.sc_rt,
472				    sc->sc_c.sc_rh, SIOP_DCNTL) | DCNTL_STD);
473			}
474			return 1;
475		}
476		if (dstat & ~(DSTAT_SIR | DSTAT_DFE | DSTAT_SSI)) {
477			printf("DMA IRQ:");
478			if (dstat & DSTAT_IID)
479				printf(" Illegal instruction");
480			if (dstat & DSTAT_ABRT)
481				printf(" abort");
482			if (dstat & DSTAT_BF)
483				printf(" bus fault");
484			if (dstat & DSTAT_MDPE)
485				printf(" parity");
486			if (dstat & DSTAT_DFE)
487				printf(" dma fifo empty");
488			printf(", DSP=0x%x DSA=0x%x: ",
489			    (int)(bus_space_read_4(sc->sc_c.sc_rt, sc->sc_c.sc_rh,
490				SIOP_DSP) - sc->sc_c.sc_scriptaddr),
491			    bus_space_read_4(sc->sc_c.sc_rt, sc->sc_c.sc_rh, SIOP_DSA));
492			if (siop_cmd)
493				printf("last msg_in=0x%x status=0x%x\n",
494				    siop_cmd->cmd_tables->msg_in[0],
495				    le32toh(siop_cmd->cmd_tables->status));
496			else
497				printf("%s: current DSA invalid\n",
498				    sc->sc_c.sc_dev.dv_xname);
499			need_reset = 1;
500		}
501	}
502	if (istat & ISTAT_SIP) {
503		if (istat & ISTAT_DIP)
504			delay(10);
505		/*
506		 * Can't read sist0 & sist1 independently, or we have to
507		 * insert a delay between the two reads.
508		 */
509		sist = bus_space_read_2(sc->sc_c.sc_rt, sc->sc_c.sc_rh,
510		    SIOP_SIST0);
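		/*
		 * sist now holds both SIST0 (low byte) and SIST1 (high
		 * byte), hence the (SIST1_xxx << 8) tests below.
		 */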
511		sstat1 = bus_space_read_1(sc->sc_c.sc_rt, sc->sc_c.sc_rh,
512		    SIOP_SSTAT1);
513#ifdef SIOP_DEBUG_INTR
514		printf("scsi interrupt, sist=0x%x sstat1=0x%x "
515		    "DSA=0x%x DSP=0x%lx\n", sist,
516		    bus_space_read_1(sc->sc_c.sc_rt, sc->sc_c.sc_rh,
517			SIOP_SSTAT1),
518		    bus_space_read_4(sc->sc_c.sc_rt, sc->sc_c.sc_rh, SIOP_DSA),
519		    (u_long)(bus_space_read_4(sc->sc_c.sc_rt, sc->sc_c.sc_rh,
520			SIOP_DSP) -
521		    sc->sc_c.sc_scriptaddr));
522#endif
523		if (sist & SIST0_RST) {
524			siop_handle_reset(sc);
525			/* no table to flush here */
526			return 1;
527		}
528		if (sist & SIST0_SGE) {
529			if (siop_cmd)
530				scsipi_printaddr(xs->xs_periph);
531			else
532				printf("%s:", sc->sc_c.sc_dev.dv_xname);
533			printf("scsi gross error\n");
534			goto reset;
535		}
536		if ((sist & SIST0_MA) && need_reset == 0) {
537			if (siop_cmd) {
538				int scratcha0;
539				dstat = bus_space_read_1(sc->sc_c.sc_rt,
540				    sc->sc_c.sc_rh, SIOP_DSTAT);
541				/*
542				 * first restore DSA, in case we were in a S/G
543				 * operation.
544				 */
545				bus_space_write_4(sc->sc_c.sc_rt,
546				    sc->sc_c.sc_rh,
547				    SIOP_DSA, siop_cmd->cmd_c.dsa);
548				scratcha0 = bus_space_read_1(sc->sc_c.sc_rt,
549				    sc->sc_c.sc_rh, SIOP_SCRATCHA);
550				switch (sstat1 & SSTAT1_PHASE_MASK) {
551				case SSTAT1_PHASE_STATUS:
552					/*
553					 * previous phase may be aborted for any reason
554					 * (for example, the target has less data to
555					 * transfer than requested). Just go to status
556					 * and the command should terminate.
557					 */
558					INCSTAT(siop_stat_intr_shortxfer);
559					if ((dstat & DSTAT_DFE) == 0)
560						siop_clearfifo(&sc->sc_c);
561					/* no table to flush here */
562					CALL_SCRIPT(Ent_status);
563					return 1;
564				case SSTAT1_PHASE_MSGIN:
565					/*
566					 * target may be ready to disconnect
567					 * Save data pointers just in case.
568					 */
569					INCSTAT(siop_stat_intr_xferdisc);
570					if (scratcha0 & A_flag_data)
571						siop_sdp(&siop_cmd->cmd_c);
572					else if ((dstat & DSTAT_DFE) == 0)
573						siop_clearfifo(&sc->sc_c);
574					bus_space_write_1(sc->sc_c.sc_rt,
575					    sc->sc_c.sc_rh, SIOP_SCRATCHA,
576					    scratcha0 & ~A_flag_data);
577					siop_table_sync(siop_cmd,
578					    BUS_DMASYNC_PREREAD |
579					    BUS_DMASYNC_PREWRITE);
580					CALL_SCRIPT(Ent_msgin);
581					return 1;
582				}
583				printf("%s: unexpected phase mismatch %d\n",
584				    sc->sc_c.sc_dev.dv_xname,
585				    sstat1 & SSTAT1_PHASE_MASK);
586			} else {
587				printf("%s: phase mismatch without command\n",
588				    sc->sc_c.sc_dev.dv_xname);
589			}
590			need_reset = 1;
591		}
592		if (sist & SIST0_PAR) {
593			/* parity error, reset */
594			if (siop_cmd)
595				scsipi_printaddr(xs->xs_periph);
596			else
597				printf("%s:", sc->sc_c.sc_dev.dv_xname);
598			printf("parity error\n");
599			goto reset;
600		}
601		if ((sist & (SIST1_STO << 8)) && need_reset == 0) {
602			/* selection timeout, assume there's no device here */
603			if (siop_cmd) {
604				siop_cmd->cmd_c.status = CMDST_DONE;
605				xs->error = XS_SELTIMEOUT;
606				freetarget = 1;
607				goto end;
608			} else {
609				printf("%s: selection timeout without "
610				    "command\n", sc->sc_c.sc_dev.dv_xname);
611				need_reset = 1;
612			}
613		}
614		if (sist & SIST0_UDC) {
615			/*
616			 * unexpected disconnect. Usually the target signals
617			 * a fatal condition this way. Attempt to get sense.
618			 */
619			 if (siop_cmd) {
620				siop_cmd->cmd_tables->status =
621				    htole32(SCSI_CHECK);
622				goto end;
623			}
624			printf("%s: unexpected disconnect without "
625			    "command\n", sc->sc_c.sc_dev.dv_xname);
626			goto reset;
627		}
628		if (sist & (SIST1_SBMC << 8)) {
629			/* SCSI bus mode change */
630			if (siop_modechange(&sc->sc_c) == 0 || need_reset == 1)
631				goto reset;
632			if ((istat & ISTAT_DIP) && (dstat & DSTAT_SIR)) {
633				/*
634				 * we have a script interrupt, it will
635				 * restart the script.
636				 */
637				goto scintr;
638			}
639			/*
640			 * else we have to restart it ourselves, at the
641			 * interrupted instruction.
642			 */
643			bus_space_write_4(sc->sc_c.sc_rt, sc->sc_c.sc_rh,
644			    SIOP_DSP,
645			    bus_space_read_4(sc->sc_c.sc_rt, sc->sc_c.sc_rh,
646			    SIOP_DSP) - 8);
647			return 1;
648		}
649		/* Else it's an unhandled exception (for now). */
650		printf("%s: unhandled scsi interrupt, sist=0x%x sstat1=0x%x "
651		    "DSA=0x%x DSP=0x%x\n", sc->sc_c.sc_dev.dv_xname, sist,
652		    bus_space_read_1(sc->sc_c.sc_rt, sc->sc_c.sc_rh,
653			SIOP_SSTAT1),
654		    bus_space_read_4(sc->sc_c.sc_rt, sc->sc_c.sc_rh, SIOP_DSA),
655		    (int)(bus_space_read_4(sc->sc_c.sc_rt, sc->sc_c.sc_rh,
656			SIOP_DSP) - sc->sc_c.sc_scriptaddr));
657		if (siop_cmd) {
658			siop_cmd->cmd_c.status = CMDST_DONE;
659			xs->error = XS_SELTIMEOUT;
660			goto end;
661		}
662		need_reset = 1;
663	}
664	if (need_reset) {
665reset:
666		/* fatal error, reset the bus */
667		siop_resetbus(&sc->sc_c);
668		/* no table to flush here */
669		return 1;
670	}
671
672scintr:
673	if ((istat & ISTAT_DIP) && (dstat & DSTAT_SIR)) { /* script interrupt */
674		irqcode = bus_space_read_4(sc->sc_c.sc_rt, sc->sc_c.sc_rh,
675		    SIOP_DSPS);
676#ifdef SIOP_DEBUG_INTR
677		printf("script interrupt 0x%x\n", irqcode);
678#endif
679		/*
680		 * A script interrupt with no command, or with an inactive
681		 * command, is only valid for a reselect interrupt.
682		 */
683		if ((irqcode & 0x80) == 0) {
684			if (siop_cmd == NULL) {
685				printf(
686			"%s: script interrupt (0x%x) with invalid DSA !!!\n",
687				    sc->sc_c.sc_dev.dv_xname, irqcode);
688				goto reset;
689			}
690			if (siop_cmd->cmd_c.status != CMDST_ACTIVE) {
691				printf("%s: command with invalid status "
692				    "(IRQ code 0x%x current status %d) !\n",
693				    sc->sc_c.sc_dev.dv_xname,
694				    irqcode, siop_cmd->cmd_c.status);
695				xs = NULL;
696			}
697		}
698		switch(irqcode) {
699		case A_int_err:
700			printf("error, DSP=0x%x\n",
701			    (int)(bus_space_read_4(sc->sc_c.sc_rt,
702			    sc->sc_c.sc_rh, SIOP_DSP) - sc->sc_c.sc_scriptaddr));
703			if (xs) {
704				xs->error = XS_SELTIMEOUT;
705				goto end;
706			} else {
707				goto reset;
708			}
709		case A_int_reseltarg:
710			printf("%s: reselect with invalid target\n",
711				    sc->sc_c.sc_dev.dv_xname);
712			goto reset;
713		case A_int_resellun:
714			INCSTAT(siop_stat_intr_lunresel);
715			target = bus_space_read_1(sc->sc_c.sc_rt,
716			    sc->sc_c.sc_rh, SIOP_SCRATCHA) & 0xf;
717			lun = bus_space_read_1(sc->sc_c.sc_rt, sc->sc_c.sc_rh,
718			    SIOP_SCRATCHA + 1);
719			tag = bus_space_read_1(sc->sc_c.sc_rt, sc->sc_c.sc_rh,
720			    SIOP_SCRATCHA + 2);
721			siop_target =
722			    (struct siop_target *)sc->sc_c.targets[target];
723			if (siop_target == NULL) {
724				printf("%s: reselect with invalid target %d\n",
725				    sc->sc_c.sc_dev.dv_xname, target);
726				goto reset;
727			}
728			siop_lun = siop_target->siop_lun[lun];
729			if (siop_lun == NULL) {
730				printf("%s: target %d reselect with invalid "
731				    "lun %d\n", sc->sc_c.sc_dev.dv_xname,
732				    target, lun);
733				goto reset;
734			}
735			if (siop_lun->siop_tag[tag].active == NULL) {
736				printf("%s: target %d lun %d tag %d reselect "
737				    "without command\n",
738				    sc->sc_c.sc_dev.dv_xname,
739				    target, lun, tag);
740				goto reset;
741			}
742			siop_cmd = siop_lun->siop_tag[tag].active;
743			bus_space_write_4(sc->sc_c.sc_rt, sc->sc_c.sc_rh,
744			    SIOP_DSP, siop_cmd->cmd_c.dsa +
745			    sizeof(struct siop_common_xfer) +
746			    Ent_ldsa_reload_dsa);
747			siop_table_sync(siop_cmd, BUS_DMASYNC_PREWRITE);
748			return 1;
749		case A_int_reseltag:
750			printf("%s: reselect with invalid tag\n",
751				    sc->sc_c.sc_dev.dv_xname);
752			goto reset;
753		case A_int_msgin:
754		{
755			int msgin = bus_space_read_1(sc->sc_c.sc_rt,
756			    sc->sc_c.sc_rh, SIOP_SFBR);
757			if (msgin == MSG_MESSAGE_REJECT) {
758				int msg, extmsg;
759				if (siop_cmd->cmd_tables->msg_out[0] & 0x80) {
760					/*
761					 * message was part of an identify +
762					 * something else. Identify shouldn't
763					 * have been rejected.
764					 */
765					msg =
766					    siop_cmd->cmd_tables->msg_out[1];
767					extmsg =
768					    siop_cmd->cmd_tables->msg_out[3];
769				} else {
770					msg = siop_cmd->cmd_tables->msg_out[0];
771					extmsg =
772					    siop_cmd->cmd_tables->msg_out[2];
773				}
774				if (msg == MSG_MESSAGE_REJECT) {
775					/* MSG_REJECT for a MSG_REJECT! */
776					if (xs)
777						scsipi_printaddr(xs->xs_periph);
778					else
779						printf("%s: ",
780						   sc->sc_c.sc_dev.dv_xname);
781					printf("our reject message was "
782					    "rejected\n");
783					goto reset;
784				}
785				if (msg == MSG_EXTENDED &&
786				    extmsg == MSG_EXT_WDTR) {
787					/* WDTR rejected, initiate sync */
788					if ((siop_target->target_c.flags &
789					   TARF_SYNC) == 0) {
790						siop_target->target_c.status =
791						    TARST_OK;
792						siop_update_xfer_mode(&sc->sc_c,
793						    target);
794						/* no table to flush here */
795						CALL_SCRIPT(Ent_msgin_ack);
796						return 1;
797					}
798					siop_target->target_c.status =
799					    TARST_SYNC_NEG;
800					siop_sdtr_msg(&siop_cmd->cmd_c, 0,
801					    sc->sc_c.minsync, sc->sc_c.maxoff);
802					siop_table_sync(siop_cmd,
803					    BUS_DMASYNC_PREREAD |
804					    BUS_DMASYNC_PREWRITE);
805					CALL_SCRIPT(Ent_send_msgout);
806					return 1;
807				} else if (msg == MSG_EXTENDED &&
808				    extmsg == MSG_EXT_SDTR) {
809					/* sync rejected */
810					siop_target->target_c.offset = 0;
811					siop_target->target_c.period = 0;
812					siop_target->target_c.status = TARST_OK;
813					siop_update_xfer_mode(&sc->sc_c,
814					    target);
815					/* no table to flush here */
816					CALL_SCRIPT(Ent_msgin_ack);
817					return 1;
818				} else if (msg == MSG_SIMPLE_Q_TAG ||
819				    msg == MSG_HEAD_OF_Q_TAG ||
820				    msg == MSG_ORDERED_Q_TAG) {
821					if (siop_handle_qtag_reject(
822					    siop_cmd) == -1)
823						goto reset;
824					CALL_SCRIPT(Ent_msgin_ack);
825					return 1;
826				}
827				if (xs)
828					scsipi_printaddr(xs->xs_periph);
829				else
830					printf("%s: ",
831					    sc->sc_c.sc_dev.dv_xname);
832				if (msg == MSG_EXTENDED) {
833					printf("scsi message reject, extended "
834					    "message sent was 0x%x\n", extmsg);
835				} else {
836					printf("scsi message reject, message "
837					    "sent was 0x%x\n", msg);
838				}
839				/* no table to flush here */
840				CALL_SCRIPT(Ent_msgin_ack);
841				return 1;
842			}
843			if (xs)
844				scsipi_printaddr(xs->xs_periph);
845			else
846				printf("%s: ", sc->sc_c.sc_dev.dv_xname);
847			printf("unhandled message 0x%x\n",
848			    siop_cmd->cmd_tables->msg_in[0]);
849			siop_cmd->cmd_tables->msg_out[0] = MSG_MESSAGE_REJECT;
850			siop_cmd->cmd_tables->t_msgout.count= htole32(1);
851			siop_table_sync(siop_cmd,
852			    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
853			CALL_SCRIPT(Ent_send_msgout);
854			return 1;
855		}
856		case A_int_extmsgin:
857#ifdef SIOP_DEBUG_INTR
858			printf("extended message: msg 0x%x len %d\n",
859			    siop_cmd->cmd_tables->msg_in[2],
860			    siop_cmd->cmd_tables->msg_in[1]);
861#endif
862			if (siop_cmd->cmd_tables->msg_in[1] > 6)
863				printf("%s: extended message too big (%d)\n",
864				    sc->sc_c.sc_dev.dv_xname,
865				    siop_cmd->cmd_tables->msg_in[1]);
866			siop_cmd->cmd_tables->t_extmsgdata.count =
867			    htole32(siop_cmd->cmd_tables->msg_in[1] - 1);
868			siop_table_sync(siop_cmd,
869			    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
870			CALL_SCRIPT(Ent_get_extmsgdata);
871			return 1;
872		case A_int_extmsgdata:
873#ifdef SIOP_DEBUG_INTR
874			{
875			int i;
876			printf("extended message: 0x%x, data:",
877			    siop_cmd->cmd_tables->msg_in[2]);
878			for (i = 3; i < 2 + siop_cmd->cmd_tables->msg_in[1];
879			    i++)
880				printf(" 0x%x",
881				    siop_cmd->cmd_tables->msg_in[i]);
882			printf("\n");
883			}
884#endif
885			if (siop_cmd->cmd_tables->msg_in[2] == MSG_EXT_WDTR) {
886				switch (siop_wdtr_neg(&siop_cmd->cmd_c)) {
887				case SIOP_NEG_MSGOUT:
888					siop_update_scntl3(sc,
889					    siop_cmd->cmd_c.siop_target);
890					siop_table_sync(siop_cmd,
891					    BUS_DMASYNC_PREREAD |
892					    BUS_DMASYNC_PREWRITE);
893					CALL_SCRIPT(Ent_send_msgout);
894					return(1);
895				case SIOP_NEG_ACK:
896					siop_update_scntl3(sc,
897					    siop_cmd->cmd_c.siop_target);
898					CALL_SCRIPT(Ent_msgin_ack);
899					return(1);
900				default:
901					panic("invalid retval from "
902					    "siop_wdtr_neg()");
903				}
904				return(1);
905			}
906			if (siop_cmd->cmd_tables->msg_in[2] == MSG_EXT_SDTR) {
907				switch (siop_sdtr_neg(&siop_cmd->cmd_c)) {
908				case SIOP_NEG_MSGOUT:
909					siop_update_scntl3(sc,
910					    siop_cmd->cmd_c.siop_target);
911					siop_table_sync(siop_cmd,
912					    BUS_DMASYNC_PREREAD |
913					    BUS_DMASYNC_PREWRITE);
914					CALL_SCRIPT(Ent_send_msgout);
915					return(1);
916				case SIOP_NEG_ACK:
917					siop_update_scntl3(sc,
918					    siop_cmd->cmd_c.siop_target);
919					CALL_SCRIPT(Ent_msgin_ack);
920					return(1);
921				default:
922					panic("invalid retval from "
923					    "siop_sdtr_neg()");
924				}
925				return(1);
926			}
927			/* send a message reject */
928			siop_cmd->cmd_tables->msg_out[0] = MSG_MESSAGE_REJECT;
929			siop_cmd->cmd_tables->t_msgout.count = htole32(1);
930			siop_table_sync(siop_cmd,
931			    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
932			CALL_SCRIPT(Ent_send_msgout);
933			return 1;
934		case A_int_disc:
935			INCSTAT(siop_stat_intr_sdp);
936			offset = bus_space_read_1(sc->sc_c.sc_rt,
937			    sc->sc_c.sc_rh, SIOP_SCRATCHA + 1);
938#ifdef SIOP_DEBUG_DR
939			printf("disconnect offset %d\n", offset);
940#endif
941			if (offset > SIOP_NSG) {
942				printf("%s: bad offset for disconnect (%d)\n",
943				    sc->sc_c.sc_dev.dv_xname, offset);
944				goto reset;
945			}
946			/*
947			 * offset == SIOP_NSG may be a valid condition if
948			 * we get an sdp (save data pointer) when the xfer is done.
949			 * Don't call memmove in this case.
950			 */
951			if (offset < SIOP_NSG) {
952				memmove(&siop_cmd->cmd_tables->data[0],
953				    &siop_cmd->cmd_tables->data[offset],
954				    (SIOP_NSG - offset) * sizeof(scr_table_t));
955				siop_table_sync(siop_cmd,
956				    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
957			}
958			CALL_SCRIPT(Ent_script_sched);
959			return 1;
960		case A_int_resfail:
961			printf("reselect failed\n");
962			CALL_SCRIPT(Ent_script_sched);
963			return  1;
964		case A_int_done:
965			if (xs == NULL) {
966				printf("%s: done without command, DSA=0x%lx\n",
967				    sc->sc_c.sc_dev.dv_xname,
968				    (u_long)siop_cmd->cmd_c.dsa);
969				siop_cmd->cmd_c.status = CMDST_FREE;
970				CALL_SCRIPT(Ent_script_sched);
971				return 1;
972			}
973#ifdef SIOP_DEBUG_INTR
974			printf("done, DSA=0x%lx target id 0x%x last msg "
975			    "in=0x%x status=0x%x\n", (u_long)siop_cmd->cmd_c.dsa,
976			    le32toh(siop_cmd->cmd_tables->id),
977			    siop_cmd->cmd_tables->msg_in[0],
978			    le32toh(siop_cmd->cmd_tables->status));
979#endif
980			INCSTAT(siop_stat_intr_done);
981			siop_cmd->cmd_c.status = CMDST_DONE;
982			goto end;
983		default:
984			printf("unknown irqcode %x\n", irqcode);
985			if (xs) {
986				xs->error = XS_SELTIMEOUT;
987				goto end;
988			}
989			goto reset;
990		}
991		return 1;
992	}
993	/* We just shouldn't get here */
994	panic("siop_intr: I shouldn't be there !");
995	return 1;
996end:
997	/*
998	 * Restart the script now if the command completed properly.
999	 * Otherwise wait for siop_scsicmd_end(); we may need to clean up
1000	 * the queue.
1001	 */
1002	xs->status = le32toh(siop_cmd->cmd_tables->status);
1003	if (xs->status == SCSI_OK)
1004		CALL_SCRIPT(Ent_script_sched);
1005	else
1006		restart = 1;
1007	siop_lun->siop_tag[tag].active = NULL;
1008	siop_scsicmd_end(siop_cmd);
1009	if (freetarget && siop_target->target_c.status == TARST_PROBING)
1010		siop_del_dev(sc, target, lun);
1011	if (restart)
1012		CALL_SCRIPT(Ent_script_sched);
1013	if (sc->sc_flags & SCF_CHAN_NOSLOT) {
1014		/* a command terminated, so we have free slots now */
1015		sc->sc_flags &= ~SCF_CHAN_NOSLOT;
1016		scsipi_channel_thaw(&sc->sc_c.sc_chan, 1);
1017	}
1018
1019	return 1;
1020}
1021
1022void
1023siop_scsicmd_end(siop_cmd)
1024	struct siop_cmd *siop_cmd;
1025{
1026	struct scsipi_xfer *xs = siop_cmd->cmd_c.xs;
1027	struct siop_softc *sc = (struct siop_softc *)siop_cmd->cmd_c.siop_sc;
1028
1029	switch(xs->status) {
1030	case SCSI_OK:
1031		xs->error = XS_NOERROR;
1032		break;
1033	case SCSI_BUSY:
1034		xs->error = XS_BUSY;
1035		break;
1036	case SCSI_CHECK:
1037		xs->error = XS_BUSY;
1038		/* remove commands in the queue and scheduler */
1039		siop_unqueue(sc, xs->xs_periph->periph_target,
1040		    xs->xs_periph->periph_lun);
1041		break;
1042	case SCSI_QUEUE_FULL:
1043		INCSTAT(siop_stat_intr_qfull);
1044#ifdef SIOP_DEBUG
1045		printf("%s:%d:%d: queue full (tag %d)\n",
1046		    sc->sc_c.sc_dev.dv_xname,
1047		    xs->xs_periph->periph_target,
1048		    xs->xs_periph->periph_lun, siop_cmd->cmd_c.tag);
1049#endif
1050		xs->error = XS_BUSY;
1051		break;
1052	case SCSI_SIOP_NOCHECK:
1053		/*
1054		 * don't check status, xs->error is already valid
1055		 */
1056		break;
1057	case SCSI_SIOP_NOSTATUS:
1058		/*
1059		 * the status byte was not updated, cmd was
1060		 * aborted
1061		 */
1062		xs->error = XS_SELTIMEOUT;
1063		break;
1064	default:
1065		xs->error = XS_DRIVER_STUFFUP;
1066	}
1067	if (xs->xs_control & (XS_CTL_DATA_IN | XS_CTL_DATA_OUT)) {
1068		bus_dmamap_sync(sc->sc_c.sc_dmat, siop_cmd->cmd_c.dmamap_data, 0,
1069		    siop_cmd->cmd_c.dmamap_data->dm_mapsize,
1070		    (xs->xs_control & XS_CTL_DATA_IN) ?
1071		    BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE);
1072		bus_dmamap_unload(sc->sc_c.sc_dmat, siop_cmd->cmd_c.dmamap_data);
1073	}
1074	bus_dmamap_unload(sc->sc_c.sc_dmat, siop_cmd->cmd_c.dmamap_cmd);
1075	callout_stop(&siop_cmd->cmd_c.xs->xs_callout);
1076	siop_cmd->cmd_c.status = CMDST_FREE;
1077	TAILQ_INSERT_TAIL(&sc->free_list, siop_cmd, next);
1078	xs->resid = 0;
1079	scsipi_done (xs);
1080}
1081
1082void
1083siop_unqueue(sc, target, lun)
1084	struct siop_softc *sc;
1085	int target;
1086	int lun;
1087{
1088 	int slot, tag;
1089	struct siop_cmd *siop_cmd;
1090	struct siop_lun *siop_lun =
1091	    ((struct siop_target *)sc->sc_c.targets[target])->siop_lun[lun];
1092
1093	/* first make sure to read valid data */
1094	siop_script_sync(sc, BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
1095
1096	for (tag = 1; tag < SIOP_NTAG; tag++) {
1097		/* look for commands in the scheduler, not yet started */
1098		if (siop_lun->siop_tag[tag].active == NULL)
1099			continue;
1100		siop_cmd = siop_lun->siop_tag[tag].active;
1101		for (slot = 0; slot <= sc->sc_currschedslot; slot++) {
1102			if (siop_script_read(sc,
1103			    (Ent_script_sched_slot0 / 4) + slot * 2 + 1) ==
1104			    siop_cmd->cmd_c.dsa +
1105			    sizeof(struct siop_common_xfer) +
1106			    Ent_ldsa_select)
1107				break;
1108		}
1109		if (slot >  sc->sc_currschedslot)
1110			continue; /* didn't find it */
1111		if (siop_script_read(sc,
1112		    (Ent_script_sched_slot0 / 4) + slot * 2) == 0x80000000)
1113			continue; /* already started */
1114		/* clear the slot */
1115		siop_script_write(sc, (Ent_script_sched_slot0 / 4) + slot * 2,
1116		    0x80000000);
1117		/* ask to requeue */
1118		siop_cmd->cmd_c.xs->error = XS_REQUEUE;
1119		siop_cmd->cmd_c.xs->status = SCSI_SIOP_NOCHECK;
1120		siop_lun->siop_tag[tag].active = NULL;
1121		siop_scsicmd_end(siop_cmd);
1122	}
1123	/* update sc_currschedslot */
1124	sc->sc_currschedslot = 0;
1125	for (slot = SIOP_NSLOTS - 1; slot >= 0; slot--) {
1126		if (siop_script_read(sc,
1127		    (Ent_script_sched_slot0 / 4) + slot * 2) != 0x80000000)
1128			sc->sc_currschedslot = slot;
1129	}
1130}
1131
1132/*
1133 * handle a rejected queue tag message: the command will run untagged,
1134 * so we have to adjust the reselect script.
1135 */
1136int
1137siop_handle_qtag_reject(siop_cmd)
1138	struct siop_cmd *siop_cmd;
1139{
1140	struct siop_softc *sc = (struct siop_softc *)siop_cmd->cmd_c.siop_sc;
1141	int target = siop_cmd->cmd_c.xs->xs_periph->periph_target;
1142	int lun = siop_cmd->cmd_c.xs->xs_periph->periph_lun;
1143	int tag = siop_cmd->cmd_tables->msg_out[2];
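	/*
	 * msg_out[] held the identify message followed by the queue tag
	 * message, so msg_out[2] above is the tag id the target rejected.
	 */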
1144	struct siop_lun *siop_lun =
1145	    ((struct siop_target*)sc->sc_c.targets[target])->siop_lun[lun];
1146
1147#ifdef SIOP_DEBUG
1148	printf("%s:%d:%d: tag message %d (%d) rejected (status %d)\n",
1149	    sc->sc_c.sc_dev.dv_xname, target, lun, tag, siop_cmd->cmd_c.tag,
1150	    siop_cmd->cmd_c.status);
1151#endif
1152
1153	if (siop_lun->siop_tag[0].active != NULL) {
1154		printf("%s: untagged command already running for target %d "
1155		    "lun %d (status %d)\n", sc->sc_c.sc_dev.dv_xname,
1156		    target, lun, siop_lun->siop_tag[0].active->cmd_c.status);
1157		return -1;
1158	}
1159	/* clear tag slot */
1160	siop_lun->siop_tag[tag].active = NULL;
1161	/* add command to non-tagged slot */
1162	siop_lun->siop_tag[0].active = siop_cmd;
1163	siop_cmd->cmd_c.tag = 0;
1164	/* adjust reselect script if there is one */
1165	if (siop_lun->siop_tag[0].reseloff > 0) {
1166		siop_script_write(sc,
1167		    siop_lun->siop_tag[0].reseloff + 1,
1168		    siop_cmd->cmd_c.dsa + sizeof(struct siop_common_xfer) +
1169		    Ent_ldsa_reload_dsa);
1170		siop_table_sync(siop_cmd, BUS_DMASYNC_PREWRITE);
1171	}
1172	return 0;
1173}
1174
1175/*
1176 * handle a bus reset: reset chip, unqueue all active commands, free all
1177 * target structs and report the loss to the upper layer.
1178 * As the upper layer may requeue immediately, we have to first store
1179 * all active commands in a temporary queue.
1180 */
1181void
1182siop_handle_reset(sc)
1183	struct siop_softc *sc;
1184{
1185	struct siop_cmd *siop_cmd;
1186	struct siop_lun *siop_lun;
1187	int target, lun, tag;
1188	/*
1189	 * scsi bus reset. reset the chip and restart
1190	 * the queue. Need to clean up all active commands
1191	 */
1192	printf("%s: scsi bus reset\n", sc->sc_c.sc_dev.dv_xname);
1193	/* stop, reset and restart the chip */
1194	siop_reset(sc);
1195	if (sc->sc_flags & SCF_CHAN_NOSLOT) {
1196		/* chip has been reset, all slots are free now */
1197		sc->sc_flags &= ~SCF_CHAN_NOSLOT;
1198		scsipi_channel_thaw(&sc->sc_c.sc_chan, 1);
1199	}
1200	/*
1201	 * Process all commands: first the commands being executed
1202	 */
1203	for (target = 0; target < sc->sc_c.sc_chan.chan_ntargets;
1204	    target++) {
1205		if (sc->sc_c.targets[target] == NULL)
1206			continue;
1207		for (lun = 0; lun < 8; lun++) {
1208			struct siop_target *siop_target =
1209			    (struct siop_target *)sc->sc_c.targets[target];
1210			siop_lun = siop_target->siop_lun[lun];
1211			if (siop_lun == NULL)
1212				continue;
1213			for (tag = 0; tag <
1214			    ((sc->sc_c.targets[target]->flags & TARF_TAG) ?
1215			    SIOP_NTAG : 1);
1216			    tag++) {
1217				siop_cmd = siop_lun->siop_tag[tag].active;
1218				if (siop_cmd == NULL)
1219					continue;
1220				scsipi_printaddr(siop_cmd->cmd_c.xs->xs_periph);
1221				printf("command with tag id %d reset\n", tag);
1222				siop_cmd->cmd_c.xs->error =
1223				    (siop_cmd->cmd_c.flags & CMDFL_TIMEOUT) ?
1224		    		    XS_TIMEOUT : XS_RESET;
1225				siop_cmd->cmd_c.xs->status = SCSI_SIOP_NOCHECK;
1226				siop_lun->siop_tag[tag].active = NULL;
1227				siop_cmd->cmd_c.status = CMDST_DONE;
1228				siop_scsicmd_end(siop_cmd);
1229			}
1230		}
1231		sc->sc_c.targets[target]->status = TARST_ASYNC;
1232		sc->sc_c.targets[target]->flags &= ~TARF_ISWIDE;
1233		sc->sc_c.targets[target]->period =
1234		    sc->sc_c.targets[target]->offset = 0;
1235		siop_update_xfer_mode(&sc->sc_c, target);
1236	}
1237
1238	scsipi_async_event(&sc->sc_c.sc_chan, ASYNC_EVENT_RESET, NULL);
1239}
1240
1241void
1242siop_scsipi_request(chan, req, arg)
1243	struct scsipi_channel *chan;
1244	scsipi_adapter_req_t req;
1245	void *arg;
1246{
1247	struct scsipi_xfer *xs;
1248	struct scsipi_periph *periph;
1249	struct siop_softc *sc = (void *)chan->chan_adapter->adapt_dev;
1250	struct siop_cmd *siop_cmd;
1251	struct siop_target *siop_target;
1252	int s, error, i;
1253	int target;
1254	int lun;
1255
1256	switch (req) {
1257	case ADAPTER_REQ_RUN_XFER:
1258		xs = arg;
1259		periph = xs->xs_periph;
1260		target = periph->periph_target;
1261		lun = periph->periph_lun;
1262
1263		s = splbio();
1264#ifdef SIOP_DEBUG_SCHED
1265		printf("starting cmd for %d:%d\n", target, lun);
1266#endif
1267		siop_cmd = TAILQ_FIRST(&sc->free_list);
1268		if (siop_cmd == NULL) {
1269			xs->error = XS_RESOURCE_SHORTAGE;
1270			scsipi_done(xs);
1271			splx(s);
1272			return;
1273		}
1274		TAILQ_REMOVE(&sc->free_list, siop_cmd, next);
1275#ifdef DIAGNOSTIC
1276		if (siop_cmd->cmd_c.status != CMDST_FREE)
1277			panic("siop_scsicmd: new cmd not free");
1278#endif
1279		siop_target = (struct siop_target*)sc->sc_c.targets[target];
1280		if (siop_target == NULL) {
1281#ifdef SIOP_DEBUG
1282			printf("%s: alloc siop_target for target %d\n",
1283				sc->sc_c.sc_dev.dv_xname, target);
1284#endif
1285			sc->sc_c.targets[target] =
1286			    malloc(sizeof(struct siop_target),
1287				M_DEVBUF, M_NOWAIT);
1288			if (sc->sc_c.targets[target] == NULL) {
1289				printf("%s: can't malloc memory for "
1290				    "target %d\n", sc->sc_c.sc_dev.dv_xname,
1291				    target);
1292				xs->error = XS_RESOURCE_SHORTAGE;
1293				scsipi_done(xs);
1294				splx(s);
1295				return;
1296			}
1297			siop_target =
1298			    (struct siop_target*)sc->sc_c.targets[target];
1299			siop_target->target_c.status = TARST_PROBING;
1300			siop_target->target_c.flags = 0;
1301			siop_target->target_c.id =
1302			    sc->sc_c.clock_div << 24; /* scntl3 */
1303			siop_target->target_c.id |=  target << 16; /* id */
1304			/* siop_target->target_c.id |= 0x0 << 8; scxfer is 0 */
1305
1306			/* get a lun switch script */
1307			siop_target->lunsw = siop_get_lunsw(sc);
1308			if (siop_target->lunsw == NULL) {
1309				printf("%s: can't alloc lunsw for target %d\n",
1310				    sc->sc_c.sc_dev.dv_xname, target);
1311				xs->error = XS_RESOURCE_SHORTAGE;
1312				scsipi_done(xs);
1313				splx(s);
1314				return;
1315			}
1316			for (i=0; i < 8; i++)
1317				siop_target->siop_lun[i] = NULL;
1318			siop_add_reselsw(sc, target);
1319		}
1320		if (siop_target->siop_lun[lun] == NULL) {
1321			siop_target->siop_lun[lun] =
1322			    malloc(sizeof(struct siop_lun), M_DEVBUF,
1323			    M_NOWAIT|M_ZERO);
1324			if (siop_target->siop_lun[lun] == NULL) {
1325				printf("%s: can't alloc siop_lun for "
1326				    "target %d lun %d\n",
1327				    sc->sc_c.sc_dev.dv_xname, target, lun);
1328				xs->error = XS_RESOURCE_SHORTAGE;
1329				scsipi_done(xs);
1330				splx(s);
1331				return;
1332			}
1333		}
1334		siop_cmd->cmd_c.siop_target = sc->sc_c.targets[target];
1335		siop_cmd->cmd_c.xs = xs;
1336		siop_cmd->cmd_c.flags = 0;
1337		siop_cmd->cmd_c.status = CMDST_READY;
1338
1339		/* load the DMA maps */
1340		error = bus_dmamap_load(sc->sc_c.sc_dmat,
1341		    siop_cmd->cmd_c.dmamap_cmd,
1342		    xs->cmd, xs->cmdlen, NULL, BUS_DMA_NOWAIT);
1343		if (error) {
1344			printf("%s: unable to load cmd DMA map: %d\n",
1345			    sc->sc_c.sc_dev.dv_xname, error);
1346			xs->error = XS_DRIVER_STUFFUP;
1347			scsipi_done(xs);
1348			splx(s);
1349			return;
1350		}
1351		if (xs->xs_control & (XS_CTL_DATA_IN | XS_CTL_DATA_OUT)) {
1352			error = bus_dmamap_load(sc->sc_c.sc_dmat,
1353			    siop_cmd->cmd_c.dmamap_data, xs->data, xs->datalen,
1354			    NULL, BUS_DMA_NOWAIT | BUS_DMA_STREAMING |
1355			    ((xs->xs_control & XS_CTL_DATA_IN) ?
1356			     BUS_DMA_READ : BUS_DMA_WRITE));
1357			if (error) {
1358				printf("%s: unable to load data DMA map: %d\n",
1359				    sc->sc_c.sc_dev.dv_xname, error);
1360				xs->error = XS_DRIVER_STUFFUP;
1361				scsipi_done(xs);
1362				bus_dmamap_unload(sc->sc_c.sc_dmat,
1363				    siop_cmd->cmd_c.dmamap_cmd);
1364				splx(s);
1365				return;
1366			}
1367			bus_dmamap_sync(sc->sc_c.sc_dmat,
1368			    siop_cmd->cmd_c.dmamap_data, 0,
1369			    siop_cmd->cmd_c.dmamap_data->dm_mapsize,
1370			    (xs->xs_control & XS_CTL_DATA_IN) ?
1371			    BUS_DMASYNC_PREREAD : BUS_DMASYNC_PREWRITE);
1372		}
1373		bus_dmamap_sync(sc->sc_c.sc_dmat, siop_cmd->cmd_c.dmamap_cmd, 0,
1374		    siop_cmd->cmd_c.dmamap_cmd->dm_mapsize,
1375		    BUS_DMASYNC_PREWRITE);
1376
1377		if (xs->xs_tag_type) {
1378			/* use tag_id + 1, tag 0 is reserved for untagged cmds */
1379			siop_cmd->cmd_c.tag = xs->xs_tag_id + 1;
1380		} else {
1381			siop_cmd->cmd_c.tag = 0;
1382		}
1383		siop_setuptables(&siop_cmd->cmd_c);
1384		siop_table_sync(siop_cmd,
1385		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1386		siop_start(sc, siop_cmd);
1387		if (xs->xs_control & XS_CTL_POLL) {
1388			/* poll for command completion */
1389			while ((xs->xs_status & XS_STS_DONE) == 0) {
1390				delay(1000);
1391				siop_intr(sc);
1392			}
1393		}
1394		splx(s);
1395		return;
1396
1397	case ADAPTER_REQ_GROW_RESOURCES:
1398#ifdef SIOP_DEBUG
1399		printf("%s grow resources (%d)\n", sc->sc_c.sc_dev.dv_xname,
1400		    sc->sc_c.sc_adapt.adapt_openings);
1401#endif
1402		siop_morecbd(sc);
1403		return;
1404
1405	case ADAPTER_REQ_SET_XFER_MODE:
1406	{
1407		struct scsipi_xfer_mode *xm = arg;
1408		if (sc->sc_c.targets[xm->xm_target] == NULL)
1409			return;
1410		s = splbio();
1411		if (xm->xm_mode & PERIPH_CAP_TQING)
1412			sc->sc_c.targets[xm->xm_target]->flags |= TARF_TAG;
1413		if ((xm->xm_mode & PERIPH_CAP_WIDE16) &&
1414		    (sc->sc_c.features & SF_BUS_WIDE))
1415			sc->sc_c.targets[xm->xm_target]->flags |= TARF_WIDE;
1416		if (xm->xm_mode & PERIPH_CAP_SYNC)
1417			sc->sc_c.targets[xm->xm_target]->flags |= TARF_SYNC;
1418		if ((xm->xm_mode & (PERIPH_CAP_SYNC | PERIPH_CAP_WIDE16)) ||
1419		    sc->sc_c.targets[xm->xm_target]->status == TARST_PROBING)
1420			sc->sc_c.targets[xm->xm_target]->status =
1421			    TARST_ASYNC;
1422
1423		for (lun = 0; lun < sc->sc_c.sc_chan.chan_nluns; lun++) {
1424			if (sc->sc_c.sc_chan.chan_periphs[xm->xm_target][lun])
1425				/* allocate a lun sw entry for this device */
1426				siop_add_dev(sc, xm->xm_target, lun);
1427		}
1428
1429		splx(s);
1430	}
1431	}
1432}
1433
1434static void
1435siop_start(sc, siop_cmd)
1436	struct siop_softc *sc;
1437	struct siop_cmd *siop_cmd;
1438{
1439	struct siop_lun *siop_lun;
1440	struct siop_xfer *siop_xfer;
1441	u_int32_t dsa;
1442	int timeout;
1443	int target, lun, slot;
1444
1445	/*
1446	 * first make sure to read valid data
1447	 */
1448	siop_script_sync(sc, BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
1449
1450	/*
1451	 * The queue management here is a bit tricky: the script always looks
1452	 * at the slot from first to last, so if we always use the first
1453	 * free slot commands can stay at the tail of the queue ~forever.
1454	 * The algorithm used here is to restart from the head when we know
1455	 * that the queue is empty, and only add commands after the last one.
1456	 * When we're at the end of the queue wait for the script to clear it.
1457	 * The best thing to do here would be to implement a circular queue,
1458	 * but using only 53c720 features this can be "interesting".
1459	 * A mid-way solution could be to implement 2 queues and swap orders.
1460	 */
1461	slot = sc->sc_currschedslot;
1462	/*
1463	 * If the instruction is 0x80000000 (JUMP foo, IF FALSE) the slot is
1464	 * free. As this is the last used slot, all previous slots are free,
1465	 * we can restart from 0.
1466	 */
1467	if (siop_script_read(sc, (Ent_script_sched_slot0 / 4) + slot * 2) ==
1468	    0x80000000) {
1469		slot = sc->sc_currschedslot = 0;
1470	} else {
1471		slot++;
1472	}
1473	target = siop_cmd->cmd_c.xs->xs_periph->periph_target;
1474	lun = siop_cmd->cmd_c.xs->xs_periph->periph_lun;
1475	siop_lun =
1476	    ((struct siop_target*)sc->sc_c.targets[target])->siop_lun[lun];
1477	/* if non-tagged command active, panic: this shouldn't happen */
1478	if (siop_lun->siop_tag[0].active != NULL) {
1479		panic("siop_start: tagged cmd while untagged running");
1480	}
1481#ifdef DIAGNOSTIC
1482	/* sanity check the tag if needed */
1483	if (siop_cmd->cmd_c.flags & CMDFL_TAG) {
1484		if (siop_lun->siop_tag[siop_cmd->cmd_c.tag].active != NULL)
1485			panic("siop_start: tag not free");
1486		if (siop_cmd->cmd_c.tag >= SIOP_NTAG) {
1487			scsipi_printaddr(siop_cmd->cmd_c.xs->xs_periph);
1488			printf(": tag id %d\n", siop_cmd->cmd_c.tag);
1489			panic("siop_start: invalid tag id");
1490		}
1491	}
1492#endif
1493	/*
1494	 * find a free scheduler slot and load it.
1495	 */
1496	for (; slot < SIOP_NSLOTS; slot++) {
1497		/*
1498		 * If the cmd is 0x80000000 the slot is free.
1499		 */
1500		if (siop_script_read(sc,
1501		    (Ent_script_sched_slot0 / 4) + slot * 2) ==
1502		    0x80000000)
1503			break;
1504	}
1505	if (slot == SIOP_NSLOTS) {
1506		/*
1507		 * no more free slots, no need to continue. Freeze the queue
1508		 * and requeue this command.
1509		 */
1510		scsipi_channel_freeze(&sc->sc_c.sc_chan, 1);
1511		sc->sc_flags |= SCF_CHAN_NOSLOT;
1512		siop_cmd->cmd_c.xs->error = XS_REQUEUE;
1513		siop_cmd->cmd_c.xs->status = SCSI_SIOP_NOCHECK;
1514		siop_scsicmd_end(siop_cmd);
1515		return;
1516	}
1517#ifdef SIOP_DEBUG_SCHED
1518	printf("using slot %d for DSA 0x%lx\n", slot,
1519	    (u_long)siop_cmd->cmd_c.dsa);
1520#endif
1521	/* mark command as active */
1522	if (siop_cmd->cmd_c.status == CMDST_READY)
1523		siop_cmd->cmd_c.status = CMDST_ACTIVE;
1524	else
1525		panic("siop_start: bad status");
1526	siop_lun->siop_tag[siop_cmd->cmd_c.tag].active = siop_cmd;
1527	/* patch scripts with DSA addr */
1528	dsa = siop_cmd->cmd_c.dsa;
1529	/* first reselect switch, if we have an entry */
1530	if (siop_lun->siop_tag[siop_cmd->cmd_c.tag].reseloff > 0)
1531		siop_script_write(sc,
1532		    siop_lun->siop_tag[siop_cmd->cmd_c.tag].reseloff + 1,
1533		    dsa + sizeof(struct siop_common_xfer) +
1534		    Ent_ldsa_reload_dsa);
1535	/* CMD script: MOVE MEMORY addr */
1536	siop_xfer = (struct siop_xfer*)siop_cmd->cmd_tables;
1537	siop_xfer->resel[E_ldsa_abs_slot_Used[0]] =
1538	   htole32(sc->sc_c.sc_scriptaddr + Ent_script_sched_slot0 + slot * 8);
1539	siop_table_sync(siop_cmd, BUS_DMASYNC_PREWRITE);
1540	/* scheduler slot: JUMP ldsa_select */
1541	siop_script_write(sc,
1542	    (Ent_script_sched_slot0 / 4) + slot * 2 + 1,
1543	    dsa + sizeof(struct siop_common_xfer) + Ent_ldsa_select);
1544	/* handle timeout */
1545	if ((siop_cmd->cmd_c.xs->xs_control & XS_CTL_POLL) == 0) {
1546		/* start the expire timer */
1547		timeout = mstohz(siop_cmd->cmd_c.xs->timeout);
1548		if (timeout == 0)
1549			timeout = 1;
1550		callout_reset( &siop_cmd->cmd_c.xs->xs_callout,
1551		    timeout, siop_timeout, siop_cmd);
1552	}
1553	/*
1554	 * Change JUMP cmd so that this slot will be handled
1555	 */
1556	siop_script_write(sc, (Ent_script_sched_slot0 / 4) + slot * 2,
1557	    0x80080000);
1558	sc->sc_currschedslot = slot;
1559
1560	/* make sure SCRIPT processor will read valid data */
1561	siop_script_sync(sc, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1562	/* Signal script it has some work to do */
1563	bus_space_write_1(sc->sc_c.sc_rt, sc->sc_c.sc_rh,
1564	    SIOP_ISTAT, ISTAT_SIGP);
1565	/* and wait for IRQ */
1566	return;
1567}
1568
1569void
1570siop_timeout(v)
1571	void *v;
1572{
1573	struct siop_cmd *siop_cmd = v;
1574	struct siop_softc *sc = (struct siop_softc *)siop_cmd->cmd_c.siop_sc;
1575	int s;
1576
1577	scsipi_printaddr(siop_cmd->cmd_c.xs->xs_periph);
1578	printf("command timeout\n");
1579
1580	s = splbio();
1581	/* reset the scsi bus */
1582	siop_resetbus(&sc->sc_c);
1583
1584	/* deactivate callout */
1585	callout_stop(&siop_cmd->cmd_c.xs->xs_callout);
1586	/*
1587	 * Mark the command as being timed out and just return;
1588	 * the bus reset will generate an interrupt, which will be
1589	 * handled in siop_intr(), where the timed-out command is
1590	 * terminated.
1591	 */
1592	siop_cmd->cmd_c.flags |= CMDFL_TIMEOUT;
1593	splx(s);
1594	return;
1595
1596}
1597
1598void
1599siop_dump_script(sc)
1600	struct siop_softc *sc;
1601{
1602	int i;
1603	for (i = 0; i < PAGE_SIZE / 4; i += 2) {
1604		printf("0x%04x: 0x%08x 0x%08x", i * 4,
1605		    le32toh(sc->sc_c.sc_script[i]),
1606		    le32toh(sc->sc_c.sc_script[i+1]));
1607		if ((le32toh(sc->sc_c.sc_script[i]) & 0xe0000000) ==
1608		    0xc0000000) {
1609			i++;
1610			printf(" 0x%08x", le32toh(sc->sc_c.sc_script[i+1]));
1611		}
1612		printf("\n");
1613	}
1614}
1615
1616void
1617siop_morecbd(sc)
1618	struct siop_softc *sc;
1619{
1620	int error, i, j, s;
1621	bus_dma_segment_t seg;
1622	int rseg;
1623	struct siop_cbd *newcbd;
1624	struct siop_xfer *xfer;
1625	bus_addr_t dsa;
1626	u_int32_t *scr;
1627
1628	/* allocate a new list head */
1629	newcbd = malloc(sizeof(struct siop_cbd), M_DEVBUF, M_NOWAIT|M_ZERO);
1630	if (newcbd == NULL) {
1631		printf("%s: can't allocate memory for command descriptors "
1632		    "head\n", sc->sc_c.sc_dev.dv_xname);
1633		return;
1634	}
1635
1636	/* allocate cmd list */
1637	newcbd->cmds = malloc(sizeof(struct siop_cmd) * SIOP_NCMDPB,
1638	    M_DEVBUF, M_NOWAIT|M_ZERO);
1639	if (newcbd->cmds == NULL) {
1640		printf("%s: can't allocate memory for command descriptors\n",
1641		    sc->sc_c.sc_dev.dv_xname);
1642		goto bad3;
1643	}
1644	error = bus_dmamem_alloc(sc->sc_c.sc_dmat, PAGE_SIZE, PAGE_SIZE, 0, &seg,
1645	    1, &rseg, BUS_DMA_NOWAIT);
1646	if (error) {
1647		printf("%s: unable to allocate cbd DMA memory, error = %d\n",
1648		    sc->sc_c.sc_dev.dv_xname, error);
1649		goto bad2;
1650	}
1651	error = bus_dmamem_map(sc->sc_c.sc_dmat, &seg, rseg, PAGE_SIZE,
1652	    (caddr_t *)&newcbd->xfers, BUS_DMA_NOWAIT|BUS_DMA_COHERENT);
1653	if (error) {
1654		printf("%s: unable to map cbd DMA memory, error = %d\n",
1655		    sc->sc_c.sc_dev.dv_xname, error);
1656		goto bad2;
1657	}
1658	error = bus_dmamap_create(sc->sc_c.sc_dmat, PAGE_SIZE, 1, PAGE_SIZE, 0,
1659	    BUS_DMA_NOWAIT, &newcbd->xferdma);
1660	if (error) {
1661		printf("%s: unable to create cbd DMA map, error = %d\n",
1662		    sc->sc_c.sc_dev.dv_xname, error);
1663		goto bad1;
1664	}
1665	error = bus_dmamap_load(sc->sc_c.sc_dmat, newcbd->xferdma, newcbd->xfers,
1666	    PAGE_SIZE, NULL, BUS_DMA_NOWAIT);
1667	if (error) {
1668		printf("%s: unable to load cbd DMA map, error = %d\n",
1669		    sc->sc_c.sc_dev.dv_xname, error);
1670		goto bad0;
1671	}
1672#ifdef DEBUG
1673	printf("%s: alloc newcbd at PHY addr 0x%lx\n", sc->sc_c.sc_dev.dv_xname,
1674	    (unsigned long)newcbd->xferdma->dm_segs[0].ds_addr);
1675#endif
1676	for (i = 0; i < SIOP_NCMDPB; i++) {
1677		error = bus_dmamap_create(sc->sc_c.sc_dmat, MAXPHYS, SIOP_NSG,
1678		    MAXPHYS, 0, BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW,
1679		    &newcbd->cmds[i].cmd_c.dmamap_data);
1680		if (error) {
1681			printf("%s: unable to create data DMA map for cbd: "
1682			    "error %d\n",
1683			    sc->sc_c.sc_dev.dv_xname, error);
1684			goto bad0;
1685		}
1686		error = bus_dmamap_create(sc->sc_c.sc_dmat,
1687		    sizeof(struct scsipi_generic), 1,
1688		    sizeof(struct scsipi_generic), 0,
1689		    BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW,
1690		    &newcbd->cmds[i].cmd_c.dmamap_cmd);
1691		if (error) {
1692			printf("%s: unable to create cmd DMA map: error %d\n",
1693			    sc->sc_c.sc_dev.dv_xname, error);
1694			goto bad0;
1695		}
1696		newcbd->cmds[i].cmd_c.siop_sc = &sc->sc_c;
1697		newcbd->cmds[i].siop_cbdp = newcbd;
1698		xfer = &newcbd->xfers[i];
1699		newcbd->cmds[i].cmd_tables = (struct siop_common_xfer *)xfer;
1700		memset(newcbd->cmds[i].cmd_tables, 0, sizeof(struct siop_xfer));
1701		dsa = newcbd->xferdma->dm_segs[0].ds_addr +
1702		    i * sizeof(struct siop_xfer);
1703		newcbd->cmds[i].cmd_c.dsa = dsa;
1704		newcbd->cmds[i].cmd_c.status = CMDST_FREE;
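		/*
		 * the message and status tables point back into this
		 * siop_xfer: msg out at dsa+0, msg in at dsa+8, status at
		 * dsa+16; the extended message entries address the second
		 * and fourth bytes of the msg in area so an extended
		 * message can be fetched in pieces.
		 */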
1705		xfer->siop_tables.t_msgout.count = htole32(1);
1706		xfer->siop_tables.t_msgout.addr = htole32(dsa);
1707		xfer->siop_tables.t_msgin.count = htole32(1);
1708		xfer->siop_tables.t_msgin.addr = htole32(dsa + 8);
1709		xfer->siop_tables.t_extmsgin.count = htole32(2);
1710		xfer->siop_tables.t_extmsgin.addr = htole32(dsa + 9);
1711		xfer->siop_tables.t_extmsgdata.addr = htole32(dsa + 11);
1712		xfer->siop_tables.t_status.count = htole32(1);
1713		xfer->siop_tables.t_status.addr = htole32(dsa + 16);
1714
1715		/* The select/reselect script */
1716		scr = &xfer->resel[0];
1717		for (j = 0; j < sizeof(load_dsa) / sizeof(load_dsa[0]); j++)
1718			scr[j] = htole32(load_dsa[j]);
1719		/*
1720		 * 0x78000000 is a 'move data8 to reg'. data8 is the second
1721		 * octet, reg offset is the third.
1722		 */
1723		scr[Ent_rdsa0 / 4] =
1724		    htole32(0x78100000 | ((dsa & 0x000000ff) <<  8));
1725		scr[Ent_rdsa1 / 4] =
1726		    htole32(0x78110000 | ( dsa & 0x0000ff00       ));
1727		scr[Ent_rdsa2 / 4] =
1728		    htole32(0x78120000 | ((dsa & 0x00ff0000) >>  8));
1729		scr[Ent_rdsa3 / 4] =
1730		    htole32(0x78130000 | ((dsa & 0xff000000) >> 16));
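		/*
		 * patch the absolute addresses used by our copy of load_dsa:
		 * the reselected/reselect/selected entry points of the main
		 * script and this command's own ldsa_data section.
		 */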
1731		scr[E_ldsa_abs_reselected_Used[0]] =
1732		    htole32(sc->sc_c.sc_scriptaddr + Ent_reselected);
1733		scr[E_ldsa_abs_reselect_Used[0]] =
1734		    htole32(sc->sc_c.sc_scriptaddr + Ent_reselect);
1735		scr[E_ldsa_abs_selected_Used[0]] =
1736		    htole32(sc->sc_c.sc_scriptaddr + Ent_selected);
1737		scr[E_ldsa_abs_data_Used[0]] =
1738		    htole32(dsa + sizeof(struct siop_common_xfer) +
1739		    Ent_ldsa_data);
1740		/* JUMP foo, IF FALSE - used by MOVE MEMORY to clear the slot */
1741		scr[Ent_ldsa_data / 4] = htole32(0x80000000);
1742		s = splbio();
1743		TAILQ_INSERT_TAIL(&sc->free_list, &newcbd->cmds[i], next);
1744		splx(s);
1745#ifdef SIOP_DEBUG
1746		printf("tables[%d]: in=0x%x out=0x%x status=0x%x\n", i,
1747		    le32toh(newcbd->cmds[i].cmd_tables->t_msgin.addr),
1748		    le32toh(newcbd->cmds[i].cmd_tables->t_msgout.addr),
1749		    le32toh(newcbd->cmds[i].cmd_tables->t_status.addr));
1750#endif
1751	}
1752	s = splbio();
1753	TAILQ_INSERT_TAIL(&sc->cmds, newcbd, next);
1754	sc->sc_c.sc_adapt.adapt_openings += SIOP_NCMDPB;
1755	splx(s);
1756	return;
1757bad0:
1758	bus_dmamap_unload(sc->sc_c.sc_dmat, newcbd->xferdma);
1759	bus_dmamap_destroy(sc->sc_c.sc_dmat, newcbd->xferdma);
1760bad1:
1761	bus_dmamem_free(sc->sc_c.sc_dmat, &seg, rseg);
1762bad2:
1763	free(newcbd->cmds, M_DEVBUF);
1764bad3:
1765	free(newcbd, M_DEVBUF);
1766	return;
1767}
1768
1769struct siop_lunsw *
1770siop_get_lunsw(sc)
1771	struct siop_softc *sc;
1772{
1773	struct siop_lunsw *lunsw;
1774	int i;
1775
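	/*
	 * lun switches are allocated from the low end of the free script
	 * area; make sure a new one would still fit below script_free_hi.
	 */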
1776	if (sc->script_free_lo + (sizeof(lun_switch) / sizeof(lun_switch[0])) >=
1777	    sc->script_free_hi)
1778		return NULL;
1779	lunsw = TAILQ_FIRST(&sc->lunsw_list);
1780	if (lunsw != NULL) {
1781#ifdef SIOP_DEBUG
1782		printf("siop_get_lunsw got lunsw at offset %d\n",
1783		    lunsw->lunsw_off);
1784#endif
1785		TAILQ_REMOVE(&sc->lunsw_list, lunsw, next);
1786		return lunsw;
1787	}
1788	lunsw = malloc(sizeof(struct siop_lunsw), M_DEVBUF, M_NOWAIT|M_ZERO);
1789	if (lunsw == NULL)
1790		return NULL;
1791#ifdef SIOP_DEBUG
1792	printf("allocating lunsw at offset %d\n", sc->script_free_lo);
1793#endif
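	/*
	 * copy the lun_switch template into the script (on-chip RAM if
	 * available, host memory otherwise) and patch its absolute return
	 * address to point at lunsw_return in the main script.
	 */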
1794	if (sc->sc_c.features & SF_CHIP_RAM) {
1795		bus_space_write_region_4(sc->sc_c.sc_ramt, sc->sc_c.sc_ramh,
1796		    sc->script_free_lo * 4, lun_switch,
1797		    sizeof(lun_switch) / sizeof(lun_switch[0]));
1798		bus_space_write_4(sc->sc_c.sc_ramt, sc->sc_c.sc_ramh,
1799		    (sc->script_free_lo + E_abs_lunsw_return_Used[0]) * 4,
1800		    sc->sc_c.sc_scriptaddr + Ent_lunsw_return);
1801	} else {
1802		for (i = 0; i < sizeof(lun_switch) / sizeof(lun_switch[0]);
1803		    i++)
1804			sc->sc_c.sc_script[sc->script_free_lo + i] =
1805			    htole32(lun_switch[i]);
1806		sc->sc_c.sc_script[
1807		    sc->script_free_lo + E_abs_lunsw_return_Used[0]] =
1808		    htole32(sc->sc_c.sc_scriptaddr + Ent_lunsw_return);
1809	}
1810	lunsw->lunsw_off = sc->script_free_lo;
1811	lunsw->lunsw_size = sizeof(lun_switch) / sizeof(lun_switch[0]);
1812	sc->script_free_lo += lunsw->lunsw_size;
1813	siop_script_sync(sc, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1814	return lunsw;
1815}
1816
1817void
1818siop_add_reselsw(sc, target)
1819	struct siop_softc *sc;
1820	int target;
1821{
1822	int i;
1823	struct siop_target *siop_target;
1824	struct siop_lun *siop_lun;
1825
1826	siop_target = (struct siop_target *)sc->sc_c.targets[target];
1827	/*
1828	 * add an entry to resel switch
1829	 */
1830	siop_script_sync(sc, BUS_DMASYNC_POSTWRITE);
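	/*
	 * look for a free slot in the reselect switch; a slot is free when
	 * its compare byte reads back as 0xff (the value siop_del_dev
	 * restores), which no reselecting target id can match.
	 */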
1831	for (i = 0; i < 15; i++) {
1832		siop_target->reseloff = Ent_resel_targ0 / 4 + i * 2;
1833		if ((siop_script_read(sc, siop_target->reseloff) & 0xff)
1834		    == 0xff) { /* it's free */
1835#ifdef SIOP_DEBUG
1836			printf("siop: target %d slot %d offset %d\n",
1837			    target, i, siop_target->reseloff);
1838#endif
1839			/* JUMP abs_foo, IF target | 0x80; */
1840			siop_script_write(sc, siop_target->reseloff,
1841			    0x800c0080 | target);
1842			siop_script_write(sc, siop_target->reseloff + 1,
1843			    sc->sc_c.sc_scriptaddr +
1844			    siop_target->lunsw->lunsw_off * 4 +
1845			    Ent_lun_switch_entry);
1846			break;
1847		}
1848	}
1849	if (i == 15) /* no free slot, shouldn't happen */
1850		panic("siop: resel switch full");
1851
1852	sc->sc_ntargets++;
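	/*
	 * now that the target has a reselect slot, re-create the lun switch
	 * entries of the luns already set up on it.
	 */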
1853	for (i = 0; i < 8; i++) {
1854		siop_lun = siop_target->siop_lun[i];
1855		if (siop_lun == NULL)
1856			continue;
1857		if (siop_lun->reseloff > 0) {
1858			siop_lun->reseloff = 0;
1859			siop_add_dev(sc, target, i);
1860		}
1861	}
1862	siop_update_scntl3(sc, sc->sc_c.targets[target]);
1863	siop_script_sync(sc, BUS_DMASYNC_PREWRITE);
1864}
1865
1866void
1867siop_update_scntl3(sc, _siop_target)
1868	struct siop_softc *sc;
1869	struct siop_common_target *_siop_target;
1870{
1871	struct siop_target *siop_target = (struct siop_target *)_siop_target;
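	/*
	 * target_c.id carries the scntl3 value in bits 24-31 and the sxfer
	 * value in bits 8-15; each byte is shifted into the data8 field
	 * (bits 8-15) of a 'move data8 to reg' instruction.
	 */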
1872	/* MOVE target->id >> 24 TO SCNTL3 */
1873	siop_script_write(sc,
1874	    siop_target->lunsw->lunsw_off + (Ent_restore_scntl3 / 4),
1875	    0x78030000 | ((siop_target->target_c.id >> 16) & 0x0000ff00));
1876	/* MOVE target->id >> 8 TO SXFER */
1877	siop_script_write(sc,
1878	    siop_target->lunsw->lunsw_off + (Ent_restore_scntl3 / 4) + 2,
1879	    0x78050000 | (siop_target->target_c.id & 0x0000ff00));
1880	siop_script_sync(sc, BUS_DMASYNC_PREWRITE);
1881}
1882
1883void
1884siop_add_dev(sc, target, lun)
1885	struct siop_softc *sc;
1886	int target;
1887	int lun;
1888{
1889	struct siop_lunsw *lunsw;
1890	struct siop_target *siop_target =
1891	    (struct siop_target *)sc->sc_c.targets[target];
1892	struct siop_lun *siop_lun = siop_target->siop_lun[lun];
1893	int i, ntargets;
1894
1895	if (siop_lun->reseloff > 0)
1896		return;
1897	lunsw = siop_target->lunsw;
1898	if ((lunsw->lunsw_off + lunsw->lunsw_size) < sc->script_free_lo) {
1899		/*
1900		 * can't extend this slot. Probably not worth trying to deal
1901		 * with this case
1902		 */
1903#ifdef DEBUG
1904		printf("%s:%d:%d: can't allocate a lun sw slot\n",
1905		    sc->sc_c.sc_dev.dv_xname, target, lun);
1906#endif
1907		return;
1908	}
1909	/* count how many free targets we still have to probe */
1910	ntargets = sc->sc_c.sc_chan.chan_ntargets - 1 - sc->sc_ntargets;
1911
1912	/*
1913	 * we need 8 bytes for the additional lun switch entry, and
1914	 * possibly sizeof(tag_switch) for the tag switch entry.
1915	 * Keep enough free space for the remaining targets that could be
1916	 * probed later.
1917	 */
1918	if (sc->script_free_lo + 2 +
1919	    (ntargets * sizeof(lun_switch) / sizeof(lun_switch[0])) >=
1920	    ((siop_target->target_c.flags & TARF_TAG) ?
1921	    sc->script_free_hi - (sizeof(tag_switch) / sizeof(tag_switch[0])) :
1922	    sc->script_free_hi)) {
1923		/*
1924		 * not enough space, probably not worth dealing with it.
1925		 * We can hold 13 tagged-queuing capable devices in the 4k RAM.
1926		 */
1927#ifdef DEBUG
1928		printf("%s:%d:%d: not enough memory for a lun sw slot\n",
1929		    sc->sc_c.sc_dev.dv_xname, target, lun);
1930#endif
1931		return;
1932	}
1933#ifdef SIOP_DEBUG
1934	printf("%s:%d:%d: allocate lun sw entry\n",
1935	    sc->sc_c.sc_dev.dv_xname, target, lun);
1936#endif
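	/*
	 * the lun switch ends with an INT int_resellun; re-emit that
	 * terminator two words further up and reuse the two words it
	 * occupied for this lun's JUMP entry.
	 */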
1937	/* INT int_resellun */
1938	siop_script_write(sc, sc->script_free_lo, 0x98080000);
1939	siop_script_write(sc, sc->script_free_lo + 1, A_int_resellun);
1940	/* Now the slot entry: JUMP abs_foo, IF lun */
1941	siop_script_write(sc, sc->script_free_lo - 2,
1942	    0x800c0000 | lun);
1943	siop_script_write(sc, sc->script_free_lo - 1, 0);
1944	siop_lun->reseloff = sc->script_free_lo - 2;
1945	lunsw->lunsw_size += 2;
1946	sc->script_free_lo += 2;
1947	if (siop_target->target_c.flags & TARF_TAG) {
1948		/* we need a tag switch */
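		/*
		 * tag switches are carved from the top of the free script
		 * area, growing down towards script_free_lo.
		 */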
1949		sc->script_free_hi -=
1950		    sizeof(tag_switch) / sizeof(tag_switch[0]);
1951		if (sc->sc_c.features & SF_CHIP_RAM) {
1952			bus_space_write_region_4(sc->sc_c.sc_ramt,
1953			    sc->sc_c.sc_ramh,
1954			    sc->script_free_hi * 4, tag_switch,
1955			    sizeof(tag_switch) / sizeof(tag_switch[0]));
1956		} else {
1957			for (i = 0;
1958			    i < sizeof(tag_switch) / sizeof(tag_switch[0]);
1959			    i++) {
1960				sc->sc_c.sc_script[sc->script_free_hi + i] =
1961				    htole32(tag_switch[i]);
1962			}
1963		}
1964		siop_script_write(sc,
1965		    siop_lun->reseloff + 1,
1966		    sc->sc_c.sc_scriptaddr + sc->script_free_hi * 4 +
1967		    Ent_tag_switch_entry);
1968
1969		for (i = 0; i < SIOP_NTAG; i++) {
1970			siop_lun->siop_tag[i].reseloff =
1971			    sc->script_free_hi + (Ent_resel_tag0 / 4) + i * 2;
1972		}
1973	} else {
1974		/* non-tag case; just work with the lun switch */
1975		siop_lun->siop_tag[0].reseloff =
1976		    siop_target->siop_lun[lun]->reseloff;
1977	}
1978	siop_script_sync(sc, BUS_DMASYNC_PREWRITE);
1979}
1980
1981void
1982siop_del_dev(sc, target, lun)
1983	struct siop_softc *sc;
1984	int target;
1985	int lun;
1986{
1987	int i;
1988	struct siop_target *siop_target;
1989#ifdef SIOP_DEBUG
1990	printf("%s:%d:%d: free lun sw entry\n",
1991	    sc->sc_c.sc_dev.dv_xname, target, lun);
1992#endif
1993	if (sc->sc_c.targets[target] == NULL)
1994		return;
1995	siop_target = (struct siop_target *)sc->sc_c.targets[target];
1996	free(siop_target->siop_lun[lun], M_DEVBUF);
1997	siop_target->siop_lun[lun] = NULL;
1998	/* XXX compact sw entry too ? */
1999	/* check if we can free the whole target */
2000	for (i = 0; i < 8; i++) {
2001		if (siop_target->siop_lun[i] != NULL)
2002			return;
2003	}
2004#ifdef SIOP_DEBUG
2005	printf("%s: free siop_target for target %d lun %d lunsw offset %d\n",
2006	    sc->sc_c.sc_dev.dv_xname, target, lun,
2007	    sc->sc_c.targets[target]->lunsw->lunsw_off);
2008#endif
2009	/*
2010	 * nothing here, free the target struct and resel
2011	 * switch entry
2012	 */
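	/* restore the 0xff free marker in the reselect switch entry */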
2013	siop_script_write(sc, siop_target->reseloff, 0x800c00ff);
2014	siop_script_sync(sc, BUS_DMASYNC_PREWRITE);
2015	TAILQ_INSERT_TAIL(&sc->lunsw_list, siop_target->lunsw, next);
2016	free(sc->sc_c.targets[target], M_DEVBUF);
2017	sc->sc_c.targets[target] = NULL;
2018	sc->sc_ntargets--;
2019}
2020
2021#ifdef SIOP_STATS
2022void
2023siop_printstats()
2024{
2025	printf("siop_stat_intr %d\n", siop_stat_intr);
2026	printf("siop_stat_intr_shortxfer %d\n", siop_stat_intr_shortxfer);
2027	printf("siop_stat_intr_xferdisc %d\n", siop_stat_intr_xferdisc);
2028	printf("siop_stat_intr_sdp %d\n", siop_stat_intr_sdp);
2029	printf("siop_stat_intr_done %d\n", siop_stat_intr_done);
2030	printf("siop_stat_intr_lunresel %d\n", siop_stat_intr_lunresel);
2031	printf("siop_stat_intr_qfull %d\n", siop_stat_intr_qfull);
2032}
2033#endif
2034