/* twe.c revision 1.21 */
1/*	$OpenBSD: twe.c,v 1.21 2003/06/02 19:24:22 mickey Exp $	*/
2
3/*
4 * Copyright (c) 2000-2002 Michael Shalayeff.  All rights reserved.
5 *
6 * The SCSI emulation layer is derived from gdt(4) driver,
7 * Copyright (c) 1999, 2000 Niklas Hallqvist. All rights reserved.
8 *
9 * Redistribution and use in source and binary forms, with or without
10 * modification, are permitted provided that the following conditions
11 * are met:
12 * 1. Redistributions of source code must retain the above copyright
13 *    notice, this list of conditions and the following disclaimer.
14 * 2. Redistributions in binary form must reproduce the above copyright
15 *    notice, this list of conditions and the following disclaimer in the
16 *    documentation and/or other materials provided with the distribution.
17 *
18 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
19 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
20 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
21 * IN NO EVENT SHALL THE AUTHOR OR HIS RELATIVES BE LIABLE FOR ANY DIRECT,
22 * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
23 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
24 * SERVICES; LOSS OF MIND, USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
25 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
26 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
27 * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
28 * THE POSSIBILITY OF SUCH DAMAGE.
29 */
30
31/* #define	TWE_DEBUG */
32
33#include <sys/param.h>
34#include <sys/systm.h>
35#include <sys/buf.h>
36#include <sys/device.h>
37#include <sys/kernel.h>
38#include <sys/malloc.h>
39#include <sys/proc.h>
40#include <sys/kthread.h>
41
42#include <machine/bus.h>
43
44#include <scsi/scsi_all.h>
45#include <scsi/scsi_disk.h>
46#include <scsi/scsiconf.h>
47
48#include <dev/ic/twereg.h>
49#include <dev/ic/twevar.h>
50
51#ifdef TWE_DEBUG
52#define	TWE_DPRINTF(m,a)	if (twe_debug & (m)) printf a
53#define	TWE_D_CMD	0x0001
54#define	TWE_D_INTR	0x0002
55#define	TWE_D_MISC	0x0004
56#define	TWE_D_DMA	0x0008
57#define	TWE_D_AEN	0x0010
58int twe_debug = 0;
59#else
60#define	TWE_DPRINTF(m,a)	/* m, a */
61#endif
62
/* Autoconf glue: device class descriptor for "twe" units. */
struct cfdriver twe_cd = {
	NULL, "twe", DV_DULL
};
66
67int	twe_scsi_cmd(struct scsi_xfer *);
68
/*
 * SCSI mid-layer entry points: command submission and transfer-size
 * clamping; remaining hooks unused.
 */
struct scsi_adapter twe_switch = {
	twe_scsi_cmd, tweminphys, 0, 0,
};
72
/* No device-specific callbacks; generic SCSI device handling is used. */
struct scsi_device twe_dev = {
	NULL, NULL, NULL, NULL
};
76
77static __inline struct twe_ccb *twe_get_ccb(struct twe_softc *sc);
78static __inline void twe_put_ccb(struct twe_ccb *ccb);
79void twe_dispose(struct twe_softc *sc);
80int  twe_cmd(struct twe_ccb *ccb, int flags, int wait);
81int  twe_start(struct twe_ccb *ccb, int wait);
82int  twe_complete(struct twe_ccb *ccb);
83int  twe_done(struct twe_softc *sc, struct twe_ccb *ccb);
84void twe_copy_internal_data(struct scsi_xfer *xs, void *v, size_t size);
85void twe_thread_create(void *v);
86void twe_thread(void *v);
87
88
89static __inline struct twe_ccb *
90twe_get_ccb(sc)
91	struct twe_softc *sc;
92{
93	struct twe_ccb *ccb;
94
95	ccb = TAILQ_LAST(&sc->sc_free_ccb, twe_queue_head);
96	if (ccb)
97		TAILQ_REMOVE(&sc->sc_free_ccb, ccb, ccb_link);
98	return ccb;
99}
100
101static __inline void
102twe_put_ccb(ccb)
103	struct twe_ccb *ccb;
104{
105	struct twe_softc *sc = ccb->ccb_sc;
106
107	ccb->ccb_state = TWE_CCB_FREE;
108	TAILQ_INSERT_TAIL(&sc->sc_free_ccb, ccb, ccb_link);
109}
110
/*
 * Release the DMA resources set up by twe_attach(): destroy the per-ccb
 * data maps and the shared command map, then unmap and free the command
 * block memory.  Called only on attach failure paths after the command
 * memory has been successfully allocated and mapped, so the trailing
 * unmap/free are unconditional.
 */
void
twe_dispose(sc)
	struct twe_softc *sc;
{
	register struct twe_ccb *ccb;
	if (sc->sc_cmdmap != NULL) {
		bus_dmamap_destroy(sc->dmat, sc->sc_cmdmap);
		/* traverse the ccbs and destroy the maps */
		for (ccb = &sc->sc_ccbs[TWE_MAXCMDS - 1]; ccb >= sc->sc_ccbs; ccb--)
			if (ccb->ccb_dmamap)
				bus_dmamap_destroy(sc->dmat, ccb->ccb_dmamap);
	}
	bus_dmamem_unmap(sc->dmat, sc->sc_cmds,
	    sizeof(struct twe_cmd) * TWE_MAXCMDS);
	bus_dmamem_free(sc->dmat, sc->sc_cmdseg, 1);
}
127
128int
129twe_attach(sc)
130	struct twe_softc *sc;
131{
132	/* this includes a buffer for drive config req, and a capacity req */
133	u_int8_t	param_buf[2 * TWE_SECTOR_SIZE + TWE_ALIGN - 1];
134	struct twe_param *pb = (void *)
135	    (((u_long)param_buf + TWE_ALIGN - 1) & ~(TWE_ALIGN - 1));
136	struct twe_param *cap = (void *)((u_int8_t *)pb + TWE_SECTOR_SIZE);
137	struct twe_ccb	*ccb;
138	struct twe_cmd	*cmd;
139	u_int32_t	status;
140	int		error, i, retry, nunits, nseg;
141	const char	*errstr;
142	twe_lock_t	lock;
143	paddr_t		pa;
144
145	error = bus_dmamem_alloc(sc->dmat, sizeof(struct twe_cmd) * TWE_MAXCMDS,
146	    PAGE_SIZE, 0, sc->sc_cmdseg, 1, &nseg, BUS_DMA_NOWAIT);
147	if (error) {
148		printf(": cannot allocate commands (%d)\n", error);
149		return (1);
150	}
151
152	error = bus_dmamem_map(sc->dmat, sc->sc_cmdseg, nseg,
153	    sizeof(struct twe_cmd) * TWE_MAXCMDS,
154	    (caddr_t *)&sc->sc_cmds, BUS_DMA_NOWAIT);
155	if (error) {
156		printf(": cannot map commands (%d)\n", error);
157		bus_dmamem_free(sc->dmat, sc->sc_cmdseg, 1);
158		return (1);
159	}
160
161	error = bus_dmamap_create(sc->dmat,
162	    sizeof(struct twe_cmd) * TWE_MAXCMDS, TWE_MAXCMDS,
163	    sizeof(struct twe_cmd) * TWE_MAXCMDS, 0,
164	    BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW, &sc->sc_cmdmap);
165	if (error) {
166		printf(": cannot create ccb cmd dmamap (%d)\n", error);
167		twe_dispose(sc);
168		return (1);
169	}
170	error = bus_dmamap_load(sc->dmat, sc->sc_cmdmap, sc->sc_cmds,
171	    sizeof(struct twe_cmd) * TWE_MAXCMDS, NULL, BUS_DMA_NOWAIT);
172	if (error) {
173		printf(": cannot load command dma map (%d)\n", error);
174		twe_dispose(sc);
175		return (1);
176	}
177
178	TAILQ_INIT(&sc->sc_ccb2q);
179	TAILQ_INIT(&sc->sc_ccbq);
180	TAILQ_INIT(&sc->sc_free_ccb);
181	TAILQ_INIT(&sc->sc_done_ccb);
182
183	lockinit(&sc->sc_lock, PWAIT, "twelk", 0, 0);
184
185	pa = sc->sc_cmdmap->dm_segs[0].ds_addr +
186	    sizeof(struct twe_cmd) * (TWE_MAXCMDS - 1);;
187	for (cmd = sc->sc_cmds + sizeof(struct twe_cmd) * (TWE_MAXCMDS - 1);
188	     cmd >= (struct twe_cmd *)sc->sc_cmds; cmd--, pa -= sizeof(*cmd)) {
189
190		cmd->cmd_index = cmd - (struct twe_cmd *)sc->sc_cmds;
191		ccb = &sc->sc_ccbs[cmd->cmd_index];
192		error = bus_dmamap_create(sc->dmat,
193		    TWE_MAXFER, TWE_MAXOFFSETS, TWE_MAXFER, 0,
194		    BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW, &ccb->ccb_dmamap);
195		if (error) {
196			printf(": cannot create ccb dmamap (%d)\n", error);
197			twe_dispose(sc);
198			return (1);
199		}
200		ccb->ccb_sc = sc;
201		ccb->ccb_cmd = cmd;
202		ccb->ccb_cmdpa = pa;
203		ccb->ccb_state = TWE_CCB_FREE;
204		TAILQ_INSERT_TAIL(&sc->sc_free_ccb, ccb, ccb_link);
205	}
206
207	for (errstr = NULL, retry = 3; retry--; ) {
208		int		veseen_srst;
209		u_int16_t	aen;
210
211		if (errstr)
212			TWE_DPRINTF(TWE_D_MISC, ("%s ", errstr));
213
214		for (i = 350000; i--; DELAY(100)) {
215			status = bus_space_read_4(sc->iot, sc->ioh, TWE_STATUS);
216			if (status & TWE_STAT_CPURDY)
217				break;
218		}
219
220		if (!(status & TWE_STAT_CPURDY)) {
221			errstr = ": card CPU is not ready\n";
222			continue;
223		}
224
225		/* soft reset, disable ints */
226		bus_space_write_4(sc->iot, sc->ioh, TWE_CONTROL,
227		    TWE_CTRL_SRST |
228		    TWE_CTRL_CHOSTI | TWE_CTRL_CATTNI | TWE_CTRL_CERR |
229		    TWE_CTRL_MCMDI | TWE_CTRL_MRDYI |
230		    TWE_CTRL_MINT);
231
232		for (i = 350000; i--; DELAY(100)) {
233			status = bus_space_read_4(sc->iot, sc->ioh, TWE_STATUS);
234			if (status & TWE_STAT_ATTNI)
235				break;
236		}
237
238		if (!(status & TWE_STAT_ATTNI)) {
239			errstr = ": cannot get card's attention\n";
240			continue;
241		}
242
243		/* drain aen queue */
244		for (veseen_srst = 0, aen = -1; aen != TWE_AEN_QEMPTY; ) {
245
246			if ((ccb = twe_get_ccb(sc)) == NULL) {
247				errstr = ": out of ccbs\n";
248				continue;
249			}
250
251			ccb->ccb_xs = NULL;
252			ccb->ccb_data = pb;
253			ccb->ccb_length = TWE_SECTOR_SIZE;
254			ccb->ccb_state = TWE_CCB_READY;
255			cmd = ccb->ccb_cmd;
256			cmd->cmd_unit_host = TWE_UNITHOST(0, 0);
257			cmd->cmd_op = TWE_CMD_GPARAM;
258			cmd->cmd_param.count = 1;
259
260			pb->table_id = TWE_PARAM_AEN;
261			pb->param_id = 2;
262			pb->param_size = 2;
263
264			if (twe_cmd(ccb, BUS_DMA_NOWAIT, 1)) {
265				errstr = ": error draining attention queue\n";
266				break;
267			}
268			aen = *(u_int16_t *)pb->data;
269			TWE_DPRINTF(TWE_D_AEN, ("aen=%x ", aen));
270			if (aen == TWE_AEN_SRST)
271				veseen_srst++;
272		}
273
274		if (!veseen_srst) {
275			errstr = ": we don't get it\n";
276			continue;
277		}
278
279		if (status & TWE_STAT_CPUERR) {
280			errstr = ": card CPU error detected\n";
281			continue;
282		}
283
284		if (status & TWE_STAT_PCIPAR) {
285			errstr = ": PCI parity error detected\n";
286			continue;
287		}
288
289		if (status & TWE_STAT_QUEUEE ) {
290			errstr = ": queuing error detected\n";
291			continue;
292		}
293
294		if (status & TWE_STAT_PCIABR) {
295			errstr = ": PCI abort\n";
296			continue;
297		}
298
299		while (!(status & TWE_STAT_RQE)) {
300			bus_space_read_4(sc->iot, sc->ioh, TWE_READYQUEUE);
301			status = bus_space_read_4(sc->iot, sc->ioh, TWE_STATUS);
302		}
303
304		break;
305	}
306
307	if (retry < 0) {
308		printf(errstr);
309		twe_dispose(sc);
310		return 1;
311	}
312
313	if ((ccb = twe_get_ccb(sc)) == NULL) {
314		printf(": out of ccbs\n");
315		twe_dispose(sc);
316		return 1;
317	}
318
319	ccb->ccb_xs = NULL;
320	ccb->ccb_data = pb;
321	ccb->ccb_length = TWE_SECTOR_SIZE;
322	ccb->ccb_state = TWE_CCB_READY;
323	cmd = ccb->ccb_cmd;
324	cmd->cmd_unit_host = TWE_UNITHOST(0, 0);
325	cmd->cmd_op = TWE_CMD_GPARAM;
326	cmd->cmd_param.count = 1;
327
328	pb->table_id = TWE_PARAM_UC;
329	pb->param_id = TWE_PARAM_UC;
330	pb->param_size = TWE_MAX_UNITS;
331	if (twe_cmd(ccb, BUS_DMA_NOWAIT, 1)) {
332		printf(": failed to fetch unit parameters\n");
333		twe_dispose(sc);
334		return 1;
335	}
336
337	/* we are assuming last read status was good */
338	printf(": Escalade V%d.%d\n", TWE_MAJV(status), TWE_MINV(status));
339
340	for (nunits = i = 0; i < TWE_MAX_UNITS; i++) {
341		if (pb->data[i] == 0)
342			continue;
343
344		if ((ccb = twe_get_ccb(sc)) == NULL) {
345			printf(": out of ccbs\n");
346			twe_dispose(sc);
347			return 1;
348		}
349
350		ccb->ccb_xs = NULL;
351		ccb->ccb_data = cap;
352		ccb->ccb_length = TWE_SECTOR_SIZE;
353		ccb->ccb_state = TWE_CCB_READY;
354		cmd = ccb->ccb_cmd;
355		cmd->cmd_unit_host = TWE_UNITHOST(0, 0);
356		cmd->cmd_op = TWE_CMD_GPARAM;
357		cmd->cmd_param.count = 1;
358
359		cap->table_id = TWE_PARAM_UI + i;
360		cap->param_id = 4;
361		cap->param_size = 4;	/* 4 bytes */
362		lock = TWE_LOCK(sc);
363		if (twe_cmd(ccb, BUS_DMA_NOWAIT, 1)) {
364			TWE_UNLOCK(sc, lock);
365			printf("%s: error fetching capacity for unit %d\n",
366			    sc->sc_dev.dv_xname, i);
367			continue;
368		}
369		TWE_UNLOCK(sc, lock);
370
371		nunits++;
372		sc->sc_hdr[i].hd_present = 1;
373		sc->sc_hdr[i].hd_devtype = 0;
374		sc->sc_hdr[i].hd_size = letoh32(*(u_int32_t *)cap->data);
375		/* this is evil. they never learn */
376		if (sc->sc_hdr[i].hd_size > 0x200000) {
377			sc->sc_hdr[i].hd_secs = 63;
378			sc->sc_hdr[i].hd_heads = 255;
379		} else {
380			sc->sc_hdr[i].hd_secs = 32;
381			sc->sc_hdr[i].hd_heads = 64;
382		}
383		TWE_DPRINTF(TWE_D_MISC, ("twed%d: size=%d secs=%d heads=%d\n",
384		    i, sc->sc_hdr[i].hd_size, sc->sc_hdr[i].hd_secs,
385		    sc->sc_hdr[i].hd_heads));
386	}
387
388	if (!nunits)
389		nunits++;
390
391	/* TODO: fetch & print cache params? */
392
393	sc->sc_link.adapter_softc = sc;
394	sc->sc_link.adapter = &twe_switch;
395	sc->sc_link.adapter_target = TWE_MAX_UNITS;
396	sc->sc_link.device = &twe_dev;
397	sc->sc_link.openings = TWE_MAXCMDS / nunits;
398	sc->sc_link.adapter_buswidth = TWE_MAX_UNITS;
399
400	config_found(&sc->sc_dev, &sc->sc_link, scsiprint);
401
402	kthread_create_deferred(twe_thread_create, sc);
403
404	return (0);
405}
406
/*
 * Deferred kthread-creation callback: start the controller worker
 * thread, then clear pending status bits and enable interrupts.
 * If thread creation fails the controller is left with interrupts
 * disabled.
 */
void
twe_thread_create(void *v)
{
	struct twe_softc *sc = v;

	if (kthread_create(twe_thread, sc, &sc->sc_thread,
	    "%s", sc->sc_dev.dv_xname)) {
		/* TODO disable twe */
		printf("%s: failed to create kernel thread, disabled\n",
		    sc->sc_dev.dv_xname);
		return;
	}

	TWE_DPRINTF(TWE_D_CMD, ("stat=%b ",
	    bus_space_read_4(sc->iot, sc->ioh, TWE_STATUS), TWE_STAT_BITS));
	/*
	 * ack all before enable, cannot be done in one
	 * operation as it seems clear is not processed
	 * if enable is specified.
	 */
	bus_space_write_4(sc->iot, sc->ioh, TWE_CONTROL,
	    TWE_CTRL_CHOSTI | TWE_CTRL_CATTNI | TWE_CTRL_CERR);
	TWE_DPRINTF(TWE_D_CMD, ("stat=%b ",
	    bus_space_read_4(sc->iot, sc->ioh, TWE_STATUS), TWE_STAT_BITS));
	/* enable interrupts */
	bus_space_write_4(sc->iot, sc->ioh, TWE_CONTROL,
	    TWE_CTRL_EINT | TWE_CTRL_ERDYI |
	    /*TWE_CTRL_HOSTI |*/ TWE_CTRL_MCMDI);
}
436
/*
 * Controller worker thread.  Loops forever at splbio: completes the
 * ccbs the interrupt handler placed on the done queue, then feeds
 * prequeued commands to the controller while its command queue has
 * room, and sleeps until woken by twe_intr()/twe_start().
 */
void
twe_thread(v)
	void *v;
{
	struct twe_softc *sc = v;
	struct twe_ccb *ccb;
	twe_lock_t lock;
	u_int32_t status;
	int err;

	splbio();
	for (;;) {
		lock = TWE_LOCK(sc);

		/* finish everything the interrupt handler marked done */
		while (!TAILQ_EMPTY(&sc->sc_done_ccb)) {
			ccb = TAILQ_FIRST(&sc->sc_done_ccb);
			TAILQ_REMOVE(&sc->sc_done_ccb, ccb, ccb_link);
			if ((err = twe_done(sc, ccb)))
				printf("%s: done failed (%d)\n",
				    sc->sc_dev.dv_xname, err);
		}

		status = bus_space_read_4(sc->iot, sc->ioh, TWE_STATUS);
		TWE_DPRINTF(TWE_D_INTR, ("twe_thread stat=%b ",
		    status & TWE_STAT_FLAGS, TWE_STAT_BITS));
		/* submit prequeued commands while the hw queue is not full */
		while (!(status & TWE_STAT_CQF) &&
		    !TAILQ_EMPTY(&sc->sc_ccb2q)) {

			ccb = TAILQ_LAST(&sc->sc_ccb2q, twe_queue_head);
			TAILQ_REMOVE(&sc->sc_ccb2q, ccb, ccb_link);

			ccb->ccb_state = TWE_CCB_QUEUED;
			TAILQ_INSERT_TAIL(&sc->sc_ccbq, ccb, ccb_link);
			bus_space_write_4(sc->iot, sc->ioh, TWE_COMMANDQUEUE,
			    ccb->ccb_cmdpa);

			status = bus_space_read_4(sc->iot, sc->ioh, TWE_STATUS);
			TWE_DPRINTF(TWE_D_INTR, ("twe_thread stat=%b ",
			    status & TWE_STAT_FLAGS, TWE_STAT_BITS));
		}

		/* leftovers: ask for an interrupt once the queue drains */
		if (!TAILQ_EMPTY(&sc->sc_ccb2q))
			bus_space_write_4(sc->iot, sc->ioh, TWE_CONTROL,
			    TWE_CTRL_ECMDI);

		TWE_UNLOCK(sc, lock);
		sc->sc_thread_on = 1;
		tsleep(sc, PWAIT, "twespank", 0);
	}
}
487
/*
 * Prepare and submit one ccb.
 *
 * Unaligned data buffers are bounced through freshly allocated DMA
 * memory (ccb_realdata keeps the caller's pointer; twe_done() copies
 * the result back and frees the bounce buffer).  The data map is
 * loaded and the command's scatter/gather list filled in for
 * GPARAM/SPARAM and READ/WRITE opcodes; the segment count is folded
 * into the upper byte of cmd_op.  The ccb then goes to twe_start();
 * with `wait' non-zero the command is polled to completion via
 * twe_complete().
 *
 * Returns 0 on success, else an errno (or twe_start/twe_complete's
 * non-zero result); on failure the ccb and any bounce buffer have
 * been released.
 */
int
twe_cmd(ccb, flags, wait)
	struct twe_ccb *ccb;
	int flags, wait;
{
	struct twe_softc *sc = ccb->ccb_sc;
	bus_dmamap_t dmap;
	struct twe_cmd *cmd;
	struct twe_segs *sgp;
	int error, i;

	/* hardware wants TWE_ALIGN-aligned buffers; bounce if necessary */
	if (ccb->ccb_data && ((u_long)ccb->ccb_data & (TWE_ALIGN - 1))) {
		TWE_DPRINTF(TWE_D_DMA, ("data=%p is unaligned ",ccb->ccb_data));
		ccb->ccb_realdata = ccb->ccb_data;

		error = bus_dmamem_alloc(sc->dmat, ccb->ccb_length, PAGE_SIZE,
		    0, ccb->ccb_2bseg, TWE_MAXOFFSETS, &ccb->ccb_2nseg,
		    BUS_DMA_NOWAIT);
		if (error) {
			TWE_DPRINTF(TWE_D_DMA, ("2buf alloc failed(%d) ", error));
			twe_put_ccb(ccb);
			return (ENOMEM);
		}

		error = bus_dmamem_map(sc->dmat, ccb->ccb_2bseg, ccb->ccb_2nseg,
		    ccb->ccb_length, (caddr_t *)&ccb->ccb_data, BUS_DMA_NOWAIT);
		if (error) {
			TWE_DPRINTF(TWE_D_DMA, ("2buf map failed(%d) ", error));
			bus_dmamem_free(sc->dmat, ccb->ccb_2bseg, ccb->ccb_2nseg);
			twe_put_ccb(ccb);
			return (ENOMEM);
		}
		/* copied unconditionally; reads are copied back in twe_done() */
		bcopy(ccb->ccb_realdata, ccb->ccb_data, ccb->ccb_length);
	} else
		ccb->ccb_realdata = NULL;

	dmap = ccb->ccb_dmamap;
	cmd = ccb->ccb_cmd;
	cmd->cmd_status = 0;

	if (ccb->ccb_data) {
		error = bus_dmamap_load(sc->dmat, dmap, ccb->ccb_data,
		    ccb->ccb_length, NULL, flags);
		if (error) {
			if (error == EFBIG)
				printf("more than %d dma segs\n", TWE_MAXOFFSETS);
			else
				printf("error %d loading dma map\n", error);

			if (ccb->ccb_realdata) {
				bus_dmamem_unmap(sc->dmat, ccb->ccb_data,
				    ccb->ccb_length);
				bus_dmamem_free(sc->dmat, ccb->ccb_2bseg,
				    ccb->ccb_2nseg);
			}
			twe_put_ccb(ccb);
			return error;
		}
		/* load addresses into command */
		switch (cmd->cmd_op) {
		case TWE_CMD_GPARAM:
		case TWE_CMD_SPARAM:
			sgp = cmd->cmd_param.segs;
			break;
		case TWE_CMD_READ:
		case TWE_CMD_WRITE:
			sgp = cmd->cmd_io.segs;
			break;
		default:
			/* no data transfer */
			TWE_DPRINTF(TWE_D_DMA, ("twe_cmd: unknown sgp op=%x\n",
			    cmd->cmd_op));
			sgp = NULL;
			break;
		}
		TWE_DPRINTF(TWE_D_DMA, ("data=%p<", ccb->ccb_data));
		if (sgp) {
			/*
			 * we know that size is in the upper byte,
			 * and we do not worry about overflow
			 */
			cmd->cmd_op += (2 * dmap->dm_nsegs) << 8;
			bzero (sgp, TWE_MAXOFFSETS * sizeof(*sgp));
			for (i = 0; i < dmap->dm_nsegs; i++, sgp++) {
				sgp->twes_addr = htole32(dmap->dm_segs[i].ds_addr);
				sgp->twes_len  = htole32(dmap->dm_segs[i].ds_len);
				TWE_DPRINTF(TWE_D_DMA, ("%x[%x] ",
				    dmap->dm_segs[i].ds_addr,
				    dmap->dm_segs[i].ds_len));
			}
		}
		TWE_DPRINTF(TWE_D_DMA, ("> "));
		bus_dmamap_sync(sc->dmat, dmap, 0, dmap->dm_mapsize,
		    BUS_DMASYNC_PREWRITE);
	}
	/* flush the command block itself before the card reads it */
	bus_dmamap_sync(sc->dmat, sc->sc_cmdmap, 0, sc->sc_cmdmap->dm_mapsize,
	    BUS_DMASYNC_PREWRITE);

	if ((error = twe_start(ccb, wait))) {
		bus_dmamap_unload(sc->dmat, dmap);
		if (ccb->ccb_realdata) {
			bus_dmamem_unmap(sc->dmat, ccb->ccb_data,
			    ccb->ccb_length);
			bus_dmamem_free(sc->dmat, ccb->ccb_2bseg,
			    ccb->ccb_2nseg);
		}
		twe_put_ccb(ccb);
		return (error);
	}

	return wait? twe_complete(ccb) : 0;
}
600
/*
 * Hand a ready ccb to the controller.  The 16-bit opcode is converted
 * to little endian here.  Without `wait' the ccb goes on the prequeue
 * and the worker thread is woken to submit it; with `wait' the
 * queue-full bit is polled (up to 1000 * 10us) and the command is
 * written to the command queue register directly.  Returns 0 on
 * success, 1 if the command queue stayed full.
 */
int
twe_start(ccb, wait)
	struct twe_ccb *ccb;
	int wait;
{
	struct twe_softc*sc = ccb->ccb_sc;
	struct twe_cmd	*cmd = ccb->ccb_cmd;
	u_int32_t	status;
	int i;

	cmd->cmd_op = htole16(cmd->cmd_op);

	if (!wait) {

		TWE_DPRINTF(TWE_D_CMD, ("prequeue(%d) ", cmd->cmd_index));
		ccb->ccb_state = TWE_CCB_PREQUEUED;
		TAILQ_INSERT_TAIL(&sc->sc_ccb2q, ccb, ccb_link);
		wakeup(sc);
		return 0;
	}

	for (i = 1000; i--; DELAY(10)) {

		status = bus_space_read_4(sc->iot, sc->ioh, TWE_STATUS);
		if (!(status & TWE_STAT_CQF))
			break;
		TWE_DPRINTF(TWE_D_CMD,  ("twe_start stat=%b ",
		    status & TWE_STAT_FLAGS, TWE_STAT_BITS));
	}

	if (!(status & TWE_STAT_CQF)) {
		bus_space_write_4(sc->iot, sc->ioh, TWE_COMMANDQUEUE,
		    ccb->ccb_cmdpa);

		TWE_DPRINTF(TWE_D_CMD, ("queue(%d) ", cmd->cmd_index));
		ccb->ccb_state = TWE_CCB_QUEUED;
		TAILQ_INSERT_TAIL(&sc->sc_ccbq, ccb, ccb_link);
		return 0;

	} else {

		printf("%s: twe_start(%d) timed out\n",
		    sc->sc_dev.dv_xname, cmd->cmd_index);

		return 1;
	}
}
648
/*
 * Poll for completion of `ccb'.  Every command appearing on the ready
 * queue in the meantime is finished via twe_done(); polling stops when
 * the target ccb itself completes (returns 0) or the timeout expires
 * (returns 1).  The budget is 100 * 10us per unit of xs->timeout for
 * SCSI commands, or 35000 units (~35s) for internal commands.
 */
int
twe_complete(ccb)
	struct twe_ccb *ccb;
{
	struct twe_softc *sc = ccb->ccb_sc;
	struct scsi_xfer *xs = ccb->ccb_xs;
	int i;

	for (i = 100 * (xs? xs->timeout : 35000); i--; DELAY(10)) {
		u_int32_t status = bus_space_read_4(sc->iot, sc->ioh, TWE_STATUS);

		/* TWE_DPRINTF(TWE_D_CMD,  ("twe_intr stat=%b ",
		    status & TWE_STAT_FLAGS, TWE_STAT_BITS)); */

		/* drain the ready queue until it goes empty */
		while (!(status & TWE_STAT_RQE)) {
			struct twe_ccb *ccb1;
			u_int32_t ready;

			ready = bus_space_read_4(sc->iot, sc->ioh,
			    TWE_READYQUEUE);

			TWE_DPRINTF(TWE_D_CMD, ("ready=%x ", ready));

			ccb1 = &sc->sc_ccbs[TWE_READYID(ready)];
			TAILQ_REMOVE(&sc->sc_ccbq, ccb1, ccb_link);
			ccb1->ccb_state = TWE_CCB_DONE;
			if (!twe_done(sc, ccb1) && ccb1 == ccb) {
				TWE_DPRINTF(TWE_D_CMD, ("complete\n"));
				return 0;
			}

			status = bus_space_read_4(sc->iot, sc->ioh, TWE_STATUS);
			/* TWE_DPRINTF(TWE_D_CMD,  ("twe_intr stat=%b ",
			    status & TWE_STAT_FLAGS, TWE_STAT_BITS)); */
		}
	}

	return 1;
}
688
/*
 * Final processing of a completed ccb: sync and unload the data DMA
 * map (transfer direction taken from the SCSI xfer flags, or from the
 * twe opcode for internal commands), copy any bounce buffer back to
 * the caller's memory and release it, return the ccb to the free list,
 * and call scsi_done() for SCSI-originated commands.  Returns 1 if
 * the ccb was not in DONE state, 0 otherwise.
 */
int
twe_done(sc, ccb)
	struct twe_softc *sc;
	struct twe_ccb *ccb;
{
	struct twe_cmd *cmd = ccb->ccb_cmd;
	struct scsi_xfer *xs = ccb->ccb_xs;
	bus_dmamap_t	dmap;
	twe_lock_t	lock;

	TWE_DPRINTF(TWE_D_CMD, ("done(%d) ", cmd->cmd_index));

	if (ccb->ccb_state != TWE_CCB_DONE) {
		printf("%s: undone ccb %d ready\n",
		     sc->sc_dev.dv_xname, cmd->cmd_index);
		return 1;
	}

	dmap = ccb->ccb_dmamap;
	if (xs) {
		/* PREVENT_ALLOW/SYNCHRONIZE_CACHE carried no mapped data */
		if (xs->cmd->opcode != PREVENT_ALLOW &&
		    xs->cmd->opcode != SYNCHRONIZE_CACHE) {
			bus_dmamap_sync(sc->dmat, dmap, 0,
			    dmap->dm_mapsize, (xs->flags & SCSI_DATA_IN) ?
			    BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(sc->dmat, dmap);
		}
	} else {
		/* internal command: direction follows the (LE) opcode */
		switch (letoh16(cmd->cmd_op)) {
		case TWE_CMD_GPARAM:
		case TWE_CMD_READ:
			bus_dmamap_sync(sc->dmat, dmap, 0,
			    dmap->dm_mapsize, BUS_DMASYNC_POSTREAD);
			bus_dmamap_unload(sc->dmat, dmap);
			break;
		case TWE_CMD_SPARAM:
		case TWE_CMD_WRITE:
			bus_dmamap_sync(sc->dmat, dmap, 0,
			    dmap->dm_mapsize, BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(sc->dmat, dmap);
			break;
		default:
			/* no data */
			break;
		}
	}

	/* bounce buffer was used: propagate the result, then free it */
	if (ccb->ccb_realdata) {
		bcopy(ccb->ccb_data, ccb->ccb_realdata, ccb->ccb_length);
		bus_dmamem_unmap(sc->dmat, ccb->ccb_data, ccb->ccb_length);
		bus_dmamem_free(sc->dmat, ccb->ccb_2bseg, ccb->ccb_2nseg);
	}

	lock = TWE_LOCK(sc);
	twe_put_ccb(ccb);

	if (xs) {
		xs->resid = 0;
		xs->flags |= ITSDONE;
		scsi_done(xs);
	}
	TWE_UNLOCK(sc, lock);

	return 0;
}
754
755void
756tweminphys(bp)
757	struct buf *bp;
758{
759	if (bp->b_bcount > TWE_MAXFER)
760		bp->b_bcount = TWE_MAXFER;
761	minphys(bp);
762}
763
764void
765twe_copy_internal_data(xs, v, size)
766	struct scsi_xfer *xs;
767	void *v;
768	size_t size;
769{
770	size_t copy_cnt;
771
772	TWE_DPRINTF(TWE_D_MISC, ("twe_copy_internal_data "));
773
774	if (!xs->datalen)
775		printf("uio move is not yet supported\n");
776	else {
777		copy_cnt = MIN(size, xs->datalen);
778		bcopy(v, xs->data, copy_cnt);
779	}
780}
781
/*
 * SCSI command entry point.  Pure-SCSI commands (TEST UNIT READY,
 * REQUEST SENSE, INQUIRY, MODE SENSE, READ CAPACITY, ...) are
 * emulated from the per-unit data gathered at attach time; reads,
 * writes and cache syncs are translated into twe commands and
 * submitted through twe_cmd().  Returns COMPLETE, TRY_AGAIN_LATER or
 * SUCCESSFULLY_QUEUED per the SCSI mid-layer contract.
 */
int
twe_scsi_cmd(xs)
	struct scsi_xfer *xs;
{
	struct scsi_link *link = xs->sc_link;
	struct twe_softc *sc = link->adapter_softc;
	struct twe_ccb *ccb;
	struct twe_cmd *cmd;
	struct scsi_inquiry_data inq;
	struct scsi_sense_data sd;
	struct {
		struct scsi_mode_header hd;
		struct scsi_blk_desc bd;
		union scsi_disk_pages dp;
	} mpd;
	struct scsi_read_cap_data rcd;
	u_int8_t target = link->target;
	u_int32_t blockno, blockcnt;
	struct scsi_rw *rw;
	struct scsi_rw_big *rwb;
	int error, op, flags, wait;
	twe_lock_t lock;


	/* reject unknown targets and non-zero LUNs outright */
	if (target >= TWE_MAX_UNITS || !sc->sc_hdr[target].hd_present ||
	    link->lun != 0) {
		xs->error = XS_DRIVER_STUFFUP;
		return (COMPLETE);
	}

	TWE_DPRINTF(TWE_D_CMD, ("twe_scsi_cmd "));

	xs->error = XS_NOERROR;

	switch (xs->cmd->opcode) {
	case TEST_UNIT_READY:
	case START_STOP:
#if 0
	case VERIFY:
#endif
		TWE_DPRINTF(TWE_D_CMD, ("opc %d tgt %d ", xs->cmd->opcode,
		    target));
		break;

	case REQUEST_SENSE:
		/* fabricate a "no sense" response */
		TWE_DPRINTF(TWE_D_CMD, ("REQUEST SENSE tgt %d ", target));
		bzero(&sd, sizeof sd);
		sd.error_code = 0x70;
		sd.segment = 0;
		sd.flags = SKEY_NO_SENSE;
		*(u_int32_t*)sd.info = htole32(0);
		sd.extra_len = 0;
		twe_copy_internal_data(xs, &sd, sizeof sd);
		break;

	case INQUIRY:
		TWE_DPRINTF(TWE_D_CMD, ("INQUIRY tgt %d devtype %x ", target,
		    sc->sc_hdr[target].hd_devtype));
		bzero(&inq, sizeof inq);
		inq.device =
		    (sc->sc_hdr[target].hd_devtype & 4) ? T_CDROM : T_DIRECT;
		inq.dev_qual2 =
		    (sc->sc_hdr[target].hd_devtype & 1) ? SID_REMOVABLE : 0;
		inq.version = 2;
		inq.response_format = 2;
		inq.additional_length = 32;
		strlcpy(inq.vendor, "3WARE  ", sizeof inq.vendor);
		snprintf(inq.product, sizeof inq.product, "Host drive  #%02d",
		    target);
		strlcpy(inq.revision, "   ", sizeof inq.revision);
		twe_copy_internal_data(xs, &inq, sizeof inq);
		break;

	case MODE_SENSE:
		TWE_DPRINTF(TWE_D_CMD, ("MODE SENSE tgt %d ", target));

		bzero(&mpd, sizeof mpd);
		switch (((struct scsi_mode_sense *)xs->cmd)->page) {
		case 4:
			/* scsi_disk.h says this should be 0x16 */
			mpd.dp.rigid_geometry.pg_length = 0x16;
			mpd.hd.data_length = sizeof mpd.hd + sizeof mpd.bd +
			    mpd.dp.rigid_geometry.pg_length;
			mpd.hd.blk_desc_len = sizeof mpd.bd;

			/* XXX */
			mpd.hd.dev_spec =
			    (sc->sc_hdr[target].hd_devtype & 2) ? 0x80 : 0;
			_lto3b(TWE_SECTOR_SIZE, mpd.bd.blklen);
			mpd.dp.rigid_geometry.pg_code = 4;
			_lto3b(sc->sc_hdr[target].hd_size /
			    sc->sc_hdr[target].hd_heads /
			    sc->sc_hdr[target].hd_secs,
			    mpd.dp.rigid_geometry.ncyl);
			mpd.dp.rigid_geometry.nheads =
			    sc->sc_hdr[target].hd_heads;
			twe_copy_internal_data(xs, (u_int8_t *)&mpd,
			    sizeof mpd);
			break;

		default:
			printf("%s: mode sense page %d not simulated\n",
			    sc->sc_dev.dv_xname,
			    ((struct scsi_mode_sense *)xs->cmd)->page);
			xs->error = XS_DRIVER_STUFFUP;
			return (TRY_AGAIN_LATER);
		}
		break;

	case READ_CAPACITY:
		TWE_DPRINTF(TWE_D_CMD, ("READ CAPACITY tgt %d ", target));
		bzero(&rcd, sizeof rcd);
		_lto4b(sc->sc_hdr[target].hd_size - 1, rcd.addr);
		_lto4b(TWE_SECTOR_SIZE, rcd.length);
		twe_copy_internal_data(xs, &rcd, sizeof rcd);
		break;

	case PREVENT_ALLOW:
		TWE_DPRINTF(TWE_D_CMD, ("PREVENT/ALLOW "));
		return (COMPLETE);

	case READ_COMMAND:
	case READ_BIG:
	case WRITE_COMMAND:
	case WRITE_BIG:
	case SYNCHRONIZE_CACHE:
		lock = TWE_LOCK(sc);

		flags = 0;
		if (xs->cmd->opcode != SYNCHRONIZE_CACHE) {
			/* A read or write operation. */
			if (xs->cmdlen == 6) {
				rw = (struct scsi_rw *)xs->cmd;
				blockno = _3btol(rw->addr) &
				    (SRW_TOPADDR << 16 | 0xffff);
				blockcnt = rw->length ? rw->length : 0x100;
			} else {
				rwb = (struct scsi_rw_big *)xs->cmd;
				blockno = _4btol(rwb->addr);
				blockcnt = _2btol(rwb->length);
				/* reflect DPO & FUA flags */
				if (xs->cmd->opcode == WRITE_BIG &&
				    rwb->byte2 & 0x18)
					flags = TWE_FLAGS_CACHEDISABLE;
			}
			if (blockno >= sc->sc_hdr[target].hd_size ||
			    blockno + blockcnt > sc->sc_hdr[target].hd_size) {
				printf("%s: out of bounds %u-%u >= %u\n",
				    sc->sc_dev.dv_xname, blockno, blockcnt,
				    sc->sc_hdr[target].hd_size);
				xs->error = XS_DRIVER_STUFFUP;
				scsi_done(xs);
				TWE_UNLOCK(sc, lock);
				return (COMPLETE);
			}
		}

		/* SYNCHRONIZE_CACHE maps to the NOP opcode via default */
		switch (xs->cmd->opcode) {
		case READ_COMMAND:	op = TWE_CMD_READ;	break;
		case READ_BIG:		op = TWE_CMD_READ;	break;
		case WRITE_COMMAND:	op = TWE_CMD_WRITE;	break;
		case WRITE_BIG:		op = TWE_CMD_WRITE;	break;
		default:		op = TWE_CMD_NOP;	break;
		}

		if ((ccb = twe_get_ccb(sc)) == NULL) {
			xs->error = XS_DRIVER_STUFFUP;
			scsi_done(xs);
			TWE_UNLOCK(sc, lock);
			return (COMPLETE);
		}

		ccb->ccb_xs = xs;
		ccb->ccb_data = xs->data;
		ccb->ccb_length = xs->datalen;
		ccb->ccb_state = TWE_CCB_READY;
		cmd = ccb->ccb_cmd;
		cmd->cmd_unit_host = TWE_UNITHOST(target, 0); /* XXX why 0? */
		cmd->cmd_op = op;
		cmd->cmd_flags = flags;
		cmd->cmd_io.count = htole16(blockcnt);
		cmd->cmd_io.lba = htole32(blockno);
		wait = xs->flags & SCSI_POLL;
		/* poll until the worker thread exists to service the queue */
		if (!sc->sc_thread_on)
			wait |= SCSI_POLL;

		if ((error = twe_cmd(ccb, ((xs->flags & SCSI_NOSLEEP)?
		    BUS_DMA_NOWAIT : BUS_DMA_WAITOK), wait))) {

			TWE_UNLOCK(sc, lock);
			TWE_DPRINTF(TWE_D_CMD, ("failed %p ", xs));
			if (xs->flags & SCSI_POLL) {
				xs->error = XS_TIMEOUT;
				return (TRY_AGAIN_LATER);
			} else {
				xs->error = XS_DRIVER_STUFFUP;
				scsi_done(xs);
				return (COMPLETE);
			}
		}

		TWE_UNLOCK(sc, lock);

		if (wait & SCSI_POLL)
			return (COMPLETE);
		else
			return (SUCCESSFULLY_QUEUED);

	default:
		TWE_DPRINTF(TWE_D_CMD, ("unknown opc %d ", xs->cmd->opcode));
		xs->error = XS_DRIVER_STUFFUP;
	}

	return (COMPLETE);
}
997
/*
 * Interrupt handler.  Moves finished commands from the controller's
 * ready queue onto the done queue (completed later by the worker
 * thread), masks the command interrupt when it fires, and drains the
 * AEN queue when the controller raises an attention condition.
 * Returns non-zero if the interrupt was ours.
 */
int
twe_intr(v)
	void *v;
{
	struct twe_softc *sc = v;
	struct twe_ccb	*ccb;
	struct twe_cmd	*cmd;
	u_int32_t	status;
	twe_lock_t	lock;
	int		rv = 0;

	status = bus_space_read_4(sc->iot, sc->ioh, TWE_STATUS);
	TWE_DPRINTF(TWE_D_INTR,  ("twe_intr stat=%b ",
	    status & TWE_STAT_FLAGS, TWE_STAT_BITS));
#if 0
	if (status & TWE_STAT_HOSTI) {

		bus_space_write_4(sc->iot, sc->ioh, TWE_CONTROL,
		    TWE_CTRL_CHOSTI);
	}
#endif

	if (status & TWE_STAT_RDYI) {

		/* pull every completed command off the ready queue */
		while (!(status & TWE_STAT_RQE)) {

			u_int32_t ready;

			/*
			 * it seems that reading ready queue
			 * we get all the status bits in each ready word.
			 * i wonder if it's legal to use those for
			 * status and avoid extra read below
			 */
			ready = bus_space_read_4(sc->iot, sc->ioh,
			    TWE_READYQUEUE);

			ccb = &sc->sc_ccbs[TWE_READYID(ready)];
			TAILQ_REMOVE(&sc->sc_ccbq, ccb, ccb_link);
			ccb->ccb_state = TWE_CCB_DONE;
			TAILQ_INSERT_TAIL(&sc->sc_done_ccb, ccb, ccb_link);
			rv++;

			status = bus_space_read_4(sc->iot, sc->ioh, TWE_STATUS);
			TWE_DPRINTF(TWE_D_INTR, ("twe_intr stat=%b ",
			    status & TWE_STAT_FLAGS, TWE_STAT_BITS));
		}
	}

	if (status & TWE_STAT_CMDI) {
		rv++;
		bus_space_write_4(sc->iot, sc->ioh, TWE_CONTROL,
		    TWE_CTRL_MCMDI);
	}

	/* let the worker thread process the done queue / prequeue */
	if (rv)
		wakeup(sc);

	if (status & TWE_STAT_ATTNI) {
		u_int16_t aen;

		/*
		 * we know no attentions of interest right now.
		 * one of those would be mirror degradation i think.
		 * or, what else exists in there?
		 * maybe 3ware can answer that?
		 */
		bus_space_write_4(sc->iot, sc->ioh, TWE_CONTROL,
		    TWE_CTRL_CATTNI);

		lock = TWE_LOCK(sc);
		/* read AEN parameters until the queue reports empty */
		for (aen = -1; aen != TWE_AEN_QEMPTY; ) {
			u_int8_t param_buf[2 * TWE_SECTOR_SIZE + TWE_ALIGN - 1];
			struct twe_param *pb = (void *) (((u_long)param_buf +
			    TWE_ALIGN - 1) & ~(TWE_ALIGN - 1));

			if ((ccb = twe_get_ccb(sc)) == NULL)
				break;

			ccb->ccb_xs = NULL;
			ccb->ccb_data = pb;
			ccb->ccb_length = TWE_SECTOR_SIZE;
			ccb->ccb_state = TWE_CCB_READY;
			cmd = ccb->ccb_cmd;
			cmd->cmd_unit_host = TWE_UNITHOST(0, 0);
			cmd->cmd_op = TWE_CMD_GPARAM;
			cmd->cmd_flags = 0;
			cmd->cmd_param.count = 1;

			pb->table_id = TWE_PARAM_AEN;
			pb->param_id = 2;
			pb->param_size = 2;
			if (twe_cmd(ccb, BUS_DMA_NOWAIT, 1)) {
				printf(": error draining attention queue\n");
				break;
			}
			aen = *(u_int16_t *)pb->data;
			TWE_DPRINTF(TWE_D_AEN, ("aen=%x ", aen));
		}
		TWE_UNLOCK(sc, lock);
	}

	return rv;
}
1102