/*-
 *       Copyright (c) 1997 by Simon Shapiro
 *       All Rights Reserved
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions, and the following disclaimer,
 *    without modification, immediately at the beginning of the file.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
32175702Smarius
/*
 * dpt_scsi.c: SCSI dependent code for the DPT driver
 *
 * credits:	Assisted by Mike Neuffer in the early low level DPT code
 *		Thanx to Mark Salyzyn of DPT for his assistance.
 *		Special thanx to Justin Gibbs for invaluable help in
 *		making this driver look and work like a FreeBSD component.
 *		Last but not least, many thanx to UCB and the FreeBSD
 *		team for creating and maintaining such a wonderful O/S.
 *
 * TODO:     * Add ISA probe code.
 *	     * Add driver-level RAID-0. This will allow interoperability with
 *	       NiceTry, M$-Doze, Win-Dog, Slowlaris, etc., in recognizing RAID
 *	       arrays that span controllers (Wow!).
 */
48175702Smarius
49175702Smarius#define _DPT_C_
50175702Smarius
51175702Smarius#include "opt_dpt.h"
52175702Smarius#include "opt_eisa.h"
53175702Smarius
54175702Smarius#include <sys/param.h>
55175702Smarius#include <sys/systm.h>
56175702Smarius#include <sys/conf.h>
57175702Smarius#include <sys/eventhandler.h>
58175702Smarius#include <sys/malloc.h>
59175702Smarius#include <sys/kernel.h>
60175702Smarius
61231914Smarius#include <sys/bus.h>
62231914Smarius
63231914Smarius#include <machine/bus.h>
64175702Smarius
65175702Smarius#include <machine/resource.h>
66175702Smarius#include <sys/rman.h>
67175702Smarius
68175702Smarius
69175702Smarius#include <cam/cam.h>
70175702Smarius#include <cam/cam_ccb.h>
71175702Smarius#include <cam/cam_sim.h>
72175702Smarius#include <cam/cam_xpt_sim.h>
73175702Smarius#include <cam/cam_debug.h>
74175702Smarius#include <cam/scsi/scsi_all.h>
75175702Smarius#include <cam/scsi/scsi_message.h>
76175702Smarius
77175702Smarius#include <vm/vm.h>
78175702Smarius#include <vm/pmap.h>
79175702Smarius
80175702Smarius#include <dev/dpt/dpt.h>
81175702Smarius
/* dpt_isa.c, dpt_eisa.c, and dpt_pci.c need this in a central place */
devclass_t	dpt_devclass;

#define microtime_now dpt_time_now()

#define dpt_inl(dpt, port)				\
	bus_read_4((dpt)->io_res, (dpt)->io_offset + port)
#define dpt_inb(dpt, port)				\
	bus_read_1((dpt)->io_res, (dpt)->io_offset + port)
#define dpt_outl(dpt, port, value)			\
	bus_write_4((dpt)->io_res, (dpt)->io_offset + port, value)
#define dpt_outb(dpt, port, value)			\
	bus_write_1((dpt)->io_res, (dpt)->io_offset + port, value)
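/*
 * The register access macros above reach each adapter's EATA registers
 * through the bus_space resource set up by the bus front ends (dpt_isa.c,
 * dpt_eisa.c, dpt_pci.c), with io_offset selecting where the register block
 * starts within that resource; the core driver therefore never touches
 * I/O ports directly.
 */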
95175702Smarius
/*
 * These will have to be set up by parameters passed at boot/load time. For
 * performance reasons, we make them constants for the time being.
 */
100175702Smarius#define	dpt_min_segs	DPT_MAX_SEGS
101175702Smarius#define	dpt_max_segs	DPT_MAX_SEGS
102175702Smarius
/* Definitions for our use of the SIM private CCB area */
#define ccb_dccb_ptr spriv_ptr0
#define ccb_dpt_ptr spriv_ptr1
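/*
 * The two SIM-private pointers in each CCB header carry back-pointers to
 * the dpt_ccb and to the softc, so the completion, error, and timeout paths
 * can recover both from nothing more than the CAM CCB.
 */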
106175702Smarius
107175702Smarius/* ================= Private Inline Function declarations ===================*/
108175702Smariusstatic __inline int		dpt_just_reset(dpt_softc_t * dpt);
109175702Smariusstatic __inline int		dpt_raid_busy(dpt_softc_t * dpt);
110175702Smarius#ifdef DEV_EISA
111175702Smariusstatic __inline int		dpt_pio_wait (u_int32_t, u_int, u_int, u_int);
112175702Smarius#endif
113175702Smariusstatic __inline int		dpt_wait(dpt_softc_t *dpt, u_int bits,
114221407Smarius					 u_int state);
115221407Smariusstatic __inline struct dpt_ccb* dptgetccb(struct dpt_softc *dpt);
116221407Smariusstatic __inline void		dptfreeccb(struct dpt_softc *dpt,
117221407Smarius					   struct dpt_ccb *dccb);
118231914Smariusstatic __inline bus_addr_t	dptccbvtop(struct dpt_softc *dpt,
119175702Smarius					   struct dpt_ccb *dccb);
120175702Smarius
121175702Smariusstatic __inline int		dpt_send_immediate(dpt_softc_t *dpt,
122221407Smarius						   eata_ccb_t *cmd_block,
123221407Smarius						   u_int32_t cmd_busaddr,
124221407Smarius						   u_int retries,
125221407Smarius						   u_int ifc, u_int code,
126221407Smarius						   u_int code2);
127221407Smarius
128175702Smarius/* ==================== Private Function declarations =======================*/
129175702Smariusstatic void		dptmapmem(void *arg, bus_dma_segment_t *segs,
130175702Smarius				  int nseg, int error);
131175702Smarius
132221407Smariusstatic struct sg_map_node*
133175702Smarius			dptallocsgmap(struct dpt_softc *dpt);
134175702Smarius
135175702Smariusstatic int		dptallocccbs(dpt_softc_t *dpt);
136175702Smarius
137175702Smariusstatic int		dpt_get_conf(dpt_softc_t *dpt, dpt_ccb_t *dccb,
138175702Smarius				     u_int32_t dccb_busaddr, u_int size,
139221407Smarius				     u_int page, u_int target, int extent);
140175702Smariusstatic void		dpt_detect_cache(dpt_softc_t *dpt, dpt_ccb_t *dccb,
141175702Smarius					 u_int32_t dccb_busaddr,
142175702Smarius					 u_int8_t *buff);
143175702Smarius
144175702Smariusstatic void		dpt_poll(struct cam_sim *sim);
145175702Smariusstatic void		dpt_intr_locked(dpt_softc_t *dpt);
146175702Smarius
147175702Smariusstatic void		dptexecuteccb(void *arg, bus_dma_segment_t *dm_segs,
148175702Smarius				      int nseg, int error);
149175702Smarius
150175702Smariusstatic void		dpt_action(struct cam_sim *sim, union ccb *ccb);
151175702Smarius
152175702Smariusstatic int		dpt_send_eata_command(dpt_softc_t *dpt, eata_ccb_t *cmd,
153175702Smarius					      u_int32_t cmd_busaddr,
154175702Smarius					      u_int command, u_int retries,
155175702Smarius					      u_int ifc, u_int code,
156175702Smarius					      u_int code2);
157175702Smariusstatic void		dptprocesserror(dpt_softc_t *dpt, dpt_ccb_t *dccb,
158175702Smarius					union ccb *ccb, u_int hba_stat,
159175702Smarius					u_int scsi_stat, u_int32_t resid);
160175702Smarius
161175702Smariusstatic void		dpttimeout(void *arg);
162175702Smariusstatic void		dptshutdown(void *arg, int howto);
163175702Smarius
164175702Smarius/* ================= Private Inline Function definitions ====================*/
165175702Smariusstatic __inline int
166175702Smariusdpt_just_reset(dpt_softc_t * dpt)
167175702Smarius{
168221407Smarius	if ((dpt_inb(dpt, 2) == 'D')
169175702Smarius	 && (dpt_inb(dpt, 3) == 'P')
170175702Smarius	 && (dpt_inb(dpt, 4) == 'T')
171175702Smarius	 && (dpt_inb(dpt, 5) == 'H'))
172175702Smarius		return (1);
173175702Smarius	else
174175702Smarius		return (0);
175175702Smarius}
176175702Smarius
177175702Smariusstatic __inline int
178175702Smariusdpt_raid_busy(dpt_softc_t * dpt)
179175702Smarius{
180175702Smarius	if ((dpt_inb(dpt, 0) == 'D')
181175702Smarius	 && (dpt_inb(dpt, 1) == 'P')
182175702Smarius	 && (dpt_inb(dpt, 2) == 'T'))
183175702Smarius		return (1);
184175702Smarius	else
185175702Smarius		return (0);
186175702Smarius}
187175702Smarius
188175702Smarius#ifdef DEV_EISA
189175702Smariusstatic __inline int
190175702Smariusdpt_pio_wait (u_int32_t base, u_int reg, u_int bits, u_int state)
191175702Smarius{
192175702Smarius	int   i;
193175702Smarius	u_int c;
194175702Smarius
	for (i = 0; i < 20000; i++) {	/* wait up to ~1s for not busy */
196175702Smarius		c = inb(base + reg) & bits;
197175702Smarius		if (!(c == state))
198175702Smarius			return (0);
199175702Smarius		else
200175702Smarius			DELAY(50);
201175702Smarius	}
202175702Smarius	return (-1);
203175702Smarius}
204175702Smarius#endif
205175702Smarius
206175702Smariusstatic __inline int
207175702Smariusdpt_wait(dpt_softc_t *dpt, u_int bits, u_int state)
208175702Smarius{
209175702Smarius	int   i;
210175702Smarius	u_int c;
211175702Smarius
	for (i = 0; i < 20000; i++) {	/* wait up to ~1s for not busy */
213175702Smarius		c = dpt_inb(dpt, HA_RSTATUS) & bits;
214175702Smarius		if (c == state)
215175702Smarius			return (0);
216175702Smarius		else
217175702Smarius			DELAY(50);
218175702Smarius	}
219175702Smarius	return (-1);
220175702Smarius}
221175702Smarius
static __inline struct dpt_ccb*
dptgetccb(struct dpt_softc *dpt)
{
	struct	dpt_ccb* dccb;

	if (!dumping)
		mtx_assert(&dpt->lock, MA_OWNED);
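	/*
	 * Note: the lock assertion above is skipped while dumping, presumably
	 * because the crash-dump path can enter the driver without holding
	 * the SIM lock.
	 */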
229175702Smarius	if ((dccb = SLIST_FIRST(&dpt->free_dccb_list)) != NULL) {
230175702Smarius		SLIST_REMOVE_HEAD(&dpt->free_dccb_list, links);
231175702Smarius		dpt->free_dccbs--;
232175702Smarius	} else if (dpt->total_dccbs < dpt->max_dccbs) {
233175702Smarius		dptallocccbs(dpt);
234175702Smarius		dccb = SLIST_FIRST(&dpt->free_dccb_list);
235175702Smarius		if (dccb == NULL)
236175702Smarius			device_printf(dpt->dev, "Can't malloc DCCB\n");
237175702Smarius		else {
238175702Smarius			SLIST_REMOVE_HEAD(&dpt->free_dccb_list, links);
239175702Smarius			dpt->free_dccbs--;
240175702Smarius		}
241175702Smarius	}
242175702Smarius
243175702Smarius	return (dccb);
244175702Smarius}
245175702Smarius
246175702Smariusstatic __inline void
247175702Smariusdptfreeccb(struct dpt_softc *dpt, struct dpt_ccb *dccb)
248175702Smarius{
249175702Smarius
250175702Smarius	if (!dumping)
251175702Smarius		mtx_assert(&dpt->lock, MA_OWNED);
252175702Smarius	if ((dccb->state & DCCB_ACTIVE) != 0)
253175702Smarius		LIST_REMOVE(&dccb->ccb->ccb_h, sim_links.le);
254175702Smarius	if ((dccb->state & DCCB_RELEASE_SIMQ) != 0)
255175702Smarius		dccb->ccb->ccb_h.status |= CAM_RELEASE_SIMQ;
256175702Smarius	else if (dpt->resource_shortage != 0
257175702Smarius	 && (dccb->ccb->ccb_h.status & CAM_RELEASE_SIMQ) == 0) {
258175702Smarius		dccb->ccb->ccb_h.status |= CAM_RELEASE_SIMQ;
259175702Smarius		dpt->resource_shortage = FALSE;
260175702Smarius	}
261175702Smarius	dccb->state = DCCB_FREE;
262175702Smarius	SLIST_INSERT_HEAD(&dpt->free_dccb_list, dccb, links);
263175702Smarius	++dpt->free_dccbs;
264175702Smarius}
265175702Smarius
static __inline bus_addr_t
dptccbvtop(struct dpt_softc *dpt, struct dpt_ccb *dccb)
{
	return (dpt->dpt_ccb_busbase
	      + (u_int32_t)((caddr_t)dccb - (caddr_t)dpt->dpt_dccbs));
}

static __inline struct dpt_ccb *
dptccbptov(struct dpt_softc *dpt, bus_addr_t busaddr)
{
	return (dpt->dpt_dccbs
	     +  ((struct dpt_ccb *)busaddr
	       - (struct dpt_ccb *)dpt->dpt_ccb_busbase));
}
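/*
 * The translations above work because all DPT CCBs live in one contiguous
 * DMA allocation: a CCB's bus address is simply dpt_ccb_busbase plus its
 * byte offset within the dpt_dccbs array, so
 *	dptccbptov(dpt, dptccbvtop(dpt, dccb)) == dccb
 * for every CCB in the array.  The interrupt handler relies on this to map
 * the bus address reported in the status packet back to a driver CCB.
 */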
280
281/*
282 * Send a command for immediate execution by the DPT
283 * See above function for IMPORTANT notes.
284 */
285static __inline int
286dpt_send_immediate(dpt_softc_t *dpt, eata_ccb_t *cmd_block,
287		   u_int32_t cmd_busaddr, u_int retries,
288		   u_int ifc, u_int code, u_int code2)
289{
290	return (dpt_send_eata_command(dpt, cmd_block, cmd_busaddr,
291				      EATA_CMD_IMMEDIATE, retries, ifc,
292				      code, code2));
293}
294
295
296/* ===================== Private Function definitions =======================*/
297static void
298dptmapmem(void *arg, bus_dma_segment_t *segs, int nseg, int error)
299{
300	bus_addr_t *busaddrp;
301
302	busaddrp = (bus_addr_t *)arg;
303	*busaddrp = segs->ds_addr;
304}
305
306static struct sg_map_node *
307dptallocsgmap(struct dpt_softc *dpt)
308{
309	struct sg_map_node *sg_map;
310
311	sg_map = malloc(sizeof(*sg_map), M_DEVBUF, M_NOWAIT);
312
313	if (sg_map == NULL)
314		return (NULL);
315
316	/* Allocate S/G space for the next batch of CCBS */
317	if (bus_dmamem_alloc(dpt->sg_dmat, (void **)&sg_map->sg_vaddr,
318			     BUS_DMA_NOWAIT, &sg_map->sg_dmamap) != 0) {
319		free(sg_map, M_DEVBUF);
320		return (NULL);
321	}
322
323	(void)bus_dmamap_load(dpt->sg_dmat, sg_map->sg_dmamap, sg_map->sg_vaddr,
324			      PAGE_SIZE, dptmapmem, &sg_map->sg_physaddr,
325			      /*flags*/0);
326
327	SLIST_INSERT_HEAD(&dpt->sg_maps, sg_map, links);
328
329	return (sg_map);
330}
331
/*
 * Allocate another chunk of CCBs. Return count of entries added.
 */
335static int
336dptallocccbs(dpt_softc_t *dpt)
337{
338	struct dpt_ccb *next_ccb;
339	struct sg_map_node *sg_map;
340	bus_addr_t physaddr;
341	dpt_sg_t *segs;
342	int newcount;
343	int i;
344
345	if (!dumping)
346		mtx_assert(&dpt->lock, MA_OWNED);
347	next_ccb = &dpt->dpt_dccbs[dpt->total_dccbs];
348
349	if (next_ccb == dpt->dpt_dccbs) {
350		/*
351		 * First time through.  Re-use the S/G
352		 * space we allocated for initialization
353		 * CCBS.
354		 */
355		sg_map = SLIST_FIRST(&dpt->sg_maps);
356	} else {
357		sg_map = dptallocsgmap(dpt);
358	}
359
360	if (sg_map == NULL)
361		return (0);
362
363	segs = sg_map->sg_vaddr;
364	physaddr = sg_map->sg_physaddr;
365
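	/*
	 * Each CCB gets a private run of dpt->sgsize scatter/gather
	 * descriptors, so one PAGE_SIZE S/G map provides enough descriptors
	 * for PAGE_SIZE / (sgsize * sizeof(dpt_sg_t)) CCBs; that is the most
	 * this pass can add.
	 */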
366	newcount = (PAGE_SIZE / (dpt->sgsize * sizeof(dpt_sg_t)));
367	for (i = 0; dpt->total_dccbs < dpt->max_dccbs && i < newcount; i++) {
368		int error;
369
370		error = bus_dmamap_create(dpt->buffer_dmat, /*flags*/0,
371					  &next_ccb->dmamap);
372		if (error != 0)
373			break;
374		callout_init_mtx(&next_ccb->timer, &dpt->lock, 0);
375		next_ccb->sg_list = segs;
376		next_ccb->sg_busaddr = htonl(physaddr);
377		next_ccb->eata_ccb.cp_dataDMA = htonl(physaddr);
378		next_ccb->eata_ccb.cp_statDMA = htonl(dpt->sp_physaddr);
379		next_ccb->eata_ccb.cp_reqDMA =
380		    htonl(dptccbvtop(dpt, next_ccb)
381			+ offsetof(struct dpt_ccb, sense_data));
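		/*
		 * cp_busaddr (set just below) records this CCB's own bus
		 * address; the controller hands it back in the status
		 * packet, which is how dpt_intr_locked() locates the
		 * completed CCB via dptccbptov().
		 */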
382		next_ccb->eata_ccb.cp_busaddr = dpt->dpt_ccb_busend;
383		next_ccb->state = DCCB_FREE;
384		next_ccb->tag = dpt->total_dccbs;
385		SLIST_INSERT_HEAD(&dpt->free_dccb_list, next_ccb, links);
386		segs += dpt->sgsize;
387		physaddr += (dpt->sgsize * sizeof(dpt_sg_t));
388		dpt->dpt_ccb_busend += sizeof(*next_ccb);
389		next_ccb++;
390		dpt->total_dccbs++;
391	}
392	return (i);
393}
394
395#ifdef DEV_EISA
396dpt_conf_t *
397dpt_pio_get_conf (u_int32_t base)
398{
399	static dpt_conf_t *	conf;
400	u_int16_t *		p;
401	int			i;
402
403	/*
404	 * Allocate a dpt_conf_t
405	 */
406	if (!conf) {
407		conf = (dpt_conf_t *)malloc(sizeof(dpt_conf_t),
408						 M_DEVBUF, M_NOWAIT | M_ZERO);
409	}
410
411	/*
412	 * If we didn't get one then we probably won't ever get one.
413	 */
414	if (!conf) {
415		printf("dpt: unable to allocate dpt_conf_t\n");
416		return (NULL);
417	}
418
419	/*
420	 * Reset the controller.
421	 */
422	outb((base + HA_WCOMMAND), EATA_CMD_RESET);
423
	/*
	 * Wait for the controller to become ready.
	 * For some reason there must be -no- delay after issuing the reset
	 * before we poll for ready status.
	 */
429	if (dpt_pio_wait(base, HA_RSTATUS, HA_SBUSY, 0)) {
430		printf("dpt: timeout waiting for controller to become ready\n");
431		return (NULL);
432	}
433
	if (dpt_pio_wait(base, HA_RAUXSTAT, HA_ABUSY, 0)) {
		printf("dpt: timeout waiting for adapter ready.\n");
		return (NULL);
	}
438
439	/*
440	 * Send the PIO_READ_CONFIG command.
441	 */
442	outb((base + HA_WCOMMAND), EATA_CMD_PIO_READ_CONFIG);
443
444	/*
445	 * Read the data into the struct.
446	 */
447	p = (u_int16_t *)conf;
448	for (i = 0; i < (sizeof(dpt_conf_t) / 2); i++) {
449
450		if (dpt_pio_wait(base, HA_RSTATUS, HA_SDRQ, 0)) {
451			if (bootverbose)
452				printf("dpt: timeout in data read.\n");
453			return (NULL);
454		}
455
456		(*p) = inw(base + HA_RDATA);
457		p++;
458	}
459
460	if (inb(base + HA_RSTATUS) & HA_SERROR) {
461		if (bootverbose)
462			printf("dpt: error reading configuration data.\n");
463		return (NULL);
464	}
465
466#define BE_EATA_SIGNATURE	0x45415441
467#define LE_EATA_SIGNATURE	0x41544145
468
469	/*
470	 * Test to see if we have a valid card.
471	 */
472	if ((conf->signature == BE_EATA_SIGNATURE) ||
473	    (conf->signature == LE_EATA_SIGNATURE)) {
474
475		while (inb(base + HA_RSTATUS) & HA_SDRQ) {
476 			inw(base + HA_RDATA);
477		}
478
479		return (conf);
480	}
481	return (NULL);
482}
483#endif
484
/*
 * Read a configuration page into the supplied dpt_conf_t buffer.
 */
488static int
489dpt_get_conf(dpt_softc_t *dpt, dpt_ccb_t *dccb, u_int32_t dccb_busaddr,
490	     u_int size, u_int page, u_int target, int extent)
491{
492	eata_ccb_t *cp;
493
494	u_int8_t   status;
495
496	int	   ndx;
497	int	   result;
498
499	mtx_assert(&dpt->lock, MA_OWNED);
500	cp = &dccb->eata_ccb;
501	bzero((void *)(uintptr_t)(volatile void *)dpt->sp, sizeof(*dpt->sp));
502
503	cp->Interpret = 1;
504	cp->DataIn = 1;
505	cp->Auto_Req_Sen = 1;
506	cp->reqlen = sizeof(struct scsi_sense_data);
507
508	cp->cp_id = target;
509	cp->cp_LUN = 0;		/* In the EATA packet */
510	cp->cp_lun = 0;		/* In the SCSI command */
511
512	cp->cp_scsi_cmd = INQUIRY;
513	cp->cp_len = size;
514
515	cp->cp_extent = extent;
516
517	cp->cp_page = page;
518	cp->cp_channel = 0;	/* DNC, Interpret mode is set */
519	cp->cp_identify = 1;
520	cp->cp_datalen = htonl(size);
521
	/*
	 * This could be a simple for loop, but we suspected the compiler
	 * would optimize it a bit too much.  Wait for the controller to
	 * become ready.
	 */
527	while (((status = dpt_inb(dpt, HA_RSTATUS)) != (HA_SREADY | HA_SSC)
528	     && (status != (HA_SREADY | HA_SSC | HA_SERROR))
529	     && (status != (HA_SDRDY | HA_SERROR | HA_SDRQ)))
530	    || (dpt_wait(dpt, HA_SBUSY, 0))) {
531
		/*
		 * Are the RAID drives still spinning up?  (This should only
		 * occur if the DPT controller is in a non-PC (PCI?) platform.)
		 */
536		if (dpt_raid_busy(dpt)) {
537			device_printf(dpt->dev,
538			    "WARNING: Get_conf() RSUS failed.\n");
539			return (0);
540		}
541	}
542
543	DptStat_Reset_BUSY(dpt->sp);
544
545	/*
546	 * XXXX We might want to do something more clever than aborting at
547	 * this point, like resetting (rebooting) the controller and trying
548	 * again.
549	 */
550	if ((result = dpt_send_eata_command(dpt, cp, dccb_busaddr,
551					    EATA_CMD_DMA_SEND_CP,
552					    10000, 0, 0, 0)) != 0) {
		device_printf(dpt->dev,
		       "WARNING: Get_conf() failed (%d) to send "
		       "EATA_CMD_DMA_SEND_CP\n",
		       result);
557		return (0);
558	}
	/* Wait about a second (20000 * 50us) for a response.  This can be slow */
560	for (ndx = 0;
561	     (ndx < 20000)
562	     && !((status = dpt_inb(dpt, HA_RAUXSTAT)) & HA_AIRQ);
563	     ndx++) {
564		DELAY(50);
565	}
566
567	/* Grab the status and clear interrupts */
568	status = dpt_inb(dpt, HA_RSTATUS);
569
570	/*
571	 * Check the status carefully.  Return only if the
572	 * command was successful.
573	 */
574	if (((status & HA_SERROR) == 0)
575	 && (dpt->sp->hba_stat == 0)
576	 && (dpt->sp->scsi_stat == 0)
577	 && (dpt->sp->residue_len == 0))
578		return (0);
579
580	if (dpt->sp->scsi_stat == SCSI_STATUS_CHECK_COND)
581		return (0);
582
583	return (1);
584}
585
586/* Detect Cache parameters and size */
587static void
588dpt_detect_cache(dpt_softc_t *dpt, dpt_ccb_t *dccb, u_int32_t dccb_busaddr,
589		 u_int8_t *buff)
590{
591	eata_ccb_t *cp;
592	u_int8_t   *param;
593	int	    bytes;
594	int	    result;
595	int	    ndx;
596	u_int8_t    status;
597
598	mtx_assert(&dpt->lock, MA_OWNED);
599
	/*
	 * Default setting, for best performance..
	 * This is what virtually all cards default to..
	 */
604	dpt->cache_type = DPT_CACHE_WRITEBACK;
605	dpt->cache_size = 0;
606
607	cp = &dccb->eata_ccb;
	bzero((void *)(uintptr_t)(volatile void *)dpt->sp, sizeof(*dpt->sp));
609	bzero(buff, 512);
610
611	/* Setup the command structure */
612	cp->Interpret = 1;
613	cp->DataIn = 1;
614	cp->Auto_Req_Sen = 1;
615	cp->reqlen = sizeof(struct scsi_sense_data);
616
617	cp->cp_id = 0;		/* who cares?  The HBA will interpret.. */
618	cp->cp_LUN = 0;		/* In the EATA packet */
619	cp->cp_lun = 0;		/* In the SCSI command */
620	cp->cp_channel = 0;
621
622	cp->cp_scsi_cmd = EATA_CMD_DMA_SEND_CP;
623	cp->cp_len = 56;
624
625	cp->cp_extent = 0;
626	cp->cp_page = 0;
627	cp->cp_identify = 1;
628	cp->cp_dispri = 1;
629
630	/*
631	 * Build the EATA Command Packet structure
632	 * for a Log Sense Command.
633	 */
634	cp->cp_cdb[0] = 0x4d;
635	cp->cp_cdb[1] = 0x0;
636	cp->cp_cdb[2] = 0x40 | 0x33;
637	cp->cp_cdb[7] = 1;
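	/*
	 * 0x4d is the SCSI LOG SENSE opcode; page code 0x33 (with the
	 * "current cumulative" page-control bits, 0x40) is DPT's
	 * vendor-specific cache status page, and cp_cdb[7] = 1 sets the
	 * allocation length MSB, i.e. a 256-byte return buffer.
	 */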
638
639	cp->cp_datalen = htonl(512);
640
641	result = dpt_send_eata_command(dpt, cp, dccb_busaddr,
642				       EATA_CMD_DMA_SEND_CP,
643				       10000, 0, 0, 0);
644	if (result != 0) {
645		device_printf(dpt->dev,
646		       "WARNING: detect_cache() failed (%d) to send "
647		       "EATA_CMD_DMA_SEND_CP\n", result);
648		return;
649	}
	/* Wait about a second (20000 * 50us) for a response.  This can be slow... */
651	for (ndx = 0;
652	     (ndx < 20000) &&
653	     !((status = dpt_inb(dpt, HA_RAUXSTAT)) & HA_AIRQ);
654	     ndx++) {
655		DELAY(50);
656	}
657
658	/* Grab the status and clear interrupts */
659	status = dpt_inb(dpt, HA_RSTATUS);
660
661	/*
662	 * Sanity check
663	 */
664	if (buff[0] != 0x33) {
665		return;
666	}
667	bytes = DPT_HCP_LENGTH(buff);
668	param = DPT_HCP_FIRST(buff);
669
670	if (DPT_HCP_CODE(param) != 1) {
671		/*
672		 * DPT Log Page layout error
673		 */
674		device_printf(dpt->dev, "NOTICE: Log Page (1) layout error\n");
675		return;
676	}
677	if (!(param[4] & 0x4)) {
678		dpt->cache_type = DPT_NO_CACHE;
679		return;
680	}
681	while (DPT_HCP_CODE(param) != 6) {
682		param = DPT_HCP_NEXT(param);
683		if ((param < buff)
684		 || (param >= &buff[bytes])) {
685			return;
686		}
687	}
688
689	if (param[4] & 0x2) {
690		/*
691		 * Cache disabled
692		 */
693		dpt->cache_type = DPT_NO_CACHE;
694		return;
695	}
696
697	if (param[4] & 0x4) {
698		dpt->cache_type = DPT_CACHE_WRITETHROUGH;
699	}
700
701	/* XXX This isn't correct.  This log parameter only has two bytes.... */
702#if 0
703	dpt->cache_size = param[5]
704			| (param[6] << 8)
705			| (param[7] << 16)
706			| (param[8] << 24);
707#endif
708}
709
710static void
711dpt_poll(struct cam_sim *sim)
712{
713	dpt_intr_locked(cam_sim_softc(sim));
714}
715
716static void
717dptexecuteccb(void *arg, bus_dma_segment_t *dm_segs, int nseg, int error)
718{
719	struct	 dpt_ccb *dccb;
720	union	 ccb *ccb;
721	struct	 dpt_softc *dpt;
722
723	dccb = (struct dpt_ccb *)arg;
724	ccb = dccb->ccb;
725	dpt = (struct dpt_softc *)ccb->ccb_h.ccb_dpt_ptr;
726	if (!dumping)
727		mtx_assert(&dpt->lock, MA_OWNED);
728
729	if (error != 0) {
		if (error != EFBIG)
			device_printf(dpt->dev,
			       "Unexpected error 0x%x returned from "
			       "bus_dmamap_load\n", error);
734		if (ccb->ccb_h.status == CAM_REQ_INPROG) {
735			xpt_freeze_devq(ccb->ccb_h.path, /*count*/1);
736			ccb->ccb_h.status = CAM_REQ_TOO_BIG|CAM_DEV_QFRZN;
737		}
738		dptfreeccb(dpt, dccb);
739		xpt_done(ccb);
740		return;
741	}
742
743	if (nseg != 0) {
744		dpt_sg_t *sg;
745		bus_dma_segment_t *end_seg;
746		bus_dmasync_op_t op;
747
748		end_seg = dm_segs + nseg;
749
750		/* Copy the segments into our SG list */
751		sg = dccb->sg_list;
752		while (dm_segs < end_seg) {
753			sg->seg_len = htonl(dm_segs->ds_len);
754			sg->seg_addr = htonl(dm_segs->ds_addr);
755			sg++;
756			dm_segs++;
757		}
758
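		/*
		 * With a single segment the data pointer and length can go
		 * straight into the EATA CCB; otherwise the scatter bit is
		 * set and the CCB points at our S/G list, whose length is
		 * nseg S/G descriptors.
		 */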
759		if (nseg > 1) {
760			dccb->eata_ccb.scatter = 1;
761			dccb->eata_ccb.cp_dataDMA = dccb->sg_busaddr;
762			dccb->eata_ccb.cp_datalen =
763			    htonl(nseg * sizeof(dpt_sg_t));
764		} else {
765			dccb->eata_ccb.cp_dataDMA = dccb->sg_list[0].seg_addr;
766			dccb->eata_ccb.cp_datalen = dccb->sg_list[0].seg_len;
767		}
768
769		if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN)
770			op = BUS_DMASYNC_PREREAD;
771		else
772			op = BUS_DMASYNC_PREWRITE;
773
774		bus_dmamap_sync(dpt->buffer_dmat, dccb->dmamap, op);
775
776	} else {
777		dccb->eata_ccb.cp_dataDMA = 0;
778		dccb->eata_ccb.cp_datalen = 0;
779	}
780
	/*
	 * Last chance to check whether this CCB needs to
	 * be aborted.
	 */
785	if (ccb->ccb_h.status != CAM_REQ_INPROG) {
786		if (nseg != 0)
787			bus_dmamap_unload(dpt->buffer_dmat, dccb->dmamap);
788		dptfreeccb(dpt, dccb);
789		xpt_done(ccb);
790		return;
791	}
792
793	dccb->state |= DCCB_ACTIVE;
794	ccb->ccb_h.status |= CAM_SIM_QUEUED;
795	LIST_INSERT_HEAD(&dpt->pending_ccb_list, &ccb->ccb_h, sim_links.le);
796	callout_reset(&dccb->timer, (ccb->ccb_h.timeout * hz) / 1000,
797	    dpttimeout, dccb);
798	if (dpt_send_eata_command(dpt, &dccb->eata_ccb,
799				  dccb->eata_ccb.cp_busaddr,
800				  EATA_CMD_DMA_SEND_CP, 0, 0, 0, 0) != 0) {
801		ccb->ccb_h.status = CAM_NO_HBA; /* HBA dead or just busy?? */
802		if (nseg != 0)
803			bus_dmamap_unload(dpt->buffer_dmat, dccb->dmamap);
804		dptfreeccb(dpt, dccb);
805		xpt_done(ccb);
806	}
807}
808
809static void
810dpt_action(struct cam_sim *sim, union ccb *ccb)
811{
812	struct	  dpt_softc *dpt;
813
814	CAM_DEBUG(ccb->ccb_h.path, CAM_DEBUG_TRACE, ("dpt_action\n"));
815
816	dpt = (struct dpt_softc *)cam_sim_softc(sim);
817	mtx_assert(&dpt->lock, MA_OWNED);
818
819	if ((dpt->state & DPT_HA_SHUTDOWN_ACTIVE) != 0) {
820		xpt_print_path(ccb->ccb_h.path);
821		printf("controller is shutdown. Aborting CCB.\n");
822		ccb->ccb_h.status = CAM_NO_HBA;
823		xpt_done(ccb);
824		return;
825	}
826
827	switch (ccb->ccb_h.func_code) {
828	/* Common cases first */
829	case XPT_SCSI_IO:	/* Execute the requested I/O operation */
830	{
831		struct	ccb_scsiio *csio;
832		struct	ccb_hdr *ccbh;
833		struct	dpt_ccb *dccb;
834		struct	eata_ccb *eccb;
835
836		csio = &ccb->csio;
837		ccbh = &ccb->ccb_h;
838		/* Max CDB length is 12 bytes */
839		if (csio->cdb_len > 12) {
840			ccb->ccb_h.status = CAM_REQ_INVALID;
841			xpt_done(ccb);
842			return;
843		}
844		if ((dccb = dptgetccb(dpt)) == NULL) {
845			dpt->resource_shortage = 1;
846			xpt_freeze_simq(sim, /*count*/1);
847			ccb->ccb_h.status = CAM_REQUEUE_REQ;
848			xpt_done(ccb);
849			return;
850		}
851		eccb = &dccb->eata_ccb;
852
853		/* Link dccb and ccb so we can find one from the other */
854		dccb->ccb = ccb;
855		ccb->ccb_h.ccb_dccb_ptr = dccb;
856		ccb->ccb_h.ccb_dpt_ptr = dpt;
857
858		/*
859		 * Explicitly set all flags so that the compiler can
860		 * be smart about setting them.
861		 */
862		eccb->SCSI_Reset = 0;
863		eccb->HBA_Init = 0;
864		eccb->Auto_Req_Sen = (ccb->ccb_h.flags & CAM_DIS_AUTOSENSE)
865				   ? 0 : 1;
866		eccb->scatter = 0;
867		eccb->Quick = 0;
868		eccb->Interpret =
869		    ccb->ccb_h.target_id == dpt->hostid[cam_sim_bus(sim)]
870		    ? 1 : 0;
871		eccb->DataOut = (ccb->ccb_h.flags & CAM_DIR_OUT) ? 1 : 0;
872		eccb->DataIn = (ccb->ccb_h.flags & CAM_DIR_IN) ? 1 : 0;
873		eccb->reqlen = csio->sense_len;
874		eccb->cp_id = ccb->ccb_h.target_id;
875		eccb->cp_channel = cam_sim_bus(sim);
876		eccb->cp_LUN = ccb->ccb_h.target_lun;
877		eccb->cp_luntar = 0;
878		eccb->cp_dispri = (ccb->ccb_h.flags & CAM_DIS_DISCONNECT)
879				? 0 : 1;
880		eccb->cp_identify = 1;
881
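		/*
		 * CAM tag_action values are the raw SCSI queue-tag message
		 * codes, so they can be passed through as the first tag
		 * message byte; the CCB's index doubles as the tag itself.
		 */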
882		if ((ccb->ccb_h.flags & CAM_TAG_ACTION_VALID) != 0
883		 && csio->tag_action != CAM_TAG_ACTION_NONE) {
884			eccb->cp_msg[0] = csio->tag_action;
885			eccb->cp_msg[1] = dccb->tag;
886		} else {
887			eccb->cp_msg[0] = 0;
888			eccb->cp_msg[1] = 0;
889		}
890		eccb->cp_msg[2] = 0;
891
892		if ((ccb->ccb_h.flags & CAM_CDB_POINTER) != 0) {
893			if ((ccb->ccb_h.flags & CAM_CDB_PHYS) == 0) {
894				bcopy(csio->cdb_io.cdb_ptr,
895				      eccb->cp_cdb, csio->cdb_len);
896			} else {
897				/* I guess I could map it in... */
898				ccb->ccb_h.status = CAM_REQ_INVALID;
899				dptfreeccb(dpt, dccb);
900				xpt_done(ccb);
901				return;
902			}
903		} else {
904			bcopy(csio->cdb_io.cdb_bytes,
905			      eccb->cp_cdb, csio->cdb_len);
906		}
907		/*
908		 * If we have any data to send with this command,
909		 * map it into bus space.
910		 */
911	        /* Only use S/G if there is a transfer */
912		if ((ccbh->flags & CAM_DIR_MASK) != CAM_DIR_NONE) {
913			int error;
914
915			error = bus_dmamap_load_ccb(dpt->buffer_dmat,
916						    dccb->dmamap,
917						    ccb,
918						    dptexecuteccb,
919						    dccb, /*flags*/0);
920			if (error == EINPROGRESS) {
921				/*
922				 * So as to maintain ordering,
923				 * freeze the controller queue
924				 * until our mapping is
925				 * returned.
926				 */
927				xpt_freeze_simq(sim, 1);
				dccb->state |= DCCB_RELEASE_SIMQ;
929			}
930		} else {
			/*
			 * XXX JGibbs.
			 * Does it want them both on or both off?
			 * CAM_DIR_NONE is both on, so this code can
			 * be removed if this is also what the DPT
			 * expects.
			 */
938			eccb->DataOut = 0;
939			eccb->DataIn = 0;
940			dptexecuteccb(dccb, NULL, 0, 0);
941		}
942		break;
943	}
944	case XPT_RESET_DEV:	/* Bus Device Reset the specified SCSI device */
945	case XPT_ABORT:			/* Abort the specified CCB */
946		/* XXX Implement */
947		ccb->ccb_h.status = CAM_REQ_INVALID;
948		xpt_done(ccb);
949		break;
950	case XPT_SET_TRAN_SETTINGS:
951	{
952		ccb->ccb_h.status = CAM_FUNC_NOTAVAIL;
953		xpt_done(ccb);
954		break;
955	}
956	case XPT_GET_TRAN_SETTINGS:
957	/* Get default/user set transfer settings for the target */
958	{
959		struct	ccb_trans_settings *cts = &ccb->cts;
960		struct ccb_trans_settings_scsi *scsi =
961		    &cts->proto_specific.scsi;
962		struct ccb_trans_settings_spi *spi =
963		    &cts->xport_specific.spi;
964
965		cts->protocol = PROTO_SCSI;
966		cts->protocol_version = SCSI_REV_2;
967		cts->transport = XPORT_SPI;
968		cts->transport_version = 2;
969
970		if (cts->type == CTS_TYPE_USER_SETTINGS) {
971			spi->flags = CTS_SPI_FLAGS_DISC_ENB;
			spi->bus_width = (dpt->max_id > 7)
				       ? MSG_EXT_WDTR_BUS_16_BIT
				       : MSG_EXT_WDTR_BUS_8_BIT;
975			spi->sync_period = 25; /* 10MHz */
976			if (spi->sync_period != 0)
977				spi->sync_offset = 15;
978			scsi->flags = CTS_SCSI_FLAGS_TAG_ENB;
979
			spi->valid = CTS_SPI_VALID_SYNC_RATE
				| CTS_SPI_VALID_SYNC_OFFSET
				| CTS_SPI_VALID_BUS_WIDTH
				| CTS_SPI_VALID_DISC;
985			scsi->valid = CTS_SCSI_VALID_TQ;
986			ccb->ccb_h.status = CAM_REQ_CMP;
987		} else {
988			ccb->ccb_h.status = CAM_FUNC_NOTAVAIL;
989		}
990		xpt_done(ccb);
991		break;
992	}
993	case XPT_CALC_GEOMETRY:
994	{
995		/*
996		 * XXX Use Adaptec translation until I find out how to
997		 *     get this information from the card.
998		 */
999		cam_calc_geometry(&ccb->ccg, /*extended*/1);
1000		xpt_done(ccb);
1001		break;
1002	}
1003	case XPT_RESET_BUS:		/* Reset the specified SCSI bus */
1004	{
1005		/* XXX Implement */
1006		ccb->ccb_h.status = CAM_REQ_CMP;
1007		xpt_done(ccb);
1008		break;
1009	}
1010	case XPT_TERM_IO:		/* Terminate the I/O process */
1011		/* XXX Implement */
1012		ccb->ccb_h.status = CAM_REQ_INVALID;
1013		xpt_done(ccb);
1014		break;
1015	case XPT_PATH_INQ:		/* Path routing inquiry */
1016	{
1017		struct ccb_pathinq *cpi = &ccb->cpi;
1018
1019		cpi->version_num = 1;
1020		cpi->hba_inquiry = PI_SDTR_ABLE|PI_TAG_ABLE;
1021		if (dpt->max_id > 7)
1022			cpi->hba_inquiry |= PI_WIDE_16;
1023		cpi->target_sprt = 0;
1024		cpi->hba_misc = 0;
1025		cpi->hba_eng_cnt = 0;
1026		cpi->max_target = dpt->max_id;
1027		cpi->max_lun = dpt->max_lun;
1028		cpi->initiator_id = dpt->hostid[cam_sim_bus(sim)];
1029		cpi->bus_id = cam_sim_bus(sim);
1030		cpi->base_transfer_speed = 3300;
1031		strncpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN);
1032		strncpy(cpi->hba_vid, "DPT", HBA_IDLEN);
1033		strncpy(cpi->dev_name, cam_sim_name(sim), DEV_IDLEN);
1034		cpi->unit_number = cam_sim_unit(sim);
		cpi->transport = XPORT_SPI;
		cpi->transport_version = 2;
		cpi->protocol = PROTO_SCSI;
		cpi->protocol_version = SCSI_REV_2;
1039		cpi->ccb_h.status = CAM_REQ_CMP;
1040		xpt_done(ccb);
1041		break;
1042	}
1043	default:
1044		ccb->ccb_h.status = CAM_REQ_INVALID;
1045		xpt_done(ccb);
1046		break;
1047	}
1048}
1049
1050/*
1051 * This routine will try to send an EATA command to the DPT HBA.
1052 * It will, by default, try 20,000 times, waiting 50us between tries.
1053 * It returns 0 on success and 1 on failure.
1054 */
1055static int
1056dpt_send_eata_command(dpt_softc_t *dpt, eata_ccb_t *cmd_block,
1057		      u_int32_t cmd_busaddr, u_int command, u_int retries,
1058		      u_int ifc, u_int code, u_int code2)
1059{
1060	u_int	loop;
1061
1062	if (!retries)
1063		retries = 20000;
1064
	/*
	 * I hate this polling nonsense. Wish there was a way to tell the DPT
	 * to go get commands at its own pace, or to interrupt when ready.
	 * In the meantime we will measure how many iterations it really
	 * takes.
	 */
1071	for (loop = 0; loop < retries; loop++) {
1072		if ((dpt_inb(dpt, HA_RAUXSTAT) & HA_ABUSY) == 0)
1073			break;
1074		else
1075			DELAY(50);
1076	}
1077
1078	if (loop < retries) {
1079#ifdef DPT_MEASURE_PERFORMANCE
1080		if (loop > dpt->performance.max_eata_tries)
1081			dpt->performance.max_eata_tries = loop;
1082
1083		if (loop < dpt->performance.min_eata_tries)
1084			dpt->performance.min_eata_tries = loop;
1085#endif
1086	} else {
1087#ifdef DPT_MEASURE_PERFORMANCE
1088		++dpt->performance.command_too_busy;
1089#endif
1090		return (1);
1091	}
1092
1093	/* The controller is alive, advance the wedge timer */
1094#ifdef DPT_RESET_HBA
1095	dpt->last_contact = microtime_now;
1096#endif
1097
1098	if (cmd_block == NULL)
1099		cmd_busaddr = 0;
#if (BYTE_ORDER == BIG_ENDIAN)
	else {
		/* Byte-swap so the HBA sees the address in little-endian order. */
		cmd_busaddr = ((cmd_busaddr >> 24) & 0x000000FF)
			    | ((cmd_busaddr >>  8) & 0x0000FF00)
			    | ((cmd_busaddr <<  8) & 0x00FF0000)
			    | ((cmd_busaddr << 24) & 0xFF000000);
	}
#endif
1108	/* And now the address */
1109	dpt_outl(dpt, HA_WDMAADDR, cmd_busaddr);
1110
1111	if (command == EATA_CMD_IMMEDIATE) {
1112		if (cmd_block == NULL) {
1113			dpt_outb(dpt, HA_WCODE2, code2);
1114			dpt_outb(dpt, HA_WCODE, code);
1115		}
1116		dpt_outb(dpt, HA_WIFC, ifc);
1117	}
1118	dpt_outb(dpt, HA_WCOMMAND, command);
1119
1120	return (0);
1121}
1122
1123
1124/* ==================== Exported Function definitions =======================*/
1125void
1126dpt_alloc(device_t dev)
1127{
1128	dpt_softc_t	*dpt = device_get_softc(dev);
1129	int    i;
1130
1131	mtx_init(&dpt->lock, "dpt", NULL, MTX_DEF);
1132	SLIST_INIT(&dpt->free_dccb_list);
1133	LIST_INIT(&dpt->pending_ccb_list);
1134	for (i = 0; i < MAX_CHANNELS; i++)
1135		dpt->resetlevel[i] = DPT_HA_OK;
1136
1137#ifdef DPT_MEASURE_PERFORMANCE
1138	dpt_reset_performance(dpt);
1139#endif /* DPT_MEASURE_PERFORMANCE */
1140	return;
1141}
1142
1143void
1144dpt_free(struct dpt_softc *dpt)
1145{
1146	switch (dpt->init_level) {
1147	default:
1148	case 5:
1149		bus_dmamap_unload(dpt->dccb_dmat, dpt->dccb_dmamap);
1150	case 4:
1151		bus_dmamem_free(dpt->dccb_dmat, dpt->dpt_dccbs,
1152				dpt->dccb_dmamap);
1153		bus_dmamap_destroy(dpt->dccb_dmat, dpt->dccb_dmamap);
1154	case 3:
1155		bus_dma_tag_destroy(dpt->dccb_dmat);
1156	case 2:
1157		bus_dma_tag_destroy(dpt->buffer_dmat);
1158	case 1:
1159	{
1160		struct sg_map_node *sg_map;
1161
1162		while ((sg_map = SLIST_FIRST(&dpt->sg_maps)) != NULL) {
1163			SLIST_REMOVE_HEAD(&dpt->sg_maps, links);
1164			bus_dmamap_unload(dpt->sg_dmat,
1165					  sg_map->sg_dmamap);
1166			bus_dmamem_free(dpt->sg_dmat, sg_map->sg_vaddr,
1167					sg_map->sg_dmamap);
1168			free(sg_map, M_DEVBUF);
1169		}
1170		bus_dma_tag_destroy(dpt->sg_dmat);
1171	}
1172	case 0:
1173		break;
1174	}
1175	mtx_destroy(&dpt->lock);
1176}
1177
1178int
1179dpt_alloc_resources (device_t dev)
1180{
1181	dpt_softc_t *	dpt;
1182	int		error;
1183
1184	dpt = device_get_softc(dev);
1185
1186	dpt->io_res = bus_alloc_resource_any(dev, dpt->io_type, &dpt->io_rid,
1187					     RF_ACTIVE);
1188	if (dpt->io_res == NULL) {
1189		device_printf(dev, "No I/O space?!\n");
1190		error = ENOMEM;
1191		goto bad;
1192	}
1193
1194	dpt->irq_res = bus_alloc_resource_any(dev, SYS_RES_IRQ, &dpt->irq_rid,
1195					      RF_ACTIVE);
1196	if (dpt->irq_res == NULL) {
1197		device_printf(dev, "No IRQ!\n");
1198		error = ENOMEM;
1199		goto bad;
1200	}
1201
1202	return (0);
1203bad:
1204	return(error);
1205}
1206
1207
1208void
1209dpt_release_resources (device_t dev)
1210{
1211	struct dpt_softc *	dpt;
1212
1213	dpt = device_get_softc(dev);
1214
1215	if (dpt->ih)
1216		bus_teardown_intr(dev, dpt->irq_res, dpt->ih);
	if (dpt->io_res)
		bus_release_resource(dev, dpt->io_type, dpt->io_rid, dpt->io_res);
	if (dpt->irq_res)
		bus_release_resource(dev, SYS_RES_IRQ, dpt->irq_rid, dpt->irq_res);
	if (dpt->drq_res)
		bus_release_resource(dev, SYS_RES_DRQ, dpt->drq_rid, dpt->drq_res);
1223
1224	return;
1225}
1226
1227static u_int8_t string_sizes[] =
1228{
1229	sizeof(((dpt_inq_t*)NULL)->vendor),
1230	sizeof(((dpt_inq_t*)NULL)->modelNum),
1231	sizeof(((dpt_inq_t*)NULL)->firmware),
1232	sizeof(((dpt_inq_t*)NULL)->protocol),
1233};
1234
1235int
1236dpt_init(struct dpt_softc *dpt)
1237{
1238	dpt_conf_t  conf;
1239	struct	    sg_map_node *sg_map;
1240	dpt_ccb_t  *dccb;
1241	u_int8_t   *strp;
1242	int	    index;
1243	int	    i;
1244	int	    retval;
1245
1246	dpt->init_level = 0;
1247	SLIST_INIT(&dpt->sg_maps);
1248	mtx_lock(&dpt->lock);
1249
1250#ifdef DPT_RESET_BOARD
1251	device_printf(dpt->dev, "resetting HBA\n");
1252	dpt_outb(dpt, HA_WCOMMAND, EATA_CMD_RESET);
1253	DELAY(750000);
1254	/* XXX Shouldn't we poll a status register or something??? */
1255#endif
1256	/* DMA tag for our S/G structures.  We allocate in page sized chunks */
1257	if (bus_dma_tag_create(	/* parent	*/ dpt->parent_dmat,
1258				/* alignment	*/ 1,
1259				/* boundary	*/ 0,
1260				/* lowaddr	*/ BUS_SPACE_MAXADDR,
1261				/* highaddr	*/ BUS_SPACE_MAXADDR,
1262				/* filter	*/ NULL,
1263				/* filterarg	*/ NULL,
1264				/* maxsize	*/ PAGE_SIZE,
1265				/* nsegments	*/ 1,
1266				/* maxsegsz	*/ BUS_SPACE_MAXSIZE_32BIT,
1267				/* flags	*/ 0,
1268				/* lockfunc	*/ NULL,
1269				/* lockarg	*/ NULL,
1270				&dpt->sg_dmat) != 0) {
1271		goto error_exit;
1272        }
1273
1274	dpt->init_level++;
1275
	/*
	 * We allocate our DPT ccbs as a contiguous array of bus dma'able
	 * memory.  To get the allocation size, we need to know how many
	 * ccbs the card supports.  This requires a ccb.  We solve this
	 * chicken and egg problem by allocating some re-usable S/G space
	 * up front, and treating it as our status packet, CCB, and target
	 * memory space for these commands.
	 */
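	/*
	 * Layout of that bootstrap page: the status packet (dpt_sp_t) sits
	 * at the start, the temporary CCB follows it, and everything after
	 * the CCB is used as the data buffer for the configuration and
	 * inquiry commands below.
	 */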
1284	sg_map = dptallocsgmap(dpt);
1285	if (sg_map == NULL)
1286		goto error_exit;
1287
1288	dpt->sp = (volatile dpt_sp_t *)sg_map->sg_vaddr;
1289	dccb = (struct dpt_ccb *)(uintptr_t)(volatile void *)&dpt->sp[1];
1290	bzero(dccb, sizeof(*dccb));
1291	dpt->sp_physaddr = sg_map->sg_physaddr;
1292	dccb->eata_ccb.cp_dataDMA =
1293	    htonl(sg_map->sg_physaddr + sizeof(dpt_sp_t) + sizeof(*dccb));
1294	dccb->eata_ccb.cp_busaddr = ~0;
1295	dccb->eata_ccb.cp_statDMA = htonl(dpt->sp_physaddr);
1296	dccb->eata_ccb.cp_reqDMA = htonl(dpt->sp_physaddr + sizeof(*dccb)
1297				       + offsetof(struct dpt_ccb, sense_data));
1298
1299	/* Okay.  Fetch our config */
1300	bzero(&dccb[1], sizeof(conf)); /* data area */
1301	retval = dpt_get_conf(dpt, dccb, sg_map->sg_physaddr + sizeof(dpt_sp_t),
1302			      sizeof(conf), 0xc1, 7, 1);
1303
1304	if (retval != 0) {
1305		device_printf(dpt->dev, "Failed to get board configuration\n");
1306		goto error_exit;
1307	}
1308	bcopy(&dccb[1], &conf, sizeof(conf));
1309
1310	bzero(&dccb[1], sizeof(dpt->board_data));
1311	retval = dpt_get_conf(dpt, dccb, sg_map->sg_physaddr + sizeof(dpt_sp_t),
1312			      sizeof(dpt->board_data), 0, conf.scsi_id0, 0);
1313	if (retval != 0) {
1314		device_printf(dpt->dev, "Failed to get inquiry information\n");
1315		goto error_exit;
1316	}
1317	bcopy(&dccb[1], &dpt->board_data, sizeof(dpt->board_data));
1318
1319	dpt_detect_cache(dpt, dccb, sg_map->sg_physaddr + sizeof(dpt_sp_t),
1320			 (u_int8_t *)&dccb[1]);
1321
1322	switch (ntohl(conf.splen)) {
1323	case DPT_EATA_REVA:
1324		dpt->EATA_revision = 'a';
1325		break;
1326	case DPT_EATA_REVB:
1327		dpt->EATA_revision = 'b';
1328		break;
1329	case DPT_EATA_REVC:
1330		dpt->EATA_revision = 'c';
1331		break;
1332	case DPT_EATA_REVZ:
1333		dpt->EATA_revision = 'z';
1334		break;
1335	default:
1336		dpt->EATA_revision = '?';
1337	}
1338
1339	dpt->max_id	 = conf.MAX_ID;
1340	dpt->max_lun	 = conf.MAX_LUN;
1341	dpt->irq	 = conf.IRQ;
1342	dpt->dma_channel = (8 - conf.DMA_channel) & 7;
1343	dpt->channels	 = conf.MAX_CHAN + 1;
1344	dpt->state	|= DPT_HA_OK;
1345	if (conf.SECOND)
1346		dpt->primary = FALSE;
1347	else
1348		dpt->primary = TRUE;
1349
1350	dpt->more_support = conf.MORE_support;
1351
1352	if (strncmp(dpt->board_data.firmware, "07G0", 4) >= 0)
1353		dpt->immediate_support = 1;
1354	else
1355		dpt->immediate_support = 0;
1356
1357	dpt->broken_INQUIRY = FALSE;
1358
1359	dpt->cplen = ntohl(conf.cplen);
1360	dpt->cppadlen = ntohs(conf.cppadlen);
1361	dpt->max_dccbs = ntohs(conf.queuesiz);
1362
1363	if (dpt->max_dccbs > 256) {
1364		device_printf(dpt->dev, "Max CCBs reduced from %d to "
1365		       "256 due to tag algorithm\n", dpt->max_dccbs);
1366		dpt->max_dccbs = 256;
1367	}
1368
1369	dpt->hostid[0] = conf.scsi_id0;
1370	dpt->hostid[1] = conf.scsi_id1;
1371	dpt->hostid[2] = conf.scsi_id2;
1372
1373	if (conf.SG_64K)
1374		dpt->sgsize = 8192;
1375	else
1376		dpt->sgsize = ntohs(conf.SGsiz);
1377
1378	/* We can only get 64k buffers, so don't bother to waste space. */
1379	if (dpt->sgsize < 17 || dpt->sgsize > 32)
1380		dpt->sgsize = 32;
1381
1382	if (dpt->sgsize > dpt_max_segs)
1383		dpt->sgsize = dpt_max_segs;
1384
1385	/* DMA tag for mapping buffers into device visible space. */
1386	if (bus_dma_tag_create(	/* parent	*/ dpt->parent_dmat,
1387				/* alignment	*/ 1,
1388				/* boundary	*/ 0,
1389				/* lowaddr	*/ BUS_SPACE_MAXADDR,
1390				/* highaddr	*/ BUS_SPACE_MAXADDR,
1391				/* filter	*/ NULL,
1392				/* filterarg	*/ NULL,
1393				/* maxsize	*/ MAXBSIZE,
1394				/* nsegments	*/ dpt->sgsize,
1395				/* maxsegsz	*/ BUS_SPACE_MAXSIZE_32BIT,
1396				/* flags	*/ BUS_DMA_ALLOCNOW,
1397				/* lockfunc	*/ busdma_lock_mutex,
1398				/* lockarg	*/ &dpt->lock,
1399				&dpt->buffer_dmat) != 0) {
1400		device_printf(dpt->dev,
1401		    "bus_dma_tag_create(...,dpt->buffer_dmat) failed\n");
1402		goto error_exit;
1403	}
1404
1405	dpt->init_level++;
1406
1407	/* DMA tag for our ccb structures and interrupt status packet */
1408	if (bus_dma_tag_create(	/* parent	*/ dpt->parent_dmat,
1409				/* alignment	*/ 1,
1410				/* boundary	*/ 0,
1411				/* lowaddr	*/ BUS_SPACE_MAXADDR,
1412				/* highaddr	*/ BUS_SPACE_MAXADDR,
1413				/* filter	*/ NULL,
1414				/* filterarg	*/ NULL,
1415				/* maxsize	*/ (dpt->max_dccbs *
1416						    sizeof(struct dpt_ccb)) +
1417						    sizeof(dpt_sp_t),
1418				/* nsegments	*/ 1,
1419				/* maxsegsz	*/ BUS_SPACE_MAXSIZE_32BIT,
1420				/* flags	*/ 0,
1421				/* lockfunc	*/ NULL,
1422				/* lockarg	*/ NULL,
1423				&dpt->dccb_dmat) != 0) {
1424		device_printf(dpt->dev,
1425		    "bus_dma_tag_create(...,dpt->dccb_dmat) failed\n");
1426		goto error_exit;
1427        }
1428
1429	dpt->init_level++;
1430
1431	/* Allocation for our ccbs and interrupt status packet */
1432	if (bus_dmamem_alloc(dpt->dccb_dmat, (void **)&dpt->dpt_dccbs,
1433			     BUS_DMA_NOWAIT, &dpt->dccb_dmamap) != 0) {
1434		device_printf(dpt->dev,
1435		    "bus_dmamem_alloc(dpt->dccb_dmat,...) failed\n");
1436		goto error_exit;
1437	}
1438
1439	dpt->init_level++;
1440
1441	/* And permanently map them */
1442	bus_dmamap_load(dpt->dccb_dmat, dpt->dccb_dmamap,
1443       			dpt->dpt_dccbs,
1444			(dpt->max_dccbs * sizeof(struct dpt_ccb))
1445			+ sizeof(dpt_sp_t),
1446			dptmapmem, &dpt->dpt_ccb_busbase, /*flags*/0);
1447
1448	/* Clear them out. */
1449	bzero(dpt->dpt_dccbs,
1450	      (dpt->max_dccbs * sizeof(struct dpt_ccb)) + sizeof(dpt_sp_t));
1451
1452	dpt->dpt_ccb_busend = dpt->dpt_ccb_busbase;
1453
1454	dpt->sp = (dpt_sp_t*)&dpt->dpt_dccbs[dpt->max_dccbs];
1455	dpt->sp_physaddr = dpt->dpt_ccb_busbase
1456			 + (dpt->max_dccbs * sizeof(dpt_ccb_t));
1457	dpt->init_level++;
1458
1459	/* Allocate our first batch of ccbs */
1460	if (dptallocccbs(dpt) == 0) {
1461		device_printf(dpt->dev, "dptallocccbs(dpt) == 0\n");
1462		mtx_unlock(&dpt->lock);
1463		return (2);
1464	}
1465
1466	/* Prepare for Target Mode */
1467	dpt->target_mode_enabled = 1;
1468
1469	/* Nuke excess spaces from inquiry information */
1470	strp = dpt->board_data.vendor;
1471	for (i = 0; i < sizeof(string_sizes); i++) {
1472		index = string_sizes[i] - 1;
1473		while (index && (strp[index] == ' '))
1474			strp[index--] = '\0';
1475		strp += string_sizes[i];
1476	}
1477
1478	device_printf(dpt->dev, "%.8s %.16s FW Rev. %.4s, ",
1479	       dpt->board_data.vendor,
1480	       dpt->board_data.modelNum, dpt->board_data.firmware);
1481
1482	printf("%d channel%s, ", dpt->channels, dpt->channels > 1 ? "s" : "");
1483
1484	if (dpt->cache_type != DPT_NO_CACHE
1485	 && dpt->cache_size != 0) {
1486		printf("%s Cache, ",
1487		       dpt->cache_type == DPT_CACHE_WRITETHROUGH
1488		     ? "Write-Through" : "Write-Back");
1489	}
1490
1491	printf("%d CCBs\n", dpt->max_dccbs);
1492	mtx_unlock(&dpt->lock);
1493	return (0);
1494
1495error_exit:
1496	mtx_unlock(&dpt->lock);
1497	return (1);
1498}
1499
1500int
1501dpt_attach(dpt_softc_t *dpt)
1502{
1503	struct cam_devq *devq;
1504	int i;
1505
1506	/*
1507	 * Create the device queue for our SIM.
1508	 */
1509	devq = cam_simq_alloc(dpt->max_dccbs);
1510	if (devq == NULL)
1511		return (0);
1512
1513	mtx_lock(&dpt->lock);
1514	for (i = 0; i < dpt->channels; i++) {
1515		/*
1516		 * Construct our SIM entry
1517		 */
1518		dpt->sims[i] = cam_sim_alloc(dpt_action, dpt_poll, "dpt",
1519		    dpt, device_get_unit(dpt->dev), &dpt->lock,
1520					     /*untagged*/2,
1521					     /*tagged*/dpt->max_dccbs, devq);
1522		if (dpt->sims[i] == NULL) {
1523			if (i == 0)
1524				cam_simq_free(devq);
1525			else
1526				printf(	"%s(): Unable to attach bus %d "
1527					"due to resource shortage\n",
1528					__func__, i);
1529			break;
1530		}
1531
1532		if (xpt_bus_register(dpt->sims[i], dpt->dev, i) != CAM_SUCCESS){
1533			cam_sim_free(dpt->sims[i], /*free_devq*/i == 0);
1534			dpt->sims[i] = NULL;
1535			break;
1536		}
1537
1538		if (xpt_create_path(&dpt->paths[i], /*periph*/NULL,
1539				    cam_sim_path(dpt->sims[i]),
1540				    CAM_TARGET_WILDCARD,
1541				    CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
1542			xpt_bus_deregister(cam_sim_path(dpt->sims[i]));
1543			cam_sim_free(dpt->sims[i], /*free_devq*/i == 0);
1544			dpt->sims[i] = NULL;
1545			break;
1546		}
1547
1548	}
1549	mtx_unlock(&dpt->lock);
1550	if (i > 0)
1551		EVENTHANDLER_REGISTER(shutdown_final, dptshutdown,
1552				      dpt, SHUTDOWN_PRI_DEFAULT);
1553	return (i);
1554}
1555
1556int
1557dpt_detach (device_t dev)
1558{
1559	struct dpt_softc *	dpt;
1560	int			i;
1561
1562	dpt = device_get_softc(dev);
1563
1564	mtx_lock(&dpt->lock);
1565	for (i = 0; i < dpt->channels; i++) {
1566#if 0
1567	        xpt_async(AC_LOST_DEVICE, dpt->paths[i], NULL);
1568#endif
		xpt_free_path(dpt->paths[i]);
		xpt_bus_deregister(cam_sim_path(dpt->sims[i]));
		cam_sim_free(dpt->sims[i], /*free_devq*/TRUE);
1572	}
1573	mtx_unlock(&dpt->lock);
1574
1575	dptshutdown((void *)dpt, SHUTDOWN_PRI_DEFAULT);
1576
1577	dpt_release_resources(dev);
1578
1579	dpt_free(dpt);
1580
1581	return (0);
1582}
1583
1584/*
1585 * This is the interrupt handler for the DPT driver.
1586 */
1587void
1588dpt_intr(void *arg)
1589{
1590	dpt_softc_t    *dpt;
1591
1592	dpt = arg;
1593	mtx_lock(&dpt->lock);
1594	dpt_intr_locked(dpt);
1595	mtx_unlock(&dpt->lock);
1596}
1597
1598void
1599dpt_intr_locked(dpt_softc_t *dpt)
1600{
1601	dpt_ccb_t      *dccb;
1602	union ccb      *ccb;
1603	u_int		status;
1604	u_int		aux_status;
1605	u_int		hba_stat;
1606	u_int		scsi_stat;
1607	u_int32_t	residue_len;	/* Number of bytes not transferred */
1608
1609	/* First order of business is to check if this interrupt is for us */
1610	while (((aux_status = dpt_inb(dpt, HA_RAUXSTAT)) & HA_AIRQ) != 0) {
1611
1612		/*
1613		 * What we want to do now, is to capture the status, all of it,
1614		 * move it where it belongs, wake up whoever sleeps waiting to
1615		 * process this result, and get out of here.
1616		 */
1617		if (dpt->sp->ccb_busaddr < dpt->dpt_ccb_busbase
1618		 || dpt->sp->ccb_busaddr >= dpt->dpt_ccb_busend) {
1619			device_printf(dpt->dev,
1620			    "Encountered bogus status packet\n");
1621			status = dpt_inb(dpt, HA_RSTATUS);
1622			return;
1623		}
1624
1625		dccb = dptccbptov(dpt, dpt->sp->ccb_busaddr);
1626
1627		dpt->sp->ccb_busaddr = ~0;
1628
1629		/* Ignore status packets with EOC not set */
1630		if (dpt->sp->EOC == 0) {
1631			device_printf(dpt->dev,
1632			       "ERROR: Request %d received with "
1633			       "clear EOC.\n     Marking as LOST.\n",
1634			       dccb->transaction_id);
1635
1636#ifdef DPT_HANDLE_TIMEOUTS
1637			dccb->state |= DPT_CCB_STATE_MARKED_LOST;
1638#endif
1639			/* This CLEARS the interrupt! */
1640			status = dpt_inb(dpt, HA_RSTATUS);
1641			continue;
1642		}
1643		dpt->sp->EOC = 0;
1644
1645		/*
1646		 * Double buffer the status information so the hardware can
		 * work on updating the status packet while we decipher the
		 * one we were just interrupted for.
		 * According to Mark Salyzyn, we only need a few pieces of it.
1650		 */
1651		hba_stat = dpt->sp->hba_stat;
1652		scsi_stat = dpt->sp->scsi_stat;
1653		residue_len = dpt->sp->residue_len;
1654
1655		/* Clear interrupts, check for error */
1656		if ((status = dpt_inb(dpt, HA_RSTATUS)) & HA_SERROR) {
1657			/*
1658			 * Error Condition. Check for magic cookie. Exit
1659			 * this test on earliest sign of non-reset condition
1660			 */
1661
1662			/* Check that this is not a board reset interrupt */
1663			if (dpt_just_reset(dpt)) {
1664				device_printf(dpt->dev, "HBA rebooted.\n"
1665				       "      All transactions should be "
1666				       "resubmitted\n");
1667
1668				device_printf(dpt->dev,
1669				       ">>---->>  This is incomplete, "
1670				       "fix me....  <<----<<");
1671				panic("DPT Rebooted");
1672
1673			}
1674		}
1675		/* Process CCB */
1676		ccb = dccb->ccb;
1677		callout_stop(&dccb->timer);
1678		if ((ccb->ccb_h.flags & CAM_DIR_MASK) != CAM_DIR_NONE) {
1679			bus_dmasync_op_t op;
1680
1681			if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN)
1682				op = BUS_DMASYNC_POSTREAD;
1683			else
1684				op = BUS_DMASYNC_POSTWRITE;
1685			bus_dmamap_sync(dpt->buffer_dmat, dccb->dmamap, op);
1686			bus_dmamap_unload(dpt->buffer_dmat, dccb->dmamap);
1687		}
1688
1689		/* Common Case inline... */
1690		if (hba_stat == HA_NO_ERROR) {
1691			ccb->csio.scsi_status = scsi_stat;
1692			ccb->ccb_h.status = 0;
1693			switch (scsi_stat) {
1694			case SCSI_STATUS_OK:
1695				ccb->ccb_h.status |= CAM_REQ_CMP;
1696				break;
1697			case SCSI_STATUS_CHECK_COND:
1698			case SCSI_STATUS_CMD_TERMINATED:
1699				bcopy(&dccb->sense_data, &ccb->csio.sense_data,
1700				      ccb->csio.sense_len);
1701				ccb->ccb_h.status |= CAM_AUTOSNS_VALID;
1702				/* FALLTHROUGH */
1703			default:
1704				ccb->ccb_h.status |= CAM_SCSI_STATUS_ERROR;
1705				/* XXX Freeze DevQ */
1706				break;
1707			}
1708			ccb->csio.resid = residue_len;
1709			dptfreeccb(dpt, dccb);
1710			xpt_done(ccb);
1711		} else {
1712			dptprocesserror(dpt, dccb, ccb, hba_stat, scsi_stat,
1713					residue_len);
1714		}
1715	}
1716}
1717
1718static void
1719dptprocesserror(dpt_softc_t *dpt, dpt_ccb_t *dccb, union ccb *ccb,
1720		u_int hba_stat, u_int scsi_stat, u_int32_t resid)
1721{
1722	ccb->csio.resid = resid;
1723	switch (hba_stat) {
1724	case HA_ERR_SEL_TO:
1725		ccb->ccb_h.status = CAM_SEL_TIMEOUT;
1726		break;
1727	case HA_ERR_CMD_TO:
1728		ccb->ccb_h.status = CAM_CMD_TIMEOUT;
1729		break;
1730	case HA_SCSIBUS_RESET:
1731	case HA_HBA_POWER_UP:	/* Similar effect to a bus reset??? */
1732		ccb->ccb_h.status = CAM_SCSI_BUS_RESET;
1733		break;
1734	case HA_CP_ABORTED:
1735	case HA_CP_RESET:	/* XXX ??? */
1736	case HA_CP_ABORT_NA:	/* XXX ??? */
1737	case HA_CP_RESET_NA:	/* XXX ??? */
1738		if ((ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_INPROG)
1739			ccb->ccb_h.status = CAM_REQ_ABORTED;
1740		break;
1741	case HA_PCI_PARITY:
1742	case HA_PCI_MABORT:
1743	case HA_PCI_TABORT:
1744	case HA_PCI_STABORT:
1745	case HA_BUS_PARITY:
1746	case HA_PARITY_ERR:
1747	case HA_ECC_ERR:
1748		ccb->ccb_h.status = CAM_UNCOR_PARITY;
1749		break;
1750	case HA_UNX_MSGRJCT:
1751		ccb->ccb_h.status = CAM_MSG_REJECT_REC;
1752		break;
1753	case HA_UNX_BUSPHASE:
1754		ccb->ccb_h.status = CAM_SEQUENCE_FAIL;
1755		break;
1756	case HA_UNX_BUS_FREE:
1757		ccb->ccb_h.status = CAM_UNEXP_BUSFREE;
1758		break;
1759	case HA_SCSI_HUNG:
1760	case HA_RESET_STUCK:
		/*
		 * Dead???  Can the controller get unstuck
		 * from these conditions?
		 */
1765		ccb->ccb_h.status = CAM_NO_HBA;
1766		break;
1767	case HA_RSENSE_FAIL:
1768		ccb->ccb_h.status = CAM_AUTOSENSE_FAIL;
1769		break;
1770	default:
1771		device_printf(dpt->dev, "Undocumented Error %x\n", hba_stat);
1772		printf("Please mail this message to shimon@simon-shapiro.org\n");
1773		ccb->ccb_h.status = CAM_REQ_CMP_ERR;
1774		break;
1775	}
1776	dptfreeccb(dpt, dccb);
1777	xpt_done(ccb);
1778}
1779
1780static void
1781dpttimeout(void *arg)
1782{
1783	struct dpt_ccb	 *dccb;
1784	union  ccb	 *ccb;
1785	struct dpt_softc *dpt;
1786
1787	dccb = (struct dpt_ccb *)arg;
1788	ccb = dccb->ccb;
1789	dpt = (struct dpt_softc *)ccb->ccb_h.ccb_dpt_ptr;
1790	mtx_assert(&dpt->lock, MA_OWNED);
1791	xpt_print_path(ccb->ccb_h.path);
1792	printf("CCB %p - timed out\n", (void *)dccb);
1793
1794	/*
	 * Try to clear any pending jobs.  FreeBSD can lose interrupts,
	 * leaving the controller suspended and commands timed out.
1797	 * By calling the interrupt handler, any command thus stuck will be
1798	 * completed.
1799	 */
1800	dpt_intr_locked(dpt);
1801
1802	if ((dccb->state & DCCB_ACTIVE) == 0) {
1803		xpt_print_path(ccb->ccb_h.path);
1804		printf("CCB %p - timed out CCB already completed\n",
1805		       (void *)dccb);
1806		return;
1807	}
1808
1809	/* Abort this particular command.  Leave all others running */
1810	dpt_send_immediate(dpt, &dccb->eata_ccb, dccb->eata_ccb.cp_busaddr,
1811			   /*retries*/20000, EATA_SPECIFIC_ABORT, 0, 0);
1812	ccb->ccb_h.status = CAM_CMD_TIMEOUT;
1813}
1814
1815/*
1816 * Shutdown the controller and ensure that the cache is completely flushed.
1817 * Called from the shutdown_final event after all disk access has completed.
1818 */
1819static void
1820dptshutdown(void *arg, int howto)
1821{
1822	dpt_softc_t *dpt;
1823
1824	dpt = (dpt_softc_t *)arg;
1825
	device_printf(dpt->dev,
	    "Shutting down (mode %x) HBA.  Please wait...\n", howto);
1828
	/*
	 * What we do for a shutdown is give the DPT an early power loss
	 * warning.
	 */
1832	mtx_lock(&dpt->lock);
1833	dpt_send_immediate(dpt, NULL, 0, EATA_POWER_OFF_WARN, 0, 0, 0);
1834	mtx_unlock(&dpt->lock);
1835	DELAY(1000 * 1000 * 5);
1836	device_printf(dpt->dev, "Controller was warned of shutdown and is now "
1837	       "disabled\n");
1838}
1839
1840/*============================================================================*/
1841
1842#if 0
1843#ifdef DPT_RESET_HBA
1844
1845/*
1846**	Function name : dpt_reset_hba
1847**
1848**	Description : Reset the HBA and properly discard all pending work
1849**	Input :       Softc
1850**	Output :      Nothing
1851*/
1852static void
1853dpt_reset_hba(dpt_softc_t *dpt)
1854{
1855	eata_ccb_t       *ccb;
1856	dpt_ccb_t         dccb, *dccbp;
1857	int               result;
1858	struct scsi_xfer *xs;
1859
1860	mtx_assert(&dpt->lock, MA_OWNED);
1861
1862	/* Prepare a control block.  The SCSI command part is immaterial */
1863	dccb.xs = NULL;
1864	dccb.flags = 0;
1865	dccb.state = DPT_CCB_STATE_NEW;
1866	dccb.std_callback = NULL;
1867	dccb.wrbuff_callback = NULL;
1868
1869	ccb = &dccb.eata_ccb;
1870	ccb->CP_OpCode = EATA_CMD_RESET;
1871	ccb->SCSI_Reset = 0;
1872	ccb->HBA_Init = 1;
1873	ccb->Auto_Req_Sen = 1;
1874	ccb->cp_id = 0; /* Should be ignored */
1875	ccb->DataIn = 1;
1876	ccb->DataOut = 0;
1877	ccb->Interpret = 1;
1878	ccb->reqlen = htonl(sizeof(struct scsi_sense_data));
1879	ccb->cp_statDMA = htonl(vtophys(&ccb->cp_statDMA));
1880	ccb->cp_reqDMA = htonl(vtophys(&ccb->cp_reqDMA));
1881	ccb->cp_viraddr = (u_int32_t) & ccb;
1882
1883	ccb->cp_msg[0] = HA_IDENTIFY_MSG | HA_DISCO_RECO;
1884	ccb->cp_scsi_cmd = 0;  /* Should be ignored */
1885
	/* Lock up the submitted queue.  We are very persistent here */
1887	while (dpt->queue_status & DPT_SUBMITTED_QUEUE_ACTIVE) {
1888		DELAY(100);
1889	}
1890
1891	dpt->queue_status |= DPT_SUBMITTED_QUEUE_ACTIVE;
1892
1893	/* Send the RESET message */
1894	if ((result = dpt_send_eata_command(dpt, &dccb.eata_ccb,
1895					    EATA_CMD_RESET, 0, 0, 0, 0)) != 0) {
1896		device_printf(dpt->dev, "Failed to send the RESET message.\n"
1897		       "     Trying cold boot (ouch!)\n");
1898
1899
1900		if ((result = dpt_send_eata_command(dpt, &dccb.eata_ccb,
1901						    EATA_COLD_BOOT, 0, 0,
1902						    0, 0)) != 0) {
			panic("%s:  Failed to cold boot the HBA\n",
1904			    device_get_nameunit(dpt->dev));
1905		}
1906#ifdef DPT_MEASURE_PERFORMANCE
1907		dpt->performance.cold_boots++;
1908#endif /* DPT_MEASURE_PERFORMANCE */
1909	}
1910
1911#ifdef DPT_MEASURE_PERFORMANCE
1912	dpt->performance.warm_starts++;
1913#endif /* DPT_MEASURE_PERFORMANCE */
1914
1915	device_printf(dpt->dev,
1916	    "Aborting pending requests.  O/S should re-submit\n");
1917
1918	while ((dccbp = TAILQ_FIRST(&dpt->completed_ccbs)) != NULL) {
1919		struct scsi_xfer *xs = dccbp->xs;
1920
1921		/* Not all transactions have xs structs */
1922		if (xs != NULL) {
1923			/* Tell the kernel proper this did not complete well */
1924			xs->error |= XS_SELTIMEOUT;
1925			xs->flags |= SCSI_ITSDONE;
1926			scsi_done(xs);
1927		}
1928
1929		dpt_Qremove_submitted(dpt, dccbp);
1930
1931		/* Remember, Callbacks are NOT in the standard queue */
1932		if (dccbp->std_callback != NULL) {
1933			(dccbp->std_callback)(dpt, dccbp->eata_ccb.cp_channel,
1934					       dccbp);
1935		} else {
1936			dpt_Qpush_free(dpt, dccbp);
1937		}
1938	}
1939
	device_printf(dpt->dev, "Reset done; aborting all pending commands\n");
1941	dpt->queue_status &= ~DPT_SUBMITTED_QUEUE_ACTIVE;
1942}
1943
1944#endif /* DPT_RESET_HBA */
1945
1946/*
1947 * Build a Command Block for target mode READ/WRITE BUFFER,
1948 * with the ``sync'' bit ON.
1949 *
 * Although the length and offset are 24-bit fields in the command, they cannot
 * exceed 8192 bytes, so we take them as short integers and check their range.
 * If they are senseless, we clamp them to zero offset and maximum length and
 * complain.
1954 */
1955
1956static void
1957dpt_target_ccb(dpt_softc_t * dpt, int bus, u_int8_t target, u_int8_t lun,
1958	       dpt_ccb_t * ccb, int mode, u_int8_t command,
1959	       u_int16_t length, u_int16_t offset)
1960{
1961	eata_ccb_t     *cp;
1962
1963	mtx_assert(&dpt->lock, MA_OWNED);
1964	if ((length + offset) > DPT_MAX_TARGET_MODE_BUFFER_SIZE) {
1965		device_printf(dpt->dev,
		    "Length of %d and offset of %d are out of range\n",
1967		    length, offset);
1968		length = DPT_MAX_TARGET_MODE_BUFFER_SIZE;
1969		offset = 0;
1970	}
1971	ccb->xs = NULL;
1972	ccb->flags = 0;
1973	ccb->state = DPT_CCB_STATE_NEW;
1974	ccb->std_callback = (ccb_callback) dpt_target_done;
1975	ccb->wrbuff_callback = NULL;
1976
1977	cp = &ccb->eata_ccb;
1978	cp->CP_OpCode = EATA_CMD_DMA_SEND_CP;
1979	cp->SCSI_Reset = 0;
1980	cp->HBA_Init = 0;
1981	cp->Auto_Req_Sen = 1;
1982	cp->cp_id = target;
1983	cp->DataIn = 1;
1984	cp->DataOut = 0;
1985	cp->Interpret = 0;
1986	cp->reqlen = htonl(sizeof(struct scsi_sense_data));
1987	cp->cp_statDMA = htonl(vtophys(&cp->cp_statDMA));
1988	cp->cp_reqDMA = htonl(vtophys(&cp->cp_reqDMA));
1989	cp->cp_viraddr = (u_int32_t) & ccb;
1990
1991	cp->cp_msg[0] = HA_IDENTIFY_MSG | HA_DISCO_RECO;
1992
1993	cp->cp_scsi_cmd = command;
1994	cp->cp_cdb[1] = (u_int8_t) (mode & SCSI_TM_MODE_MASK);
1995	cp->cp_lun = lun;	/* Order is important here! */
1996	cp->cp_cdb[2] = 0x00;	/* Buffer Id, only 1 :-( */
	cp->cp_cdb[3] = (offset >> 16) & 0xFF;	/* Buffer offset MSB */
	cp->cp_cdb[4] = (offset >> 8) & 0xFF;
	cp->cp_cdb[5] = offset & 0xFF;	/* Buffer offset LSB */
2000	cp->cp_cdb[6] = (length >> 16) & 0xFF;	/* Length MSB */
2001	cp->cp_cdb[7] = (length >> 8) & 0xFF;
2002	cp->cp_cdb[8] = length & 0xFF;	/* Length LSB */
2003	cp->cp_cdb[9] = 0;	/* No sync, no match bits */
2004
2005	/*
2006	 * This could be optimized to live in dpt_register_buffer.
2007	 * We keep it here, just in case the kernel decides to reallocate pages
2008	 */
2009	if (dpt_scatter_gather(dpt, ccb, DPT_RW_BUFFER_SIZE,
2010			       dpt->rw_buffer[bus][target][lun])) {
2011		device_printf(dpt->dev, "Failed to setup Scatter/Gather for "
2012		       "Target-Mode buffer\n");
2013	}
2014}
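
/*
 * A minimal sketch (not compiled) of how the 24-bit buffer offset and length
 * built above round-trip through the CDB: bytes 3-5 carry the offset and
 * bytes 6-8 the length, MSB first, matching the br_offset/br_length decoding
 * macros used by dpt_target_done() below.  The values are hypothetical.
 */
#if 0
static void
dpt_example_cdb_pack(void)
{
	u_int8_t  cdb[12];
	u_int16_t offset = 0x0200;	/* hypothetical offset */
	u_int16_t length = 0x1000;	/* hypothetical length, <= 8192 */
	u_int32_t dec_offset, dec_length;

	cdb[3] = (offset >> 16) & 0xFF;	/* Buffer offset MSB */
	cdb[4] = (offset >> 8) & 0xFF;
	cdb[5] = offset & 0xFF;		/* Buffer offset LSB */
	cdb[6] = (length >> 16) & 0xFF;	/* Length MSB */
	cdb[7] = (length >> 8) & 0xFF;
	cdb[8] = length & 0xFF;		/* Length LSB */

	dec_offset = (cdb[3] << 16) | (cdb[4] << 8) | cdb[5];
	dec_length = (cdb[6] << 16) | (cdb[7] << 8) | cdb[8];
	/* dec_offset == 0x0200 and dec_length == 0x1000 again */
}
#endif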
2015
2016/* Setup a target mode READ command */
2017
2018static void
2019dpt_set_target(int redo, dpt_softc_t * dpt,
2020	       u_int8_t bus, u_int8_t target, u_int8_t lun, int mode,
2021	       u_int16_t length, u_int16_t offset, dpt_ccb_t * ccb)
2022{
2023
2024	mtx_assert(&dpt->lock, MA_OWNED);
2025	if (dpt->target_mode_enabled) {
2026		if (!redo)
2027			dpt_target_ccb(dpt, bus, target, lun, ccb, mode,
2028				       SCSI_TM_READ_BUFFER, length, offset);
2029
2030		ccb->transaction_id = ++dpt->commands_processed;
2031
2032#ifdef DPT_MEASURE_PERFORMANCE
2033		dpt->performance.command_count[ccb->eata_ccb.cp_scsi_cmd]++;
2034		ccb->command_started = microtime_now;
2035#endif
2036		dpt_Qadd_waiting(dpt, ccb);
2037		dpt_sched_queue(dpt);
2038	} else {
2039		device_printf(dpt->dev,
2040		    "Target Mode Request, but Target Mode is OFF\n");
2041	}
2042}
2043
2044/*
2045 * Schedule a buffer to be sent to another target.
2046 * The work will be scheduled and the callback provided will be called when
2047 * the work is actually done.
2048 *
2049 * Please NOTE:  ``Anyone'' can send a buffer, but only registered clients
2050 * get notified of receipt of buffers.
2051 */
2052
2053int
2054dpt_send_buffer(int unit, u_int8_t channel, u_int8_t target, u_int8_t lun,
2055		u_int8_t mode, u_int16_t length, u_int16_t offset, void *data,
2056		buff_wr_done callback)
2057{
2058	dpt_softc_t    *dpt;
2059	dpt_ccb_t      *ccb = NULL;
2060
2061	/* This is an external call.  Be a bit paranoid */
2062	dpt = devclass_get_device(dpt_devclass, unit);
2063	if (dpt == NULL)
2064		return (INVALID_UNIT);
2065
2066	mtx_lock(&dpt->lock);
2067	if (dpt->target_mode_enabled) {
2068		if ((channel >= dpt->channels) || (target > dpt->max_id) ||
2069		    (lun > dpt->max_lun)) {
2070			mtx_unlock(&dpt->lock);
2071			return (INVALID_SENDER);
2072		}
2073		if ((dpt->rw_buffer[channel][target][lun] == NULL) ||
2074		    (dpt->buffer_receiver[channel][target][lun] == NULL)) {
2075			mtx_unlock(&dpt->lock);
2076			return (NOT_REGISTERED);
2077		}
2078
2079		/* Process the free list */
2080		if ((TAILQ_EMPTY(&dpt->free_ccbs)) && dpt_alloc_freelist(dpt)) {
2081			device_printf(dpt->dev,
2082			    "ERROR: Cannot allocate any more free CCB's.\n"
2083			    "             Please try later\n");
2084			mtx_unlock(&dpt->lock);
2085			return (NO_RESOURCES);
2086		}
2087		/* Now grab the newest CCB */
2088		if ((ccb = dpt_Qpop_free(dpt)) == NULL) {
2089			mtx_unlock(&dpt->lock);
2090			panic("%s: Got a NULL CCB from pop_free()\n",
2091			    device_get_nameunit(dpt->dev));
2092		}
2093
		bcopy(data, dpt->rw_buffer[channel][target][lun] + offset, length);
2095		dpt_target_ccb(dpt, channel, target, lun, ccb, mode,
2096					   SCSI_TM_WRITE_BUFFER,
2097					   length, offset);
2098		ccb->std_callback = (ccb_callback) callback; /* Potential trouble */
2099
2100		ccb->transaction_id = ++dpt->commands_processed;
2101
2102#ifdef DPT_MEASURE_PERFORMANCE
2103		dpt->performance.command_count[ccb->eata_ccb.cp_scsi_cmd]++;
2104		ccb->command_started = microtime_now;
2105#endif
2106		dpt_Qadd_waiting(dpt, ccb);
2107		dpt_sched_queue(dpt);
2108
2109		mtx_unlock(&dpt->lock);
2110		return (0);
2111	}
2112	mtx_unlock(&dpt->lock);
2113	return (DRIVER_DOWN);
2114}
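
/*
 * A minimal, hypothetical usage sketch (not compiled) for the target-mode
 * send interface above.  The unit/channel/target/LUN values and the payload
 * are illustrative only; the completion routine's parameter types are assumed
 * from the call in dpt_target_done() -- the authoritative prototype is the
 * buff_wr_done typedef in dpt.h.  The receiving side must already have
 * registered via dpt_register_buffer().
 */
#if 0
static void
dpt_example_send_done(int unit, int channel, int target, int offset,
		      int length, int lun, int hba_stat)
{
	printf("dpt%d: buffer write to c%dt%du%d done, "
	       "offset %d, length %d, status %#x\n",
	       unit, channel, target, lun, offset, length, hba_stat);
}

static void
dpt_example_send(void)
{
	static char payload[] = "hello, target";
	int error;

	error = dpt_send_buffer(/*unit*/0, /*channel*/0, /*target*/1,
				/*lun*/0, /*mode*/0, sizeof(payload),
				/*offset*/0, payload,
				(buff_wr_done)dpt_example_send_done);
	if (error != 0)
		printf("dpt_send_buffer failed: %d\n", error);
}
#endif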
2115
2116static void
2117dpt_target_done(dpt_softc_t * dpt, int bus, dpt_ccb_t * ccb)
2118{
2119	eata_ccb_t     *cp;
2120
2121	cp = &ccb->eata_ccb;
2122
2123	/*
2124	 * Remove the CCB from the waiting queue.
2125	 *  We do NOT put it back on the free, etc., queues as it is a special
2126	 * ccb, owned by the dpt_softc of this unit.
2127	 */
2128	dpt_Qremove_completed(dpt, ccb);
2129
2130#define br_channel           (ccb->eata_ccb.cp_channel)
2131#define br_target            (ccb->eata_ccb.cp_id)
2132#define br_lun               (ccb->eata_ccb.cp_LUN)
2133#define br_index	     [br_channel][br_target][br_lun]
2134#define read_buffer_callback (dpt->buffer_receiver br_index )
2135#define	read_buffer	     (dpt->rw_buffer[br_channel][br_target][br_lun])
2136#define cb(offset)           (ccb->eata_ccb.cp_cdb[offset])
2137#define br_offset            ((cb(3) << 16) | (cb(4) << 8) | cb(5))
2138#define br_length            ((cb(6) << 16) | (cb(7) << 8) | cb(8))
2139
2140	/* Different reasons for being here, you know... */
2141	switch (ccb->eata_ccb.cp_scsi_cmd) {
2142	case SCSI_TM_READ_BUFFER:
2143		if (read_buffer_callback != NULL) {
2144			/* This is a buffer generated by a kernel process */
2145			read_buffer_callback(device_get_unit(dpt->dev),
2146					     br_channel, br_target, br_lun,
2147					     read_buffer,
2148					     br_offset, br_length);
2149		} else {
2150			/*
2151			 * This is a buffer waited for by a user (sleeping)
2152			 * command
2153			 */
2154			wakeup(ccb);
2155		}
2156
2157		/* We ALWAYS re-issue the same command; args are don't-care  */
2158		dpt_set_target(1, 0, 0, 0, 0, 0, 0, 0, 0);
2159		break;
2160
2161	case SCSI_TM_WRITE_BUFFER:
2162		(ccb->wrbuff_callback) (device_get_unit(dpt->dev), br_channel,
2163					br_target, br_offset, br_length,
2164					br_lun, ccb->status_packet.hba_stat);
2165		break;
2166	default:
2167		device_printf(dpt->dev,
2168		    "%s is an unsupported command for target mode\n",
2169		    scsi_cmd_name(ccb->eata_ccb.cp_scsi_cmd));
2170	}
2171	dpt->target_ccb[br_channel][br_target][br_lun] = NULL;
2172	dpt_Qpush_free(dpt, ccb);
2173}
2174
2175
2176/*
2177 * Use this function to register a client for a buffer read target operation.
2178 * The function you register will be called every time a buffer is received
2179 * by the target mode code.
2180 */
2181dpt_rb_t
2182dpt_register_buffer(int unit, u_int8_t channel, u_int8_t target, u_int8_t lun,
2183		    u_int8_t mode, u_int16_t length, u_int16_t offset,
2184		    dpt_rec_buff callback, dpt_rb_op_t op)
2185{
2186	dpt_softc_t    *dpt;
2187	dpt_ccb_t      *ccb = NULL;
2189
2190	dpt = devclass_get_device(dpt_devclass, unit);
2191	if (dpt == NULL)
2192		return (INVALID_UNIT);
2193	mtx_lock(&dpt->lock);
2194
2195	if (dpt->state & DPT_HA_SHUTDOWN_ACTIVE) {
2196		mtx_unlock(&dpt->lock);
2197		return (DRIVER_DOWN);
2198	}
2199
2200	if ((channel > (dpt->channels - 1)) || (target > (dpt->max_id - 1)) ||
2201	    (lun > (dpt->max_lun - 1))) {
2202		mtx_unlock(&dpt->lock);
2203		return (INVALID_SENDER);
2204	}
2205
2206	if (dpt->buffer_receiver[channel][target][lun] == NULL) {
2207		if (op == REGISTER_BUFFER) {
2208			/* Assign the requested callback */
2209			dpt->buffer_receiver[channel][target][lun] = callback;
2210			/* Get a CCB */
2211
2212			/* Process the free list */
2213			if ((TAILQ_EMPTY(&dpt->free_ccbs)) && dpt_alloc_freelist(dpt)) {
2214				device_printf(dpt->dev,
2215				    "ERROR: Cannot allocate any more free CCB's.\n"
2216				    "             Please try later\n");
2217				mtx_unlock(&dpt->lock);
2218				return (NO_RESOURCES);
2219			}
2220			/* Now grab the newest CCB */
2221			if ((ccb = dpt_Qpop_free(dpt)) == NULL) {
2222				mtx_unlock(&dpt->lock);
2223				panic("%s: Got a NULL CCB from pop_free()\n",
2224				    device_get_nameunit(dpt->dev));
2225			}
2226
2227			/* Clean up the leftover of the previous tenant */
2228			ccb->status = DPT_CCB_STATE_NEW;
2229			dpt->target_ccb[channel][target][lun] = ccb;
2230
2231			dpt->rw_buffer[channel][target][lun] =
2232				malloc(DPT_RW_BUFFER_SIZE, M_DEVBUF, M_NOWAIT);
2233			if (dpt->rw_buffer[channel][target][lun] == NULL) {
2234				device_printf(dpt->dev, "Failed to allocate "
2235				       "Target-Mode buffer\n");
2236				dpt_Qpush_free(dpt, ccb);
2237				mtx_unlock(&dpt->lock);
2238				return (NO_RESOURCES);
2239			}
2240			dpt_set_target(0, dpt, channel, target, lun, mode,
2241				       length, offset, ccb);
2242			mtx_unlock(&dpt->lock);
2243			return (SUCCESSFULLY_REGISTERED);
2244		} else {
2245			mtx_unlock(&dpt->lock);
2246			return (NOT_REGISTERED);
2247		}
2248	} else {
2249		if (op == REGISTER_BUFFER) {
2250			if (dpt->buffer_receiver[channel][target][lun] == callback) {
2251				mtx_unlock(&dpt->lock);
2252				return (ALREADY_REGISTERED);
2253			} else {
2254				mtx_unlock(&dpt->lock);
2255				return (REGISTERED_TO_ANOTHER);
2256			}
2257		} else {
			if (dpt->buffer_receiver[channel][target][lun] == callback) {
				dpt->buffer_receiver[channel][target][lun] = NULL;
				/* Release the CCB reserved at registration */
				if (dpt->target_ccb[channel][target][lun] != NULL) {
					dpt_Qpush_free(dpt,
					    dpt->target_ccb[channel][target][lun]);
					dpt->target_ccb[channel][target][lun] = NULL;
				}
				free(dpt->rw_buffer[channel][target][lun], M_DEVBUF);
				dpt->rw_buffer[channel][target][lun] = NULL;
				mtx_unlock(&dpt->lock);
				return (SUCCESSFULLY_REGISTERED);
2264			} else {
2265				mtx_unlock(&dpt->lock);
2266				return (INVALID_CALLBACK);
2267			}
2268		}
2269
2270	}
2271	mtx_unlock(&dpt->lock);
2272}
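
/*
 * A minimal, hypothetical usage sketch (not compiled) for the registration
 * interface above.  The callback parameter types are assumed from the call
 * in dpt_target_done() -- the authoritative prototype is the dpt_rec_buff
 * typedef in dpt.h -- and the unit/channel/target/LUN values are
 * illustrative only.
 */
#if 0
static void
dpt_example_recv(int unit, int channel, int target, int lun,
		 void *buffer, int offset, int length)
{
	printf("dpt%d: received %d bytes at offset %d from c%dt%du%d\n",
	       unit, length, offset, channel, target, lun);
}

static void
dpt_example_register(void)
{
	dpt_rb_t status;

	/* Listen for buffers arriving from channel 0, target 1, LUN 0 */
	status = dpt_register_buffer(/*unit*/0, /*channel*/0, /*target*/1,
				     /*lun*/0, /*mode*/0,
				     DPT_MAX_TARGET_MODE_BUFFER_SIZE,
				     /*offset*/0,
				     (dpt_rec_buff)dpt_example_recv,
				     REGISTER_BUFFER);
	if (status != SUCCESSFULLY_REGISTERED)
		printf("dpt_register_buffer failed (%d)\n", (int)status);
}
#endif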
2273
2274/* Return the state of the blinking DPT LED's */
2275u_int8_t
2276dpt_blinking_led(dpt_softc_t * dpt)
2277{
2278	int             ndx;
2279	u_int32_t       state;
2280	u_int32_t       previous;
2281	u_int8_t        result;
2282
2283	mtx_assert(&dpt->lock, MA_OWNED);
2284	result = 0;
2285
2286	for (ndx = 0, state = 0, previous = 0;
2287	     (ndx < 10) && (state != previous);
2288	     ndx++) {
2289		previous = state;
2290		state = dpt_inl(dpt, 1);
2291	}
2292
2293	if ((state == previous) && (state == DPT_BLINK_INDICATOR))
2294		result = dpt_inb(dpt, 5);
2295
2296	return (result);
2297}
2298
2299/*
2300 * Execute a command which did not come from the kernel's SCSI layer.
2301 * The only way to map user commands to bus and target is to comply with the
2302 * standard DPT wire-down scheme:
2303 */
2304int
2305dpt_user_cmd(dpt_softc_t * dpt, eata_pt_t * user_cmd,
2306	     caddr_t cmdarg, int minor_no)
2307{
2308	dpt_ccb_t *ccb;
2309	void	  *data;
2310	int	   channel, target, lun;
2311	int	   huh;
2312	int	   result;
2313	int	   submitted;
2314
2315	mtx_assert(&dpt->lock, MA_OWNED);
2316	data = NULL;
2317	channel = minor2hba(minor_no);
2318	target = minor2target(minor_no);
2319	lun = minor2lun(minor_no);
2320
2321	if ((channel > (dpt->channels - 1))
2322	 || (target > dpt->max_id)
2323	 || (lun > dpt->max_lun))
2324		return (ENXIO);
2325
2326	if (target == dpt->sc_scsi_link[channel].adapter_targ) {
2327		/* This one is for the controller itself */
2328		if ((user_cmd->eataID[0] != 'E')
2329		 || (user_cmd->eataID[1] != 'A')
2330		 || (user_cmd->eataID[2] != 'T')
2331		 || (user_cmd->eataID[3] != 'A')) {
2332			return (ENXIO);
2333		}
2334	}
2335	/* Get a DPT CCB, so we can prepare a command */
2336
2337	/* Process the free list */
2338	if ((TAILQ_EMPTY(&dpt->free_ccbs)) && dpt_alloc_freelist(dpt)) {
2339		device_printf(dpt->dev,
2340		    "ERROR: Cannot allocate any more free CCB's.\n"
2341		    "             Please try later\n");
2342		return (EFAULT);
2343	}
2344	/* Now grab the newest CCB */
2345	if ((ccb = dpt_Qpop_free(dpt)) == NULL) {
2346		panic("%s: Got a NULL CCB from pop_free()\n",
2347		    device_get_nameunit(dpt->dev));
2348	} else {
2349		/* Clean up the leftover of the previous tenant */
2350		ccb->status = DPT_CCB_STATE_NEW;
2351	}
2352
2353	bcopy((caddr_t) & user_cmd->command_packet, (caddr_t) & ccb->eata_ccb,
2354	      sizeof(eata_ccb_t));
2355
2356	/* We do not want to do user specified scatter/gather.  Why?? */
2357	if (ccb->eata_ccb.scatter == 1)
2358		return (EINVAL);
2359
2360	ccb->eata_ccb.Auto_Req_Sen = 1;
2361	ccb->eata_ccb.reqlen = htonl(sizeof(struct scsi_sense_data));
2362	ccb->eata_ccb.cp_datalen = htonl(sizeof(ccb->eata_ccb.cp_datalen));
2363	ccb->eata_ccb.cp_dataDMA = htonl(vtophys(ccb->eata_ccb.cp_dataDMA));
2364	ccb->eata_ccb.cp_statDMA = htonl(vtophys(&ccb->eata_ccb.cp_statDMA));
2365	ccb->eata_ccb.cp_reqDMA = htonl(vtophys(&ccb->eata_ccb.cp_reqDMA));
2366	ccb->eata_ccb.cp_viraddr = (u_int32_t) & ccb;
2367
2368	if (ccb->eata_ccb.DataIn || ccb->eata_ccb.DataOut) {
		/* Data I/O is involved in this command.  Allocate a buffer */
2370		if (ccb->eata_ccb.cp_datalen > PAGE_SIZE) {
2371			data = contigmalloc(ccb->eata_ccb.cp_datalen,
2372					    M_TEMP, M_WAITOK, 0, ~0,
2373					    ccb->eata_ccb.cp_datalen,
2374					    0x10000);
2375		} else {
2376			data = malloc(ccb->eata_ccb.cp_datalen, M_TEMP,
2377				      M_WAITOK);
2378		}
2379
2380		if (data == NULL) {
2381			device_printf(dpt->dev, "Cannot allocate %d bytes "
2382			       "for EATA command\n",
2383			       ccb->eata_ccb.cp_datalen);
2384			return (EFAULT);
2385		}
2386#define usr_cmd_DMA (caddr_t)user_cmd->command_packet.cp_dataDMA
2387		if (ccb->eata_ccb.DataIn == 1) {
2388			if (copyin(usr_cmd_DMA,
				   data, ccb->eata_ccb.cp_datalen) != 0)
2390				return (EFAULT);
2391		}
2392	} else {
2393		/* No data I/O involved here.  Make sure the DPT knows that */
2394		ccb->eata_ccb.cp_datalen = 0;
2395		data = NULL;
2396	}
2397
2398	if (ccb->eata_ccb.FWNEST == 1)
2399		ccb->eata_ccb.FWNEST = 0;
2400
2401	if (ccb->eata_ccb.cp_datalen != 0) {
2402		if (dpt_scatter_gather(dpt, ccb, ccb->eata_ccb.cp_datalen,
2403				       data) != 0) {
2404			if (data != NULL)
2405				free(data, M_TEMP);
2406			return (EFAULT);
2407		}
2408	}
2409	/**
	 * We are required to quiet a SCSI bus.
	 * Since we do not queue commands on a per-bus basis,
	 * we wait for ALL commands on the controller to complete.
	 * In the meantime, sched_queue() will not schedule new commands.
2414	 */
2415	if ((ccb->eata_ccb.cp_cdb[0] == MULTIFUNCTION_CMD)
2416	    && (ccb->eata_ccb.cp_cdb[2] == BUS_QUIET)) {
		/* We wait for ALL traffic for this HBA to subside */
2418		dpt->state |= DPT_HA_QUIET;
2419
2420		while ((submitted = dpt->submitted_ccbs_count) != 0) {
2421			huh = mtx_sleep((void *) dpt, &dpt->lock,
2422			    PCATCH | PRIBIO, "dptqt", 100 * hz);
2423			switch (huh) {
2424			case 0:
2425				/* Wakeup call received */
2426				break;
2427			case EWOULDBLOCK:
2428				/* Timer Expired */
2429				break;
2430			default:
2431				/* anything else */
2432				break;
2433			}
2434		}
2435	}
2436	/* Resume normal operation */
2437	if ((ccb->eata_ccb.cp_cdb[0] == MULTIFUNCTION_CMD)
2438	    && (ccb->eata_ccb.cp_cdb[2] == BUS_UNQUIET)) {
2439		dpt->state &= ~DPT_HA_QUIET;
2440	}
2441	/**
2442	 * Schedule the command and submit it.
2443	 * We bypass dpt_sched_queue, as it will block on DPT_HA_QUIET
2444	 */
2445	ccb->xs = NULL;
2446	ccb->flags = 0;
2447	ccb->eata_ccb.Auto_Req_Sen = 1;	/* We always want this feature */
2448
2449	ccb->transaction_id = ++dpt->commands_processed;
2450	ccb->std_callback = (ccb_callback) dpt_user_cmd_done;
2451	ccb->result = (u_int32_t) & cmdarg;
2452	ccb->data = data;
2453
2454#ifdef DPT_MEASURE_PERFORMANCE
2455	++dpt->performance.command_count[ccb->eata_ccb.cp_scsi_cmd];
2456	ccb->command_started = microtime_now;
2457#endif
2458	dpt_Qadd_waiting(dpt, ccb);
2459
2460	dpt_sched_queue(dpt);
2461
2462	/* Wait for the command to complete */
2463	(void) mtx_sleep((void *) ccb, &dpt->lock, PCATCH | PRIBIO, "dptucw",
2464	    100 * hz);
2465
2466	/* Free allocated memory */
2467	if (data != NULL)
2468		free(data, M_TEMP);
2469
2470	return (0);
2471}
2472
2473static void
2474dpt_user_cmd_done(dpt_softc_t * dpt, int bus, dpt_ccb_t * ccb)
2475{
2476	u_int32_t       result;
2477	caddr_t         cmd_arg;
2478
2479	mtx_unlock(&dpt->lock);
2480
2481	/**
2482	 * If Auto Request Sense is on, copyout the sense struct
2483	 */
2484#define usr_pckt_DMA 	(caddr_t)(intptr_t)ntohl(ccb->eata_ccb.cp_reqDMA)
2485#define usr_pckt_len	ntohl(ccb->eata_ccb.cp_datalen)
2486	if (ccb->eata_ccb.Auto_Req_Sen == 1) {
2487		if (copyout((caddr_t) & ccb->sense_data, usr_pckt_DMA,
2488			    sizeof(struct scsi_sense_data))) {
2489			mtx_lock(&dpt->lock);
2490			ccb->result = EFAULT;
2491			dpt_Qpush_free(dpt, ccb);
2492			wakeup(ccb);
2493			return;
2494		}
2495	}
2496	/* If DataIn is on, copyout the data */
2497	if ((ccb->eata_ccb.DataIn == 1)
2498	    && (ccb->status_packet.hba_stat == HA_NO_ERROR)) {
2499		if (copyout(ccb->data, usr_pckt_DMA, usr_pckt_len)) {
2500			mtx_lock(&dpt->lock);
2501			dpt_Qpush_free(dpt, ccb);
2502			ccb->result = EFAULT;
2503
2504			wakeup(ccb);
2505			return;
2506		}
2507	}
2508	/* Copyout the status */
2509	result = ccb->status_packet.hba_stat;
2510	cmd_arg = (caddr_t) ccb->result;
2511
2512	if (copyout((caddr_t) & result, cmd_arg, sizeof(result))) {
2513		mtx_lock(&dpt->lock);
2514		dpt_Qpush_free(dpt, ccb);
2515		ccb->result = EFAULT;
2516		wakeup(ccb);
2517		return;
2518	}
	mtx_lock(&dpt->lock);
	/* Put the CCB back in the freelist */
	ccb->state |= DPT_CCB_STATE_COMPLETED;
	dpt_Qpush_free(dpt, ccb);

	/* Wake the sleeping submitter; it frees the data buffer */
	wakeup(ccb);
	return;
2526}
2527
2528#ifdef DPT_HANDLE_TIMEOUTS
2529/**
2530 * This function walks down the SUBMITTED queue.
2531 * Every request that is too old gets aborted and marked.
2532 * Since the DPT will complete (interrupt) immediately (what does that mean?),
 * We just walk the list, aborting old commands and marking them as such.
 * The dpt_complete function will get rid of the ones that were interrupted
 * in the normal manner.
 *
 * This function needs to run at splcam(), as it interacts with the submitted
 * queue, as well as the completed and free queues, just like dpt_intr() does.
 * Running it at any IPL other than that of dpt_intr() would mean that
 * dpt_intr() could pre-empt it, grab a transaction in progress (headed for
 * destruction) and operate on it.  The state of that transaction would then
 * be unclear.
 * The only other option is to lock it only as long as necessary, but have
 * dpt_intr() spin-wait on it.  In a UP environment this makes no sense, and
 * in an SMP environment the advantage is dubious for a function that runs
 * once every ten seconds for a few microseconds and, on systems with healthy
 * hardware, does not do anything anyway.
2548 */
2549
2550static void
2551dpt_handle_timeouts(dpt_softc_t * dpt)
2552{
2553	dpt_ccb_t      *ccb;
2554
2555	if (dpt->state & DPT_HA_TIMEOUTS_ACTIVE) {
2556		device_printf(dpt->dev, "WARNING: Timeout Handling Collision\n");
2557		return;
2558	}
2559	dpt->state |= DPT_HA_TIMEOUTS_ACTIVE;
2560
2561	/* Loop through the entire submitted queue, looking for lost souls */
	TAILQ_FOREACH(ccb, &dpt->submitted_ccbs, links) {
2563		struct scsi_xfer *xs;
2564		u_int32_t       age, max_age;
2565
2566		xs = ccb->xs;
2567		age = dpt_time_delta(ccb->command_started, microtime_now);
2568
2569#define TenSec	10000000
2570
2571		if (xs == NULL) {	/* Local, non-kernel call */
2572			max_age = TenSec;
2573		} else {
2574			max_age = (((xs->timeout * (dpt->submitted_ccbs_count
2575						    + DPT_TIMEOUT_FACTOR))
2576				    > TenSec)
2577				 ? (xs->timeout * (dpt->submitted_ccbs_count
2578						   + DPT_TIMEOUT_FACTOR))
2579				   : TenSec);
2580		}
2581
2582		/*
2583		 * If a transaction is marked lost and is TWICE as old as we
2584		 * care, then, and only then do we destroy it!
2585		 */
2586		if (ccb->state & DPT_CCB_STATE_MARKED_LOST) {
2587			/* Remember who is next */
2588			if (age > (max_age * 2)) {
2589				dpt_Qremove_submitted(dpt, ccb);
2590				ccb->state &= ~DPT_CCB_STATE_MARKED_LOST;
2591				ccb->state |= DPT_CCB_STATE_ABORTED;
2592#define cmd_name scsi_cmd_name(ccb->eata_ccb.cp_scsi_cmd)
2593				if (ccb->retries++ > DPT_RETRIES) {
2594					device_printf(dpt->dev,
2595					       "ERROR: Destroying stale "
2596					       "%d (%s)\n"
2597					       "		on "
2598					       "c%db%dt%du%d (%d/%d)\n",
2599					       ccb->transaction_id,
2600					       cmd_name,
2601					       device_get_unit(dpt->dev),
2602					       ccb->eata_ccb.cp_channel,
2603					       ccb->eata_ccb.cp_id,
2604					       ccb->eata_ccb.cp_LUN, age,
2605					       ccb->retries);
2606#define send_ccb &ccb->eata_ccb
2607#define ESA	 EATA_SPECIFIC_ABORT
					(void) dpt_send_immediate(dpt,
								  send_ccb,
								  ccb->eata_ccb.cp_busaddr,
								  /*retries*/20000,
								  ESA, 0, 0);
2612					dpt_Qpush_free(dpt, ccb);
2613
					/* The SCSI layer should re-try */
					if (xs != NULL) {
						xs->error |= XS_TIMEOUT;
						xs->flags |= SCSI_ITSDONE;
						scsi_done(xs);
					}
2618				} else {
2619					device_printf(dpt->dev,
2620					       "ERROR: Stale %d (%s) on "
2621					       "c%db%dt%du%d (%d)\n"
2622					     "		gets another "
2623					       "chance(%d/%d)\n",
2624					       ccb->transaction_id,
2625					       cmd_name,
2626					       device_get_unit(dpt->dev),
2627					       ccb->eata_ccb.cp_channel,
2628					       ccb->eata_ccb.cp_id,
2629					       ccb->eata_ccb.cp_LUN,
2630					    age, ccb->retries, DPT_RETRIES);
2631
2632					dpt_Qpush_waiting(dpt, ccb);
2633					dpt_sched_queue(dpt);
2634				}
2635			}
2636		} else {
2637			/*
			 * This is a transaction that is not to be destroyed
			 * (yet), but it is too old for our liking.  We wait as
			 * long as the upper layer thinks reasonable.  Well, not
			 * really: we multiply that by the number of commands in
			 * the submitted queue + 1.
2643			 */
2644			if (!(ccb->state & DPT_CCB_STATE_MARKED_LOST) &&
2645			    (age != ~0) && (age > max_age)) {
2646				device_printf(dpt->dev,
2647				       "ERROR: Marking %d (%s) on "
2648				       "c%db%dt%du%d \n"
2649				       "            as late after %dusec\n",
2650				       ccb->transaction_id,
2651				       cmd_name,
2652				       device_get_unit(dpt->dev),
2653				       ccb->eata_ccb.cp_channel,
2654				       ccb->eata_ccb.cp_id,
2655				       ccb->eata_ccb.cp_LUN, age);
2656				ccb->state |= DPT_CCB_STATE_MARKED_LOST;
2657			}
2658		}
2659	}
2660
2661	dpt->state &= ~DPT_HA_TIMEOUTS_ACTIVE;
2662}
2663
2664static void
2665dpt_timeout(void *arg)
2666{
2667	dpt_softc_t    *dpt = (dpt_softc_t *) arg;
2668
2669	mtx_assert(&dpt->lock, MA_OWNED);
2670	if (!(dpt->state & DPT_HA_TIMEOUTS_ACTIVE))
2671		dpt_handle_timeouts(dpt);
2672
2673	callout_reset(&dpt->timer, hz * 10, dpt_timeout, dpt);
2674}
2675
2676#endif				/* DPT_HANDLE_TIMEOUTS */
2677
2678#endif
2679