/*
 * CAM SCSI interface for the Advanced Systems Inc.
 * Second Generation SCSI controllers.
 *
 * Product specific probe and attach routines can be found in:
 *
 * pci/adw_pci.c	ABP940UW
 *
 * Copyright (c) 1998 Justin Gibbs.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions, and the following disclaimer,
 *    without modification, immediately at the beginning of the file.
 * 2. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *      $Id$
 */
/*
 * Ported from:
 * advansys.c - Linux Host Driver for AdvanSys SCSI Adapters
 *
 * Copyright (c) 1995-1998 Advanced System Products, Inc.
 * All Rights Reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that redistributions of source
 * code retain the above copyright notice and this comment without
 * modification.
 */
#include <stddef.h>	/* For offsetof */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/malloc.h>

#include <machine/bus_pio.h>
#include <machine/bus_memio.h>
#include <machine/bus.h>
#include <machine/clock.h>

#include <cam/cam.h>
#include <cam/cam_ccb.h>
#include <cam/cam_sim.h>
#include <cam/cam_xpt_sim.h>
#include <cam/cam_debug.h>

#include <cam/scsi/scsi_message.h>

#include <dev/advansys/adwvar.h>

/* Definitions for our use of the SIM private CCB area */
#define ccb_acb_ptr spriv_ptr0
#define ccb_adw_ptr spriv_ptr1

#define MIN(a, b) (((a) < (b)) ? (a) : (b))

u_long adw_unit;

static __inline u_int32_t	acbvtop(struct adw_softc *adw,
					   struct acb *acb);
static __inline struct acb *	acbptov(struct adw_softc *adw,
					u_int32_t busaddr);
static __inline struct acb*	adwgetacb(struct adw_softc *adw);
static __inline void		adwfreeacb(struct adw_softc *adw,
					   struct acb *acb);

static void		adwmapmem(void *arg, bus_dma_segment_t *segs,
				  int nseg, int error);
static struct sg_map_node*
			adwallocsgmap(struct adw_softc *adw);
static int		adwallocacbs(struct adw_softc *adw);

static void		adwexecuteacb(void *arg, bus_dma_segment_t *dm_segs,
				      int nseg, int error);
static void		adw_action(struct cam_sim *sim, union ccb *ccb);
static void		adw_poll(struct cam_sim *sim);
static void		adw_async(void *callback_arg, u_int32_t code,
				  struct cam_path *path, void *arg);
static void		adwprocesserror(struct adw_softc *adw, struct acb *acb);
static void		adwtimeout(void *arg);
static void		adw_handle_device_reset(struct adw_softc *adw,
						u_int target);
static void		adw_handle_bus_reset(struct adw_softc *adw,
					     int initiated);

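/*
 * Convert between the kernel virtual address of an ACB and the bus
 * address by which the controller references it.  Both conversions
 * rely on the ACB array having been loaded as a single contiguous
 * DMA segment rooted at acb_busbase.
 */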
static __inline u_int32_t
acbvtop(struct adw_softc *adw, struct acb *acb)
{
	return (adw->acb_busbase
	      + (u_int32_t)((caddr_t)acb - (caddr_t)adw->acbs));
}

static __inline struct acb *
acbptov(struct adw_softc *adw, u_int32_t busaddr)
{
	return (adw->acbs
	      + ((struct acb *)busaddr - (struct acb *)adw->acb_busbase));
}

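/*
 * Pull an ACB from the free list, growing the pool on demand until
 * max_acbs is reached.  Returns NULL if no ACB can be provided.
 */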
static __inline struct acb*
adwgetacb(struct adw_softc *adw)
{
	struct	acb* acb;
	int	s;

	s = splcam();
	if ((acb = SLIST_FIRST(&adw->free_acb_list)) != NULL) {
		SLIST_REMOVE_HEAD(&adw->free_acb_list, links);
	} else if (adw->num_acbs < adw->max_acbs) {
		adwallocacbs(adw);
		acb = SLIST_FIRST(&adw->free_acb_list);
		if (acb == NULL)
			printf("%s: Can't malloc ACB\n", adw_name(adw));
		else {
			SLIST_REMOVE_HEAD(&adw->free_acb_list, links);
		}
	}
	splx(s);

	return (acb);
}

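/*
 * Return an ACB to the free list.  If the SIM queue was frozen because
 * of an ACB shortage, arrange for it to be released when this CCB is
 * completed.
 */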
static __inline void
adwfreeacb(struct adw_softc *adw, struct acb *acb)
{
	int s;

	s = splcam();
	if ((acb->state & ACB_ACTIVE) != 0)
		LIST_REMOVE(&acb->ccb->ccb_h, sim_links.le);
	if ((acb->state & ACB_RELEASE_SIMQ) != 0)
		acb->ccb->ccb_h.status |= CAM_RELEASE_SIMQ;
	else if ((adw->state & ADW_RESOURCE_SHORTAGE) != 0
	      && (acb->ccb->ccb_h.status & CAM_RELEASE_SIMQ) == 0) {
		acb->ccb->ccb_h.status |= CAM_RELEASE_SIMQ;
		adw->state &= ~ADW_RESOURCE_SHORTAGE;
	}
	acb->state = ACB_FREE;
	SLIST_INSERT_HEAD(&adw->free_acb_list, acb, links);
	splx(s);
}

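/*
 * Callback for bus_dmamap_load().  Record the bus address of the
 * single segment mapping in the caller supplied bus_addr_t.
 */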
static void
adwmapmem(void *arg, bus_dma_segment_t *segs, int nseg, int error)
{
	bus_addr_t *busaddrp;

	busaddrp = (bus_addr_t *)arg;
	*busaddrp = segs->ds_addr;
}

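/*
 * Allocate, map, and zero a page of DMA safe memory to hold the
 * scatter/gather blocks for the next batch of ACBs.
 */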
static struct sg_map_node *
adwallocsgmap(struct adw_softc *adw)
{
	struct sg_map_node *sg_map;

	sg_map = malloc(sizeof(*sg_map), M_DEVBUF, M_NOWAIT);

	if (sg_map == NULL)
		return (NULL);

	/* Allocate S/G space for the next batch of ACBs */
	if (bus_dmamem_alloc(adw->sg_dmat, (void **)&sg_map->sg_vaddr,
			     BUS_DMA_NOWAIT, &sg_map->sg_dmamap) != 0) {
		free(sg_map, M_DEVBUF);
		return (NULL);
	}

	SLIST_INSERT_HEAD(&adw->sg_maps, sg_map, links);

	bus_dmamap_load(adw->sg_dmat, sg_map->sg_dmamap, sg_map->sg_vaddr,
			PAGE_SIZE, adwmapmem, &sg_map->sg_physaddr, /*flags*/0);

	bzero(sg_map->sg_vaddr, PAGE_SIZE);
	return (sg_map);
}

/*
 * Allocate another chunk of ACBs.  Return count of entries added.
 * Assumed to be called at splcam().
 */
static int
adwallocacbs(struct adw_softc *adw)
{
	struct acb *next_acb;
	struct sg_map_node *sg_map;
	bus_addr_t busaddr;
	struct adw_sg_block *blocks;
	int newcount;
	int i;

	next_acb = &adw->acbs[adw->num_acbs];

	sg_map = adwallocsgmap(adw);

	if (sg_map == NULL)
		return (0);

	blocks = sg_map->sg_vaddr;
	busaddr = sg_map->sg_physaddr;

	newcount = (PAGE_SIZE / (ADW_SG_BLOCKCNT * sizeof(*blocks)));
	for (i = 0; adw->num_acbs < adw->max_acbs && i < newcount; i++) {
		int error;
		int j;

		error = bus_dmamap_create(adw->buffer_dmat, /*flags*/0,
					  &next_acb->dmamap);
		if (error != 0)
			break;
		next_acb->queue.scsi_req_baddr = acbvtop(adw, next_acb);
		next_acb->queue.sense_addr =
		    acbvtop(adw, next_acb) + offsetof(struct acb, sense_data);
		next_acb->sg_blocks = blocks;
		next_acb->sg_busaddr = busaddr;
		/* Setup static data in the sg blocks */
		for (j = 0; j < ADW_SG_BLOCKCNT; j++) {
			next_acb->sg_blocks[j].first_entry_no =
			    j * ADW_NO_OF_SG_PER_BLOCK;
		}
		next_acb->state = ACB_FREE;
		SLIST_INSERT_HEAD(&adw->free_acb_list, next_acb, links);
		blocks += ADW_SG_BLOCKCNT;
		busaddr += ADW_SG_BLOCKCNT * sizeof(*blocks);
		next_acb++;
		adw->num_acbs++;
	}
	return (i);
}

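/*
 * Callback for bus_dmamap_load() on a CCB's data buffer.  Copy the
 * returned DMA segments into the ACB's scatter/gather blocks and, if
 * the CCB has not been aborted in the meantime, queue the request to
 * the controller.
 */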
static void
adwexecuteacb(void *arg, bus_dma_segment_t *dm_segs, int nseg, int error)
{
	struct	 acb *acb;
	union	 ccb *ccb;
	struct	 adw_softc *adw;
	int	 s;

	acb = (struct acb *)arg;
	ccb = acb->ccb;
	adw = (struct adw_softc *)ccb->ccb_h.ccb_adw_ptr;

	if (error != 0) {
		if (error != EFBIG)
			printf("%s: Unexpected error 0x%x returned from "
			       "bus_dmamap_load\n", adw_name(adw), error);
		if (ccb->ccb_h.status == CAM_REQ_INPROG) {
			xpt_freeze_devq(ccb->ccb_h.path, /*count*/1);
			ccb->ccb_h.status = CAM_REQ_TOO_BIG|CAM_DEV_QFRZN;
		}
		adwfreeacb(adw, acb);
		xpt_done(ccb);
		return;
	}

	if (nseg != 0) {
		bus_dmasync_op_t op;

		acb->queue.data_addr = dm_segs[0].ds_addr;
		acb->queue.data_cnt = ccb->csio.dxfer_len;
		if (nseg > 1) {
			struct adw_sg_block *sg_block;
			struct adw_sg_elm *sg;
			bus_addr_t sg_busaddr;
			u_int sg_index;
			bus_dma_segment_t *end_seg;

			end_seg = dm_segs + nseg;

			sg_busaddr = acb->sg_busaddr;
			sg_index = 0;
			/* Copy the segments into our SG list */
			for (sg_block = acb->sg_blocks;; sg_block++) {
				u_int sg_left;

				sg_left = ADW_NO_OF_SG_PER_BLOCK;
				sg = sg_block->sg_list;
				while (dm_segs < end_seg && sg_left != 0) {
					sg->sg_addr = dm_segs->ds_addr;
					sg->sg_count = dm_segs->ds_len;
					sg++;
					dm_segs++;
					sg_left--;
				}
				sg_index += ADW_NO_OF_SG_PER_BLOCK - sg_left;
				sg_block->last_entry_no = sg_index - 1;
				if (dm_segs == end_seg) {
					sg_block->sg_busaddr_next = 0;
					break;
				} else {
					sg_busaddr +=
					    sizeof(struct adw_sg_block);
					sg_block->sg_busaddr_next = sg_busaddr;
				}
			}

			acb->queue.sg_entry_cnt = nseg;
			acb->queue.sg_real_addr = acb->sg_busaddr;
		} else {
			acb->queue.sg_entry_cnt = 0;
			acb->queue.sg_real_addr = 0;
		}

		if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN)
			op = BUS_DMASYNC_PREREAD;
		else
			op = BUS_DMASYNC_PREWRITE;

		bus_dmamap_sync(adw->buffer_dmat, acb->dmamap, op);

	} else {
		acb->queue.sg_entry_cnt = 0;
		acb->queue.data_addr = 0;
		acb->queue.data_cnt = 0;
		acb->queue.sg_real_addr = 0;
	}
	acb->queue.free_scsiq_link = 0;
	acb->queue.ux_wk_data_cnt = 0;

	s = splcam();

	/*
	 * This is our last chance to check whether this CCB
	 * needs to be aborted.
	 */
	if (ccb->ccb_h.status != CAM_REQ_INPROG) {
		if (nseg != 0)
			bus_dmamap_unload(adw->buffer_dmat, acb->dmamap);
		adwfreeacb(adw, acb);
		xpt_done(ccb);
		splx(s);
		return;
	}

	acb->state |= ACB_ACTIVE;
	ccb->ccb_h.status |= CAM_SIM_QUEUED;
	LIST_INSERT_HEAD(&adw->pending_ccbs, &ccb->ccb_h, sim_links.le);
	ccb->ccb_h.timeout_ch =
	    timeout(adwtimeout, (caddr_t)acb,
		    (ccb->ccb_h.timeout * hz) / 1000);

	adw_send_acb(adw, acb, acbvtop(adw, acb));

	splx(s);
}

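/*
 * CAM SIM action entry point.  Dispatch on the CCB function code.
 */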
static void
adw_action(struct cam_sim *sim, union ccb *ccb)
{
	struct	adw_softc *adw;

	CAM_DEBUG(ccb->ccb_h.path, CAM_DEBUG_TRACE, ("adw_action\n"));

	adw = (struct adw_softc *)cam_sim_softc(sim);

	switch (ccb->ccb_h.func_code) {
	/* Common cases first */
	case XPT_SCSI_IO:	/* Execute the requested I/O operation */
	{
		struct	ccb_scsiio *csio;
		struct	ccb_hdr *ccbh;
		struct	acb *acb;

		csio = &ccb->csio;
		ccbh = &ccb->ccb_h;
		/* Max supported CDB length is 12 bytes */
		if (csio->cdb_len > 12) {
			ccb->ccb_h.status = CAM_REQ_INVALID;
			xpt_done(ccb);
			return;
		}

		if ((acb = adwgetacb(adw)) == NULL) {
			int s;

			s = splcam();
			adw->state |= ADW_RESOURCE_SHORTAGE;
			splx(s);
			xpt_freeze_simq(sim, /*count*/1);
			ccb->ccb_h.status = CAM_REQUEUE_REQ;
			xpt_done(ccb);
			return;
		}

		/* Link acb and ccb so we can find one from the other */
		acb->ccb = ccb;
		ccb->ccb_h.ccb_acb_ptr = acb;
		ccb->ccb_h.ccb_adw_ptr = adw;

		acb->queue.cntl = 0;
		acb->queue.target_id = ccb->ccb_h.target_id;
		acb->queue.target_lun = ccb->ccb_h.target_lun;

		acb->queue.srb_ptr = 0;
		acb->queue.a_flag = 0;
		acb->queue.sense_len =
			MIN(csio->sense_len, sizeof(acb->sense_data));
		acb->queue.cdb_len = csio->cdb_len;

		if ((ccb->ccb_h.flags & CAM_TAG_ACTION_VALID) != 0)
			acb->queue.tag_code = csio->tag_action;
		else
			acb->queue.tag_code = 0;

		acb->queue.done_status = 0;
		acb->queue.scsi_status = 0;
		acb->queue.host_status = 0;
		acb->queue.ux_sg_ix = 0;

		if ((ccb->ccb_h.flags & CAM_CDB_POINTER) != 0) {
			if ((ccb->ccb_h.flags & CAM_CDB_PHYS) == 0) {
				bcopy(csio->cdb_io.cdb_ptr,
				      acb->queue.cdb, csio->cdb_len);
			} else {
				/* I guess I could map it in... */
				ccb->ccb_h.status = CAM_REQ_INVALID;
				adwfreeacb(adw, acb);
				xpt_done(ccb);
				return;
			}
		} else {
			bcopy(csio->cdb_io.cdb_bytes,
			      acb->queue.cdb, csio->cdb_len);
		}

		/*
		 * If we have any data to send with this command,
		 * map it into bus space.
		 */
		if ((ccbh->flags & CAM_DIR_MASK) != CAM_DIR_NONE) {
			if ((ccbh->flags & CAM_SCATTER_VALID) == 0) {
				/*
				 * We've been given a pointer
				 * to a single buffer.
				 */
				if ((ccbh->flags & CAM_DATA_PHYS) == 0) {
					int s;
					int error;

					s = splsoftvm();
					error =
					    bus_dmamap_load(adw->buffer_dmat,
							    acb->dmamap,
							    csio->data_ptr,
							    csio->dxfer_len,
							    adwexecuteacb,
							    acb, /*flags*/0);
					if (error == EINPROGRESS) {
						/*
						 * So as to maintain ordering,
						 * freeze the controller queue
						 * until our mapping is
						 * returned.
						 */
						xpt_freeze_simq(sim, 1);
						acb->state |= ACB_RELEASE_SIMQ;
					}
					splx(s);
				} else {
					struct bus_dma_segment seg;

					/* Pointer to physical buffer */
					seg.ds_addr =
					    (bus_addr_t)csio->data_ptr;
					seg.ds_len = csio->dxfer_len;
					adwexecuteacb(acb, &seg, 1, 0);
				}
			} else {
				struct bus_dma_segment *segs;

				if ((ccbh->flags & CAM_DATA_PHYS) != 0)
					panic("adw_action - Physical "
					      "segment pointers "
					      "unsupported");

				if ((ccbh->flags & CAM_SG_LIST_PHYS) == 0)
					panic("adw_action - Virtual "
					      "segment addresses "
					      "unsupported");

				/* Just use the segments provided */
				segs = (struct bus_dma_segment *)csio->data_ptr;
				adwexecuteacb(acb, segs, csio->sglist_cnt,
					      (csio->sglist_cnt < ADW_SGSIZE)
					      ? 0 : EFBIG);
			}
		} else {
			adwexecuteacb(acb, NULL, 0, 0);
		}
		break;
	}
	case XPT_RESET_DEV:	/* Bus Device Reset the specified SCSI device */
	{
		adw_idle_cmd_status_t status;

		adw_idle_cmd_send(adw, ADW_IDLE_CMD_DEVICE_RESET,
				  ccb->ccb_h.target_id);
		status = adw_idle_cmd_wait(adw);
		if (status == ADW_IDLE_CMD_SUCCESS) {
			ccb->ccb_h.status = CAM_REQ_CMP;
			if (bootverbose) {
				xpt_print_path(ccb->ccb_h.path);
				printf("BDR Delivered\n");
			}
		} else
			ccb->ccb_h.status = CAM_REQ_CMP_ERR;
		xpt_done(ccb);
		break;
	}
	case XPT_ABORT:			/* Abort the specified CCB */
		/* XXX Implement */
		ccb->ccb_h.status = CAM_REQ_INVALID;
		xpt_done(ccb);
		break;
	case XPT_SET_TRAN_SETTINGS:
	{
		struct	  ccb_trans_settings *cts;
		u_int	  target_mask;
		int	  s;

		cts = &ccb->cts;
		target_mask = 0x01 << ccb->ccb_h.target_id;

		s = splcam();
		if ((cts->flags & CCB_TRANS_CURRENT_SETTINGS) != 0) {
			if ((cts->valid & CCB_TRANS_DISC_VALID) != 0) {
				u_int discenb;

				discenb =
				    adw_lram_read_16(adw, ADW_MC_DISC_ENABLE);

				if ((cts->flags & CCB_TRANS_DISC_ENB) != 0)
					discenb |= target_mask;
				else
					discenb &= ~target_mask;

				adw_lram_write_16(adw, ADW_MC_DISC_ENABLE,
						  discenb);
			}

			if ((cts->valid & CCB_TRANS_TQ_VALID) != 0) {
				if ((cts->flags & CCB_TRANS_TAG_ENB) != 0)
					adw->tagenb |= target_mask;
				else
					adw->tagenb &= ~target_mask;
			}

			if ((cts->valid & CCB_TRANS_BUS_WIDTH_VALID) != 0) {
				u_int wdtrenb_orig;
				u_int wdtrenb;
				u_int wdtrdone;

				wdtrenb_orig =
				    adw_lram_read_16(adw, ADW_MC_WDTR_ABLE);
				wdtrenb = wdtrenb_orig;
				wdtrdone = adw_lram_read_16(adw,
							    ADW_MC_WDTR_DONE);
				switch (cts->bus_width) {
				case MSG_EXT_WDTR_BUS_32_BIT:
				case MSG_EXT_WDTR_BUS_16_BIT:
					wdtrenb |= target_mask;
					break;
				case MSG_EXT_WDTR_BUS_8_BIT:
				default:
					wdtrenb &= ~target_mask;
					break;
				}
				if (wdtrenb != wdtrenb_orig) {
					adw_lram_write_16(adw,
							  ADW_MC_WDTR_ABLE,
							  wdtrenb);
					wdtrdone &= ~target_mask;
					adw_lram_write_16(adw,
							  ADW_MC_WDTR_DONE,
							  wdtrdone);
				}
			}

			if ((cts->valid & CCB_TRANS_SYNC_RATE_VALID) != 0) {
				u_int sdtrenb_orig;
				u_int sdtrenb;
				u_int ultraenb_orig;
				u_int ultraenb;
				u_int sdtrdone;

				sdtrenb_orig =
				    adw_lram_read_16(adw, ADW_MC_SDTR_ABLE);
				sdtrenb = sdtrenb_orig;

				ultraenb_orig =
				    adw_lram_read_16(adw, ADW_MC_ULTRA_ABLE);
				ultraenb = ultraenb_orig;

				sdtrdone = adw_lram_read_16(adw,
							    ADW_MC_SDTR_DONE);

				if (cts->sync_period == 0) {
					sdtrenb &= ~target_mask;
				} else if (cts->sync_period > 12) {
					ultraenb &= ~target_mask;
					sdtrenb |= target_mask;
				} else {
					ultraenb |= target_mask;
					sdtrenb |= target_mask;
				}

				if ((cts->valid
				   & CCB_TRANS_SYNC_OFFSET_VALID) != 0) {
					if (cts->sync_offset == 0)
						sdtrenb &= ~target_mask;
				}

				if (sdtrenb != sdtrenb_orig
				 || ultraenb != ultraenb_orig) {
					adw_lram_write_16(adw, ADW_MC_SDTR_ABLE,
							  sdtrenb);
					adw_lram_write_16(adw,
							  ADW_MC_ULTRA_ABLE,
							  ultraenb);
					sdtrdone &= ~target_mask;
					adw_lram_write_16(adw, ADW_MC_SDTR_DONE,
							  sdtrdone);
				}
			}
		}
		splx(s);
		ccb->ccb_h.status = CAM_REQ_CMP;
		xpt_done(ccb);
		break;
	}
	case XPT_GET_TRAN_SETTINGS:
	/* Get default/user set transfer settings for the target */
	{
		struct	ccb_trans_settings *cts;
		u_int	target_mask;

		cts = &ccb->cts;
		target_mask = 0x01 << ccb->ccb_h.target_id;
		if ((cts->flags & CCB_TRANS_USER_SETTINGS) != 0) {
			cts->flags = 0;
			if ((adw->user_discenb & target_mask) != 0)
				cts->flags |= CCB_TRANS_DISC_ENB;

			if ((adw->user_tagenb & target_mask) != 0)
				cts->flags |= CCB_TRANS_TAG_ENB;

			if ((adw->user_wdtr & target_mask) != 0)
				cts->bus_width = MSG_EXT_WDTR_BUS_16_BIT;
			else
				cts->bus_width = MSG_EXT_WDTR_BUS_8_BIT;

			if ((adw->user_sdtr & target_mask) != 0) {
				if ((adw->user_ultra & target_mask) != 0)
					cts->sync_period = 12; /* 20MHz */
				else
					cts->sync_period = 25; /* 10MHz */
				cts->sync_offset = 15; /* XXX ??? */
			}

			cts->valid = CCB_TRANS_SYNC_RATE_VALID
				   | CCB_TRANS_SYNC_OFFSET_VALID
				   | CCB_TRANS_BUS_WIDTH_VALID
				   | CCB_TRANS_DISC_VALID
				   | CCB_TRANS_TQ_VALID;
			ccb->ccb_h.status = CAM_REQ_CMP;
		} else {
			u_int targ_tinfo;

			cts->flags = 0;
			if ((adw_lram_read_16(adw, ADW_MC_DISC_ENABLE)
			  & target_mask) != 0)
				cts->flags |= CCB_TRANS_DISC_ENB;

			if ((adw->tagenb & target_mask) != 0)
				cts->flags |= CCB_TRANS_TAG_ENB;

			targ_tinfo =
			    adw_lram_read_16(adw,
					     ADW_MC_DEVICE_HSHK_CFG_TABLE
					     + (2 * ccb->ccb_h.target_id));

			if ((targ_tinfo & ADW_HSHK_CFG_WIDE_XFR) != 0)
				cts->bus_width = MSG_EXT_WDTR_BUS_16_BIT;
			else
				cts->bus_width = MSG_EXT_WDTR_BUS_8_BIT;

			cts->sync_period =
			    ADW_HSHK_CFG_PERIOD_FACTOR(targ_tinfo);

			cts->sync_offset = targ_tinfo & ADW_HSHK_CFG_OFFSET;
			if (cts->sync_period == 0)
				cts->sync_offset = 0;

			if (cts->sync_offset == 0)
				cts->sync_period = 0;
		}
		cts->valid = CCB_TRANS_SYNC_RATE_VALID
			   | CCB_TRANS_SYNC_OFFSET_VALID
			   | CCB_TRANS_BUS_WIDTH_VALID
			   | CCB_TRANS_DISC_VALID
			   | CCB_TRANS_TQ_VALID;
		ccb->ccb_h.status = CAM_REQ_CMP;
		xpt_done(ccb);
		break;
	}
	case XPT_CALC_GEOMETRY:
	{
		struct	  ccb_calc_geometry *ccg;
		u_int32_t size_mb;
		u_int32_t secs_per_cylinder;
		int	  extended;

		/*
		 * XXX Use Adaptec translation until I find out how to
		 *     get this information from the card.
		 */
		ccg = &ccb->ccg;
		size_mb = ccg->volume_size
			/ ((1024L * 1024L) / ccg->block_size);
		extended = 1;

		if (size_mb > 1024 && extended) {
			ccg->heads = 255;
			ccg->secs_per_track = 63;
		} else {
			ccg->heads = 64;
			ccg->secs_per_track = 32;
		}
		secs_per_cylinder = ccg->heads * ccg->secs_per_track;
		ccg->cylinders = ccg->volume_size / secs_per_cylinder;
		ccb->ccb_h.status = CAM_REQ_CMP;
		xpt_done(ccb);
		break;
	}
	case XPT_RESET_BUS:		/* Reset the specified SCSI bus */
	{
		adw_idle_cmd_status_t status;

		adw_idle_cmd_send(adw, ADW_IDLE_CMD_SCSI_RESET, /*param*/0);
		status = adw_idle_cmd_wait(adw);
		if (status == ADW_IDLE_CMD_SUCCESS) {
			ccb->ccb_h.status = CAM_REQ_CMP;
			if (bootverbose) {
				xpt_print_path(adw->path);
				printf("Bus Reset Delivered\n");
			}
		} else
			ccb->ccb_h.status = CAM_REQ_CMP_ERR;
		xpt_done(ccb);
		break;
	}
	case XPT_TERM_IO:		/* Terminate the I/O process */
		/* XXX Implement */
		ccb->ccb_h.status = CAM_REQ_INVALID;
		xpt_done(ccb);
		break;
	case XPT_PATH_INQ:		/* Path routing inquiry */
	{
		struct ccb_pathinq *cpi = &ccb->cpi;

		cpi->version_num = 1;
		cpi->hba_inquiry = PI_WIDE_16|PI_SDTR_ABLE|PI_TAG_ABLE;
		cpi->target_sprt = 0;
		cpi->hba_misc = 0;
		cpi->hba_eng_cnt = 0;
		cpi->max_target = ADW_MAX_TID;
		cpi->max_lun = ADW_MAX_LUN;
		cpi->initiator_id = adw->initiator_id;
		cpi->bus_id = cam_sim_bus(sim);
		strncpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN);
		strncpy(cpi->hba_vid, "AdvanSys", HBA_IDLEN);
		strncpy(cpi->dev_name, cam_sim_name(sim), DEV_IDLEN);
		cpi->unit_number = cam_sim_unit(sim);
		cpi->ccb_h.status = CAM_REQ_CMP;
		xpt_done(ccb);
		break;
	}
	default:
		ccb->ccb_h.status = CAM_REQ_INVALID;
		xpt_done(ccb);
		break;
	}
}

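/*
 * Poll for command completions by invoking the interrupt handler
 * directly.  Used by CAM when interrupts are not available.
 */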
static void
adw_poll(struct cam_sim *sim)
{
	adw_intr(cam_sim_softc(sim));
}

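/*
 * Async event callback.  Registered in adw_attach() for AC_LOST_DEVICE
 * events; nothing needs to be done for them yet.
 */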
static void
adw_async(void *callback_arg, u_int32_t code, struct cam_path *path, void *arg)
{
}

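/*
 * Allocate and minimally initialize a softc for the given unit and
 * register access handles.
 */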
struct adw_softc *
adw_alloc(int unit, bus_space_tag_t tag, bus_space_handle_t bsh)
{
	struct	 adw_softc *adw;
	int	 i;

	/*
	 * Allocate a storage area for us
	 */
	adw = malloc(sizeof(struct adw_softc), M_DEVBUF, M_NOWAIT);
	if (adw == NULL) {
		printf("adw%d: cannot malloc!\n", unit);
		return NULL;
	}
	bzero(adw, sizeof(struct adw_softc));
	LIST_INIT(&adw->pending_ccbs);
	SLIST_INIT(&adw->sg_maps);
	adw->unit = unit;
	adw->tag = tag;
	adw->bsh = bsh;
	i = adw->unit / 10;
	adw->name = malloc(sizeof("adw") + i + 1, M_DEVBUF, M_NOWAIT);
	if (adw->name == NULL) {
		printf("adw%d: cannot malloc name!\n", unit);
		free(adw, M_DEVBUF);
		return NULL;
	}
	sprintf(adw->name, "adw%d", adw->unit);
	return(adw);
}

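/*
 * Release all resources acquired so far, based on how far
 * initialization progressed (init_level).  The switch intentionally
 * falls through from the highest completed level to the lowest.
 */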
void
adw_free(struct adw_softc *adw)
{
	switch (adw->init_level) {
	case 6:
	{
		struct sg_map_node *sg_map;

		while ((sg_map = SLIST_FIRST(&adw->sg_maps)) != NULL) {
			SLIST_REMOVE_HEAD(&adw->sg_maps, links);
			bus_dmamap_unload(adw->sg_dmat,
					  sg_map->sg_dmamap);
			bus_dmamem_free(adw->sg_dmat, sg_map->sg_vaddr,
					sg_map->sg_dmamap);
			free(sg_map, M_DEVBUF);
		}
		bus_dma_tag_destroy(adw->sg_dmat);
	}
	case 5:
		bus_dmamap_unload(adw->acb_dmat, adw->acb_dmamap);
	case 4:
		bus_dmamem_free(adw->acb_dmat, adw->acbs,
				adw->acb_dmamap);
		bus_dmamap_destroy(adw->acb_dmat, adw->acb_dmamap);
	case 3:
		bus_dma_tag_destroy(adw->acb_dmat);
	case 2:
		bus_dma_tag_destroy(adw->buffer_dmat);
	case 1:
		bus_dma_tag_destroy(adw->parent_dmat);
	case 0:
		break;
	}
	free(adw->name, M_DEVBUF);
	free(adw, M_DEVBUF);
}

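/*
 * Read the EEPROM configuration (restoring defaults if its checksum is
 * bad), initialize the chip, and create the DMA tags and initial ACB
 * pool.  Returns 0 on success and -1 on failure.
 */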
int
adw_init(struct adw_softc *adw)
{
	struct	  adw_eeprom eep_config;
	u_int16_t checksum;
	u_int16_t scsicfg1;

	adw_reset_chip(adw);
	checksum = adw_eeprom_read(adw, &eep_config);
	bcopy(eep_config.serial_number, adw->serial_number,
	      sizeof(adw->serial_number));
	if (checksum != eep_config.checksum) {
		u_int16_t serial_number[3];

		printf("%s: EEPROM checksum failed.  Restoring Defaults\n",
		       adw_name(adw));

		/*
		 * Restore the default EEPROM settings.
		 * Assume the 6 byte board serial number that was read
		 * from EEPROM is correct even if the EEPROM checksum
		 * failed.
		 */
		bcopy(&adw_default_eeprom, &eep_config, sizeof(eep_config));
		bcopy(adw->serial_number, eep_config.serial_number,
		      sizeof(serial_number));
		adw_eeprom_write(adw, &eep_config);
	}

	/* Pull eeprom information into our softc. */
	adw->bios_ctrl = eep_config.bios_ctrl;
	adw->user_wdtr = eep_config.wdtr_able;
	adw->user_sdtr = eep_config.sdtr_able;
	adw->user_ultra = eep_config.ultra_able;
	adw->user_tagenb = eep_config.tagqng_able;
	adw->user_discenb = eep_config.disc_enable;
	adw->max_acbs = eep_config.max_host_qng;
	adw->initiator_id = (eep_config.adapter_scsi_id & ADW_MAX_TID);

	/*
	 * Sanity check the number of host openings.
	 */
	if (adw->max_acbs > ADW_DEF_MAX_HOST_QNG)
		adw->max_acbs = ADW_DEF_MAX_HOST_QNG;
	else if (adw->max_acbs < ADW_DEF_MIN_HOST_QNG) {
		/* If the value is zero, assume it is uninitialized. */
		if (adw->max_acbs == 0)
			adw->max_acbs = ADW_DEF_MAX_HOST_QNG;
		else
			adw->max_acbs = ADW_DEF_MIN_HOST_QNG;
	}

	scsicfg1 = 0;
	switch (eep_config.termination) {
	default:
		printf("%s: Invalid EEPROM Termination Settings.\n",
		       adw_name(adw));
		printf("%s: Reverting to Automatic Termination\n",
		       adw_name(adw));
		/* FALLTHROUGH */
	case ADW_EEPROM_TERM_AUTO:
		break;
	case ADW_EEPROM_TERM_BOTH_ON:
		scsicfg1 |= ADW_SCSI_CFG1_TERM_CTL_L;
		/* FALLTHROUGH */
	case ADW_EEPROM_TERM_HIGH_ON:
		scsicfg1 |= ADW_SCSI_CFG1_TERM_CTL_H;
		/* FALLTHROUGH */
	case ADW_EEPROM_TERM_OFF:
		scsicfg1 |= ADW_SCSI_CFG1_TERM_CTL_MANUAL;
		break;
	}

	printf("%s: SCSI ID %d, ", adw_name(adw), adw->initiator_id);

	if (adw_init_chip(adw, scsicfg1) != 0)
		return (-1);

	printf("Queue Depth %d\n", adw->max_acbs);

	/* DMA tag for mapping buffers into device visible space. */
	if (bus_dma_tag_create(adw->parent_dmat, /*alignment*/0, /*boundary*/0,
			       /*lowaddr*/BUS_SPACE_MAXADDR,
			       /*highaddr*/BUS_SPACE_MAXADDR,
			       /*filter*/NULL, /*filterarg*/NULL,
			       /*maxsize*/MAXBSIZE, /*nsegments*/ADW_SGSIZE,
			       /*maxsegsz*/BUS_SPACE_MAXSIZE_32BIT,
			       /*flags*/BUS_DMA_ALLOCNOW,
			       &adw->buffer_dmat) != 0) {
		return (-1);
	}

	adw->init_level++;

	/* DMA tag for our ccb structures */
	if (bus_dma_tag_create(adw->parent_dmat, /*alignment*/0, /*boundary*/0,
			       /*lowaddr*/BUS_SPACE_MAXADDR,
			       /*highaddr*/BUS_SPACE_MAXADDR,
			       /*filter*/NULL, /*filterarg*/NULL,
			       adw->max_acbs * sizeof(struct acb),
			       /*nsegments*/1,
			       /*maxsegsz*/BUS_SPACE_MAXSIZE_32BIT,
			       /*flags*/0, &adw->acb_dmat) != 0) {
		return (-1);
	}

	adw->init_level++;

	/* Allocation for our ccbs */
	if (bus_dmamem_alloc(adw->acb_dmat, (void **)&adw->acbs,
			     BUS_DMA_NOWAIT, &adw->acb_dmamap) != 0) {
		return (-1);
	}

	adw->init_level++;

	/* And permanently map them */
	bus_dmamap_load(adw->acb_dmat, adw->acb_dmamap,
			adw->acbs,
			adw->max_acbs * sizeof(struct acb),
			adwmapmem, &adw->acb_busbase, /*flags*/0);

	/* Clear them out. */
	bzero(adw->acbs, adw->max_acbs * sizeof(struct acb));

	/* DMA tag for our S/G structures.  We allocate in page sized chunks */
	if (bus_dma_tag_create(adw->parent_dmat, /*alignment*/0, /*boundary*/0,
			       /*lowaddr*/BUS_SPACE_MAXADDR,
			       /*highaddr*/BUS_SPACE_MAXADDR,
			       /*filter*/NULL, /*filterarg*/NULL,
			       PAGE_SIZE, /*nsegments*/1,
			       /*maxsegsz*/BUS_SPACE_MAXSIZE_32BIT,
			       /*flags*/0, &adw->sg_dmat) != 0) {
		return (-1);
	}

	adw->init_level++;

	/* Allocate our first batch of ccbs */
	if (adwallocacbs(adw) == 0)
		return (-1);

	return (0);
}

/*
 * Attach all the sub-devices we can find
 */
int
adw_attach(struct adw_softc *adw)
{
	struct ccb_setasync csa;
	struct cam_devq *devq;

	/* Start the RISC processor now that we are fully configured. */
	adw_outw(adw, ADW_RISC_CSR, ADW_RISC_CSR_RUN);

	/*
	 * Create the device queue for our SIM.
	 */
	devq = cam_simq_alloc(adw->max_acbs);
	if (devq == NULL)
		return (0);

	/*
	 * Construct our SIM entry.
	 */
	adw->sim = cam_sim_alloc(adw_action, adw_poll, "adw", adw, adw->unit,
				 1, adw->max_acbs, devq);
	if (adw->sim == NULL)
		return (0);

	/*
	 * Register the bus.
	 */
	if (xpt_bus_register(adw->sim, 0) != CAM_SUCCESS) {
		cam_sim_free(adw->sim, /*free devq*/TRUE);
		return (0);
	}

	if (xpt_create_path(&adw->path, /*periph*/NULL, cam_sim_path(adw->sim),
			    CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD)
	   == CAM_REQ_CMP) {
		xpt_setup_ccb(&csa.ccb_h, adw->path, /*priority*/5);
		csa.ccb_h.func_code = XPT_SASYNC_CB;
		csa.event_enable = AC_LOST_DEVICE;
		csa.callback = adw_async;
		csa.callback_arg = adw;
		xpt_action((union ccb *)&csa);
	}

	return (0);
}

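/*
 * Interrupt handler.  Acknowledge the interrupt, handle idle command
 * completions and bus resets, then walk the RISC queue list to
 * complete any finished requests.
 */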
void
adw_intr(void *arg)
{
	struct	adw_softc *adw;
	u_int	int_stat;
	u_int	next_doneq;
	u_int	next_completeq;
	u_int	doneq_start;

	adw = (struct adw_softc *)arg;
	if ((adw_inw(adw, ADW_CTRL_REG) & ADW_CTRL_REG_HOST_INTR) == 0)
		return;

	/* Reading the register clears the interrupt. */
	int_stat = adw_inb(adw, ADW_INTR_STATUS_REG);

	if ((int_stat & ADW_INTR_STATUS_INTRB) != 0) {
		/* Idle Command Complete */
		adw->idle_command_cmp = 1;
		switch (adw->idle_cmd) {
		case ADW_IDLE_CMD_DEVICE_RESET:
			adw_handle_device_reset(adw,
						/*target*/adw->idle_cmd_param);
			break;
		case ADW_IDLE_CMD_SCSI_RESET:
			adw_handle_bus_reset(adw, /*initiated*/TRUE);
			break;
		default:
			break;
		}
		adw->idle_cmd = ADW_IDLE_CMD_COMPLETED;
	}

	if ((int_stat & ADW_INTR_STATUS_INTRC) != 0) {
		/* SCSI Bus Reset */
		adw_handle_bus_reset(adw, /*initiated*/FALSE);
	}

	/*
	 * ADW_MC_HOST_NEXT_DONE is actually the last completed RISC
	 * Queue List request.  Its forward pointer (RQL_FWD) points to
	 * the current completed RISC Queue List request.
	 */
	next_doneq = adw_lram_read_8(adw, ADW_MC_HOST_NEXT_DONE);
	next_doneq = ADW_MC_RISC_Q_LIST_BASE + RQL_FWD
		   + (next_doneq * ADW_MC_RISC_Q_LIST_SIZE);

	next_completeq = adw_lram_read_8(adw, next_doneq);
	doneq_start = ADW_MC_NULL_Q;
	/* Loop until all completed Q's are processed. */
	while (next_completeq != ADW_MC_NULL_Q) {
		u_int32_t acb_busaddr;
		struct	  acb *acb;
		union	  ccb *ccb;

		doneq_start = next_completeq;

		next_doneq = ADW_MC_RISC_Q_LIST_BASE +
			     (next_completeq * ADW_MC_RISC_Q_LIST_SIZE);

		/*
		 * Read the ADW_SCSI_REQ_Q physical address pointer from
		 * the RISC list entry.
		 */
		acb_busaddr = adw_lram_read_32(adw, next_doneq + RQL_PHYADDR);
		acb = acbptov(adw, acb_busaddr);

		/* Change the RISC Queue List state to free. */
		adw_lram_write_8(adw, next_doneq + RQL_STATE, ADW_MC_QS_FREE);

		/* Get the RISC Queue List forward pointer. */
		next_completeq = adw_lram_read_8(adw, next_doneq + RQL_FWD);

		/* Process CCB */
		ccb = acb->ccb;
		untimeout(adwtimeout, acb, ccb->ccb_h.timeout_ch);
		if ((ccb->ccb_h.flags & CAM_DIR_MASK) != CAM_DIR_NONE) {
			bus_dmasync_op_t op;

			if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN)
				op = BUS_DMASYNC_POSTREAD;
			else
				op = BUS_DMASYNC_POSTWRITE;
			bus_dmamap_sync(adw->buffer_dmat, acb->dmamap, op);
			bus_dmamap_unload(adw->buffer_dmat, acb->dmamap);
			ccb->csio.resid = acb->queue.data_cnt;
		} else
			ccb->csio.resid = 0;

		/* Common Cases inline... */
		if (acb->queue.host_status == QHSTA_NO_ERROR
		 && (acb->queue.done_status == QD_NO_ERROR
		  || acb->queue.done_status == QD_WITH_ERROR)) {
			ccb->csio.scsi_status = acb->queue.scsi_status;
			ccb->ccb_h.status = 0;
			switch (ccb->csio.scsi_status) {
			case SCSI_STATUS_OK:
				ccb->ccb_h.status |= CAM_REQ_CMP;
				break;
			case SCSI_STATUS_CHECK_COND:
			case SCSI_STATUS_CMD_TERMINATED:
				bcopy(&acb->sense_data, &ccb->csio.sense_data,
				      ccb->csio.sense_len);
				ccb->ccb_h.status |= CAM_AUTOSNS_VALID;
				ccb->csio.sense_resid = acb->queue.sense_len;
				/* FALLTHROUGH */
			default:
				ccb->ccb_h.status |= CAM_SCSI_STATUS_ERROR
						  |  CAM_DEV_QFRZN;
				xpt_freeze_devq(ccb->ccb_h.path, /*count*/1);
				break;
			}
			adwfreeacb(adw, acb);
			xpt_done(ccb);
		} else {
			adwprocesserror(adw, acb);
		}
	}

	if (doneq_start != ADW_MC_NULL_Q)
		adw_lram_write_8(adw, ADW_MC_HOST_NEXT_DONE, doneq_start);
}

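/*
 * Translate the controller's host status for a request that did not
 * complete cleanly into the corresponding CAM status.
 */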
static void
adwprocesserror(struct adw_softc *adw, struct acb *acb)
{
	union ccb *ccb;

	ccb = acb->ccb;
	if (acb->queue.done_status == QD_ABORTED_BY_HOST) {
		ccb->ccb_h.status = CAM_REQ_ABORTED;
	} else {
		switch (acb->queue.host_status) {
		case QHSTA_M_SEL_TIMEOUT:
			ccb->ccb_h.status = CAM_SEL_TIMEOUT;
			break;
		case QHSTA_M_SXFR_OFF_UFLW:
		case QHSTA_M_SXFR_OFF_OFLW:
		case QHSTA_M_DATA_OVER_RUN:
			ccb->ccb_h.status = CAM_DATA_RUN_ERR;
			break;
		case QHSTA_M_SXFR_DESELECTED:
		case QHSTA_M_UNEXPECTED_BUS_FREE:
			ccb->ccb_h.status = CAM_UNEXP_BUSFREE;
			break;
		case QHSTA_M_QUEUE_ABORTED:
			/* BDR or Bus Reset */
			ccb->ccb_h.status = adw->last_reset;
			break;
		case QHSTA_M_SXFR_SDMA_ERR:
		case QHSTA_M_SXFR_SXFR_PERR:
		case QHSTA_M_RDMA_PERR:
			ccb->ccb_h.status = CAM_UNCOR_PARITY;
			break;
		case QHSTA_M_WTM_TIMEOUT:
		case QHSTA_M_SXFR_WD_TMO:
			/* The SCSI bus hung in a phase */
			ccb->ccb_h.status = CAM_SEQUENCE_FAIL;
			adw_idle_cmd_send(adw, ADW_IDLE_CMD_SCSI_RESET,
					  /*param*/0);
			break;
		case QHSTA_M_SXFR_XFR_PH_ERR:
			ccb->ccb_h.status = CAM_SEQUENCE_FAIL;
			break;
		case QHSTA_M_SXFR_UNKNOWN_ERROR:
			break;
		case QHSTA_M_BAD_CMPL_STATUS_IN:
			/* No command complete after a status message */
			ccb->ccb_h.status = CAM_SEQUENCE_FAIL;
			break;
		case QHSTA_M_AUTO_REQ_SENSE_FAIL:
			ccb->ccb_h.status = CAM_AUTOSENSE_FAIL;
			break;
		case QHSTA_M_INVALID_DEVICE:
			ccb->ccb_h.status = CAM_PATH_INVALID;
			break;
		case QHSTA_M_NO_AUTO_REQ_SENSE:
			/*
			 * User didn't request sense, but we got a
			 * check condition.
			 */
			ccb->csio.scsi_status = acb->queue.scsi_status;
			ccb->ccb_h.status = CAM_SCSI_STATUS_ERROR;
			break;
		default:
			panic("%s: Unhandled Host status error %x",
			      adw_name(adw), acb->queue.host_status);
			/* NOTREACHED */
		}
	}
	if (ccb->ccb_h.status != CAM_REQ_CMP) {
		xpt_freeze_devq(ccb->ccb_h.path, /*count*/1);
		ccb->ccb_h.status |= CAM_DEV_QFRZN;
	}
	adwfreeacb(adw, acb);
	xpt_done(ccb);
}

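/*
 * A command has timed out.  Attempt to recover with a Bus Device
 * Reset; if that fails, fall back to resetting the SCSI bus.
 */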
static void
adwtimeout(void *arg)
{
	struct acb	     *acb;
	union  ccb	     *ccb;
	struct adw_softc     *adw;
	adw_idle_cmd_status_t status;
	int		      s;

	acb = (struct acb *)arg;
	ccb = acb->ccb;
	adw = (struct adw_softc *)ccb->ccb_h.ccb_adw_ptr;
	xpt_print_path(ccb->ccb_h.path);
	printf("ACB %p - timed out\n", (void *)acb);

	s = splcam();

	if ((acb->state & ACB_ACTIVE) == 0) {
		xpt_print_path(ccb->ccb_h.path);
		printf("ACB %p - timed out CCB already completed\n",
		       (void *)acb);
		splx(s);
		return;
	}

	/* Attempt a BDR first */
	adw_idle_cmd_send(adw, ADW_IDLE_CMD_DEVICE_RESET,
			  ccb->ccb_h.target_id);
	splx(s);
	status = adw_idle_cmd_wait(adw);
	if (status == ADW_IDLE_CMD_SUCCESS) {
		printf("%s: BDR Delivered.  No longer in timeout\n",
		       adw_name(adw));
	} else {
		adw_idle_cmd_send(adw, ADW_IDLE_CMD_SCSI_RESET, /*param*/0);
		status = adw_idle_cmd_wait(adw);
		if (status != ADW_IDLE_CMD_SUCCESS)
			panic("%s: Bus Reset during timeout failed",
			      adw_name(adw));
	}
}

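/*
 * Notify CAM that a Bus Device Reset message was delivered to the
 * given target and record it for later status reporting.
 */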
static void
adw_handle_device_reset(struct adw_softc *adw, u_int target)
{
	struct cam_path *path;
	cam_status error;

	error = xpt_create_path(&path, /*periph*/NULL, cam_sim_path(adw->sim),
				target, CAM_LUN_WILDCARD);

	if (error == CAM_REQ_CMP) {
		xpt_async(AC_SENT_BDR, path, NULL);
		xpt_free_path(path);
	}
	adw->last_reset = CAM_BDR_SENT;
}

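/*
 * Handle a SCSI bus reset, either one we initiated or one detected on
 * the bus.
 */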
static void
adw_handle_bus_reset(struct adw_softc *adw, int initiated)
{
	if (initiated) {
		/*
		 * The microcode currently sets the SCSI Bus Reset signal
		 * while handling the AscSendIdleCmd() IDLE_CMD_SCSI_RESET
		 * command above.  But the SCSI Bus Reset Hold Time in the
		 * microcode is not deterministic (it may in fact be for less
		 * than the SCSI Spec. minimum of 25 us).  Therefore on return
		 * the Adv Library sets the SCSI Bus Reset signal for
		 * ADW_SCSI_RESET_HOLD_TIME_US, which is defined to be greater
		 * than 25 us.
		 */
		u_int scsi_ctrl;

		scsi_ctrl = adw_inw(adw, ADW_SCSI_CTRL) & ~ADW_SCSI_CTRL_RSTOUT;
		adw_outw(adw, ADW_SCSI_CTRL, scsi_ctrl | ADW_SCSI_CTRL_RSTOUT);
		DELAY(ADW_SCSI_RESET_HOLD_TIME_US);
		adw_outw(adw, ADW_SCSI_CTRL, scsi_ctrl);

		/*
		 * We will perform the async notification when the
		 * SCSI Reset interrupt occurs.
		 */
	} else
		xpt_async(AC_BUS_RESET, adw->path, NULL);
	adw->last_reset = CAM_SCSI_BUS_RESET;
}
1367