/*
 * CAM SCSI interface for the Advanced Systems Inc.
 * Second Generation SCSI controllers.
 *
 * Product specific probe and attach routines can be found in:
 *
 * pci/adw_pci.c	ABP940UW
 *
 * Copyright (c) 1998 Justin Gibbs.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions, and the following disclaimer,
 *    without modification, immediately at the beginning of the file.
 * 2. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *      $Id: adwcam.c,v 1.3 1999/05/06 20:16:12 ken Exp $
 */
/*
 * Ported from:
 * advansys.c - Linux Host Driver for AdvanSys SCSI Adapters
 *
 * Copyright (c) 1995-1998 Advanced System Products, Inc.
 * All Rights Reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that redistributions of source
 * code retain the above copyright notice and this comment without
 * modification.
 */
#include <stddef.h>	/* For offsetof */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/malloc.h>

#include <machine/bus_pio.h>
#include <machine/bus_memio.h>
#include <machine/bus.h>
#include <machine/clock.h>

#include <cam/cam.h>
#include <cam/cam_ccb.h>
#include <cam/cam_sim.h>
#include <cam/cam_xpt_sim.h>
#include <cam/cam_debug.h>

#include <cam/scsi/scsi_message.h>

#include <dev/advansys/adwvar.h>

/* Definitions for our use of the SIM private CCB area */
#define ccb_acb_ptr spriv_ptr0
#define ccb_adw_ptr spriv_ptr1

#define MIN(a, b) (((a) < (b)) ? (a) : (b))

u_long adw_unit;

static __inline u_int32_t	acbvtop(struct adw_softc *adw,
					   struct acb *acb);
static __inline struct acb *	acbptov(struct adw_softc *adw,
					u_int32_t busaddr);
static __inline struct acb*	adwgetacb(struct adw_softc *adw);
static __inline void		adwfreeacb(struct adw_softc *adw,
					   struct acb *acb);

static void		adwmapmem(void *arg, bus_dma_segment_t *segs,
				  int nseg, int error);
static struct sg_map_node*
			adwallocsgmap(struct adw_softc *adw);
static int		adwallocacbs(struct adw_softc *adw);

static void		adwexecuteacb(void *arg, bus_dma_segment_t *dm_segs,
				      int nseg, int error);
static void		adw_action(struct cam_sim *sim, union ccb *ccb);
static void		adw_poll(struct cam_sim *sim);
static void		adw_async(void *callback_arg, u_int32_t code,
				  struct cam_path *path, void *arg);
static void		adwprocesserror(struct adw_softc *adw, struct acb *acb);
static void		adwtimeout(void *arg);
static void		adw_handle_device_reset(struct adw_softc *adw,
						u_int target);
static void		adw_handle_bus_reset(struct adw_softc *adw,
					     int initiated);

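/*
 * Translate the kernel virtual address of an ACB to the bus address the
 * controller uses, and vice versa.  The ACB array lives in a single
 * physically contiguous DMA allocation, so simple offset arithmetic
 * against acbs/acb_busbase suffices.
 */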
static __inline u_int32_t
acbvtop(struct adw_softc *adw, struct acb *acb)
{
	return (adw->acb_busbase
	      + (u_int32_t)((caddr_t)acb - (caddr_t)adw->acbs));
}

static __inline struct acb *
acbptov(struct adw_softc *adw, u_int32_t busaddr)
{
	return (adw->acbs
	      + ((struct acb *)busaddr - (struct acb *)adw->acb_busbase));
}

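/*
 * Pull an ACB off the free list, growing the pool on demand until
 * max_acbs is reached.  Returns NULL if no ACB can be provided.
 */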
static __inline struct acb*
adwgetacb(struct adw_softc *adw)
{
	struct	acb* acb;
	int	s;

	s = splcam();
	if ((acb = SLIST_FIRST(&adw->free_acb_list)) != NULL) {
		SLIST_REMOVE_HEAD(&adw->free_acb_list, links);
	} else if (adw->num_acbs < adw->max_acbs) {
		adwallocacbs(adw);
		acb = SLIST_FIRST(&adw->free_acb_list);
		if (acb == NULL)
			printf("%s: Can't malloc ACB\n", adw_name(adw));
		else {
			SLIST_REMOVE_HEAD(&adw->free_acb_list, links);
		}
	}
	splx(s);

	return (acb);
}

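/*
 * Return an ACB to the free list.  If we were under a resource
 * shortage, ask CAM to release the SIM queue when this CCB completes.
 */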
static __inline void
adwfreeacb(struct adw_softc *adw, struct acb *acb)
{
	int s;

	s = splcam();
	if ((acb->state & ACB_ACTIVE) != 0)
		LIST_REMOVE(&acb->ccb->ccb_h, sim_links.le);
	if ((acb->state & ACB_RELEASE_SIMQ) != 0)
		acb->ccb->ccb_h.status |= CAM_RELEASE_SIMQ;
	else if ((adw->state & ADW_RESOURCE_SHORTAGE) != 0
	      && (acb->ccb->ccb_h.status & CAM_RELEASE_SIMQ) == 0) {
		acb->ccb->ccb_h.status |= CAM_RELEASE_SIMQ;
		adw->state &= ~ADW_RESOURCE_SHORTAGE;
	}
	acb->state = ACB_FREE;
	SLIST_INSERT_HEAD(&adw->free_acb_list, acb, links);
	splx(s);
}

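/*
 * Callback for bus_dmamap_load().  Record the bus address of the
 * single segment that was mapped.
 */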
static void
adwmapmem(void *arg, bus_dma_segment_t *segs, int nseg, int error)
{
	bus_addr_t *busaddrp;

	busaddrp = (bus_addr_t *)arg;
	*busaddrp = segs->ds_addr;
}

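/*
 * Allocate, map, and zero one page of S/G blocks, recording the mapping
 * on the sg_maps list so adw_free() can tear it down later.
 */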
static struct sg_map_node *
adwallocsgmap(struct adw_softc *adw)
{
	struct sg_map_node *sg_map;

	sg_map = malloc(sizeof(*sg_map), M_DEVBUF, M_NOWAIT);

	if (sg_map == NULL)
		return (NULL);

	/* Allocate S/G space for the next batch of ACBs */
	if (bus_dmamem_alloc(adw->sg_dmat, (void **)&sg_map->sg_vaddr,
			     BUS_DMA_NOWAIT, &sg_map->sg_dmamap) != 0) {
		free(sg_map, M_DEVBUF);
		return (NULL);
	}

	SLIST_INSERT_HEAD(&adw->sg_maps, sg_map, links);

	bus_dmamap_load(adw->sg_dmat, sg_map->sg_dmamap, sg_map->sg_vaddr,
			PAGE_SIZE, adwmapmem, &sg_map->sg_physaddr, /*flags*/0);

	bzero(sg_map->sg_vaddr, PAGE_SIZE);
	return (sg_map);
}

/*
 * Allocate another chunk of ACBs.  Return the count of entries added.
 * Assumed to be called at splcam().
 */
static int
adwallocacbs(struct adw_softc *adw)
{
	struct acb *next_acb;
	struct sg_map_node *sg_map;
	bus_addr_t busaddr;
	struct adw_sg_block *blocks;
	int newcount;
	int i;

	next_acb = &adw->acbs[adw->num_acbs];

	sg_map = adwallocsgmap(adw);

	if (sg_map == NULL)
		return (0);

	blocks = sg_map->sg_vaddr;
	busaddr = sg_map->sg_physaddr;

	newcount = (PAGE_SIZE / (ADW_SG_BLOCKCNT * sizeof(*blocks)));
	for (i = 0; adw->num_acbs < adw->max_acbs && i < newcount; i++) {
		int error;
		int j;

		error = bus_dmamap_create(adw->buffer_dmat, /*flags*/0,
					  &next_acb->dmamap);
		if (error != 0)
			break;
		next_acb->queue.scsi_req_baddr = acbvtop(adw, next_acb);
		next_acb->queue.sense_addr =
		    acbvtop(adw, next_acb) + offsetof(struct acb, sense_data);
		next_acb->sg_blocks = blocks;
		next_acb->sg_busaddr = busaddr;
		/* Setup static data in the sg blocks */
		for (j = 0; j < ADW_SG_BLOCKCNT; j++) {
			next_acb->sg_blocks[j].first_entry_no =
			    j * ADW_NO_OF_SG_PER_BLOCK;
		}
		next_acb->state = ACB_FREE;
		SLIST_INSERT_HEAD(&adw->free_acb_list, next_acb, links);
		blocks += ADW_SG_BLOCKCNT;
		busaddr += ADW_SG_BLOCKCNT * sizeof(*blocks);
		next_acb++;
		adw->num_acbs++;
	}
	return (i);
}

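/*
 * Callback for bus_dmamap_load() on a transaction's data buffer.  Fill
 * in the ACB's data pointers and S/G block chain, sync the buffer, and,
 * unless the CCB was aborted while the mapping was in flight, queue the
 * ACB to the controller and start the timeout.
 */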
static void
adwexecuteacb(void *arg, bus_dma_segment_t *dm_segs, int nseg, int error)
{
	struct	 acb *acb;
	union	 ccb *ccb;
	struct	 adw_softc *adw;
	int	 s;

	acb = (struct acb *)arg;
	ccb = acb->ccb;
	adw = (struct adw_softc *)ccb->ccb_h.ccb_adw_ptr;

	if (error != 0) {
		if (error != EFBIG)
			printf("%s: Unexpected error 0x%x returned from "
			       "bus_dmamap_load\n", adw_name(adw), error);
		if (ccb->ccb_h.status == CAM_REQ_INPROG) {
			xpt_freeze_devq(ccb->ccb_h.path, /*count*/1);
			ccb->ccb_h.status = CAM_REQ_TOO_BIG|CAM_DEV_QFRZN;
		}
		adwfreeacb(adw, acb);
		xpt_done(ccb);
		return;
	}

	if (nseg != 0) {
		bus_dmasync_op_t op;

		acb->queue.data_addr = dm_segs[0].ds_addr;
		acb->queue.data_cnt = ccb->csio.dxfer_len;
		if (nseg > 1) {
			struct adw_sg_block *sg_block;
			struct adw_sg_elm *sg;
			bus_addr_t sg_busaddr;
			u_int sg_index;
			bus_dma_segment_t *end_seg;

			end_seg = dm_segs + nseg;

			sg_busaddr = acb->sg_busaddr;
			sg_index = 0;
			/* Copy the segments into our SG list */
			for (sg_block = acb->sg_blocks;; sg_block++) {
				u_int sg_left;

				sg_left = ADW_NO_OF_SG_PER_BLOCK;
				sg = sg_block->sg_list;
				while (dm_segs < end_seg && sg_left != 0) {
					sg->sg_addr = dm_segs->ds_addr;
					sg->sg_count = dm_segs->ds_len;
					sg++;
					dm_segs++;
					sg_left--;
				}
				sg_index += ADW_NO_OF_SG_PER_BLOCK - sg_left;
				sg_block->last_entry_no = sg_index - 1;
				if (dm_segs == end_seg) {
					sg_block->sg_busaddr_next = 0;
					break;
				} else {
					sg_busaddr +=
					    sizeof(struct adw_sg_block);
					sg_block->sg_busaddr_next = sg_busaddr;
				}
			}

			acb->queue.sg_entry_cnt = nseg;
			acb->queue.sg_real_addr = acb->sg_busaddr;
		} else {
			acb->queue.sg_entry_cnt = 0;
			acb->queue.sg_real_addr = 0;
		}

		if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN)
			op = BUS_DMASYNC_PREREAD;
		else
			op = BUS_DMASYNC_PREWRITE;

		bus_dmamap_sync(adw->buffer_dmat, acb->dmamap, op);

	} else {
		acb->queue.sg_entry_cnt = 0;
		acb->queue.data_addr = 0;
		acb->queue.data_cnt = 0;
		acb->queue.sg_real_addr = 0;
	}
	acb->queue.free_scsiq_link = 0;
	acb->queue.ux_wk_data_cnt = 0;

	s = splcam();

	/*
	 * Last chance to check whether this CCB
	 * needs to be aborted.
	 */
	if (ccb->ccb_h.status != CAM_REQ_INPROG) {
		if (nseg != 0)
			bus_dmamap_unload(adw->buffer_dmat, acb->dmamap);
		adwfreeacb(adw, acb);
		xpt_done(ccb);
		splx(s);
		return;
	}

	acb->state |= ACB_ACTIVE;
	ccb->ccb_h.status |= CAM_SIM_QUEUED;
	LIST_INSERT_HEAD(&adw->pending_ccbs, &ccb->ccb_h, sim_links.le);
	ccb->ccb_h.timeout_ch =
	    timeout(adwtimeout, (caddr_t)acb,
		    (ccb->ccb_h.timeout * hz) / 1000);

	adw_send_acb(adw, acb, acbvtop(adw, acb));

	splx(s);
}

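/*
 * Main CAM entry point.  Dispatch the CCB function codes we support:
 * SCSI I/O, device and bus resets, transfer settings, geometry
 * calculation, and path inquiry.
 */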
static void
adw_action(struct cam_sim *sim, union ccb *ccb)
{
	struct	adw_softc *adw;

	CAM_DEBUG(ccb->ccb_h.path, CAM_DEBUG_TRACE, ("adw_action\n"));

	adw = (struct adw_softc *)cam_sim_softc(sim);

	switch (ccb->ccb_h.func_code) {
	/* Common cases first */
	case XPT_SCSI_IO:	/* Execute the requested I/O operation */
	{
		struct	ccb_scsiio *csio;
		struct	ccb_hdr *ccbh;
		struct	acb *acb;

		csio = &ccb->csio;
		ccbh = &ccb->ccb_h;
		/* Max supported CDB length is 12 bytes */
		if (csio->cdb_len > 12) {
			ccb->ccb_h.status = CAM_REQ_INVALID;
			xpt_done(ccb);
			return;
		}

		if ((acb = adwgetacb(adw)) == NULL) {
			int s;

			s = splcam();
			adw->state |= ADW_RESOURCE_SHORTAGE;
			splx(s);
			xpt_freeze_simq(sim, /*count*/1);
			ccb->ccb_h.status = CAM_REQUEUE_REQ;
			xpt_done(ccb);
			return;
		}

		/* Link acb and ccb so we can find one from the other */
		acb->ccb = ccb;
		ccb->ccb_h.ccb_acb_ptr = acb;
		ccb->ccb_h.ccb_adw_ptr = adw;

		acb->queue.cntl = 0;
		acb->queue.target_id = ccb->ccb_h.target_id;
		acb->queue.target_lun = ccb->ccb_h.target_lun;

		acb->queue.srb_ptr = 0;
		acb->queue.a_flag = 0;
		acb->queue.sense_len =
			MIN(csio->sense_len, sizeof(acb->sense_data));
		acb->queue.cdb_len = csio->cdb_len;

		if ((ccb->ccb_h.flags & CAM_TAG_ACTION_VALID) != 0)
			acb->queue.tag_code = csio->tag_action;
		else
			acb->queue.tag_code = 0;

		acb->queue.done_status = 0;
		acb->queue.scsi_status = 0;
		acb->queue.host_status = 0;
		acb->queue.ux_sg_ix = 0;

		if ((ccb->ccb_h.flags & CAM_CDB_POINTER) != 0) {
			if ((ccb->ccb_h.flags & CAM_CDB_PHYS) == 0) {
				bcopy(csio->cdb_io.cdb_ptr,
				      acb->queue.cdb, csio->cdb_len);
			} else {
				/* I guess I could map it in... */
				ccb->ccb_h.status = CAM_REQ_INVALID;
				adwfreeacb(adw, acb);
				xpt_done(ccb);
				return;
			}
		} else {
			bcopy(csio->cdb_io.cdb_bytes,
			      acb->queue.cdb, csio->cdb_len);
		}

		/*
		 * If we have any data to send with this command,
		 * map it into bus space.
		 */
		if ((ccbh->flags & CAM_DIR_MASK) != CAM_DIR_NONE) {
			if ((ccbh->flags & CAM_SCATTER_VALID) == 0) {
				/*
				 * We've been given a pointer
				 * to a single buffer.
				 */
				if ((ccbh->flags & CAM_DATA_PHYS) == 0) {
					int s;
					int error;

					s = splsoftvm();
					error =
					    bus_dmamap_load(adw->buffer_dmat,
							    acb->dmamap,
							    csio->data_ptr,
							    csio->dxfer_len,
							    adwexecuteacb,
							    acb, /*flags*/0);
					if (error == EINPROGRESS) {
						/*
						 * So as to maintain ordering,
						 * freeze the controller queue
						 * until our mapping is
						 * returned.
						 */
						xpt_freeze_simq(sim, 1);
						acb->state |= ACB_RELEASE_SIMQ;
					}
					splx(s);
				} else {
					struct bus_dma_segment seg;

					/* Pointer to physical buffer */
					seg.ds_addr =
					    (bus_addr_t)csio->data_ptr;
					seg.ds_len = csio->dxfer_len;
					adwexecuteacb(acb, &seg, 1, 0);
				}
			} else {
				struct bus_dma_segment *segs;

				if ((ccbh->flags & CAM_DATA_PHYS) != 0)
					panic("adw_action - Physical "
					      "segment pointers "
					      "unsupported");

				if ((ccbh->flags&CAM_SG_LIST_PHYS)==0)
					panic("adw_action - Virtual "
					      "segment addresses "
					      "unsupported");

				/* Just use the segments provided */
				segs = (struct bus_dma_segment *)csio->data_ptr;
				adwexecuteacb(acb, segs, csio->sglist_cnt,
					      (csio->sglist_cnt < ADW_SGSIZE)
					      ? 0 : EFBIG);
			}
		} else {
			adwexecuteacb(acb, NULL, 0, 0);
		}
		break;
	}
	case XPT_RESET_DEV:	/* Bus Device Reset the specified SCSI device */
	{
		adw_idle_cmd_status_t status;

		adw_idle_cmd_send(adw, ADW_IDLE_CMD_DEVICE_RESET,
				  ccb->ccb_h.target_id);
		status = adw_idle_cmd_wait(adw);
		if (status == ADW_IDLE_CMD_SUCCESS) {
			ccb->ccb_h.status = CAM_REQ_CMP;
			if (bootverbose) {
				xpt_print_path(ccb->ccb_h.path);
				printf("BDR Delivered\n");
			}
		} else
			ccb->ccb_h.status = CAM_REQ_CMP_ERR;
		xpt_done(ccb);
		break;
	}
	case XPT_ABORT:			/* Abort the specified CCB */
		/* XXX Implement */
		ccb->ccb_h.status = CAM_REQ_INVALID;
		xpt_done(ccb);
		break;
	case XPT_SET_TRAN_SETTINGS:
	{
		struct	  ccb_trans_settings *cts;
		u_int	  target_mask;
		int	  s;

		cts = &ccb->cts;
		target_mask = 0x01 << ccb->ccb_h.target_id;

		s = splcam();
		if ((cts->flags & CCB_TRANS_CURRENT_SETTINGS) != 0) {
			if ((cts->valid & CCB_TRANS_DISC_VALID) != 0) {
				u_int discenb;

				discenb =
				    adw_lram_read_16(adw, ADW_MC_DISC_ENABLE);

				if ((cts->flags & CCB_TRANS_DISC_ENB) != 0)
					discenb |= target_mask;
				else
					discenb &= ~target_mask;

				adw_lram_write_16(adw, ADW_MC_DISC_ENABLE,
						  discenb);
			}

			if ((cts->valid & CCB_TRANS_TQ_VALID) != 0) {

				if ((cts->flags & CCB_TRANS_TAG_ENB) != 0)
					adw->tagenb |= target_mask;
				else
					adw->tagenb &= ~target_mask;
			}

			if ((cts->valid & CCB_TRANS_BUS_WIDTH_VALID) != 0) {
				u_int wdtrenb_orig;
				u_int wdtrenb;
				u_int wdtrdone;

				wdtrenb_orig =
				    adw_lram_read_16(adw, ADW_MC_WDTR_ABLE);
				wdtrenb = wdtrenb_orig;
				wdtrdone = adw_lram_read_16(adw,
							    ADW_MC_WDTR_DONE);
				switch (cts->bus_width) {
				case MSG_EXT_WDTR_BUS_32_BIT:
				case MSG_EXT_WDTR_BUS_16_BIT:
					wdtrenb |= target_mask;
					break;
				case MSG_EXT_WDTR_BUS_8_BIT:
				default:
					wdtrenb &= ~target_mask;
					break;
				}
				if (wdtrenb != wdtrenb_orig) {
					adw_lram_write_16(adw,
							  ADW_MC_WDTR_ABLE,
							  wdtrenb);
					wdtrdone &= ~target_mask;
					adw_lram_write_16(adw,
							  ADW_MC_WDTR_DONE,
							  wdtrdone);
				}
			}

			if (((cts->valid & CCB_TRANS_SYNC_RATE_VALID) != 0)
			 || ((cts->valid & CCB_TRANS_SYNC_OFFSET_VALID) != 0)) {
				u_int sdtrenb_orig;
				u_int sdtrenb;
				u_int ultraenb_orig;
				u_int ultraenb;
				u_int sdtrdone;

				sdtrenb_orig =
				    adw_lram_read_16(adw, ADW_MC_SDTR_ABLE);
				sdtrenb = sdtrenb_orig;

				ultraenb_orig =
				    adw_lram_read_16(adw, ADW_MC_ULTRA_ABLE);
				ultraenb = ultraenb_orig;

				sdtrdone = adw_lram_read_16(adw,
							    ADW_MC_SDTR_DONE);

				if ((cts->valid
				   & CCB_TRANS_SYNC_RATE_VALID) != 0) {

					if (cts->sync_period == 0) {
						sdtrenb &= ~target_mask;
					} else if (cts->sync_period > 12) {
						ultraenb &= ~target_mask;
						sdtrenb |= target_mask;
					} else {
						ultraenb |= target_mask;
						sdtrenb |= target_mask;
					}
				}

				if ((cts->valid
				   & CCB_TRANS_SYNC_OFFSET_VALID) != 0) {
					if (cts->sync_offset == 0)
						sdtrenb &= ~target_mask;
				}

				if (sdtrenb != sdtrenb_orig
				 || ultraenb != ultraenb_orig) {
					adw_lram_write_16(adw, ADW_MC_SDTR_ABLE,
							  sdtrenb);
					adw_lram_write_16(adw,
							  ADW_MC_ULTRA_ABLE,
							  ultraenb);
					sdtrdone &= ~target_mask;
					adw_lram_write_16(adw, ADW_MC_SDTR_DONE,
							  sdtrdone);
				}
			}
		}
		splx(s);
		ccb->ccb_h.status = CAM_REQ_CMP;
		xpt_done(ccb);
		break;
	}
	case XPT_GET_TRAN_SETTINGS:
	/* Get default/user set transfer settings for the target */
	{
		struct	ccb_trans_settings *cts;
		u_int	target_mask;

		cts = &ccb->cts;
		target_mask = 0x01 << ccb->ccb_h.target_id;
		if ((cts->flags & CCB_TRANS_USER_SETTINGS) != 0) {
			cts->flags = 0;
			if ((adw->user_discenb & target_mask) != 0)
				cts->flags |= CCB_TRANS_DISC_ENB;

			if ((adw->user_tagenb & target_mask) != 0)
				cts->flags |= CCB_TRANS_TAG_ENB;

			if ((adw->user_wdtr & target_mask) != 0)
				cts->bus_width = MSG_EXT_WDTR_BUS_16_BIT;
			else
				cts->bus_width = MSG_EXT_WDTR_BUS_8_BIT;

			if ((adw->user_sdtr & target_mask) != 0) {
				if ((adw->user_ultra & target_mask) != 0)
					cts->sync_period = 12; /* 20MHz */
				else
					cts->sync_period = 25; /* 10MHz */
				cts->sync_offset = 15; /* XXX ??? */
			}

			cts->valid = CCB_TRANS_SYNC_RATE_VALID
				   | CCB_TRANS_SYNC_OFFSET_VALID
				   | CCB_TRANS_BUS_WIDTH_VALID
				   | CCB_TRANS_DISC_VALID
				   | CCB_TRANS_TQ_VALID;
			ccb->ccb_h.status = CAM_REQ_CMP;
		} else {
			u_int targ_tinfo;

			cts->flags = 0;
			if ((adw_lram_read_16(adw, ADW_MC_DISC_ENABLE)
			  & target_mask) != 0)
				cts->flags |= CCB_TRANS_DISC_ENB;

			if ((adw->tagenb & target_mask) != 0)
				cts->flags |= CCB_TRANS_TAG_ENB;

			targ_tinfo =
			    adw_lram_read_16(adw,
					     ADW_MC_DEVICE_HSHK_CFG_TABLE
					     + (2 * ccb->ccb_h.target_id));

			if ((targ_tinfo & ADW_HSHK_CFG_WIDE_XFR) != 0)
				cts->bus_width = MSG_EXT_WDTR_BUS_16_BIT;
			else
				cts->bus_width = MSG_EXT_WDTR_BUS_8_BIT;

			cts->sync_period =
			    ADW_HSHK_CFG_PERIOD_FACTOR(targ_tinfo);

			cts->sync_offset = targ_tinfo & ADW_HSHK_CFG_OFFSET;
			if (cts->sync_period == 0)
				cts->sync_offset = 0;

			if (cts->sync_offset == 0)
				cts->sync_period = 0;
		}
		cts->valid = CCB_TRANS_SYNC_RATE_VALID
			   | CCB_TRANS_SYNC_OFFSET_VALID
			   | CCB_TRANS_BUS_WIDTH_VALID
			   | CCB_TRANS_DISC_VALID
			   | CCB_TRANS_TQ_VALID;
		ccb->ccb_h.status = CAM_REQ_CMP;
		xpt_done(ccb);
		break;
	}
	case XPT_CALC_GEOMETRY:
	{
		struct	  ccb_calc_geometry *ccg;
		u_int32_t size_mb;
		u_int32_t secs_per_cylinder;
		int	  extended;

		/*
		 * XXX Use Adaptec translation until I find out how to
		 *     get this information from the card.
		 */
		ccg = &ccb->ccg;
		size_mb = ccg->volume_size
			/ ((1024L * 1024L) / ccg->block_size);
		extended = 1;

		if (size_mb > 1024 && extended) {
			ccg->heads = 255;
			ccg->secs_per_track = 63;
		} else {
			ccg->heads = 64;
			ccg->secs_per_track = 32;
		}
		secs_per_cylinder = ccg->heads * ccg->secs_per_track;
		ccg->cylinders = ccg->volume_size / secs_per_cylinder;
		ccb->ccb_h.status = CAM_REQ_CMP;
		xpt_done(ccb);
		break;
	}
	case XPT_RESET_BUS:		/* Reset the specified SCSI bus */
	{
		adw_idle_cmd_status_t status;

		adw_idle_cmd_send(adw, ADW_IDLE_CMD_SCSI_RESET, /*param*/0);
		status = adw_idle_cmd_wait(adw);
		if (status == ADW_IDLE_CMD_SUCCESS) {
			ccb->ccb_h.status = CAM_REQ_CMP;
			if (bootverbose) {
				xpt_print_path(adw->path);
				printf("Bus Reset Delivered\n");
			}
		} else
			ccb->ccb_h.status = CAM_REQ_CMP_ERR;
		xpt_done(ccb);
		break;
	}
	case XPT_TERM_IO:		/* Terminate the I/O process */
		/* XXX Implement */
		ccb->ccb_h.status = CAM_REQ_INVALID;
		xpt_done(ccb);
		break;
	case XPT_PATH_INQ:		/* Path routing inquiry */
	{
		struct ccb_pathinq *cpi = &ccb->cpi;

		cpi->version_num = 1;
		cpi->hba_inquiry = PI_WIDE_16|PI_SDTR_ABLE|PI_TAG_ABLE;
		cpi->target_sprt = 0;
		cpi->hba_misc = 0;
		cpi->hba_eng_cnt = 0;
		cpi->max_target = ADW_MAX_TID;
		cpi->max_lun = ADW_MAX_LUN;
		cpi->initiator_id = adw->initiator_id;
		cpi->bus_id = cam_sim_bus(sim);
		cpi->base_transfer_speed = 3300;
		strncpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN);
		strncpy(cpi->hba_vid, "AdvanSys", HBA_IDLEN);
		strncpy(cpi->dev_name, cam_sim_name(sim), DEV_IDLEN);
		cpi->unit_number = cam_sim_unit(sim);
		cpi->ccb_h.status = CAM_REQ_CMP;
		xpt_done(ccb);
		break;
	}
	default:
		ccb->ccb_h.status = CAM_REQ_INVALID;
		xpt_done(ccb);
		break;
	}
}

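/*
 * Polled-mode entry point.  Simply run the interrupt handler.
 */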
static void
adw_poll(struct cam_sim *sim)
{
	adw_intr(cam_sim_softc(sim));
}

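/*
 * Async event callback.  We register for AC_LOST_DEVICE events but
 * currently take no action on them.
 */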
static void
adw_async(void *callback_arg, u_int32_t code, struct cam_path *path, void *arg)
{
}

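/*
 * Allocate and zero a softc for the given unit, recording the bus
 * space tag and handle used to talk to the chip.
 */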
struct adw_softc *
adw_alloc(int unit, bus_space_tag_t tag, bus_space_handle_t bsh)
{
	struct	 adw_softc *adw;
	int	 i;

	/*
	 * Allocate a storage area for us
	 */
	adw = malloc(sizeof(struct adw_softc), M_DEVBUF, M_NOWAIT);
	if (adw == NULL) {
		printf("adw%d: cannot malloc!\n", unit);
		return NULL;
	}
	bzero(adw, sizeof(struct adw_softc));
	LIST_INIT(&adw->pending_ccbs);
	SLIST_INIT(&adw->sg_maps);
	adw->unit = unit;
	adw->tag = tag;
	adw->bsh = bsh;
	i = adw->unit / 10;
	adw->name = malloc(sizeof("adw") + i + 1, M_DEVBUF, M_NOWAIT);
	if (adw->name == NULL) {
		printf("adw%d: cannot malloc name!\n", unit);
		free(adw, M_DEVBUF);
		return NULL;
	}
	sprintf(adw->name, "adw%d", adw->unit);
	return(adw);
}

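/*
 * Release all resources held by a softc.  init_level records how far
 * adw_init() progressed; the case fallthroughs below unwind the
 * initialization steps in reverse order.
 */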
void
adw_free(struct adw_softc *adw)
{
	switch (adw->init_level) {
	case 6:
	{
		struct sg_map_node *sg_map;

		while ((sg_map = SLIST_FIRST(&adw->sg_maps)) != NULL) {
			SLIST_REMOVE_HEAD(&adw->sg_maps, links);
			bus_dmamap_unload(adw->sg_dmat,
					  sg_map->sg_dmamap);
			bus_dmamem_free(adw->sg_dmat, sg_map->sg_vaddr,
					sg_map->sg_dmamap);
			free(sg_map, M_DEVBUF);
		}
		bus_dma_tag_destroy(adw->sg_dmat);
	}
	case 5:
		bus_dmamap_unload(adw->acb_dmat, adw->acb_dmamap);
	case 4:
		bus_dmamem_free(adw->acb_dmat, adw->acbs,
				adw->acb_dmamap);
		bus_dmamap_destroy(adw->acb_dmat, adw->acb_dmamap);
	case 3:
		bus_dma_tag_destroy(adw->acb_dmat);
	case 2:
		bus_dma_tag_destroy(adw->buffer_dmat);
	case 1:
		bus_dma_tag_destroy(adw->parent_dmat);
	case 0:
		break;
	}
	free(adw->name, M_DEVBUF);
	free(adw, M_DEVBUF);
}

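/*
 * Reset the chip, pull our configuration from the EEPROM (restoring
 * defaults if the checksum fails), set the termination, and create
 * the DMA tags and initial ACB pool.  Returns 0 on success, -1 on
 * failure.
 */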
int
adw_init(struct adw_softc *adw)
{
	struct	  adw_eeprom eep_config;
	u_int16_t checksum;
	u_int16_t scsicfg1;

	adw_reset_chip(adw);
	checksum = adw_eeprom_read(adw, &eep_config);
	bcopy(eep_config.serial_number, adw->serial_number,
	      sizeof(adw->serial_number));
	if (checksum != eep_config.checksum) {
		u_int16_t serial_number[3];

		printf("%s: EEPROM checksum failed.  Restoring Defaults\n",
		       adw_name(adw));

		/*
		 * Restore the default EEPROM settings.
		 * Assume the 6 byte board serial number that was read
		 * from EEPROM is correct even if the EEPROM checksum
		 * failed.
		 */
		bcopy(&adw_default_eeprom, &eep_config, sizeof(eep_config));
		bcopy(adw->serial_number, eep_config.serial_number,
		      sizeof(serial_number));
		adw_eeprom_write(adw, &eep_config);
	}

	/* Pull eeprom information into our softc. */
	adw->bios_ctrl = eep_config.bios_ctrl;
	adw->user_wdtr = eep_config.wdtr_able;
	adw->user_sdtr = eep_config.sdtr_able;
	adw->user_ultra = eep_config.ultra_able;
	adw->user_tagenb = eep_config.tagqng_able;
	adw->user_discenb = eep_config.disc_enable;
	adw->max_acbs = eep_config.max_host_qng;
	adw->initiator_id = (eep_config.adapter_scsi_id & ADW_MAX_TID);

	/*
	 * Sanity check the number of host openings.
	 */
	if (adw->max_acbs > ADW_DEF_MAX_HOST_QNG)
		adw->max_acbs = ADW_DEF_MAX_HOST_QNG;
	else if (adw->max_acbs < ADW_DEF_MIN_HOST_QNG) {
		/* If the value is zero, assume it is uninitialized. */
		if (adw->max_acbs == 0)
			adw->max_acbs = ADW_DEF_MAX_HOST_QNG;
		else
			adw->max_acbs = ADW_DEF_MIN_HOST_QNG;
	}

	scsicfg1 = 0;
	switch (eep_config.termination) {
	default:
		printf("%s: Invalid EEPROM Termination Settings.\n",
		       adw_name(adw));
		printf("%s: Reverting to Automatic Termination\n",
		       adw_name(adw));
		/* FALLTHROUGH */
	case ADW_EEPROM_TERM_AUTO:
		break;
	case ADW_EEPROM_TERM_BOTH_ON:
		scsicfg1 |= ADW_SCSI_CFG1_TERM_CTL_L;
		/* FALLTHROUGH */
	case ADW_EEPROM_TERM_HIGH_ON:
		scsicfg1 |= ADW_SCSI_CFG1_TERM_CTL_H;
		/* FALLTHROUGH */
	case ADW_EEPROM_TERM_OFF:
		scsicfg1 |= ADW_SCSI_CFG1_TERM_CTL_MANUAL;
		break;
	}

	printf("%s: SCSI ID %d, ", adw_name(adw), adw->initiator_id);

	if (adw_init_chip(adw, scsicfg1) != 0)
		return (-1);

	printf("Queue Depth %d\n", adw->max_acbs);

	/* DMA tag for mapping buffers into device visible space. */
	if (bus_dma_tag_create(adw->parent_dmat, /*alignment*/1, /*boundary*/0,
			       /*lowaddr*/BUS_SPACE_MAXADDR,
			       /*highaddr*/BUS_SPACE_MAXADDR,
			       /*filter*/NULL, /*filterarg*/NULL,
			       /*maxsize*/MAXBSIZE, /*nsegments*/ADW_SGSIZE,
			       /*maxsegsz*/BUS_SPACE_MAXSIZE_32BIT,
			       /*flags*/BUS_DMA_ALLOCNOW,
			       &adw->buffer_dmat) != 0) {
		return (-1);
	}

	adw->init_level++;

	/* DMA tag for our acb structures */
	if (bus_dma_tag_create(adw->parent_dmat, /*alignment*/1, /*boundary*/0,
			       /*lowaddr*/BUS_SPACE_MAXADDR,
			       /*highaddr*/BUS_SPACE_MAXADDR,
			       /*filter*/NULL, /*filterarg*/NULL,
			       adw->max_acbs * sizeof(struct acb),
			       /*nsegments*/1,
			       /*maxsegsz*/BUS_SPACE_MAXSIZE_32BIT,
			       /*flags*/0, &adw->acb_dmat) != 0) {
		return (-1);
	}

	adw->init_level++;

	/* Allocation for our acbs */
	if (bus_dmamem_alloc(adw->acb_dmat, (void **)&adw->acbs,
			     BUS_DMA_NOWAIT, &adw->acb_dmamap) != 0) {
		return (-1);
	}

	adw->init_level++;

	/* And permanently map them */
	bus_dmamap_load(adw->acb_dmat, adw->acb_dmamap,
			adw->acbs,
			adw->max_acbs * sizeof(struct acb),
			adwmapmem, &adw->acb_busbase, /*flags*/0);

	/* Clear them out. */
	bzero(adw->acbs, adw->max_acbs * sizeof(struct acb));

	/* DMA tag for our S/G structures.  We allocate in page sized chunks */
	if (bus_dma_tag_create(adw->parent_dmat, /*alignment*/1, /*boundary*/0,
			       /*lowaddr*/BUS_SPACE_MAXADDR,
			       /*highaddr*/BUS_SPACE_MAXADDR,
			       /*filter*/NULL, /*filterarg*/NULL,
			       PAGE_SIZE, /*nsegments*/1,
			       /*maxsegsz*/BUS_SPACE_MAXSIZE_32BIT,
			       /*flags*/0, &adw->sg_dmat) != 0) {
		return (-1);
	}

	adw->init_level++;

	/* Allocate our first batch of acbs */
	if (adwallocacbs(adw) == 0)
		return (-1);

	return (0);
}

/*
 * Attach all the sub-devices we can find
 */
int
adw_attach(struct adw_softc *adw)
{
	struct ccb_setasync csa;
	struct cam_devq *devq;

	/* Start the Risc processor now that we are fully configured. */
	adw_outw(adw, ADW_RISC_CSR, ADW_RISC_CSR_RUN);

	/*
	 * Create the device queue for our SIM.
	 */
	devq = cam_simq_alloc(adw->max_acbs);
	if (devq == NULL)
		return (0);

	/*
	 * Construct our SIM entry.
	 */
	adw->sim = cam_sim_alloc(adw_action, adw_poll, "adw", adw, adw->unit,
				 1, adw->max_acbs, devq);
	if (adw->sim == NULL)
		return (0);

	/*
	 * Register the bus.
	 */
	if (xpt_bus_register(adw->sim, 0) != CAM_SUCCESS) {
		cam_sim_free(adw->sim, /*free devq*/TRUE);
		return (0);
	}

	if (xpt_create_path(&adw->path, /*periph*/NULL, cam_sim_path(adw->sim),
			    CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD)
	   == CAM_REQ_CMP) {
		xpt_setup_ccb(&csa.ccb_h, adw->path, /*priority*/5);
		csa.ccb_h.func_code = XPT_SASYNC_CB;
		csa.event_enable = AC_LOST_DEVICE;
		csa.callback = adw_async;
		csa.callback_arg = adw;
		xpt_action((union ccb *)&csa);
	}

	return (0);
}

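/*
 * Interrupt handler.  Acknowledge the chip interrupt, complete any
 * idle command that finished, note bus resets, and then walk the RISC
 * done queue, completing each finished ACB back to CAM.
 */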
void
adw_intr(void *arg)
{
	struct	adw_softc *adw;
	u_int	int_stat;
	u_int	next_doneq;
	u_int	next_completeq;
	u_int	doneq_start;

	adw = (struct adw_softc *)arg;
	if ((adw_inw(adw, ADW_CTRL_REG) & ADW_CTRL_REG_HOST_INTR) == 0)
		return;

	/* Reading the register clears the interrupt. */
	int_stat = adw_inb(adw, ADW_INTR_STATUS_REG);

	if ((int_stat & ADW_INTR_STATUS_INTRB) != 0) {
		/* Idle Command Complete */
		adw->idle_command_cmp = 1;
		switch (adw->idle_cmd) {
		case ADW_IDLE_CMD_DEVICE_RESET:
			adw_handle_device_reset(adw,
						/*target*/adw->idle_cmd_param);
			break;
		case ADW_IDLE_CMD_SCSI_RESET:
			adw_handle_bus_reset(adw, /*initiated*/TRUE);
			break;
		default:
			break;
		}
		adw->idle_cmd = ADW_IDLE_CMD_COMPLETED;
	}

	if ((int_stat & ADW_INTR_STATUS_INTRC) != 0) {
		/* SCSI Bus Reset */
		adw_handle_bus_reset(adw, /*initiated*/FALSE);
	}

	/*
	 * ADW_MC_HOST_NEXT_DONE is actually the last completed RISC
	 * Queue List request. Its forward pointer (RQL_FWD) points to the
	 * current completed RISC Queue List request.
	 */
	next_doneq = adw_lram_read_8(adw, ADW_MC_HOST_NEXT_DONE);
	next_doneq = ADW_MC_RISC_Q_LIST_BASE + RQL_FWD
		   + (next_doneq * ADW_MC_RISC_Q_LIST_SIZE);

	next_completeq = adw_lram_read_8(adw, next_doneq);
	doneq_start = ADW_MC_NULL_Q;
	/* Loop until all completed Q's are processed. */
	while (next_completeq != ADW_MC_NULL_Q) {
		u_int32_t acb_busaddr;
		struct	  acb *acb;
		union	  ccb *ccb;

		doneq_start = next_completeq;

		next_doneq = ADW_MC_RISC_Q_LIST_BASE +
			     (next_completeq * ADW_MC_RISC_Q_LIST_SIZE);

		/*
		 * Read the ADW_SCSI_REQ_Q physical address pointer from
		 * the RISC list entry.
		 */
		acb_busaddr = adw_lram_read_32(adw, next_doneq + RQL_PHYADDR);
		acb = acbptov(adw, acb_busaddr);

		/* Change the RISC Queue List state to free. */
		adw_lram_write_8(adw, next_doneq + RQL_STATE, ADW_MC_QS_FREE);

		/* Get the RISC Queue List forward pointer. */
		next_completeq = adw_lram_read_8(adw, next_doneq + RQL_FWD);

		/* Process CCB */
		ccb = acb->ccb;
		untimeout(adwtimeout, acb, ccb->ccb_h.timeout_ch);
		if ((ccb->ccb_h.flags & CAM_DIR_MASK) != CAM_DIR_NONE) {
			bus_dmasync_op_t op;

			if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN)
				op = BUS_DMASYNC_POSTREAD;
			else
				op = BUS_DMASYNC_POSTWRITE;
			bus_dmamap_sync(adw->buffer_dmat, acb->dmamap, op);
			bus_dmamap_unload(adw->buffer_dmat, acb->dmamap);
			ccb->csio.resid = acb->queue.data_cnt;
		} else
			ccb->csio.resid = 0;

		/* Common Cases inline... */
		if (acb->queue.host_status == QHSTA_NO_ERROR
		 && (acb->queue.done_status == QD_NO_ERROR
		  || acb->queue.done_status == QD_WITH_ERROR)) {
			ccb->csio.scsi_status = acb->queue.scsi_status;
			ccb->ccb_h.status = 0;
			switch (ccb->csio.scsi_status) {
			case SCSI_STATUS_OK:
				ccb->ccb_h.status |= CAM_REQ_CMP;
				break;
			case SCSI_STATUS_CHECK_COND:
			case SCSI_STATUS_CMD_TERMINATED:
				bcopy(&acb->sense_data, &ccb->csio.sense_data,
				      ccb->csio.sense_len);
				ccb->ccb_h.status |= CAM_AUTOSNS_VALID;
				ccb->csio.sense_resid = acb->queue.sense_len;
				/* FALLTHROUGH */
			default:
				ccb->ccb_h.status |= CAM_SCSI_STATUS_ERROR
						  |  CAM_DEV_QFRZN;
				xpt_freeze_devq(ccb->ccb_h.path, /*count*/1);
				break;
			}
			adwfreeacb(adw, acb);
			xpt_done(ccb);
		} else {
			adwprocesserror(adw, acb);
		}
	}

	if (doneq_start != ADW_MC_NULL_Q)
		adw_lram_write_8(adw, ADW_MC_HOST_NEXT_DONE, doneq_start);
}

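/*
 * Handle a request that completed abnormally by mapping the
 * controller's host status code to the closest CAM status before
 * finishing the CCB.
 */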
static void
adwprocesserror(struct adw_softc *adw, struct acb *acb)
{
	union ccb *ccb;

	ccb = acb->ccb;
	if (acb->queue.done_status == QD_ABORTED_BY_HOST) {
		ccb->ccb_h.status = CAM_REQ_ABORTED;
	} else {
		switch (acb->queue.host_status) {
		case QHSTA_M_SEL_TIMEOUT:
			ccb->ccb_h.status = CAM_SEL_TIMEOUT;
			break;
		case QHSTA_M_SXFR_OFF_UFLW:
		case QHSTA_M_SXFR_OFF_OFLW:
		case QHSTA_M_DATA_OVER_RUN:
			ccb->ccb_h.status = CAM_DATA_RUN_ERR;
			break;
		case QHSTA_M_SXFR_DESELECTED:
		case QHSTA_M_UNEXPECTED_BUS_FREE:
			ccb->ccb_h.status = CAM_UNEXP_BUSFREE;
			break;
		case QHSTA_M_QUEUE_ABORTED:
			/* BDR or Bus Reset */
			ccb->ccb_h.status = adw->last_reset;
			break;
		case QHSTA_M_SXFR_SDMA_ERR:
		case QHSTA_M_SXFR_SXFR_PERR:
		case QHSTA_M_RDMA_PERR:
			ccb->ccb_h.status = CAM_UNCOR_PARITY;
			break;
		case QHSTA_M_WTM_TIMEOUT:
		case QHSTA_M_SXFR_WD_TMO:
			/* The SCSI bus hung in a phase */
			ccb->ccb_h.status = CAM_SEQUENCE_FAIL;
			adw_idle_cmd_send(adw, ADW_IDLE_CMD_SCSI_RESET,
					  /*param*/0);
			break;
		case QHSTA_M_SXFR_XFR_PH_ERR:
			ccb->ccb_h.status = CAM_SEQUENCE_FAIL;
			break;
		case QHSTA_M_SXFR_UNKNOWN_ERROR:
			break;
		case QHSTA_M_BAD_CMPL_STATUS_IN:
			/* No command complete after a status message */
			ccb->ccb_h.status = CAM_SEQUENCE_FAIL;
			break;
		case QHSTA_M_AUTO_REQ_SENSE_FAIL:
			ccb->ccb_h.status = CAM_AUTOSENSE_FAIL;
			break;
		case QHSTA_M_INVALID_DEVICE:
			ccb->ccb_h.status = CAM_PATH_INVALID;
			break;
		case QHSTA_M_NO_AUTO_REQ_SENSE:
			/*
			 * User didn't request sense, but we got a
			 * check condition.
			 */
			ccb->csio.scsi_status = acb->queue.scsi_status;
			ccb->ccb_h.status = CAM_SCSI_STATUS_ERROR;
			break;
		default:
			panic("%s: Unhandled Host status error %x",
			      adw_name(adw), acb->queue.host_status);
			/* NOTREACHED */
		}
	}
	if (ccb->ccb_h.status != CAM_REQ_CMP) {
		xpt_freeze_devq(ccb->ccb_h.path, /*count*/1);
		ccb->ccb_h.status |= CAM_DEV_QFRZN;
	}
	adwfreeacb(adw, acb);
	xpt_done(ccb);
}

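/*
 * Per-command timeout handler.  First attempt a Bus Device Reset; if
 * that idle command fails, escalate to a full SCSI bus reset.
 */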
static void
adwtimeout(void *arg)
{
	struct acb	     *acb;
	union  ccb	     *ccb;
	struct adw_softc     *adw;
	adw_idle_cmd_status_t status;
	int		      s;

	acb = (struct acb *)arg;
	ccb = acb->ccb;
	adw = (struct adw_softc *)ccb->ccb_h.ccb_adw_ptr;
	xpt_print_path(ccb->ccb_h.path);
	printf("ACB %p - timed out\n", (void *)acb);

	s = splcam();

	if ((acb->state & ACB_ACTIVE) == 0) {
		xpt_print_path(ccb->ccb_h.path);
		printf("ACB %p - timed out CCB already completed\n",
		       (void *)acb);
		splx(s);
		return;
	}

	/* Attempt a BDR first */
	adw_idle_cmd_send(adw, ADW_IDLE_CMD_DEVICE_RESET,
			  ccb->ccb_h.target_id);
	splx(s);
	status = adw_idle_cmd_wait(adw);
	if (status == ADW_IDLE_CMD_SUCCESS) {
		printf("%s: BDR Delivered.  No longer in timeout\n",
		       adw_name(adw));
	} else {
		adw_idle_cmd_send(adw, ADW_IDLE_CMD_SCSI_RESET, /*param*/0);
		status = adw_idle_cmd_wait(adw);
		if (status != ADW_IDLE_CMD_SUCCESS)
			panic("%s: Bus Reset during timeout failed",
			      adw_name(adw));
	}
}

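/*
 * A Bus Device Reset was delivered.  Notify CAM so it can requeue
 * transactions for the target, and record the reset type for any
 * commands the controller aborts on our behalf.
 */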
static void
adw_handle_device_reset(struct adw_softc *adw, u_int target)
{
	struct cam_path *path;
	cam_status error;

	error = xpt_create_path(&path, /*periph*/NULL, cam_sim_path(adw->sim),
				target, CAM_LUN_WILDCARD);

	if (error == CAM_REQ_CMP) {
		xpt_async(AC_SENT_BDR, path, NULL);
		xpt_free_path(path);
	}
	adw->last_reset = CAM_BDR_SENT;
}

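/*
 * The SCSI bus was reset.  If we initiated the reset, extend the
 * reset signal to satisfy the minimum hold time; otherwise tell CAM
 * about the bus reset now.
 */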
static void
adw_handle_bus_reset(struct adw_softc *adw, int initiated)
{
	if (initiated) {
		/*
		 * The microcode currently sets the SCSI Bus Reset signal
		 * while handling the AscSendIdleCmd() IDLE_CMD_SCSI_RESET
		 * command above.  But the SCSI Bus Reset Hold Time in the
		 * microcode is not deterministic (it may in fact be for less
		 * than the SCSI Spec. minimum of 25 us).  Therefore on return
		 * the Adv Library sets the SCSI Bus Reset signal for
		 * ADW_SCSI_RESET_HOLD_TIME_US, which is defined to be greater
		 * than 25 us.
		 */
		u_int scsi_ctrl;

		scsi_ctrl = adw_inw(adw, ADW_SCSI_CTRL) & ~ADW_SCSI_CTRL_RSTOUT;
		adw_outw(adw, ADW_SCSI_CTRL, scsi_ctrl | ADW_SCSI_CTRL_RSTOUT);
		DELAY(ADW_SCSI_RESET_HOLD_TIME_US);
		adw_outw(adw, ADW_SCSI_CTRL, scsi_ctrl);

		/*
		 * We will perform the async notification when the
		 * SCSI Reset interrupt occurs.
		 */
	} else
		xpt_async(AC_BUS_RESET, adw->path, NULL);
	adw->last_reset = CAM_SCSI_BUS_RESET;
}