/*
 * CAM SCSI interface for the Advanced Systems Inc.
 * Second Generation SCSI controllers.
 *
 * Product specific probe and attach routines can be found in:
 *
 * adw_pci.c	ABP[3]940UW, ABP950UW, ABP3940U2W
 *
 * Copyright (c) 1998, 1999, 2000 Justin Gibbs.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions, and the following disclaimer,
 *    without modification.
 * 2. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD: head/sys/dev/advansys/adwcam.c 56979 2000-02-03 16:34:57Z gibbs $
 */
/*
 * Ported from:
 * advansys.c - Linux Host Driver for AdvanSys SCSI Adapters
 *
 * Copyright (c) 1995-1998 Advanced System Products, Inc.
 * All Rights Reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that redistributions of source
 * code retain the above copyright notice and this comment without
 * modification.
 */
#include <stddef.h>	/* For offsetof */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/bus.h>

#include <machine/bus_pio.h>
#include <machine/bus_memio.h>
#include <machine/bus.h>
#include <machine/clock.h>
#include <machine/resource.h>

#include <sys/rman.h>

#include <cam/cam.h>
#include <cam/cam_ccb.h>
#include <cam/cam_sim.h>
#include <cam/cam_xpt_sim.h>
#include <cam/cam_debug.h>

#include <cam/scsi/scsi_message.h>

#include <dev/advansys/adwvar.h>

/* Definitions for our use of the SIM private CCB area */
#define ccb_acb_ptr spriv_ptr0
#define ccb_adw_ptr spriv_ptr1

#define MIN(a, b) (((a) < (b)) ? (a) : (b))

u_long adw_unit;

static __inline struct acb*	adwgetacb(struct adw_softc *adw);
static __inline void		adwfreeacb(struct adw_softc *adw,
					   struct acb *acb);

static void		adwmapmem(void *arg, bus_dma_segment_t *segs,
				  int nseg, int error);
static struct sg_map_node*
			adwallocsgmap(struct adw_softc *adw);
static int		adwallocacbs(struct adw_softc *adw);

static void		adwexecuteacb(void *arg, bus_dma_segment_t *dm_segs,
				      int nseg, int error);
static void		adw_action(struct cam_sim *sim, union ccb *ccb);
static void		adw_poll(struct cam_sim *sim);
static void		adw_async(void *callback_arg, u_int32_t code,
				  struct cam_path *path, void *arg);
static void		adwprocesserror(struct adw_softc *adw, struct acb *acb);
static void		adwtimeout(void *arg);
static void		adw_handle_device_reset(struct adw_softc *adw,
						u_int target);
static void		adw_handle_bus_reset(struct adw_softc *adw,
					     int initiated);

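/*
 * Pull an ACB from the free list, growing the pool on demand until
 * we hit max_acbs.  Returns NULL if no ACB could be obtained.
 */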
static __inline struct acb*
adwgetacb(struct adw_softc *adw)
{
	struct	acb* acb;
	int	s;

	s = splcam();
	if ((acb = SLIST_FIRST(&adw->free_acb_list)) != NULL) {
		SLIST_REMOVE_HEAD(&adw->free_acb_list, links);
	} else if (adw->num_acbs < adw->max_acbs) {
		adwallocacbs(adw);
		acb = SLIST_FIRST(&adw->free_acb_list);
		if (acb == NULL)
			printf("%s: Can't malloc ACB\n", adw_name(adw));
		else {
			SLIST_REMOVE_HEAD(&adw->free_acb_list, links);
		}
	}
	splx(s);

	return (acb);
}

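/*
 * Return an ACB to the free list.  If we were under a resource
 * shortage, ask CAM to release the SIM queue when this CCB completes.
 */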
static __inline void
adwfreeacb(struct adw_softc *adw, struct acb *acb)
{
	int s;

	s = splcam();
	if ((acb->state & ACB_ACTIVE) != 0)
		LIST_REMOVE(&acb->ccb->ccb_h, sim_links.le);
	if ((acb->state & ACB_RELEASE_SIMQ) != 0)
		acb->ccb->ccb_h.status |= CAM_RELEASE_SIMQ;
	else if ((adw->state & ADW_RESOURCE_SHORTAGE) != 0
	      && (acb->ccb->ccb_h.status & CAM_RELEASE_SIMQ) == 0) {
		acb->ccb->ccb_h.status |= CAM_RELEASE_SIMQ;
		adw->state &= ~ADW_RESOURCE_SHORTAGE;
	}
	acb->state = ACB_FREE;
	SLIST_INSERT_HEAD(&adw->free_acb_list, acb, links);
	splx(s);
}

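/*
 * Callback for bus_dmamap_load() on our contiguous, single segment
 * control structure allocations; simply record the bus address.
 */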
static void
adwmapmem(void *arg, bus_dma_segment_t *segs, int nseg, int error)
{
	bus_addr_t *busaddrp;

	busaddrp = (bus_addr_t *)arg;
	*busaddrp = segs->ds_addr;
}

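/*
 * Allocate a page of DMA-able memory for S/G blocks and link it into
 * the list of S/G maps so it can be reclaimed in adw_free().
 */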
static struct sg_map_node *
adwallocsgmap(struct adw_softc *adw)
{
	struct sg_map_node *sg_map;

	sg_map = malloc(sizeof(*sg_map), M_DEVBUF, M_NOWAIT);

	if (sg_map == NULL)
		return (NULL);

	/* Allocate S/G space for the next batch of ACBs */
	if (bus_dmamem_alloc(adw->sg_dmat, (void **)&sg_map->sg_vaddr,
			     BUS_DMA_NOWAIT, &sg_map->sg_dmamap) != 0) {
		free(sg_map, M_DEVBUF);
		return (NULL);
	}

	SLIST_INSERT_HEAD(&adw->sg_maps, sg_map, links);

	bus_dmamap_load(adw->sg_dmat, sg_map->sg_dmamap, sg_map->sg_vaddr,
			PAGE_SIZE, adwmapmem, &sg_map->sg_physaddr, /*flags*/0);

	bzero(sg_map->sg_vaddr, PAGE_SIZE);
	return (sg_map);
}

/*
 * Allocate another chunk of ACBs.  Return the count of entries added.
 * Assumed to be called at splcam().
 */
static int
adwallocacbs(struct adw_softc *adw)
{
	struct acb *next_acb;
	struct sg_map_node *sg_map;
	bus_addr_t busaddr;
	struct adw_sg_block *blocks;
	int newcount;
	int i;

	next_acb = &adw->acbs[adw->num_acbs];
	sg_map = adwallocsgmap(adw);

	if (sg_map == NULL)
		return (0);

	blocks = sg_map->sg_vaddr;
	busaddr = sg_map->sg_physaddr;

	newcount = (PAGE_SIZE / (ADW_SG_BLOCKCNT * sizeof(*blocks)));
	for (i = 0; adw->num_acbs < adw->max_acbs && i < newcount; i++) {
		int error;

		error = bus_dmamap_create(adw->buffer_dmat, /*flags*/0,
					  &next_acb->dmamap);
		if (error != 0)
			break;
		next_acb->queue.scsi_req_baddr = acbvtob(adw, next_acb);
		next_acb->queue.scsi_req_bo = acbvtobo(adw, next_acb);
		next_acb->queue.sense_baddr =
		    acbvtob(adw, next_acb) + offsetof(struct acb, sense_data);
		next_acb->sg_blocks = blocks;
		next_acb->sg_busaddr = busaddr;
		next_acb->state = ACB_FREE;
		SLIST_INSERT_HEAD(&adw->free_acb_list, next_acb, links);
		blocks += ADW_SG_BLOCKCNT;
		busaddr += ADW_SG_BLOCKCNT * sizeof(*blocks);
		next_acb++;
		adw->num_acbs++;
	}
	return (i);
}

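/*
 * Callback for bus_dmamap_load() of a CCB's data.  Record the data
 * pointers in the ACB, chaining S/G blocks for multi-segment
 * transfers, and queue the request to the RISC unless the CCB was
 * aborted while the mapping was in progress.
 */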
static void
adwexecuteacb(void *arg, bus_dma_segment_t *dm_segs, int nseg, int error)
{
	struct	 acb *acb;
	union	 ccb *ccb;
	struct	 adw_softc *adw;
	int	 s;

	acb = (struct acb *)arg;
	ccb = acb->ccb;
	adw = (struct adw_softc *)ccb->ccb_h.ccb_adw_ptr;

	if (error != 0) {
		if (error != EFBIG)
			printf("%s: Unexpected error 0x%x returned from "
			       "bus_dmamap_load\n", adw_name(adw), error);
		if (ccb->ccb_h.status == CAM_REQ_INPROG) {
			xpt_freeze_devq(ccb->ccb_h.path, /*count*/1);
			ccb->ccb_h.status = CAM_REQ_TOO_BIG|CAM_DEV_QFRZN;
		}
		adwfreeacb(adw, acb);
		xpt_done(ccb);
		return;
	}

	if (nseg != 0) {
		bus_dmasync_op_t op;

		acb->queue.data_addr = dm_segs[0].ds_addr;
		acb->queue.data_cnt = ccb->csio.dxfer_len;
		if (nseg > 1) {
			struct adw_sg_block *sg_block;
			struct adw_sg_elm *sg;
			bus_addr_t sg_busaddr;
			u_int sg_index;
			bus_dma_segment_t *end_seg;

			end_seg = dm_segs + nseg;

			sg_busaddr = acb->sg_busaddr;
			sg_index = 0;
			/* Copy the segments into our SG list */
			for (sg_block = acb->sg_blocks;; sg_block++) {
				u_int i;

				sg = sg_block->sg_list;
				for (i = 0; i < ADW_NO_OF_SG_PER_BLOCK; i++) {
					if (dm_segs >= end_seg)
						break;

					sg->sg_addr = dm_segs->ds_addr;
					sg->sg_count = dm_segs->ds_len;
					sg++;
					dm_segs++;
				}
				sg_block->sg_cnt = i;
				sg_index += i;
				if (dm_segs == end_seg) {
					sg_block->sg_busaddr_next = 0;
					break;
				} else {
					sg_busaddr +=
					    sizeof(struct adw_sg_block);
					sg_block->sg_busaddr_next = sg_busaddr;
				}
			}
			acb->queue.sg_real_addr = acb->sg_busaddr;
		} else {
			acb->queue.sg_real_addr = 0;
		}

		if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN)
			op = BUS_DMASYNC_PREREAD;
		else
			op = BUS_DMASYNC_PREWRITE;

		bus_dmamap_sync(adw->buffer_dmat, acb->dmamap, op);

	} else {
		acb->queue.data_addr = 0;
		acb->queue.data_cnt = 0;
		acb->queue.sg_real_addr = 0;
	}

	s = splcam();

	/*
	 * Last chance to check if this CCB needs to be aborted
	 * before we queue it to the controller.
	 */
	if (ccb->ccb_h.status != CAM_REQ_INPROG) {
		if (nseg != 0)
			bus_dmamap_unload(adw->buffer_dmat, acb->dmamap);
		adwfreeacb(adw, acb);
		xpt_done(ccb);
		splx(s);
		return;
	}

	acb->state |= ACB_ACTIVE;
	ccb->ccb_h.status |= CAM_SIM_QUEUED;
	LIST_INSERT_HEAD(&adw->pending_ccbs, &ccb->ccb_h, sim_links.le);
	ccb->ccb_h.timeout_ch =
	    timeout(adwtimeout, (caddr_t)acb,
		    (ccb->ccb_h.timeout * hz) / 1000);

	adw_send_acb(adw, acb, acbvtob(adw, acb));

	splx(s);
}

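/*
 * CAM entry point.  XPT_SCSI_IO requests are mapped onto ACBs and
 * handed to the controller; the remaining function codes are
 * serviced inline.
 */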
static void
adw_action(struct cam_sim *sim, union ccb *ccb)
{
	struct	adw_softc *adw;

	CAM_DEBUG(ccb->ccb_h.path, CAM_DEBUG_TRACE, ("adw_action\n"));

	adw = (struct adw_softc *)cam_sim_softc(sim);

	switch (ccb->ccb_h.func_code) {
	/* Common cases first */
	case XPT_SCSI_IO:	/* Execute the requested I/O operation */
	{
		struct	ccb_scsiio *csio;
		struct	ccb_hdr *ccbh;
		struct	acb *acb;

		csio = &ccb->csio;
		ccbh = &ccb->ccb_h;

		/* Max supported CDB length is 12 bytes */
		if (csio->cdb_len > 12) {
			ccb->ccb_h.status = CAM_REQ_INVALID;
			xpt_done(ccb);
			return;
		}

		if ((acb = adwgetacb(adw)) == NULL) {
			int s;

			s = splcam();
			adw->state |= ADW_RESOURCE_SHORTAGE;
			splx(s);
			xpt_freeze_simq(sim, /*count*/1);
			ccb->ccb_h.status = CAM_REQUEUE_REQ;
			xpt_done(ccb);
			return;
		}

		/* Link acb and ccb so we can find one from the other */
		acb->ccb = ccb;
		ccb->ccb_h.ccb_acb_ptr = acb;
		ccb->ccb_h.ccb_adw_ptr = adw;

		acb->queue.cntl = 0;
		acb->queue.target_cmd = 0;
		acb->queue.target_id = ccb->ccb_h.target_id;
		acb->queue.target_lun = ccb->ccb_h.target_lun;

		acb->queue.mflag = 0;
		acb->queue.sense_len =
			MIN(csio->sense_len, sizeof(acb->sense_data));
		acb->queue.cdb_len = csio->cdb_len;
		if ((ccb->ccb_h.flags & CAM_TAG_ACTION_VALID) != 0) {
			switch (csio->tag_action) {
			case MSG_SIMPLE_Q_TAG:
				acb->queue.scsi_cntl = 0;
				break;
			case MSG_HEAD_OF_Q_TAG:
				acb->queue.scsi_cntl = ADW_QSC_HEAD_OF_Q_TAG;
				break;
			case MSG_ORDERED_Q_TAG:
				acb->queue.scsi_cntl = ADW_QSC_ORDERED_Q_TAG;
				break;
			}
		} else
			acb->queue.scsi_cntl = ADW_QSC_NO_TAGMSG;

		if ((ccb->ccb_h.flags & CAM_DIS_DISCONNECT) != 0)
			acb->queue.scsi_cntl |= ADW_QSC_NO_DISC;

		acb->queue.done_status = 0;
		acb->queue.scsi_status = 0;
		acb->queue.host_status = 0;
		acb->queue.sg_wk_ix = 0;
		if ((ccb->ccb_h.flags & CAM_CDB_POINTER) != 0) {
			if ((ccb->ccb_h.flags & CAM_CDB_PHYS) == 0) {
				bcopy(csio->cdb_io.cdb_ptr,
				      acb->queue.cdb, csio->cdb_len);
			} else {
				/* I guess I could map it in... */
				ccb->ccb_h.status = CAM_REQ_INVALID;
				adwfreeacb(adw, acb);
				xpt_done(ccb);
				return;
			}
		} else {
			bcopy(csio->cdb_io.cdb_bytes,
			      acb->queue.cdb, csio->cdb_len);
		}

		/*
		 * If we have any data to send with this command,
		 * map it into bus space.
		 */
		if ((ccbh->flags & CAM_DIR_MASK) != CAM_DIR_NONE) {
			if ((ccbh->flags & CAM_SCATTER_VALID) == 0) {
				/*
				 * We've been given a pointer
				 * to a single buffer.
				 */
				if ((ccbh->flags & CAM_DATA_PHYS) == 0) {
					int s;
					int error;

					s = splsoftvm();
					error =
					    bus_dmamap_load(adw->buffer_dmat,
							    acb->dmamap,
							    csio->data_ptr,
							    csio->dxfer_len,
							    adwexecuteacb,
							    acb, /*flags*/0);
					if (error == EINPROGRESS) {
						/*
						 * So as to maintain ordering,
						 * freeze the controller queue
						 * until our mapping is
						 * returned.
						 */
						xpt_freeze_simq(sim, 1);
						acb->state |= ACB_RELEASE_SIMQ;
					}
					splx(s);
				} else {
					struct bus_dma_segment seg;

					/* Pointer to physical buffer */
					seg.ds_addr =
					    (bus_addr_t)csio->data_ptr;
					seg.ds_len = csio->dxfer_len;
					adwexecuteacb(acb, &seg, 1, 0);
				}
			} else {
				struct bus_dma_segment *segs;

				if ((ccbh->flags & CAM_DATA_PHYS) != 0)
					panic("adw_action - Physical "
					      "segment pointers "
					      "unsupported");

				if ((ccbh->flags & CAM_SG_LIST_PHYS) == 0)
					panic("adw_action - Virtual "
					      "segment addresses "
					      "unsupported");

				/* Just use the segments provided */
				segs = (struct bus_dma_segment *)csio->data_ptr;
				adwexecuteacb(acb, segs, csio->sglist_cnt,
					      (csio->sglist_cnt < ADW_SGSIZE)
					      ? 0 : EFBIG);
			}
		} else {
			adwexecuteacb(acb, NULL, 0, 0);
		}
		break;
	}
	case XPT_RESET_DEV:	/* Bus Device Reset the specified SCSI device */
	{
		adw_idle_cmd_status_t status;

		adw_idle_cmd_send(adw, ADW_IDLE_CMD_DEVICE_RESET,
				  ccb->ccb_h.target_id);
		status = adw_idle_cmd_wait(adw);
		if (status == ADW_IDLE_CMD_SUCCESS) {
			ccb->ccb_h.status = CAM_REQ_CMP;
			if (bootverbose) {
				xpt_print_path(ccb->ccb_h.path);
				printf("BDR Delivered\n");
			}
		} else
			ccb->ccb_h.status = CAM_REQ_CMP_ERR;
		xpt_done(ccb);
		break;
	}
	case XPT_ABORT:			/* Abort the specified CCB */
		/* XXX Implement */
		ccb->ccb_h.status = CAM_REQ_INVALID;
		xpt_done(ccb);
		break;
	case XPT_SET_TRAN_SETTINGS:
	{
		struct	  ccb_trans_settings *cts;
		u_int	  target_mask;
		int	  s;

		cts = &ccb->cts;
		target_mask = 0x01 << ccb->ccb_h.target_id;

		s = splcam();
		if ((cts->flags & CCB_TRANS_CURRENT_SETTINGS) != 0) {
			u_int sdtrdone;

			sdtrdone = adw_lram_read_16(adw, ADW_MC_SDTR_DONE);
			if ((cts->valid & CCB_TRANS_DISC_VALID) != 0) {
				u_int discenb;

				discenb =
				    adw_lram_read_16(adw, ADW_MC_DISC_ENABLE);

				if ((cts->flags & CCB_TRANS_DISC_ENB) != 0)
					discenb |= target_mask;
				else
					discenb &= ~target_mask;

				adw_lram_write_16(adw, ADW_MC_DISC_ENABLE,
						  discenb);
			}

			if ((cts->valid & CCB_TRANS_TQ_VALID) != 0) {
				if ((cts->flags & CCB_TRANS_TAG_ENB) != 0)
					adw->tagenb |= target_mask;
				else
					adw->tagenb &= ~target_mask;
			}

			if ((cts->valid & CCB_TRANS_BUS_WIDTH_VALID) != 0) {
				u_int wdtrenb_orig;
				u_int wdtrenb;
				u_int wdtrdone;

				wdtrenb_orig =
				    adw_lram_read_16(adw, ADW_MC_WDTR_ABLE);
				wdtrenb = wdtrenb_orig;
				wdtrdone = adw_lram_read_16(adw,
							    ADW_MC_WDTR_DONE);
				switch (cts->bus_width) {
				case MSG_EXT_WDTR_BUS_32_BIT:
				case MSG_EXT_WDTR_BUS_16_BIT:
					wdtrenb |= target_mask;
					break;
				case MSG_EXT_WDTR_BUS_8_BIT:
				default:
					wdtrenb &= ~target_mask;
					break;
				}
				if (wdtrenb != wdtrenb_orig) {
					adw_lram_write_16(adw,
							  ADW_MC_WDTR_ABLE,
							  wdtrenb);
					wdtrdone &= ~target_mask;
					adw_lram_write_16(adw,
							  ADW_MC_WDTR_DONE,
							  wdtrdone);
					/* Wide negotiation forces async */
					sdtrdone &= ~target_mask;
					adw_lram_write_16(adw,
							  ADW_MC_SDTR_DONE,
							  sdtrdone);
				}
			}

			if (((cts->valid & CCB_TRANS_SYNC_RATE_VALID) != 0)
			 || ((cts->valid & CCB_TRANS_SYNC_OFFSET_VALID) != 0)) {
				u_int sdtr_orig;
				u_int sdtr;
				u_int sdtrable_orig;
				u_int sdtrable;

				sdtr = adw_get_chip_sdtr(adw,
							 ccb->ccb_h.target_id);
				sdtr_orig = sdtr;
				sdtrable = adw_lram_read_16(adw,
							    ADW_MC_SDTR_ABLE);
				sdtrable_orig = sdtrable;

				if ((cts->valid
				   & CCB_TRANS_SYNC_RATE_VALID) != 0) {
					sdtr =
					    adw_find_sdtr(adw,
							  cts->sync_period);
				}

				if ((cts->valid
				   & CCB_TRANS_SYNC_OFFSET_VALID) != 0) {
					if (cts->sync_offset == 0)
						sdtr = ADW_MC_SDTR_ASYNC;
				}

				if (sdtr == ADW_MC_SDTR_ASYNC)
					sdtrable &= ~target_mask;
				else
					sdtrable |= target_mask;
				if (sdtr != sdtr_orig
				 || sdtrable != sdtrable_orig) {
					adw_set_chip_sdtr(adw,
							  ccb->ccb_h.target_id,
							  sdtr);
					sdtrdone &= ~target_mask;
					adw_lram_write_16(adw, ADW_MC_SDTR_ABLE,
							  sdtrable);
					adw_lram_write_16(adw, ADW_MC_SDTR_DONE,
							  sdtrdone);
				}
			}
		}
		splx(s);
		ccb->ccb_h.status = CAM_REQ_CMP;
		xpt_done(ccb);
		break;
	}
	case XPT_GET_TRAN_SETTINGS:
	/* Get default/user set transfer settings for the target */
	{
		struct	ccb_trans_settings *cts;
		u_int	target_mask;

		cts = &ccb->cts;
		target_mask = 0x01 << ccb->ccb_h.target_id;
		if ((cts->flags & CCB_TRANS_USER_SETTINGS) != 0) {
			u_int mc_sdtr;

			cts->flags = 0;
			if ((adw->user_discenb & target_mask) != 0)
				cts->flags |= CCB_TRANS_DISC_ENB;

			if ((adw->user_tagenb & target_mask) != 0)
				cts->flags |= CCB_TRANS_TAG_ENB;

			if ((adw->user_wdtr & target_mask) != 0)
				cts->bus_width = MSG_EXT_WDTR_BUS_16_BIT;
			else
				cts->bus_width = MSG_EXT_WDTR_BUS_8_BIT;

			mc_sdtr = adw_get_user_sdtr(adw, ccb->ccb_h.target_id);
			cts->sync_period = adw_find_period(adw, mc_sdtr);
			if (cts->sync_period != 0)
				cts->sync_offset = 15; /* XXX ??? */
			else
				cts->sync_offset = 0;

			cts->valid = CCB_TRANS_SYNC_RATE_VALID
				   | CCB_TRANS_SYNC_OFFSET_VALID
				   | CCB_TRANS_BUS_WIDTH_VALID
				   | CCB_TRANS_DISC_VALID
				   | CCB_TRANS_TQ_VALID;
			ccb->ccb_h.status = CAM_REQ_CMP;
		} else {
			u_int targ_tinfo;

			cts->flags = 0;
			if ((adw_lram_read_16(adw, ADW_MC_DISC_ENABLE)
			  & target_mask) != 0)
				cts->flags |= CCB_TRANS_DISC_ENB;

			if ((adw->tagenb & target_mask) != 0)
				cts->flags |= CCB_TRANS_TAG_ENB;

			targ_tinfo =
			    adw_lram_read_16(adw,
					     ADW_MC_DEVICE_HSHK_CFG_TABLE
					     + (2 * ccb->ccb_h.target_id));

			if ((targ_tinfo & ADW_HSHK_CFG_WIDE_XFR) != 0)
				cts->bus_width = MSG_EXT_WDTR_BUS_16_BIT;
			else
				cts->bus_width = MSG_EXT_WDTR_BUS_8_BIT;

			cts->sync_period =
			    adw_hshk_cfg_period_factor(targ_tinfo);

			cts->sync_offset = targ_tinfo & ADW_HSHK_CFG_OFFSET;
			if (cts->sync_period == 0)
				cts->sync_offset = 0;

			if (cts->sync_offset == 0)
				cts->sync_period = 0;
		}
		cts->valid = CCB_TRANS_SYNC_RATE_VALID
			   | CCB_TRANS_SYNC_OFFSET_VALID
			   | CCB_TRANS_BUS_WIDTH_VALID
			   | CCB_TRANS_DISC_VALID
			   | CCB_TRANS_TQ_VALID;
		ccb->ccb_h.status = CAM_REQ_CMP;
		xpt_done(ccb);
		break;
	}
	case XPT_CALC_GEOMETRY:
	{
		struct	  ccb_calc_geometry *ccg;
		u_int32_t size_mb;
		u_int32_t secs_per_cylinder;
		int	  extended;

		/*
		 * XXX Use Adaptec translation until I find out how to
		 *     get this information from the card.
		 */
		ccg = &ccb->ccg;
		size_mb = ccg->volume_size
			/ ((1024L * 1024L) / ccg->block_size);
		extended = 1;

		if (size_mb > 1024 && extended) {
			ccg->heads = 255;
			ccg->secs_per_track = 63;
		} else {
			ccg->heads = 64;
			ccg->secs_per_track = 32;
		}
		secs_per_cylinder = ccg->heads * ccg->secs_per_track;
		ccg->cylinders = ccg->volume_size / secs_per_cylinder;
		ccb->ccb_h.status = CAM_REQ_CMP;
		xpt_done(ccb);
		break;
	}
	case XPT_RESET_BUS:		/* Reset the specified SCSI bus */
	{
		adw_idle_cmd_status_t status;

		adw_idle_cmd_send(adw, ADW_IDLE_CMD_SCSI_RESET_START,
				  /*param*/0);
		status = adw_idle_cmd_wait(adw);
		if (status != ADW_IDLE_CMD_SUCCESS) {
			ccb->ccb_h.status = CAM_REQ_CMP_ERR;
			xpt_done(ccb);
			break;
		}
		DELAY(100);
		adw_idle_cmd_send(adw, ADW_IDLE_CMD_SCSI_RESET_END, /*param*/0);
		status = adw_idle_cmd_wait(adw);
		if (status != ADW_IDLE_CMD_SUCCESS) {
			ccb->ccb_h.status = CAM_REQ_CMP_ERR;
			xpt_done(ccb);
			break;
		}
		ccb->ccb_h.status = CAM_REQ_CMP;
		if (bootverbose) {
			xpt_print_path(adw->path);
			printf("Bus Reset Delivered\n");
		}
		xpt_done(ccb);
		break;
	}
	case XPT_TERM_IO:		/* Terminate the I/O process */
		/* XXX Implement */
		ccb->ccb_h.status = CAM_REQ_INVALID;
		xpt_done(ccb);
		break;
	case XPT_PATH_INQ:		/* Path routing inquiry */
	{
		struct ccb_pathinq *cpi = &ccb->cpi;

		cpi->version_num = 1;
		cpi->hba_inquiry = PI_WIDE_16|PI_SDTR_ABLE|PI_TAG_ABLE;
		cpi->target_sprt = 0;
		cpi->hba_misc = 0;
		cpi->hba_eng_cnt = 0;
		cpi->max_target = ADW_MAX_TID;
		cpi->max_lun = ADW_MAX_LUN;
		cpi->initiator_id = adw->initiator_id;
		cpi->bus_id = cam_sim_bus(sim);
		cpi->base_transfer_speed = 3300;
		strncpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN);
		strncpy(cpi->hba_vid, "AdvanSys", HBA_IDLEN);
		strncpy(cpi->dev_name, cam_sim_name(sim), DEV_IDLEN);
		cpi->unit_number = cam_sim_unit(sim);
		cpi->ccb_h.status = CAM_REQ_CMP;
		xpt_done(ccb);
		break;
	}
	default:
		ccb->ccb_h.status = CAM_REQ_INVALID;
		xpt_done(ccb);
		break;
	}
}

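/*
 * Poll for completions by running the interrupt handler directly.
 */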
static void
adw_poll(struct cam_sim *sim)
{
	adw_intr(cam_sim_softc(sim));
}

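/*
 * Async event callback.  We register for AC_LOST_DEVICE events but
 * take no action on them yet.
 */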
static void
adw_async(void *callback_arg, u_int32_t code, struct cam_path *path, void *arg)
{
}

struct adw_softc *
adw_alloc(device_t dev, struct resource *regs, int regs_type, int regs_id)
{
	struct	 adw_softc *adw;
	int	 i;

	/*
	 * Allocate a storage area for us
	 */
	adw = malloc(sizeof(struct adw_softc), M_DEVBUF, M_NOWAIT);
	if (adw == NULL) {
		printf("adw%d: cannot malloc!\n", device_get_unit(dev));
		return NULL;
	}
	bzero(adw, sizeof(struct adw_softc));
	LIST_INIT(&adw->pending_ccbs);
	SLIST_INIT(&adw->sg_maps);
	adw->device = dev;
	adw->unit = device_get_unit(dev);
	adw->regs_res_type = regs_type;
	adw->regs_res_id = regs_id;
	adw->regs = regs;
	adw->tag = rman_get_bustag(regs);
	adw->bsh = rman_get_bushandle(regs);
	i = adw->unit / 10;
	adw->name = malloc(sizeof("adw") + i + 1, M_DEVBUF, M_NOWAIT);
	if (adw->name == NULL) {
		printf("adw%d: cannot malloc name!\n", adw->unit);
		free(adw, M_DEVBUF);
		return NULL;
	}
	sprintf(adw->name, "adw%d", adw->unit);
	return(adw);
}

void
adw_free(struct adw_softc *adw)
{
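	/*
	 * Tear down our resources in the reverse order of their creation.
	 * Each case intentionally falls through to the one below it.
	 */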
	switch (adw->init_level) {
	case 9:
	{
		struct sg_map_node *sg_map;

		while ((sg_map = SLIST_FIRST(&adw->sg_maps)) != NULL) {
			SLIST_REMOVE_HEAD(&adw->sg_maps, links);
			bus_dmamap_unload(adw->sg_dmat,
					  sg_map->sg_dmamap);
			bus_dmamem_free(adw->sg_dmat, sg_map->sg_vaddr,
					sg_map->sg_dmamap);
			free(sg_map, M_DEVBUF);
		}
		bus_dma_tag_destroy(adw->sg_dmat);
	}
	case 8:
		bus_dmamap_unload(adw->acb_dmat, adw->acb_dmamap);
	case 7:
		bus_dmamem_free(adw->acb_dmat, adw->acbs,
				adw->acb_dmamap);
		bus_dmamap_destroy(adw->acb_dmat, adw->acb_dmamap);
	case 6:
		bus_dma_tag_destroy(adw->acb_dmat);
	case 5:
		bus_dmamap_unload(adw->carrier_dmat, adw->carrier_dmamap);
	case 4:
		bus_dmamem_free(adw->carrier_dmat, adw->carriers,
				adw->carrier_dmamap);
		bus_dmamap_destroy(adw->carrier_dmat, adw->carrier_dmamap);
	case 3:
		bus_dma_tag_destroy(adw->carrier_dmat);
	case 2:
		bus_dma_tag_destroy(adw->buffer_dmat);
	case 1:
		bus_dma_tag_destroy(adw->parent_dmat);
	case 0:
		break;
	}
	free(adw->name, M_DEVBUF);
	free(adw, M_DEVBUF);
}

int
adw_init(struct adw_softc *adw)
{
	struct	  adw_eeprom eep_config;
	u_int	  tid;
	u_int	  i;
	u_int16_t checksum;
	u_int16_t scsicfg1;

	checksum = adw_eeprom_read(adw, &eep_config);
	bcopy(eep_config.serial_number, adw->serial_number,
	      sizeof(adw->serial_number));
	if (checksum != eep_config.checksum) {
		u_int16_t serial_number[3];

		adw->flags |= ADW_EEPROM_FAILED;
		printf("%s: EEPROM checksum failed.  Restoring Defaults\n",
		       adw_name(adw));

		/*
		 * Restore the default EEPROM settings.
		 * Assume the 6 byte board serial number that was read
		 * from EEPROM is correct even if the EEPROM checksum
		 * failed.
		 */
		bcopy(adw->default_eeprom, &eep_config, sizeof(eep_config));
		bcopy(adw->serial_number, eep_config.serial_number,
		      sizeof(serial_number));
		adw_eeprom_write(adw, &eep_config);
	}

	/* Pull eeprom information into our softc. */
	adw->bios_ctrl = eep_config.bios_ctrl;
	adw->user_wdtr = eep_config.wdtr_able;
	for (tid = 0; tid < ADW_MAX_TID; tid++) {
		u_int	  mc_sdtr;
		u_int16_t tid_mask;

		tid_mask = 0x1 << tid;
		if ((adw->features & ADW_ULTRA) != 0) {
			/*
			 * Ultra chips store sdtr and ultraenb
			 * bits in their seeprom, so we must
			 * construct valid mc_sdtr entries for
			 * them indirectly.
			 */
			if (eep_config.sync1.sync_enable & tid_mask) {
				if (eep_config.sync2.ultra_enable & tid_mask)
					mc_sdtr = ADW_MC_SDTR_20;
				else
					mc_sdtr = ADW_MC_SDTR_10;
			} else
				mc_sdtr = ADW_MC_SDTR_ASYNC;
		} else {
			switch (ADW_TARGET_GROUP(tid)) {
			case 3:
				mc_sdtr = eep_config.sync4.sdtr4;
				break;
			case 2:
				mc_sdtr = eep_config.sync3.sdtr3;
				break;
			case 1:
				mc_sdtr = eep_config.sync2.sdtr2;
				break;
			default: /* Shut up compiler */
			case 0:
				mc_sdtr = eep_config.sync1.sdtr1;
				break;
			}
			mc_sdtr >>= ADW_TARGET_GROUP_SHIFT(tid);
			mc_sdtr &= 0xFF;
		}
		adw_set_user_sdtr(adw, tid, mc_sdtr);
	}
	adw->user_tagenb = eep_config.tagqng_able;
	adw->user_discenb = eep_config.disc_enable;
	adw->max_acbs = eep_config.max_host_qng;
	adw->initiator_id = (eep_config.adapter_scsi_id & ADW_MAX_TID);

	/*
	 * Sanity check the number of host openings.
	 */
	if (adw->max_acbs > ADW_DEF_MAX_HOST_QNG)
		adw->max_acbs = ADW_DEF_MAX_HOST_QNG;
	else if (adw->max_acbs < ADW_DEF_MIN_HOST_QNG) {
		/* If the value is zero, assume it is uninitialized. */
		if (adw->max_acbs == 0)
			adw->max_acbs = ADW_DEF_MAX_HOST_QNG;
		else
			adw->max_acbs = ADW_DEF_MIN_HOST_QNG;
	}

	scsicfg1 = 0;
	if ((adw->features & ADW_ULTRA2) != 0) {
		switch (eep_config.termination_lvd) {
		default:
			printf("%s: Invalid EEPROM LVD Termination Settings.\n",
			       adw_name(adw));
			printf("%s: Reverting to Automatic LVD Termination\n",
			       adw_name(adw));
			/* FALLTHROUGH */
		case ADW_EEPROM_TERM_AUTO:
			break;
		case ADW_EEPROM_TERM_BOTH_ON:
			scsicfg1 |= ADW2_SCSI_CFG1_TERM_LVD_LO;
			/* FALLTHROUGH */
		case ADW_EEPROM_TERM_HIGH_ON:
			scsicfg1 |= ADW2_SCSI_CFG1_TERM_LVD_HI;
			/* FALLTHROUGH */
		case ADW_EEPROM_TERM_OFF:
			scsicfg1 |= ADW2_SCSI_CFG1_DIS_TERM_DRV;
			break;
		}
	}

	switch (eep_config.termination_se) {
	default:
		printf("%s: Invalid SE EEPROM Termination Settings.\n",
		       adw_name(adw));
		printf("%s: Reverting to Automatic SE Termination\n",
		       adw_name(adw));
		/* FALLTHROUGH */
	case ADW_EEPROM_TERM_AUTO:
		break;
	case ADW_EEPROM_TERM_BOTH_ON:
		scsicfg1 |= ADW_SCSI_CFG1_TERM_CTL_L;
		/* FALLTHROUGH */
	case ADW_EEPROM_TERM_HIGH_ON:
		scsicfg1 |= ADW_SCSI_CFG1_TERM_CTL_H;
		/* FALLTHROUGH */
	case ADW_EEPROM_TERM_OFF:
		scsicfg1 |= ADW_SCSI_CFG1_TERM_CTL_MANUAL;
		break;
	}
	printf("%s: SCSI ID %d, ", adw_name(adw), adw->initiator_id);

	/* DMA tag for mapping buffers into device visible space. */
	if (bus_dma_tag_create(adw->parent_dmat, /*alignment*/1, /*boundary*/0,
			       /*lowaddr*/BUS_SPACE_MAXADDR_32BIT,
			       /*highaddr*/BUS_SPACE_MAXADDR,
			       /*filter*/NULL, /*filterarg*/NULL,
			       /*maxsize*/MAXBSIZE, /*nsegments*/ADW_SGSIZE,
			       /*maxsegsz*/BUS_SPACE_MAXSIZE_32BIT,
			       /*flags*/BUS_DMA_ALLOCNOW,
			       &adw->buffer_dmat) != 0) {
		return (ENOMEM);
	}

	adw->init_level++;

	/* DMA tag for our ccb carrier structures */
	if (bus_dma_tag_create(adw->parent_dmat, /*alignment*/0x10,
			       /*boundary*/0,
			       /*lowaddr*/BUS_SPACE_MAXADDR_32BIT,
			       /*highaddr*/BUS_SPACE_MAXADDR,
			       /*filter*/NULL, /*filterarg*/NULL,
			       (adw->max_acbs + ADW_NUM_CARRIER_QUEUES + 1)
				* sizeof(struct adw_carrier),
			       /*nsegments*/1,
			       /*maxsegsz*/BUS_SPACE_MAXSIZE_32BIT,
			       /*flags*/0, &adw->carrier_dmat) != 0) {
		return (ENOMEM);
	}

	adw->init_level++;

	/* Allocation for our ccb carrier structures */
	if (bus_dmamem_alloc(adw->carrier_dmat, (void **)&adw->carriers,
			     BUS_DMA_NOWAIT, &adw->carrier_dmamap) != 0) {
		return (ENOMEM);
	}

	adw->init_level++;

	/* And permanently map them */
	bus_dmamap_load(adw->carrier_dmat, adw->carrier_dmamap,
			adw->carriers,
			(adw->max_acbs + ADW_NUM_CARRIER_QUEUES + 1)
			 * sizeof(struct adw_carrier),
			adwmapmem, &adw->carrier_busbase, /*flags*/0);

	/* Clear them out. */
	bzero(adw->carriers, (adw->max_acbs + ADW_NUM_CARRIER_QUEUES + 1)
			     * sizeof(struct adw_carrier));

	/* Setup our free carrier list */
	adw->free_carriers = adw->carriers;
	for (i = 0; i < adw->max_acbs + ADW_NUM_CARRIER_QUEUES; i++) {
		adw->carriers[i].carr_offset =
			carriervtobo(adw, &adw->carriers[i]);
		adw->carriers[i].carr_ba =
			carriervtob(adw, &adw->carriers[i]);
		adw->carriers[i].areq_ba = 0;
		adw->carriers[i].next_ba =
			carriervtobo(adw, &adw->carriers[i+1]);
	}
	/* Terminal carrier.  Never leaves the freelist */
	adw->carriers[i].carr_offset =
		carriervtobo(adw, &adw->carriers[i]);
	adw->carriers[i].carr_ba =
		carriervtob(adw, &adw->carriers[i]);
	adw->carriers[i].areq_ba = 0;
	adw->carriers[i].next_ba = ~0;

	adw->init_level++;

	/* DMA tag for our acb structures */
	if (bus_dma_tag_create(adw->parent_dmat, /*alignment*/1, /*boundary*/0,
			       /*lowaddr*/BUS_SPACE_MAXADDR,
			       /*highaddr*/BUS_SPACE_MAXADDR,
			       /*filter*/NULL, /*filterarg*/NULL,
			       adw->max_acbs * sizeof(struct acb),
			       /*nsegments*/1,
			       /*maxsegsz*/BUS_SPACE_MAXSIZE_32BIT,
			       /*flags*/0, &adw->acb_dmat) != 0) {
		return (ENOMEM);
	}

	adw->init_level++;

	/* Allocation for our ccbs */
	if (bus_dmamem_alloc(adw->acb_dmat, (void **)&adw->acbs,
			     BUS_DMA_NOWAIT, &adw->acb_dmamap) != 0)
		return (ENOMEM);

	adw->init_level++;

	/* And permanently map them */
	bus_dmamap_load(adw->acb_dmat, adw->acb_dmamap,
			adw->acbs,
			adw->max_acbs * sizeof(struct acb),
			adwmapmem, &adw->acb_busbase, /*flags*/0);

	/* Clear them out. */
	bzero(adw->acbs, adw->max_acbs * sizeof(struct acb));

	/* DMA tag for our S/G structures.  We allocate in page sized chunks */
	if (bus_dma_tag_create(adw->parent_dmat, /*alignment*/1, /*boundary*/0,
			       /*lowaddr*/BUS_SPACE_MAXADDR,
			       /*highaddr*/BUS_SPACE_MAXADDR,
			       /*filter*/NULL, /*filterarg*/NULL,
			       PAGE_SIZE, /*nsegments*/1,
			       /*maxsegsz*/BUS_SPACE_MAXSIZE_32BIT,
			       /*flags*/0, &adw->sg_dmat) != 0) {
		return (ENOMEM);
	}

	adw->init_level++;

	/* Allocate our first batch of ccbs */
	if (adwallocacbs(adw) == 0)
		return (ENOMEM);

	if (adw_init_chip(adw, scsicfg1) != 0)
		return (ENXIO);

	printf("Queue Depth %d\n", adw->max_acbs);

	return (0);
}

/*
 * Attach all the sub-devices we can find
 */
int
adw_attach(struct adw_softc *adw)
{
	struct ccb_setasync csa;
	struct cam_devq *devq;
	int s;
	int error;

	error = 0;
	s = splcam();
	/* Hook up our interrupt handler */
	if ((error = bus_setup_intr(adw->device, adw->irq, INTR_TYPE_CAM,
				    adw_intr, adw, &adw->ih)) != 0) {
		device_printf(adw->device, "bus_setup_intr() failed: %d\n",
			      error);
		goto fail;
	}

	/* Start the Risc processor now that we are fully configured. */
	adw_outw(adw, ADW_RISC_CSR, ADW_RISC_CSR_RUN);

	/*
	 * Create the device queue for our SIM.
	 */
	devq = cam_simq_alloc(adw->max_acbs);
	if (devq == NULL)
		return (ENOMEM);

	/*
	 * Construct our SIM entry.
	 */
	adw->sim = cam_sim_alloc(adw_action, adw_poll, "adw", adw, adw->unit,
				 1, adw->max_acbs, devq);
	if (adw->sim == NULL) {
		error = ENOMEM;
		goto fail;
	}

	/*
	 * Register the bus.
	 */
	if (xpt_bus_register(adw->sim, 0) != CAM_SUCCESS) {
		cam_sim_free(adw->sim, /*free devq*/TRUE);
		error = ENOMEM;
		goto fail;
	}

	if (xpt_create_path(&adw->path, /*periph*/NULL, cam_sim_path(adw->sim),
			    CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD)
	   == CAM_REQ_CMP) {
		xpt_setup_ccb(&csa.ccb_h, adw->path, /*priority*/5);
		csa.ccb_h.func_code = XPT_SASYNC_CB;
		csa.event_enable = AC_LOST_DEVICE;
		csa.callback = adw_async;
		csa.callback_arg = adw;
		xpt_action((union ccb *)&csa);
	}

fail:
	splx(s);
	return (error);
}

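/*
 * Interrupt handler.  Acknowledge the chip, service any async
 * microcode event, and reap completed requests from the response
 * queue.
 */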
void
adw_intr(void *arg)
{
	struct	adw_softc *adw;
	u_int	int_stat;

	adw = (struct adw_softc *)arg;
	if ((adw_inw(adw, ADW_CTRL_REG) & ADW_CTRL_REG_HOST_INTR) == 0)
		return;

	/* Reading the register clears the interrupt. */
	int_stat = adw_inb(adw, ADW_INTR_STATUS_REG);

	if ((int_stat & ADW_INTR_STATUS_INTRB) != 0) {
		u_int intrb_code;

		/* Async Microcode Event */
		intrb_code = adw_lram_read_8(adw, ADW_MC_INTRB_CODE);
		switch (intrb_code) {
		case ADW_ASYNC_CARRIER_READY_FAILURE:
			/*
			 * The RISC missed our update of
			 * the commandq.
			 */
			if (LIST_FIRST(&adw->pending_ccbs) != NULL)
				adw_tickle_risc(adw, ADW_TICKLE_A);
			break;
		case ADW_ASYNC_SCSI_BUS_RESET_DET:
			/*
			 * The firmware detected a SCSI Bus reset.
			 */
			printf("Someone Reset the Bus\n");
			adw_handle_bus_reset(adw, /*initiated*/FALSE);
			break;
		case ADW_ASYNC_RDMA_FAILURE:
			/*
			 * Handle RDMA failure by resetting the
			 * SCSI Bus and chip.
			 */
#if XXX
			AdvResetChipAndSB(adv_dvc_varp);
#endif
			break;

		case ADW_ASYNC_HOST_SCSI_BUS_RESET:
			/*
			 * Host generated SCSI bus reset occurred.
			 */
			adw_handle_bus_reset(adw, /*initiated*/TRUE);
			break;
		default:
			printf("adw_intr: unknown async code 0x%x\n",
			       intrb_code);
			break;
		}
	}

	/*
	 * Run down the ResponseQ, completing each finished request.
	 */
	while ((adw->responseq->next_ba & ADW_RQ_DONE) != 0) {
		struct adw_carrier *free_carrier;
		struct acb *acb;
		union ccb *ccb;

#if 0
		printf("0x%x, 0x%x, 0x%x, 0x%x\n",
		       adw->responseq->carr_offset,
		       adw->responseq->carr_ba,
		       adw->responseq->areq_ba,
		       adw->responseq->next_ba);
#endif
		/*
		 * The firmware copies the adw_scsi_req_q.acb_baddr
		 * field into the areq_ba field of the carrier.
		 */
		acb = acbbotov(adw, adw->responseq->areq_ba);

		/*
		 * The least significant four bits of the next_ba
		 * field are used as flags.  Mask them out and then
		 * advance through the list.
		 */
		free_carrier = adw->responseq;
		adw->responseq =
		    carrierbotov(adw, free_carrier->next_ba & ADW_NEXT_BA_MASK);
		free_carrier->next_ba = adw->free_carriers->carr_offset;
		adw->free_carriers = free_carrier;

		/* Process CCB */
		ccb = acb->ccb;
		untimeout(adwtimeout, acb, ccb->ccb_h.timeout_ch);
		if ((ccb->ccb_h.flags & CAM_DIR_MASK) != CAM_DIR_NONE) {
			bus_dmasync_op_t op;

			if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN)
				op = BUS_DMASYNC_POSTREAD;
			else
				op = BUS_DMASYNC_POSTWRITE;
			bus_dmamap_sync(adw->buffer_dmat, acb->dmamap, op);
			bus_dmamap_unload(adw->buffer_dmat, acb->dmamap);
			ccb->csio.resid = acb->queue.data_cnt;
		} else
			ccb->csio.resid = 0;

		/* Common Cases inline... */
		if (acb->queue.host_status == QHSTA_NO_ERROR
		 && (acb->queue.done_status == QD_NO_ERROR
		  || acb->queue.done_status == QD_WITH_ERROR)) {
			ccb->csio.scsi_status = acb->queue.scsi_status;
			ccb->ccb_h.status = 0;
			switch (ccb->csio.scsi_status) {
			case SCSI_STATUS_OK:
				ccb->ccb_h.status |= CAM_REQ_CMP;
				break;
			case SCSI_STATUS_CHECK_COND:
			case SCSI_STATUS_CMD_TERMINATED:
				bcopy(&acb->sense_data, &ccb->csio.sense_data,
				      ccb->csio.sense_len);
				ccb->ccb_h.status |= CAM_AUTOSNS_VALID;
				ccb->csio.sense_resid = acb->queue.sense_len;
				/* FALLTHROUGH */
			default:
				ccb->ccb_h.status |= CAM_SCSI_STATUS_ERROR
						  |  CAM_DEV_QFRZN;
				xpt_freeze_devq(ccb->ccb_h.path, /*count*/1);
				break;
			}
			adwfreeacb(adw, acb);
			xpt_done(ccb);
		} else {
			adwprocesserror(adw, acb);
		}
	}
}

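/*
 * Translate the firmware's done/host status for a failed request
 * into the corresponding CAM status.
 */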
static void
adwprocesserror(struct adw_softc *adw, struct acb *acb)
{
	union ccb *ccb;

	ccb = acb->ccb;
	if (acb->queue.done_status == QD_ABORTED_BY_HOST) {
		ccb->ccb_h.status = CAM_REQ_ABORTED;
	} else {
		switch (acb->queue.host_status) {
		case QHSTA_M_SEL_TIMEOUT:
			ccb->ccb_h.status = CAM_SEL_TIMEOUT;
			break;
		case QHSTA_M_SXFR_OFF_UFLW:
		case QHSTA_M_SXFR_OFF_OFLW:
		case QHSTA_M_DATA_OVER_RUN:
			ccb->ccb_h.status = CAM_DATA_RUN_ERR;
			break;
		case QHSTA_M_SXFR_DESELECTED:
		case QHSTA_M_UNEXPECTED_BUS_FREE:
			ccb->ccb_h.status = CAM_UNEXP_BUSFREE;
			break;
		case QHSTA_M_QUEUE_ABORTED:
			/* BDR or Bus Reset */
			ccb->ccb_h.status = adw->last_reset;
			break;
		case QHSTA_M_SXFR_SDMA_ERR:
		case QHSTA_M_SXFR_SXFR_PERR:
		case QHSTA_M_RDMA_PERR:
			ccb->ccb_h.status = CAM_UNCOR_PARITY;
			break;
		case QHSTA_M_WTM_TIMEOUT:
		case QHSTA_M_SXFR_WD_TMO:
		{
			adw_idle_cmd_status_t status;

			/* The SCSI bus hung in a phase */
			ccb->ccb_h.status = CAM_SEQUENCE_FAIL;
			adw_idle_cmd_send(adw, ADW_IDLE_CMD_SCSI_RESET_START,
					  /*param*/0);
			status = adw_idle_cmd_wait(adw);
			if (status != ADW_IDLE_CMD_SUCCESS)
				panic("%s: Bus Reset during WD timeout failed",
				      adw_name(adw));
			DELAY(100);
			adw_idle_cmd_send(adw, ADW_IDLE_CMD_SCSI_RESET_END,
					  /*param*/0);
			status = adw_idle_cmd_wait(adw);
			if (status != ADW_IDLE_CMD_SUCCESS)
				panic("%s: Bus Reset during WD timeout failed",
				      adw_name(adw));
			break;
		}
		case QHSTA_M_SXFR_XFR_PH_ERR:
			ccb->ccb_h.status = CAM_SEQUENCE_FAIL;
			break;
		case QHSTA_M_SXFR_UNKNOWN_ERROR:
			break;
		case QHSTA_M_BAD_CMPL_STATUS_IN:
			/* No command complete after a status message */
			ccb->ccb_h.status = CAM_SEQUENCE_FAIL;
			break;
		case QHSTA_M_AUTO_REQ_SENSE_FAIL:
			ccb->ccb_h.status = CAM_AUTOSENSE_FAIL;
			break;
		case QHSTA_M_INVALID_DEVICE:
			ccb->ccb_h.status = CAM_PATH_INVALID;
			break;
		case QHSTA_M_NO_AUTO_REQ_SENSE:
			/*
			 * User didn't request sense, but we got a
			 * check condition.
			 */
			ccb->csio.scsi_status = acb->queue.scsi_status;
			ccb->ccb_h.status = CAM_SCSI_STATUS_ERROR;
			break;
		default:
			panic("%s: Unhandled Host status error %x",
			      adw_name(adw), acb->queue.host_status);
			/* NOTREACHED */
		}
	}
	if (ccb->ccb_h.status != CAM_REQ_CMP) {
		xpt_freeze_devq(ccb->ccb_h.path, /*count*/1);
		ccb->ccb_h.status |= CAM_DEV_QFRZN;
	}
	adwfreeacb(adw, acb);
	xpt_done(ccb);
}

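/*
 * A command timed out.  Attempt a bus device reset first; if the BDR
 * fails, escalate to a full SCSI bus reset.
 */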
static void
adwtimeout(void *arg)
{
	struct acb	     *acb;
	union  ccb	     *ccb;
	struct adw_softc     *adw;
	adw_idle_cmd_status_t status;
	int		      s;

	acb = (struct acb *)arg;
	ccb = acb->ccb;
	adw = (struct adw_softc *)ccb->ccb_h.ccb_adw_ptr;
	xpt_print_path(ccb->ccb_h.path);
	printf("ACB %p - timed out\n", (void *)acb);

	s = splcam();

	if ((acb->state & ACB_ACTIVE) == 0) {
		xpt_print_path(ccb->ccb_h.path);
		printf("ACB %p - timed out CCB already completed\n",
		       (void *)acb);
		splx(s);
		return;
	}

	/* Attempt a BDR first */
	adw_idle_cmd_send(adw, ADW_IDLE_CMD_DEVICE_RESET,
			  ccb->ccb_h.target_id);
	splx(s);
	status = adw_idle_cmd_wait(adw);
	if (status == ADW_IDLE_CMD_SUCCESS) {
		printf("%s: BDR Delivered.  No longer in timeout\n",
		       adw_name(adw));
		adw_handle_device_reset(adw, ccb->ccb_h.target_id);
	} else {
		adw_idle_cmd_send(adw, ADW_IDLE_CMD_SCSI_RESET_START,
				  /*param*/0);
		status = adw_idle_cmd_wait(adw);
		if (status != ADW_IDLE_CMD_SUCCESS)
			panic("%s: Bus Reset during timeout failed",
			      adw_name(adw));
		DELAY(100);
		adw_idle_cmd_send(adw, ADW_IDLE_CMD_SCSI_RESET_END,
				  /*param*/0);
		status = adw_idle_cmd_wait(adw);
		if (status != ADW_IDLE_CMD_SUCCESS)
			panic("%s: Bus Reset during timeout failed",
			      adw_name(adw));
	}
}

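/*
 * Notify consumers that a BDR was delivered to the given target and
 * record the reset so aborted commands report the proper status.
 */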
static void
adw_handle_device_reset(struct adw_softc *adw, u_int target)
{
	struct cam_path *path;
	cam_status error;

	error = xpt_create_path(&path, /*periph*/NULL, cam_sim_path(adw->sim),
				target, CAM_LUN_WILDCARD);

	if (error == CAM_REQ_CMP) {
		xpt_async(AC_SENT_BDR, path, NULL);
		xpt_free_path(path);
	}
	adw->last_reset = CAM_BDR_SENT;
}

static void
adw_handle_bus_reset(struct adw_softc *adw, int initiated)
{
	if (initiated) {
		/*
		 * The microcode currently sets the SCSI Bus Reset signal
		 * while handling the AscSendIdleCmd() IDLE_CMD_SCSI_RESET
		 * command above.  But the SCSI Bus Reset Hold Time in the
		 * microcode is not deterministic (it may in fact be for less
		 * than the SCSI Spec. minimum of 25 us).  Therefore on return
		 * the Adv Library sets the SCSI Bus Reset signal for
		 * ADW_SCSI_RESET_HOLD_TIME_US, which is defined to be greater
		 * than 25 us.
		 */
		u_int scsi_ctrl;

		scsi_ctrl = adw_inw(adw, ADW_SCSI_CTRL) & ~ADW_SCSI_CTRL_RSTOUT;
		adw_outw(adw, ADW_SCSI_CTRL, scsi_ctrl | ADW_SCSI_CTRL_RSTOUT);
		DELAY(ADW_SCSI_RESET_HOLD_TIME_US);
		adw_outw(adw, ADW_SCSI_CTRL, scsi_ctrl);

		/*
		 * We will perform the async notification when the
		 * SCSI Reset interrupt occurs.
		 */
	} else
		xpt_async(AC_BUS_RESET, adw->path, NULL);
	adw->last_reset = CAM_SCSI_BUS_RESET;
}