/*-
 * CAM SCSI interface for the Advanced Systems Inc.
 * Second Generation SCSI controllers.
 *
 * Product specific probe and attach routines can be found in:
 *
 * adw_pci.c	ABP[3]940UW, ABP950UW, ABP3940U2W
 *
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 1998, 1999, 2000 Justin Gibbs.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions, and the following disclaimer,
 *    without modification.
 * 2. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
/*
 * Ported from:
 * advansys.c - Linux Host Driver for AdvanSys SCSI Adapters
 *
 * Copyright (c) 1995-1998 Advanced System Products, Inc.
 * All Rights Reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that redistributions of source
 * code retain the above copyright notice and this comment without
 * modification.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/conf.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/lock.h>
#include <sys/module.h>
#include <sys/mutex.h>
#include <sys/bus.h>

#include <machine/bus.h>
#include <machine/resource.h>

#include <sys/rman.h>

#include <cam/cam.h>
#include <cam/cam_ccb.h>
#include <cam/cam_sim.h>
#include <cam/cam_xpt_sim.h>
#include <cam/cam_debug.h>

#include <cam/scsi/scsi_message.h>

#include <dev/advansys/adwvar.h>

/* Definitions for our use of the SIM private CCB area */
#define ccb_acb_ptr spriv_ptr0
#define ccb_adw_ptr spriv_ptr1
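
/*
 * Each CCB accepted by adw_action() records its ACB and softc in these
 * private fields, so the completion, error, and timeout paths can recover
 * both structures from the CCB alone (the ACB points back via acb->ccb).
 */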

static __inline struct acb*	adwgetacb(struct adw_softc *adw);
static __inline void		adwfreeacb(struct adw_softc *adw,
					   struct acb *acb);

static void		adwmapmem(void *arg, bus_dma_segment_t *segs,
				  int nseg, int error);
static struct sg_map_node*
			adwallocsgmap(struct adw_softc *adw);
static int		adwallocacbs(struct adw_softc *adw);

static void		adwexecuteacb(void *arg, bus_dma_segment_t *dm_segs,
				      int nseg, int error);
static void		adw_action(struct cam_sim *sim, union ccb *ccb);
static void		adw_intr_locked(struct adw_softc *adw);
static void		adw_poll(struct cam_sim *sim);
static void		adw_async(void *callback_arg, u_int32_t code,
				  struct cam_path *path, void *arg);
static void		adwprocesserror(struct adw_softc *adw, struct acb *acb);
static void		adwtimeout(void *arg);
static void		adw_handle_device_reset(struct adw_softc *adw,
						u_int target);
static void		adw_handle_bus_reset(struct adw_softc *adw,
					     int initiated);

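/*
 * ACBs are allocated lazily: when the free list is empty, adwgetacb()
 * grows it one page-sized batch at a time via adwallocacbs(), up to the
 * max_acbs limit taken from the EEPROM in adw_init().
 */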
static __inline struct acb*
adwgetacb(struct adw_softc *adw)
{
	struct	acb* acb;

	if (!dumping)
		mtx_assert(&adw->lock, MA_OWNED);
	if ((acb = SLIST_FIRST(&adw->free_acb_list)) != NULL) {
		SLIST_REMOVE_HEAD(&adw->free_acb_list, links);
	} else if (adw->num_acbs < adw->max_acbs) {
		adwallocacbs(adw);
		acb = SLIST_FIRST(&adw->free_acb_list);
		if (acb == NULL)
			device_printf(adw->device, "Can't malloc ACB\n");
		else {
			SLIST_REMOVE_HEAD(&adw->free_acb_list, links);
		}
	}

	return (acb);
}

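/*
 * When adw_action() exhausts the ACB pool it freezes the SIM queue and
 * sets ADW_RESOURCE_SHORTAGE; the next ACB freed here hands the thaw
 * back to CAM by tagging its CCB with CAM_RELEASE_SIMQ.
 */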
static __inline void
adwfreeacb(struct adw_softc *adw, struct acb *acb)
{

	if (!dumping)
		mtx_assert(&adw->lock, MA_OWNED);
	if ((acb->state & ACB_ACTIVE) != 0)
		LIST_REMOVE(&acb->ccb->ccb_h, sim_links.le);
	if ((acb->state & ACB_RELEASE_SIMQ) != 0)
		acb->ccb->ccb_h.status |= CAM_RELEASE_SIMQ;
	else if ((adw->state & ADW_RESOURCE_SHORTAGE) != 0
	      && (acb->ccb->ccb_h.status & CAM_RELEASE_SIMQ) == 0) {
		acb->ccb->ccb_h.status |= CAM_RELEASE_SIMQ;
		adw->state &= ~ADW_RESOURCE_SHORTAGE;
	}
	acb->state = ACB_FREE;
	SLIST_INSERT_HEAD(&adw->free_acb_list, acb, links);
}

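/*
 * bus_dmamap_load() callback for the single-segment, contiguous
 * allocations in this driver (carriers, ACBs, and S/G pages); it simply
 * records the bus address of the lone segment.
 */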
static void
adwmapmem(void *arg, bus_dma_segment_t *segs, int nseg, int error)
{
	bus_addr_t *busaddrp;

	busaddrp = (bus_addr_t *)arg;
	*busaddrp = segs->ds_addr;
}

static struct sg_map_node *
adwallocsgmap(struct adw_softc *adw)
{
	struct sg_map_node *sg_map;

	sg_map = malloc(sizeof(*sg_map), M_DEVBUF, M_NOWAIT);

	if (sg_map == NULL)
		return (NULL);

	/* Allocate S/G space for the next batch of ACBs */
	if (bus_dmamem_alloc(adw->sg_dmat, (void **)&sg_map->sg_vaddr,
			     BUS_DMA_NOWAIT, &sg_map->sg_dmamap) != 0) {
		free(sg_map, M_DEVBUF);
		return (NULL);
	}

	SLIST_INSERT_HEAD(&adw->sg_maps, sg_map, links);

	bus_dmamap_load(adw->sg_dmat, sg_map->sg_dmamap, sg_map->sg_vaddr,
			PAGE_SIZE, adwmapmem, &sg_map->sg_physaddr, /*flags*/0);

	bzero(sg_map->sg_vaddr, PAGE_SIZE);
	return (sg_map);
}
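
/*
 * Each PAGE_SIZE S/G map allocated above is carved up by adwallocacbs():
 * every new ACB receives a run of ADW_SG_BLOCKCNT blocks out of the page.
 */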

/*
 * Allocate another chunk of ACBs.  Return the count of entries added.
 */
static int
adwallocacbs(struct adw_softc *adw)
{
	struct acb *next_acb;
	struct sg_map_node *sg_map;
	bus_addr_t busaddr;
	struct adw_sg_block *blocks;
	int newcount;
	int i;

	next_acb = &adw->acbs[adw->num_acbs];
	sg_map = adwallocsgmap(adw);

	if (sg_map == NULL)
		return (0);

	blocks = sg_map->sg_vaddr;
	busaddr = sg_map->sg_physaddr;

	newcount = (PAGE_SIZE / (ADW_SG_BLOCKCNT * sizeof(*blocks)));
	for (i = 0; adw->num_acbs < adw->max_acbs && i < newcount; i++) {
		int error;

		error = bus_dmamap_create(adw->buffer_dmat, /*flags*/0,
					  &next_acb->dmamap);
		if (error != 0)
			break;
		next_acb->queue.scsi_req_baddr = acbvtob(adw, next_acb);
		next_acb->queue.scsi_req_bo = acbvtobo(adw, next_acb);
		next_acb->queue.sense_baddr =
		    acbvtob(adw, next_acb) + offsetof(struct acb, sense_data);
		next_acb->sg_blocks = blocks;
		next_acb->sg_busaddr = busaddr;
		next_acb->state = ACB_FREE;
		callout_init_mtx(&next_acb->timer, &adw->lock, 0);
		SLIST_INSERT_HEAD(&adw->free_acb_list, next_acb, links);
		blocks += ADW_SG_BLOCKCNT;
		busaddr += ADW_SG_BLOCKCNT * sizeof(*blocks);
		next_acb++;
		adw->num_acbs++;
	}
	return (i);
}

static void
adwexecuteacb(void *arg, bus_dma_segment_t *dm_segs, int nseg, int error)
{
	struct	 acb *acb;
	union	 ccb *ccb;
	struct	 adw_softc *adw;

	acb = (struct acb *)arg;
	ccb = acb->ccb;
	adw = (struct adw_softc *)ccb->ccb_h.ccb_adw_ptr;

	if (!dumping)
		mtx_assert(&adw->lock, MA_OWNED);
	if (error != 0) {
		if (error != EFBIG)
			device_printf(adw->device, "Unexpected error 0x%x "
			    "returned from bus_dmamap_load\n", error);
		if (ccb->ccb_h.status == CAM_REQ_INPROG) {
			xpt_freeze_devq(ccb->ccb_h.path, /*count*/1);
			ccb->ccb_h.status = CAM_REQ_TOO_BIG|CAM_DEV_QFRZN;
		}
		adwfreeacb(adw, acb);
		xpt_done(ccb);
		return;
	}

	if (nseg != 0) {
		bus_dmasync_op_t op;

		acb->queue.data_addr = dm_segs[0].ds_addr;
		acb->queue.data_cnt = ccb->csio.dxfer_len;
		if (nseg > 1) {
			struct adw_sg_block *sg_block;
			struct adw_sg_elm *sg;
			bus_addr_t sg_busaddr;
			u_int sg_index;
			bus_dma_segment_t *end_seg;

			end_seg = dm_segs + nseg;

			sg_busaddr = acb->sg_busaddr;
			sg_index = 0;
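			/*
			 * The controller walks the S/G blocks as a linked
			 * list in bus space: each block holds up to
			 * ADW_NO_OF_SG_PER_BLOCK elements and chains to the
			 * next through sg_busaddr_next (zero terminates).
			 */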
			/* Copy the segments into our SG list */
			for (sg_block = acb->sg_blocks;; sg_block++) {
				u_int i;

				sg = sg_block->sg_list;
				for (i = 0; i < ADW_NO_OF_SG_PER_BLOCK; i++) {
					if (dm_segs >= end_seg)
						break;

					sg->sg_addr = dm_segs->ds_addr;
					sg->sg_count = dm_segs->ds_len;
					sg++;
					dm_segs++;
				}
				sg_block->sg_cnt = i;
				sg_index += i;
				if (dm_segs == end_seg) {
					sg_block->sg_busaddr_next = 0;
					break;
				} else {
					sg_busaddr +=
					    sizeof(struct adw_sg_block);
					sg_block->sg_busaddr_next = sg_busaddr;
				}
			}
			acb->queue.sg_real_addr = acb->sg_busaddr;
		} else {
			acb->queue.sg_real_addr = 0;
		}

		if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN)
			op = BUS_DMASYNC_PREREAD;
		else
			op = BUS_DMASYNC_PREWRITE;

		bus_dmamap_sync(adw->buffer_dmat, acb->dmamap, op);
	} else {
		acb->queue.data_addr = 0;
		acb->queue.data_cnt = 0;
		acb->queue.sg_real_addr = 0;
	}

	/*
	 * Last chance to check whether this CCB needs to be aborted.
	 */
	if (ccb->ccb_h.status != CAM_REQ_INPROG) {
		if (nseg != 0)
			bus_dmamap_unload(adw->buffer_dmat, acb->dmamap);
		adwfreeacb(adw, acb);
		xpt_done(ccb);
		return;
	}

	acb->state |= ACB_ACTIVE;
	ccb->ccb_h.status |= CAM_SIM_QUEUED;
	LIST_INSERT_HEAD(&adw->pending_ccbs, &ccb->ccb_h, sim_links.le);
	callout_reset_sbt(&acb->timer, SBT_1MS * ccb->ccb_h.timeout, 0,
	    adwtimeout, acb, 0);

	adw_send_acb(adw, acb, acbvtob(adw, acb));
}

static void
adw_action(struct cam_sim *sim, union ccb *ccb)
{
	struct	adw_softc *adw;

	CAM_DEBUG(ccb->ccb_h.path, CAM_DEBUG_TRACE, ("adw_action\n"));

	adw = (struct adw_softc *)cam_sim_softc(sim);
	if (!dumping)
		mtx_assert(&adw->lock, MA_OWNED);

	switch (ccb->ccb_h.func_code) {
	/* Common cases first */
	case XPT_SCSI_IO:	/* Execute the requested I/O operation */
	{
		struct	ccb_scsiio *csio;
		struct	acb *acb;
		int error;

		csio = &ccb->csio;

		/* Max supported CDB length is 12 bytes */
		if (csio->cdb_len > 12) {
			ccb->ccb_h.status = CAM_REQ_INVALID;
			xpt_done(ccb);
			return;
		}

		if ((acb = adwgetacb(adw)) == NULL) {
			adw->state |= ADW_RESOURCE_SHORTAGE;
			xpt_freeze_simq(sim, /*count*/1);
			ccb->ccb_h.status = CAM_REQUEUE_REQ;
			xpt_done(ccb);
			return;
		}

		/* Link acb and ccb so we can find one from the other */
		acb->ccb = ccb;
		ccb->ccb_h.ccb_acb_ptr = acb;
		ccb->ccb_h.ccb_adw_ptr = adw;

		acb->queue.cntl = 0;
		acb->queue.target_cmd = 0;
		acb->queue.target_id = ccb->ccb_h.target_id;
		acb->queue.target_lun = ccb->ccb_h.target_lun;

		acb->queue.mflag = 0;
		acb->queue.sense_len =
			MIN(csio->sense_len, sizeof(acb->sense_data));
		acb->queue.cdb_len = csio->cdb_len;
		if ((ccb->ccb_h.flags & CAM_TAG_ACTION_VALID) != 0) {
			switch (csio->tag_action) {
			case MSG_SIMPLE_Q_TAG:
				acb->queue.scsi_cntl = ADW_QSC_SIMPLE_Q_TAG;
				break;
			case MSG_HEAD_OF_Q_TAG:
				acb->queue.scsi_cntl = ADW_QSC_HEAD_OF_Q_TAG;
				break;
			case MSG_ORDERED_Q_TAG:
				acb->queue.scsi_cntl = ADW_QSC_ORDERED_Q_TAG;
				break;
			default:
				acb->queue.scsi_cntl = ADW_QSC_NO_TAGMSG;
				break;
			}
		} else
			acb->queue.scsi_cntl = ADW_QSC_NO_TAGMSG;

		if ((ccb->ccb_h.flags & CAM_DIS_DISCONNECT) != 0)
			acb->queue.scsi_cntl |= ADW_QSC_NO_DISC;

		acb->queue.done_status = 0;
		acb->queue.scsi_status = 0;
		acb->queue.host_status = 0;
		acb->queue.sg_wk_ix = 0;
		if ((ccb->ccb_h.flags & CAM_CDB_POINTER) != 0) {
			if ((ccb->ccb_h.flags & CAM_CDB_PHYS) == 0) {
				bcopy(csio->cdb_io.cdb_ptr,
				      acb->queue.cdb, csio->cdb_len);
			} else {
				/* I guess I could map it in... */
				ccb->ccb_h.status = CAM_REQ_INVALID;
				adwfreeacb(adw, acb);
				xpt_done(ccb);
				return;
			}
		} else {
			bcopy(csio->cdb_io.cdb_bytes,
			      acb->queue.cdb, csio->cdb_len);
		}

		error = bus_dmamap_load_ccb(adw->buffer_dmat,
					    acb->dmamap,
					    ccb,
					    adwexecuteacb,
					    acb, /*flags*/0);
		if (error == EINPROGRESS) {
			/*
			 * So as to maintain ordering, freeze the controller
			 * queue until our mapping is returned.
			 */
			xpt_freeze_simq(sim, 1);
			acb->state |= ACB_RELEASE_SIMQ;
		}
		break;
	}
	case XPT_RESET_DEV:	/* Bus Device Reset the specified SCSI device */
	{
		adw_idle_cmd_status_t status;

		status = adw_idle_cmd_send(adw, ADW_IDLE_CMD_DEVICE_RESET,
					   ccb->ccb_h.target_id);
		if (status == ADW_IDLE_CMD_SUCCESS) {
			ccb->ccb_h.status = CAM_REQ_CMP;
			if (bootverbose) {
				xpt_print_path(ccb->ccb_h.path);
				printf("BDR Delivered\n");
			}
		} else
			ccb->ccb_h.status = CAM_REQ_CMP_ERR;
		xpt_done(ccb);
		break;
	}
	case XPT_ABORT:			/* Abort the specified CCB */
		/* XXX Implement */
		ccb->ccb_h.status = CAM_REQ_INVALID;
		xpt_done(ccb);
		break;
	case XPT_SET_TRAN_SETTINGS:
	{
		struct ccb_trans_settings_scsi *scsi;
		struct ccb_trans_settings_spi *spi;
		struct	  ccb_trans_settings *cts;
		u_int	  target_mask;

		cts = &ccb->cts;
		target_mask = 0x01 << ccb->ccb_h.target_id;

		scsi = &cts->proto_specific.scsi;
		spi = &cts->xport_specific.spi;
		if (cts->type == CTS_TYPE_CURRENT_SETTINGS) {
			u_int sdtrdone;

			sdtrdone = adw_lram_read_16(adw, ADW_MC_SDTR_DONE);
			if ((spi->valid & CTS_SPI_VALID_DISC) != 0) {
				u_int discenb;

				discenb =
				    adw_lram_read_16(adw, ADW_MC_DISC_ENABLE);

				if ((spi->flags & CTS_SPI_FLAGS_DISC_ENB) != 0)
					discenb |= target_mask;
				else
					discenb &= ~target_mask;

				adw_lram_write_16(adw, ADW_MC_DISC_ENABLE,
						  discenb);
			}

			if ((scsi->valid & CTS_SCSI_VALID_TQ) != 0) {
				if ((scsi->flags & CTS_SCSI_FLAGS_TAG_ENB) != 0)
					adw->tagenb |= target_mask;
				else
					adw->tagenb &= ~target_mask;
			}

			if ((spi->valid & CTS_SPI_VALID_BUS_WIDTH) != 0) {
				u_int wdtrenb_orig;
				u_int wdtrenb;
				u_int wdtrdone;

				wdtrenb_orig =
				    adw_lram_read_16(adw, ADW_MC_WDTR_ABLE);
				wdtrenb = wdtrenb_orig;
				wdtrdone = adw_lram_read_16(adw,
							    ADW_MC_WDTR_DONE);
				switch (spi->bus_width) {
				case MSG_EXT_WDTR_BUS_32_BIT:
				case MSG_EXT_WDTR_BUS_16_BIT:
					wdtrenb |= target_mask;
					break;
				case MSG_EXT_WDTR_BUS_8_BIT:
				default:
					wdtrenb &= ~target_mask;
					break;
				}
				if (wdtrenb != wdtrenb_orig) {
					adw_lram_write_16(adw,
							  ADW_MC_WDTR_ABLE,
							  wdtrenb);
					wdtrdone &= ~target_mask;
					adw_lram_write_16(adw,
							  ADW_MC_WDTR_DONE,
							  wdtrdone);
					/* Wide negotiation forces async */
					sdtrdone &= ~target_mask;
					adw_lram_write_16(adw,
							  ADW_MC_SDTR_DONE,
							  sdtrdone);
				}
			}

			if (((spi->valid & CTS_SPI_VALID_SYNC_RATE) != 0)
			 || ((spi->valid & CTS_SPI_VALID_SYNC_OFFSET) != 0)) {
				u_int sdtr_orig;
				u_int sdtr;
				u_int sdtrable_orig;
				u_int sdtrable;

				sdtr = adw_get_chip_sdtr(adw,
							 ccb->ccb_h.target_id);
				sdtr_orig = sdtr;
				sdtrable = adw_lram_read_16(adw,
							    ADW_MC_SDTR_ABLE);
				sdtrable_orig = sdtrable;

				if ((spi->valid
				   & CTS_SPI_VALID_SYNC_RATE) != 0) {
					sdtr =
					    adw_find_sdtr(adw,
							  spi->sync_period);
				}

				if ((spi->valid
				   & CTS_SPI_VALID_SYNC_OFFSET) != 0) {
					if (spi->sync_offset == 0)
						sdtr = ADW_MC_SDTR_ASYNC;
				}

				if (sdtr == ADW_MC_SDTR_ASYNC)
					sdtrable &= ~target_mask;
				else
					sdtrable |= target_mask;
				if (sdtr != sdtr_orig
				 || sdtrable != sdtrable_orig) {
					adw_set_chip_sdtr(adw,
							  ccb->ccb_h.target_id,
							  sdtr);
					sdtrdone &= ~target_mask;
					adw_lram_write_16(adw, ADW_MC_SDTR_ABLE,
							  sdtrable);
					adw_lram_write_16(adw, ADW_MC_SDTR_DONE,
							  sdtrdone);
				}
			}
		}
		ccb->ccb_h.status = CAM_REQ_CMP;
		xpt_done(ccb);
		break;
	}
	case XPT_GET_TRAN_SETTINGS:
	/* Get default/user set transfer settings for the target */
	{
		struct ccb_trans_settings_scsi *scsi;
		struct ccb_trans_settings_spi *spi;
		struct	ccb_trans_settings *cts;
		u_int	target_mask;

		cts = &ccb->cts;
		target_mask = 0x01 << ccb->ccb_h.target_id;
		cts->protocol = PROTO_SCSI;
		cts->protocol_version = SCSI_REV_2;
		cts->transport = XPORT_SPI;
		cts->transport_version = 2;

		scsi = &cts->proto_specific.scsi;
		spi = &cts->xport_specific.spi;
		if (cts->type == CTS_TYPE_USER_SETTINGS) {
			u_int mc_sdtr;

			spi->flags = 0;
			if ((adw->user_discenb & target_mask) != 0)
				spi->flags |= CTS_SPI_FLAGS_DISC_ENB;

			if ((adw->user_tagenb & target_mask) != 0)
				scsi->flags |= CTS_SCSI_FLAGS_TAG_ENB;

			if ((adw->user_wdtr & target_mask) != 0)
				spi->bus_width = MSG_EXT_WDTR_BUS_16_BIT;
			else
				spi->bus_width = MSG_EXT_WDTR_BUS_8_BIT;

			mc_sdtr = adw_get_user_sdtr(adw, ccb->ccb_h.target_id);
			spi->sync_period = adw_find_period(adw, mc_sdtr);
			if (spi->sync_period != 0)
				spi->sync_offset = 15; /* XXX ??? */
			else
				spi->sync_offset = 0;
		} else {
			u_int targ_tinfo;

			spi->flags = 0;
			if ((adw_lram_read_16(adw, ADW_MC_DISC_ENABLE)
			  & target_mask) != 0)
				spi->flags |= CTS_SPI_FLAGS_DISC_ENB;

			if ((adw->tagenb & target_mask) != 0)
				scsi->flags |= CTS_SCSI_FLAGS_TAG_ENB;

			targ_tinfo =
			    adw_lram_read_16(adw,
					     ADW_MC_DEVICE_HSHK_CFG_TABLE
					     + (2 * ccb->ccb_h.target_id));

			if ((targ_tinfo & ADW_HSHK_CFG_WIDE_XFR) != 0)
				spi->bus_width = MSG_EXT_WDTR_BUS_16_BIT;
			else
				spi->bus_width = MSG_EXT_WDTR_BUS_8_BIT;

			spi->sync_period =
			    adw_hshk_cfg_period_factor(targ_tinfo);

			spi->sync_offset = targ_tinfo & ADW_HSHK_CFG_OFFSET;
			if (spi->sync_period == 0)
				spi->sync_offset = 0;

			if (spi->sync_offset == 0)
				spi->sync_period = 0;
		}

		spi->valid = CTS_SPI_VALID_SYNC_RATE
			   | CTS_SPI_VALID_SYNC_OFFSET
			   | CTS_SPI_VALID_BUS_WIDTH
			   | CTS_SPI_VALID_DISC;
		scsi->valid = CTS_SCSI_VALID_TQ;
		ccb->ccb_h.status = CAM_REQ_CMP;
		xpt_done(ccb);
		break;
	}
	case XPT_CALC_GEOMETRY:
	{
		/*
		 * XXX Use Adaptec translation until I find out how to
		 *     get this information from the card.
		 */
		cam_calc_geometry(&ccb->ccg, /*extended*/1);
		xpt_done(ccb);
		break;
	}
	case XPT_RESET_BUS:		/* Reset the specified SCSI bus */
	{
		int failure;

		failure = adw_reset_bus(adw);
		if (failure != 0) {
			ccb->ccb_h.status = CAM_REQ_CMP_ERR;
		} else {
			if (bootverbose) {
				xpt_print_path(adw->path);
				printf("Bus Reset Delivered\n");
			}
			ccb->ccb_h.status = CAM_REQ_CMP;
		}
		xpt_done(ccb);
		break;
	}
	case XPT_TERM_IO:		/* Terminate the I/O process */
		/* XXX Implement */
		ccb->ccb_h.status = CAM_REQ_INVALID;
		xpt_done(ccb);
		break;
	case XPT_PATH_INQ:		/* Path routing inquiry */
	{
		struct ccb_pathinq *cpi = &ccb->cpi;

		cpi->version_num = 1;
		cpi->hba_inquiry = PI_WIDE_16|PI_SDTR_ABLE|PI_TAG_ABLE;
		cpi->target_sprt = 0;
		cpi->hba_misc = 0;
		cpi->hba_eng_cnt = 0;
		cpi->max_target = ADW_MAX_TID;
		cpi->max_lun = ADW_MAX_LUN;
		cpi->initiator_id = adw->initiator_id;
		cpi->bus_id = cam_sim_bus(sim);
		cpi->base_transfer_speed = 3300;
		strlcpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN);
		strlcpy(cpi->hba_vid, "AdvanSys", HBA_IDLEN);
		strlcpy(cpi->dev_name, cam_sim_name(sim), DEV_IDLEN);
		cpi->unit_number = cam_sim_unit(sim);
		cpi->transport = XPORT_SPI;
		cpi->transport_version = 2;
		cpi->protocol = PROTO_SCSI;
		cpi->protocol_version = SCSI_REV_2;
		cpi->ccb_h.status = CAM_REQ_CMP;
		xpt_done(ccb);
		break;
	}
	default:
		ccb->ccb_h.status = CAM_REQ_INVALID;
		xpt_done(ccb);
		break;
	}
}

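/*
 * adw_poll() lets CAM drive command completion in polled contexts (for
 * example while the kernel is dumping), which is also why the
 * mtx_assert() checks in this file are skipped when "dumping" is set.
 */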
static void
adw_poll(struct cam_sim *sim)
{
	adw_intr_locked(cam_sim_softc(sim));
}

static void
adw_async(void *callback_arg, u_int32_t code, struct cam_path *path, void *arg)
{
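	/* Intentionally empty; registered for AC_LOST_DEVICE in adw_attach(). */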
}

struct adw_softc *
adw_alloc(device_t dev, struct resource *regs, int regs_type, int regs_id)
{
	struct	 adw_softc *adw;

	adw = device_get_softc(dev);
	LIST_INIT(&adw->pending_ccbs);
	SLIST_INIT(&adw->sg_maps);
	mtx_init(&adw->lock, "adw", NULL, MTX_DEF);
	adw->device = dev;
	adw->regs_res_type = regs_type;
	adw->regs_res_id = regs_id;
	adw->regs = regs;
	return(adw);
}

void
adw_free(struct adw_softc *adw)
{
	switch (adw->init_level) {
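	/* Each case falls through, unwinding all earlier init levels. */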
	case 9:
	{
		struct sg_map_node *sg_map;

		while ((sg_map = SLIST_FIRST(&adw->sg_maps)) != NULL) {
			SLIST_REMOVE_HEAD(&adw->sg_maps, links);
			bus_dmamap_unload(adw->sg_dmat,
					  sg_map->sg_dmamap);
			bus_dmamem_free(adw->sg_dmat, sg_map->sg_vaddr,
					sg_map->sg_dmamap);
			free(sg_map, M_DEVBUF);
		}
		bus_dma_tag_destroy(adw->sg_dmat);
	}
	case 8:
		bus_dmamap_unload(adw->acb_dmat, adw->acb_dmamap);
	case 7:
		bus_dmamem_free(adw->acb_dmat, adw->acbs,
				adw->acb_dmamap);
	case 6:
		bus_dma_tag_destroy(adw->acb_dmat);
	case 5:
		bus_dmamap_unload(adw->carrier_dmat, adw->carrier_dmamap);
	case 4:
		bus_dmamem_free(adw->carrier_dmat, adw->carriers,
				adw->carrier_dmamap);
	case 3:
		bus_dma_tag_destroy(adw->carrier_dmat);
	case 2:
		bus_dma_tag_destroy(adw->buffer_dmat);
	case 1:
		bus_dma_tag_destroy(adw->parent_dmat);
	case 0:
		break;
	}

	if (adw->regs != NULL)
		bus_release_resource(adw->device,
				     adw->regs_res_type,
				     adw->regs_res_id,
				     adw->regs);

	if (adw->irq != NULL)
		bus_release_resource(adw->device,
				     adw->irq_res_type,
				     0, adw->irq);

	if (adw->sim != NULL) {
		if (adw->path != NULL) {
			xpt_async(AC_LOST_DEVICE, adw->path, NULL);
			xpt_free_path(adw->path);
		}
		xpt_bus_deregister(cam_sim_path(adw->sim));
		cam_sim_free(adw->sim, /*free_devq*/TRUE);
	}
	mtx_destroy(&adw->lock);
}

int
adw_init(struct adw_softc *adw)
{
	struct	  adw_eeprom eep_config;
	u_int	  tid;
	u_int	  i;
	u_int16_t checksum;
	u_int16_t scsicfg1;

	checksum = adw_eeprom_read(adw, &eep_config);
	bcopy(eep_config.serial_number, adw->serial_number,
	      sizeof(adw->serial_number));
	if (checksum != eep_config.checksum) {
		u_int16_t serial_number[3];

		adw->flags |= ADW_EEPROM_FAILED;
		device_printf(adw->device,
		    "EEPROM checksum failed.  Restoring Defaults\n");

		/*
		 * Restore the default EEPROM settings.
		 * Assume the 6 byte board serial number that was read
		 * from EEPROM is correct even if the EEPROM checksum
		 * failed.
		 */
		bcopy(adw->default_eeprom, &eep_config, sizeof(eep_config));
		bcopy(adw->serial_number, eep_config.serial_number,
		      sizeof(serial_number));
		adw_eeprom_write(adw, &eep_config);
	}

	/* Pull eeprom information into our softc. */
	adw->bios_ctrl = eep_config.bios_ctrl;
	adw->user_wdtr = eep_config.wdtr_able;
	for (tid = 0; tid < ADW_MAX_TID; tid++) {
		u_int	  mc_sdtr;
		u_int16_t tid_mask;

		tid_mask = 0x1 << tid;
		if ((adw->features & ADW_ULTRA) != 0) {
			/*
			 * Ultra chips store sdtr and ultraenb
			 * bits in their seeprom, so we must
			 * construct valid mc_sdtr entries
			 * indirectly.
			 */
			if (eep_config.sync1.sync_enable & tid_mask) {
				if (eep_config.sync2.ultra_enable & tid_mask)
					mc_sdtr = ADW_MC_SDTR_20;
				else
					mc_sdtr = ADW_MC_SDTR_10;
			} else
				mc_sdtr = ADW_MC_SDTR_ASYNC;
		} else {
			switch (ADW_TARGET_GROUP(tid)) {
			case 3:
				mc_sdtr = eep_config.sync4.sdtr4;
				break;
			case 2:
				mc_sdtr = eep_config.sync3.sdtr3;
				break;
			case 1:
				mc_sdtr = eep_config.sync2.sdtr2;
				break;
			default: /* Shut up compiler */
			case 0:
				mc_sdtr = eep_config.sync1.sdtr1;
				break;
			}
			mc_sdtr >>= ADW_TARGET_GROUP_SHIFT(tid);
			mc_sdtr &= 0xFF;
		}
		adw_set_user_sdtr(adw, tid, mc_sdtr);
	}
	adw->user_tagenb = eep_config.tagqng_able;
	adw->user_discenb = eep_config.disc_enable;
	adw->max_acbs = eep_config.max_host_qng;
	adw->initiator_id = (eep_config.adapter_scsi_id & ADW_MAX_TID);

	/*
	 * Sanity check the number of host openings.
	 */
	if (adw->max_acbs > ADW_DEF_MAX_HOST_QNG)
		adw->max_acbs = ADW_DEF_MAX_HOST_QNG;
	else if (adw->max_acbs < ADW_DEF_MIN_HOST_QNG) {
		/* If the value is zero, assume it is uninitialized. */
		if (adw->max_acbs == 0)
			adw->max_acbs = ADW_DEF_MAX_HOST_QNG;
		else
			adw->max_acbs = ADW_DEF_MIN_HOST_QNG;
	}

	scsicfg1 = 0;
	if ((adw->features & ADW_ULTRA2) != 0) {
		switch (eep_config.termination_lvd) {
		default:
			device_printf(adw->device,
			    "Invalid EEPROM LVD Termination Settings.\n");
			device_printf(adw->device,
			    "Reverting to Automatic LVD Termination\n");
			/* FALLTHROUGH */
		case ADW_EEPROM_TERM_AUTO:
			break;
		case ADW_EEPROM_TERM_BOTH_ON:
			scsicfg1 |= ADW2_SCSI_CFG1_TERM_LVD_LO;
			/* FALLTHROUGH */
		case ADW_EEPROM_TERM_HIGH_ON:
			scsicfg1 |= ADW2_SCSI_CFG1_TERM_LVD_HI;
			/* FALLTHROUGH */
		case ADW_EEPROM_TERM_OFF:
			scsicfg1 |= ADW2_SCSI_CFG1_DIS_TERM_DRV;
			break;
		}
	}

	switch (eep_config.termination_se) {
	default:
		device_printf(adw->device,
		    "Invalid SE EEPROM Termination Settings.\n");
		device_printf(adw->device,
		    "Reverting to Automatic SE Termination\n");
		/* FALLTHROUGH */
	case ADW_EEPROM_TERM_AUTO:
		break;
	case ADW_EEPROM_TERM_BOTH_ON:
		scsicfg1 |= ADW_SCSI_CFG1_TERM_CTL_L;
		/* FALLTHROUGH */
	case ADW_EEPROM_TERM_HIGH_ON:
		scsicfg1 |= ADW_SCSI_CFG1_TERM_CTL_H;
		/* FALLTHROUGH */
	case ADW_EEPROM_TERM_OFF:
		scsicfg1 |= ADW_SCSI_CFG1_TERM_CTL_MANUAL;
		break;
	}
	device_printf(adw->device, "SCSI ID %d, ", adw->initiator_id);

	/* DMA tag for mapping buffers into device visible space. */
	if (bus_dma_tag_create(
			/* parent	*/ adw->parent_dmat,
			/* alignment	*/ 1,
			/* boundary	*/ 0,
			/* lowaddr	*/ BUS_SPACE_MAXADDR_32BIT,
			/* highaddr	*/ BUS_SPACE_MAXADDR,
			/* filter	*/ NULL,
			/* filterarg	*/ NULL,
			/* maxsize	*/ DFLTPHYS,
			/* nsegments	*/ ADW_SGSIZE,
			/* maxsegsz	*/ BUS_SPACE_MAXSIZE_32BIT,
			/* flags	*/ BUS_DMA_ALLOCNOW,
			/* lockfunc	*/ busdma_lock_mutex,
			/* lockarg	*/ &adw->lock,
			&adw->buffer_dmat) != 0) {
		return (ENOMEM);
	}

	adw->init_level++;

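	/*
	 * Carriers are the small descriptors the RISC firmware walks for
	 * its request and response queues; one is needed per outstanding
	 * ACB, plus ADW_NUM_CARRIER_QUEUES and a terminal free-list entry.
	 */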
	/* DMA tag for our ccb carrier structures */
	if (bus_dma_tag_create(
			/* parent	*/ adw->parent_dmat,
			/* alignment	*/ 0x10,
			/* boundary	*/ 0,
			/* lowaddr	*/ BUS_SPACE_MAXADDR_32BIT,
			/* highaddr	*/ BUS_SPACE_MAXADDR,
			/* filter	*/ NULL,
			/* filterarg	*/ NULL,
			/* maxsize	*/ (adw->max_acbs +
					    ADW_NUM_CARRIER_QUEUES + 1) *
					    sizeof(struct adw_carrier),
			/* nsegments	*/ 1,
			/* maxsegsz	*/ BUS_SPACE_MAXSIZE_32BIT,
			/* flags	*/ 0,
			/* lockfunc	*/ NULL,
			/* lockarg	*/ NULL,
			&adw->carrier_dmat) != 0) {
		return (ENOMEM);
	}

	adw->init_level++;

	/* Allocation for our ccb carrier structures */
	if (bus_dmamem_alloc(adw->carrier_dmat, (void **)&adw->carriers,
			     BUS_DMA_NOWAIT, &adw->carrier_dmamap) != 0) {
		return (ENOMEM);
	}

	adw->init_level++;

	/* And permanently map them */
	bus_dmamap_load(adw->carrier_dmat, adw->carrier_dmamap,
			adw->carriers,
			(adw->max_acbs + ADW_NUM_CARRIER_QUEUES + 1)
			 * sizeof(struct adw_carrier),
			adwmapmem, &adw->carrier_busbase, /*flags*/0);

	/* Clear them out. */
	bzero(adw->carriers, (adw->max_acbs + ADW_NUM_CARRIER_QUEUES + 1)
			     * sizeof(struct adw_carrier));

	/* Setup our free carrier list */
	adw->free_carriers = adw->carriers;
	for (i = 0; i < adw->max_acbs + ADW_NUM_CARRIER_QUEUES; i++) {
		adw->carriers[i].carr_offset =
			carriervtobo(adw, &adw->carriers[i]);
		adw->carriers[i].carr_ba =
			carriervtob(adw, &adw->carriers[i]);
		adw->carriers[i].areq_ba = 0;
		adw->carriers[i].next_ba =
			carriervtobo(adw, &adw->carriers[i+1]);
	}
	/* Terminal carrier.  Never leaves the freelist */
	adw->carriers[i].carr_offset =
		carriervtobo(adw, &adw->carriers[i]);
	adw->carriers[i].carr_ba =
		carriervtob(adw, &adw->carriers[i]);
	adw->carriers[i].areq_ba = 0;
	adw->carriers[i].next_ba = ~0;

	adw->init_level++;

	/* DMA tag for our acb structures */
	if (bus_dma_tag_create(
			/* parent	*/ adw->parent_dmat,
			/* alignment	*/ 1,
			/* boundary	*/ 0,
			/* lowaddr	*/ BUS_SPACE_MAXADDR,
			/* highaddr	*/ BUS_SPACE_MAXADDR,
			/* filter	*/ NULL,
			/* filterarg	*/ NULL,
			/* maxsize	*/ adw->max_acbs * sizeof(struct acb),
			/* nsegments	*/ 1,
			/* maxsegsz	*/ BUS_SPACE_MAXSIZE_32BIT,
			/* flags	*/ 0,
			/* lockfunc	*/ NULL,
			/* lockarg	*/ NULL,
			&adw->acb_dmat) != 0) {
		return (ENOMEM);
	}

	adw->init_level++;

	/* Allocation for our ACBs */
	if (bus_dmamem_alloc(adw->acb_dmat, (void **)&adw->acbs,
			     BUS_DMA_NOWAIT, &adw->acb_dmamap) != 0)
		return (ENOMEM);

	adw->init_level++;

	/* And permanently map them */
	bus_dmamap_load(adw->acb_dmat, adw->acb_dmamap,
			adw->acbs,
			adw->max_acbs * sizeof(struct acb),
			adwmapmem, &adw->acb_busbase, /*flags*/0);

	/* Clear them out. */
	bzero(adw->acbs, adw->max_acbs * sizeof(struct acb));

	/* DMA tag for our S/G structures.  We allocate in page sized chunks */
	if (bus_dma_tag_create(
			/* parent	*/ adw->parent_dmat,
			/* alignment	*/ 1,
			/* boundary	*/ 0,
			/* lowaddr	*/ BUS_SPACE_MAXADDR,
			/* highaddr	*/ BUS_SPACE_MAXADDR,
			/* filter	*/ NULL,
			/* filterarg	*/ NULL,
			/* maxsize	*/ PAGE_SIZE,
			/* nsegments	*/ 1,
			/* maxsegsz	*/ BUS_SPACE_MAXSIZE_32BIT,
			/* flags	*/ 0,
			/* lockfunc	*/ NULL,
			/* lockarg	*/ NULL,
			&adw->sg_dmat) != 0) {
		return (ENOMEM);
	}

	adw->init_level++;

	/* Allocate our first batch of ACBs */
	mtx_lock(&adw->lock);
	if (adwallocacbs(adw) == 0) {
		mtx_unlock(&adw->lock);
		return (ENOMEM);
	}

	if (adw_init_chip(adw, scsicfg1) != 0) {
		mtx_unlock(&adw->lock);
		return (ENXIO);
	}

	printf("Queue Depth %d\n", adw->max_acbs);
	mtx_unlock(&adw->lock);

	return (0);
}

/*
 * Attach all the sub-devices we can find
 */
int
adw_attach(struct adw_softc *adw)
{
	struct ccb_setasync csa;
	struct cam_devq *devq;
	int error;

	/* Hook up our interrupt handler */
	error = bus_setup_intr(adw->device, adw->irq,
	    INTR_TYPE_CAM | INTR_ENTROPY | INTR_MPSAFE, NULL, adw_intr, adw,
	    &adw->ih);
	if (error != 0) {
		device_printf(adw->device, "bus_setup_intr() failed: %d\n",
			      error);
		return (error);
	}

	/* Start the Risc processor now that we are fully configured. */
	adw_outw(adw, ADW_RISC_CSR, ADW_RISC_CSR_RUN);

	/*
	 * Create the device queue for our SIM.
	 */
	devq = cam_simq_alloc(adw->max_acbs);
	if (devq == NULL)
		return (ENOMEM);

	/*
	 * Construct our SIM entry.
	 */
	adw->sim = cam_sim_alloc(adw_action, adw_poll, "adw", adw,
	    device_get_unit(adw->device), &adw->lock, 1, adw->max_acbs, devq);
	if (adw->sim == NULL) {
		cam_simq_free(devq);
		return (ENOMEM);
	}

	/*
	 * Register the bus.
	 */
	mtx_lock(&adw->lock);
	if (xpt_bus_register(adw->sim, adw->device, 0) != CAM_SUCCESS) {
		cam_sim_free(adw->sim, /*free devq*/TRUE);
		adw->sim = NULL;
		error = ENOMEM;
		goto fail;
	}

	if (xpt_create_path(&adw->path, /*periph*/NULL, cam_sim_path(adw->sim),
			    CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD)
	   == CAM_REQ_CMP) {
		xpt_setup_ccb(&csa.ccb_h, adw->path, /*priority*/5);
		csa.ccb_h.func_code = XPT_SASYNC_CB;
		csa.event_enable = AC_LOST_DEVICE;
		csa.callback = adw_async;
		csa.callback_arg = adw;
		xpt_action((union ccb *)&csa);
	}

	gone_in_dev(adw->device, 12, "adw(4) driver");
fail:
	mtx_unlock(&adw->lock);
	return (error);
}

void
adw_intr(void *arg)
{
	struct	adw_softc *adw;

	adw = arg;
	mtx_lock(&adw->lock);
	adw_intr_locked(adw);
	mtx_unlock(&adw->lock);
}

void
adw_intr_locked(struct adw_softc *adw)
{
	u_int	int_stat;

	if ((adw_inw(adw, ADW_CTRL_REG) & ADW_CTRL_REG_HOST_INTR) == 0)
		return;

	/* Reading the register clears the interrupt. */
	int_stat = adw_inb(adw, ADW_INTR_STATUS_REG);

	if ((int_stat & ADW_INTR_STATUS_INTRB) != 0) {
		u_int intrb_code;

		/* Async Microcode Event */
		intrb_code = adw_lram_read_8(adw, ADW_MC_INTRB_CODE);
		switch (intrb_code) {
		case ADW_ASYNC_CARRIER_READY_FAILURE:
			/*
			 * The RISC missed our update of
			 * the commandq.
			 */
			if (LIST_FIRST(&adw->pending_ccbs) != NULL)
				adw_tickle_risc(adw, ADW_TICKLE_A);
			break;
		case ADW_ASYNC_SCSI_BUS_RESET_DET:
			/*
			 * The firmware detected a SCSI Bus reset.
			 */
			device_printf(adw->device, "Someone Reset the Bus\n");
			adw_handle_bus_reset(adw, /*initiated*/FALSE);
			break;
		case ADW_ASYNC_RDMA_FAILURE:
			/*
			 * Handle RDMA failure by resetting the
			 * SCSI Bus and chip.
			 */
#if 0 /* XXX */
			AdvResetChipAndSB(adv_dvc_varp);
#endif
			break;

		case ADW_ASYNC_HOST_SCSI_BUS_RESET:
			/*
			 * Host generated SCSI bus reset occurred.
			 */
			adw_handle_bus_reset(adw, /*initiated*/TRUE);
			break;
		default:
			printf("adw_intr: unknown async code 0x%x\n",
			       intrb_code);
			break;
		}
	}

	/*
	 * Run down the response queue, retiring completed commands.
	 */
	while ((adw->responseq->next_ba & ADW_RQ_DONE) != 0) {
		struct adw_carrier *free_carrier;
		struct acb *acb;
		union ccb *ccb;

#if 0
		printf("0x%x, 0x%x, 0x%x, 0x%x\n",
		       adw->responseq->carr_offset,
		       adw->responseq->carr_ba,
		       adw->responseq->areq_ba,
		       adw->responseq->next_ba);
#endif
		/*
		 * The firmware copies the adw_scsi_req_q.acb_baddr
		 * field into the areq_ba field of the carrier.
		 */
		acb = acbbotov(adw, adw->responseq->areq_ba);

		/*
		 * The least significant four bits of the next_ba
		 * field are used as flags.  Mask them out and then
		 * advance through the list.
		 */
		free_carrier = adw->responseq;
		adw->responseq =
		    carrierbotov(adw, free_carrier->next_ba & ADW_NEXT_BA_MASK);
		free_carrier->next_ba = adw->free_carriers->carr_offset;
		adw->free_carriers = free_carrier;

		/* Process CCB */
		ccb = acb->ccb;
		callout_stop(&acb->timer);
		if ((ccb->ccb_h.flags & CAM_DIR_MASK) != CAM_DIR_NONE) {
			bus_dmasync_op_t op;

			if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN)
				op = BUS_DMASYNC_POSTREAD;
			else
				op = BUS_DMASYNC_POSTWRITE;
			bus_dmamap_sync(adw->buffer_dmat, acb->dmamap, op);
			bus_dmamap_unload(adw->buffer_dmat, acb->dmamap);
			ccb->csio.resid = acb->queue.data_cnt;
		} else
			ccb->csio.resid = 0;

		/* Common Cases inline... */
		if (acb->queue.host_status == QHSTA_NO_ERROR
		 && (acb->queue.done_status == QD_NO_ERROR
		  || acb->queue.done_status == QD_WITH_ERROR)) {
			ccb->csio.scsi_status = acb->queue.scsi_status;
			ccb->ccb_h.status = 0;
			switch (ccb->csio.scsi_status) {
			case SCSI_STATUS_OK:
				ccb->ccb_h.status |= CAM_REQ_CMP;
				break;
			case SCSI_STATUS_CHECK_COND:
			case SCSI_STATUS_CMD_TERMINATED:
				bcopy(&acb->sense_data, &ccb->csio.sense_data,
				      ccb->csio.sense_len);
				ccb->ccb_h.status |= CAM_AUTOSNS_VALID;
				ccb->csio.sense_resid = acb->queue.sense_len;
				/* FALLTHROUGH */
			default:
				ccb->ccb_h.status |= CAM_SCSI_STATUS_ERROR
						  |  CAM_DEV_QFRZN;
				xpt_freeze_devq(ccb->ccb_h.path, /*count*/1);
				break;
			}
			adwfreeacb(adw, acb);
			xpt_done(ccb);
		} else {
			adwprocesserror(adw, acb);
		}
	}
}

static void
adwprocesserror(struct adw_softc *adw, struct acb *acb)
{
	union ccb *ccb;

	ccb = acb->ccb;
	if (acb->queue.done_status == QD_ABORTED_BY_HOST) {
		ccb->ccb_h.status = CAM_REQ_ABORTED;
	} else {
		switch (acb->queue.host_status) {
		case QHSTA_M_SEL_TIMEOUT:
			ccb->ccb_h.status = CAM_SEL_TIMEOUT;
			break;
		case QHSTA_M_SXFR_OFF_UFLW:
		case QHSTA_M_SXFR_OFF_OFLW:
		case QHSTA_M_DATA_OVER_RUN:
			ccb->ccb_h.status = CAM_DATA_RUN_ERR;
			break;
		case QHSTA_M_SXFR_DESELECTED:
		case QHSTA_M_UNEXPECTED_BUS_FREE:
			ccb->ccb_h.status = CAM_UNEXP_BUSFREE;
			break;
		case QHSTA_M_SCSI_BUS_RESET:
		case QHSTA_M_SCSI_BUS_RESET_UNSOL:
			ccb->ccb_h.status = CAM_SCSI_BUS_RESET;
			break;
		case QHSTA_M_BUS_DEVICE_RESET:
			ccb->ccb_h.status = CAM_BDR_SENT;
			break;
		case QHSTA_M_QUEUE_ABORTED:
			/* BDR or Bus Reset */
			xpt_print_path(adw->path);
			printf("Saw Queue Aborted\n");
			ccb->ccb_h.status = adw->last_reset;
			break;
		case QHSTA_M_SXFR_SDMA_ERR:
		case QHSTA_M_SXFR_SXFR_PERR:
		case QHSTA_M_RDMA_PERR:
			ccb->ccb_h.status = CAM_UNCOR_PARITY;
			break;
		case QHSTA_M_WTM_TIMEOUT:
		case QHSTA_M_SXFR_WD_TMO:
		{
			/* The SCSI bus hung in a phase */
			xpt_print_path(adw->path);
			printf("Watch Dog timer expired.  Resetting bus\n");
			adw_reset_bus(adw);
			break;
		}
		case QHSTA_M_SXFR_XFR_PH_ERR:
			ccb->ccb_h.status = CAM_SEQUENCE_FAIL;
			break;
		case QHSTA_M_SXFR_UNKNOWN_ERROR:
			break;
		case QHSTA_M_BAD_CMPL_STATUS_IN:
			/* No command complete after a status message */
			ccb->ccb_h.status = CAM_SEQUENCE_FAIL;
			break;
		case QHSTA_M_AUTO_REQ_SENSE_FAIL:
			ccb->ccb_h.status = CAM_AUTOSENSE_FAIL;
			break;
		case QHSTA_M_INVALID_DEVICE:
			ccb->ccb_h.status = CAM_PATH_INVALID;
			break;
		case QHSTA_M_NO_AUTO_REQ_SENSE:
			/*
			 * User didn't request sense, but we got a
			 * check condition.
			 */
			ccb->csio.scsi_status = acb->queue.scsi_status;
			ccb->ccb_h.status = CAM_SCSI_STATUS_ERROR;
			break;
		default:
			panic("%s: Unhandled Host status error %x",
			    device_get_nameunit(adw->device),
			    acb->queue.host_status);
			/* NOTREACHED */
		}
	}
	if ((acb->state & ACB_RECOVERY_ACB) != 0) {
		if (ccb->ccb_h.status == CAM_SCSI_BUS_RESET
		 || ccb->ccb_h.status == CAM_BDR_SENT)
			ccb->ccb_h.status = CAM_CMD_TIMEOUT;
	}
	if (ccb->ccb_h.status != CAM_REQ_CMP) {
		xpt_freeze_devq(ccb->ccb_h.path, /*count*/1);
		ccb->ccb_h.status |= CAM_DEV_QFRZN;
	}
	adwfreeacb(adw, acb);
	xpt_done(ccb);
}

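/*
 * Per-command timeout handler: mark the ACB for recovery, attempt a Bus
 * Device Reset first, and fall back to a full SCSI bus reset if the BDR
 * idle command fails.
 */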
static void
adwtimeout(void *arg)
{
	struct acb	     *acb;
	union  ccb	     *ccb;
	struct adw_softc     *adw;
	adw_idle_cmd_status_t status;
	int		      target_id;

	acb = (struct acb *)arg;
	ccb = acb->ccb;
	adw = (struct adw_softc *)ccb->ccb_h.ccb_adw_ptr;
	xpt_print_path(ccb->ccb_h.path);
	printf("ACB %p - timed out\n", (void *)acb);

	mtx_assert(&adw->lock, MA_OWNED);

	if ((acb->state & ACB_ACTIVE) == 0) {
		xpt_print_path(ccb->ccb_h.path);
		printf("ACB %p - timed out CCB already completed\n",
		       (void *)acb);
		return;
	}

	acb->state |= ACB_RECOVERY_ACB;
	target_id = ccb->ccb_h.target_id;

	/* Attempt a BDR first */
	status = adw_idle_cmd_send(adw, ADW_IDLE_CMD_DEVICE_RESET,
				   ccb->ccb_h.target_id);
	if (status == ADW_IDLE_CMD_SUCCESS) {
		device_printf(adw->device,
		    "BDR Delivered.  No longer in timeout\n");
		adw_handle_device_reset(adw, target_id);
	} else {
		adw_reset_bus(adw);
		xpt_print_path(adw->path);
		printf("Bus Reset Delivered.  No longer in timeout\n");
	}
}

static void
adw_handle_device_reset(struct adw_softc *adw, u_int target)
{
	struct cam_path *path;
	cam_status error;

	error = xpt_create_path(&path, /*periph*/NULL, cam_sim_path(adw->sim),
				target, CAM_LUN_WILDCARD);

	if (error == CAM_REQ_CMP) {
		xpt_async(AC_SENT_BDR, path, NULL);
		xpt_free_path(path);
	}
	adw->last_reset = CAM_BDR_SENT;
}

static void
adw_handle_bus_reset(struct adw_softc *adw, int initiated)
{
	if (initiated) {
		/*
		 * The microcode currently sets the SCSI Bus Reset signal
		 * while handling the AscSendIdleCmd() IDLE_CMD_SCSI_RESET
		 * command above.  But the SCSI Bus Reset Hold Time in the
		 * microcode is not deterministic (it may in fact be for less
		 * than the SCSI Spec. minimum of 25 us).  Therefore on return
		 * the Adv Library sets the SCSI Bus Reset signal for
		 * ADW_SCSI_RESET_HOLD_TIME_US, which is defined to be greater
		 * than 25 us.
		 */
		u_int scsi_ctrl;

		scsi_ctrl = adw_inw(adw, ADW_SCSI_CTRL) & ~ADW_SCSI_CTRL_RSTOUT;
		adw_outw(adw, ADW_SCSI_CTRL, scsi_ctrl | ADW_SCSI_CTRL_RSTOUT);
		DELAY(ADW_SCSI_RESET_HOLD_TIME_US);
		adw_outw(adw, ADW_SCSI_CTRL, scsi_ctrl);

		/*
		 * We will perform the async notification when the
		 * SCSI Reset interrupt occurs.
		 */
	} else
		xpt_async(AC_BUS_RESET, adw->path, NULL);
	adw->last_reset = CAM_SCSI_BUS_RESET;
}
MODULE_DEPEND(adw, cam, 1, 1, 1);