adwcam.c revision 281826
/*-
 * CAM SCSI interface for the Advanced Systems Inc.
 * Second Generation SCSI controllers.
 *
 * Product specific probe and attach routines can be found in:
 *
 * adw_pci.c	ABP[3]940UW, ABP950UW, ABP3940U2W
 *
 * Copyright (c) 1998, 1999, 2000 Justin Gibbs.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions, and the following disclaimer,
 *    without modification.
 * 2. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
/*
 * Ported from:
 * advansys.c - Linux Host Driver for AdvanSys SCSI Adapters
 *
 * Copyright (c) 1995-1998 Advanced System Products, Inc.
 * All Rights Reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that redistributions of source
 * code retain the above copyright notice and this comment without
 * modification.
 */
45280288Sjkim
46280288Sjkim#include <sys/cdefs.h>
47280288Sjkim__FBSDID("$FreeBSD: stable/10/sys/dev/advansys/adwcam.c 281826 2015-04-21 11:27:50Z mav $");
48280288Sjkim
49280288Sjkim#include <sys/param.h>
50280288Sjkim#include <sys/conf.h>
51280288Sjkim#include <sys/systm.h>
52280288Sjkim#include <sys/kernel.h>
53280288Sjkim#include <sys/malloc.h>
54280288Sjkim#include <sys/lock.h>
55280288Sjkim#include <sys/module.h>
56280288Sjkim#include <sys/mutex.h>
57280288Sjkim#include <sys/bus.h>
58280288Sjkim
59280288Sjkim#include <machine/bus.h>
60280288Sjkim#include <machine/resource.h>
61280288Sjkim
62280288Sjkim#include <sys/rman.h>
63280288Sjkim
64280288Sjkim#include <cam/cam.h>
65280288Sjkim#include <cam/cam_ccb.h>
66280288Sjkim#include <cam/cam_sim.h>
67280288Sjkim#include <cam/cam_xpt_sim.h>
68280288Sjkim#include <cam/cam_debug.h>
69280288Sjkim
70280288Sjkim#include <cam/scsi/scsi_message.h>
71280288Sjkim
72280288Sjkim#include <dev/advansys/adwvar.h>
73280288Sjkim
74280288Sjkim/* Definitions for our use of the SIM private CCB area */
75280288Sjkim#define ccb_acb_ptr spriv_ptr0
76280288Sjkim#define ccb_adw_ptr spriv_ptr1
77280288Sjkim
78280288Sjkimstatic __inline struct acb*	adwgetacb(struct adw_softc *adw);
79280288Sjkimstatic __inline void		adwfreeacb(struct adw_softc *adw,
80280288Sjkim					   struct acb *acb);
81280288Sjkim
82280288Sjkimstatic void		adwmapmem(void *arg, bus_dma_segment_t *segs,
83280288Sjkim				  int nseg, int error);
84280288Sjkimstatic struct sg_map_node*
85280288Sjkim			adwallocsgmap(struct adw_softc *adw);
86280288Sjkimstatic int		adwallocacbs(struct adw_softc *adw);
87280288Sjkim
88280288Sjkimstatic void		adwexecuteacb(void *arg, bus_dma_segment_t *dm_segs,
89280288Sjkim				      int nseg, int error);
90280288Sjkimstatic void		adw_action(struct cam_sim *sim, union ccb *ccb);
91280288Sjkimstatic void		adw_intr_locked(struct adw_softc *adw);
92280288Sjkimstatic void		adw_poll(struct cam_sim *sim);
93280288Sjkimstatic void		adw_async(void *callback_arg, u_int32_t code,
94280288Sjkim				  struct cam_path *path, void *arg);
95280288Sjkimstatic void		adwprocesserror(struct adw_softc *adw, struct acb *acb);
96280288Sjkimstatic void		adwtimeout(void *arg);
97280288Sjkimstatic void		adw_handle_device_reset(struct adw_softc *adw,
98280288Sjkim						u_int target);
99280288Sjkimstatic void		adw_handle_bus_reset(struct adw_softc *adw,
100280288Sjkim					     int initiated);
101280288Sjkim
102280288Sjkimstatic __inline struct acb*
103280288Sjkimadwgetacb(struct adw_softc *adw)
104280288Sjkim{
105280288Sjkim	struct	acb* acb;
106280288Sjkim
107280288Sjkim	if (!dumping)
108280288Sjkim		mtx_assert(&adw->lock, MA_OWNED);
109280288Sjkim	if ((acb = SLIST_FIRST(&adw->free_acb_list)) != NULL) {
110280288Sjkim		SLIST_REMOVE_HEAD(&adw->free_acb_list, links);
111280288Sjkim	} else if (adw->num_acbs < adw->max_acbs) {
112280288Sjkim		adwallocacbs(adw);
113280288Sjkim		acb = SLIST_FIRST(&adw->free_acb_list);
114280288Sjkim		if (acb == NULL)
115280288Sjkim			device_printf(adw->device, "Can't malloc ACB\n");
116280288Sjkim		else {
117280288Sjkim			SLIST_REMOVE_HEAD(&adw->free_acb_list, links);
118280288Sjkim		}
119280288Sjkim	}
120280288Sjkim
121280288Sjkim	return (acb);
122280288Sjkim}
123280288Sjkim
124280288Sjkimstatic __inline void
125280288Sjkimadwfreeacb(struct adw_softc *adw, struct acb *acb)
126280288Sjkim{
127280288Sjkim
128280288Sjkim	if (!dumping)
129280288Sjkim		mtx_assert(&adw->lock, MA_OWNED);
130280288Sjkim	if ((acb->state & ACB_ACTIVE) != 0)
131280288Sjkim		LIST_REMOVE(&acb->ccb->ccb_h, sim_links.le);
132280288Sjkim	if ((acb->state & ACB_RELEASE_SIMQ) != 0)
133280288Sjkim		acb->ccb->ccb_h.status |= CAM_RELEASE_SIMQ;
134280288Sjkim	else if ((adw->state & ADW_RESOURCE_SHORTAGE) != 0
135280288Sjkim	      && (acb->ccb->ccb_h.status & CAM_RELEASE_SIMQ) == 0) {
136280288Sjkim		acb->ccb->ccb_h.status |= CAM_RELEASE_SIMQ;
137280288Sjkim		adw->state &= ~ADW_RESOURCE_SHORTAGE;
138280288Sjkim	}
139280288Sjkim	acb->state = ACB_FREE;
140280288Sjkim	SLIST_INSERT_HEAD(&adw->free_acb_list, acb, links);
141280288Sjkim}
142280288Sjkim
143280288Sjkimstatic void
144280288Sjkimadwmapmem(void *arg, bus_dma_segment_t *segs, int nseg, int error)
145280288Sjkim{
146280288Sjkim	bus_addr_t *busaddrp;
147280288Sjkim
148280288Sjkim	busaddrp = (bus_addr_t *)arg;
149280288Sjkim	*busaddrp = segs->ds_addr;
150280288Sjkim}
151280288Sjkim
152280288Sjkimstatic struct sg_map_node *
153280288Sjkimadwallocsgmap(struct adw_softc *adw)
154280288Sjkim{
155280288Sjkim	struct sg_map_node *sg_map;
156280288Sjkim
157280288Sjkim	sg_map = malloc(sizeof(*sg_map), M_DEVBUF, M_NOWAIT);
158280288Sjkim
159280288Sjkim	if (sg_map == NULL)
160280288Sjkim		return (NULL);
161280288Sjkim
162280288Sjkim	/* Allocate S/G space for the next batch of ACBS */
163280288Sjkim	if (bus_dmamem_alloc(adw->sg_dmat, (void **)&sg_map->sg_vaddr,
164280288Sjkim			     BUS_DMA_NOWAIT, &sg_map->sg_dmamap) != 0) {
165280288Sjkim		free(sg_map, M_DEVBUF);
166280288Sjkim		return (NULL);
167280288Sjkim	}
168280288Sjkim
169280288Sjkim	SLIST_INSERT_HEAD(&adw->sg_maps, sg_map, links);
170280288Sjkim
171280288Sjkim	bus_dmamap_load(adw->sg_dmat, sg_map->sg_dmamap, sg_map->sg_vaddr,
172280288Sjkim			PAGE_SIZE, adwmapmem, &sg_map->sg_physaddr, /*flags*/0);
173280288Sjkim
174280288Sjkim	bzero(sg_map->sg_vaddr, PAGE_SIZE);
175280288Sjkim	return (sg_map);
176280288Sjkim}
177280288Sjkim
/*
 * Carve another batch of ACBs out of the softc's ACB array, giving each
 * one its own DMA map and a slice of a freshly allocated page of S/G
 * blocks.  Returns the number of ACBs added (0 on failure).
 */
static int
adwallocacbs(struct adw_softc *adw)
{
	struct acb *next_acb;
	struct sg_map_node *sg_map;
	bus_addr_t busaddr;
	struct adw_sg_block *blocks;
	int newcount;
	int i;

	/* Next unused slot in the preallocated ACB array. */
	next_acb = &adw->acbs[adw->num_acbs];
	sg_map = adwallocsgmap(adw);

	if (sg_map == NULL)
		return (0);

	blocks = sg_map->sg_vaddr;
	busaddr = sg_map->sg_physaddr;

	/* One page of S/G blocks bounds how many ACBs this pass can add. */
	newcount = (PAGE_SIZE / (ADW_SG_BLOCKCNT * sizeof(*blocks)));
	for (i = 0; adw->num_acbs < adw->max_acbs && i < newcount; i++) {
		int error;

		error = bus_dmamap_create(adw->buffer_dmat, /*flags*/0,
					  &next_acb->dmamap);
		if (error != 0)
			break;
		/* Precompute the bus addresses the firmware will use. */
		next_acb->queue.scsi_req_baddr = acbvtob(adw, next_acb);
		next_acb->queue.scsi_req_bo = acbvtobo(adw, next_acb);
		next_acb->queue.sense_baddr =
		    acbvtob(adw, next_acb) + offsetof(struct acb, sense_data);
		/* Hand this ACB its ADW_SG_BLOCKCNT-block slice of the page. */
		next_acb->sg_blocks = blocks;
		next_acb->sg_busaddr = busaddr;
		next_acb->state = ACB_FREE;
		callout_init_mtx(&next_acb->timer, &adw->lock, 0);
		SLIST_INSERT_HEAD(&adw->free_acb_list, next_acb, links);
		/* Advance virtual and bus cursors in lock step. */
		blocks += ADW_SG_BLOCKCNT;
		busaddr += ADW_SG_BLOCKCNT * sizeof(*blocks);
		next_acb++;
		adw->num_acbs++;
	}
	return (i);
}
224280288Sjkim
/*
 * bus_dmamap_load_ccb() callback: finish setting up an ACB once its data
 * buffer mapping is known, then hand it to the controller.  On mapping
 * error, or if the CCB was aborted while the mapping was deferred, the
 * ACB is freed and the CCB completed immediately.
 */
static void
adwexecuteacb(void *arg, bus_dma_segment_t *dm_segs, int nseg, int error)
{
	struct	 acb *acb;
	union	 ccb *ccb;
	struct	 adw_softc *adw;

	acb = (struct acb *)arg;
	ccb = acb->ccb;
	adw = (struct adw_softc *)ccb->ccb_h.ccb_adw_ptr;

	if (!dumping)
		mtx_assert(&adw->lock, MA_OWNED);
	if (error != 0) {
		/* EFBIG (too many segments) is the one expected failure. */
		if (error != EFBIG)
			device_printf(adw->device, "Unexepected error 0x%x "
			    "returned from bus_dmamap_load\n", error);
		if (ccb->ccb_h.status == CAM_REQ_INPROG) {
			xpt_freeze_devq(ccb->ccb_h.path, /*count*/1);
			ccb->ccb_h.status = CAM_REQ_TOO_BIG|CAM_DEV_QFRZN;
		}
		adwfreeacb(adw, acb);
		xpt_done(ccb);
		return;
	}

	if (nseg != 0) {
		bus_dmasync_op_t op;

		/* First segment is described directly in the request. */
		acb->queue.data_addr = dm_segs[0].ds_addr;
		acb->queue.data_cnt = ccb->csio.dxfer_len;
		if (nseg > 1) {
			struct adw_sg_block *sg_block;
			struct adw_sg_elm *sg;
			bus_addr_t sg_busaddr;
			u_int sg_index;
			bus_dma_segment_t *end_seg;

			end_seg = dm_segs + nseg;

			sg_busaddr = acb->sg_busaddr;
			/* sg_index only tallies copied entries here. */
			sg_index = 0;
			/* Copy the segments into our SG list */
			for (sg_block = acb->sg_blocks;; sg_block++) {
				u_int i;

				sg = sg_block->sg_list;
				for (i = 0; i < ADW_NO_OF_SG_PER_BLOCK; i++) {
					if (dm_segs >= end_seg)
						break;

					sg->sg_addr = dm_segs->ds_addr;
					sg->sg_count = dm_segs->ds_len;
					sg++;
					dm_segs++;
				}
				sg_block->sg_cnt = i;
				sg_index += i;
				if (dm_segs == end_seg) {
					/* Terminate the block chain. */
					sg_block->sg_busaddr_next = 0;
					break;
				} else {
					/* Chain to the next block by bus address. */
					sg_busaddr +=
					    sizeof(struct adw_sg_block);
					sg_block->sg_busaddr_next = sg_busaddr;
				}
			}
			acb->queue.sg_real_addr = acb->sg_busaddr;
		} else {
			acb->queue.sg_real_addr = 0;
		}

		if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN)
			op = BUS_DMASYNC_PREREAD;
		else
			op = BUS_DMASYNC_PREWRITE;

		bus_dmamap_sync(adw->buffer_dmat, acb->dmamap, op);

	} else {
		/* No data phase. */
		acb->queue.data_addr = 0;
		acb->queue.data_cnt = 0;
		acb->queue.sg_real_addr = 0;
	}

	/*
	 * Last time we need to check if this CCB needs to
	 * be aborted.
	 */
	if (ccb->ccb_h.status != CAM_REQ_INPROG) {
		if (nseg != 0)
			bus_dmamap_unload(adw->buffer_dmat, acb->dmamap);
		adwfreeacb(adw, acb);
		xpt_done(ccb);
		return;
	}

	/* Commit: queue the CCB, arm the timeout, and fire the request. */
	acb->state |= ACB_ACTIVE;
	ccb->ccb_h.status |= CAM_SIM_QUEUED;
	LIST_INSERT_HEAD(&adw->pending_ccbs, &ccb->ccb_h, sim_links.le);
	callout_reset_sbt(&acb->timer, SBT_1MS * ccb->ccb_h.timeout, 0,
	    adwtimeout, acb, 0);

	adw_send_acb(adw, acb, acbvtob(adw, acb));
}
330280288Sjkim
331280288Sjkimstatic void
332280288Sjkimadw_action(struct cam_sim *sim, union ccb *ccb)
333280288Sjkim{
334280288Sjkim	struct	adw_softc *adw;
335280288Sjkim
336280288Sjkim	CAM_DEBUG(ccb->ccb_h.path, CAM_DEBUG_TRACE, ("adw_action\n"));
337280288Sjkim
338280288Sjkim	adw = (struct adw_softc *)cam_sim_softc(sim);
339280288Sjkim	if (!dumping)
340280288Sjkim		mtx_assert(&adw->lock, MA_OWNED);
341280288Sjkim
342280288Sjkim	switch (ccb->ccb_h.func_code) {
343280288Sjkim	/* Common cases first */
344280288Sjkim	case XPT_SCSI_IO:	/* Execute the requested I/O operation */
345280288Sjkim	{
346280288Sjkim		struct	ccb_scsiio *csio;
347280288Sjkim		struct	ccb_hdr *ccbh;
348280288Sjkim		struct	acb *acb;
349280288Sjkim		int error;
350280288Sjkim
351280288Sjkim		csio = &ccb->csio;
352280288Sjkim		ccbh = &ccb->ccb_h;
353280288Sjkim
354280288Sjkim		/* Max supported CDB length is 12 bytes */
355280288Sjkim		if (csio->cdb_len > 12) {
356280288Sjkim			ccb->ccb_h.status = CAM_REQ_INVALID;
357280288Sjkim			xpt_done(ccb);
358280288Sjkim			return;
359280288Sjkim		}
360280288Sjkim
361280288Sjkim		if ((acb = adwgetacb(adw)) == NULL) {
362280288Sjkim			adw->state |= ADW_RESOURCE_SHORTAGE;
363280288Sjkim			xpt_freeze_simq(sim, /*count*/1);
364280288Sjkim			ccb->ccb_h.status = CAM_REQUEUE_REQ;
365280288Sjkim			xpt_done(ccb);
366280288Sjkim			return;
367280288Sjkim		}
368280288Sjkim
369280288Sjkim		/* Link acb and ccb so we can find one from the other */
370280288Sjkim		acb->ccb = ccb;
371280288Sjkim		ccb->ccb_h.ccb_acb_ptr = acb;
372280288Sjkim		ccb->ccb_h.ccb_adw_ptr = adw;
373280288Sjkim
374280288Sjkim		acb->queue.cntl = 0;
375280288Sjkim		acb->queue.target_cmd = 0;
376280288Sjkim		acb->queue.target_id = ccb->ccb_h.target_id;
377280288Sjkim		acb->queue.target_lun = ccb->ccb_h.target_lun;
378280288Sjkim
379280288Sjkim		acb->queue.mflag = 0;
380280288Sjkim		acb->queue.sense_len =
381280288Sjkim			MIN(csio->sense_len, sizeof(acb->sense_data));
382280288Sjkim		acb->queue.cdb_len = csio->cdb_len;
383280288Sjkim		if ((ccb->ccb_h.flags & CAM_TAG_ACTION_VALID) != 0) {
384280288Sjkim			switch (csio->tag_action) {
385280288Sjkim			case MSG_SIMPLE_Q_TAG:
386280288Sjkim				acb->queue.scsi_cntl = ADW_QSC_SIMPLE_Q_TAG;
387280288Sjkim				break;
388280288Sjkim			case MSG_HEAD_OF_Q_TAG:
389280288Sjkim				acb->queue.scsi_cntl = ADW_QSC_HEAD_OF_Q_TAG;
390280288Sjkim				break;
391280288Sjkim			case MSG_ORDERED_Q_TAG:
392280288Sjkim				acb->queue.scsi_cntl = ADW_QSC_ORDERED_Q_TAG;
393280288Sjkim				break;
394280288Sjkim			default:
395280288Sjkim				acb->queue.scsi_cntl = ADW_QSC_NO_TAGMSG;
396280288Sjkim				break;
397280288Sjkim			}
398280288Sjkim		} else
399280288Sjkim			acb->queue.scsi_cntl = ADW_QSC_NO_TAGMSG;
400280288Sjkim
401280288Sjkim		if ((ccb->ccb_h.flags & CAM_DIS_DISCONNECT) != 0)
402280288Sjkim			acb->queue.scsi_cntl |= ADW_QSC_NO_DISC;
403280288Sjkim
404280288Sjkim		acb->queue.done_status = 0;
405280288Sjkim		acb->queue.scsi_status = 0;
406280288Sjkim		acb->queue.host_status = 0;
407280288Sjkim		acb->queue.sg_wk_ix = 0;
408280288Sjkim		if ((ccb->ccb_h.flags & CAM_CDB_POINTER) != 0) {
409280288Sjkim			if ((ccb->ccb_h.flags & CAM_CDB_PHYS) == 0) {
410280288Sjkim				bcopy(csio->cdb_io.cdb_ptr,
411280288Sjkim				      acb->queue.cdb, csio->cdb_len);
412280288Sjkim			} else {
413280288Sjkim				/* I guess I could map it in... */
414280288Sjkim				ccb->ccb_h.status = CAM_REQ_INVALID;
415280288Sjkim				adwfreeacb(adw, acb);
416280288Sjkim				xpt_done(ccb);
417280288Sjkim				return;
418280288Sjkim			}
419280288Sjkim		} else {
420280288Sjkim			bcopy(csio->cdb_io.cdb_bytes,
421280288Sjkim			      acb->queue.cdb, csio->cdb_len);
422280288Sjkim		}
423280288Sjkim
424280288Sjkim		error = bus_dmamap_load_ccb(adw->buffer_dmat,
425280288Sjkim					    acb->dmamap,
426280288Sjkim					    ccb,
427280288Sjkim					    adwexecuteacb,
428280288Sjkim					    acb, /*flags*/0);
429280288Sjkim		if (error == EINPROGRESS) {
430280288Sjkim			/*
431280288Sjkim			 * So as to maintain ordering, freeze the controller
432280288Sjkim			 * queue until our mapping is returned.
433280288Sjkim			 */
434280288Sjkim			xpt_freeze_simq(sim, 1);
435280288Sjkim			acb->state |= CAM_RELEASE_SIMQ;
436280288Sjkim		}
437280288Sjkim		break;
438280288Sjkim	}
439280288Sjkim	case XPT_RESET_DEV:	/* Bus Device Reset the specified SCSI device */
440280288Sjkim	{
441280288Sjkim		adw_idle_cmd_status_t status;
442280288Sjkim
443280288Sjkim		status = adw_idle_cmd_send(adw, ADW_IDLE_CMD_DEVICE_RESET,
444280288Sjkim					   ccb->ccb_h.target_id);
445280288Sjkim		if (status == ADW_IDLE_CMD_SUCCESS) {
446280288Sjkim			ccb->ccb_h.status = CAM_REQ_CMP;
447280288Sjkim			if (bootverbose) {
448280288Sjkim				xpt_print_path(ccb->ccb_h.path);
449280288Sjkim				printf("BDR Delivered\n");
450280288Sjkim			}
451280288Sjkim		} else
452280288Sjkim			ccb->ccb_h.status = CAM_REQ_CMP_ERR;
453280288Sjkim		xpt_done(ccb);
454280288Sjkim		break;
455280288Sjkim	}
456280288Sjkim	case XPT_ABORT:			/* Abort the specified CCB */
457280288Sjkim		/* XXX Implement */
458280288Sjkim		ccb->ccb_h.status = CAM_REQ_INVALID;
459280288Sjkim		xpt_done(ccb);
460280288Sjkim		break;
461280288Sjkim	case XPT_SET_TRAN_SETTINGS:
462280288Sjkim	{
463280288Sjkim		struct ccb_trans_settings_scsi *scsi;
464280288Sjkim		struct ccb_trans_settings_spi *spi;
465280288Sjkim		struct	  ccb_trans_settings *cts;
466280288Sjkim		u_int	  target_mask;
467280288Sjkim
468280288Sjkim		cts = &ccb->cts;
469280288Sjkim		target_mask = 0x01 << ccb->ccb_h.target_id;
470280288Sjkim
471280288Sjkim		scsi = &cts->proto_specific.scsi;
472280288Sjkim		spi = &cts->xport_specific.spi;
473280288Sjkim		if (cts->type == CTS_TYPE_CURRENT_SETTINGS) {
474280288Sjkim			u_int sdtrdone;
475280288Sjkim
476280288Sjkim			sdtrdone = adw_lram_read_16(adw, ADW_MC_SDTR_DONE);
477280288Sjkim			if ((spi->valid & CTS_SPI_VALID_DISC) != 0) {
478280288Sjkim				u_int discenb;
479280288Sjkim
480280288Sjkim				discenb =
481280288Sjkim				    adw_lram_read_16(adw, ADW_MC_DISC_ENABLE);
482280288Sjkim
483280288Sjkim				if ((spi->flags & CTS_SPI_FLAGS_DISC_ENB) != 0)
484280288Sjkim					discenb |= target_mask;
485280288Sjkim				else
486280288Sjkim					discenb &= ~target_mask;
487280288Sjkim
488280288Sjkim				adw_lram_write_16(adw, ADW_MC_DISC_ENABLE,
489280288Sjkim						  discenb);
490280288Sjkim			}
491280288Sjkim
492280288Sjkim			if ((scsi->valid & CTS_SCSI_VALID_TQ) != 0) {
493280288Sjkim
494280288Sjkim				if ((scsi->flags & CTS_SCSI_FLAGS_TAG_ENB) != 0)
495280288Sjkim					adw->tagenb |= target_mask;
496280288Sjkim				else
497280288Sjkim					adw->tagenb &= ~target_mask;
498280288Sjkim			}
499280288Sjkim
500280288Sjkim			if ((spi->valid & CTS_SPI_VALID_BUS_WIDTH) != 0) {
501280288Sjkim				u_int wdtrenb_orig;
502280288Sjkim				u_int wdtrenb;
503280288Sjkim				u_int wdtrdone;
504280288Sjkim
505280288Sjkim				wdtrenb_orig =
506280288Sjkim				    adw_lram_read_16(adw, ADW_MC_WDTR_ABLE);
507280288Sjkim				wdtrenb = wdtrenb_orig;
508280288Sjkim				wdtrdone = adw_lram_read_16(adw,
509280288Sjkim							    ADW_MC_WDTR_DONE);
510280288Sjkim				switch (spi->bus_width) {
511280288Sjkim				case MSG_EXT_WDTR_BUS_32_BIT:
512280288Sjkim				case MSG_EXT_WDTR_BUS_16_BIT:
513280288Sjkim					wdtrenb |= target_mask;
514280288Sjkim					break;
515280288Sjkim				case MSG_EXT_WDTR_BUS_8_BIT:
516280288Sjkim				default:
517280288Sjkim					wdtrenb &= ~target_mask;
518280288Sjkim					break;
519280288Sjkim				}
520280288Sjkim				if (wdtrenb != wdtrenb_orig) {
521280288Sjkim					adw_lram_write_16(adw,
522280288Sjkim							  ADW_MC_WDTR_ABLE,
523280288Sjkim							  wdtrenb);
524280288Sjkim					wdtrdone &= ~target_mask;
525280288Sjkim					adw_lram_write_16(adw,
526280288Sjkim							  ADW_MC_WDTR_DONE,
527280288Sjkim							  wdtrdone);
528280288Sjkim					/* Wide negotiation forces async */
529280288Sjkim					sdtrdone &= ~target_mask;
530280288Sjkim					adw_lram_write_16(adw,
531280288Sjkim							  ADW_MC_SDTR_DONE,
532280288Sjkim							  sdtrdone);
533280288Sjkim				}
534280288Sjkim			}
535280288Sjkim
536280288Sjkim			if (((spi->valid & CTS_SPI_VALID_SYNC_RATE) != 0)
537280288Sjkim			 || ((spi->valid & CTS_SPI_VALID_SYNC_OFFSET) != 0)) {
538280288Sjkim				u_int sdtr_orig;
539280288Sjkim				u_int sdtr;
540280288Sjkim				u_int sdtrable_orig;
541280288Sjkim				u_int sdtrable;
542280288Sjkim
543280288Sjkim				sdtr = adw_get_chip_sdtr(adw,
544280288Sjkim							 ccb->ccb_h.target_id);
545280288Sjkim				sdtr_orig = sdtr;
546280288Sjkim				sdtrable = adw_lram_read_16(adw,
547280288Sjkim							    ADW_MC_SDTR_ABLE);
548280288Sjkim				sdtrable_orig = sdtrable;
549280288Sjkim
550280288Sjkim				if ((spi->valid
551280288Sjkim				   & CTS_SPI_VALID_SYNC_RATE) != 0) {
552280288Sjkim
553280288Sjkim					sdtr =
554280288Sjkim					    adw_find_sdtr(adw,
555280288Sjkim							  spi->sync_period);
556280288Sjkim				}
557280288Sjkim
558280288Sjkim				if ((spi->valid
559280288Sjkim				   & CTS_SPI_VALID_SYNC_OFFSET) != 0) {
560280288Sjkim					if (spi->sync_offset == 0)
561280288Sjkim						sdtr = ADW_MC_SDTR_ASYNC;
562280288Sjkim				}
563280288Sjkim
564280288Sjkim				if (sdtr == ADW_MC_SDTR_ASYNC)
565280288Sjkim					sdtrable &= ~target_mask;
566280288Sjkim				else
567280288Sjkim					sdtrable |= target_mask;
568280288Sjkim				if (sdtr != sdtr_orig
569280288Sjkim				 || sdtrable != sdtrable_orig) {
570280288Sjkim					adw_set_chip_sdtr(adw,
571280288Sjkim							  ccb->ccb_h.target_id,
572280288Sjkim							  sdtr);
573280288Sjkim					sdtrdone &= ~target_mask;
574280288Sjkim					adw_lram_write_16(adw, ADW_MC_SDTR_ABLE,
575280288Sjkim							  sdtrable);
576280288Sjkim					adw_lram_write_16(adw, ADW_MC_SDTR_DONE,
577280288Sjkim							  sdtrdone);
578280288Sjkim
579280288Sjkim				}
580280288Sjkim			}
581280288Sjkim		}
582280288Sjkim		ccb->ccb_h.status = CAM_REQ_CMP;
583280288Sjkim		xpt_done(ccb);
584280288Sjkim		break;
585280288Sjkim	}
586280288Sjkim	case XPT_GET_TRAN_SETTINGS:
587280288Sjkim	/* Get default/user set transfer settings for the target */
588280288Sjkim	{
589280288Sjkim		struct ccb_trans_settings_scsi *scsi;
590280288Sjkim		struct ccb_trans_settings_spi *spi;
591280288Sjkim		struct	ccb_trans_settings *cts;
592280288Sjkim		u_int	target_mask;
593280288Sjkim
594280288Sjkim		cts = &ccb->cts;
595280288Sjkim		target_mask = 0x01 << ccb->ccb_h.target_id;
596280288Sjkim		cts->protocol = PROTO_SCSI;
597280288Sjkim		cts->protocol_version = SCSI_REV_2;
598280288Sjkim		cts->transport = XPORT_SPI;
599280288Sjkim		cts->transport_version = 2;
600280288Sjkim
601280288Sjkim		scsi = &cts->proto_specific.scsi;
602280288Sjkim		spi = &cts->xport_specific.spi;
603280288Sjkim		if (cts->type == CTS_TYPE_CURRENT_SETTINGS) {
604280288Sjkim			u_int mc_sdtr;
605280288Sjkim
606280288Sjkim			spi->flags = 0;
607280288Sjkim			if ((adw->user_discenb & target_mask) != 0)
608280288Sjkim				spi->flags |= CTS_SPI_FLAGS_DISC_ENB;
609280288Sjkim
610280288Sjkim			if ((adw->user_tagenb & target_mask) != 0)
611280288Sjkim				scsi->flags |= CTS_SCSI_FLAGS_TAG_ENB;
612280288Sjkim
613280288Sjkim			if ((adw->user_wdtr & target_mask) != 0)
614280288Sjkim				spi->bus_width = MSG_EXT_WDTR_BUS_16_BIT;
615280288Sjkim			else
616280288Sjkim				spi->bus_width = MSG_EXT_WDTR_BUS_8_BIT;
617280288Sjkim
618280288Sjkim			mc_sdtr = adw_get_user_sdtr(adw, ccb->ccb_h.target_id);
619280288Sjkim			spi->sync_period = adw_find_period(adw, mc_sdtr);
620280288Sjkim			if (spi->sync_period != 0)
621280288Sjkim				spi->sync_offset = 15; /* XXX ??? */
622280288Sjkim			else
623280288Sjkim				spi->sync_offset = 0;
624280288Sjkim
625280288Sjkim
626280288Sjkim		} else {
627280288Sjkim			u_int targ_tinfo;
628280288Sjkim
629280288Sjkim			spi->flags = 0;
630280288Sjkim			if ((adw_lram_read_16(adw, ADW_MC_DISC_ENABLE)
631280288Sjkim			  & target_mask) != 0)
632280288Sjkim				spi->flags |= CTS_SPI_FLAGS_DISC_ENB;
633280288Sjkim
634280288Sjkim			if ((adw->tagenb & target_mask) != 0)
635280288Sjkim				scsi->flags |= CTS_SCSI_FLAGS_TAG_ENB;
636280288Sjkim
637280288Sjkim			targ_tinfo =
638280288Sjkim			    adw_lram_read_16(adw,
639280288Sjkim					     ADW_MC_DEVICE_HSHK_CFG_TABLE
640280288Sjkim					     + (2 * ccb->ccb_h.target_id));
641280288Sjkim
642280288Sjkim			if ((targ_tinfo & ADW_HSHK_CFG_WIDE_XFR) != 0)
643280288Sjkim				spi->bus_width = MSG_EXT_WDTR_BUS_16_BIT;
644280288Sjkim			else
645280288Sjkim				spi->bus_width = MSG_EXT_WDTR_BUS_8_BIT;
646280288Sjkim
647280288Sjkim			spi->sync_period =
648280288Sjkim			    adw_hshk_cfg_period_factor(targ_tinfo);
649280288Sjkim
650280288Sjkim			spi->sync_offset = targ_tinfo & ADW_HSHK_CFG_OFFSET;
651280288Sjkim			if (spi->sync_period == 0)
652280288Sjkim				spi->sync_offset = 0;
653280288Sjkim
654280288Sjkim			if (spi->sync_offset == 0)
655280288Sjkim				spi->sync_period = 0;
656280288Sjkim		}
657280288Sjkim
658280288Sjkim		spi->valid = CTS_SPI_VALID_SYNC_RATE
659280288Sjkim			   | CTS_SPI_VALID_SYNC_OFFSET
660280288Sjkim			   | CTS_SPI_VALID_BUS_WIDTH
661280288Sjkim			   | CTS_SPI_VALID_DISC;
662280288Sjkim		scsi->valid = CTS_SCSI_VALID_TQ;
663280288Sjkim		ccb->ccb_h.status = CAM_REQ_CMP;
664280288Sjkim		xpt_done(ccb);
665280288Sjkim		break;
666280288Sjkim	}
667280288Sjkim	case XPT_CALC_GEOMETRY:
668280288Sjkim	{
669280288Sjkim		/*
670280288Sjkim		 * XXX Use Adaptec translation until I find out how to
671280288Sjkim		 *     get this information from the card.
672280288Sjkim		 */
673280288Sjkim		cam_calc_geometry(&ccb->ccg, /*extended*/1);
674280288Sjkim		xpt_done(ccb);
675280288Sjkim		break;
676280288Sjkim	}
677280288Sjkim	case XPT_RESET_BUS:		/* Reset the specified SCSI bus */
678280288Sjkim	{
679280288Sjkim		int failure;
680280288Sjkim
681280288Sjkim		failure = adw_reset_bus(adw);
682280288Sjkim		if (failure != 0) {
683280288Sjkim			ccb->ccb_h.status = CAM_REQ_CMP_ERR;
684280288Sjkim		} else {
685280288Sjkim			if (bootverbose) {
686280288Sjkim				xpt_print_path(adw->path);
687280288Sjkim				printf("Bus Reset Delivered\n");
688280288Sjkim			}
689280288Sjkim			ccb->ccb_h.status = CAM_REQ_CMP;
690280288Sjkim		}
691280288Sjkim		xpt_done(ccb);
692280288Sjkim		break;
693280288Sjkim	}
694280288Sjkim	case XPT_TERM_IO:		/* Terminate the I/O process */
695280288Sjkim		/* XXX Implement */
696280288Sjkim		ccb->ccb_h.status = CAM_REQ_INVALID;
697280288Sjkim		xpt_done(ccb);
698280288Sjkim		break;
699280288Sjkim	case XPT_PATH_INQ:		/* Path routing inquiry */
700280288Sjkim	{
701280288Sjkim		struct ccb_pathinq *cpi = &ccb->cpi;
702280288Sjkim
703280288Sjkim		cpi->version_num = 1;
704280288Sjkim		cpi->hba_inquiry = PI_WIDE_16|PI_SDTR_ABLE|PI_TAG_ABLE;
705280288Sjkim		cpi->target_sprt = 0;
706280288Sjkim		cpi->hba_misc = 0;
707280288Sjkim		cpi->hba_eng_cnt = 0;
708280288Sjkim		cpi->max_target = ADW_MAX_TID;
709280288Sjkim		cpi->max_lun = ADW_MAX_LUN;
710280288Sjkim		cpi->initiator_id = adw->initiator_id;
711280288Sjkim		cpi->bus_id = cam_sim_bus(sim);
712280288Sjkim		cpi->base_transfer_speed = 3300;
713280288Sjkim		strncpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN);
714280288Sjkim		strncpy(cpi->hba_vid, "AdvanSys", HBA_IDLEN);
715280288Sjkim		strncpy(cpi->dev_name, cam_sim_name(sim), DEV_IDLEN);
716280288Sjkim		cpi->unit_number = cam_sim_unit(sim);
717280288Sjkim                cpi->transport = XPORT_SPI;
718280288Sjkim                cpi->transport_version = 2;
719280288Sjkim                cpi->protocol = PROTO_SCSI;
720280288Sjkim                cpi->protocol_version = SCSI_REV_2;
721280288Sjkim		cpi->ccb_h.status = CAM_REQ_CMP;
722280288Sjkim		xpt_done(ccb);
723280288Sjkim		break;
724280288Sjkim	}
725280288Sjkim	default:
726280288Sjkim		ccb->ccb_h.status = CAM_REQ_INVALID;
727280288Sjkim		xpt_done(ccb);
728280288Sjkim		break;
729280288Sjkim	}
730280288Sjkim}
731280288Sjkim
/*
 * CAM polling entry point: run the interrupt handler by hand
 * (used while interrupts are unavailable, e.g. during dumps).
 */
static void
adw_poll(struct cam_sim *sim)
{
	struct adw_softc *adw;

	adw = (struct adw_softc *)cam_sim_softc(sim);
	adw_intr_locked(adw);
}
737280288Sjkim
/* Async event callback: intentionally empty — no events are acted upon. */
static void
adw_async(void *callback_arg, u_int32_t code, struct cam_path *path, void *arg)
{
}
742280288Sjkim
743280288Sjkimstruct adw_softc *
744280288Sjkimadw_alloc(device_t dev, struct resource *regs, int regs_type, int regs_id)
745280288Sjkim{
746280288Sjkim	struct	 adw_softc *adw;
747280288Sjkim
748280288Sjkim	adw = device_get_softc(dev);
749280288Sjkim	LIST_INIT(&adw->pending_ccbs);
750280288Sjkim	SLIST_INIT(&adw->sg_maps);
751280288Sjkim	mtx_init(&adw->lock, "adw", NULL, MTX_DEF);
752291721Sjkim	adw->device = dev;
753291721Sjkim	adw->regs_res_type = regs_type;
754291721Sjkim	adw->regs_res_id = regs_id;
755291721Sjkim	adw->regs = regs;
756291721Sjkim	return(adw);
757291721Sjkim}
758291721Sjkim
/*
 * Tear down everything adw_init()/attach built, driven by init_level:
 * each case releases one stage and deliberately falls through to the
 * next lower stage, so a partial attach is unwound correctly.
 */
void
adw_free(struct adw_softc *adw)
{
	switch (adw->init_level) {
	case 9:
	{
		struct sg_map_node *sg_map;

		/* Release every page of S/G block storage. */
		while ((sg_map = SLIST_FIRST(&adw->sg_maps)) != NULL) {
			SLIST_REMOVE_HEAD(&adw->sg_maps, links);
			bus_dmamap_unload(adw->sg_dmat,
					  sg_map->sg_dmamap);
			bus_dmamem_free(adw->sg_dmat, sg_map->sg_vaddr,
					sg_map->sg_dmamap);
			free(sg_map, M_DEVBUF);
		}
		bus_dma_tag_destroy(adw->sg_dmat);
	}
	/* FALLTHROUGH */
	case 8:
		bus_dmamap_unload(adw->acb_dmat, adw->acb_dmamap);
	/* FALLTHROUGH */
	case 7:
		/*
		 * NOTE(review): bus_dmamem_free() releases the map created by
		 * bus_dmamem_alloc(); the following bus_dmamap_destroy() on
		 * the same map looks redundant — confirm against bus_dma(9).
		 */
		bus_dmamem_free(adw->acb_dmat, adw->acbs,
				adw->acb_dmamap);
		bus_dmamap_destroy(adw->acb_dmat, adw->acb_dmamap);
	/* FALLTHROUGH */
	case 6:
		bus_dma_tag_destroy(adw->acb_dmat);
	/* FALLTHROUGH */
	case 5:
		bus_dmamap_unload(adw->carrier_dmat, adw->carrier_dmamap);
	/* FALLTHROUGH */
	case 4:
		bus_dmamem_free(adw->carrier_dmat, adw->carriers,
				adw->carrier_dmamap);
		bus_dmamap_destroy(adw->carrier_dmat, adw->carrier_dmamap);
	/* FALLTHROUGH */
	case 3:
		bus_dma_tag_destroy(adw->carrier_dmat);
	/* FALLTHROUGH */
	case 2:
		bus_dma_tag_destroy(adw->buffer_dmat);
	/* FALLTHROUGH */
	case 1:
		bus_dma_tag_destroy(adw->parent_dmat);
	/* FALLTHROUGH */
	case 0:
		break;
	}

	/* Release bus resources handed to us at attach time. */
	if (adw->regs != NULL)
		bus_release_resource(adw->device,
				     adw->regs_res_type,
				     adw->regs_res_id,
				     adw->regs);

	if (adw->irq != NULL)
		bus_release_resource(adw->device,
				     adw->irq_res_type,
				     0, adw->irq);

	/* Detach from CAM: drop the path, bus registration, and SIM. */
	if (adw->sim != NULL) {
		if (adw->path != NULL) {
			xpt_async(AC_LOST_DEVICE, adw->path, NULL);
			xpt_free_path(adw->path);
		}
		xpt_bus_deregister(cam_sim_path(adw->sim));
		cam_sim_free(adw->sim, /*free_devq*/TRUE);
	}
	mtx_destroy(&adw->lock);
}
822
/*
 * Read the controller's EEPROM configuration, create the DMA tags and
 * permanently-mapped memory used for carriers, ACBs and S/G lists, and
 * start the chip.  Each successful allocation bumps adw->init_level so
 * adw_free() can unwind a partial initialization.
 *
 * Returns 0 on success, ENOMEM or ENXIO on failure.
 */
int
adw_init(struct adw_softc *adw)
{
	struct	  adw_eeprom eep_config;
	u_int	  tid;
	u_int	  i;
	u_int16_t checksum;
	u_int16_t scsicfg1;

	checksum = adw_eeprom_read(adw, &eep_config);
	bcopy(eep_config.serial_number, adw->serial_number,
	      sizeof(adw->serial_number));
	if (checksum != eep_config.checksum) {
		u_int16_t serial_number[3];

		adw->flags |= ADW_EEPROM_FAILED;
		device_printf(adw->device,
		    "EEPROM checksum failed.  Restoring Defaults\n");

	        /*
		 * Restore the default EEPROM settings.
		 * Assume the 6 byte board serial number that was read
		 * from EEPROM is correct even if the EEPROM checksum
		 * failed.
		 */
		bcopy(adw->default_eeprom, &eep_config, sizeof(eep_config));
		bcopy(adw->serial_number, eep_config.serial_number,
		      sizeof(serial_number));
		adw_eeprom_write(adw, &eep_config);
	}

	/* Pull eeprom information into our softc. */
	adw->bios_ctrl = eep_config.bios_ctrl;
	adw->user_wdtr = eep_config.wdtr_able;
	for (tid = 0; tid < ADW_MAX_TID; tid++) {
		u_int	  mc_sdtr;
		u_int16_t tid_mask;

		tid_mask = 0x1 << tid;
		if ((adw->features & ADW_ULTRA) != 0) {
			/*
			 * Ultra chips store sdtr and ultraenb
			 * bits in their seeprom, so we must
			 * construct valid mc_sdtr entries for
			 * indirectly.
			 */
			if (eep_config.sync1.sync_enable & tid_mask) {
				if (eep_config.sync2.ultra_enable & tid_mask)
					mc_sdtr = ADW_MC_SDTR_20;
				else
					mc_sdtr = ADW_MC_SDTR_10;
			} else
				mc_sdtr = ADW_MC_SDTR_ASYNC;
		} else {
			/*
			 * Non-Ultra chips pack four targets' SDTR values
			 * per EEPROM word; pick the word for this target's
			 * group, then shift/mask out this target's byte.
			 */
			switch (ADW_TARGET_GROUP(tid)) {
			case 3:
				mc_sdtr = eep_config.sync4.sdtr4;
				break;
			case 2:
				mc_sdtr = eep_config.sync3.sdtr3;
				break;
			case 1:
				mc_sdtr = eep_config.sync2.sdtr2;
				break;
			default: /* Shut up compiler */
			case 0:
				mc_sdtr = eep_config.sync1.sdtr1;
				break;
			}
			mc_sdtr >>= ADW_TARGET_GROUP_SHIFT(tid);
			mc_sdtr &= 0xFF;
		}
		adw_set_user_sdtr(adw, tid, mc_sdtr);
	}
	adw->user_tagenb = eep_config.tagqng_able;
	adw->user_discenb = eep_config.disc_enable;
	adw->max_acbs = eep_config.max_host_qng;
	adw->initiator_id = (eep_config.adapter_scsi_id & ADW_MAX_TID);

	/*
	 * Sanity check the number of host openings.
	 */
	if (adw->max_acbs > ADW_DEF_MAX_HOST_QNG)
		adw->max_acbs = ADW_DEF_MAX_HOST_QNG;
	else if (adw->max_acbs < ADW_DEF_MIN_HOST_QNG) {
        	/* If the value is zero, assume it is uninitialized. */
		if (adw->max_acbs == 0)
			adw->max_acbs = ADW_DEF_MAX_HOST_QNG;
		else
			adw->max_acbs = ADW_DEF_MIN_HOST_QNG;
	}

	/*
	 * Build the SCSI_CFG1 termination settings from the EEPROM.
	 * LVD termination only applies to Ultra2 chips.  The cases
	 * deliberately cascade: BOTH_ON implies HIGH_ON, and any manual
	 * setting disables automatic termination control.
	 */
	scsicfg1 = 0;
	if ((adw->features & ADW_ULTRA2) != 0) {
		switch (eep_config.termination_lvd) {
		default:
			device_printf(adw->device,
			    "Invalid EEPROM LVD Termination Settings.\n");
			device_printf(adw->device,
			    "Reverting to Automatic LVD Termination\n");
			/* FALLTHROUGH */
		case ADW_EEPROM_TERM_AUTO:
			break;
		case ADW_EEPROM_TERM_BOTH_ON:
			scsicfg1 |= ADW2_SCSI_CFG1_TERM_LVD_LO;
			/* FALLTHROUGH */
		case ADW_EEPROM_TERM_HIGH_ON:
			scsicfg1 |= ADW2_SCSI_CFG1_TERM_LVD_HI;
			/* FALLTHROUGH */
		case ADW_EEPROM_TERM_OFF:
			scsicfg1 |= ADW2_SCSI_CFG1_DIS_TERM_DRV;
			break;
		}
	}

	/* Single-ended termination follows the same cascading pattern. */
	switch (eep_config.termination_se) {
	default:
		device_printf(adw->device,
		    "Invalid SE EEPROM Termination Settings.\n");
		device_printf(adw->device,
		    "Reverting to Automatic SE Termination\n");
		/* FALLTHROUGH */
	case ADW_EEPROM_TERM_AUTO:
		break;
	case ADW_EEPROM_TERM_BOTH_ON:
		scsicfg1 |= ADW_SCSI_CFG1_TERM_CTL_L;
		/* FALLTHROUGH */
	case ADW_EEPROM_TERM_HIGH_ON:
		scsicfg1 |= ADW_SCSI_CFG1_TERM_CTL_H;
		/* FALLTHROUGH */
	case ADW_EEPROM_TERM_OFF:
		scsicfg1 |= ADW_SCSI_CFG1_TERM_CTL_MANUAL;
		break;
	}
	device_printf(adw->device, "SCSI ID %d, ", adw->initiator_id);

	/* DMA tag for mapping buffers into device visible space. */
	if (bus_dma_tag_create(
			/* parent	*/ adw->parent_dmat,
			/* alignment	*/ 1,
			/* boundary	*/ 0,
			/* lowaddr	*/ BUS_SPACE_MAXADDR_32BIT,
			/* highaddr	*/ BUS_SPACE_MAXADDR,
			/* filter	*/ NULL,
			/* filterarg	*/ NULL,
			/* maxsize	*/ DFLTPHYS,
			/* nsegments	*/ ADW_SGSIZE,
			/* maxsegsz	*/ BUS_SPACE_MAXSIZE_32BIT,
			/* flags	*/ BUS_DMA_ALLOCNOW,
			/* lockfunc	*/ busdma_lock_mutex,
			/* lockarg	*/ &adw->lock,
			&adw->buffer_dmat) != 0) {
		return (ENOMEM);
	}

	adw->init_level++;

	/* DMA tag for our ccb carrier structures */
	if (bus_dma_tag_create(
			/* parent	*/ adw->parent_dmat,
			/* alignment	*/ 0x10,
			/* boundary	*/ 0,
			/* lowaddr	*/ BUS_SPACE_MAXADDR_32BIT,
			/* highaddr	*/ BUS_SPACE_MAXADDR,
			/* filter	*/ NULL,
			/* filterarg	*/ NULL,
			/* maxsize	*/ (adw->max_acbs +
					    ADW_NUM_CARRIER_QUEUES + 1) *
					    sizeof(struct adw_carrier),
			/* nsegments	*/ 1,
			/* maxsegsz	*/ BUS_SPACE_MAXSIZE_32BIT,
			/* flags	*/ 0,
			/* lockfunc	*/ NULL,
			/* lockarg	*/ NULL,
			&adw->carrier_dmat) != 0) {
		return (ENOMEM);
        }

	adw->init_level++;

	/* Allocation for our ccb carrier structures */
	if (bus_dmamem_alloc(adw->carrier_dmat, (void **)&adw->carriers,
			     BUS_DMA_NOWAIT, &adw->carrier_dmamap) != 0) {
		return (ENOMEM);
	}

	adw->init_level++;

	/* And permanently map them */
	bus_dmamap_load(adw->carrier_dmat, adw->carrier_dmamap,
			adw->carriers,
			(adw->max_acbs + ADW_NUM_CARRIER_QUEUES + 1)
			 * sizeof(struct adw_carrier),
			adwmapmem, &adw->carrier_busbase, /*flags*/0);

	/* Clear them out. */
	bzero(adw->carriers, (adw->max_acbs + ADW_NUM_CARRIER_QUEUES + 1)
			     * sizeof(struct adw_carrier));

	/*
	 * Setup our free carrier list.  Each carrier is pre-filled with
	 * its own bus offset/address and chained to the next one so the
	 * firmware can walk the list.
	 */
	adw->free_carriers = adw->carriers;
	for (i = 0; i < adw->max_acbs + ADW_NUM_CARRIER_QUEUES; i++) {
		adw->carriers[i].carr_offset =
			carriervtobo(adw, &adw->carriers[i]);
		adw->carriers[i].carr_ba =
			carriervtob(adw, &adw->carriers[i]);
		adw->carriers[i].areq_ba = 0;
		adw->carriers[i].next_ba =
			carriervtobo(adw, &adw->carriers[i+1]);
	}
	/* Terminal carrier.  Never leaves the freelist */
	adw->carriers[i].carr_offset =
		carriervtobo(adw, &adw->carriers[i]);
	adw->carriers[i].carr_ba =
		carriervtob(adw, &adw->carriers[i]);
	adw->carriers[i].areq_ba = 0;
	adw->carriers[i].next_ba = ~0;

	adw->init_level++;

	/* DMA tag for our acb structures */
	if (bus_dma_tag_create(
			/* parent	*/ adw->parent_dmat,
			/* alignment	*/ 1,
			/* boundary	*/ 0,
			/* lowaddr	*/ BUS_SPACE_MAXADDR,
			/* highaddr	*/ BUS_SPACE_MAXADDR,
			/* filter	*/ NULL,
			/* filterarg	*/ NULL,
			/* maxsize	*/ adw->max_acbs * sizeof(struct acb),
			/* nsegments	*/ 1,
			/* maxsegsz	*/ BUS_SPACE_MAXSIZE_32BIT,
			/* flags	*/ 0,
			/* lockfunc	*/ NULL,
			/* lockarg	*/ NULL,
			&adw->acb_dmat) != 0) {
		return (ENOMEM);
        }

	adw->init_level++;

	/* Allocation for our ccbs */
	if (bus_dmamem_alloc(adw->acb_dmat, (void **)&adw->acbs,
			     BUS_DMA_NOWAIT, &adw->acb_dmamap) != 0)
		return (ENOMEM);

	adw->init_level++;

	/* And permanently map them */
	bus_dmamap_load(adw->acb_dmat, adw->acb_dmamap,
			adw->acbs,
			adw->max_acbs * sizeof(struct acb),
			adwmapmem, &adw->acb_busbase, /*flags*/0);

	/* Clear them out. */
	bzero(adw->acbs, adw->max_acbs * sizeof(struct acb));

	/* DMA tag for our S/G structures.  We allocate in page sized chunks */
	if (bus_dma_tag_create(
			/* parent	*/ adw->parent_dmat,
			/* alignment	*/ 1,
			/* boundary	*/ 0,
			/* lowaddr	*/ BUS_SPACE_MAXADDR,
			/* highaddr	*/ BUS_SPACE_MAXADDR,
			/* filter	*/ NULL,
			/* filterarg	*/ NULL,
			/* maxsize	*/ PAGE_SIZE,
			/* nsegments	*/ 1,
			/* maxsegsz	*/ BUS_SPACE_MAXSIZE_32BIT,
			/* flags	*/ 0,
			/* lockfunc	*/ NULL,
			/* lockarg	*/ NULL,
			&adw->sg_dmat) != 0) {
		return (ENOMEM);
        }

	adw->init_level++;

	/* Allocate our first batch of ccbs */
	mtx_lock(&adw->lock);
	if (adwallocacbs(adw) == 0) {
		mtx_unlock(&adw->lock);
		return (ENOMEM);
	}

	/* Download the microcode configuration and start the chip. */
	if (adw_init_chip(adw, scsicfg1) != 0) {
		mtx_unlock(&adw->lock);
		return (ENXIO);
	}

	printf("Queue Depth %d\n", adw->max_acbs);
	mtx_unlock(&adw->lock);

	return (0);
}
1118
1119/*
1120 * Attach all the sub-devices we can find
1121 */
1122int
1123adw_attach(struct adw_softc *adw)
1124{
1125	struct ccb_setasync csa;
1126	struct cam_devq *devq;
1127	int error;
1128
1129	/* Hook up our interrupt handler */
1130	error = bus_setup_intr(adw->device, adw->irq,
1131	    INTR_TYPE_CAM | INTR_ENTROPY | INTR_MPSAFE, NULL, adw_intr, adw,
1132	    &adw->ih);
1133	if (error != 0) {
1134		device_printf(adw->device, "bus_setup_intr() failed: %d\n",
1135			      error);
1136		return (error);
1137	}
1138
1139	/* Start the Risc processor now that we are fully configured. */
1140	adw_outw(adw, ADW_RISC_CSR, ADW_RISC_CSR_RUN);
1141
1142	/*
1143	 * Create the device queue for our SIM.
1144	 */
1145	devq = cam_simq_alloc(adw->max_acbs);
1146	if (devq == NULL)
1147		return (ENOMEM);
1148
1149	/*
1150	 * Construct our SIM entry.
1151	 */
1152	adw->sim = cam_sim_alloc(adw_action, adw_poll, "adw", adw,
1153	    device_get_unit(adw->device), &adw->lock, 1, adw->max_acbs, devq);
1154	if (adw->sim == NULL)
1155		return (ENOMEM);
1156
1157	/*
1158	 * Register the bus.
1159	 */
1160	mtx_lock(&adw->lock);
1161	if (xpt_bus_register(adw->sim, adw->device, 0) != CAM_SUCCESS) {
1162		cam_sim_free(adw->sim, /*free devq*/TRUE);
1163		error = ENOMEM;
1164		goto fail;
1165	}
1166
1167	if (xpt_create_path(&adw->path, /*periph*/NULL, cam_sim_path(adw->sim),
1168			    CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD)
1169	   == CAM_REQ_CMP) {
1170		xpt_setup_ccb(&csa.ccb_h, adw->path, /*priority*/5);
1171		csa.ccb_h.func_code = XPT_SASYNC_CB;
1172		csa.event_enable = AC_LOST_DEVICE;
1173		csa.callback = adw_async;
1174		csa.callback_arg = adw;
1175		xpt_action((union ccb *)&csa);
1176	}
1177
1178fail:
1179	mtx_unlock(&adw->lock);
1180	return (error);
1181}
1182
1183void
1184adw_intr(void *arg)
1185{
1186	struct	adw_softc *adw;
1187
1188	adw = arg;
1189	mtx_lock(&adw->lock);
1190	adw_intr_locked(adw);
1191	mtx_unlock(&adw->lock);
1192}
1193
/*
 * Locked interrupt worker.  Acknowledges the chip interrupt, handles
 * asynchronous microcode events, then walks the response queue
 * completing finished commands.  Caller must hold adw->lock.
 */
void
adw_intr_locked(struct adw_softc *adw)
{
	u_int	int_stat;

	/* Not our interrupt: the chip isn't asserting a host interrupt. */
	if ((adw_inw(adw, ADW_CTRL_REG) & ADW_CTRL_REG_HOST_INTR) == 0)
		return;

	/* Reading the register clears the interrupt. */
	int_stat = adw_inb(adw, ADW_INTR_STATUS_REG);

	if ((int_stat & ADW_INTR_STATUS_INTRB) != 0) {
		u_int intrb_code;

		/* Async Microcode Event */
		intrb_code = adw_lram_read_8(adw, ADW_MC_INTRB_CODE);
		switch (intrb_code) {
		case ADW_ASYNC_CARRIER_READY_FAILURE:
			/*
			 * The RISC missed our update of
			 * the commandq.
			 */
			if (LIST_FIRST(&adw->pending_ccbs) != NULL)
				adw_tickle_risc(adw, ADW_TICKLE_A);
			break;
		case ADW_ASYNC_SCSI_BUS_RESET_DET:
			/*
			 * The firmware detected a SCSI Bus reset.
			 */
			device_printf(adw->device, "Someone Reset the Bus\n");
			adw_handle_bus_reset(adw, /*initiated*/FALSE);
			break;
		case ADW_ASYNC_RDMA_FAILURE:
			/*
			 * Handle RDMA failure by resetting the
			 * SCSI Bus and chip.
			 */
#if 0 /* XXX */
			AdvResetChipAndSB(adv_dvc_varp);
#endif
			break;

		case ADW_ASYNC_HOST_SCSI_BUS_RESET:
			/*
			 * Host generated SCSI bus reset occurred.
			 */
			adw_handle_bus_reset(adw, /*initiated*/TRUE);
			break;
		default:
			printf("adw_intr: unknown async code 0x%x\n",
			       intrb_code);
			break;
		}
	}

	/*
	 * Run down the RequestQ.
	 */
	while ((adw->responseq->next_ba & ADW_RQ_DONE) != 0) {
		struct adw_carrier *free_carrier;
		struct acb *acb;
		union ccb *ccb;

#if 0
		printf("0x%x, 0x%x, 0x%x, 0x%x\n",
		       adw->responseq->carr_offset,
		       adw->responseq->carr_ba,
		       adw->responseq->areq_ba,
		       adw->responseq->next_ba);
#endif
		/*
		 * The firmware copies the adw_scsi_req_q.acb_baddr
		 * field into the areq_ba field of the carrier.
		 */
		acb = acbbotov(adw, adw->responseq->areq_ba);

		/*
		 * The least significant four bits of the next_ba
		 * field are used as flags.  Mask them out and then
		 * advance through the list.
		 */
		free_carrier = adw->responseq;
		adw->responseq =
		    carrierbotov(adw, free_carrier->next_ba & ADW_NEXT_BA_MASK);
		free_carrier->next_ba = adw->free_carriers->carr_offset;
		adw->free_carriers = free_carrier;

		/* Process CCB */
		ccb = acb->ccb;
		/* The command completed; cancel its timeout handler. */
		callout_stop(&acb->timer);
		if ((ccb->ccb_h.flags & CAM_DIR_MASK) != CAM_DIR_NONE) {
			bus_dmasync_op_t op;

			/* Sync and unload the data buffer mapping. */
			if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN)
				op = BUS_DMASYNC_POSTREAD;
			else
				op = BUS_DMASYNC_POSTWRITE;
			bus_dmamap_sync(adw->buffer_dmat, acb->dmamap, op);
			bus_dmamap_unload(adw->buffer_dmat, acb->dmamap);
			ccb->csio.resid = acb->queue.data_cnt;
		} else
			ccb->csio.resid = 0;

		/* Common Cases inline... */
		if (acb->queue.host_status == QHSTA_NO_ERROR
		 && (acb->queue.done_status == QD_NO_ERROR
		  || acb->queue.done_status == QD_WITH_ERROR)) {
			ccb->csio.scsi_status = acb->queue.scsi_status;
			ccb->ccb_h.status = 0;
			switch (ccb->csio.scsi_status) {
			case SCSI_STATUS_OK:
				ccb->ccb_h.status |= CAM_REQ_CMP;
				break;
			case SCSI_STATUS_CHECK_COND:
			case SCSI_STATUS_CMD_TERMINATED:
				/* Autosense data was collected by the chip. */
				bcopy(&acb->sense_data, &ccb->csio.sense_data,
				      ccb->csio.sense_len);
				ccb->ccb_h.status |= CAM_AUTOSNS_VALID;
				ccb->csio.sense_resid = acb->queue.sense_len;
				/* FALLTHROUGH */
			default:
				ccb->ccb_h.status |= CAM_SCSI_STATUS_ERROR
						  |  CAM_DEV_QFRZN;
				xpt_freeze_devq(ccb->ccb_h.path, /*count*/1);
				break;
			}
			adwfreeacb(adw, acb);
			xpt_done(ccb);
		} else {
			/* Uncommon error paths are handled out of line. */
			adwprocesserror(adw, acb);
		}
	}
}
1327
/*
 * Translate a failed ACB's firmware host/done status into the
 * corresponding CAM status, then complete the CCB.  Called from
 * adw_intr_locked() for any command that did not complete cleanly.
 */
static void
adwprocesserror(struct adw_softc *adw, struct acb *acb)
{
	union ccb *ccb;

	ccb = acb->ccb;
	if (acb->queue.done_status == QD_ABORTED_BY_HOST) {
		ccb->ccb_h.status = CAM_REQ_ABORTED;
	} else {

		switch (acb->queue.host_status) {
		case QHSTA_M_SEL_TIMEOUT:
			ccb->ccb_h.status = CAM_SEL_TIMEOUT;
			break;
		case QHSTA_M_SXFR_OFF_UFLW:
		case QHSTA_M_SXFR_OFF_OFLW:
		case QHSTA_M_DATA_OVER_RUN:
			ccb->ccb_h.status = CAM_DATA_RUN_ERR;
			break;
		case QHSTA_M_SXFR_DESELECTED:
		case QHSTA_M_UNEXPECTED_BUS_FREE:
			ccb->ccb_h.status = CAM_UNEXP_BUSFREE;
			break;
		case QHSTA_M_SCSI_BUS_RESET:
		case QHSTA_M_SCSI_BUS_RESET_UNSOL:
			ccb->ccb_h.status = CAM_SCSI_BUS_RESET;
			break;
		case QHSTA_M_BUS_DEVICE_RESET:
			ccb->ccb_h.status = CAM_BDR_SENT;
			break;
		case QHSTA_M_QUEUE_ABORTED:
			/* BDR or Bus Reset */
			xpt_print_path(adw->path);
			printf("Saw Queue Aborted\n");
			/* Report whichever reset we last delivered. */
			ccb->ccb_h.status = adw->last_reset;
			break;
		case QHSTA_M_SXFR_SDMA_ERR:
		case QHSTA_M_SXFR_SXFR_PERR:
		case QHSTA_M_RDMA_PERR:
			ccb->ccb_h.status = CAM_UNCOR_PARITY;
			break;
		case QHSTA_M_WTM_TIMEOUT:
		case QHSTA_M_SXFR_WD_TMO:
		{
			/* The SCSI bus hung in a phase */
			xpt_print_path(adw->path);
			printf("Watch Dog timer expired.  Resetting bus\n");
			/*
			 * NOTE: status is intentionally left unchanged here;
			 * the bus reset will requeue/complete the command.
			 */
			adw_reset_bus(adw);
			break;
		}
		case QHSTA_M_SXFR_XFR_PH_ERR:
			ccb->ccb_h.status = CAM_SEQUENCE_FAIL;
			break;
		case QHSTA_M_SXFR_UNKNOWN_ERROR:
			break;
		case QHSTA_M_BAD_CMPL_STATUS_IN:
			/* No command complete after a status message */
			ccb->ccb_h.status = CAM_SEQUENCE_FAIL;
			break;
		case QHSTA_M_AUTO_REQ_SENSE_FAIL:
			ccb->ccb_h.status = CAM_AUTOSENSE_FAIL;
			break;
		case QHSTA_M_INVALID_DEVICE:
			ccb->ccb_h.status = CAM_PATH_INVALID;
			break;
		case QHSTA_M_NO_AUTO_REQ_SENSE:
			/*
			 * User didn't request sense, but we got a
			 * check condition.
			 */
			ccb->csio.scsi_status = acb->queue.scsi_status;
			ccb->ccb_h.status = CAM_SCSI_STATUS_ERROR;
			break;
		default:
			panic("%s: Unhandled Host status error %x",
			    device_get_nameunit(adw->device),
			    acb->queue.host_status);
			/* NOTREACHED */
		}
	}
	/*
	 * If this command was part of timeout recovery and died because
	 * of the reset we delivered, report the original timeout instead.
	 */
	if ((acb->state & ACB_RECOVERY_ACB) != 0) {
		if (ccb->ccb_h.status == CAM_SCSI_BUS_RESET
		 || ccb->ccb_h.status == CAM_BDR_SENT)
		 	ccb->ccb_h.status = CAM_CMD_TIMEOUT;
	}
	if (ccb->ccb_h.status != CAM_REQ_CMP) {
		/* Freeze the devq so the error is seen before new I/O. */
		xpt_freeze_devq(ccb->ccb_h.path, /*count*/1);
		ccb->ccb_h.status |= CAM_DEV_QFRZN;
	}
	adwfreeacb(adw, acb);
	xpt_done(ccb);
}
1420
/*
 * Command timeout handler (per-ACB callout).  Attempts recovery by
 * sending a Bus Device Reset to the target; if that idle command
 * fails, escalates to a full SCSI bus reset.  Runs with adw->lock
 * held (asserted below).
 */
static void
adwtimeout(void *arg)
{
	struct acb	     *acb;
	union  ccb	     *ccb;
	struct adw_softc     *adw;
	adw_idle_cmd_status_t status;
	int		      target_id;

	acb = (struct acb *)arg;
	ccb = acb->ccb;
	adw = (struct adw_softc *)ccb->ccb_h.ccb_adw_ptr;
	xpt_print_path(ccb->ccb_h.path);
	printf("ACB %p - timed out\n", (void *)acb);

	mtx_assert(&adw->lock, MA_OWNED);

	/* The command may have completed while the callout was firing. */
	if ((acb->state & ACB_ACTIVE) == 0) {
		xpt_print_path(ccb->ccb_h.path);
		printf("ACB %p - timed out CCB already completed\n",
		       (void *)acb);
		return;
	}

	/* Mark this ACB so its eventual completion reports a timeout. */
	acb->state |= ACB_RECOVERY_ACB;
	target_id = ccb->ccb_h.target_id;

	/* Attempt a BDR first */
	status = adw_idle_cmd_send(adw, ADW_IDLE_CMD_DEVICE_RESET,
				   ccb->ccb_h.target_id);
	if (status == ADW_IDLE_CMD_SUCCESS) {
		device_printf(adw->device,
		    "BDR Delivered.  No longer in timeout\n");
		adw_handle_device_reset(adw, target_id);
	} else {
		/* BDR failed; fall back to resetting the whole bus. */
		adw_reset_bus(adw);
		xpt_print_path(adw->path);
		printf("Bus Reset Delivered.  No longer in timeout\n");
	}
}
1461
1462static void
1463adw_handle_device_reset(struct adw_softc *adw, u_int target)
1464{
1465	struct cam_path *path;
1466	cam_status error;
1467
1468	error = xpt_create_path(&path, /*periph*/NULL, cam_sim_path(adw->sim),
1469				target, CAM_LUN_WILDCARD);
1470
1471	if (error == CAM_REQ_CMP) {
1472		xpt_async(AC_SENT_BDR, path, NULL);
1473		xpt_free_path(path);
1474	}
1475	adw->last_reset = CAM_BDR_SENT;
1476}
1477
/*
 * Handle a SCSI bus reset, either one we initiated or one detected
 * on the wire.  For host-initiated resets, re-assert the reset line
 * long enough to satisfy the SCSI spec; for external resets, notify
 * CAM immediately.  Records the reset type in adw->last_reset.
 */
static void
adw_handle_bus_reset(struct adw_softc *adw, int initiated)
{
	if (initiated) {
		/*
		 * The microcode currently sets the SCSI Bus Reset signal
		 * while handling the AscSendIdleCmd() IDLE_CMD_SCSI_RESET
		 * command above.  But the SCSI Bus Reset Hold Time in the
		 * microcode is not deterministic (it may in fact be for less
		 * than the SCSI Spec. minimum of 25 us).  Therefore on return
		 * the Adv Library sets the SCSI Bus Reset signal for
		 * ADW_SCSI_RESET_HOLD_TIME_US, which is defined to be greater
		 * than 25 us.
		 */
		u_int scsi_ctrl;

	    	scsi_ctrl = adw_inw(adw, ADW_SCSI_CTRL) & ~ADW_SCSI_CTRL_RSTOUT;
		adw_outw(adw, ADW_SCSI_CTRL, scsi_ctrl | ADW_SCSI_CTRL_RSTOUT);
		DELAY(ADW_SCSI_RESET_HOLD_TIME_US);
		adw_outw(adw, ADW_SCSI_CTRL, scsi_ctrl);

		/*
		 * We will perform the async notification when the
		 * SCSI Reset interrupt occurs.
		 */
	} else
		xpt_async(AC_BUS_RESET, adw->path, NULL);
	adw->last_reset = CAM_SCSI_BUS_RESET;
}
/* This driver requires the CAM transport layer module. */
MODULE_DEPEND(adw, cam, 1, 1, 1);
1508
1509