/*-
 * Generic driver for the Advanced Systems Inc. SCSI controllers
 * Product specific probe and attach routines can be found in:
 *
 * i386/isa/adv_isa.c	ABP5140, ABP542, ABP5150, ABP842, ABP852
 * i386/eisa/adv_eisa.c	ABP742, ABP752
 * pci/adv_pci.c	ABP920, ABP930, ABP930U, ABP930UA, ABP940, ABP940U,
 *			ABP940UA, ABP950, ABP960, ABP960U, ABP960UA,
 *			ABP970, ABP970U
 *
 * Copyright (c) 1996-2000 Justin Gibbs.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions, and the following disclaimer,
 *    without modification, immediately at the beginning of the file.
 * 2. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
/*-
 * Ported from:
 * advansys.c - Linux Host Driver for AdvanSys SCSI Adapters
 *
 * Copyright (c) 1995-1997 Advanced System Products, Inc.
 * All Rights Reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that redistributions of source
 * code retain the above copyright notice and this comment without
 * modification.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: head/sys/dev/advansys/advansys.c 139749 2005-01-06 01:43:34Z imp $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/mutex.h>

#include <machine/bus_pio.h>
#include <machine/bus.h>
#include <machine/resource.h>
#include <sys/bus.h>
#include <sys/rman.h>

#include <cam/cam.h>
#include <cam/cam_ccb.h>
#include <cam/cam_sim.h>
#include <cam/cam_xpt_sim.h>
#include <cam/cam_xpt_periph.h>
#include <cam/cam_debug.h>

#include <cam/scsi/scsi_all.h>
#include <cam/scsi/scsi_message.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/pmap.h>

#include <dev/advansys/advansys.h>

static void	adv_action(struct cam_sim *sim, union ccb *ccb);
static void	adv_execute_ccb(void *arg, bus_dma_segment_t *dm_segs,
				int nsegments, int error);
static void	adv_poll(struct cam_sim *sim);
static void	adv_run_doneq(struct adv_softc *adv);
static struct adv_ccb_info *
		adv_alloc_ccb_info(struct adv_softc *adv);
static void	adv_destroy_ccb_info(struct adv_softc *adv,
				     struct adv_ccb_info *cinfo);
static __inline struct adv_ccb_info *
		adv_get_ccb_info(struct adv_softc *adv);
static __inline void adv_free_ccb_info(struct adv_softc *adv,
				       struct adv_ccb_info *cinfo);
static __inline void adv_set_state(struct adv_softc *adv, adv_state state);
static __inline void adv_clear_state(struct adv_softc *adv, union ccb* ccb);
static void adv_clear_state_really(struct adv_softc *adv, union ccb* ccb);

static __inline struct adv_ccb_info *
adv_get_ccb_info(struct adv_softc *adv)
{
	struct adv_ccb_info *cinfo;
	int opri;

	opri = splcam();
	if ((cinfo = SLIST_FIRST(&adv->free_ccb_infos)) != NULL) {
		SLIST_REMOVE_HEAD(&adv->free_ccb_infos, links);
	} else {
		cinfo = adv_alloc_ccb_info(adv);
	}
	splx(opri);

	return (cinfo);
}

static __inline void
adv_free_ccb_info(struct adv_softc *adv, struct adv_ccb_info *cinfo)
{
	int opri;

	opri = splcam();
	cinfo->state = ACCB_FREE;
	SLIST_INSERT_HEAD(&adv->free_ccb_infos, cinfo, links);
	splx(opri);
}
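
#if 0
/*
 * Illustrative sketch (not compiled): how a caller is expected to pair
 * adv_get_ccb_info() with adv_free_ccb_info().  Both helpers raise
 * splcam() internally, so the free list needs no extra protection from
 * the caller.  "example_start_io" is a hypothetical name, not part of
 * this driver.
 */
static int
example_start_io(struct adv_softc *adv, union ccb *ccb)
{
	struct adv_ccb_info *cinfo;

	cinfo = adv_get_ccb_info(adv);	/* reuses or allocates an entry */
	if (cinfo == NULL)
		return (ENOMEM);	/* dmamap creation failed */
	ccb->ccb_h.ccb_cinfo_ptr = cinfo;
	cinfo->ccb = ccb;
	/* ... issue the request; when it completes: */
	adv_free_ccb_info(adv, cinfo);	/* marks ACCB_FREE, back on list */
	return (0);
}
#endif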

static __inline void
adv_set_state(struct adv_softc *adv, adv_state state)
{
	if (adv->state == 0)
		xpt_freeze_simq(adv->sim, /*count*/1);
	adv->state |= state;
}

static __inline void
adv_clear_state(struct adv_softc *adv, union ccb* ccb)
{
	if (adv->state != 0)
		adv_clear_state_really(adv, ccb);
}

static void
adv_clear_state_really(struct adv_softc *adv, union ccb* ccb)
{
	if ((adv->state & ADV_BUSDMA_BLOCK_CLEARED) != 0)
		adv->state &= ~(ADV_BUSDMA_BLOCK_CLEARED|ADV_BUSDMA_BLOCK);
	if ((adv->state & ADV_RESOURCE_SHORTAGE) != 0) {
		int openings;

		openings = adv->max_openings - adv->cur_active - ADV_MIN_FREE_Q;
		if (openings >= adv->openings_needed) {
			adv->state &= ~ADV_RESOURCE_SHORTAGE;
			adv->openings_needed = 0;
		}
	}

	if ((adv->state & ADV_IN_TIMEOUT) != 0) {
		struct adv_ccb_info *cinfo;

		cinfo = (struct adv_ccb_info *)ccb->ccb_h.ccb_cinfo_ptr;
		if ((cinfo->state & ACCB_RECOVERY_CCB) != 0) {
			struct ccb_hdr *ccb_h;

			/*
			 * We now traverse our list of pending CCBs
			 * and reinstate their timeouts.
			 */
			ccb_h = LIST_FIRST(&adv->pending_ccbs);
			while (ccb_h != NULL) {
				ccb_h->timeout_ch =
				    timeout(adv_timeout, (caddr_t)ccb_h,
					    (ccb_h->timeout * hz) / 1000);
				ccb_h = LIST_NEXT(ccb_h, sim_links.le);
			}
			adv->state &= ~ADV_IN_TIMEOUT;
			printf("%s: No longer in timeout\n", adv_name(adv));
		}
	}
	if (adv->state == 0)
		ccb->ccb_h.status |= CAM_RELEASE_SIMQ;
}

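/*
 * Callback for bus_dmamap_load() on single-segment, static mappings
 * (e.g. the sense buffer array loaded in adv_attach()): it simply
 * records the bus address of the lone segment in the caller-supplied
 * bus_addr_t.
 */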
void
adv_map(void *arg, bus_dma_segment_t *segs, int nseg, int error)
{
	bus_addr_t* physaddr;

	physaddr = (bus_addr_t*)arg;
	*physaddr = segs->ds_addr;
}

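/*
 * Format this controller's name into a static buffer.  The buffer is
 * shared by all callers and overwritten on each call, so the result
 * must be consumed before adv_name() is called again.
 */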
char *
adv_name(struct adv_softc *adv)
{
	static char name[10];

	snprintf(name, sizeof(name), "adv%d", adv->unit);
	return (name);
}

static void
adv_action(struct cam_sim *sim, union ccb *ccb)
{
	struct adv_softc *adv;

	CAM_DEBUG(ccb->ccb_h.path, CAM_DEBUG_TRACE, ("adv_action\n"));

	adv = (struct adv_softc *)cam_sim_softc(sim);

	switch (ccb->ccb_h.func_code) {
	/* Common cases first */
	case XPT_SCSI_IO:	/* Execute the requested I/O operation */
	{
		struct	ccb_hdr *ccb_h;
		struct	ccb_scsiio *csio;
		struct	adv_ccb_info *cinfo;

		ccb_h = &ccb->ccb_h;
		csio = &ccb->csio;
		cinfo = adv_get_ccb_info(adv);
		if (cinfo == NULL)
			panic("XXX Handle CCB info error!!!");

		ccb_h->ccb_cinfo_ptr = cinfo;
		cinfo->ccb = ccb;

		/* Only use S/G if there is a transfer */
		if ((ccb_h->flags & CAM_DIR_MASK) != CAM_DIR_NONE) {
			if ((ccb_h->flags & CAM_SCATTER_VALID) == 0) {
				/*
				 * We've been given a pointer
				 * to a single buffer
				 */
				if ((ccb_h->flags & CAM_DATA_PHYS) == 0) {
					int s;
					int error;

					s = splsoftvm();
					error =
					    bus_dmamap_load(adv->buffer_dmat,
							    cinfo->dmamap,
							    csio->data_ptr,
							    csio->dxfer_len,
							    adv_execute_ccb,
							    csio, /*flags*/0);
					if (error == EINPROGRESS) {
						/*
						 * So as to maintain ordering,
						 * freeze the controller queue
						 * until our mapping is
						 * returned.
						 */
						adv_set_state(adv,
							      ADV_BUSDMA_BLOCK);
					}
					splx(s);
				} else {
					struct bus_dma_segment seg;

					/* Pointer to physical buffer */
					seg.ds_addr =
					     (bus_addr_t)csio->data_ptr;
					seg.ds_len = csio->dxfer_len;
					adv_execute_ccb(csio, &seg, 1, 0);
				}
			} else {
				struct bus_dma_segment *segs;
				if ((ccb_h->flags & CAM_DATA_PHYS) != 0)
					panic("adv_setup_data - Physical "
					      "segment pointers unsupported");

				if ((ccb_h->flags & CAM_SG_LIST_PHYS) == 0)
					panic("adv_setup_data - Virtual "
					      "segment addresses unsupported");

				/* Just use the segments provided */
				segs = (struct bus_dma_segment *)csio->data_ptr;
				adv_execute_ccb(ccb, segs, csio->sglist_cnt, 0);
			}
		} else {
			adv_execute_ccb(ccb, NULL, 0, 0);
		}
		break;
	}
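	/*
	 * Note that three data paths feed adv_execute_ccb() above: a
	 * virtual single buffer (mapped via bus_dmamap_load(), which
	 * may defer the callback with EINPROGRESS, in which case we
	 * freeze the queue to preserve ordering), a physical single
	 * buffer (wrapped in a synthetic one-entry segment), and a
	 * pre-built physical S/G list (passed through untouched).
	 */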
	case XPT_RESET_DEV:	/* Bus Device Reset the specified SCSI device */
	case XPT_TARGET_IO:	/* Execute target I/O request */
	case XPT_ACCEPT_TARGET_IO:	/* Accept Host Target Mode CDB */
	case XPT_CONT_TARGET_IO:	/* Continue Host Target I/O Connection*/
	case XPT_EN_LUN:		/* Enable LUN as a target */
	case XPT_ABORT:			/* Abort the specified CCB */
		/* XXX Implement */
		ccb->ccb_h.status = CAM_REQ_INVALID;
		xpt_done(ccb);
		break;
	case XPT_SET_TRAN_SETTINGS:
	{
		struct	 ccb_trans_settings *cts;
		target_bit_vector targ_mask;
		struct adv_transinfo *tconf;
		u_int	 update_type;
		int	 s;

		cts = &ccb->cts;
		targ_mask = ADV_TID_TO_TARGET_MASK(cts->ccb_h.target_id);
		update_type = 0;

		/*
		 * The user must specify which type of settings he wishes
		 * to change.
		 */
		if (((cts->flags & CCB_TRANS_CURRENT_SETTINGS) != 0)
		 && ((cts->flags & CCB_TRANS_USER_SETTINGS) == 0)) {
			tconf = &adv->tinfo[cts->ccb_h.target_id].current;
			update_type |= ADV_TRANS_GOAL;
		} else if (((cts->flags & CCB_TRANS_USER_SETTINGS) != 0)
			&& ((cts->flags & CCB_TRANS_CURRENT_SETTINGS) == 0)) {
			tconf = &adv->tinfo[cts->ccb_h.target_id].user;
			update_type |= ADV_TRANS_USER;
		} else {
			ccb->ccb_h.status = CAM_REQ_INVALID;
			xpt_done(ccb);
			break;
		}

		s = splcam();

		if ((update_type & ADV_TRANS_GOAL) != 0) {
			if ((cts->valid & CCB_TRANS_DISC_VALID) != 0) {
				if ((cts->flags & CCB_TRANS_DISC_ENB) != 0)
					adv->disc_enable |= targ_mask;
				else
					adv->disc_enable &= ~targ_mask;
				adv_write_lram_8(adv, ADVV_DISC_ENABLE_B,
						 adv->disc_enable);
			}

			if ((cts->valid & CCB_TRANS_TQ_VALID) != 0) {
				if ((cts->flags & CCB_TRANS_TAG_ENB) != 0)
					adv->cmd_qng_enabled |= targ_mask;
				else
					adv->cmd_qng_enabled &= ~targ_mask;
			}
		}

		if ((update_type & ADV_TRANS_USER) != 0) {
			if ((cts->valid & CCB_TRANS_DISC_VALID) != 0) {
				if ((cts->flags & CCB_TRANS_DISC_ENB) != 0)
					adv->user_disc_enable |= targ_mask;
				else
					adv->user_disc_enable &= ~targ_mask;
			}

			if ((cts->valid & CCB_TRANS_TQ_VALID) != 0) {
				if ((cts->flags & CCB_TRANS_TAG_ENB) != 0)
					adv->user_cmd_qng_enabled |= targ_mask;
				else
					adv->user_cmd_qng_enabled &= ~targ_mask;
			}
		}

		/*
		 * If the user specifies either the sync rate, or offset,
		 * but not both, the unspecified parameter defaults to its
		 * current value in transfer negotiations.
		 */
		if (((cts->valid & CCB_TRANS_SYNC_RATE_VALID) != 0)
		 || ((cts->valid & CCB_TRANS_SYNC_OFFSET_VALID) != 0)) {
			/*
			 * If the user provided a sync rate but no offset,
			 * use the current offset.
			 */
			if ((cts->valid & CCB_TRANS_SYNC_OFFSET_VALID) == 0)
				cts->sync_offset = tconf->offset;

			/*
			 * If the user provided an offset but no sync rate,
			 * use the current sync rate.
			 */
			if ((cts->valid & CCB_TRANS_SYNC_RATE_VALID) == 0)
				cts->sync_period = tconf->period;

			adv_period_offset_to_sdtr(adv, &cts->sync_period,
						  &cts->sync_offset,
						  cts->ccb_h.target_id);

			adv_set_syncrate(adv, /*struct cam_path */NULL,
					 cts->ccb_h.target_id, cts->sync_period,
					 cts->sync_offset, update_type);
		}

		splx(s);
		ccb->ccb_h.status = CAM_REQ_CMP;
		xpt_done(ccb);
		break;
	}
	case XPT_GET_TRAN_SETTINGS:
	/* Get default/user set transfer settings for the target */
	{
		struct ccb_trans_settings *cts;
		struct adv_transinfo *tconf;
		target_bit_vector target_mask;
		int s;

		cts = &ccb->cts;
		target_mask = ADV_TID_TO_TARGET_MASK(cts->ccb_h.target_id);

		cts->flags &= ~(CCB_TRANS_DISC_ENB|CCB_TRANS_TAG_ENB);

		s = splcam();
		if ((cts->flags & CCB_TRANS_CURRENT_SETTINGS) != 0) {
			tconf = &adv->tinfo[cts->ccb_h.target_id].current;
			if ((adv->disc_enable & target_mask) != 0)
				cts->flags |= CCB_TRANS_DISC_ENB;
			if ((adv->cmd_qng_enabled & target_mask) != 0)
				cts->flags |= CCB_TRANS_TAG_ENB;
		} else {
			tconf = &adv->tinfo[cts->ccb_h.target_id].user;
			if ((adv->user_disc_enable & target_mask) != 0)
				cts->flags |= CCB_TRANS_DISC_ENB;
			if ((adv->user_cmd_qng_enabled & target_mask) != 0)
				cts->flags |= CCB_TRANS_TAG_ENB;
		}

		cts->sync_period = tconf->period;
		cts->sync_offset = tconf->offset;
		splx(s);

		cts->bus_width = MSG_EXT_WDTR_BUS_8_BIT;
		cts->valid = CCB_TRANS_SYNC_RATE_VALID
			   | CCB_TRANS_SYNC_OFFSET_VALID
			   | CCB_TRANS_BUS_WIDTH_VALID
			   | CCB_TRANS_DISC_VALID
			   | CCB_TRANS_TQ_VALID;
		ccb->ccb_h.status = CAM_REQ_CMP;
		xpt_done(ccb);
		break;
	}
	case XPT_CALC_GEOMETRY:
	{
		int	  extended;

		extended = (adv->control & ADV_CNTL_BIOS_GT_1GB) != 0;
		cam_calc_geometry(&ccb->ccg, extended);
		xpt_done(ccb);
		break;
	}
	case XPT_RESET_BUS:		/* Reset the specified SCSI bus */
	{
		int s;

		s = splcam();
		adv_stop_execution(adv);
		adv_reset_bus(adv, /*initiate_reset*/TRUE);
		adv_start_execution(adv);
		splx(s);

		ccb->ccb_h.status = CAM_REQ_CMP;
		xpt_done(ccb);
		break;
	}
	case XPT_TERM_IO:		/* Terminate the I/O process */
		/* XXX Implement */
		ccb->ccb_h.status = CAM_REQ_INVALID;
		xpt_done(ccb);
		break;
	case XPT_PATH_INQ:		/* Path routing inquiry */
	{
		struct ccb_pathinq *cpi = &ccb->cpi;

		cpi->version_num = 1; /* XXX??? */
		cpi->hba_inquiry = PI_SDTR_ABLE|PI_TAG_ABLE;
		cpi->target_sprt = 0;
		cpi->hba_misc = 0;
		cpi->hba_eng_cnt = 0;
		cpi->max_target = 7;
		cpi->max_lun = 7;
		cpi->initiator_id = adv->scsi_id;
		cpi->bus_id = cam_sim_bus(sim);
		cpi->base_transfer_speed = 3300;
		strncpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN);
		strncpy(cpi->hba_vid, "Advansys", HBA_IDLEN);
		strncpy(cpi->dev_name, cam_sim_name(sim), DEV_IDLEN);
		cpi->unit_number = cam_sim_unit(sim);
		cpi->ccb_h.status = CAM_REQ_CMP;
		xpt_done(ccb);
		break;
	}
	default:
		ccb->ccb_h.status = CAM_REQ_INVALID;
		xpt_done(ccb);
		break;
	}
}

/*
 * Currently, the output of bus_dmamap_load() suits our needs just
 * fine, but should it change, we'd need to do something here.
 */
#define adv_fixup_dmasegs(adv, dm_segs) (struct adv_sg_entry *)(dm_segs)

static void
adv_execute_ccb(void *arg, bus_dma_segment_t *dm_segs,
		int nsegments, int error)
{
	struct	ccb_scsiio *csio;
	struct	ccb_hdr *ccb_h;
	struct	cam_sim *sim;
	struct	adv_softc *adv;
	struct	adv_ccb_info *cinfo;
	struct	adv_scsi_q scsiq;
	struct	adv_sg_head sghead;
	int	s;

	csio = (struct ccb_scsiio *)arg;
	ccb_h = &csio->ccb_h;
	sim = xpt_path_sim(ccb_h->path);
	adv = (struct adv_softc *)cam_sim_softc(sim);
	cinfo = (struct adv_ccb_info *)csio->ccb_h.ccb_cinfo_ptr;

	/*
	 * Setup our done routine to release the simq on
	 * the next ccb that completes.
	 */
	if ((adv->state & ADV_BUSDMA_BLOCK) != 0)
		adv->state |= ADV_BUSDMA_BLOCK_CLEARED;

	if ((ccb_h->flags & CAM_CDB_POINTER) != 0) {
		if ((ccb_h->flags & CAM_CDB_PHYS) == 0) {
			/* XXX Need phystovirt!!!! */
			/* How about pmap_kenter??? */
			scsiq.cdbptr = csio->cdb_io.cdb_ptr;
		} else {
			scsiq.cdbptr = csio->cdb_io.cdb_ptr;
		}
	} else {
		scsiq.cdbptr = csio->cdb_io.cdb_bytes;
	}
	/*
	 * Build up the request
	 */
	scsiq.q1.status = 0;
	scsiq.q1.q_no = 0;
	scsiq.q1.cntl = 0;
	scsiq.q1.sg_queue_cnt = 0;
	scsiq.q1.target_id = ADV_TID_TO_TARGET_MASK(ccb_h->target_id);
	scsiq.q1.target_lun = ccb_h->target_lun;
	scsiq.q1.sense_len = csio->sense_len;
	scsiq.q1.extra_bytes = 0;
	scsiq.q2.ccb_index = cinfo - adv->ccb_infos;
	scsiq.q2.target_ix = ADV_TIDLUN_TO_IX(ccb_h->target_id,
					      ccb_h->target_lun);
	scsiq.q2.flag = 0;
	scsiq.q2.cdb_len = csio->cdb_len;
	if ((ccb_h->flags & CAM_TAG_ACTION_VALID) != 0)
		scsiq.q2.tag_code = csio->tag_action;
	else
		scsiq.q2.tag_code = 0;
	scsiq.q2.vm_id = 0;

	if (nsegments != 0) {
		bus_dmasync_op_t op;

		scsiq.q1.data_addr = dm_segs->ds_addr;
		scsiq.q1.data_cnt = dm_segs->ds_len;
		if (nsegments > 1) {
			scsiq.q1.cntl |= QC_SG_HEAD;
			sghead.entry_cnt
			    = sghead.entry_to_copy
			    = nsegments;
			sghead.res = 0;
			sghead.sg_list = adv_fixup_dmasegs(adv, dm_segs);
			scsiq.sg_head = &sghead;
		} else {
			scsiq.sg_head = NULL;
		}
		if ((ccb_h->flags & CAM_DIR_MASK) == CAM_DIR_IN)
			op = BUS_DMASYNC_PREREAD;
		else
			op = BUS_DMASYNC_PREWRITE;
		bus_dmamap_sync(adv->buffer_dmat, cinfo->dmamap, op);
	} else {
		scsiq.q1.data_addr = 0;
		scsiq.q1.data_cnt = 0;
		scsiq.sg_head = NULL;
	}

	s = splcam();

	/*
	 * Last chance to check whether this CCB needs to
	 * be aborted.
	 */
	if (ccb_h->status != CAM_REQ_INPROG) {
		if (nsegments != 0)
			bus_dmamap_unload(adv->buffer_dmat, cinfo->dmamap);
		adv_clear_state(adv, (union ccb *)csio);
		adv_free_ccb_info(adv, cinfo);
		xpt_done((union ccb *)csio);
		splx(s);
		return;
	}

	if (adv_execute_scsi_queue(adv, &scsiq, csio->dxfer_len) != 0) {
		/* Temporary resource shortage */
		adv_set_state(adv, ADV_RESOURCE_SHORTAGE);
		if (nsegments != 0)
			bus_dmamap_unload(adv->buffer_dmat, cinfo->dmamap);
		csio->ccb_h.status = CAM_REQUEUE_REQ;
		adv_clear_state(adv, (union ccb *)csio);
		adv_free_ccb_info(adv, cinfo);
		xpt_done((union ccb *)csio);
		splx(s);
		return;
	}
	cinfo->state |= ACCB_ACTIVE;
	ccb_h->status |= CAM_SIM_QUEUED;
	LIST_INSERT_HEAD(&adv->pending_ccbs, ccb_h, sim_links.le);
	/* Schedule our timeout */
	ccb_h->timeout_ch =
	    timeout(adv_timeout, csio, (ccb_h->timeout * hz)/1000);
	splx(s);
}
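
#if 0
/*
 * Illustrative sketch (not compiled): the timeout scheduled above
 * converts the CCB's timeout, which CAM expresses in milliseconds,
 * into the ticks expected by timeout(9).
 */
	int ticks;

	ticks = (ccb_h->timeout * hz) / 1000;	/* e.g. 5000ms at hz=100 -> 500 ticks */
	ccb_h->timeout_ch = timeout(adv_timeout, csio, ticks);
#endif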

static struct adv_ccb_info *
adv_alloc_ccb_info(struct adv_softc *adv)
{
	int error;
	struct adv_ccb_info *cinfo;

	cinfo = &adv->ccb_infos[adv->ccb_infos_allocated];
	cinfo->state = ACCB_FREE;
	error = bus_dmamap_create(adv->buffer_dmat, /*flags*/0,
				  &cinfo->dmamap);
	if (error != 0) {
		printf("%s: Unable to allocate CCB info "
		       "dmamap - error %d\n", adv_name(adv), error);
		return (NULL);
	}
	adv->ccb_infos_allocated++;
	return (cinfo);
}

static void
adv_destroy_ccb_info(struct adv_softc *adv, struct adv_ccb_info *cinfo)
{
	bus_dmamap_destroy(adv->buffer_dmat, cinfo->dmamap);
}

void
adv_timeout(void *arg)
{
	int s;
	union ccb *ccb;
	struct adv_softc *adv;
	struct adv_ccb_info *cinfo;

	ccb = (union ccb *)arg;
	adv = (struct adv_softc *)xpt_path_sim(ccb->ccb_h.path)->softc;
	cinfo = (struct adv_ccb_info *)ccb->ccb_h.ccb_cinfo_ptr;

	xpt_print_path(ccb->ccb_h.path);
	printf("Timed out\n");

	s = splcam();
	/* Have we been taken care of already?? */
	if (cinfo == NULL || cinfo->state == ACCB_FREE) {
		splx(s);
		return;
	}

	adv_stop_execution(adv);

	if ((cinfo->state & ACCB_ABORT_QUEUED) == 0) {
		struct ccb_hdr *ccb_h;

		/*
		 * In order to simplify the recovery process, we ask the XPT
		 * layer to halt the queue of new transactions and we traverse
		 * the list of pending CCBs and remove their timeouts. This
		 * means that the driver attempts to clear only one error
		 * condition at a time.  In general, timeouts that occur
		 * close together are related anyway, so there is no benefit
		 * in attempting to handle errors in parallel.  Timeouts will
		 * be reinstated when the recovery process ends.
		 */
		adv_set_state(adv, ADV_IN_TIMEOUT);

		/* This CCB is the CCB representing our recovery actions */
		cinfo->state |= ACCB_RECOVERY_CCB|ACCB_ABORT_QUEUED;

		ccb_h = LIST_FIRST(&adv->pending_ccbs);
		while (ccb_h != NULL) {
			untimeout(adv_timeout, ccb_h, ccb_h->timeout_ch);
			ccb_h = LIST_NEXT(ccb_h, sim_links.le);
		}

		/* XXX Should send a BDR */
		/* Attempt an abort as our first tack */
		xpt_print_path(ccb->ccb_h.path);
		printf("Attempting abort\n");
		adv_abort_ccb(adv, ccb->ccb_h.target_id,
			      ccb->ccb_h.target_lun, ccb,
			      CAM_CMD_TIMEOUT, /*queued_only*/FALSE);
		ccb->ccb_h.timeout_ch =
		    timeout(adv_timeout, ccb, 2 * hz);
	} else {
		/* Our attempt to perform an abort failed, go for a reset */
		xpt_print_path(ccb->ccb_h.path);
		printf("Resetting bus\n");
		ccb->ccb_h.status &= ~CAM_STATUS_MASK;
		ccb->ccb_h.status |= CAM_CMD_TIMEOUT;
		adv_reset_bus(adv, /*initiate_reset*/TRUE);
	}
	adv_start_execution(adv);
	splx(s);
}

struct adv_softc *
adv_alloc(device_t dev, bus_space_tag_t tag, bus_space_handle_t bsh)
{
	struct adv_softc *adv = device_get_softc(dev);

	/*
	 * Allocate a storage area for us
	 */
	LIST_INIT(&adv->pending_ccbs);
	SLIST_INIT(&adv->free_ccb_infos);
	adv->dev = dev;
	adv->unit = device_get_unit(dev);
	adv->tag = tag;
	adv->bsh = bsh;

	return(adv);
}

void
adv_free(struct adv_softc *adv)
{
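	/*
	 * Teardown runs in descending init_level order; each case
	 * intentionally falls through so that all earlier init steps
	 * are undone as well.
	 */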
	switch (adv->init_level) {
	case 6:
	{
		struct adv_ccb_info *cinfo;

		while ((cinfo = SLIST_FIRST(&adv->free_ccb_infos)) != NULL) {
			SLIST_REMOVE_HEAD(&adv->free_ccb_infos, links);
			adv_destroy_ccb_info(adv, cinfo);
		}

		bus_dmamap_unload(adv->sense_dmat, adv->sense_dmamap);
	}
	case 5:
		bus_dmamem_free(adv->sense_dmat, adv->sense_buffers,
				adv->sense_dmamap);
	case 4:
		bus_dma_tag_destroy(adv->sense_dmat);
	case 3:
		bus_dma_tag_destroy(adv->buffer_dmat);
	case 2:
		bus_dma_tag_destroy(adv->parent_dmat);
	case 1:
		if (adv->ccb_infos != NULL)
			free(adv->ccb_infos, M_DEVBUF);
	case 0:
		break;
	}
}

int
adv_init(struct adv_softc *adv)
{
	struct	  adv_eeprom_config eeprom_config;
	int	  checksum, i;
	int	  max_sync;
	u_int16_t config_lsw;
	u_int16_t config_msw;

	adv_lib_init(adv);

	/*
	 * Stop script execution.
	 */
	adv_write_lram_16(adv, ADV_HALTCODE_W, 0x00FE);
	adv_stop_execution(adv);
	if (adv_stop_chip(adv) == 0 || adv_is_chip_halted(adv) == 0) {
		printf("adv%d: Unable to halt adapter. Initialization "
		       "failed\n", adv->unit);
		return (1);
	}
	ADV_OUTW(adv, ADV_REG_PROG_COUNTER, ADV_MCODE_START_ADDR);
	if (ADV_INW(adv, ADV_REG_PROG_COUNTER) != ADV_MCODE_START_ADDR) {
		printf("adv%d: Unable to set program counter. Initialization "
		       "failed\n", adv->unit);
		return (1);
	}

	config_msw = ADV_INW(adv, ADV_CONFIG_MSW);
	config_lsw = ADV_INW(adv, ADV_CONFIG_LSW);

	if ((config_msw & ADV_CFG_MSW_CLR_MASK) != 0) {
		config_msw &= ~ADV_CFG_MSW_CLR_MASK;
		/*
		 * XXX The Linux code flags this as an error,
		 * but what should we report to the user???
		 * It seems that clearing the config register
		 * makes this error recoverable.
		 */
		ADV_OUTW(adv, ADV_CONFIG_MSW, config_msw);
	}

	/* Suck in the configuration from the EEProm */
	checksum = adv_get_eeprom_config(adv, &eeprom_config);

	if (ADV_INW(adv, ADV_CHIP_STATUS) & ADV_CSW_AUTO_CONFIG) {
		/*
		 * XXX The Linux code sets a warning level for this
		 * condition, yet nothing of meaning is printed to
		 * the user.  What does this mean???
		 */
		if (adv->chip_version == 3) {
			if (eeprom_config.cfg_lsw != config_lsw)
				eeprom_config.cfg_lsw = config_lsw;
			if (eeprom_config.cfg_msw != config_msw) {
				eeprom_config.cfg_msw = config_msw;
			}
		}
	}
	if (checksum == eeprom_config.chksum) {

		/* Range/Sanity checking */
		if (eeprom_config.max_total_qng < ADV_MIN_TOTAL_QNG) {
			eeprom_config.max_total_qng = ADV_MIN_TOTAL_QNG;
		}
		if (eeprom_config.max_total_qng > ADV_MAX_TOTAL_QNG) {
			eeprom_config.max_total_qng = ADV_MAX_TOTAL_QNG;
		}
		if (eeprom_config.max_tag_qng > eeprom_config.max_total_qng) {
			eeprom_config.max_tag_qng = eeprom_config.max_total_qng;
		}
		if (eeprom_config.max_tag_qng < ADV_MIN_TAG_Q_PER_DVC) {
			eeprom_config.max_tag_qng = ADV_MIN_TAG_Q_PER_DVC;
		}
		adv->max_openings = eeprom_config.max_total_qng;
		adv->user_disc_enable = eeprom_config.disc_enable;
		adv->user_cmd_qng_enabled = eeprom_config.use_cmd_qng;
		adv->isa_dma_speed = EEPROM_DMA_SPEED(eeprom_config);
		adv->scsi_id = EEPROM_SCSIID(eeprom_config) & ADV_MAX_TID;
		EEPROM_SET_SCSIID(eeprom_config, adv->scsi_id);
		adv->control = eeprom_config.cntl;
		for (i = 0; i <= ADV_MAX_TID; i++) {
			u_int8_t sync_data;

			if ((eeprom_config.init_sdtr & (0x1 << i)) == 0)
				sync_data = 0;
			else
				sync_data = eeprom_config.sdtr_data[i];
			adv_sdtr_to_period_offset(adv,
						  sync_data,
						  &adv->tinfo[i].user.period,
						  &adv->tinfo[i].user.offset,
						  i);
		}
		config_lsw = eeprom_config.cfg_lsw;
		eeprom_config.cfg_msw = config_msw;
	} else {
		u_int8_t sync_data;

		printf("adv%d: Warning: EEPROM checksum mismatch. "
		       "Using default device parameters\n", adv->unit);

		/* Set reasonable defaults since we can't read the EEPROM */
		adv->isa_dma_speed = /*ADV_DEF_ISA_DMA_SPEED*/1;
		adv->max_openings = ADV_DEF_MAX_TOTAL_QNG;
		adv->disc_enable = TARGET_BIT_VECTOR_SET;
		adv->user_disc_enable = TARGET_BIT_VECTOR_SET;
		adv->cmd_qng_enabled = TARGET_BIT_VECTOR_SET;
		adv->user_cmd_qng_enabled = TARGET_BIT_VECTOR_SET;
		adv->scsi_id = 7;
		adv->control = 0xFFFF;

		if (adv->chip_version == ADV_CHIP_VER_PCI_ULTRA_3050)
			/* Default to no Ultra to support the 3030 */
			adv->control &= ~ADV_CNTL_SDTR_ENABLE_ULTRA;
		sync_data = ADV_DEF_SDTR_OFFSET | (ADV_DEF_SDTR_INDEX << 4);
		for (i = 0; i <= ADV_MAX_TID; i++) {
			adv_sdtr_to_period_offset(adv, sync_data,
						  &adv->tinfo[i].user.period,
						  &adv->tinfo[i].user.offset,
						  i);
		}
		config_lsw |= ADV_CFG_LSW_SCSI_PARITY_ON;
	}
	config_msw &= ~ADV_CFG_MSW_CLR_MASK;
	config_lsw |= ADV_CFG_LSW_HOST_INT_ON;
	if ((adv->type & (ADV_PCI|ADV_ULTRA)) == (ADV_PCI|ADV_ULTRA)
	 && (adv->control & ADV_CNTL_SDTR_ENABLE_ULTRA) == 0)
		/* 25ns or 10MHz */
		max_sync = 25;
	else
		/* Unlimited */
		max_sync = 0;
	for (i = 0; i <= ADV_MAX_TID; i++) {
		if (adv->tinfo[i].user.period < max_sync)
			adv->tinfo[i].user.period = max_sync;
	}

	if (adv_test_external_lram(adv) == 0) {
		if ((adv->type & (ADV_PCI|ADV_ULTRA)) == (ADV_PCI|ADV_ULTRA)) {
			eeprom_config.max_total_qng =
			    ADV_MAX_PCI_ULTRA_INRAM_TOTAL_QNG;
			eeprom_config.max_tag_qng =
			    ADV_MAX_PCI_ULTRA_INRAM_TAG_QNG;
		} else {
			eeprom_config.cfg_msw |= 0x0800;
			config_msw |= 0x0800;
			eeprom_config.max_total_qng =
			     ADV_MAX_PCI_INRAM_TOTAL_QNG;
			eeprom_config.max_tag_qng = ADV_MAX_INRAM_TAG_QNG;
		}
		adv->max_openings = eeprom_config.max_total_qng;
	}
	ADV_OUTW(adv, ADV_CONFIG_MSW, config_msw);
	ADV_OUTW(adv, ADV_CONFIG_LSW, config_lsw);
#if 0
	/*
	 * Don't write the eeprom data back for now.
	 * I'd rather not mess up the user's card.  We also don't
	 * fully sanitize the eeprom settings above for the write-back
	 * to be 100% correct.
	 */
	if (adv_set_eeprom_config(adv, &eeprom_config) != 0)
		printf("%s: WARNING! Failure writing to EEPROM.\n",
		       adv_name(adv));
#endif

	adv_set_chip_scsiid(adv, adv->scsi_id);
	if (adv_init_lram_and_mcode(adv))
		return (1);

	adv->disc_enable = adv->user_disc_enable;

	adv_write_lram_8(adv, ADVV_DISC_ENABLE_B, adv->disc_enable);
	for (i = 0; i <= ADV_MAX_TID; i++) {
		/*
		 * Start off in async mode.
		 */
		adv_set_syncrate(adv, /*struct cam_path */NULL,
				 i, /*period*/0, /*offset*/0,
				 ADV_TRANS_CUR);
		/*
		 * Enable the use of tagged commands on all targets.
		 * This allows the kernel driver to make up its own mind
		 * about when to tag queue instead of having the firmware
		 * try to second-guess the tag_code settings.
		 */
		adv_write_lram_8(adv, ADVV_MAX_DVC_QNG_BEG + i,
				 adv->max_openings);
	}
	adv_write_lram_8(adv, ADVV_USE_TAGGED_QNG_B, TARGET_BIT_VECTOR_SET);
	adv_write_lram_8(adv, ADVV_CAN_TAGGED_QNG_B, TARGET_BIT_VECTOR_SET);
	printf("adv%d: AdvanSys %s Host Adapter, SCSI ID %d, queue depth %d\n",
	       adv->unit, (adv->type & ADV_ULTRA) && (max_sync == 0)
			  ? "Ultra SCSI" : "SCSI",
	       adv->scsi_id, adv->max_openings);
	return (0);
}

void
adv_intr(void *arg)
{
	struct	  adv_softc *adv;
	u_int16_t chipstat;
	u_int16_t saved_ram_addr;
	u_int8_t  ctrl_reg;
	u_int8_t  saved_ctrl_reg;
	u_int8_t  host_flag;

	adv = (struct adv_softc *)arg;

	chipstat = ADV_INW(adv, ADV_CHIP_STATUS);

	/* Is it for us? */
	if ((chipstat & (ADV_CSW_INT_PENDING|ADV_CSW_SCSI_RESET_LATCH)) == 0)
		return;

	ctrl_reg = ADV_INB(adv, ADV_CHIP_CTRL);
	saved_ctrl_reg = ctrl_reg & (~(ADV_CC_SCSI_RESET | ADV_CC_CHIP_RESET |
				       ADV_CC_SINGLE_STEP | ADV_CC_DIAG |
				       ADV_CC_TEST));

	if ((chipstat & (ADV_CSW_SCSI_RESET_LATCH|ADV_CSW_SCSI_RESET_ACTIVE))) {
		printf("Detected Bus Reset\n");
		adv_reset_bus(adv, /*initiate_reset*/FALSE);
		return;
	}

	if ((chipstat & ADV_CSW_INT_PENDING) != 0) {

		saved_ram_addr = ADV_INW(adv, ADV_LRAM_ADDR);
		host_flag = adv_read_lram_8(adv, ADVV_HOST_FLAG_B);
		adv_write_lram_8(adv, ADVV_HOST_FLAG_B,
				 host_flag | ADV_HOST_FLAG_IN_ISR);

		adv_ack_interrupt(adv);

		if ((chipstat & ADV_CSW_HALTED) != 0
		 && (ctrl_reg & ADV_CC_SINGLE_STEP) != 0) {
			adv_isr_chip_halted(adv);
			saved_ctrl_reg &= ~ADV_CC_HALT;
		} else {
			adv_run_doneq(adv);
		}
		ADV_OUTW(adv, ADV_LRAM_ADDR, saved_ram_addr);
#ifdef DIAGNOSTIC
		if (ADV_INW(adv, ADV_LRAM_ADDR) != saved_ram_addr)
			panic("adv_intr: Unable to set LRAM addr");
#endif
		adv_write_lram_8(adv, ADVV_HOST_FLAG_B, host_flag);
	}

	ADV_OUTB(adv, ADV_CHIP_CTRL, saved_ctrl_reg);
}

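/*
 * Drain the firmware's done queue: starting from the queue number stored
 * at ADVV_DONE_Q_TAIL_W, follow the forward links, copy out each completed
 * request's status, release its queue entries (including any S/G
 * continuation queues), and hand the result to adv_done().
 */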
static void
adv_run_doneq(struct adv_softc *adv)
{
	struct adv_q_done_info scsiq;
	u_int		  doneq_head;
	u_int		  done_qno;

	doneq_head = adv_read_lram_16(adv, ADVV_DONE_Q_TAIL_W) & 0xFF;
	done_qno = adv_read_lram_8(adv, ADV_QNO_TO_QADDR(doneq_head)
				   + ADV_SCSIQ_B_FWD);
	while (done_qno != ADV_QLINK_END) {
		union ccb* ccb;
		struct adv_ccb_info *cinfo;
		u_int done_qaddr;
		u_int sg_queue_cnt;
		int   aborted;

		done_qaddr = ADV_QNO_TO_QADDR(done_qno);

		/* Pull status from this request */
		sg_queue_cnt = adv_copy_lram_doneq(adv, done_qaddr, &scsiq,
						   adv->max_dma_count);

		/* Mark it as free */
		adv_write_lram_8(adv, done_qaddr + ADV_SCSIQ_B_STATUS,
				 scsiq.q_status & ~(QS_READY|QS_ABORTED));

		/* Process request based on retrieved info */
		if ((scsiq.cntl & QC_SG_HEAD) != 0) {
			u_int i;

			/*
			 * S/G based request.  Free all of the queue
			 * structures that contained S/G information.
			 */
			for (i = 0; i < sg_queue_cnt; i++) {
				done_qno = adv_read_lram_8(adv, done_qaddr
							   + ADV_SCSIQ_B_FWD);

#ifdef DIAGNOSTIC
				if (done_qno == ADV_QLINK_END) {
					panic("adv_qdone: Corrupted SG "
					      "list encountered");
				}
#endif
				done_qaddr = ADV_QNO_TO_QADDR(done_qno);

				/* Mark SG queue as free */
				adv_write_lram_8(adv, done_qaddr
						 + ADV_SCSIQ_B_STATUS, QS_FREE);
			}
		} else
			sg_queue_cnt = 0;
#ifdef DIAGNOSTIC
		if (adv->cur_active < (sg_queue_cnt + 1))
			panic("adv_qdone: Attempting to free more "
			      "queues than are active");
#endif
		adv->cur_active -= sg_queue_cnt + 1;

		aborted = (scsiq.q_status & QS_ABORTED) != 0;

		if ((scsiq.q_status != QS_DONE)
		 && (scsiq.q_status & QS_ABORTED) == 0)
			panic("adv_qdone: completed scsiq with unknown status");

		scsiq.remain_bytes += scsiq.extra_bytes;

		if ((scsiq.d3.done_stat == QD_WITH_ERROR) &&
		    (scsiq.d3.host_stat == QHSTA_M_DATA_OVER_RUN)) {
			if ((scsiq.cntl & (QC_DATA_IN|QC_DATA_OUT)) == 0) {
				scsiq.d3.done_stat = QD_NO_ERROR;
				scsiq.d3.host_stat = QHSTA_NO_ERROR;
			}
		}

		cinfo = &adv->ccb_infos[scsiq.d2.ccb_index];
		ccb = cinfo->ccb;
		ccb->csio.resid = scsiq.remain_bytes;
		adv_done(adv, ccb,
			 scsiq.d3.done_stat, scsiq.d3.host_stat,
			 scsiq.d3.scsi_stat, scsiq.q_no);

		doneq_head = done_qno;
		done_qno = adv_read_lram_8(adv, done_qaddr + ADV_SCSIQ_B_FWD);
	}
	adv_write_lram_16(adv, ADVV_DONE_Q_TAIL_W, doneq_head);
}


void
adv_done(struct adv_softc *adv, union ccb *ccb, u_int done_stat,
	 u_int host_stat, u_int scsi_status, u_int q_no)
{
	struct	   adv_ccb_info *cinfo;

	cinfo = (struct adv_ccb_info *)ccb->ccb_h.ccb_cinfo_ptr;
	LIST_REMOVE(&ccb->ccb_h, sim_links.le);
	untimeout(adv_timeout, ccb, ccb->ccb_h.timeout_ch);
	if ((ccb->ccb_h.flags & CAM_DIR_MASK) != CAM_DIR_NONE) {
		bus_dmasync_op_t op;

		if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN)
			op = BUS_DMASYNC_POSTREAD;
		else
			op = BUS_DMASYNC_POSTWRITE;
		bus_dmamap_sync(adv->buffer_dmat, cinfo->dmamap, op);
		bus_dmamap_unload(adv->buffer_dmat, cinfo->dmamap);
	}

	switch (done_stat) {
	case QD_NO_ERROR:
		if (host_stat == QHSTA_NO_ERROR) {
			ccb->ccb_h.status = CAM_REQ_CMP;
			break;
		}
		xpt_print_path(ccb->ccb_h.path);
		printf("adv_done - queue done without error, "
		       "but host status non-zero(%x)\n", host_stat);
		/*FALLTHROUGH*/
	case QD_WITH_ERROR:
		switch (host_stat) {
		case QHSTA_M_TARGET_STATUS_BUSY:
		case QHSTA_M_BAD_QUEUE_FULL_OR_BUSY:
			/*
			 * Assume that if we were a tagged transaction
			 * the target reported queue full.  Otherwise,
			 * report busy.  The firmware really should just
			 * pass the original status back up to us even
			 * if it thinks the target was in error for
			 * returning this status as no other transactions
			 * from this initiator are in effect, but this
			 * ignores multi-initiator setups and there is
			 * evidence that the firmware gets its per-device
			 * transaction counts screwed up occasionally.
			 */
			ccb->ccb_h.status |= CAM_SCSI_STATUS_ERROR;
			if ((ccb->ccb_h.flags & CAM_TAG_ACTION_VALID) != 0
			 && host_stat != QHSTA_M_TARGET_STATUS_BUSY)
				scsi_status = SCSI_STATUS_QUEUE_FULL;
			else
				scsi_status = SCSI_STATUS_BUSY;
			adv_abort_ccb(adv, ccb->ccb_h.target_id,
				      ccb->ccb_h.target_lun,
				      /*ccb*/NULL, CAM_REQUEUE_REQ,
				      /*queued_only*/TRUE);
			/*FALLTHROUGH*/
		case QHSTA_M_NO_AUTO_REQ_SENSE:
		case QHSTA_NO_ERROR:
			ccb->csio.scsi_status = scsi_status;
			switch (scsi_status) {
			case SCSI_STATUS_CHECK_COND:
			case SCSI_STATUS_CMD_TERMINATED:
				ccb->ccb_h.status |= CAM_AUTOSNS_VALID;
				/* Structure copy */
				ccb->csio.sense_data =
				    adv->sense_buffers[q_no - 1];
				/* FALLTHROUGH */
			case SCSI_STATUS_BUSY:
			case SCSI_STATUS_RESERV_CONFLICT:
			case SCSI_STATUS_QUEUE_FULL:
			case SCSI_STATUS_COND_MET:
			case SCSI_STATUS_INTERMED:
			case SCSI_STATUS_INTERMED_COND_MET:
				ccb->ccb_h.status |= CAM_SCSI_STATUS_ERROR;
				break;
			case SCSI_STATUS_OK:
				ccb->ccb_h.status |= CAM_REQ_CMP;
				break;
			}
			break;
		case QHSTA_M_SEL_TIMEOUT:
			ccb->ccb_h.status = CAM_SEL_TIMEOUT;
			break;
		case QHSTA_M_DATA_OVER_RUN:
			ccb->ccb_h.status = CAM_DATA_RUN_ERR;
			break;
		case QHSTA_M_UNEXPECTED_BUS_FREE:
			ccb->ccb_h.status = CAM_UNEXP_BUSFREE;
			break;
		case QHSTA_M_BAD_BUS_PHASE_SEQ:
			ccb->ccb_h.status = CAM_SEQUENCE_FAIL;
			break;
		case QHSTA_M_BAD_CMPL_STATUS_IN:
			/* No command complete after a status message */
			ccb->ccb_h.status = CAM_SEQUENCE_FAIL;
			break;
		case QHSTA_D_EXE_SCSI_Q_BUSY_TIMEOUT:
		case QHSTA_M_WTM_TIMEOUT:
		case QHSTA_M_HUNG_REQ_SCSI_BUS_RESET:
			/* The SCSI bus hung in a phase */
			ccb->ccb_h.status = CAM_SEQUENCE_FAIL;
			adv_reset_bus(adv, /*initiate_reset*/TRUE);
			break;
		case QHSTA_M_AUTO_REQ_SENSE_FAIL:
			ccb->ccb_h.status = CAM_AUTOSENSE_FAIL;
			break;
		case QHSTA_D_QDONE_SG_LIST_CORRUPTED:
		case QHSTA_D_ASC_DVC_ERROR_CODE_SET:
		case QHSTA_D_HOST_ABORT_FAILED:
		case QHSTA_D_EXE_SCSI_Q_FAILED:
		case QHSTA_D_ASPI_NO_BUF_POOL:
		case QHSTA_M_BAD_TAG_CODE:
		case QHSTA_D_LRAM_CMP_ERROR:
		case QHSTA_M_MICRO_CODE_ERROR_HALT:
		default:
			panic("%s: Unhandled Host status error %x",
			      adv_name(adv), host_stat);
			/* NOTREACHED */
		}
		break;

	case QD_ABORTED_BY_HOST:
		/* Don't clobber any more explicit error codes we've set */
		if ((ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_INPROG)
			ccb->ccb_h.status = CAM_REQ_ABORTED;
		break;

	default:
		xpt_print_path(ccb->ccb_h.path);
		printf("adv_done - queue done with unknown status %x:%x\n",
		       done_stat, host_stat);
		ccb->ccb_h.status = CAM_REQ_CMP_ERR;
		break;
	}
	adv_clear_state(adv, ccb);
	if ((ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP
	 && (ccb->ccb_h.status & CAM_DEV_QFRZN) == 0) {
		xpt_freeze_devq(ccb->ccb_h.path, /*count*/1);
		ccb->ccb_h.status |= CAM_DEV_QFRZN;
	}
	adv_free_ccb_info(adv, cinfo);
	/*
	 * Null this out so that we catch driver bugs that cause a
	 * ccb to be completed twice.
	 */
	ccb->ccb_h.ccb_cinfo_ptr = NULL;
	ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
	xpt_done(ccb);
}

/*
 * Function to poll for command completion when
 * interrupts are disabled (crash dumps)
 */
static void
adv_poll(struct cam_sim *sim)
{
	adv_intr(cam_sim_softc(sim));
}

/*
 * Attach all the sub-devices we can find
 */
int
adv_attach(struct adv_softc *adv)
{
	struct ccb_setasync csa;
	struct cam_devq *devq;
	int max_sg;

	/*
	 * Allocate an array of ccb mapping structures.  We put the
	 * index of the ccb_info structure into the queue representing
	 * a transaction and use it for mapping the queue to the
	 * upper level SCSI transaction it represents.
	 */
	adv->ccb_infos = malloc(sizeof(*adv->ccb_infos) * adv->max_openings,
				M_DEVBUF, M_NOWAIT);

	if (adv->ccb_infos == NULL)
		return (ENOMEM);

	adv->init_level++;

	/*
	 * Create our DMA tags.  These tags define the kinds of device
	 * accessible memory allocations and memory mappings we will
	 * need to perform during normal operation.
	 *
	 * Unless we need to further restrict the allocation, we rely
	 * on the restrictions of the parent dmat, hence the common
	 * use of MAXADDR and MAXSIZE.
	 *
	 * The ASC boards use chains of "queues" (the transactional
	 * resources on the board) to represent long S/G lists.
	 * The first queue represents the command and holds a
	 * single address and data pair.  The queues that follow
	 * can each hold ADV_SG_LIST_PER_Q entries.  Given the
	 * total number of queues, we can express the largest
	 * transaction we can map.  We reserve a few queues for
	 * error recovery.  Take those into account as well.
	 *
	 * There is a way to take an interrupt to download the
	 * next batch of S/G entries if there are more than 255
	 * of them (the counter in the queue structure is a u_int8_t).
	 * We don't use this feature, so limit the S/G list size
	 * accordingly.
	 */
	max_sg = (adv->max_openings - ADV_MIN_FREE_Q - 1) * ADV_SG_LIST_PER_Q;
	if (max_sg > 255)
		max_sg = 255;
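	/*
	 * Worked example with hypothetical numbers (the real constants
	 * are defined in the driver's headers): with 240 openings,
	 * ADV_MIN_FREE_Q == 2 and ADV_SG_LIST_PER_Q == 7, the formula
	 * above yields (240 - 2 - 1) * 7 = 1659 entries, which the
	 * clamp then limits to 255 to fit the u_int8_t counter.
	 */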

	/* DMA tag for mapping buffers into device visible space. */
	if (bus_dma_tag_create(
			/* parent	*/ adv->parent_dmat,
			/* alignment	*/ 1,
			/* boundary	*/ 0,
			/* lowaddr	*/ BUS_SPACE_MAXADDR,
			/* highaddr	*/ BUS_SPACE_MAXADDR,
			/* filter	*/ NULL,
			/* filterarg	*/ NULL,
			/* maxsize	*/ MAXPHYS,
			/* nsegments	*/ max_sg,
			/* maxsegsz	*/ BUS_SPACE_MAXSIZE_32BIT,
			/* flags	*/ BUS_DMA_ALLOCNOW,
			/* lockfunc	*/ busdma_lock_mutex,
			/* lockarg	*/ &Giant,
			&adv->buffer_dmat) != 0) {
		return (ENXIO);
	}
	adv->init_level++;

	/* DMA tag for our sense buffers */
	if (bus_dma_tag_create(
			/* parent	*/ adv->parent_dmat,
			/* alignment	*/ 1,
			/* boundary	*/ 0,
			/* lowaddr	*/ BUS_SPACE_MAXADDR,
			/* highaddr	*/ BUS_SPACE_MAXADDR,
			/* filter	*/ NULL,
			/* filterarg	*/ NULL,
			/* maxsize	*/ sizeof(struct scsi_sense_data) *
					   adv->max_openings,
			/* nsegments	*/ 1,
			/* maxsegsz	*/ BUS_SPACE_MAXSIZE_32BIT,
			/* flags	*/ 0,
			/* lockfunc	*/ busdma_lock_mutex,
			/* lockarg	*/ &Giant,
			&adv->sense_dmat) != 0) {
		return (ENXIO);
	}

	adv->init_level++;

	/* Allocation for our sense buffers */
	if (bus_dmamem_alloc(adv->sense_dmat, (void **)&adv->sense_buffers,
			     BUS_DMA_NOWAIT, &adv->sense_dmamap) != 0) {
		return (ENOMEM);
	}

	adv->init_level++;

	/* And permanently map them */
	bus_dmamap_load(adv->sense_dmat, adv->sense_dmamap,
			adv->sense_buffers,
			sizeof(struct scsi_sense_data)*adv->max_openings,
			adv_map, &adv->sense_physbase, /*flags*/0);

	adv->init_level++;

	/*
	 * Fire up the chip
	 */
	if (adv_start_chip(adv) != 1) {
		printf("adv%d: Unable to start on board processor. Aborting.\n",
		       adv->unit);
		return (ENXIO);
	}

	/*
	 * Create the device queue for our SIM.
	 */
	devq = cam_simq_alloc(adv->max_openings);
	if (devq == NULL)
		return (ENOMEM);

	/*
	 * Construct our SIM entry.
	 */
	adv->sim = cam_sim_alloc(adv_action, adv_poll, "adv", adv, adv->unit,
				 1, adv->max_openings, devq);
	if (adv->sim == NULL)
		return (ENOMEM);

	/*
	 * Register the bus.
	 *
	 * XXX Twin Channel EISA Cards???
	 */
	if (xpt_bus_register(adv->sim, 0) != CAM_SUCCESS) {
		cam_sim_free(adv->sim, /*free devq*/TRUE);
		return (ENXIO);
	}

	if (xpt_create_path(&adv->path, /*periph*/NULL, cam_sim_path(adv->sim),
			    CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD)
	    != CAM_REQ_CMP) {
		xpt_bus_deregister(cam_sim_path(adv->sim));
		cam_sim_free(adv->sim, /*free devq*/TRUE);
		return (ENXIO);
	}

	xpt_setup_ccb(&csa.ccb_h, adv->path, /*priority*/5);
	csa.ccb_h.func_code = XPT_SASYNC_CB;
	csa.event_enable = AC_FOUND_DEVICE|AC_LOST_DEVICE;
	csa.callback = advasync;
	csa.callback_arg = adv;
	xpt_action((union ccb *)&csa);
	return (0);
}
