advansys.c revision 39505
/*
 * Generic driver for the Advanced Systems Inc. SCSI controllers
 * Product specific probe and attach routines can be found in:
 *
 * i386/isa/adv_isa.c	ABP5140, ABP542, ABP5150, ABP842, ABP852
 * i386/eisa/adv_eisa.c	ABP742, ABP752
 * pci/adv_pci.c	ABP920, ABP930, ABP930U, ABP930UA, ABP940, ABP940U,
 *			ABP940UA, ABP950, ABP960, ABP960U, ABP960UA,
 *			ABP970, ABP970U
 *
 * Copyright (c) 1996-1998 Justin Gibbs.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions, and the following disclaimer,
 *    without modification, immediately at the beginning of the file.
 * 2. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *      $Id: advansys.c,v 1.1 1998/09/15 07:03:33 gibbs Exp $
 */
/*
 * Ported from:
 * advansys.c - Linux Host Driver for AdvanSys SCSI Adapters
 *
 * Copyright (c) 1995-1997 Advanced System Products, Inc.
 * All Rights Reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that redistributions of source
 * code retain the above copyright notice and this comment without
 * modification.
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/buf.h>
#include <sys/kernel.h>

#include <machine/bus_pio.h>
#include <machine/bus.h>
#include <machine/clock.h>

#include <cam/cam.h>
#include <cam/cam_ccb.h>
#include <cam/cam_sim.h>
#include <cam/cam_xpt_sim.h>
#include <cam/cam_xpt_periph.h>
#include <cam/cam_debug.h>

#include <cam/scsi/scsi_all.h>
#include <cam/scsi/scsi_message.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/pmap.h>

#include <dev/advansys/advansys.h>

u_long adv_unit;

static void	advminphys(struct buf *bp);
static void	adv_action(struct cam_sim *sim, union ccb *ccb);
static void	adv_execute_ccb(void *arg, bus_dma_segment_t *dm_segs,
				int nsegments, int error);
static void	adv_poll(struct cam_sim *sim);
static void	adv_run_doneq(struct adv_softc *adv);
static struct adv_ccb_info *
		adv_alloc_ccb_info(struct adv_softc *adv);
static void	adv_destroy_ccb_info(struct adv_softc *adv,
				  struct adv_ccb_info *cinfo);
static __inline struct adv_ccb_info *
		adv_get_ccb_info(struct adv_softc *adv);
static __inline void adv_free_ccb_info(struct adv_softc *adv,
				       struct adv_ccb_info *cinfo);


struct adv_softc *advsoftcs[NADV];   /* XXX Config should handle this */

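/*
 * Pop a CCB info structure off of our free list, allocating a new
 * one if the list is empty.  The free list is protected from our
 * interrupt handler by splcam.
 */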
static __inline struct adv_ccb_info *
adv_get_ccb_info(struct adv_softc *adv)
{
	struct adv_ccb_info *cinfo;
	int opri;

	opri = splcam();
	if ((cinfo = SLIST_FIRST(&adv->free_ccb_infos)) != NULL) {
		SLIST_REMOVE_HEAD(&adv->free_ccb_infos, links);
	} else {
		cinfo = adv_alloc_ccb_info(adv);
	}
	splx(opri);

	return (cinfo);
}

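/*
 * Return a CCB info structure to our free list for reuse.
 */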
static __inline void
adv_free_ccb_info(struct adv_softc *adv, struct adv_ccb_info *cinfo)
{
	int opri;

	opri = splcam();
	cinfo->state = ACCB_FREE;
	SLIST_INSERT_HEAD(&adv->free_ccb_infos, cinfo, links);
	splx(opri);
}

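/*
 * Callback for bus_dmamap_load on single segment mappings (e.g. our
 * sense buffer area).  Record the segment's bus address.
 */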
void
adv_map(void *arg, bus_dma_segment_t *segs, int nseg, int error)
{
	bus_addr_t* physaddr;

	physaddr = (bus_addr_t*)arg;
	*physaddr = segs->ds_addr;
}

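/*
 * Format this controller's name into a static buffer.  Not reentrant,
 * but sufficient for the diagnostic messages we print.
 */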
char *
adv_name(struct adv_softc *adv)
{
	static char name[10];

	sprintf(name, "adv%d", adv->unit);
	return (name);
}

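/*
 * CAM entry point for this SIM.  Dispatch on the CCB's function code,
 * executing SCSI I/O requests and servicing the transfer negotiation,
 * geometry, and path inquiry queries the XPT layer sends our way.
 */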
static void
adv_action(struct cam_sim *sim, union ccb *ccb)
{
	struct adv_softc *adv;

	CAM_DEBUG(ccb->ccb_h.path, CAM_DEBUG_TRACE, ("adv_action\n"));

	adv = (struct adv_softc *)cam_sim_softc(sim);

	switch (ccb->ccb_h.func_code) {
	/* Common cases first */
	case XPT_SCSI_IO:	/* Execute the requested I/O operation */
	{
		struct	ccb_hdr *ccb_h;
		struct	ccb_scsiio *csio;
		struct	adv_ccb_info *cinfo;

		ccb_h = &ccb->ccb_h;
		csio = &ccb->csio;
		cinfo = adv_get_ccb_info(adv);
		if (cinfo == NULL)
			panic("XXX Handle CCB info error!!!");
		cinfo->state |= ACCB_ACTIVE;

		ccb_h->ccb_cinfo_ptr = cinfo;

		/* Only use S/G if there is a transfer */
		if ((ccb_h->flags & CAM_DIR_MASK) != CAM_DIR_NONE) {
			if ((ccb_h->flags & CAM_SCATTER_VALID) == 0) {
				/*
				 * We've been given a pointer
				 * to a single buffer
				 */
				if ((ccb_h->flags & CAM_DATA_PHYS) == 0) {
					int s;
					int error;

					s = splsoftvm();
					error =
					    bus_dmamap_load(adv->buffer_dmat,
							    cinfo->dmamap,
							    csio->data_ptr,
							    csio->dxfer_len,
							    adv_execute_ccb,
							    csio, /*flags*/0);
					if (error == EINPROGRESS) {
						/*
						 * So as to maintain ordering,
						 * freeze the controller queue
						 * until our mapping is
						 * returned.
						 */
						xpt_freeze_simq(adv->sim,
								/*count*/1);
						cinfo->state |=
						    ACCB_RELEASE_SIMQ;
					}
					splx(s);
				} else {
					struct bus_dma_segment seg;

					/* Pointer to physical buffer */
					seg.ds_addr =
					     (bus_addr_t)csio->data_ptr;
					seg.ds_len = csio->dxfer_len;
					adv_execute_ccb(csio, &seg, 1, 0);
				}
			} else {
				struct bus_dma_segment *segs;

				if ((ccb_h->flags & CAM_DATA_PHYS) != 0)
					panic("adv_setup_data - Physical "
					      "segment pointers unsupported");

				if ((ccb_h->flags & CAM_SG_LIST_PHYS) == 0)
					panic("adv_setup_data - Virtual "
					      "segment addresses unsupported");

				/* Just use the segments provided */
				segs = (struct bus_dma_segment *)csio->data_ptr;
				adv_execute_ccb(ccb, segs, csio->sglist_cnt, 0);
			}
		} else {
			adv_execute_ccb(ccb, NULL, 0, 0);
		}
		break;
	}
	case XPT_RESET_DEV:	/* Bus Device Reset the specified SCSI device */
	case XPT_TARGET_IO:	/* Execute target I/O request */
	case XPT_ACCEPT_TARGET_IO:	/* Accept Host Target Mode CDB */
	case XPT_CONT_TARGET_IO:	/* Continue Host Target I/O Connection*/
	case XPT_EN_LUN:		/* Enable LUN as a target */
	case XPT_ABORT:			/* Abort the specified CCB */
		/* XXX Implement */
		ccb->ccb_h.status = CAM_REQ_INVALID;
		xpt_done(ccb);
		break;
	case XPT_SET_TRAN_SETTINGS:
	{
		struct	 ccb_trans_settings *cts;
		u_int	 offset;
		target_bit_vector targ_mask;
		struct adv_target_transinfo *tconf;
		u_int	 update_type;
		int	 s;

		cts = &ccb->cts;
		targ_mask = ADV_TID_TO_TARGET_MASK(cts->ccb_h.target_id);
		tconf = &adv->tinfo[cts->ccb_h.target_id];
		update_type = 0;
		if ((cts->flags & CCB_TRANS_CURRENT_SETTINGS) != 0)
			update_type |= ADV_TRANS_GOAL;
		if ((cts->flags & CCB_TRANS_USER_SETTINGS) != 0)
			update_type |= ADV_TRANS_USER;

		s = splcam();

		if ((update_type & ADV_TRANS_GOAL) != 0) {
			if ((cts->valid & CCB_TRANS_DISC_VALID) != 0) {
				if ((cts->flags & CCB_TRANS_DISC_ENB) != 0)
					adv->disc_enable |= targ_mask;
				else
					adv->disc_enable &= ~targ_mask;
				adv_write_lram_8(adv, ADVV_DISC_ENABLE_B,
						 adv->disc_enable);
			}

			if ((cts->valid & CCB_TRANS_TQ_VALID) != 0) {
				if ((cts->flags & CCB_TRANS_TAG_ENB) != 0)
					adv->cmd_qng_enabled |= targ_mask;
				else
					adv->cmd_qng_enabled &= ~targ_mask;
			}
		}

		if ((update_type & ADV_TRANS_USER) != 0) {
			if ((cts->valid & CCB_TRANS_DISC_VALID) != 0) {
				if ((cts->flags & CCB_TRANS_DISC_ENB) != 0)
					adv->user_disc_enable |= targ_mask;
				else
					adv->user_disc_enable &= ~targ_mask;
			}

			if ((cts->valid & CCB_TRANS_TQ_VALID) != 0) {
				if ((cts->flags & CCB_TRANS_TAG_ENB) != 0)
					adv->user_cmd_qng_enabled |= targ_mask;
				else
					adv->user_cmd_qng_enabled &= ~targ_mask;
			}
		}

		if ((cts->valid & CCB_TRANS_SYNC_RATE_VALID) != 0) {
			if ((cts->valid & CCB_TRANS_SYNC_OFFSET_VALID) == 0)
				cts->sync_offset = 0;

			adv_period_offset_to_sdtr(adv, &cts->sync_period,
						  &cts->sync_offset,
						  cts->ccb_h.target_id);

			adv_set_syncrate(adv, /*struct cam_path */NULL,
					 cts->ccb_h.target_id, cts->sync_period,
					 cts->sync_offset, update_type);
		}
		splx(s);
		ccb->ccb_h.status = CAM_REQ_CMP;
		xpt_done(ccb);
		break;
	}
	case XPT_GET_TRAN_SETTINGS:
	/* Get default/user set transfer settings for the target */
	{
		struct ccb_trans_settings *cts;
		struct adv_transinfo *tconf;
		target_bit_vector target_mask;
		int s;

		cts = &ccb->cts;
		target_mask = ADV_TID_TO_TARGET_MASK(cts->ccb_h.target_id);

		cts->flags &= ~(CCB_TRANS_DISC_ENB|CCB_TRANS_TAG_ENB);

		s = splcam();
		if ((cts->flags & CCB_TRANS_CURRENT_SETTINGS) != 0) {
			tconf = &adv->tinfo[cts->ccb_h.target_id].current;
			if ((adv->disc_enable & target_mask) != 0)
				cts->flags |= CCB_TRANS_DISC_ENB;
			if ((adv->cmd_qng_enabled & target_mask) != 0)
				cts->flags |= CCB_TRANS_TAG_ENB;
		} else {
			tconf = &adv->tinfo[cts->ccb_h.target_id].user;
			if ((adv->user_disc_enable & target_mask) != 0)
				cts->flags |= CCB_TRANS_DISC_ENB;
			if ((adv->user_cmd_qng_enabled & target_mask) != 0)
				cts->flags |= CCB_TRANS_TAG_ENB;
		}

		cts->sync_period = tconf->period;
		cts->sync_offset = tconf->offset;
		splx(s);

		cts->bus_width = MSG_EXT_WDTR_BUS_8_BIT;
		cts->valid = CCB_TRANS_SYNC_RATE_VALID
			   | CCB_TRANS_SYNC_OFFSET_VALID
			   | CCB_TRANS_BUS_WIDTH_VALID
			   | CCB_TRANS_DISC_VALID
			   | CCB_TRANS_TQ_VALID;
		ccb->ccb_h.status = CAM_REQ_CMP;
		xpt_done(ccb);
		break;
	}
	case XPT_CALC_GEOMETRY:
	{
		struct	  ccb_calc_geometry *ccg;
		u_int32_t size_mb;
		u_int32_t secs_per_cylinder;
		int	  extended;

		ccg = &ccb->ccg;
		size_mb = ccg->volume_size
			/ ((1024L * 1024L) / ccg->block_size);
		extended = (adv->control & ADV_CNTL_BIOS_GT_1GB) != 0;

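		/*
		 * Mimic the translation performed by the BIOS:  drives
		 * over 1GB use an extended 255 head/63 sector geometry
		 * when the "> 1 GB" option is enabled, and a 64 head/
		 * 32 sector geometry otherwise.
		 */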
		if (size_mb > 1024 && extended) {
			ccg->heads = 255;
			ccg->secs_per_track = 63;
		} else {
			ccg->heads = 64;
			ccg->secs_per_track = 32;
		}
		secs_per_cylinder = ccg->heads * ccg->secs_per_track;
		ccg->cylinders = ccg->volume_size / secs_per_cylinder;
		ccb->ccb_h.status = CAM_REQ_CMP;
		xpt_done(ccb);
		break;
	}
	case XPT_RESET_BUS:		/* Reset the specified SCSI bus */
	{
		int s;

		s = splcam();
		adv_stop_execution(adv);
		adv_reset_bus(adv);
		adv_start_execution(adv);
		splx(s);

		ccb->ccb_h.status = CAM_REQ_CMP;
		xpt_done(ccb);
		break;
	}
	case XPT_TERM_IO:		/* Terminate the I/O process */
		/* XXX Implement */
		ccb->ccb_h.status = CAM_REQ_INVALID;
		xpt_done(ccb);
		break;
	case XPT_PATH_INQ:		/* Path routing inquiry */
	{
		struct ccb_pathinq *cpi = &ccb->cpi;

		cpi->version_num = 1; /* XXX??? */
		cpi->hba_inquiry = PI_SDTR_ABLE|PI_TAG_ABLE;
		cpi->target_sprt = 0;
		cpi->hba_misc = 0;
		cpi->hba_eng_cnt = 0;
		cpi->max_target = 7;
		cpi->max_lun = 7;
		cpi->initiator_id = adv->scsi_id;
		cpi->bus_id = cam_sim_bus(sim);
		strncpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN);
		strncpy(cpi->hba_vid, "Advansys", HBA_IDLEN);
		strncpy(cpi->dev_name, cam_sim_name(sim), DEV_IDLEN);
		cpi->unit_number = cam_sim_unit(sim);
		cpi->ccb_h.status = CAM_REQ_CMP;
		xpt_done(ccb);
		break;
	}
	default:
		ccb->ccb_h.status = CAM_REQ_INVALID;
		xpt_done(ccb);
		break;
	}
}

/*
 * Currently, the output of bus_dmamap_load suits our needs just
 * fine, but should it change, we'd need to do something here.
 */
#define adv_fixup_dmasegs(adv, dm_segs) (struct adv_sg_entry *)(dm_segs)

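/*
 * Callback for bus_dmamap_load and entry point for requests that need
 * no mapping.  Build an adv_scsi_q for the transaction and hand it to
 * the microcode, requeuing the request on temporary resource shortages.
 */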
static void
adv_execute_ccb(void *arg, bus_dma_segment_t *dm_segs,
		int nsegments, int error)
{
	struct	ccb_scsiio *csio;
	struct	ccb_hdr *ccb_h;
	struct	cam_sim *sim;
	struct	adv_softc *adv;
	struct	adv_ccb_info *cinfo;
	struct	adv_scsi_q scsiq;
	struct	adv_sg_head sghead;
	int	s;

	csio = (struct ccb_scsiio *)arg;
	ccb_h = &csio->ccb_h;
	sim = xpt_path_sim(ccb_h->path);
	adv = (struct adv_softc *)cam_sim_softc(sim);
	cinfo = (struct adv_ccb_info *)csio->ccb_h.ccb_cinfo_ptr;

	if ((ccb_h->flags & CAM_CDB_POINTER) != 0) {
		if ((ccb_h->flags & CAM_CDB_PHYS) == 0) {
			/* XXX Need phystovirt!!!! */
			/* How about pmap_kenter??? */
			scsiq.cdbptr = csio->cdb_io.cdb_ptr;
		} else {
			scsiq.cdbptr = csio->cdb_io.cdb_ptr;
		}
	} else {
		scsiq.cdbptr = csio->cdb_io.cdb_bytes;
	}
	/*
	 * Build up the request
	 */
	scsiq.q1.status = 0;
	scsiq.q1.q_no = 0;
	scsiq.q1.cntl = 0;
	scsiq.q1.sg_queue_cnt = 0;
	scsiq.q1.target_id = ADV_TID_TO_TARGET_MASK(ccb_h->target_id);
	scsiq.q1.target_lun = ccb_h->target_lun;
	scsiq.q1.sense_len = csio->sense_len;
	scsiq.q1.extra_bytes = 0;
	scsiq.q2.ccb_ptr = (u_int32_t)csio;
	scsiq.q2.target_ix = ADV_TIDLUN_TO_IX(ccb_h->target_id,
					      ccb_h->target_lun);
	scsiq.q2.flag = 0;
	scsiq.q2.cdb_len = csio->cdb_len;
	if ((ccb_h->flags & CAM_TAG_ACTION_VALID) != 0)
		scsiq.q2.tag_code = csio->tag_action;
	else
		scsiq.q2.tag_code = 0;
	scsiq.q2.vm_id = 0;

	s = splcam();

	/*
	 * Last time we need to check if this SCB needs to
	 * be aborted.
	 */
	if (ccb_h->status != CAM_REQ_INPROG) {
		if (nsegments != 0) {
			bus_dmamap_unload(adv->buffer_dmat, cinfo->dmamap);
		}
		if ((cinfo->state & ACCB_RELEASE_SIMQ) != 0) {
			ccb_h->status |= CAM_RELEASE_SIMQ;
		}
		adv_free_ccb_info(adv, cinfo);
		xpt_done((union ccb *)csio);
		splx(s);
		return;
	}

	if (nsegments != 0) {
		bus_dmasync_op_t op;

		scsiq.q1.data_addr = dm_segs->ds_addr;
		scsiq.q1.data_cnt = dm_segs->ds_len;
		if (nsegments > 1) {
			scsiq.q1.cntl |= QC_SG_HEAD;
			sghead.entry_cnt
			    = sghead.entry_to_copy
			    = nsegments;
			sghead.res = 0;
			sghead.sg_list = adv_fixup_dmasegs(adv, dm_segs);
			scsiq.sg_head = &sghead;
		} else {
			scsiq.sg_head = NULL;
		}
		if ((ccb_h->flags & CAM_DIR_MASK) == CAM_DIR_IN)
			op = BUS_DMASYNC_PREREAD;
		else
			op = BUS_DMASYNC_PREWRITE;
		bus_dmamap_sync(adv->buffer_dmat, cinfo->dmamap, op);
	} else {
		scsiq.q1.data_addr = 0;
		scsiq.q1.data_cnt = 0;
		scsiq.sg_head = NULL;
	}

	if (adv_execute_scsi_queue(adv, &scsiq, csio->dxfer_len) != 0) {
		/* Temporary resource shortage */
		if (nsegments != 0) {
			bus_dmamap_unload(adv->buffer_dmat, cinfo->dmamap);
		}
		ccb_h->status = CAM_REQUEUE_REQ;
		if ((cinfo->state & ACCB_RELEASE_SIMQ) != 0)
			ccb_h->status |= CAM_RELEASE_SIMQ;

		/* Unfreeze when resources are available */
		xpt_freeze_simq(adv->sim, /*count*/1);

		adv_free_ccb_info(adv, cinfo);
		xpt_done((union ccb *)csio);
		splx(s);
		return;
	}
	ccb_h->status |= CAM_SIM_QUEUED;
	LIST_INSERT_HEAD(&adv->pending_ccbs, ccb_h, sim_links.le);
	/* Schedule our timeout */
	ccb_h->timeout_ch =
	    timeout(adv_timeout, csio, (ccb_h->timeout * hz)/1000);
	splx(s);
}

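/*
 * Allocate a new CCB info structure and the dmamap used to map the
 * data buffers of its transactions.  Returns NULL on failure.
 */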
static struct adv_ccb_info *
adv_alloc_ccb_info(struct adv_softc *adv)
{
	int error;
	struct adv_ccb_info *cinfo;

	cinfo = malloc(sizeof(*cinfo), M_DEVBUF, M_NOWAIT);
	if (cinfo == NULL) {
		printf("%s: Can't malloc CCB info\n", adv_name(adv));
		return (NULL);
	}
	cinfo->state = ACCB_FREE;
	error = bus_dmamap_create(adv->buffer_dmat, /*flags*/0,
				  &cinfo->dmamap);
	if (error != 0) {
		printf("%s: Unable to allocate CCB info "
		       "dmamap - error %d\n", adv_name(adv), error);
		free(cinfo, M_DEVBUF);
		cinfo = NULL;
	}
	return (cinfo);
}

static void
adv_destroy_ccb_info(struct adv_softc *adv, struct adv_ccb_info *cinfo)
{
	bus_dmamap_destroy(adv->buffer_dmat, cinfo->dmamap);
	free(cinfo, M_DEVBUF);
}

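/*
 * Clamp transfers to what our S/G list can map.  One entry is held in
 * reserve since a transfer that is not page aligned can straddle an
 * additional page.
 */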
static void
advminphys(struct buf *bp)
{
	if (bp->b_bcount > ((ADV_MAX_SG_LIST - 1) * PAGE_SIZE))
		bp->b_bcount = ((ADV_MAX_SG_LIST - 1) * PAGE_SIZE);
}

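/*
 * Catch a transaction that has timed out.  We freeze the SIM queue,
 * disable the timeouts of the other pending transactions, and first
 * attempt to abort the offending CCB.  If the abort itself times out,
 * we fall back to resetting the bus.
 */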
void
adv_timeout(void *arg)
{
	int s;
	union ccb *ccb;
	struct adv_softc *adv;
	struct adv_ccb_info *cinfo;

	ccb = (union ccb *)arg;
	adv = (struct adv_softc *)xpt_path_sim(ccb->ccb_h.path)->softc;
	cinfo = (struct adv_ccb_info *)ccb->ccb_h.ccb_cinfo_ptr;

	xpt_print_path(ccb->ccb_h.path);
	printf("Timed out\n");

	s = splcam();
	/* Have we been taken care of already?? */
	if (cinfo == NULL || cinfo->state == ACCB_FREE) {
		splx(s);
		return;
	}

	adv_stop_execution(adv);

	if ((cinfo->state & ACCB_ABORT_QUEUED) == 0) {
		struct ccb_hdr *ccb_h;

		/*
		 * In order to simplify the recovery process, we ask the XPT
		 * layer to halt the queue of new transactions and we traverse
		 * the list of pending CCBs and remove their timeouts. This
		 * means that the driver attempts to clear only one error
		 * condition at a time.  In general, timeouts that occur
		 * close together are related anyway, so there is no benefit
		 * in attempting to handle errors in parallel.  Timeouts will
		 * be reinstated when the recovery process ends.
		 */
		if ((cinfo->state & ACCB_RELEASE_SIMQ) == 0) {
			xpt_freeze_simq(adv->sim, /*count*/1);
			cinfo->state |= ACCB_RELEASE_SIMQ;
		}

		/* This CCB is the CCB representing our recovery actions */
		cinfo->state |= ACCB_RECOVERY_CCB|ACCB_ABORT_QUEUED;

		ccb_h = LIST_FIRST(&adv->pending_ccbs);
		while (ccb_h != NULL) {
			untimeout(adv_timeout, ccb_h, ccb_h->timeout_ch);
			ccb_h = LIST_NEXT(ccb_h, sim_links.le);
		}

		/* XXX Should send a BDR */
		/* Attempt an abort as our first tack */
		xpt_print_path(ccb->ccb_h.path);
		printf("Attempting abort\n");
		adv_abort_ccb(adv, ccb->ccb_h.target_id,
			      ccb->ccb_h.target_lun, ccb,
			      CAM_CMD_TIMEOUT, /*queued_only*/FALSE);
		ccb->ccb_h.timeout_ch =
		    timeout(adv_timeout, ccb, 2 * hz);
	} else {
		/* Our attempt to perform an abort failed, go for a reset */
		xpt_print_path(ccb->ccb_h.path);
		printf("Resetting bus\n");
		ccb->ccb_h.status &= ~CAM_STATUS_MASK;
		ccb->ccb_h.status |= CAM_CMD_TIMEOUT;
		adv_reset_bus(adv);
	}
	adv_start_execution(adv);
	splx(s);
}

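/*
 * Allocate and minimally initialize the softc for a new controller
 * instance.  The bus specific probe code fills in the rest before
 * calling adv_init() and adv_attach().
 */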
struct adv_softc *
adv_alloc(int unit, bus_space_tag_t tag, bus_space_handle_t bsh)
{
	struct	 adv_softc *adv;
	int	 i;

	if (unit >= NADV) {
		printf("adv: unit number (%d) too high\n", unit);
		return NULL;
	}

	/*
	 * Allocate a storage area for us
	 */
	if (advsoftcs[unit]) {
		printf("adv%d: memory already allocated\n", unit);
		return NULL;
	}

	adv = malloc(sizeof(struct adv_softc), M_DEVBUF, M_NOWAIT);
	if (!adv) {
		printf("adv%d: cannot malloc!\n", unit);
		return NULL;
	}
	bzero(adv, sizeof(struct adv_softc));
	LIST_INIT(&adv->pending_ccbs);
	SLIST_INIT(&adv->free_ccb_infos);
	advsoftcs[unit] = adv;
	adv->unit = unit;
	adv->tag = tag;
	adv->bsh = bsh;

	return(adv);
}

void
adv_free(struct adv_softc *adv)
{
	switch (adv->init_level) {
	case 5:
	{
		struct adv_ccb_info *cinfo;

		while ((cinfo = SLIST_FIRST(&adv->free_ccb_infos)) != NULL) {
			SLIST_REMOVE_HEAD(&adv->free_ccb_infos, links);
			adv_destroy_ccb_info(adv, cinfo);
		}

		bus_dmamap_unload(adv->sense_dmat, adv->sense_dmamap);
	}
	case 4:
		bus_dmamem_free(adv->sense_dmat, adv->sense_buffers,
				adv->sense_dmamap);
	case 3:
		bus_dma_tag_destroy(adv->sense_dmat);
	case 2:
		bus_dma_tag_destroy(adv->buffer_dmat);
	case 1:
		bus_dma_tag_destroy(adv->parent_dmat);
	case 0:
		break;
	}
	free(adv, M_DEVBUF);
}

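/*
 * Bring the adapter to a known state:  halt the on-board processor,
 * pull our configuration from the EEPROM (falling back to sane
 * defaults if its checksum is bad), and set up local RAM and the
 * microcode.  The chip is not started until adv_attach().
 */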
int
adv_init(struct adv_softc *adv)
{
	struct	  adv_eeprom_config eeprom_config;
	int	  checksum, i;
	u_int16_t config_lsw;
	u_int16_t config_msw;

	adv_reset_chip_and_scsi_bus(adv);
	adv_lib_init(adv);

	/*
	 * Stop script execution.
	 */
	adv_write_lram_16(adv, ADV_HALTCODE_W, 0x00FE);
	adv_stop_execution(adv);
	if (adv_is_chip_halted(adv) == 0) {
		printf("adv%d: Unable to halt adapter. Initialization "
		       "failed\n", adv->unit);
		return (1);
	}
	ADV_OUTW(adv, ADV_REG_PROG_COUNTER, ADV_MCODE_START_ADDR);
	if (ADV_INW(adv, ADV_REG_PROG_COUNTER) != ADV_MCODE_START_ADDR) {
		printf("adv%d: Unable to set program counter. Initialization "
		       "failed\n", adv->unit);
		return (1);
	}

	config_msw = ADV_INW(adv, ADV_CONFIG_MSW);
	config_lsw = ADV_INW(adv, ADV_CONFIG_LSW);

	if ((config_msw & ADV_CFG_MSW_CLR_MASK) != 0) {
		config_msw &= ~ADV_CFG_MSW_CLR_MASK;
		/*
		 * XXX The Linux code flags this as an error,
		 * but what should we report to the user???
		 * It seems that clearing the config register
		 * makes this error recoverable.
		 */
		ADV_OUTW(adv, ADV_CONFIG_MSW, config_msw);
	}

	/* Suck in the configuration from the EEProm */
	checksum = adv_get_eeprom_config(adv, &eeprom_config);

	eeprom_config.cfg_msw &= ~ADV_CFG_MSW_CLR_MASK;

	if (ADV_INW(adv, ADV_CHIP_STATUS) & ADV_CSW_AUTO_CONFIG) {
		/*
		 * XXX The Linux code sets a warning level for this
		 * condition, yet nothing of meaning is printed to
		 * the user.  What does this mean???
		 */
		if (adv->chip_version == 3) {
			if (eeprom_config.cfg_lsw != config_lsw) {
				eeprom_config.cfg_lsw =
						ADV_INW(adv, ADV_CONFIG_LSW);
			}
			if (eeprom_config.cfg_msw != config_msw) {
				eeprom_config.cfg_msw =
						ADV_INW(adv, ADV_CONFIG_MSW);
			}
		}
	}
	eeprom_config.cfg_lsw |= ADV_CFG_LSW_HOST_INT_ON;
	if (adv_test_external_lram(adv) == 0) {
		/*
		 * XXX What about non PCI cards with no
		 *     external LRAM????
		 */
		if ((adv->type & (ADV_PCI|ADV_ULTRA)) == (ADV_PCI|ADV_ULTRA)) {
			eeprom_config.max_total_qng =
			    ADV_MAX_PCI_ULTRA_INRAM_TOTAL_QNG;
			eeprom_config.max_tag_qng =
			    ADV_MAX_PCI_ULTRA_INRAM_TAG_QNG;
		} else {
			eeprom_config.cfg_msw |= 0x0800;
			config_msw |= 0x0800;
			ADV_OUTW(adv, ADV_CONFIG_MSW, config_msw);
			eeprom_config.max_total_qng =
			     ADV_MAX_PCI_INRAM_TOTAL_QNG;
			eeprom_config.max_tag_qng = ADV_MAX_INRAM_TAG_QNG;
		}
		adv->max_openings = eeprom_config.max_total_qng;
	}
	if (checksum == eeprom_config.chksum) {
		/* Range/Sanity checking */
		if (eeprom_config.max_total_qng < ADV_MIN_TOTAL_QNG) {
			eeprom_config.max_total_qng = ADV_MIN_TOTAL_QNG;
		}
		if (eeprom_config.max_total_qng > ADV_MAX_TOTAL_QNG) {
			eeprom_config.max_total_qng = ADV_MAX_TOTAL_QNG;
		}
		if (eeprom_config.max_tag_qng > eeprom_config.max_total_qng) {
			eeprom_config.max_tag_qng = eeprom_config.max_total_qng;
		}
		if (eeprom_config.max_tag_qng < ADV_MIN_TAG_Q_PER_DVC) {
			eeprom_config.max_tag_qng = ADV_MIN_TAG_Q_PER_DVC;
		}
		adv->max_openings = eeprom_config.max_total_qng;

		adv->user_disc_enable = eeprom_config.disc_enable;
		adv->user_cmd_qng_enabled = eeprom_config.use_cmd_qng;
		adv->isa_dma_speed = EEPROM_DMA_SPEED(eeprom_config);
		adv->scsi_id = EEPROM_SCSIID(eeprom_config) & ADV_MAX_TID;
		EEPROM_SET_SCSIID(eeprom_config, adv->scsi_id);
		adv->control = eeprom_config.cntl;
		for (i = 0; i <= ADV_MAX_TID; i++)
			adv_sdtr_to_period_offset(adv,
						  eeprom_config.sdtr_data[i],
						  &adv->tinfo[i].user.period,
						  &adv->tinfo[i].user.offset,
						  i);
	} else {
		u_int8_t sync_data;

		printf("adv%d: Warning: EEPROM checksum mismatch. "
		       "Using default device parameters\n", adv->unit);

		/* Set reasonable defaults since we can't read the EEPROM */
		adv->isa_dma_speed = /*ADV_DEF_ISA_DMA_SPEED*/1;
		adv->max_openings = ADV_DEF_MAX_TOTAL_QNG;
		adv->disc_enable = TARGET_BIT_VECTOR_SET;
		adv->user_disc_enable = TARGET_BIT_VECTOR_SET;
		adv->cmd_qng_enabled = TARGET_BIT_VECTOR_SET;
		adv->user_cmd_qng_enabled = TARGET_BIT_VECTOR_SET;
		adv->scsi_id = 7;

		sync_data = ADV_DEF_SDTR_OFFSET | (ADV_DEF_SDTR_INDEX << 4);
		for (i = 0; i <= ADV_MAX_TID; i++)
			adv_sdtr_to_period_offset(adv, sync_data,
						  &adv->tinfo[i].user.period,
						  &adv->tinfo[i].user.offset,
						  i);
	}

	if (adv_set_eeprom_config(adv, &eeprom_config) != 0)
		printf("%s: WARNING! Failure writing to EEPROM.\n",
		       adv_name(adv));

	adv_set_chip_scsiid(adv, adv->scsi_id);
	if (adv_init_lram_and_mcode(adv))
		return (1);

	adv->disc_enable = adv->user_disc_enable;

	adv_write_lram_8(adv, ADVV_DISC_ENABLE_B, adv->disc_enable);
	for (i = 0; i <= ADV_MAX_TID; i++) {
		/*
		 * Start off in async mode.
		 */
		adv_set_syncrate(adv, /*struct cam_path */NULL,
				 i, /*period*/0, /*offset*/0,
				 ADV_TRANS_CUR);
		/*
		 * Enable the use of tagged commands on all targets.
		 * This allows the kernel driver to make up its own mind
		 * as it sees fit to tag queue instead of having the
		 * firmware try to second-guess the tag_code settings.
		 */
		adv_write_lram_8(adv, ADVV_MAX_DVC_QNG_BEG + i,
				 adv->max_openings);
	}
	adv_write_lram_8(adv, ADVV_USE_TAGGED_QNG_B, TARGET_BIT_VECTOR_SET);
	adv_write_lram_8(adv, ADVV_CAN_TAGGED_QNG_B, TARGET_BIT_VECTOR_SET);
	printf("adv%d: AdvanSys %s Host Adapter, SCSI ID %d, queue depth %d\n",
	       adv->unit, (adv->type & ADV_ULTRA) ? "Ultra SCSI" : "SCSI",
	       adv->scsi_id, adv->max_openings);
	return (0);
}

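/*
 * Interrupt handler.  If the chip has posted an interrupt, flag that
 * we are in the ISR, acknowledge the interrupt, and either service a
 * microcode halt condition or reap completed requests from the done
 * queue.  The LRAM address register and host flag are saved and
 * restored around our LRAM accesses.
 */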
void
adv_intr(void *arg)
{
	struct	  adv_softc *adv;
	u_int16_t chipstat;
	u_int16_t saved_ram_addr;
	u_int8_t  ctrl_reg;
	u_int8_t  saved_ctrl_reg;
	int	  status;
	u_int8_t  host_flag;

	adv = (struct adv_softc *)arg;

	ctrl_reg = ADV_INB(adv, ADV_CHIP_CTRL);
	saved_ctrl_reg = ctrl_reg & ~(ADV_CC_SCSI_RESET | ADV_CC_CHIP_RESET |
				      ADV_CC_SINGLE_STEP | ADV_CC_DIAG |
				      ADV_CC_TEST);

	if ((chipstat = ADV_INW(adv, ADV_CHIP_STATUS)) & ADV_CSW_INT_PENDING) {

		saved_ram_addr = ADV_INW(adv, ADV_LRAM_ADDR);
		host_flag = adv_read_lram_8(adv, ADVV_HOST_FLAG_B);
		adv_write_lram_8(adv, ADVV_HOST_FLAG_B,
				 host_flag | ADV_HOST_FLAG_IN_ISR);

		adv_ack_interrupt(adv);

		if ((chipstat & ADV_CSW_HALTED)
		    && (ctrl_reg & ADV_CC_SINGLE_STEP)) {
			adv_isr_chip_halted(adv);
			saved_ctrl_reg &= ~ADV_CC_HALT;
		} else {
			adv_run_doneq(adv);
		}
		ADV_OUTW(adv, ADV_LRAM_ADDR, saved_ram_addr);
#ifdef DIAGNOSTIC
		if (ADV_INW(adv, ADV_LRAM_ADDR) != saved_ram_addr)
			panic("adv_intr: Unable to set LRAM addr");
#endif
		adv_write_lram_8(adv, ADVV_HOST_FLAG_B, host_flag);
	}

	ADV_OUTB(adv, ADV_CHIP_CTRL, saved_ctrl_reg);
}

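/*
 * Walk the microcode's done queue in local RAM, freeing each completed
 * queue element (and any S/G continuation queues attached to it) and
 * passing the completion status to adv_done().
 */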
void
adv_run_doneq(struct adv_softc *adv)
{
	struct adv_q_done_info scsiq;
	u_int		  doneq_head;
	u_int		  done_qno;

	doneq_head = adv_read_lram_16(adv, ADVV_DONE_Q_TAIL_W) & 0xFF;
	done_qno = adv_read_lram_8(adv, ADV_QNO_TO_QADDR(doneq_head)
				   + ADV_SCSIQ_B_FWD);
	while (done_qno != ADV_QLINK_END) {
		union ccb* ccb;
		u_int done_qaddr;
		u_int sg_queue_cnt;
		int   aborted;

		done_qaddr = ADV_QNO_TO_QADDR(done_qno);

		/* Pull status from this request */
		sg_queue_cnt = adv_copy_lram_doneq(adv, done_qaddr, &scsiq,
						   adv->max_dma_count);

		/* Mark it as free */
		adv_write_lram_8(adv, done_qaddr + ADV_SCSIQ_B_STATUS,
				 scsiq.q_status & ~(QS_READY|QS_ABORTED));

		/* Process request based on retrieved info */
		if ((scsiq.cntl & QC_SG_HEAD) != 0) {
			u_int i;

			/*
			 * S/G based request.  Free all of the queue
			 * structures that contained S/G information.
			 */
			for (i = 0; i < sg_queue_cnt; i++) {
				done_qno = adv_read_lram_8(adv, done_qaddr
							   + ADV_SCSIQ_B_FWD);

#ifdef DIAGNOSTIC
				if (done_qno == ADV_QLINK_END) {
					panic("adv_qdone: Corrupted SG "
					      "list encountered");
				}
#endif
				done_qaddr = ADV_QNO_TO_QADDR(done_qno);

				/* Mark SG queue as free */
				adv_write_lram_8(adv, done_qaddr
						 + ADV_SCSIQ_B_STATUS, QS_FREE);
			}
		} else
			sg_queue_cnt = 0;
#ifdef DIAGNOSTIC
		if (adv->cur_active < (sg_queue_cnt + 1))
			panic("adv_qdone: Attempting to free more "
			      "queues than are active");
#endif
		adv->cur_active -= sg_queue_cnt + 1;

		aborted = (scsiq.q_status & QS_ABORTED) != 0;

		if ((scsiq.q_status != QS_DONE)
		 && (scsiq.q_status & QS_ABORTED) == 0)
			panic("adv_qdone: completed scsiq with unknown status");

		scsiq.remain_bytes += scsiq.extra_bytes;

		if ((scsiq.d3.done_stat == QD_WITH_ERROR) &&
		    (scsiq.d3.host_stat == QHSTA_M_DATA_OVER_RUN)) {
			if ((scsiq.cntl & (QC_DATA_IN|QC_DATA_OUT)) == 0) {
				scsiq.d3.done_stat = QD_NO_ERROR;
				scsiq.d3.host_stat = QHSTA_NO_ERROR;
			}
		}

		ccb = (union ccb *)scsiq.d2.ccb_ptr;
		ccb->csio.resid = scsiq.remain_bytes;
		adv_done(adv, (union ccb *)scsiq.d2.ccb_ptr,
			 scsiq.d3.done_stat, scsiq.d3.host_stat,
			 scsiq.d3.scsi_stat, scsiq.q_no);

		doneq_head = done_qno;
		done_qno = adv_read_lram_8(adv, done_qaddr
					   + ADV_SCSIQ_B_FWD);
	}
	adv_write_lram_16(adv, ADVV_DONE_Q_TAIL_W, doneq_head);
}

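/*
 * Translate a completed request's status into CAM terms, tear down its
 * DMA mappings, cancel its timeout, and hand the CCB back to the XPT
 * layer.
 */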
void
adv_done(struct adv_softc *adv, union ccb *ccb, u_int done_stat,
	 u_int host_stat, u_int scsi_status, u_int q_no)
{
	struct	   adv_ccb_info *cinfo;

	cinfo = (struct adv_ccb_info *)ccb->ccb_h.ccb_cinfo_ptr;
	/*
	 * Null this out so that we catch driver bugs that cause a
	 * ccb to be completed twice.
	 */
	ccb->ccb_h.ccb_cinfo_ptr = NULL;
	if ((ccb->ccb_h.flags & CAM_DIR_MASK) != CAM_DIR_NONE) {
		bus_dmasync_op_t op;

		if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN)
			op = BUS_DMASYNC_POSTREAD;
		else
			op = BUS_DMASYNC_POSTWRITE;
		bus_dmamap_sync(adv->buffer_dmat, cinfo->dmamap, op);
		bus_dmamap_unload(adv->buffer_dmat, cinfo->dmamap);
	}

	switch (done_stat) {
	case QD_NO_ERROR:
		switch (host_stat) {
		case QHSTA_NO_ERROR:
			ccb->ccb_h.status = CAM_REQ_CMP;
			break;
		case QHSTA_M_SEL_TIMEOUT:
			ccb->ccb_h.status = CAM_SEL_TIMEOUT;
			break;
		default:
			/* QHSTA error occurred */
			/* XXX Can I get more explicit information here? */
			ccb->ccb_h.status = CAM_REQ_CMP_ERR;
			break;
		}
		break;

	case QD_WITH_ERROR:
		switch (host_stat) {
		case QHSTA_NO_ERROR:
			ccb->csio.scsi_status = scsi_status;
			switch (scsi_status) {
			case SCSI_STATUS_CHECK_COND:
			case SCSI_STATUS_CMD_TERMINATED:
				ccb->ccb_h.status |= CAM_AUTOSNS_VALID;
				/* Structure copy */
				ccb->csio.sense_data =
				    adv->sense_buffers[q_no - 1];
				/* FALLTHROUGH */
			case SCSI_STATUS_BUSY:
			case SCSI_STATUS_RESERV_CONFLICT:
			case SCSI_STATUS_QUEUE_FULL:
			case SCSI_STATUS_COND_MET:
			case SCSI_STATUS_INTERMED:
			case SCSI_STATUS_INTERMED_COND_MET:
				ccb->ccb_h.status |= CAM_SCSI_STATUS_ERROR;
				break;
			case SCSI_STATUS_OK:
				ccb->ccb_h.status |= CAM_REQ_CMP;
				break;
			}
			break;
		case QHSTA_M_SEL_TIMEOUT:
			ccb->ccb_h.status = CAM_SEL_TIMEOUT;
			break;
		default:
			/* XXX Can I get more explicit information here? */
			ccb->ccb_h.status = CAM_REQ_CMP_ERR;
			break;
		}
		break;

	case QD_ABORTED_BY_HOST:
		/* Don't clobber a more explicit error code we've already set */
		if ((ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_INPROG)
			ccb->ccb_h.status = CAM_REQ_ABORTED;
		break;

	default:
		printf("adv_done: Unknown done status 0x%x\n", done_stat);
		ccb->ccb_h.status = CAM_REQ_CMP_ERR;
		break;
	}
	if ((cinfo->state & ACCB_RELEASE_SIMQ) != 0)
		ccb->ccb_h.status |= CAM_RELEASE_SIMQ;
	else if (adv->openings_needed > 0) {
		int openings;

		openings = adv->max_openings - adv->cur_active - ADV_MIN_FREE_Q;
		if (openings >= adv->openings_needed) {
			ccb->ccb_h.status |= CAM_RELEASE_SIMQ;
			adv->openings_needed = 0;
		}
	}
	/* Remove from the pending list */
	LIST_REMOVE(&ccb->ccb_h, sim_links.le);

	untimeout(adv_timeout, ccb, ccb->ccb_h.timeout_ch);
	if ((cinfo->state & ACCB_RECOVERY_CCB) != 0) {
		/*
		 * We now traverse our list of pending CCBs and reinstate
		 * their timeouts.
		 */
		struct cam_path *path;
		struct ccb_hdr *ccb_h;
		cam_status error;

		ccb_h = LIST_FIRST(&adv->pending_ccbs);
		while (ccb_h != NULL) {
			ccb_h->timeout_ch =
			    timeout(adv_timeout, (caddr_t)ccb_h,
					    (ccb_h->timeout * hz) / 1000);
			ccb_h = LIST_NEXT(ccb_h, sim_links.le);
		}
		printf("%s: No longer in timeout\n", adv_name(adv));
	}
	if ((ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP
	 && (ccb->ccb_h.status & CAM_DEV_QFRZN) == 0) {
		xpt_freeze_devq(ccb->ccb_h.path, /*count*/1);
		ccb->ccb_h.status |= CAM_DEV_QFRZN;
	}
	adv_free_ccb_info(adv, cinfo);
	ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
	xpt_done(ccb);
}

/*
 * Function to poll for command completion when
 * interrupts are disabled (crash dumps)
 */
static void
adv_poll(struct cam_sim *sim)
{
	adv_intr(cam_sim_softc(sim));
}

/*
 * Attach all the sub-devices we can find
 */
int
adv_attach(adv)
	struct adv_softc *adv;
{
	struct ccb_setasync csa;
	struct cam_devq *devq;

	/*
	 * Create our DMA tags.  These tags define the kinds of device
	 * accessible memory allocations and memory mappings we will
	 * need to perform during normal operation.
	 *
	 * Unless we need to further restrict the allocation, we rely
	 * on the restrictions of the parent dmat, hence the common
	 * use of MAXADDR and MAXSIZE.
	 */

	/* DMA tag for mapping buffers into device visible space. */
	if (bus_dma_tag_create(adv->parent_dmat, /*alignment*/0, /*boundary*/0,
			       /*lowaddr*/BUS_SPACE_MAXADDR,
			       /*highaddr*/BUS_SPACE_MAXADDR,
			       /*filter*/NULL, /*filterarg*/NULL,
			       /*maxsize*/MAXBSIZE,
			       /*nsegments*/ADV_MAX_SG_LIST,
			       /*maxsegsz*/BUS_SPACE_MAXSIZE_32BIT,
			       /*flags*/BUS_DMA_ALLOCNOW,
			       &adv->buffer_dmat) != 0) {
		goto error_exit;
	}
	adv->init_level++;

	/* DMA tag for our sense buffers */
	if (bus_dma_tag_create(adv->parent_dmat, /*alignment*/0, /*boundary*/0,
			       /*lowaddr*/BUS_SPACE_MAXADDR,
			       /*highaddr*/BUS_SPACE_MAXADDR,
			       /*filter*/NULL, /*filterarg*/NULL,
			       sizeof(struct scsi_sense_data)*adv->max_openings,
			       /*nsegments*/1,
			       /*maxsegsz*/BUS_SPACE_MAXSIZE_32BIT,
			       /*flags*/0, &adv->sense_dmat) != 0) {
		goto error_exit;
	}

	adv->init_level++;

	/* Allocation for our sense buffers */
	if (bus_dmamem_alloc(adv->sense_dmat, (void **)&adv->sense_buffers,
			     BUS_DMA_NOWAIT, &adv->sense_dmamap) != 0) {
		goto error_exit;
	}

	adv->init_level++;

	/* And permanently map them */
	bus_dmamap_load(adv->sense_dmat, adv->sense_dmamap,
			adv->sense_buffers,
			sizeof(struct scsi_sense_data)*adv->max_openings,
			adv_map, &adv->sense_physbase, /*flags*/0);

	adv->init_level++;

	/*
	 * Fire up the chip
	 */
	if (adv_start_chip(adv) != 1) {
		printf("adv%d: Unable to start on board processor. Aborting.\n",
		       adv->unit);
		return (0);
	}

	/*
	 * Create the device queue for our SIM.
	 */
	devq = cam_simq_alloc(adv->max_openings);
	if (devq == NULL)
		return (0);

	/*
	 * Construct our SIM entry.
	 */
	adv->sim = cam_sim_alloc(adv_action, adv_poll, "adv", adv, adv->unit,
				 1, adv->max_openings, devq);
	if (adv->sim == NULL)
		return (0);

	/*
	 * Register the bus.
	 *
	 * XXX Twin Channel EISA Cards???
	 */
	if (xpt_bus_register(adv->sim, 0) != CAM_SUCCESS) {
		cam_sim_free(adv->sim, /*free devq*/TRUE);
		return (0);
	}

	if (xpt_create_path(&adv->path, /*periph*/NULL, cam_sim_path(adv->sim),
			    CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD)
	   == CAM_REQ_CMP) {
		xpt_setup_ccb(&csa.ccb_h, adv->path, /*priority*/5);
		csa.ccb_h.func_code = XPT_SASYNC_CB;
		csa.event_enable = AC_FOUND_DEVICE|AC_LOST_DEVICE;
		csa.callback = advasync;
		csa.callback_arg = adv;
		xpt_action((union ccb *)&csa);
	}
	return (1);

error_exit:
	return (0);
}
1283