/* aic7xxx_osm.c revision 102674 */
1/*
2 * Bus independent FreeBSD shim for the aic7xxx based adaptec SCSI controllers
3 *
4 * Copyright (c) 1994-2001 Justin T. Gibbs.
5 * All rights reserved.
6 *
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
9 * are met:
10 * 1. Redistributions of source code must retain the above copyright
11 *    notice, this list of conditions, and the following disclaimer,
12 *    without modification.
13 * 2. The name of the author may not be used to endorse or promote products
14 *    derived from this software without specific prior written permission.
15 *
16 * Alternatively, this software may be distributed under the terms of the
17 * GNU Public License ("GPL").
18 *
19 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
20 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
21 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
22 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
23 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
24 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
25 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
26 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
27 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
28 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
29 * SUCH DAMAGE.
30 *
31 * $Id: //depot/aic7xxx/freebsd/dev/aic7xxx/aic7xxx_osm.c#10 $
32 *
33 * $FreeBSD: head/sys/dev/aic7xxx/aic7xxx_osm.c 102674 2002-08-31 06:43:55Z gibbs $
34 */
35
36#include <dev/aic7xxx/aic7xxx_osm.h>
37#include <dev/aic7xxx/aic7xxx_inline.h>
38
39#ifndef AHC_TMODE_ENABLE
40#define AHC_TMODE_ENABLE 0
41#endif
42
43#define ccb_scb_ptr spriv_ptr0
44
45#if UNUSED
46static void	ahc_dump_targcmd(struct target_cmd *cmd);
47#endif
48static int	ahc_modevent(module_t mod, int type, void *data);
49static void	ahc_action(struct cam_sim *sim, union ccb *ccb);
50static void	ahc_get_tran_settings(struct ahc_softc *ahc,
51				      int our_id, char channel,
52				      struct ccb_trans_settings *cts);
53static void	ahc_async(void *callback_arg, uint32_t code,
54			  struct cam_path *path, void *arg);
55static void	ahc_execute_scb(void *arg, bus_dma_segment_t *dm_segs,
56				int nsegments, int error);
57static void	ahc_poll(struct cam_sim *sim);
58static void	ahc_setup_data(struct ahc_softc *ahc, struct cam_sim *sim,
59			       struct ccb_scsiio *csio, struct scb *scb);
60static void	ahc_abort_ccb(struct ahc_softc *ahc, struct cam_sim *sim,
61			      union ccb *ccb);
62static int	ahc_create_path(struct ahc_softc *ahc,
63				char channel, u_int target, u_int lun,
64				struct cam_path **path);
65
66static void	ahc_set_recoveryscb(struct ahc_softc *ahc, struct scb *scb);
67
68static int
69ahc_create_path(struct ahc_softc *ahc, char channel, u_int target,
70	        u_int lun, struct cam_path **path)
71{
72	path_id_t path_id;
73
74	if (channel == 'B')
75		path_id = cam_sim_path(ahc->platform_data->sim_b);
76	else
77		path_id = cam_sim_path(ahc->platform_data->sim);
78
79	return (xpt_create_path(path, /*periph*/NULL,
80				path_id, target, lun));
81}
82
83int
84ahc_map_int(struct ahc_softc *ahc)
85{
86	int error;
87
88	/* Hook up our interrupt handler */
89	error = bus_setup_intr(ahc->dev_softc, ahc->platform_data->irq,
90			       INTR_TYPE_CAM, ahc_platform_intr, ahc,
91			       &ahc->platform_data->ih);
92
93	if (error != 0)
94		device_printf(ahc->dev_softc, "bus_setup_intr() failed: %d\n",
95			      error);
96	return (error);
97}
98
99/*
100 * Attach all the sub-devices we can find
101 */
102int
103ahc_attach(struct ahc_softc *ahc)
104{
105	char   ahc_info[256];
106	struct ccb_setasync csa;
107	struct cam_devq *devq;
108	int bus_id;
109	int bus_id2;
110	struct cam_sim *sim;
111	struct cam_sim *sim2;
112	struct cam_path *path;
113	struct cam_path *path2;
114	long s;
115	int count;
116
117	count = 0;
118	sim = NULL;
119	sim2 = NULL;
120
121	ahc_controller_info(ahc, ahc_info);
122	printf("%s\n", ahc_info);
123	ahc_lock(ahc, &s);
124	/*
125	 * Attach secondary channel first if the user has
126	 * declared it the primary channel.
127	 */
128	if ((ahc->features & AHC_TWIN) != 0
129	 && (ahc->flags & AHC_PRIMARY_CHANNEL) != 0) {
130		bus_id = 1;
131		bus_id2 = 0;
132	} else {
133		bus_id = 0;
134		bus_id2 = 1;
135	}
136
137	/*
138	 * Create the device queue for our SIM(s).
139	 */
140	devq = cam_simq_alloc(AHC_MAX_QUEUE);
141	if (devq == NULL)
142		goto fail;
143
144	/*
145	 * Construct our first channel SIM entry
146	 */
147	sim = cam_sim_alloc(ahc_action, ahc_poll, "ahc", ahc,
148			    device_get_unit(ahc->dev_softc),
149			    1, AHC_MAX_QUEUE, devq);
150	if (sim == NULL) {
151		cam_simq_free(devq);
152		goto fail;
153	}
154
155	if (xpt_bus_register(sim, bus_id) != CAM_SUCCESS) {
156		cam_sim_free(sim, /*free_devq*/TRUE);
157		sim = NULL;
158		goto fail;
159	}
160
161	if (xpt_create_path(&path, /*periph*/NULL,
162			    cam_sim_path(sim), CAM_TARGET_WILDCARD,
163			    CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
164		xpt_bus_deregister(cam_sim_path(sim));
165		cam_sim_free(sim, /*free_devq*/TRUE);
166		sim = NULL;
167		goto fail;
168	}
169
170	xpt_setup_ccb(&csa.ccb_h, path, /*priority*/5);
171	csa.ccb_h.func_code = XPT_SASYNC_CB;
172	csa.event_enable = AC_LOST_DEVICE;
173	csa.callback = ahc_async;
174	csa.callback_arg = sim;
175	xpt_action((union ccb *)&csa);
176	count++;
177
178	if (ahc->features & AHC_TWIN) {
179		sim2 = cam_sim_alloc(ahc_action, ahc_poll, "ahc",
180				    ahc, device_get_unit(ahc->dev_softc), 1,
181				    AHC_MAX_QUEUE, devq);
182
183		if (sim2 == NULL) {
184			printf("ahc_attach: Unable to attach second "
185			       "bus due to resource shortage");
186			goto fail;
187		}
188
189		if (xpt_bus_register(sim2, bus_id2) != CAM_SUCCESS) {
190			printf("ahc_attach: Unable to attach second "
191			       "bus due to resource shortage");
192			/*
193			 * We do not want to destroy the device queue
194			 * because the first bus is using it.
195			 */
196			cam_sim_free(sim2, /*free_devq*/FALSE);
197			goto fail;
198		}
199
200		if (xpt_create_path(&path2, /*periph*/NULL,
201				    cam_sim_path(sim2),
202				    CAM_TARGET_WILDCARD,
203				    CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
204			xpt_bus_deregister(cam_sim_path(sim2));
205			cam_sim_free(sim2, /*free_devq*/FALSE);
206			sim2 = NULL;
207			goto fail;
208		}
209		xpt_setup_ccb(&csa.ccb_h, path2, /*priority*/5);
210		csa.ccb_h.func_code = XPT_SASYNC_CB;
211		csa.event_enable = AC_LOST_DEVICE;
212		csa.callback = ahc_async;
213		csa.callback_arg = sim2;
214		xpt_action((union ccb *)&csa);
215		count++;
216	}
217
218fail:
219	if ((ahc->features & AHC_TWIN) != 0
220	 && (ahc->flags & AHC_PRIMARY_CHANNEL) != 0) {
221		ahc->platform_data->sim_b = sim;
222		ahc->platform_data->path_b = path;
223		ahc->platform_data->sim = sim2;
224		ahc->platform_data->path = path2;
225	} else {
226		ahc->platform_data->sim = sim;
227		ahc->platform_data->path = path;
228		ahc->platform_data->sim_b = sim2;
229		ahc->platform_data->path_b = path2;
230	}
231
232	if (count != 0) {
233		/* We have to wait until after any system dumps... */
234		ahc->platform_data->eh =
235		    EVENTHANDLER_REGISTER(shutdown_final, ahc_shutdown,
236					  ahc, SHUTDOWN_PRI_DEFAULT);
237		ahc_intr_enable(ahc, TRUE);
238	}
239
240	ahc_unlock(ahc, &s);
241	return (count);
242}
243
244/*
245 * Catch an interrupt from the adapter
246 */
void
ahc_platform_intr(void *arg)
{
	/*
	 * The bus layer hands back the softc we registered in
	 * ahc_map_int(); forward straight to the core interrupt handler.
	 */
	ahc_intr((struct ahc_softc *)arg);
}
255
256/*
257 * We have an scb which has been processed by the
258 * adaptor, now we look to see how the operation
259 * went.
260 */
void
ahc_done(struct ahc_softc *ahc, struct scb *scb)
{
	union ccb *ccb;

	CAM_DEBUG(scb->io_ctx->ccb_h.path, CAM_DEBUG_TRACE,
		  ("ahc_done - scb %d\n", scb->hscb->tag));

	ccb = scb->io_ctx;
	/* SCB is finished; pull it off the driver's pending list. */
	LIST_REMOVE(scb, pending_links);
	if ((scb->flags & SCB_UNTAGGEDQ) != 0) {
		struct scb_tailq *untagged_q;
		int target_offset;

		/*
		 * Untagged commands are serialized per target; remove this
		 * one and kick the queue so the next untagged command (if
		 * any) can be issued.
		 */
		target_offset = SCB_GET_TARGET_OFFSET(ahc, scb);
		untagged_q = &ahc->untagged_queues[target_offset];
		TAILQ_REMOVE(untagged_q, scb, links.tqe);
		scb->flags &= ~SCB_UNTAGGEDQ;
		ahc_run_untagged_queue(ahc, untagged_q);
	}

	/* Cancel the per-command watchdog set when the SCB was queued. */
	untimeout(ahc_timeout, (caddr_t)scb, ccb->ccb_h.timeout_ch);

	if ((ccb->ccb_h.flags & CAM_DIR_MASK) != CAM_DIR_NONE) {
		bus_dmasync_op_t op;

		/* Sync and unload the data buffer's DMA mapping. */
		if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN)
			op = BUS_DMASYNC_POSTREAD;
		else
			op = BUS_DMASYNC_POSTWRITE;
		bus_dmamap_sync(ahc->buffer_dmat, scb->dmamap, op);
		bus_dmamap_unload(ahc->buffer_dmat, scb->dmamap);
	}

	if (ccb->ccb_h.func_code == XPT_CONT_TARGET_IO) {
		struct cam_path *ccb_path;

		/*
		 * If we have finally disconnected, clean up our
		 * pending device state.
		 * XXX - There may be error states that cause where
		 *       we will remain connected.
		 */
		ccb_path = ccb->ccb_h.path;
		if (ahc->pending_device != NULL
		 && xpt_path_comp(ahc->pending_device->path, ccb_path) == 0) {

			if ((ccb->ccb_h.flags & CAM_SEND_STATUS) != 0) {
				ahc->pending_device = NULL;
			} else {
				if (bootverbose) {
					xpt_print_path(ccb->ccb_h.path);
					printf("Still connected\n");
				}
				ahc_freeze_ccb(ccb);
			}
		}

		if (ahc_get_transaction_status(scb) == CAM_REQ_INPROG)
			ccb->ccb_h.status |= CAM_REQ_CMP;
		ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
		/* Target-mode completions return early; initiator-mode
		 * bookkeeping below does not apply. */
		ahc_free_scb(ahc, scb);
		xpt_done(ccb);
		return;
	}

	/*
	 * If the recovery SCB completes, we have to be
	 * out of our timeout.
	 */
	if ((scb->flags & SCB_RECOVERY_SCB) != 0) {
		struct	scb *list_scb;

		/*
		 * We were able to complete the command successfully,
		 * so reinstate the timeouts for all other pending
		 * commands.
		 */
		LIST_FOREACH(list_scb, &ahc->pending_scbs, pending_links) {
			/* NOTE: this inner ccb intentionally shadows the
			 * outer one for the duration of the loop. */
			union ccb *ccb;
			uint64_t time;

			ccb = list_scb->io_ctx;
			if (ccb->ccb_h.timeout == CAM_TIME_INFINITY)
				continue;

			/* Convert the CCB's timeout (ms) into ticks. */
			time = ccb->ccb_h.timeout;
			time *= hz;
			time /= 1000;
			ccb->ccb_h.timeout_ch =
			    timeout(ahc_timeout, list_scb, time);
		}

		if (ahc_get_transaction_status(scb) == CAM_BDR_SENT
		 || ahc_get_transaction_status(scb) == CAM_REQ_ABORTED)
			ahc_set_transaction_status(scb, CAM_CMD_TIMEOUT);
		ahc_print_path(ahc, scb);
		printf("no longer in timeout, status = %x\n",
		       ccb->ccb_h.status);
	}

	/* Don't clobber any existing error state */
	if (ahc_get_transaction_status(scb) == CAM_REQ_INPROG) {
		ccb->ccb_h.status |= CAM_REQ_CMP;
	} else if ((scb->flags & SCB_SENSE) != 0) {
		/*
		 * We performed autosense retrieval.
		 *
		 * Zero any sense not transferred by the
		 * device.  The SCSI spec mandates that any
		 * untransfered data should be assumed to be
		 * zero.  Complete the 'bounce' of sense information
		 * through buffers accessible via bus-space by
		 * copying it into the clients csio.
		 */
		memset(&ccb->csio.sense_data, 0, sizeof(ccb->csio.sense_data));
		memcpy(&ccb->csio.sense_data,
		       ahc_get_sense_buf(ahc, scb),
		       (ahc_le32toh(scb->sg_list->len) & AHC_SG_LEN_MASK)
		       - ccb->csio.sense_resid);
		scb->io_ctx->ccb_h.status |= CAM_AUTOSNS_VALID;
	}
	ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
	ahc_free_scb(ahc, scb);
	xpt_done(ccb);
}
387
/*
 * CAM action entry point: dispatch a CCB delivered by the transport
 * layer to the appropriate handler based on its function code.
 * Completed or rejected CCBs are returned to CAM via xpt_done().
 */
static void
ahc_action(struct cam_sim *sim, union ccb *ccb)
{
	struct	ahc_softc *ahc;
	struct	ahc_tmode_lstate *lstate;
	u_int	target_id;
	u_int	our_id;
	long	s;

	CAM_DEBUG(ccb->ccb_h.path, CAM_DEBUG_TRACE, ("ahc_action\n"));

	ahc = (struct ahc_softc *)cam_sim_softc(sim);

	target_id = ccb->ccb_h.target_id;
	our_id = SIM_SCSI_ID(ahc, sim);

	switch (ccb->ccb_h.func_code) {
	/* Common cases first */
	case XPT_ACCEPT_TARGET_IO:	/* Accept Host Target Mode CDB */
	case XPT_CONT_TARGET_IO:/* Continue Host Target I/O Connection*/
	{
		struct	   ahc_tmode_tstate *tstate;
		cam_status status;

		status = ahc_find_tmode_devs(ahc, sim, ccb, &tstate,
					     &lstate, TRUE);

		if (status != CAM_REQ_CMP) {
			if (ccb->ccb_h.func_code == XPT_CONT_TARGET_IO) {
				/* Response from the black hole device */
				tstate = NULL;
				lstate = ahc->black_hole;
			} else {
				ccb->ccb_h.status = status;
				xpt_done(ccb);
				break;
			}
		}
		if (ccb->ccb_h.func_code == XPT_ACCEPT_TARGET_IO) {

			/* Queue the ATIO until a target command arrives. */
			ahc_lock(ahc, &s);
			SLIST_INSERT_HEAD(&lstate->accept_tios, &ccb->ccb_h,
					  sim_links.sle);
			ccb->ccb_h.status = CAM_REQ_INPROG;
			if ((ahc->flags & AHC_TQINFIFO_BLOCKED) != 0)
				ahc_run_tqinfifo(ahc, /*paused*/FALSE);
			ahc_unlock(ahc, &s);
			break;
		}

		/*
		 * The target_id represents the target we attempt to
		 * select.  In target mode, this is the initiator of
		 * the original command.
		 */
		our_id = target_id;
		target_id = ccb->csio.init_id;
		/* FALLTHROUGH */
	}
	case XPT_SCSI_IO:	/* Execute the requested I/O operation */
	case XPT_RESET_DEV:	/* Bus Device Reset the specified SCSI device */
	{
		struct	scb *scb;
		struct	hardware_scb *hscb;

		/* Initiator-role requests require the initiator role. */
		if ((ahc->flags & AHC_INITIATORROLE) == 0
		 && (ccb->ccb_h.func_code == XPT_SCSI_IO
		  || ccb->ccb_h.func_code == XPT_RESET_DEV)) {
			ccb->ccb_h.status = CAM_PROVIDE_FAIL;
			xpt_done(ccb);
			return;
		}

		/*
		 * get an scb to use.
		 */
		ahc_lock(ahc, &s);
		if ((scb = ahc_get_scb(ahc)) == NULL) {

			/*
			 * Out of SCBs: freeze the SIM queue and ask CAM
			 * to requeue this CCB once resources free up.
			 */
			xpt_freeze_simq(sim, /*count*/1);
			ahc->flags |= AHC_RESOURCE_SHORTAGE;
			ahc_unlock(ahc, &s);
			ccb->ccb_h.status = CAM_REQUEUE_REQ;
			xpt_done(ccb);
			return;
		}
		ahc_unlock(ahc, &s);

		hscb = scb->hscb;

		CAM_DEBUG(ccb->ccb_h.path, CAM_DEBUG_SUBTRACE,
			  ("start scb(%p)\n", scb));
		scb->io_ctx = ccb;
		/*
		 * So we can find the SCB when an abort is requested
		 */
		ccb->ccb_h.ccb_scb_ptr = scb;

		/*
		 * Put all the arguments for the xfer in the scb
		 */
		hscb->control = 0;
		hscb->scsiid = BUILD_SCSIID(ahc, sim, target_id, our_id);
		hscb->lun = ccb->ccb_h.target_lun;
		if (ccb->ccb_h.func_code == XPT_RESET_DEV) {
			/* BDR carries no CDB; queue it immediately. */
			hscb->cdb_len = 0;
			scb->flags |= SCB_DEVICE_RESET;
			hscb->control |= MK_MESSAGE;
			ahc_execute_scb(scb, NULL, 0, 0);
		} else {
			if (ccb->ccb_h.func_code == XPT_CONT_TARGET_IO) {
				struct target_data *tdata;

				/* Fill in the target-mode portion of the
				 * hardware SCB. */
				tdata = &hscb->shared_data.tdata;
				if (ahc->pending_device == lstate)
					scb->flags |= SCB_TARGET_IMMEDIATE;
				hscb->control |= TARGET_SCB;
				tdata->target_phases = IDENTIFY_SEEN;
				if ((ccb->ccb_h.flags & CAM_SEND_STATUS) != 0) {
					tdata->target_phases |= SPHASE_PENDING;
					tdata->scsi_status =
					    ccb->csio.scsi_status;
				}
	 			if (ccb->ccb_h.flags & CAM_DIS_DISCONNECT)
					tdata->target_phases |= NO_DISCONNECT;

				tdata->initiator_tag = ccb->csio.tag_id;
			}
			if (ccb->ccb_h.flags & CAM_TAG_ACTION_VALID)
				hscb->control |= ccb->csio.tag_action;

			/* Map data buffers; completion continues in
			 * ahc_execute_scb() via the dmamap callback. */
			ahc_setup_data(ahc, sim, &ccb->csio, scb);
		}
		break;
	}
	case XPT_NOTIFY_ACK:
	case XPT_IMMED_NOTIFY:
	{
		struct	   ahc_tmode_tstate *tstate;
		struct	   ahc_tmode_lstate *lstate;
		cam_status status;

		status = ahc_find_tmode_devs(ahc, sim, ccb, &tstate,
					     &lstate, TRUE);

		if (status != CAM_REQ_CMP) {
			ccb->ccb_h.status = status;
			xpt_done(ccb);
			break;
		}
		SLIST_INSERT_HEAD(&lstate->immed_notifies, &ccb->ccb_h,
				  sim_links.sle);
		ccb->ccb_h.status = CAM_REQ_INPROG;
		ahc_send_lstate_events(ahc, lstate);
		break;
	}
	case XPT_EN_LUN:		/* Enable LUN as a target */
		ahc_handle_en_lun(ahc, sim, ccb);
		xpt_done(ccb);
		break;
	case XPT_ABORT:			/* Abort the specified CCB */
	{
		ahc_abort_ccb(ahc, sim, ccb);
		break;
	}
	case XPT_SET_TRAN_SETTINGS:
	{
		/* Two implementations: new-style (typed proto/xport
		 * structures) and old-style (flat flags). */
#ifdef AHC_NEW_TRAN_SETTINGS
		struct	ahc_devinfo devinfo;
		struct	ccb_trans_settings *cts;
		struct	ccb_trans_settings_scsi *scsi;
		struct	ccb_trans_settings_spi *spi;
		struct	ahc_initiator_tinfo *tinfo;
		struct	ahc_tmode_tstate *tstate;
		uint16_t *discenable;
		uint16_t *tagenable;
		u_int	update_type;

		cts = &ccb->cts;
		scsi = &cts->proto_specific.scsi;
		spi = &cts->xport_specific.spi;
		ahc_compile_devinfo(&devinfo, SIM_SCSI_ID(ahc, sim),
				    cts->ccb_h.target_id,
				    cts->ccb_h.target_lun,
				    SIM_CHANNEL(ahc, sim),
				    ROLE_UNKNOWN);
		tinfo = ahc_fetch_transinfo(ahc, devinfo.channel,
					    devinfo.our_scsiid,
					    devinfo.target, &tstate);
		update_type = 0;
		if (cts->type == CTS_TYPE_CURRENT_SETTINGS) {
			update_type |= AHC_TRANS_GOAL;
			discenable = &tstate->discenable;
			tagenable = &tstate->tagenable;
			tinfo->curr.protocol_version =
			    cts->protocol_version;
			tinfo->curr.transport_version =
			    cts->transport_version;
			tinfo->goal.protocol_version =
			    cts->protocol_version;
			tinfo->goal.transport_version =
			    cts->transport_version;
		} else if (cts->type == CTS_TYPE_USER_SETTINGS) {
			update_type |= AHC_TRANS_USER;
			discenable = &ahc->user_discenable;
			tagenable = &ahc->user_tagenable;
			tinfo->user.protocol_version =
			    cts->protocol_version;
			tinfo->user.transport_version =
			    cts->transport_version;
		} else {
			ccb->ccb_h.status = CAM_REQ_INVALID;
			xpt_done(ccb);
			break;
		}

		ahc_lock(ahc, &s);

		if ((spi->valid & CTS_SPI_VALID_DISC) != 0) {
			if ((spi->flags & CTS_SPI_FLAGS_DISC_ENB) != 0)
				*discenable |= devinfo.target_mask;
			else
				*discenable &= ~devinfo.target_mask;
		}

		if ((scsi->valid & CTS_SCSI_VALID_TQ) != 0) {
			if ((scsi->flags & CTS_SCSI_FLAGS_TAG_ENB) != 0)
				*tagenable |= devinfo.target_mask;
			else
				*tagenable &= ~devinfo.target_mask;
		}

		if ((spi->valid & CTS_SPI_VALID_BUS_WIDTH) != 0) {
			ahc_validate_width(ahc, /*tinfo limit*/NULL,
					   &spi->bus_width, ROLE_UNKNOWN);
			ahc_set_width(ahc, &devinfo, spi->bus_width,
				      update_type, /*paused*/FALSE);
		}

		/* For fields the caller did not supply, fall back to the
		 * stored user or goal settings. */
		if ((spi->valid & CTS_SPI_VALID_PPR_OPTIONS) == 0) {
			if (update_type == AHC_TRANS_USER)
				spi->ppr_options = tinfo->user.ppr_options;
			else
				spi->ppr_options = tinfo->goal.ppr_options;
		}

		if ((spi->valid & CTS_SPI_VALID_SYNC_OFFSET) == 0) {
			if (update_type == AHC_TRANS_USER)
				spi->sync_offset = tinfo->user.offset;
			else
				spi->sync_offset = tinfo->goal.offset;
		}

		if ((spi->valid & CTS_SPI_VALID_SYNC_RATE) == 0) {
			if (update_type == AHC_TRANS_USER)
				spi->sync_period = tinfo->user.period;
			else
				spi->sync_period = tinfo->goal.period;
		}

		if (((spi->valid & CTS_SPI_VALID_SYNC_RATE) != 0)
		 || ((spi->valid & CTS_SPI_VALID_SYNC_OFFSET) != 0)) {
			struct ahc_syncrate *syncrate;
			u_int maxsync;

			if ((ahc->features & AHC_ULTRA2) != 0)
				maxsync = AHC_SYNCRATE_DT;
			else if ((ahc->features & AHC_ULTRA) != 0)
				maxsync = AHC_SYNCRATE_ULTRA;
			else
				maxsync = AHC_SYNCRATE_FAST;

			/* DT transfers require a wide (16-bit) bus. */
			if (spi->bus_width != MSG_EXT_WDTR_BUS_16_BIT)
				spi->ppr_options &= ~MSG_EXT_PPR_DT_REQ;

			syncrate = ahc_find_syncrate(ahc, &spi->sync_period,
						     &spi->ppr_options,
						     maxsync);
			ahc_validate_offset(ahc, /*tinfo limit*/NULL,
					    syncrate, &spi->sync_offset,
					    spi->bus_width, ROLE_UNKNOWN);

			/* We use a period of 0 to represent async */
			if (spi->sync_offset == 0) {
				spi->sync_period = 0;
				spi->ppr_options = 0;
			}

			ahc_set_syncrate(ahc, &devinfo, syncrate,
					 spi->sync_period, spi->sync_offset,
					 spi->ppr_options, update_type,
					 /*paused*/FALSE);
		}
		ahc_unlock(ahc, &s);
		ccb->ccb_h.status = CAM_REQ_CMP;
		xpt_done(ccb);
#else
		struct	  ahc_devinfo devinfo;
		struct	  ccb_trans_settings *cts;
		struct	  ahc_initiator_tinfo *tinfo;
		struct	  ahc_tmode_tstate *tstate;
		uint16_t *discenable;
		uint16_t *tagenable;
		u_int	  update_type;
		long	  s;

		cts = &ccb->cts;
		ahc_compile_devinfo(&devinfo, SIM_SCSI_ID(ahc, sim),
				    cts->ccb_h.target_id,
				    cts->ccb_h.target_lun,
				    SIM_CHANNEL(ahc, sim),
				    ROLE_UNKNOWN);
		tinfo = ahc_fetch_transinfo(ahc, devinfo.channel,
					    devinfo.our_scsiid,
					    devinfo.target, &tstate);
		update_type = 0;
		if ((cts->flags & CCB_TRANS_CURRENT_SETTINGS) != 0) {
			update_type |= AHC_TRANS_GOAL;
			discenable = &tstate->discenable;
			tagenable = &tstate->tagenable;
		} else if ((cts->flags & CCB_TRANS_USER_SETTINGS) != 0) {
			update_type |= AHC_TRANS_USER;
			discenable = &ahc->user_discenable;
			tagenable = &ahc->user_tagenable;
		} else {
			ccb->ccb_h.status = CAM_REQ_INVALID;
			xpt_done(ccb);
			break;
		}

		ahc_lock(ahc, &s);

		if ((cts->valid & CCB_TRANS_DISC_VALID) != 0) {
			if ((cts->flags & CCB_TRANS_DISC_ENB) != 0)
				*discenable |= devinfo.target_mask;
			else
				*discenable &= ~devinfo.target_mask;
		}

		if ((cts->valid & CCB_TRANS_TQ_VALID) != 0) {
			if ((cts->flags & CCB_TRANS_TAG_ENB) != 0)
				*tagenable |= devinfo.target_mask;
			else
				*tagenable &= ~devinfo.target_mask;
		}

		if ((cts->valid & CCB_TRANS_BUS_WIDTH_VALID) != 0) {
			ahc_validate_width(ahc, /*tinfo limit*/NULL,
					   &cts->bus_width, ROLE_UNKNOWN);
			ahc_set_width(ahc, &devinfo, cts->bus_width,
				      update_type, /*paused*/FALSE);
		}

		if ((cts->valid & CCB_TRANS_SYNC_OFFSET_VALID) == 0) {
			if (update_type == AHC_TRANS_USER)
				cts->sync_offset = tinfo->user.offset;
			else
				cts->sync_offset = tinfo->goal.offset;
		}

		if ((cts->valid & CCB_TRANS_SYNC_RATE_VALID) == 0) {
			if (update_type == AHC_TRANS_USER)
				cts->sync_period = tinfo->user.period;
			else
				cts->sync_period = tinfo->goal.period;
		}

		if (((cts->valid & CCB_TRANS_SYNC_RATE_VALID) != 0)
		 || ((cts->valid & CCB_TRANS_SYNC_OFFSET_VALID) != 0)) {
			struct ahc_syncrate *syncrate;
			u_int ppr_options;
			u_int maxsync;

			if ((ahc->features & AHC_ULTRA2) != 0)
				maxsync = AHC_SYNCRATE_DT;
			else if ((ahc->features & AHC_ULTRA) != 0)
				maxsync = AHC_SYNCRATE_ULTRA;
			else
				maxsync = AHC_SYNCRATE_FAST;

			/* Periods of 9 (12.5ns) or faster on a wide bus
			 * imply a DT (PPR) request. */
			ppr_options = 0;
			if (cts->sync_period <= 9
			 && cts->bus_width == MSG_EXT_WDTR_BUS_16_BIT)
				ppr_options = MSG_EXT_PPR_DT_REQ;

			syncrate = ahc_find_syncrate(ahc, &cts->sync_period,
						     &ppr_options,
						     maxsync);
			ahc_validate_offset(ahc, /*tinfo limit*/NULL,
					    syncrate, &cts->sync_offset,
					    MSG_EXT_WDTR_BUS_8_BIT,
					    ROLE_UNKNOWN);

			/* We use a period of 0 to represent async */
			if (cts->sync_offset == 0) {
				cts->sync_period = 0;
				ppr_options = 0;
			}

			if (ppr_options == MSG_EXT_PPR_DT_REQ
			 && tinfo->user.transport_version >= 3) {
				tinfo->goal.transport_version =
				    tinfo->user.transport_version;
				tinfo->curr.transport_version =
				    tinfo->user.transport_version;
			}

			ahc_set_syncrate(ahc, &devinfo, syncrate,
					 cts->sync_period, cts->sync_offset,
					 ppr_options, update_type,
					 /*paused*/FALSE);
		}
		ahc_unlock(ahc, &s);
		ccb->ccb_h.status = CAM_REQ_CMP;
		xpt_done(ccb);
#endif
		break;
	}
	case XPT_GET_TRAN_SETTINGS:
	/* Get default/user set transfer settings for the target */
	{

		ahc_lock(ahc, &s);
		ahc_get_tran_settings(ahc, SIM_SCSI_ID(ahc, sim),
				      SIM_CHANNEL(ahc, sim), &ccb->cts);
		ahc_unlock(ahc, &s);
		xpt_done(ccb);
		break;
	}
	case XPT_CALC_GEOMETRY:
	{
		struct	  ccb_calc_geometry *ccg;
		uint32_t size_mb;
		uint32_t secs_per_cylinder;
		int	  extended;

		/* Use extended geometry (255/63) for disks over 1GB when
		 * extended translation is enabled for this channel. */
		ccg = &ccb->ccg;
		size_mb = ccg->volume_size
			/ ((1024L * 1024L) / ccg->block_size);
		extended = SIM_IS_SCSIBUS_B(ahc, sim)
			? ahc->flags & AHC_EXTENDED_TRANS_B
			: ahc->flags & AHC_EXTENDED_TRANS_A;

		if (size_mb > 1024 && extended) {
			ccg->heads = 255;
			ccg->secs_per_track = 63;
		} else {
			ccg->heads = 64;
			ccg->secs_per_track = 32;
		}
		secs_per_cylinder = ccg->heads * ccg->secs_per_track;
		ccg->cylinders = ccg->volume_size / secs_per_cylinder;
		ccb->ccb_h.status = CAM_REQ_CMP;
		xpt_done(ccb);
		break;
	}
	case XPT_RESET_BUS:		/* Reset the specified SCSI bus */
	{
		int  found;

		ahc_lock(ahc, &s);
		found = ahc_reset_channel(ahc, SIM_CHANNEL(ahc, sim),
					  /*initiate reset*/TRUE);
		ahc_unlock(ahc, &s);
		if (bootverbose) {
			xpt_print_path(SIM_PATH(ahc, sim));
			printf("SCSI bus reset delivered. "
			       "%d SCBs aborted.\n", found);
		}
		ccb->ccb_h.status = CAM_REQ_CMP;
		xpt_done(ccb);
		break;
	}
	case XPT_TERM_IO:		/* Terminate the I/O process */
		/* XXX Implement */
		ccb->ccb_h.status = CAM_REQ_INVALID;
		xpt_done(ccb);
		break;
	case XPT_PATH_INQ:		/* Path routing inquiry */
	{
		struct ccb_pathinq *cpi = &ccb->cpi;

		/* Advertise controller capabilities to CAM. */
		cpi->version_num = 1; /* XXX??? */
		cpi->hba_inquiry = PI_SDTR_ABLE|PI_TAG_ABLE;
		if ((ahc->features & AHC_WIDE) != 0)
			cpi->hba_inquiry |= PI_WIDE_16;
		if ((ahc->features & AHC_TARGETMODE) != 0) {
			cpi->target_sprt = PIT_PROCESSOR
					 | PIT_DISCONNECT
					 | PIT_TERM_IO;
		} else {
			cpi->target_sprt = 0;
		}
		cpi->hba_misc = 0;
		cpi->hba_eng_cnt = 0;
		cpi->max_target = (ahc->features & AHC_WIDE) ? 15 : 7;
		cpi->max_lun = AHC_NUM_LUNS - 1;
		if (SIM_IS_SCSIBUS_B(ahc, sim)) {
			cpi->initiator_id = ahc->our_id_b;
			if ((ahc->flags & AHC_RESET_BUS_B) == 0)
				cpi->hba_misc |= PIM_NOBUSRESET;
		} else {
			cpi->initiator_id = ahc->our_id;
			if ((ahc->flags & AHC_RESET_BUS_A) == 0)
				cpi->hba_misc |= PIM_NOBUSRESET;
		}
		cpi->bus_id = cam_sim_bus(sim);
		cpi->base_transfer_speed = 3300;
		strncpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN);
		strncpy(cpi->hba_vid, "Adaptec", HBA_IDLEN);
		strncpy(cpi->dev_name, cam_sim_name(sim), DEV_IDLEN);
		cpi->unit_number = cam_sim_unit(sim);
#ifdef AHC_NEW_TRAN_SETTINGS
		cpi->protocol = PROTO_SCSI;
		cpi->protocol_version = SCSI_REV_2;
		cpi->transport = XPORT_SPI;
		cpi->transport_version = 2;
		cpi->xport_specific.spi.ppr_options = SID_SPI_CLOCK_ST;
		if ((ahc->features & AHC_DT) != 0) {
			cpi->transport_version = 3;
			cpi->xport_specific.spi.ppr_options =
			    SID_SPI_CLOCK_DT_ST;
		}
#endif
		cpi->ccb_h.status = CAM_REQ_CMP;
		xpt_done(ccb);
		break;
	}
	default:
		ccb->ccb_h.status = CAM_PROVIDE_FAIL;
		xpt_done(ccb);
		break;
	}
}
922
/*
 * Fill in a CCB with either the current (active) or user-default
 * transfer settings — sync rate/offset, bus width, disconnect and
 * tag-queueing enables — for the device addressed by cts->ccb_h.
 * Caller holds the driver lock.
 */
static void
ahc_get_tran_settings(struct ahc_softc *ahc, int our_id, char channel,
		      struct ccb_trans_settings *cts)
{
#ifdef AHC_NEW_TRAN_SETTINGS
	struct	ahc_devinfo devinfo;
	struct	ccb_trans_settings_scsi *scsi;
	struct	ccb_trans_settings_spi *spi;
	struct	ahc_initiator_tinfo *targ_info;
	struct	ahc_tmode_tstate *tstate;
	struct	ahc_transinfo *tinfo;

	scsi = &cts->proto_specific.scsi;
	spi = &cts->xport_specific.spi;
	ahc_compile_devinfo(&devinfo, our_id,
			    cts->ccb_h.target_id,
			    cts->ccb_h.target_lun,
			    channel, ROLE_UNKNOWN);
	targ_info = ahc_fetch_transinfo(ahc, devinfo.channel,
					devinfo.our_scsiid,
					devinfo.target, &tstate);

	/* Select the negotiated (current) or user-configured view. */
	if (cts->type == CTS_TYPE_CURRENT_SETTINGS)
		tinfo = &targ_info->curr;
	else
		tinfo = &targ_info->user;

	scsi->flags &= ~CTS_SCSI_FLAGS_TAG_ENB;
	spi->flags &= ~CTS_SPI_FLAGS_DISC_ENB;
	if (cts->type == CTS_TYPE_USER_SETTINGS) {
		if ((ahc->user_discenable & devinfo.target_mask) != 0)
			spi->flags |= CTS_SPI_FLAGS_DISC_ENB;

		if ((ahc->user_tagenable & devinfo.target_mask) != 0)
			scsi->flags |= CTS_SCSI_FLAGS_TAG_ENB;
	} else {
		if ((tstate->discenable & devinfo.target_mask) != 0)
			spi->flags |= CTS_SPI_FLAGS_DISC_ENB;

		if ((tstate->tagenable & devinfo.target_mask) != 0)
			scsi->flags |= CTS_SCSI_FLAGS_TAG_ENB;
	}
	cts->protocol_version = tinfo->protocol_version;
	cts->transport_version = tinfo->transport_version;

	spi->sync_period = tinfo->period;
	spi->sync_offset = tinfo->offset;
	spi->bus_width = tinfo->width;
	spi->ppr_options = tinfo->ppr_options;

	cts->protocol = PROTO_SCSI;
	cts->transport = XPORT_SPI;
	spi->valid = CTS_SPI_VALID_SYNC_RATE
		   | CTS_SPI_VALID_SYNC_OFFSET
		   | CTS_SPI_VALID_BUS_WIDTH
		   | CTS_SPI_VALID_PPR_OPTIONS;

	/* Disconnect/TQ settings are per-device, so only report them
	 * for a concrete (non-wildcard) lun. */
	if (cts->ccb_h.target_lun != CAM_LUN_WILDCARD) {
		scsi->valid = CTS_SCSI_VALID_TQ;
		spi->valid |= CTS_SPI_VALID_DISC;
	} else {
		scsi->valid = 0;
	}

	cts->ccb_h.status = CAM_REQ_CMP;
#else
	struct	ahc_devinfo devinfo;
	struct	ahc_initiator_tinfo *targ_info;
	struct	ahc_tmode_tstate *tstate;
	struct	ahc_transinfo *tinfo;

	ahc_compile_devinfo(&devinfo, our_id,
			    cts->ccb_h.target_id,
			    cts->ccb_h.target_lun,
			    channel, ROLE_UNKNOWN);
	targ_info = ahc_fetch_transinfo(ahc, devinfo.channel,
					devinfo.our_scsiid,
					devinfo.target, &tstate);

	/* Select the negotiated (current) or user-configured view. */
	if ((cts->flags & CCB_TRANS_CURRENT_SETTINGS) != 0)
		tinfo = &targ_info->curr;
	else
		tinfo = &targ_info->user;

	cts->flags &= ~(CCB_TRANS_DISC_ENB|CCB_TRANS_TAG_ENB);
	if ((cts->flags & CCB_TRANS_CURRENT_SETTINGS) == 0) {
		if ((ahc->user_discenable & devinfo.target_mask) != 0)
			cts->flags |= CCB_TRANS_DISC_ENB;

		if ((ahc->user_tagenable & devinfo.target_mask) != 0)
			cts->flags |= CCB_TRANS_TAG_ENB;
	} else {
		if ((tstate->discenable & devinfo.target_mask) != 0)
			cts->flags |= CCB_TRANS_DISC_ENB;

		if ((tstate->tagenable & devinfo.target_mask) != 0)
			cts->flags |= CCB_TRANS_TAG_ENB;
	}
	cts->sync_period = tinfo->period;
	cts->sync_offset = tinfo->offset;
	cts->bus_width = tinfo->width;

	cts->valid = CCB_TRANS_SYNC_RATE_VALID
		   | CCB_TRANS_SYNC_OFFSET_VALID
		   | CCB_TRANS_BUS_WIDTH_VALID;

	/* Disconnect/TQ settings only apply to a concrete lun. */
	if (cts->ccb_h.target_lun != CAM_LUN_WILDCARD)
		cts->valid |= CCB_TRANS_DISC_VALID|CCB_TRANS_TQ_VALID;

	cts->ccb_h.status = CAM_REQ_CMP;
#endif
}
1035
1036static void
1037ahc_async(void *callback_arg, uint32_t code, struct cam_path *path, void *arg)
1038{
1039	struct ahc_softc *ahc;
1040	struct cam_sim *sim;
1041
1042	sim = (struct cam_sim *)callback_arg;
1043	ahc = (struct ahc_softc *)cam_sim_softc(sim);
1044	switch (code) {
1045	case AC_LOST_DEVICE:
1046	{
1047		struct	ahc_devinfo devinfo;
1048		long	s;
1049
1050		ahc_compile_devinfo(&devinfo, SIM_SCSI_ID(ahc, sim),
1051				    xpt_path_target_id(path),
1052				    xpt_path_lun_id(path),
1053				    SIM_CHANNEL(ahc, sim),
1054				    ROLE_UNKNOWN);
1055
1056		/*
1057		 * Revert to async/narrow transfers
1058		 * for the next device.
1059		 */
1060		ahc_lock(ahc, &s);
1061		ahc_set_width(ahc, &devinfo, MSG_EXT_WDTR_BUS_8_BIT,
1062			      AHC_TRANS_GOAL|AHC_TRANS_CUR, /*paused*/FALSE);
1063		ahc_set_syncrate(ahc, &devinfo, /*syncrate*/NULL,
1064				 /*period*/0, /*offset*/0, /*ppr_options*/0,
1065				 AHC_TRANS_GOAL|AHC_TRANS_CUR,
1066				 /*paused*/FALSE);
1067		ahc_unlock(ahc, &s);
1068		break;
1069	}
1070	default:
1071		break;
1072	}
1073}
1074
/*
 * bus_dmamap_load() callback:  finish building the hardware SCB now
 * that the data buffer's DMA segments are known, then queue the SCB
 * to the controller.  Also invoked directly by ahc_setup_data() with
 * a pre-built segment list, or with nsegments == 0 for dataless
 * transactions.
 */
static void
ahc_execute_scb(void *arg, bus_dma_segment_t *dm_segs, int nsegments,
		int error)
{
	struct	scb *scb;
	union	ccb *ccb;
	struct	ahc_softc *ahc;
	struct	ahc_initiator_tinfo *tinfo;
	struct	ahc_tmode_tstate *tstate;
	u_int	mask;
	long	s;

	scb = (struct scb *)arg;
	ccb = scb->io_ctx;
	ahc = scb->ahc_softc;

	if (error != 0) {
		/* The mapping failed; fail the CCB and release the SCB. */
		if (error == EFBIG)
			ahc_set_transaction_status(scb, CAM_REQ_TOO_BIG);
		else
			ahc_set_transaction_status(scb, CAM_REQ_CMP_ERR);
		if (nsegments != 0)
			bus_dmamap_unload(ahc->buffer_dmat, scb->dmamap);
		ahc_lock(ahc, &s);
		ahc_free_scb(ahc, scb);
		ahc_unlock(ahc, &s);
		xpt_done(ccb);
		return;
	}
	if (nsegments != 0) {
		struct	  ahc_dma_seg *sg;
		bus_dma_segment_t *end_seg;
		bus_dmasync_op_t op;

		end_seg = dm_segs + nsegments;

		/* Copy the segments into our SG list */
		sg = scb->sg_list;
		while (dm_segs < end_seg) {
			uint32_t len;

			sg->addr = ahc_htole32(dm_segs->ds_addr);
			/*
			 * Upper bits of the bus address are folded into
			 * the high byte of the length entry (hardware
			 * S/G element format).
			 */
			len = dm_segs->ds_len
			    | ((dm_segs->ds_addr >> 8) & 0x7F000000);
			sg->len = ahc_htole32(len);
			sg++;
			dm_segs++;
		}

		/*
		 * Note where to find the SG entries in bus space.
		 * We also set the full residual flag which the
		 * sequencer will clear as soon as a data transfer
		 * occurs.
		 */
		scb->hscb->sgptr = ahc_htole32(scb->sg_list_phys|SG_FULL_RESID);

		if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN)
			op = BUS_DMASYNC_PREREAD;
		else
			op = BUS_DMASYNC_PREWRITE;

		bus_dmamap_sync(ahc->buffer_dmat, scb->dmamap, op);

		if (ccb->ccb_h.func_code == XPT_CONT_TARGET_IO) {
			struct target_data *tdata;

			/* Record the data phase for this target-mode I/O. */
			tdata = &scb->hscb->shared_data.tdata;
			tdata->target_phases |= DPHASE_PENDING;
			if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_OUT)
				tdata->data_phase = P_DATAOUT;
			else
				tdata->data_phase = P_DATAIN;

			/*
			 * If the transfer is of an odd length and in the
			 * "in" direction (scsi->HostBus), then it may
			 * trigger a bug in the 'WideODD' feature of
			 * non-Ultra2 chips.  Force the total data-length
			 * to be even by adding an extra, 1 byte, SG,
			 * element.  We do this even if we are not currently
			 * negotiated wide as negotiation could occur before
			 * this command is executed.
			 */
			if ((ahc->bugs & AHC_TMODE_WIDEODD_BUG) != 0
			 && (ccb->csio.dxfer_len & 0x1) != 0
			 && (ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {

				nsegments++;
				if (nsegments > AHC_NSEG) {

					/* No room for the pad segment. */
					ahc_set_transaction_status(scb,
					    CAM_REQ_TOO_BIG);
					bus_dmamap_unload(ahc->buffer_dmat,
							  scb->dmamap);
					ahc_lock(ahc, &s);
					ahc_free_scb(ahc, scb);
					ahc_unlock(ahc, &s);
					xpt_done(ccb);
					return;
				}
				sg->addr = ahc_htole32(ahc->dma_bug_buf);
				sg->len = ahc_htole32(1);
				sg++;
			}
		}
		/* Mark the final SG element so the sequencer stops. */
		sg--;
		sg->len |= ahc_htole32(AHC_DMA_LAST_SEG);

		/* Copy the first SG into the "current" data pointer area */
		scb->hscb->dataptr = scb->sg_list->addr;
		scb->hscb->datacnt = scb->sg_list->len;
	} else {
		/* Dataless transaction. */
		scb->hscb->sgptr = ahc_htole32(SG_LIST_NULL);
		scb->hscb->dataptr = 0;
		scb->hscb->datacnt = 0;
	}

	scb->sg_count = nsegments;

	ahc_lock(ahc, &s);

	/*
	 * Last time we need to check if this SCB needs to
	 * be aborted.
	 */
	if (ahc_get_transaction_status(scb) != CAM_REQ_INPROG) {
		if (nsegments != 0)
			bus_dmamap_unload(ahc->buffer_dmat, scb->dmamap);
		ahc_free_scb(ahc, scb);
		ahc_unlock(ahc, &s);
		xpt_done(ccb);
		return;
	}

	/* Stamp the SCB with the current transfer negotiation state. */
	tinfo = ahc_fetch_transinfo(ahc, SCSIID_CHANNEL(ahc, scb->hscb->scsiid),
				    SCSIID_OUR_ID(scb->hscb->scsiid),
				    SCSIID_TARGET(ahc, scb->hscb->scsiid),
				    &tstate);

	mask = SCB_GET_TARGET_MASK(ahc, scb);
	scb->hscb->scsirate = tinfo->scsirate;
	scb->hscb->scsioffset = tinfo->curr.offset;
	if ((tstate->ultraenb & mask) != 0)
		scb->hscb->control |= ULTRAENB;

	if ((tstate->discenable & mask) != 0
	 && (ccb->ccb_h.flags & CAM_DIS_DISCONNECT) == 0)
		scb->hscb->control |= DISCENB;

	/* Request a negotiation message if one is needed or forced. */
	if ((ccb->ccb_h.flags & CAM_NEGOTIATE) != 0
	 && (tinfo->goal.width != 0
	  || tinfo->goal.offset != 0
	  || tinfo->goal.ppr_options != 0)) {
		scb->flags |= SCB_NEGOTIATE;
		scb->hscb->control |= MK_MESSAGE;
	} else if ((tstate->auto_negotiate & mask) != 0) {
		scb->flags |= SCB_AUTO_NEGOTIATE;
		scb->hscb->control |= MK_MESSAGE;
	}

	LIST_INSERT_HEAD(&ahc->pending_scbs, scb, pending_links);

	ccb->ccb_h.status |= CAM_SIM_QUEUED;

	if (ccb->ccb_h.timeout != CAM_TIME_INFINITY) {
		uint64_t time;

		if (ccb->ccb_h.timeout == CAM_TIME_DEFAULT)
			ccb->ccb_h.timeout = 5 * 1000;

		/* Convert the CCB timeout from milliseconds to ticks. */
		time = ccb->ccb_h.timeout;
		time *= hz;
		time /= 1000;
		ccb->ccb_h.timeout_ch =
		    timeout(ahc_timeout, (caddr_t)scb, time);
	}

	/*
	 * We only allow one untagged transaction
	 * per target in the initiator role unless
	 * we are storing a full busy target *lun*
	 * table in SCB space.
	 */
	if ((scb->hscb->control & (TARGET_SCB|TAG_ENB)) == 0
	 && (ahc->flags & AHC_SCB_BTT) == 0) {
		struct scb_tailq *untagged_q;
		int target_offset;

		target_offset = SCB_GET_TARGET_OFFSET(ahc, scb);
		untagged_q = &(ahc->untagged_queues[target_offset]);
		TAILQ_INSERT_TAIL(untagged_q, scb, links.tqe);
		scb->flags |= SCB_UNTAGGEDQ;
		/*
		 * If another untagged command is already outstanding
		 * for this target, leave ours queued for later.
		 */
		if (TAILQ_FIRST(untagged_q) != scb) {
			ahc_unlock(ahc, &s);
			return;
		}
	}
	scb->flags |= SCB_ACTIVE;

	if ((scb->flags & SCB_TARGET_IMMEDIATE) != 0) {
		/* Define a mapping from our tag to the SCB. */
		ahc->scb_data->scbindex[scb->hscb->tag] = scb;
		ahc_pause(ahc);
		if ((ahc->flags & AHC_PAGESCBS) == 0)
			ahc_outb(ahc, SCBPTR, scb->hscb->tag);
		ahc_outb(ahc, TARG_IMMEDIATE_SCB, scb->hscb->tag);
		ahc_unpause(ahc);
	} else {
		ahc_queue_scb(ahc, scb);
	}

	ahc_unlock(ahc, &s);
}
1289
/*
 * CAM poll entry point:  service controller events by calling the
 * interrupt handler directly (used when interrupts are unavailable,
 * e.g. dumping or polled boot I/O).
 */
static void
ahc_poll(struct cam_sim *sim)
{
	ahc_intr((struct ahc_softc *)cam_sim_softc(sim));
}
1298
/*
 * Translate the CCB's CDB and data buffer description into the
 * hardware SCB and kick off DMA mapping of the data.  The mapping
 * completion path, ahc_execute_scb(), finishes SCB setup and queues
 * it; for dataless or pre-mapped transfers it is called directly.
 */
static void
ahc_setup_data(struct ahc_softc *ahc, struct cam_sim *sim,
	       struct ccb_scsiio *csio, struct scb *scb)
{
	struct hardware_scb *hscb;
	struct ccb_hdr *ccb_h;

	hscb = scb->hscb;
	ccb_h = &csio->ccb_h;

	csio->resid = 0;
	csio->sense_resid = 0;
	if (ccb_h->func_code == XPT_SCSI_IO) {
		hscb->cdb_len = csio->cdb_len;
		if ((ccb_h->flags & CAM_CDB_POINTER) != 0) {

			/*
			 * CDBs passed by pointer must fit in the
			 * hardware SCB and be virtually addressed.
			 */
			if (hscb->cdb_len > sizeof(hscb->cdb32)
			 || (ccb_h->flags & CAM_CDB_PHYS) != 0) {
				u_long s;

				ahc_set_transaction_status(scb,
							   CAM_REQ_INVALID);
				ahc_lock(ahc, &s);
				ahc_free_scb(ahc, scb);
				ahc_unlock(ahc, &s);
				xpt_done((union ccb *)csio);
				return;
			}
			/*
			 * CDBs longer than 12 bytes use the cdb32 area
			 * of the hardware SCB; shorter CDBs share the
			 * per-command data area.
			 */
			if (hscb->cdb_len > 12) {
				memcpy(hscb->cdb32,
				       csio->cdb_io.cdb_ptr,
				       hscb->cdb_len);
				scb->flags |= SCB_CDB32_PTR;
			} else {
				memcpy(hscb->shared_data.cdb,
				       csio->cdb_io.cdb_ptr,
				       hscb->cdb_len);
			}
		} else {
			/* CDB bytes are embedded directly in the CCB. */
			if (hscb->cdb_len > 12) {
				memcpy(hscb->cdb32, csio->cdb_io.cdb_bytes,
				       hscb->cdb_len);
				scb->flags |= SCB_CDB32_PTR;
			} else {
				memcpy(hscb->shared_data.cdb,
				       csio->cdb_io.cdb_bytes,
				       hscb->cdb_len);
			}
		}
	}

	/* Only use S/G if there is a transfer */
	if ((ccb_h->flags & CAM_DIR_MASK) != CAM_DIR_NONE) {
		if ((ccb_h->flags & CAM_SCATTER_VALID) == 0) {
			/* We've been given a pointer to a single buffer */
			if ((ccb_h->flags & CAM_DATA_PHYS) == 0) {
				int s;
				int error;

				/* Map the buffer; may complete asynchronously. */
				s = splsoftvm();
				error = bus_dmamap_load(ahc->buffer_dmat,
							scb->dmamap,
							csio->data_ptr,
							csio->dxfer_len,
							ahc_execute_scb,
							scb, /*flags*/0);
				if (error == EINPROGRESS) {
					/*
					 * So as to maintain ordering,
					 * freeze the controller queue
					 * until our mapping is
					 * returned.
					 */
					xpt_freeze_simq(sim,
							/*count*/1);
					scb->io_ctx->ccb_h.status |=
					    CAM_RELEASE_SIMQ;
				}
				splx(s);
			} else {
				struct bus_dma_segment seg;

				/* Pointer to physical buffer */
				if (csio->dxfer_len > AHC_MAXTRANSFER_SIZE)
					panic("ahc_setup_data - Transfer size "
					      "larger than can device max");

				/* Synthesize a single-segment list. */
				seg.ds_addr = (bus_addr_t)csio->data_ptr;
				seg.ds_len = csio->dxfer_len;
				ahc_execute_scb(scb, &seg, 1, 0);
			}
		} else {
			struct bus_dma_segment *segs;

			if ((ccb_h->flags & CAM_DATA_PHYS) != 0)
				panic("ahc_setup_data - Physical segment "
				      "pointers unsupported");

			if ((ccb_h->flags & CAM_SG_LIST_PHYS) == 0)
				panic("ahc_setup_data - Virtual segment "
				      "addresses unsupported");

			/* Just use the segments provided */
			segs = (struct bus_dma_segment *)csio->data_ptr;
			ahc_execute_scb(scb, segs, csio->sglist_cnt, 0);
		}
	} else {
		/* No data phase for this command. */
		ahc_execute_scb(scb, NULL, 0, 0);
	}
}
1409
1410static void
1411ahc_set_recoveryscb(struct ahc_softc *ahc, struct scb *scb) {
1412
1413	if ((scb->flags & SCB_RECOVERY_SCB) == 0) {
1414		struct scb *list_scb;
1415
1416		scb->flags |= SCB_RECOVERY_SCB;
1417
1418		/*
1419		 * Take all queued, but not sent SCBs out of the equation.
1420		 * Also ensure that no new CCBs are queued to us while we
1421		 * try to fix this problem.
1422		 */
1423		if ((scb->io_ctx->ccb_h.status & CAM_RELEASE_SIMQ) == 0) {
1424			xpt_freeze_simq(SCB_GET_SIM(ahc, scb), /*count*/1);
1425			scb->io_ctx->ccb_h.status |= CAM_RELEASE_SIMQ;
1426		}
1427
1428		/*
1429		 * Go through all of our pending SCBs and remove
1430		 * any scheduled timeouts for them.  We will reschedule
1431		 * them after we've successfully fixed this problem.
1432		 */
1433		LIST_FOREACH(list_scb, &ahc->pending_scbs, pending_links) {
1434			union ccb *ccb;
1435
1436			ccb = list_scb->io_ctx;
1437			untimeout(ahc_timeout, list_scb, ccb->ccb_h.timeout_ch);
1438		}
1439	}
1440}
1441
/*
 * Per-command timeout handler scheduled via timeout(9) when the SCB
 * was queued.  Recovery escalates:  if another SCB owns the bus we
 * simply wait on its (presumably longer) timeout; otherwise a bus
 * device reset is queued or messaged to the target; if a previous
 * recovery attempt is already outstanding, the whole bus is reset.
 */
void
ahc_timeout(void *arg)
{
	struct	scb *scb;
	struct	ahc_softc *ahc;
	long	s;
	int	found;
	u_int	last_phase;
	int	target;
	int	lun;
	int	i;
	char	channel;

	scb = (struct scb *)arg;
	ahc = (struct ahc_softc *)scb->ahc_softc;

	ahc_lock(ahc, &s);

	/* Pause the sequencer and drain any posted completions. */
	ahc_pause_and_flushwork(ahc);

	if ((scb->flags & SCB_ACTIVE) == 0) {
		/* Previous timeout took care of me already */
		printf("%s: Timedout SCB already complete. "
		       "Interrupts may not be functioning.\n", ahc_name(ahc));
		ahc_unpause(ahc);
		ahc_unlock(ahc, &s);
		return;
	}

	target = SCB_GET_TARGET(ahc, scb);
	channel = SCB_GET_CHANNEL(ahc, scb);
	lun = SCB_GET_LUN(scb);

	/* Log controller and S/G state to aid post-mortem debugging. */
	ahc_print_path(ahc, scb);
	printf("SCB 0x%x - timed out\n", scb->hscb->tag);
	ahc_dump_card_state(ahc);
	last_phase = ahc_inb(ahc, LASTPHASE);
	if (scb->sg_count > 0) {
		for (i = 0; i < scb->sg_count; i++) {
			printf("sg[%d] - Addr 0x%x : Length %d\n",
			       i,
			       scb->sg_list[i].addr,
			       scb->sg_list[i].len & AHC_SG_LEN_MASK);
		}
	}
	if (scb->flags & (SCB_DEVICE_RESET|SCB_ABORT)) {
		/*
		 * Been down this road before.
		 * Do a full bus reset.
		 */
bus_reset:
		ahc_set_transaction_status(scb, CAM_CMD_TIMEOUT);
		found = ahc_reset_channel(ahc, channel, /*Initiate Reset*/TRUE);
		printf("%s: Issued Channel %c Bus Reset. "
		       "%d SCBs aborted\n", ahc_name(ahc), channel, found);
	} else {
		/*
		 * If we are a target, transition to bus free and report
		 * the timeout.
		 *
		 * The target/initiator that is holding up the bus may not
		 * be the same as the one that triggered this timeout
		 * (different commands have different timeout lengths).
		 * If the bus is idle and we are actiing as the initiator
		 * for this request, queue a BDR message to the timed out
		 * target.  Otherwise, if the timed out transaction is
		 * active:
		 *   Initiator transaction:
		 *	Stuff the message buffer with a BDR message and assert
		 *	ATN in the hopes that the target will let go of the bus
		 *	and go to the mesgout phase.  If this fails, we'll
		 *	get another timeout 2 seconds later which will attempt
		 *	a bus reset.
		 *
		 *   Target transaction:
		 *	Transition to BUS FREE and report the error.
		 *	It's good to be the target!
		 */
		u_int active_scb_index;
		u_int saved_scbptr;

		saved_scbptr = ahc_inb(ahc, SCBPTR);
		active_scb_index = ahc_inb(ahc, SCB_TAG);

		if ((ahc_inb(ahc, SEQ_FLAGS) & IDENTIFY_SEEN) != 0
		  && (active_scb_index < ahc->scb_data->numscbs)) {
			struct scb *active_scb;

			/*
			 * If the active SCB is not us, assume that
			 * the active SCB has a longer timeout than
			 * the timedout SCB, and wait for the active
			 * SCB to timeout.
			 */
			active_scb = ahc_lookup_scb(ahc, active_scb_index);
			if (active_scb != scb) {
				struct	 ccb_hdr *ccbh;
				uint64_t newtimeout;

				ahc_print_path(ahc, scb);
				printf("Other SCB Timeout%s",
			 	       (scb->flags & SCB_OTHERTCL_TIMEOUT) != 0
				       ? " again\n" : "\n");
				scb->flags |= SCB_OTHERTCL_TIMEOUT;
				/*
				 * Reschedule with the larger of the two
				 * CCB timeouts, converted ms -> ticks.
				 */
				newtimeout =
				    MAX(active_scb->io_ctx->ccb_h.timeout,
					scb->io_ctx->ccb_h.timeout);
				newtimeout *= hz;
				newtimeout /= 1000;
				/* NOTE(review): ccbh is assigned but never used. */
				ccbh = &scb->io_ctx->ccb_h;
				scb->io_ctx->ccb_h.timeout_ch =
				    timeout(ahc_timeout, scb, newtimeout);
				ahc_unpause(ahc);
				ahc_unlock(ahc, &s);
				return;
			}

			/* It's us */
			if ((scb->hscb->control & TARGET_SCB) != 0) {

				/*
				 * Send back any queued up transactions
				 * and properly record the error condition.
				 */
				ahc_abort_scbs(ahc, SCB_GET_TARGET(ahc, scb),
					       SCB_GET_CHANNEL(ahc, scb),
					       SCB_GET_LUN(scb),
					       scb->hscb->tag,
					       ROLE_TARGET,
					       CAM_CMD_TIMEOUT);

				/* Will clear us from the bus */
				ahc_restart(ahc);
				ahc_unlock(ahc, &s);
				return;
			}

			/*
			 * Initiator transaction holding the bus:  request
			 * a BDR message-out and give the target 2 seconds
			 * to respond before escalating to a bus reset.
			 */
			ahc_set_recoveryscb(ahc, active_scb);
			ahc_outb(ahc, MSG_OUT, HOST_MSG);
			ahc_outb(ahc, SCSISIGO, last_phase|ATNO);
			ahc_print_path(ahc, active_scb);
			printf("BDR message in message buffer\n");
			active_scb->flags |= SCB_DEVICE_RESET;
			active_scb->io_ctx->ccb_h.timeout_ch =
			    timeout(ahc_timeout, (caddr_t)active_scb, 2 * hz);
			ahc_unpause(ahc);
		} else {
			int	 disconnected;

			/* XXX Shouldn't panic.  Just punt instead? */
			if ((scb->hscb->control & TARGET_SCB) != 0)
				panic("Timed-out target SCB but bus idle");

			if (last_phase != P_BUSFREE
			 && (ahc_inb(ahc, SSTAT0) & TARGET) != 0) {
				/* XXX What happened to the SCB? */
				/* Hung target selection.  Goto busfree */
				printf("%s: Hung target selection\n",
				       ahc_name(ahc));
				ahc_restart(ahc);
				ahc_unlock(ahc, &s);
				return;
			}

			/* Still in the qinfifo?  Then we never made it out. */
			if (ahc_search_qinfifo(ahc, target, channel, lun,
					       scb->hscb->tag, ROLE_INITIATOR,
					       /*status*/0, SEARCH_COUNT) > 0) {
				disconnected = FALSE;
			} else {
				disconnected = TRUE;
			}

			if (disconnected) {

				ahc_set_recoveryscb(ahc, scb);
				/*
				 * Actually re-queue this SCB in an attempt
				 * to select the device before it reconnects.
				 * In either case (selection or reselection),
				 * we will now issue a target reset to the
				 * timed-out device.
				 *
				 * Set the MK_MESSAGE control bit indicating
				 * that we desire to send a message.  We
				 * also set the disconnected flag since
				 * in the paging case there is no guarantee
				 * that our SCB control byte matches the
				 * version on the card.  We don't want the
				 * sequencer to abort the command thinking
				 * an unsolicited reselection occurred.
				 */
				scb->hscb->control |= MK_MESSAGE|DISCONNECTED;
				scb->flags |= SCB_DEVICE_RESET;

				/*
				 * Remove any cached copy of this SCB in the
				 * disconnected list in preparation for the
				 * queuing of our abort SCB.  We use the
				 * same element in the SCB, SCB_NEXT, for
				 * both the qinfifo and the disconnected list.
				 */
				ahc_search_disc_list(ahc, target, channel,
						     lun, scb->hscb->tag,
						     /*stop_on_first*/TRUE,
						     /*remove*/TRUE,
						     /*save_state*/FALSE);

				/*
				 * In the non-paging case, the sequencer will
				 * never re-reference the in-core SCB.
				 * To make sure we are notified during
				 * reslection, set the MK_MESSAGE flag in
				 * the card's copy of the SCB.
				 */
				if ((ahc->flags & AHC_PAGESCBS) == 0) {
					ahc_outb(ahc, SCBPTR, scb->hscb->tag);
					ahc_outb(ahc, SCB_CONTROL,
						 ahc_inb(ahc, SCB_CONTROL)
						| MK_MESSAGE);
				}

				/*
				 * Clear out any entries in the QINFIFO first
				 * so we are the next SCB for this target
				 * to run.
				 */
				ahc_search_qinfifo(ahc,
						   SCB_GET_TARGET(ahc, scb),
						   channel, SCB_GET_LUN(scb),
						   SCB_LIST_NULL,
						   ROLE_INITIATOR,
						   CAM_REQUEUE_REQ,
						   SEARCH_COMPLETE);
				ahc_print_path(ahc, scb);
				printf("Queuing a BDR SCB\n");
				ahc_qinfifo_requeue_tail(ahc, scb);
				ahc_outb(ahc, SCBPTR, saved_scbptr);
				scb->io_ctx->ccb_h.timeout_ch =
				    timeout(ahc_timeout, (caddr_t)scb, 2 * hz);
				ahc_unpause(ahc);
			} else {
				/* Go "immediatly" to the bus reset */
				/* This shouldn't happen */
				ahc_set_recoveryscb(ahc, scb);
				ahc_print_path(ahc, scb);
				printf("SCB %d: Immediate reset.  "
					"Flags = 0x%x\n", scb->hscb->tag,
					scb->flags);
				goto bus_reset;
			}
		}
	}
	ahc_unlock(ahc, &s);
}
1696
/*
 * Handle an XPT_ABORT request.  Target-mode CCBs (accept TIO /
 * immediate notify) still parked on our internal queues are unlinked
 * and completed with CAM_REQ_ABORTED.  Aborting a CCB already in
 * flight (XPT_SCSI_IO) is not implemented and reports CAM_UA_ABORT.
 */
static void
ahc_abort_ccb(struct ahc_softc *ahc, struct cam_sim *sim, union ccb *ccb)
{
	union ccb *abort_ccb;

	abort_ccb = ccb->cab.abort_ccb;
	switch (abort_ccb->ccb_h.func_code) {
	case XPT_ACCEPT_TARGET_IO:
	case XPT_IMMED_NOTIFY:
	case XPT_CONT_TARGET_IO:
	{
		struct ahc_tmode_tstate *tstate;
		struct ahc_tmode_lstate *lstate;
		struct ccb_hdr_slist *list;
		cam_status status;

		status = ahc_find_tmode_devs(ahc, sim, abort_ccb, &tstate,
					     &lstate, TRUE);

		if (status != CAM_REQ_CMP) {
			ccb->ccb_h.status = status;
			break;
		}

		/* Select the queue the CCB would be waiting on. */
		if (abort_ccb->ccb_h.func_code == XPT_ACCEPT_TARGET_IO)
			list = &lstate->accept_tios;
		else if (abort_ccb->ccb_h.func_code == XPT_IMMED_NOTIFY)
			list = &lstate->immed_notifies;
		else
			list = NULL;

		if (list != NULL) {
			struct ccb_hdr *curelm;
			int found;

			/*
			 * Unlink the CCB from the singly-linked list by
			 * locating its predecessor.
			 */
			curelm = SLIST_FIRST(list);
			found = 0;
			if (curelm == &abort_ccb->ccb_h) {
				found = 1;
				SLIST_REMOVE_HEAD(list, sim_links.sle);
			} else {
				while(curelm != NULL) {
					struct ccb_hdr *nextelm;

					nextelm =
					    SLIST_NEXT(curelm, sim_links.sle);

					if (nextelm == &abort_ccb->ccb_h) {
						found = 1;
						SLIST_NEXT(curelm,
							   sim_links.sle) =
						    SLIST_NEXT(nextelm,
							       sim_links.sle);
						break;
					}
					curelm = nextelm;
				}
			}

			if (found) {
				abort_ccb->ccb_h.status = CAM_REQ_ABORTED;
				xpt_done(abort_ccb);
				ccb->ccb_h.status = CAM_REQ_CMP;
			} else {
				xpt_print_path(abort_ccb->ccb_h.path);
				printf("Not found\n");
				ccb->ccb_h.status = CAM_PATH_INVALID;
			}
			break;
		}
		/* FALLTHROUGH */
	}
	case XPT_SCSI_IO:
		/* XXX Fully implement the hard ones */
		ccb->ccb_h.status = CAM_UA_ABORT;
		break;
	default:
		ccb->ccb_h.status = CAM_REQ_INVALID;
		break;
	}
	xpt_done(ccb);
}
1779
1780void
1781ahc_send_async(struct ahc_softc *ahc, char channel, u_int target,
1782		u_int lun, ac_code code, void *opt_arg)
1783{
1784	struct	ccb_trans_settings cts;
1785	struct cam_path *path;
1786	void *arg;
1787	int error;
1788
1789	arg = NULL;
1790	error = ahc_create_path(ahc, channel, target, lun, &path);
1791
1792	if (error != CAM_REQ_CMP)
1793		return;
1794
1795	switch (code) {
1796	case AC_TRANSFER_NEG:
1797	{
1798#ifdef AHC_NEW_TRAN_SETTINGS
1799		struct	ccb_trans_settings_scsi *scsi;
1800
1801		cts.type = CTS_TYPE_CURRENT_SETTINGS;
1802		scsi = &cts.proto_specific.scsi;
1803#else
1804		cts.flags = CCB_TRANS_CURRENT_SETTINGS;
1805#endif
1806		cts.ccb_h.path = path;
1807		cts.ccb_h.target_id = target;
1808		cts.ccb_h.target_lun = lun;
1809		ahc_get_tran_settings(ahc, channel == 'A' ? ahc->our_id
1810							  : ahc->our_id_b,
1811				      channel, &cts);
1812		arg = &cts;
1813#ifdef AHC_NEW_TRAN_SETTINGS
1814		scsi->valid &= ~CTS_SCSI_VALID_TQ;
1815		scsi->flags &= ~CTS_SCSI_FLAGS_TAG_ENB;
1816#else
1817		cts.valid &= ~CCB_TRANS_TQ_VALID;
1818		cts.flags &= ~CCB_TRANS_TAG_ENB;
1819#endif
1820		if (opt_arg == NULL)
1821			break;
1822		if (*((ahc_queue_alg *)opt_arg) == AHC_QUEUE_TAGGED)
1823#ifdef AHC_NEW_TRAN_SETTINGS
1824			scsi->flags |= ~CTS_SCSI_FLAGS_TAG_ENB;
1825		scsi->valid |= CTS_SCSI_VALID_TQ;
1826#else
1827			cts.flags |= CCB_TRANS_TAG_ENB;
1828		cts.valid |= CCB_TRANS_TQ_VALID;
1829#endif
1830		break;
1831	}
1832	case AC_SENT_BDR:
1833	case AC_BUS_RESET:
1834		break;
1835	default:
1836		panic("ahc_send_async: Unexpected async event");
1837	}
1838	xpt_async(code, path, arg);
1839	xpt_free_path(path);
1840}
1841
/*
 * Platform hook invoked when tagged queuing is enabled or disabled
 * for a device.  Intentionally a no-op on FreeBSD — presumably CAM
 * tracks tagged openings itself (nothing platform-specific to update
 * here).
 */
void
ahc_platform_set_tags(struct ahc_softc *ahc,
		      struct ahc_devinfo *devinfo, int enable)
{
}
1847
1848int
1849ahc_platform_alloc(struct ahc_softc *ahc, void *platform_arg)
1850{
1851	ahc->platform_data = malloc(sizeof(struct ahc_platform_data), M_DEVBUF,
1852	    M_NOWAIT | M_ZERO);
1853	if (ahc->platform_data == NULL)
1854		return (ENOMEM);
1855	return (0);
1856}
1857
/*
 * Release all platform-specific state:  register and IRQ bus
 * resources, both SIMs (second channel first) along with their paths
 * and bus registrations, the shutdown event handler, and finally the
 * platform data itself.
 */
void
ahc_platform_free(struct ahc_softc *ahc)
{
	struct ahc_platform_data *pdata;

	pdata = ahc->platform_data;
	if (pdata != NULL) {
		if (pdata->regs != NULL)
			bus_release_resource(ahc->dev_softc,
					     pdata->regs_res_type,
					     pdata->regs_res_id,
					     pdata->regs);

		if (pdata->irq != NULL)
			bus_release_resource(ahc->dev_softc,
					     pdata->irq_res_type,
					     0, pdata->irq);

		/* Tear down the second channel's SIM, if one was created. */
		if (pdata->sim_b != NULL) {
			xpt_async(AC_LOST_DEVICE, pdata->path_b, NULL);
			xpt_free_path(pdata->path_b);
			xpt_bus_deregister(cam_sim_path(pdata->sim_b));
			cam_sim_free(pdata->sim_b, /*free_devq*/TRUE);
		}
		if (pdata->sim != NULL) {
			xpt_async(AC_LOST_DEVICE, pdata->path, NULL);
			xpt_free_path(pdata->path);
			xpt_bus_deregister(cam_sim_path(pdata->sim));
			cam_sim_free(pdata->sim, /*free_devq*/TRUE);
		}
		if (pdata->eh != NULL)
			EVENTHANDLER_DEREGISTER(shutdown_final, pdata->eh);
		free(ahc->platform_data, M_DEVBUF);
	}
}
1893
/*
 * Softc ordering hook used when inserting controllers into the
 * global list.
 */
int
ahc_softc_comp(struct ahc_softc *lahc, struct ahc_softc *rahc)
{
	/* We don't sort softcs under FreeBSD so report equal always */
	return (0);
}
1900
/*
 * Newbus detach method.  Quiesces the hardware, removes the
 * interrupt handler, and frees the softc.  The global softc list
 * lock is held across the whole operation so a unit cannot be
 * detached twice.
 */
int
ahc_detach(device_t dev)
{
	struct ahc_softc *ahc;
	u_long l;
	u_long s;

	ahc_list_lock(&l);
	device_printf(dev, "detaching device\n");
	ahc = device_get_softc(dev);
	/* Confirm this softc is still on the list (not already freed). */
	ahc = ahc_find_softc(ahc);
	if (ahc == NULL) {
		device_printf(dev, "aic7xxx already detached\n");
		ahc_list_unlock(&l);
		return (ENOENT);
	}
	ahc_lock(ahc, &s);
	/* Disable interrupts before removing the handler. */
	ahc_intr_enable(ahc, FALSE);
	bus_teardown_intr(dev, ahc->platform_data->irq, ahc->platform_data->ih);
	ahc_unlock(ahc, &s);
	ahc_free(ahc);
	ahc_list_unlock(&l);
	return (0);
}
1925
1926#if UNUSED
1927static void
1928ahc_dump_targcmd(struct target_cmd *cmd)
1929{
1930	uint8_t *byte;
1931	uint8_t *last_byte;
1932	int i;
1933
1934	byte = &cmd->initiator_channel;
1935	/* Debugging info for received commands */
1936	last_byte = &cmd[1].initiator_channel;
1937
1938	i = 0;
1939	while (byte < last_byte) {
1940		if (i == 0)
1941			printf("\t");
1942		printf("%#x", *byte++);
1943		i++;
1944		if (i == 8) {
1945			printf("\n");
1946			i = 0;
1947		} else {
1948			printf(", ");
1949		}
1950	}
1951}
1952#endif
1953
/*
 * Kernel module event handler.  No per-event processing is performed;
 * every event (including unload) reports success.
 */
static int
ahc_modevent(module_t mod, int type, void *data)
{
	/* XXX Deal with busy status on unload. */
	return 0;
}
1960
/*
 * Register the "ahc" core as a kernel module, ordered among drivers,
 * depending on the CAM subsystem.
 */
static moduledata_t ahc_mod = {
	"ahc",
	ahc_modevent,
	NULL
};

DECLARE_MODULE(ahc, ahc_mod, SI_SUB_DRIVERS, SI_ORDER_MIDDLE);
MODULE_DEPEND(ahc, cam, 1, 1, 1);
MODULE_VERSION(ahc, 1);
1970