aic7xxx_osm.c revision 70807
1/*
2 * Bus independent FreeBSD shim for the aic7xxx based adaptec SCSI controllers
3 *
4 * Copyright (c) 1994, 1995, 1996, 1997, 1998, 1999, 2000 Justin T. Gibbs.
5 * All rights reserved.
6 *
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
9 * are met:
10 * 1. Redistributions of source code must retain the above copyright
11 *    notice, this list of conditions, and the following disclaimer,
12 *    without modification.
13 * 2. The name of the author may not be used to endorse or promote products
14 *    derived from this software without specific prior written permission.
15 *
16 * Alternatively, this software may be distributed under the terms of the
17 * GNU Public License ("GPL").
18 *
19 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
20 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
21 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
22 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
23 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
24 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
25 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
26 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
27 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
28 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
29 * SUCH DAMAGE.
30 *
31 * $Id$
32 *
33 * $FreeBSD: head/sys/dev/aic7xxx/aic7xxx_osm.c 70807 2001-01-09 00:40:38Z gibbs $
34 */
35
36#include <dev/aic7xxx/aic7xxx_freebsd.h>
37#include <dev/aic7xxx/aic7xxx_inline.h>
38
39#ifndef AHC_TMODE_ENABLE
40#define AHC_TMODE_ENABLE 0
41#endif
42
43#define ccb_scb_ptr spriv_ptr0
44
45#ifdef AHC_DEBUG
46static int     ahc_debug = AHC_DEBUG;
47#endif
48
49#if UNUSED
50static void	ahc_dump_targcmd(struct target_cmd *cmd);
51#endif
52static void	ahc_action(struct cam_sim *sim, union ccb *ccb);
53static void	ahc_get_tran_settings(struct ahc_softc *ahc,
54				      int our_id, char channel,
55				      struct ccb_trans_settings *cts);
56static void	ahc_async(void *callback_arg, uint32_t code,
57			  struct cam_path *path, void *arg);
58static void	ahc_execute_scb(void *arg, bus_dma_segment_t *dm_segs,
59				int nsegments, int error);
60static void	ahc_poll(struct cam_sim *sim);
61static void	ahc_setup_data(struct ahc_softc *ahc, struct cam_sim *sim,
62			       struct ccb_scsiio *csio, struct scb *scb);
63static void	ahc_abort_ccb(struct ahc_softc *ahc, struct cam_sim *sim,
64			      union ccb *ccb);
65static int	ahc_create_path(struct ahc_softc *ahc,
66				char channel, u_int target, u_int lun,
67				struct cam_path **path);
68
69static void	ahc_set_recoveryscb(struct ahc_softc *ahc, struct scb *scb);
70
71static int
72ahc_create_path(struct ahc_softc *ahc, char channel, u_int target,
73	        u_int lun, struct cam_path **path)
74{
75	path_id_t path_id;
76
77	if (channel == 'B')
78		path_id = cam_sim_path(ahc->platform_data->sim_b);
79	else
80		path_id = cam_sim_path(ahc->platform_data->sim);
81
82	return (xpt_create_path(path, /*periph*/NULL,
83				path_id, target, lun));
84}
85
86/*
87 * Attach all the sub-devices we can find
88 */
89int
90ahc_attach(struct ahc_softc *ahc)
91{
92	char   ahc_info[256];
93	struct ccb_setasync csa;
94	struct cam_devq *devq;
95	int bus_id;
96	int bus_id2;
97	struct cam_sim *sim;
98	struct cam_sim *sim2;
99	struct cam_path *path;
100	struct cam_path *path2;
101	long s;
102	int count;
103	int error;
104
105	count = 0;
106	sim = NULL;
107	sim2 = NULL;
108
109	ahc_controller_info(ahc, ahc_info);
110	printf("%s\n", ahc_info);
111	ahc_lock(ahc, &s);
112	/* Hook up our interrupt handler */
113	if ((error = bus_setup_intr(ahc->dev_softc, ahc->platform_data->irq,
114				    INTR_TYPE_CAM, ahc_platform_intr, ahc,
115				    &ahc->platform_data->ih)) != 0) {
116		device_printf(ahc->dev_softc, "bus_setup_intr() failed: %d\n",
117			      error);
118		goto fail;
119	}
120
121	/*
122	 * Attach secondary channel first if the user has
123	 * declared it the primary channel.
124	 */
125	if ((ahc->flags & AHC_CHANNEL_B_PRIMARY) != 0) {
126		bus_id = 1;
127		bus_id2 = 0;
128	} else {
129		bus_id = 0;
130		bus_id2 = 1;
131	}
132
133	/*
134	 * Create the device queue for our SIM(s).
135	 */
136	devq = cam_simq_alloc(AHC_SCB_MAX - 1);
137	if (devq == NULL)
138		goto fail;
139
140	/*
141	 * Construct our first channel SIM entry
142	 */
143	sim = cam_sim_alloc(ahc_action, ahc_poll, "ahc", ahc,
144			    device_get_unit(ahc->dev_softc),
145			    1, AHC_SCB_MAX - 1, devq);
146	if (sim == NULL) {
147		cam_simq_free(devq);
148		goto fail;
149	}
150
151	if (xpt_bus_register(sim, bus_id) != CAM_SUCCESS) {
152		cam_sim_free(sim, /*free_devq*/TRUE);
153		sim = NULL;
154		goto fail;
155	}
156
157	if (xpt_create_path(&path, /*periph*/NULL,
158			    cam_sim_path(sim), CAM_TARGET_WILDCARD,
159			    CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
160		xpt_bus_deregister(cam_sim_path(sim));
161		cam_sim_free(sim, /*free_devq*/TRUE);
162		sim = NULL;
163		goto fail;
164	}
165
166	xpt_setup_ccb(&csa.ccb_h, path, /*priority*/5);
167	csa.ccb_h.func_code = XPT_SASYNC_CB;
168	csa.event_enable = AC_LOST_DEVICE;
169	csa.callback = ahc_async;
170	csa.callback_arg = sim;
171	xpt_action((union ccb *)&csa);
172	count++;
173
174	if (ahc->features & AHC_TWIN) {
175		sim2 = cam_sim_alloc(ahc_action, ahc_poll, "ahc",
176				    ahc, device_get_unit(ahc->dev_softc), 1,
177				    AHC_SCB_MAX - 1, devq);
178
179		if (sim2 == NULL) {
180			printf("ahc_attach: Unable to attach second "
181			       "bus due to resource shortage");
182			goto fail;
183		}
184
185		if (xpt_bus_register(sim2, bus_id2) != CAM_SUCCESS) {
186			printf("ahc_attach: Unable to attach second "
187			       "bus due to resource shortage");
188			/*
189			 * We do not want to destroy the device queue
190			 * because the first bus is using it.
191			 */
192			cam_sim_free(sim2, /*free_devq*/FALSE);
193			goto fail;
194		}
195
196		if (xpt_create_path(&path2, /*periph*/NULL,
197				    cam_sim_path(sim2),
198				    CAM_TARGET_WILDCARD,
199				    CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
200			xpt_bus_deregister(cam_sim_path(sim2));
201			cam_sim_free(sim2, /*free_devq*/FALSE);
202			sim2 = NULL;
203			goto fail;
204		}
205		xpt_setup_ccb(&csa.ccb_h, path2, /*priority*/5);
206		csa.ccb_h.func_code = XPT_SASYNC_CB;
207		csa.event_enable = AC_LOST_DEVICE;
208		csa.callback = ahc_async;
209		csa.callback_arg = sim2;
210		xpt_action((union ccb *)&csa);
211		count++;
212	}
213
214fail:
215	if ((ahc->flags & AHC_CHANNEL_B_PRIMARY) != 0) {
216		ahc->platform_data->sim_b = sim;
217		ahc->platform_data->path_b = path;
218		ahc->platform_data->sim = sim2;
219		ahc->platform_data->path = path2;
220	} else {
221		ahc->platform_data->sim = sim;
222		ahc->platform_data->path = path;
223		ahc->platform_data->sim_b = sim2;
224		ahc->platform_data->path_b = path2;
225	}
226	ahc_unlock(ahc, &s);
227
228	if (count != 0)
229		/* We have to wait until after any system dumps... */
230		ahc->platform_data->eh =
231		    EVENTHANDLER_REGISTER(shutdown_final, ahc_shutdown,
232					  ahc, SHUTDOWN_PRI_DEFAULT);
233
234	return (count);
235}
236
/*
 * Catch an interrupt from the adapter
 */
void
ahc_platform_intr(void *arg)
{
	/* bus_setup_intr() registered us with the softc as our argument. */
	ahc_intr((struct ahc_softc *)arg);
}
248
/*
 * We have an scb which has been processed by the
 * adaptor, now we look to see how the operation
 * went.
 */
void
ahc_done(struct ahc_softc *ahc, struct scb *scb)
{
	union ccb *ccb;

	CAM_DEBUG(scb->io_ctx->ccb_h.path, CAM_DEBUG_TRACE,
		  ("ahc_done - scb %d\n", scb->hscb->tag));

	ccb = scb->io_ctx;
	/* This transaction is no longer in flight. */
	LIST_REMOVE(scb, pending_links);
	if ((scb->flags & SCB_UNTAGGEDQ) != 0) {
		struct scb_tailq *untagged_q;

		/*
		 * Remove ourselves from this target's untagged queue and
		 * kick the queue so the next untagged command can start.
		 */
		untagged_q = &ahc->untagged_queues[ccb->ccb_h.target_id];
		TAILQ_REMOVE(untagged_q, scb, links.tqe);
		scb->flags &= ~SCB_UNTAGGEDQ;
		ahc_run_untagged_queue(ahc, untagged_q);
	}

	/* The command completed; cancel its watchdog timeout. */
	untimeout(ahc_timeout, (caddr_t)scb, ccb->ccb_h.timeout_ch);

	if ((ccb->ccb_h.flags & CAM_DIR_MASK) != CAM_DIR_NONE) {
		bus_dmasync_op_t op;

		/* Finish the DMA mapping for this transfer's data buffer. */
		if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN)
			op = BUS_DMASYNC_POSTREAD;
		else
			op = BUS_DMASYNC_POSTWRITE;
		bus_dmamap_sync(ahc->buffer_dmat, scb->dmamap, op);
		bus_dmamap_unload(ahc->buffer_dmat, scb->dmamap);
	}

	/* Target-mode continue I/O: complete immediately, no autosense. */
	if (ccb->ccb_h.func_code == XPT_CONT_TARGET_IO) {
		if (ahc_get_transaction_status(scb) == CAM_REQ_INPROG)
			ccb->ccb_h.status |= CAM_REQ_CMP;
		ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
		ahc_free_scb(ahc, scb);
		xpt_done(ccb);
		return;
	}

	/*
	 * If the recovery SCB completes, we have to be
	 * out of our timeout.
	 */
	if ((scb->flags & SCB_RECOVERY_SCB) != 0) {
		struct	scb *list_scb;

		/*
		 * We were able to complete the command successfully,
		 * so reinstate the timeouts for all other pending
		 * commands.
		 */
		LIST_FOREACH(list_scb, &ahc->pending_scbs, pending_links) {
			union ccb *ccb;

			ccb = list_scb->io_ctx;
			ccb->ccb_h.timeout_ch =
			    timeout(ahc_timeout, list_scb,
				    (ccb->ccb_h.timeout * hz)/1000);
		}

		/*
		 * A BDR or abort issued during recovery means the
		 * original command timed out; report it as such.
		 */
		if (ahc_get_transaction_status(scb) == CAM_BDR_SENT
		 || ahc_get_transaction_status(scb) == CAM_REQ_ABORTED)
			ahc_set_transaction_status(scb, CAM_CMD_TIMEOUT);
		ahc_print_path(ahc, scb);
		printf("no longer in timeout, status = %x\n",
		       ccb->ccb_h.status);
	}

	/* Don't clobber any existing error state */
	if (ahc_get_transaction_status(scb) == CAM_REQ_INPROG) {
		ccb->ccb_h.status |= CAM_REQ_CMP;
	} else if ((scb->flags & SCB_SENSE) != 0) {
		/*
		 * We performed autosense retrieval.
		 *
		 * Zero any sense not transferred by the
		 * device.  The SCSI spec mandates that any
		 * untransfered data should be assumed to be
		 * zero.  Complete the 'bounce' of sense information
		 * through buffers accessible via bus-space by
		 * copying it into the clients csio.
		 */
		memset(&ccb->csio.sense_data, 0, sizeof(ccb->csio.sense_data));
		/*
		 * NOTE(review): copy length is the sense SG element length
		 * minus the reported residual; assumes sense_resid never
		 * exceeds the SG length — TODO confirm against sequencer.
		 */
		memcpy(&ccb->csio.sense_data,
		       ahc_get_sense_buf(ahc, scb),
		       (scb->sg_list->len & AHC_SG_LEN_MASK)
		       - ccb->csio.sense_resid);
		scb->io_ctx->ccb_h.status |= CAM_AUTOSNS_VALID;
	}
	ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
	ahc_free_scb(ahc, scb);
	xpt_done(ccb);
}
349
350static void
351ahc_action(struct cam_sim *sim, union ccb *ccb)
352{
353	struct	ahc_softc *ahc;
354	struct	tmode_lstate *lstate;
355	u_int	target_id;
356	u_int	our_id;
357	long	s;
358
359	CAM_DEBUG(ccb->ccb_h.path, CAM_DEBUG_TRACE, ("ahc_action\n"));
360
361	ahc = (struct ahc_softc *)cam_sim_softc(sim);
362
363	target_id = ccb->ccb_h.target_id;
364	our_id = SIM_SCSI_ID(ahc, sim);
365
366	switch (ccb->ccb_h.func_code) {
367	/* Common cases first */
368	case XPT_ACCEPT_TARGET_IO:	/* Accept Host Target Mode CDB */
369	case XPT_CONT_TARGET_IO:/* Continue Host Target I/O Connection*/
370	{
371		struct	   tmode_tstate *tstate;
372		cam_status status;
373
374		status = ahc_find_tmode_devs(ahc, sim, ccb, &tstate,
375					     &lstate, TRUE);
376
377		if (status != CAM_REQ_CMP) {
378			if (ccb->ccb_h.func_code == XPT_CONT_TARGET_IO) {
379				/* Response from the black hole device */
380				tstate = NULL;
381				lstate = ahc->black_hole;
382			} else {
383				ccb->ccb_h.status = status;
384				xpt_done(ccb);
385				break;
386			}
387		}
388		if (ccb->ccb_h.func_code == XPT_ACCEPT_TARGET_IO) {
389
390			ahc_lock(ahc, &s);
391			SLIST_INSERT_HEAD(&lstate->accept_tios, &ccb->ccb_h,
392					  sim_links.sle);
393			ccb->ccb_h.status = CAM_REQ_INPROG;
394			if ((ahc->flags & AHC_TQINFIFO_BLOCKED) != 0)
395				ahc_run_tqinfifo(ahc, /*paused*/FALSE);
396			ahc_unlock(ahc, &s);
397			break;
398		}
399
400		/*
401		 * The target_id represents the target we attempt to
402		 * select.  In target mode, this is the initiator of
403		 * the original command.
404		 */
405		our_id = target_id;
406		target_id = ccb->csio.init_id;
407		/* FALLTHROUGH */
408	}
409	case XPT_SCSI_IO:	/* Execute the requested I/O operation */
410	case XPT_RESET_DEV:	/* Bus Device Reset the specified SCSI device */
411	{
412		struct	scb *scb;
413		struct	hardware_scb *hscb;
414
415		if ((ahc->flags & AHC_INITIATORROLE) == 0
416		 && (ccb->ccb_h.func_code == XPT_SCSI_IO
417		  || ccb->ccb_h.func_code == XPT_RESET_DEV)) {
418			ccb->ccb_h.status = CAM_PROVIDE_FAIL;
419			xpt_done(ccb);
420		}
421
422		/*
423		 * get an scb to use.
424		 */
425		ahc_lock(ahc, &s);
426		if ((scb = ahc_get_scb(ahc)) == NULL) {
427
428			xpt_freeze_simq(sim, /*count*/1);
429			ahc->flags |= AHC_RESOURCE_SHORTAGE;
430			ahc_unlock(ahc, &s);
431			ccb->ccb_h.status = CAM_REQUEUE_REQ;
432			xpt_done(ccb);
433			return;
434		}
435		ahc_unlock(ahc, &s);
436
437		hscb = scb->hscb;
438
439		CAM_DEBUG(ccb->ccb_h.path, CAM_DEBUG_SUBTRACE,
440			  ("start scb(%p)\n", scb));
441		scb->io_ctx = ccb;
442		/*
443		 * So we can find the SCB when an abort is requested
444		 */
445		ccb->ccb_h.ccb_scb_ptr = scb;
446
447		/*
448		 * Put all the arguments for the xfer in the scb
449		 */
450		hscb->control = 0;
451		hscb->scsiid = BUILD_SCSIID(ahc, sim, target_id, our_id);
452		hscb->lun = ccb->ccb_h.target_lun;
453		if (ccb->ccb_h.func_code == XPT_RESET_DEV) {
454			hscb->cdb_len = 0;
455			scb->flags |= SCB_DEVICE_RESET;
456			hscb->control |= MK_MESSAGE;
457			ahc_execute_scb(scb, NULL, 0, 0);
458		} else {
459			if (ccb->ccb_h.func_code == XPT_CONT_TARGET_IO) {
460				struct target_data *tdata;
461
462				tdata = &hscb->shared_data.tdata;
463				if (ahc->pending_device == lstate) {
464					scb->flags |= SCB_TARGET_IMMEDIATE;
465					ahc->pending_device = NULL;
466				}
467				hscb->control |= TARGET_SCB;
468				tdata->target_phases = IDENTIFY_SEEN;
469				if ((ccb->ccb_h.flags & CAM_SEND_STATUS) != 0) {
470					tdata->target_phases |= SPHASE_PENDING;
471					tdata->scsi_status =
472					    ccb->csio.scsi_status;
473				}
474				tdata->initiator_tag = ccb->csio.tag_id;
475			}
476			if (ccb->ccb_h.flags & CAM_TAG_ACTION_VALID)
477				hscb->control |= ccb->csio.tag_action;
478
479			ahc_setup_data(ahc, sim, &ccb->csio, scb);
480		}
481		break;
482	}
483	case XPT_NOTIFY_ACK:
484	case XPT_IMMED_NOTIFY:
485	{
486		struct	   tmode_tstate *tstate;
487		struct	   tmode_lstate *lstate;
488		cam_status status;
489
490		status = ahc_find_tmode_devs(ahc, sim, ccb, &tstate,
491					     &lstate, TRUE);
492
493		if (status != CAM_REQ_CMP) {
494			ccb->ccb_h.status = status;
495			xpt_done(ccb);
496			break;
497		}
498		SLIST_INSERT_HEAD(&lstate->immed_notifies, &ccb->ccb_h,
499				  sim_links.sle);
500		ccb->ccb_h.status = CAM_REQ_INPROG;
501		ahc_send_lstate_events(ahc, lstate);
502		break;
503	}
504	case XPT_EN_LUN:		/* Enable LUN as a target */
505		ahc_handle_en_lun(ahc, sim, ccb);
506		xpt_done(ccb);
507		break;
508	case XPT_ABORT:			/* Abort the specified CCB */
509	{
510		ahc_abort_ccb(ahc, sim, ccb);
511		break;
512	}
513	case XPT_SET_TRAN_SETTINGS:
514	{
515#ifdef AHC_NEW_TRAN_SETTINGS
516		struct	ahc_devinfo devinfo;
517		struct	ccb_trans_settings *cts;
518		struct	ccb_trans_settings_scsi *scsi;
519		struct	ccb_trans_settings_spi *spi;
520		struct	ahc_initiator_tinfo *tinfo;
521		struct	tmode_tstate *tstate;
522		uint16_t *discenable;
523		uint16_t *tagenable;
524		u_int	update_type;
525
526		cts = &ccb->cts;
527		scsi = &cts->proto_specific.scsi;
528		spi = &cts->xport_specific.spi;
529		ahc_compile_devinfo(&devinfo, SIM_SCSI_ID(ahc, sim),
530				    cts->ccb_h.target_id,
531				    cts->ccb_h.target_lun,
532				    SIM_CHANNEL(ahc, sim),
533				    ROLE_UNKNOWN);
534		tinfo = ahc_fetch_transinfo(ahc, devinfo.channel,
535					    devinfo.our_scsiid,
536					    devinfo.target, &tstate);
537		update_type = 0;
538		if (cts->type == CTS_TYPE_CURRENT_SETTINGS) {
539			update_type |= AHC_TRANS_GOAL;
540			discenable = &tstate->discenable;
541			tagenable = &tstate->tagenable;
542			tinfo->current.protocol_version =
543			    cts->protocol_version;
544			tinfo->current.transport_version =
545			    cts->transport_version;
546			tinfo->goal.protocol_version =
547			    cts->protocol_version;
548			tinfo->goal.transport_version =
549			    cts->transport_version;
550		} else if (cts->type == CTS_TYPE_USER_SETTINGS) {
551			update_type |= AHC_TRANS_USER;
552			discenable = &ahc->user_discenable;
553			tagenable = &ahc->user_tagenable;
554			tinfo->user.protocol_version =
555			    cts->protocol_version;
556			tinfo->user.transport_version =
557			    cts->transport_version;
558		} else {
559			ccb->ccb_h.status = CAM_REQ_INVALID;
560			xpt_done(ccb);
561			break;
562		}
563
564		ahc_lock(ahc, &s);
565
566		if ((spi->valid & CTS_SPI_VALID_DISC) != 0) {
567			if ((spi->flags & CTS_SPI_FLAGS_DISC_ENB) != 0)
568				*discenable |= devinfo.target_mask;
569			else
570				*discenable &= ~devinfo.target_mask;
571		}
572
573		if ((scsi->valid & CTS_SCSI_VALID_TQ) != 0) {
574			if ((scsi->flags & CTS_SCSI_FLAGS_TAG_ENB) != 0)
575				*tagenable |= devinfo.target_mask;
576			else
577				*tagenable &= ~devinfo.target_mask;
578		}
579
580		if ((spi->valid & CTS_SPI_VALID_BUS_WIDTH) != 0) {
581			ahc_validate_width(ahc, /*tinfo limit*/NULL,
582					   &spi->bus_width, ROLE_UNKNOWN);
583			ahc_set_width(ahc, &devinfo, spi->bus_width,
584				      update_type, /*paused*/FALSE);
585		}
586
587		if ((spi->valid & CTS_SPI_VALID_PPR_OPTIONS) == 0) {
588			if (update_type == AHC_TRANS_USER)
589				spi->ppr_options = tinfo->user.ppr_options;
590			else
591				spi->ppr_options = tinfo->goal.ppr_options;
592		}
593
594		if ((spi->valid & CTS_SPI_VALID_SYNC_OFFSET) == 0) {
595			if (update_type == AHC_TRANS_USER)
596				spi->sync_offset = tinfo->user.offset;
597			else
598				spi->sync_offset = tinfo->goal.offset;
599		}
600
601		if ((spi->valid & CTS_SPI_VALID_SYNC_RATE) == 0) {
602			if (update_type == AHC_TRANS_USER)
603				spi->sync_period = tinfo->user.period;
604			else
605				spi->sync_period = tinfo->goal.period;
606		}
607
608		if (((spi->valid & CTS_SPI_VALID_SYNC_RATE) != 0)
609		 || ((spi->valid & CTS_SPI_VALID_SYNC_OFFSET) != 0)) {
610			struct ahc_syncrate *syncrate;
611			u_int maxsync;
612
613			if ((ahc->features & AHC_ULTRA2) != 0)
614				maxsync = AHC_SYNCRATE_DT;
615			else if ((ahc->features & AHC_ULTRA) != 0)
616				maxsync = AHC_SYNCRATE_ULTRA;
617			else
618				maxsync = AHC_SYNCRATE_FAST;
619
620			syncrate = ahc_find_syncrate(ahc, &spi->sync_period,
621						     &spi->ppr_options,
622						     maxsync);
623			ahc_validate_offset(ahc, /*tinfo limit*/NULL,
624					    syncrate, &spi->sync_offset,
625					    spi->bus_width, ROLE_UNKNOWN);
626
627			/* We use a period of 0 to represent async */
628			if (spi->sync_offset == 0) {
629				spi->sync_period = 0;
630				spi->ppr_options = 0;
631			}
632
633			ahc_set_syncrate(ahc, &devinfo, syncrate,
634					 spi->sync_period, spi->sync_offset,
635					 spi->ppr_options, update_type,
636					 /*paused*/FALSE);
637		}
638		ahc_unlock(ahc, &s);
639		ccb->ccb_h.status = CAM_REQ_CMP;
640		xpt_done(ccb);
641#else
642		struct	  ahc_devinfo devinfo;
643		struct	  ccb_trans_settings *cts;
644		struct	  ahc_initiator_tinfo *tinfo;
645		struct	  tmode_tstate *tstate;
646		uint16_t *discenable;
647		uint16_t *tagenable;
648		u_int	  update_type;
649		long	  s;
650
651		cts = &ccb->cts;
652		ahc_compile_devinfo(&devinfo, SIM_SCSI_ID(ahc, sim),
653				    cts->ccb_h.target_id,
654				    cts->ccb_h.target_lun,
655				    SIM_CHANNEL(ahc, sim),
656				    ROLE_UNKNOWN);
657		tinfo = ahc_fetch_transinfo(ahc, devinfo.channel,
658					    devinfo.our_scsiid,
659					    devinfo.target, &tstate);
660		update_type = 0;
661		if ((cts->flags & CCB_TRANS_CURRENT_SETTINGS) != 0) {
662			update_type |= AHC_TRANS_GOAL;
663			discenable = &tstate->discenable;
664			tagenable = &tstate->tagenable;
665		} else if ((cts->flags & CCB_TRANS_USER_SETTINGS) != 0) {
666			update_type |= AHC_TRANS_USER;
667			discenable = &ahc->user_discenable;
668			tagenable = &ahc->user_tagenable;
669		} else {
670			ccb->ccb_h.status = CAM_REQ_INVALID;
671			xpt_done(ccb);
672			break;
673		}
674
675		ahc_lock(ahc, &s);
676
677		if ((cts->valid & CCB_TRANS_DISC_VALID) != 0) {
678			if ((cts->flags & CCB_TRANS_DISC_ENB) != 0)
679				*discenable |= devinfo.target_mask;
680			else
681				*discenable &= ~devinfo.target_mask;
682		}
683
684		if ((cts->valid & CCB_TRANS_TQ_VALID) != 0) {
685			if ((cts->flags & CCB_TRANS_TAG_ENB) != 0)
686				*tagenable |= devinfo.target_mask;
687			else
688				*tagenable &= ~devinfo.target_mask;
689		}
690
691		if ((cts->valid & CCB_TRANS_BUS_WIDTH_VALID) != 0) {
692			ahc_validate_width(ahc, /*tinfo limit*/NULL,
693					   &cts->bus_width, ROLE_UNKNOWN);
694			ahc_set_width(ahc, &devinfo, cts->bus_width,
695				      update_type, /*paused*/FALSE);
696		}
697
698		if ((cts->valid & CCB_TRANS_SYNC_OFFSET_VALID) == 0) {
699			if (update_type == AHC_TRANS_USER)
700				cts->sync_offset = tinfo->user.offset;
701			else
702				cts->sync_offset = tinfo->goal.offset;
703		}
704
705		if ((cts->valid & CCB_TRANS_SYNC_RATE_VALID) == 0) {
706			if (update_type == AHC_TRANS_USER)
707				cts->sync_period = tinfo->user.period;
708			else
709				cts->sync_period = tinfo->goal.period;
710		}
711
712		if (((cts->valid & CCB_TRANS_SYNC_RATE_VALID) != 0)
713		 || ((cts->valid & CCB_TRANS_SYNC_OFFSET_VALID) != 0)) {
714			struct ahc_syncrate *syncrate;
715			u_int ppr_options;
716			u_int maxsync;
717
718			if ((ahc->features & AHC_ULTRA2) != 0)
719				maxsync = AHC_SYNCRATE_DT;
720			else if ((ahc->features & AHC_ULTRA) != 0)
721				maxsync = AHC_SYNCRATE_ULTRA;
722			else
723				maxsync = AHC_SYNCRATE_FAST;
724
725			ppr_options = 0;
726			if (cts->sync_period <= 9)
727				ppr_options = MSG_EXT_PPR_DT_REQ;
728
729			syncrate = ahc_find_syncrate(ahc, &cts->sync_period,
730						     &ppr_options,
731						     maxsync);
732			ahc_validate_offset(ahc, /*tinfo limit*/NULL,
733					    syncrate, &cts->sync_offset,
734					    MSG_EXT_WDTR_BUS_8_BIT,
735					    ROLE_UNKNOWN);
736
737			/* We use a period of 0 to represent async */
738			if (cts->sync_offset == 0) {
739				cts->sync_period = 0;
740				ppr_options = 0;
741			}
742
743			if (ppr_options == MSG_EXT_PPR_DT_REQ
744			 && tinfo->user.transport_version >= 3) {
745				tinfo->goal.transport_version =
746				    tinfo->user.transport_version;
747				tinfo->current.transport_version =
748				    tinfo->user.transport_version;
749			}
750
751			ahc_set_syncrate(ahc, &devinfo, syncrate,
752					 cts->sync_period, cts->sync_offset,
753					 ppr_options, update_type,
754					 /*paused*/FALSE);
755		}
756		ahc_unlock(ahc, &s);
757		ccb->ccb_h.status = CAM_REQ_CMP;
758		xpt_done(ccb);
759#endif
760		break;
761	}
762	case XPT_GET_TRAN_SETTINGS:
763	/* Get default/user set transfer settings for the target */
764	{
765
766		ahc_lock(ahc, &s);
767		ahc_get_tran_settings(ahc, SIM_SCSI_ID(ahc, sim),
768				      SIM_CHANNEL(ahc, sim), &ccb->cts);
769		ahc_unlock(ahc, &s);
770		xpt_done(ccb);
771		break;
772	}
773	case XPT_CALC_GEOMETRY:
774	{
775		struct	  ccb_calc_geometry *ccg;
776		uint32_t size_mb;
777		uint32_t secs_per_cylinder;
778		int	  extended;
779
780		ccg = &ccb->ccg;
781		size_mb = ccg->volume_size
782			/ ((1024L * 1024L) / ccg->block_size);
783		extended = SIM_IS_SCSIBUS_B(ahc, sim)
784			? ahc->flags & AHC_EXTENDED_TRANS_B
785			: ahc->flags & AHC_EXTENDED_TRANS_A;
786
787		if (size_mb > 1024 && extended) {
788			ccg->heads = 255;
789			ccg->secs_per_track = 63;
790		} else {
791			ccg->heads = 64;
792			ccg->secs_per_track = 32;
793		}
794		secs_per_cylinder = ccg->heads * ccg->secs_per_track;
795		ccg->cylinders = ccg->volume_size / secs_per_cylinder;
796		ccb->ccb_h.status = CAM_REQ_CMP;
797		xpt_done(ccb);
798		break;
799	}
800	case XPT_RESET_BUS:		/* Reset the specified SCSI bus */
801	{
802		int  found;
803
804		ahc_lock(ahc, &s);
805		found = ahc_reset_channel(ahc, SIM_CHANNEL(ahc, sim),
806					  /*initiate reset*/TRUE);
807		ahc_unlock(ahc, &s);
808		if (bootverbose) {
809			xpt_print_path(SIM_PATH(ahc, sim));
810			printf("SCSI bus reset delivered. "
811			       "%d SCBs aborted.\n", found);
812		}
813		ccb->ccb_h.status = CAM_REQ_CMP;
814		xpt_done(ccb);
815		break;
816	}
817	case XPT_TERM_IO:		/* Terminate the I/O process */
818		/* XXX Implement */
819		ccb->ccb_h.status = CAM_REQ_INVALID;
820		xpt_done(ccb);
821		break;
822	case XPT_PATH_INQ:		/* Path routing inquiry */
823	{
824		struct ccb_pathinq *cpi = &ccb->cpi;
825
826		cpi->version_num = 1; /* XXX??? */
827		cpi->hba_inquiry = PI_SDTR_ABLE|PI_TAG_ABLE;
828		if ((ahc->features & AHC_WIDE) != 0)
829			cpi->hba_inquiry |= PI_WIDE_16;
830		if ((ahc->features & AHC_TARGETMODE) != 0) {
831			cpi->target_sprt = PIT_PROCESSOR
832					 | PIT_DISCONNECT
833					 | PIT_TERM_IO;
834		} else {
835			cpi->target_sprt = 0;
836		}
837		cpi->hba_misc = 0;
838		cpi->hba_eng_cnt = 0;
839		cpi->max_target = (ahc->features & AHC_WIDE) ? 15 : 7;
840		cpi->max_lun = AHC_NUM_LUNS - 1;
841		if (SIM_IS_SCSIBUS_B(ahc, sim)) {
842			cpi->initiator_id = ahc->our_id_b;
843			if ((ahc->flags & AHC_RESET_BUS_B) == 0)
844				cpi->hba_misc |= PIM_NOBUSRESET;
845		} else {
846			cpi->initiator_id = ahc->our_id;
847			if ((ahc->flags & AHC_RESET_BUS_A) == 0)
848				cpi->hba_misc |= PIM_NOBUSRESET;
849		}
850		cpi->bus_id = cam_sim_bus(sim);
851		cpi->base_transfer_speed = 3300;
852		strncpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN);
853		strncpy(cpi->hba_vid, "Adaptec", HBA_IDLEN);
854		strncpy(cpi->dev_name, cam_sim_name(sim), DEV_IDLEN);
855		cpi->unit_number = cam_sim_unit(sim);
856#ifdef AHC_NEW_TRAN_SETTINGS
857		cpi->protocol = PROTO_SCSI;
858		cpi->protocol_version = SCSI_REV_2;
859		cpi->transport = XPORT_SPI;
860		cpi->transport_version = 2;
861		cpi->xport_specific.spi.ppr_options = SID_SPI_CLOCK_ST;
862		if ((ahc->features & AHC_DT) != 0) {
863			cpi->transport_version = 3;
864			cpi->xport_specific.spi.ppr_options =
865			    SID_SPI_CLOCK_DT_ST;
866		}
867#endif
868		cpi->ccb_h.status = CAM_REQ_CMP;
869		xpt_done(ccb);
870		break;
871	}
872	default:
873		ccb->ccb_h.status = CAM_PROVIDE_FAIL;
874		xpt_done(ccb);
875		break;
876	}
877}
878
/*
 * Fill in a ccb_trans_settings structure with either the current or
 * the user/default negotiation settings (disconnection, tagged
 * queuing, sync rate/offset, bus width) for the addressed target.
 */
static void
ahc_get_tran_settings(struct ahc_softc *ahc, int our_id, char channel,
		      struct ccb_trans_settings *cts)
{
#ifdef AHC_NEW_TRAN_SETTINGS
	struct	ahc_devinfo devinfo;
	struct	ccb_trans_settings_scsi *scsi;
	struct	ccb_trans_settings_spi *spi;
	struct	ahc_initiator_tinfo *targ_info;
	struct	tmode_tstate *tstate;
	struct	ahc_transinfo *tinfo;

	scsi = &cts->proto_specific.scsi;
	spi = &cts->xport_specific.spi;
	ahc_compile_devinfo(&devinfo, our_id,
			    cts->ccb_h.target_id,
			    cts->ccb_h.target_lun,
			    channel, ROLE_UNKNOWN);
	targ_info = ahc_fetch_transinfo(ahc, devinfo.channel,
					devinfo.our_scsiid,
					devinfo.target, &tstate);

	/* Choose which set of negotiation data to report. */
	if (cts->type == CTS_TYPE_CURRENT_SETTINGS)
		tinfo = &targ_info->current;
	else
		tinfo = &targ_info->user;

	scsi->flags &= ~CTS_SCSI_FLAGS_TAG_ENB;
	spi->flags &= ~CTS_SPI_FLAGS_DISC_ENB;
	if (cts->type == CTS_TYPE_USER_SETTINGS) {
		if ((ahc->user_discenable & devinfo.target_mask) != 0)
			spi->flags |= CTS_SPI_FLAGS_DISC_ENB;

		if ((ahc->user_tagenable & devinfo.target_mask) != 0)
			scsi->flags |= CTS_SCSI_FLAGS_TAG_ENB;
	} else {
		if ((tstate->discenable & devinfo.target_mask) != 0)
			spi->flags |= CTS_SPI_FLAGS_DISC_ENB;

		if ((tstate->tagenable & devinfo.target_mask) != 0)
			scsi->flags |= CTS_SCSI_FLAGS_TAG_ENB;
	}
	cts->protocol_version = tinfo->protocol_version;
	cts->transport_version = tinfo->transport_version;

	spi->sync_period = tinfo->period;
	spi->sync_offset = tinfo->offset;
	spi->bus_width = tinfo->width;
	spi->ppr_options = tinfo->ppr_options;

	cts->protocol = PROTO_SCSI;
	cts->transport = XPORT_SPI;
	spi->valid = CTS_SPI_VALID_SYNC_RATE
		   | CTS_SPI_VALID_SYNC_OFFSET
		   | CTS_SPI_VALID_BUS_WIDTH
		   | CTS_SPI_VALID_PPR_OPTIONS;

	/* Disconnection/TQ are per-LUN concepts; skip them on wildcards. */
	if (cts->ccb_h.target_lun != CAM_LUN_WILDCARD) {
		scsi->valid = CTS_SCSI_VALID_TQ;
		spi->valid |= CTS_SPI_VALID_DISC;
	} else {
		scsi->valid = 0;
	}

	cts->ccb_h.status = CAM_REQ_CMP;
#else
	struct	ahc_devinfo devinfo;
	struct	ahc_initiator_tinfo *targ_info;
	struct	tmode_tstate *tstate;
	struct	ahc_transinfo *tinfo;
	long	s;

	ahc_compile_devinfo(&devinfo, our_id,
			    cts->ccb_h.target_id,
			    cts->ccb_h.target_lun,
			    channel, ROLE_UNKNOWN);
	targ_info = ahc_fetch_transinfo(ahc, devinfo.channel,
					devinfo.our_scsiid,
					devinfo.target, &tstate);

	/* Choose which set of negotiation data to report. */
	if ((cts->flags & CCB_TRANS_CURRENT_SETTINGS) != 0)
		tinfo = &targ_info->current;
	else
		tinfo = &targ_info->user;

	ahc_lock(ahc, &s);

	cts->flags &= ~(CCB_TRANS_DISC_ENB|CCB_TRANS_TAG_ENB);
	if ((cts->flags & CCB_TRANS_CURRENT_SETTINGS) == 0) {
		if ((ahc->user_discenable & devinfo.target_mask) != 0)
			cts->flags |= CCB_TRANS_DISC_ENB;

		if ((ahc->user_tagenable & devinfo.target_mask) != 0)
			cts->flags |= CCB_TRANS_TAG_ENB;
	} else {
		if ((tstate->discenable & devinfo.target_mask) != 0)
			cts->flags |= CCB_TRANS_DISC_ENB;

		if ((tstate->tagenable & devinfo.target_mask) != 0)
			cts->flags |= CCB_TRANS_TAG_ENB;
	}
	cts->sync_period = tinfo->period;
	cts->sync_offset = tinfo->offset;
	cts->bus_width = tinfo->width;

	ahc_unlock(ahc, &s);

	cts->valid = CCB_TRANS_SYNC_RATE_VALID
		   | CCB_TRANS_SYNC_OFFSET_VALID
		   | CCB_TRANS_BUS_WIDTH_VALID;

	/* Disconnection/TQ are per-LUN concepts; skip them on wildcards. */
	if (cts->ccb_h.target_lun != CAM_LUN_WILDCARD)
		cts->valid |= CCB_TRANS_DISC_VALID|CCB_TRANS_TQ_VALID;

	cts->ccb_h.status = CAM_REQ_CMP;
#endif
}
996
997static void
998ahc_async(void *callback_arg, uint32_t code, struct cam_path *path, void *arg)
999{
1000	struct ahc_softc *ahc;
1001	struct cam_sim *sim;
1002
1003	sim = (struct cam_sim *)callback_arg;
1004	ahc = (struct ahc_softc *)cam_sim_softc(sim);
1005	switch (code) {
1006	case AC_LOST_DEVICE:
1007	{
1008		struct	ahc_devinfo devinfo;
1009		long	s;
1010
1011		ahc_compile_devinfo(&devinfo, SIM_SCSI_ID(ahc, sim),
1012				    xpt_path_target_id(path),
1013				    xpt_path_lun_id(path),
1014				    SIM_CHANNEL(ahc, sim),
1015				    ROLE_UNKNOWN);
1016
1017		/*
1018		 * Revert to async/narrow transfers
1019		 * for the next device.
1020		 */
1021		ahc_lock(ahc, &s);
1022		ahc_set_width(ahc, &devinfo, MSG_EXT_WDTR_BUS_8_BIT,
1023			      AHC_TRANS_GOAL|AHC_TRANS_CUR, /*paused*/FALSE);
1024		ahc_set_syncrate(ahc, &devinfo, /*syncrate*/NULL,
1025				 /*period*/0, /*offset*/0, /*ppr_options*/0,
1026				 AHC_TRANS_GOAL|AHC_TRANS_CUR,
1027				 /*paused*/FALSE);
1028		ahc_unlock(ahc, &s);
1029		break;
1030	}
1031	default:
1032		break;
1033	}
1034}
1035
/*
 * bus_dmamap_load() completion callback: finish building the hardware
 * SCB from the DMA segment list and hand the command to the controller.
 *
 * arg        - the struct scb being set up (cast back below).
 * dm_segs    - array of nsegments bus_dma segments, or ignored on error.
 * nsegments  - number of valid entries in dm_segs.
 * error      - 0 on success; EFBIG if the transfer needed too many
 *              segments, any other non-zero value is a generic failure.
 *
 * On any error the CCB is completed immediately with an error status and
 * the SCB is returned to the free pool.
 */
static void
ahc_execute_scb(void *arg, bus_dma_segment_t *dm_segs, int nsegments,
		int error)
{
	struct	scb *scb;
	union	ccb *ccb;
	struct	ahc_softc *ahc;
	struct	ahc_initiator_tinfo *tinfo;
	struct	tmode_tstate *tstate;
	u_int	mask;
	long	s;

	scb = (struct scb *)arg;
	ccb = scb->io_ctx;
	ahc = scb->ahc_softc;

	/* A failed mapping aborts the transaction before it ever starts. */
	if (error != 0) {
		if (error == EFBIG)
			ahc_set_transaction_status(scb, CAM_REQ_TOO_BIG);
		else
			ahc_set_transaction_status(scb, CAM_REQ_CMP_ERR);
		if (nsegments != 0)
			bus_dmamap_unload(ahc->buffer_dmat, scb->dmamap);
		ahc_lock(ahc, &s);
		ahc_free_scb(ahc, scb);
		ahc_unlock(ahc, &s);
		xpt_done(ccb);
		return;
	}
	if (nsegments != 0) {
		struct	  ahc_dma_seg *sg;
		bus_dma_segment_t *end_seg;
		bus_dmasync_op_t op;

		end_seg = dm_segs + nsegments;

		/* Copy the segments into our SG list */
		sg = scb->sg_list;
		while (dm_segs < end_seg) {
			sg->addr = dm_segs->ds_addr;
/* XXX Add in the 5th byte of the address later. */
			sg->len = dm_segs->ds_len;
			sg++;
			dm_segs++;
		}

		/*
		 * Note where to find the SG entries in bus space.
		 * We also set the full residual flag which the
		 * sequencer will clear as soon as a data transfer
		 * occurs.
		 */
		scb->hscb->sgptr = scb->sg_list_phys | SG_FULL_RESID;

		/* Sync the data buffer for the direction of this transfer. */
		if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN)
			op = BUS_DMASYNC_PREREAD;
		else
			op = BUS_DMASYNC_PREWRITE;

		bus_dmamap_sync(ahc->buffer_dmat, scb->dmamap, op);

		if (ccb->ccb_h.func_code == XPT_CONT_TARGET_IO) {
			struct target_data *tdata;

			/* Target-mode continue I/O: record the data phase. */
			tdata = &scb->hscb->shared_data.tdata;
			tdata->target_phases |= DPHASE_PENDING;
			if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_OUT)
				tdata->data_phase = P_DATAOUT;
			else
				tdata->data_phase = P_DATAIN;

			/*
			 * If the transfer is of an odd length and in the
			 * "in" direction (scsi->HostBus), then it may
			 * trigger a bug in the 'WideODD' feature of
			 * non-Ultra2 chips.  Force the total data-length
			 * to be even by adding an extra, 1 byte, SG,
			 * element.  We do this even if we are not currently
			 * negotiated wide as negotiation could occur before
			 * this command is executed.
			 */
			if ((ahc->bugs & AHC_TMODE_WIDEODD_BUG) != 0
			 && (ccb->csio.dxfer_len & 0x1) != 0
			 && (ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {

				nsegments++;
				if (nsegments > AHC_NSEG) {

					/* No room for the pad segment. */
					ahc_set_transaction_status(scb,
					    CAM_REQ_TOO_BIG);
					bus_dmamap_unload(ahc->buffer_dmat,
							  scb->dmamap);
					ahc_lock(ahc, &s);
					ahc_free_scb(ahc, scb);
					ahc_unlock(ahc, &s);
					xpt_done(ccb);
					return;
				}
				/* 1-byte pad segment backed by a scratch buffer. */
				sg->addr = ahc->dma_bug_buf;
				sg->len = 1;
				sg++;
			}
		}
		/* Mark the final segment so the sequencer stops there. */
		sg--;
		sg->len |= AHC_DMA_LAST_SEG;

		/* Copy the first SG into the "current" data pointer area */
		scb->hscb->dataptr = scb->sg_list->addr;
		scb->hscb->datacnt = scb->sg_list->len;
	} else {
		/* No data phase for this command. */
		scb->hscb->sgptr = SG_LIST_NULL;
		scb->hscb->dataptr = 0;
		scb->hscb->datacnt = 0;
	}

	scb->sg_count = nsegments;

	ahc_lock(ahc, &s);

	/*
	 * Last time we need to check if this SCB needs to
	 * be aborted.
	 */
	if (ahc_get_transaction_status(scb) != CAM_REQ_INPROG) {
		if (nsegments != 0)
			bus_dmamap_unload(ahc->buffer_dmat,
					  scb->dmamap);
		ahc_free_scb(ahc, scb);
		ahc_unlock(ahc, &s);
		xpt_done(ccb);
		return;
	}

	/* Fill in the negotiated transfer parameters for this target. */
	tinfo = ahc_fetch_transinfo(ahc, SCSIID_CHANNEL(ahc, scb->hscb->scsiid),
				    SCSIID_OUR_ID(scb->hscb->scsiid),
				    SCSIID_TARGET(ahc, scb->hscb->scsiid),
				    &tstate);

	mask = SCB_GET_TARGET_MASK(ahc, scb);
	scb->hscb->scsirate = tinfo->scsirate;
	scb->hscb->scsioffset = tinfo->current.offset;
	if ((tstate->ultraenb & mask) != 0)
		scb->hscb->control |= ULTRAENB;

	if ((tstate->discenable & mask) != 0
	 && (ccb->ccb_h.flags & CAM_DIS_DISCONNECT) == 0)
		scb->hscb->control |= DISCENB;

	/* Caller requested (re)negotiation and we have a non-default goal. */
	if ((ccb->ccb_h.flags & CAM_NEGOTIATE) != 0
	 && (tinfo->goal.width != 0
	  || tinfo->goal.period != 0
	  || tinfo->goal.ppr_options != 0)) {
		scb->flags |= SCB_NEGOTIATE;
		scb->hscb->control |= MK_MESSAGE;
	}

	LIST_INSERT_HEAD(&ahc->pending_scbs, scb, pending_links);

	ccb->ccb_h.status |= CAM_SIM_QUEUED;

	/* Arm the command timeout (CAM_TIME_DEFAULT maps to 5 seconds). */
	if (ccb->ccb_h.timeout != CAM_TIME_INFINITY) {
		if (ccb->ccb_h.timeout == CAM_TIME_DEFAULT)
			ccb->ccb_h.timeout = 5 * 1000;
		ccb->ccb_h.timeout_ch =
		    timeout(ahc_timeout, (caddr_t)scb,
			    (ccb->ccb_h.timeout * hz) / 1000);
	}

	/*
	 * We only allow one untagged transaction
	 * per target in the initiator role unless
	 * we are storing a full busy target *lun*
	 * table in SCB space.
	 */
	if ((scb->hscb->control & (TARGET_SCB|TAG_ENB)) == 0
	 && (ahc->features & AHC_SCB_BTT) == 0) {
		struct scb_tailq *untagged_q;

		untagged_q = &(ahc->untagged_queues[ccb->ccb_h.target_id]);
		TAILQ_INSERT_TAIL(untagged_q, scb, links.tqe);
		scb->flags |= SCB_UNTAGGEDQ;
		/* Not at the head: an earlier untagged command is in flight. */
		if (TAILQ_FIRST(untagged_q) != scb) {
			ahc_unlock(ahc, &s);
			return;
		}
	}
	scb->flags |= SCB_ACTIVE;

	if ((scb->flags & SCB_TARGET_IMMEDIATE) != 0) {
		/* Resume the target-mode message loop with this SCB. */
		pause_sequencer(ahc);
		if ((ahc->flags & AHC_PAGESCBS) == 0)
			ahc_outb(ahc, SCBPTR, scb->hscb->tag);
		ahc_outb(ahc, SCB_TAG, scb->hscb->tag);
		ahc_outb(ahc, RETURN_1, CONT_MSG_LOOP);
		unpause_sequencer(ahc);
	} else {
		ahc_queue_scb(ahc, scb);
	}

	ahc_unlock(ahc, &s);
}
1237
/*
 * CAM polled-mode entry point: service the controller's interrupt
 * handler directly on behalf of this SIM.
 */
static void
ahc_poll(struct cam_sim *sim)
{
	struct ahc_softc *softc;

	softc = cam_sim_softc(sim);
	ahc_intr(softc);
}
1243
/*
 * Prepare a SCSI I/O or continue-target-I/O CCB for execution: copy the
 * CDB into the hardware SCB and kick off DMA mapping of the data buffer.
 * ahc_execute_scb() is invoked (possibly asynchronously via the bus_dma
 * callback) to complete setup and queue the command.
 */
static void
ahc_setup_data(struct ahc_softc *ahc, struct cam_sim *sim,
	       struct ccb_scsiio *csio, struct scb *scb)
{
	struct hardware_scb *hscb;
	struct ccb_hdr *ccb_h;

	hscb = scb->hscb;
	ccb_h = &csio->ccb_h;

	if (ccb_h->func_code == XPT_SCSI_IO) {
		hscb->cdb_len = csio->cdb_len;
		if ((ccb_h->flags & CAM_CDB_POINTER) != 0) {

			/*
			 * Reject CDBs larger than our embedded storage and
			 * physical-address CDB pointers, which we can't copy.
			 */
			if (hscb->cdb_len > sizeof(hscb->cdb32)
			 || (ccb_h->flags & CAM_CDB_PHYS) != 0) {
				u_long s;

				ahc_set_transaction_status(scb,
							   CAM_REQ_INVALID);
				ahc_lock(ahc, &s);
				ahc_free_scb(ahc, scb);
				ahc_unlock(ahc, &s);
				xpt_done((union ccb *)csio);
				return;
			}
			/* CDBs over 12 bytes use the separate cdb32 area. */
			if (hscb->cdb_len > 12) {
				memcpy(hscb->cdb32,
				       csio->cdb_io.cdb_ptr,
				       hscb->cdb_len);
				scb->flags |= SCB_CDB32_PTR;
			} else {
				memcpy(hscb->shared_data.cdb,
				       csio->cdb_io.cdb_ptr,
				       hscb->cdb_len);
			}
		} else {
			/* CDB bytes are embedded directly in the CCB. */
			if (hscb->cdb_len > 12) {
				memcpy(hscb->cdb32, csio->cdb_io.cdb_bytes,
				       hscb->cdb_len);
				scb->flags |= SCB_CDB32_PTR;
			} else {
				memcpy(hscb->shared_data.cdb,
				       csio->cdb_io.cdb_bytes,
				       hscb->cdb_len);
			}
		}
	}

	/* Only use S/G if there is a transfer */
	if ((ccb_h->flags & CAM_DIR_MASK) != CAM_DIR_NONE) {
		if ((ccb_h->flags & CAM_SCATTER_VALID) == 0) {
			/* We've been given a pointer to a single buffer */
			if ((ccb_h->flags & CAM_DATA_PHYS) == 0) {
				int s;
				int error;

				/* Block VM softints while the map is loaded. */
				s = splsoftvm();
				error = bus_dmamap_load(ahc->buffer_dmat,
							scb->dmamap,
							csio->data_ptr,
							csio->dxfer_len,
							ahc_execute_scb,
							scb, /*flags*/0);
				if (error == EINPROGRESS) {
					/*
					 * So as to maintain ordering,
					 * freeze the controller queue
					 * until our mapping is
					 * returned.
					 */
					xpt_freeze_simq(sim,
							/*count*/1);
					scb->io_ctx->ccb_h.status |=
					    CAM_RELEASE_SIMQ;
				}
				splx(s);
			} else {
				struct bus_dma_segment seg;

				/* Pointer to physical buffer */
				if (csio->dxfer_len > AHC_MAXTRANSFER_SIZE)
					panic("ahc_setup_data - Transfer size "
					      "larger than can device max");

				/* Already physical: build a one-entry segment. */
				seg.ds_addr = (bus_addr_t)csio->data_ptr;
				seg.ds_len = csio->dxfer_len;
				ahc_execute_scb(scb, &seg, 1, 0);
			}
		} else {
			struct bus_dma_segment *segs;

			if ((ccb_h->flags & CAM_DATA_PHYS) != 0)
				panic("ahc_setup_data - Physical segment "
				      "pointers unsupported");

			if ((ccb_h->flags & CAM_SG_LIST_PHYS) == 0)
				panic("ahc_setup_data - Virtual segment "
				      "addresses unsupported");

			/* Just use the segments provided */
			segs = (struct bus_dma_segment *)csio->data_ptr;
			ahc_execute_scb(scb, segs, csio->sglist_cnt, 0);
		}
	} else {
		/* No data transfer for this command. */
		ahc_execute_scb(scb, NULL, 0, 0);
	}
}
1352
1353static void
1354ahc_set_recoveryscb(struct ahc_softc *ahc, struct scb *scb) {
1355
1356	if ((scb->flags & SCB_RECOVERY_SCB) == 0) {
1357		struct scb *list_scb;
1358
1359		scb->flags |= SCB_RECOVERY_SCB;
1360
1361		/*
1362		 * Take all queued, but not sent SCBs out of the equation.
1363		 * Also ensure that no new CCBs are queued to us while we
1364		 * try to fix this problem.
1365		 */
1366		if ((scb->io_ctx->ccb_h.status & CAM_RELEASE_SIMQ) == 0) {
1367			xpt_freeze_simq(SCB_GET_SIM(ahc, scb), /*count*/1);
1368			scb->io_ctx->ccb_h.status |= CAM_RELEASE_SIMQ;
1369		}
1370
1371		/*
1372		 * Go through all of our pending SCBs and remove
1373		 * any scheduled timeouts for them.  We will reschedule
1374		 * them after we've successfully fixed this problem.
1375		 */
1376		LIST_FOREACH(list_scb, &ahc->pending_scbs, pending_links) {
1377			union ccb *ccb;
1378
1379			ccb = list_scb->io_ctx;
1380			untimeout(ahc_timeout, list_scb, ccb->ccb_h.timeout_ch);
1381		}
1382	}
1383}
1384
/*
 * Command timeout handler.  Pauses the sequencer, snapshots and prints
 * bus/sequencer state for debugging, then attempts progressively more
 * drastic recovery: wait on another active SCB's (longer) timeout,
 * queue a bus device reset (BDR) to the timed-out target, or, if the
 * SCB has already been through a reset/abort attempt, reset the bus.
 */
void
ahc_timeout(void *arg)
{
	struct	scb *scb;
	struct	ahc_softc *ahc;
	long	s;
	int	found;
	u_int	last_phase;
	int	target;
	int	lun;
	int	i;
	char	channel;

	scb = (struct scb *)arg;
	ahc = (struct ahc_softc *)scb->ahc_softc;

	ahc_lock(ahc, &s);

	/* Pause the chip and drain any posted completions first. */
	ahc_pause_and_flushwork(ahc);

	if ((scb->flags & SCB_ACTIVE) == 0) {
		/* Previous timeout took care of me already */
		printf("%s: Timedout SCB already complete. "
		       "Interrupts may not be functioning.\n", ahc_name(ahc));
		unpause_sequencer(ahc);
		ahc_unlock(ahc, &s);
		return;
	}

	target = SCB_GET_TARGET(ahc, scb);
	channel = SCB_GET_CHANNEL(ahc, scb);
	lun = SCB_GET_LUN(scb);

	ahc_print_path(ahc, scb);
	printf("SCB 0x%x - timed out ", scb->hscb->tag);
	/*
	 * Take a snapshot of the bus state and print out
	 * some information so we can track down driver bugs.
	 */
	last_phase = ahc_inb(ahc, LASTPHASE);

	/* Translate the phase code to a printable name. */
	for (i = 0; i < num_phases; i++) {
		if (last_phase == phase_table[i].phase)
			break;
	}
	printf("%s", phase_table[i].phasemsg);

	printf(", SEQADDR == 0x%x\n",
	       ahc_inb(ahc, SEQADDR0) | (ahc_inb(ahc, SEQADDR1) << 8));

	/*
	 * NOTE(review): each line below reads the auto-incrementing STACK
	 * register twice within one '|' expression; C does not sequence
	 * the operands, so the low/high byte pairing depends on the
	 * compiler's evaluation order — confirm this is benign debug-only
	 * output.
	 */
	printf("STACK == 0x%x, 0x%x, 0x%x, 0x%x\n",
		ahc_inb(ahc, STACK) | (ahc_inb(ahc, STACK) << 8),
		ahc_inb(ahc, STACK) | (ahc_inb(ahc, STACK) << 8),
		ahc_inb(ahc, STACK) | (ahc_inb(ahc, STACK) << 8),
		ahc_inb(ahc, STACK) | (ahc_inb(ahc, STACK) << 8));

	printf("SXFRCTL0 == 0x%x\n", ahc_inb(ahc, SXFRCTL0));

	ahc_dump_card_state(ahc);
	if (scb->sg_count > 0) {
		/* Dump the S/G list for the timed-out transaction. */
		for (i = 0; i < scb->sg_count; i++) {
			printf("sg[%d] - Addr 0x%x : Length %d\n",
			       i,
			       scb->sg_list[i].addr,
			       scb->sg_list[i].len & AHC_SG_LEN_MASK);
		}
	}
	if (scb->flags & (SCB_DEVICE_RESET|SCB_ABORT)) {
		/*
		 * Been down this road before.
		 * Do a full bus reset.
		 */
bus_reset:
		ahc_set_transaction_status(scb, CAM_CMD_TIMEOUT);
		found = ahc_reset_channel(ahc, channel, /*Initiate Reset*/TRUE);
		printf("%s: Issued Channel %c Bus Reset. "
		       "%d SCBs aborted\n", ahc_name(ahc), channel, found);
	} else {
		/*
		 * If we are a target, transition to bus free and report
		 * the timeout.
		 *
		 * The target/initiator that is holding up the bus may not
		 * be the same as the one that triggered this timeout
		 * (different commands have different timeout lengths).
		 * If the bus is idle and we are acting as the initiator
		 * for this request, queue a BDR message to the timed out
		 * target.  Otherwise, if the timed out transaction is
		 * active:
		 *   Initiator transaction:
		 *	Stuff the message buffer with a BDR message and assert
		 *	ATN in the hopes that the target will let go of the bus
		 *	and go to the mesgout phase.  If this fails, we'll
		 *	get another timeout 2 seconds later which will attempt
		 *	a bus reset.
		 *
		 *   Target transaction:
		 *	Transition to BUS FREE and report the error.
		 *	It's good to be the target!
		 */
		u_int active_scb_index;
		u_int saved_scbptr;

		saved_scbptr = ahc_inb(ahc, SCBPTR);
		active_scb_index = ahc_inb(ahc, SCB_TAG);

		if (last_phase != P_BUSFREE
		  && (ahc_inb(ahc, SEQ_FLAGS) & IDENTIFY_SEEN) != 0
		  && (active_scb_index < ahc->scb_data->numscbs)) {
			struct scb *active_scb;

			/*
			 * If the active SCB is not us, assume that
			 * the active SCB has a longer timeout than
			 * the timedout SCB, and wait for the active
			 * SCB to timeout.
			 */
			active_scb = ahc_lookup_scb(ahc, active_scb_index);
			if (active_scb != scb) {
				struct	ccb_hdr *ccbh;
				u_int	newtimeout;

				ahc_print_path(ahc, scb);
				printf("Other SCB Timeout%s",
			 	       (scb->flags & SCB_OTHERTCL_TIMEOUT) != 0
				       ? " again\n" : "\n");
				scb->flags |= SCB_OTHERTCL_TIMEOUT;
				/* Reschedule with the longer of the two timeouts. */
				newtimeout =
				    MAX(active_scb->io_ctx->ccb_h.timeout,
					scb->io_ctx->ccb_h.timeout);
				/* NOTE(review): ccbh is assigned but never used. */
				ccbh = &scb->io_ctx->ccb_h;
				scb->io_ctx->ccb_h.timeout_ch =
				    timeout(ahc_timeout, scb,
					    (newtimeout * hz) / 1000);
				ahc_unlock(ahc, &s);
				return;
			}

			/* It's us */
			if ((scb->hscb->control & TARGET_SCB) != 0) {

				/*
				 * Send back any queued up transactions
				 * and properly record the error condition.
				 */
				ahc_freeze_devq(ahc, scb);
				ahc_set_transaction_status(scb,
							   CAM_CMD_TIMEOUT);
				ahc_freeze_scb(scb);
				ahc_done(ahc, scb);

				/* Will clear us from the bus */
				restart_sequencer(ahc);
				ahc_unlock(ahc, &s);
				return;
			}

			/* Initiator SCB on the bus: ask for a BDR via ATN. */
			ahc_set_recoveryscb(ahc, active_scb);
			ahc_outb(ahc, MSG_OUT, HOST_MSG);
			ahc_outb(ahc, SCSISIGO, last_phase|ATNO);
			ahc_print_path(ahc, active_scb);
			printf("BDR message in message buffer\n");
			active_scb->flags |= SCB_DEVICE_RESET;
			/* Give the BDR 2 seconds before escalating to bus reset. */
			active_scb->io_ctx->ccb_h.timeout_ch =
			    timeout(ahc_timeout, (caddr_t)active_scb, 2 * hz);
			unpause_sequencer(ahc);
		} else {
			int	 disconnected;

			/* XXX Shouldn't panic.  Just punt instead */
			if ((scb->hscb->control & TARGET_SCB) != 0)
				panic("Timed-out target SCB but bus idle");

			if (last_phase != P_BUSFREE
			 && (ahc_inb(ahc, SSTAT0) & TARGET) != 0) {
				/* XXX What happened to the SCB? */
				/* Hung target selection.  Goto busfree */
				printf("%s: Hung target selection\n",
				       ahc_name(ahc));
				restart_sequencer(ahc);
				ahc_unlock(ahc, &s);
				return;
			}

			/* Still in the qinfifo means the device never saw it. */
			if (ahc_search_qinfifo(ahc, target, channel, lun,
					       scb->hscb->tag, ROLE_INITIATOR,
					       /*status*/0, SEARCH_COUNT) > 0) {
				disconnected = FALSE;
			} else {
				disconnected = TRUE;
			}

			if (disconnected) {

				ahc_set_recoveryscb(ahc, scb);
				/*
				 * Actually re-queue this SCB in an attempt
				 * to select the device before it reconnects.
				 * In either case (selection or reselection),
				 * we will now issue a target reset to the
				 * timed-out device.
				 *
				 * Set the MK_MESSAGE control bit indicating
				 * that we desire to send a message.  We
				 * also set the disconnected flag since
				 * in the paging case there is no guarantee
				 * that our SCB control byte matches the
				 * version on the card.  We don't want the
				 * sequencer to abort the command thinking
				 * an unsolicited reselection occurred.
				 */
				scb->hscb->control |= MK_MESSAGE|DISCONNECTED;
				scb->flags |= SCB_DEVICE_RESET;

				/*
				 * Remove any cached copy of this SCB in the
				 * disconnected list in preparation for the
				 * queuing of our abort SCB.  We use the
				 * same element in the SCB, SCB_NEXT, for
				 * both the qinfifo and the disconnected list.
				 */
				ahc_search_disc_list(ahc, target, channel,
						     lun, scb->hscb->tag,
						     /*stop_on_first*/TRUE,
						     /*remove*/TRUE,
						     /*save_state*/FALSE);

				/*
				 * In the non-paging case, the sequencer will
				 * never re-reference the in-core SCB.
				 * To make sure we are notified during
				 * reselection, set the MK_MESSAGE flag in
				 * the card's copy of the SCB.
				 */
				if ((ahc->flags & AHC_PAGESCBS) == 0) {
					ahc_outb(ahc, SCBPTR, scb->hscb->tag);
					ahc_outb(ahc, SCB_CONTROL,
						 ahc_inb(ahc, SCB_CONTROL)
						| MK_MESSAGE);
				}

				/*
				 * Clear out any entries in the QINFIFO first
				 * so we are the next SCB for this target
				 * to run.
				 */
				ahc_search_qinfifo(ahc,
						   SCB_GET_TARGET(ahc, scb),
						   channel, SCB_GET_LUN(scb),
						   SCB_LIST_NULL,
						   ROLE_INITIATOR,
						   CAM_REQUEUE_REQ,
						   SEARCH_COMPLETE);
				ahc_print_path(ahc, scb);
				printf("Queuing a BDR SCB\n");
				ahc_qinfifo_requeue_tail(ahc, scb);
				ahc_outb(ahc, SCBPTR, saved_scbptr);
				/* Give the BDR 2 seconds before escalating. */
				scb->io_ctx->ccb_h.timeout_ch =
				    timeout(ahc_timeout, (caddr_t)scb, 2 * hz);
				unpause_sequencer(ahc);
			} else {
				/* Go "immediately" to the bus reset */
				/* This shouldn't happen */
				ahc_set_recoveryscb(ahc, scb);
				ahc_print_path(ahc, scb);
				printf("SCB %d: Immediate reset.  "
					"Flags = 0x%x\n", scb->hscb->tag,
					scb->flags);
				goto bus_reset;
			}
		}
	}
	ahc_unlock(ahc, &s);
}
1659
/*
 * Handle an XPT_ABORT request.  For queued target-mode CCBs (accept
 * TIO / immediate notify) we search the relevant lstate list and remove
 * the CCB if it has not yet been claimed; anything already in flight
 * (including initiator XPT_SCSI_IO) is not aborted here.  The abort
 * request itself is always completed via xpt_done().
 */
static void
ahc_abort_ccb(struct ahc_softc *ahc, struct cam_sim *sim, union ccb *ccb)
{
	union ccb *abort_ccb;

	abort_ccb = ccb->cab.abort_ccb;
	switch (abort_ccb->ccb_h.func_code) {
	case XPT_ACCEPT_TARGET_IO:
	case XPT_IMMED_NOTIFY:
	case XPT_CONT_TARGET_IO:
	{
		struct tmode_tstate *tstate;
		struct tmode_lstate *lstate;
		struct ccb_hdr_slist *list;
		cam_status status;

		/* Locate the target/lun state the CCB was queued against. */
		status = ahc_find_tmode_devs(ahc, sim, abort_ccb, &tstate,
					     &lstate, TRUE);

		if (status != CAM_REQ_CMP) {
			ccb->ccb_h.status = status;
			break;
		}

		/* Pick the list the CCB would be waiting on, if any. */
		if (abort_ccb->ccb_h.func_code == XPT_ACCEPT_TARGET_IO)
			list = &lstate->accept_tios;
		else if (abort_ccb->ccb_h.func_code == XPT_IMMED_NOTIFY)
			list = &lstate->immed_notifies;
		else
			list = NULL;

		if (list != NULL) {
			struct ccb_hdr *curelm;
			int found;

			/*
			 * Singly-linked list: walk it keeping the previous
			 * element so we can unlink the match.
			 */
			curelm = SLIST_FIRST(list);
			found = 0;
			if (curelm == &abort_ccb->ccb_h) {
				found = 1;
				SLIST_REMOVE_HEAD(list, sim_links.sle);
			} else {
				while(curelm != NULL) {
					struct ccb_hdr *nextelm;

					nextelm =
					    SLIST_NEXT(curelm, sim_links.sle);

					if (nextelm == &abort_ccb->ccb_h) {
						found = 1;
						SLIST_NEXT(curelm,
							   sim_links.sle) =
						    SLIST_NEXT(nextelm,
							       sim_links.sle);
						break;
					}
					curelm = nextelm;
				}
			}

			if (found) {
				abort_ccb->ccb_h.status = CAM_REQ_ABORTED;
				xpt_done(abort_ccb);
				ccb->ccb_h.status = CAM_REQ_CMP;
			} else {
				xpt_print_path(abort_ccb->ccb_h.path);
				printf("Not found\n");
				ccb->ccb_h.status = CAM_PATH_INVALID;
			}
			break;
		}
		/* FALLTHROUGH */
	}
	case XPT_SCSI_IO:
		/* XXX Fully implement the hard ones */
		ccb->ccb_h.status = CAM_UA_ABORT;
		break;
	default:
		ccb->ccb_h.status = CAM_REQ_INVALID;
		break;
	}
	xpt_done(ccb);
}
1742
/*
 * Broadcast an asynchronous event (transfer negotiation change, BDR
 * sent, or bus reset) to CAM for the given channel/target/lun.  For
 * AC_TRANSFER_NEG the current settings are fetched and passed along as
 * the event argument; the other codes carry no argument.
 */
void
ahc_send_async(struct ahc_softc *ahc, char channel, u_int target,
		u_int lun, ac_code code)
{
	/*
	 * NOTE(review): cts is only partially initialized; only the
	 * fields set below plus whatever ahc_get_tran_settings() fills
	 * in are valid when passed to xpt_async().
	 */
	struct	ccb_trans_settings cts;
	struct cam_path *path;
	void *arg;
	int error;

	arg = NULL;
	error = ahc_create_path(ahc, channel, target, lun, &path);

	/* Silently drop the event if a path can't be built. */
	if (error != CAM_REQ_CMP)
		return;

	switch (code) {
	case AC_TRANSFER_NEG:
#ifdef AHC_NEW_TRAN_SETTINGS
		cts.type = CTS_TYPE_CURRENT_SETTINGS;
#else
		cts.flags = CCB_TRANS_CURRENT_SETTINGS;
#endif
		cts.ccb_h.path = path;
		cts.ccb_h.target_id = target;
		cts.ccb_h.target_lun = lun;
		/* Channel B uses the secondary initiator id. */
		ahc_get_tran_settings(ahc, channel == 'A' ? ahc->our_id
							  : ahc->our_id_b,
				      channel, &cts);
		arg = &cts;
		break;
	case AC_SENT_BDR:
	case AC_BUS_RESET:
		break;
	default:
		panic("ahc_send_async: Unexpected async event");
	}
	xpt_async(code, path, arg);
	xpt_free_path(path);
}
1782
/*
 * Platform hook invoked when tagged queuing is enabled/disabled for a
 * device.  Intentionally a no-op on FreeBSD; CAM tracks tag state
 * itself, so no per-platform bookkeeping is required here.
 */
void
ahc_platform_set_tags(struct ahc_softc *ahc,
		      struct ahc_devinfo *devinfo, int enable)
{
}
1788
1789int
1790ahc_platform_alloc(struct ahc_softc *ahc, void *platform_arg)
1791{
1792	ahc->platform_data = malloc(sizeof(struct ahc_platform_data), M_DEVBUF,
1793	    M_NOWAIT | M_ZERO);
1794	if (ahc->platform_data == NULL)
1795		return (ENOMEM);
1796	return (0);
1797}
1798
/*
 * Tear down all FreeBSD-specific resources attached to the softc:
 * register and IRQ bus resources, both CAM SIMs/paths (secondary
 * channel first), the shutdown event handler, and finally the
 * platform data itself.
 *
 * NOTE(review): the device_printf() below looks like leftover debug
 * output, and ahc->platform_data is not set to NULL after the free —
 * verify no caller touches it afterwards.
 */
void
ahc_platform_free(struct ahc_softc *ahc)
{
	struct ahc_platform_data *pdata;

	pdata = ahc->platform_data;
	if (pdata != NULL) {
		device_printf(ahc->dev_softc, "Platform free\n");
		if (pdata->regs != NULL)
			bus_release_resource(ahc->dev_softc,
					     pdata->regs_res_type,
					     pdata->regs_res_id,
					     pdata->regs);

		if (pdata->irq != NULL)
			bus_release_resource(ahc->dev_softc,
					     pdata->irq_res_type,
					     0, pdata->irq);

		/* Deregister the secondary (twin) channel first. */
		if (pdata->sim_b != NULL) {
			xpt_async(AC_LOST_DEVICE, pdata->path_b, NULL);
			xpt_free_path(pdata->path_b);
			xpt_bus_deregister(cam_sim_path(pdata->sim_b));
			cam_sim_free(pdata->sim_b, /*free_devq*/TRUE);
		}
		if (pdata->sim != NULL) {
			xpt_async(AC_LOST_DEVICE, pdata->path, NULL);
			xpt_free_path(pdata->path);
			xpt_bus_deregister(cam_sim_path(pdata->sim));
			cam_sim_free(pdata->sim, /*free_devq*/TRUE);
		}
		if (pdata->eh != NULL)
			EVENTHANDLER_DEREGISTER(shutdown_final, pdata->eh);
		free(ahc->platform_data, M_DEVBUF);
	}
}
1835
/*
 * Softc ordering comparator used by the core when sorting controllers.
 * FreeBSD attaches units in probe order and does not re-sort softcs,
 * so every pair compares equal.
 */
int
ahc_softc_comp(struct ahc_softc *lahc, struct ahc_softc *rahc)
{
	/* We don't sort softcs under FreeBSD so report equal always */
	return (0);
}
1842
/*
 * Device detach method: disconnect the interrupt handler (under the
 * softc lock so no ISR races the teardown), then release everything
 * else via ahc_free().  Always reports success.
 */
int
ahc_detach(device_t dev)
{
	struct ahc_softc *ahc;
	u_long s;

	/* NOTE(review): looks like leftover debug output. */
	device_printf(dev, "detaching device\n");
	ahc = device_get_softc(dev);
	ahc_lock(ahc, &s);
	bus_teardown_intr(dev, ahc->platform_data->irq, ahc->platform_data->ih);
	ahc_unlock(ahc, &s);
	ahc_free(ahc);
	return (0);
}
1857
#if UNUSED
/*
 * Debug helper: hex-dump the bytes of a received target command, eight
 * values per tab-indented line, comma-separated within a line.  Compiled
 * out unless UNUSED is defined non-zero.
 */
static void
ahc_dump_targcmd(struct target_cmd *cmd)
{
	uint8_t *byte;
	uint8_t *last_byte;
	int col;

	/* Dump from initiator_channel up to the start of the next command. */
	byte = &cmd->initiator_channel;
	last_byte = &cmd[1].initiator_channel;

	for (col = 0; byte < last_byte; byte++) {
		if (col == 0)
			printf("\t");
		printf("%#x", *byte);
		col++;
		if (col == 8) {
			printf("\n");
			col = 0;
		} else {
			printf(", ");
		}
	}
}
#endif
1885