aic7xxx_osm.c revision 74094
1/*
2 * Bus independent FreeBSD shim for the aic7xxx based adaptec SCSI controllers
3 *
4 * Copyright (c) 1994-2001 Justin T. Gibbs.
5 * All rights reserved.
6 *
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
9 * are met:
10 * 1. Redistributions of source code must retain the above copyright
11 *    notice, this list of conditions, and the following disclaimer,
12 *    without modification.
13 * 2. The name of the author may not be used to endorse or promote products
14 *    derived from this software without specific prior written permission.
15 *
16 * Alternatively, this software may be distributed under the terms of the
17 * GNU Public License ("GPL").
18 *
19 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
20 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
21 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
22 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
23 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
24 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
25 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
26 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
27 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
28 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
29 * SUCH DAMAGE.
30 *
31 * $Id$
32 *
33 * $FreeBSD: head/sys/dev/aic7xxx/aic7xxx_osm.c 74094 2001-03-11 06:34:17Z gibbs $
34 */
35
36#include <dev/aic7xxx/aic7xxx_freebsd.h>
37#include <dev/aic7xxx/aic7xxx_inline.h>
38
39#ifndef AHC_TMODE_ENABLE
40#define AHC_TMODE_ENABLE 0
41#endif
42
43#define ccb_scb_ptr spriv_ptr0
44
45#ifdef AHC_DEBUG
46static int     ahc_debug = AHC_DEBUG;
47#endif
48
49#if UNUSED
50static void	ahc_dump_targcmd(struct target_cmd *cmd);
51#endif
52static void	ahc_action(struct cam_sim *sim, union ccb *ccb);
53static void	ahc_get_tran_settings(struct ahc_softc *ahc,
54				      int our_id, char channel,
55				      struct ccb_trans_settings *cts);
56static void	ahc_async(void *callback_arg, uint32_t code,
57			  struct cam_path *path, void *arg);
58static void	ahc_execute_scb(void *arg, bus_dma_segment_t *dm_segs,
59				int nsegments, int error);
60static void	ahc_poll(struct cam_sim *sim);
61static void	ahc_setup_data(struct ahc_softc *ahc, struct cam_sim *sim,
62			       struct ccb_scsiio *csio, struct scb *scb);
63static void	ahc_abort_ccb(struct ahc_softc *ahc, struct cam_sim *sim,
64			      union ccb *ccb);
65static int	ahc_create_path(struct ahc_softc *ahc,
66				char channel, u_int target, u_int lun,
67				struct cam_path **path);
68
69static void	ahc_set_recoveryscb(struct ahc_softc *ahc, struct scb *scb);
70
71static int
72ahc_create_path(struct ahc_softc *ahc, char channel, u_int target,
73	        u_int lun, struct cam_path **path)
74{
75	path_id_t path_id;
76
77	if (channel == 'B')
78		path_id = cam_sim_path(ahc->platform_data->sim_b);
79	else
80		path_id = cam_sim_path(ahc->platform_data->sim);
81
82	return (xpt_create_path(path, /*periph*/NULL,
83				path_id, target, lun));
84}
85
86/*
87 * Attach all the sub-devices we can find
88 */
89int
90ahc_attach(struct ahc_softc *ahc)
91{
92	char   ahc_info[256];
93	struct ccb_setasync csa;
94	struct cam_devq *devq;
95	int bus_id;
96	int bus_id2;
97	struct cam_sim *sim;
98	struct cam_sim *sim2;
99	struct cam_path *path;
100	struct cam_path *path2;
101	long s;
102	int count;
103	int error;
104
105	count = 0;
106	sim = NULL;
107	sim2 = NULL;
108
109	ahc_controller_info(ahc, ahc_info);
110	printf("%s\n", ahc_info);
111	ahc_lock(ahc, &s);
112	/* Hook up our interrupt handler */
113	if ((error = bus_setup_intr(ahc->dev_softc, ahc->platform_data->irq,
114				    INTR_TYPE_CAM|INTR_ENTROPY, ahc_platform_intr, ahc,
115				    &ahc->platform_data->ih)) != 0) {
116		device_printf(ahc->dev_softc, "bus_setup_intr() failed: %d\n",
117			      error);
118		goto fail;
119	}
120
121	/*
122	 * Attach secondary channel first if the user has
123	 * declared it the primary channel.
124	 */
125	if ((ahc->features & AHC_TWIN) != 0
126	 && (ahc->flags & AHC_PRIMARY_CHANNEL) != 0) {
127		bus_id = 1;
128		bus_id2 = 0;
129	} else {
130		bus_id = 0;
131		bus_id2 = 1;
132	}
133
134	/*
135	 * Create the device queue for our SIM(s).
136	 */
137	devq = cam_simq_alloc(AHC_MAX_QUEUE);
138	if (devq == NULL)
139		goto fail;
140
141	/*
142	 * Construct our first channel SIM entry
143	 */
144	sim = cam_sim_alloc(ahc_action, ahc_poll, "ahc", ahc,
145			    device_get_unit(ahc->dev_softc),
146			    1, AHC_MAX_QUEUE, devq);
147	if (sim == NULL) {
148		cam_simq_free(devq);
149		goto fail;
150	}
151
152	if (xpt_bus_register(sim, bus_id) != CAM_SUCCESS) {
153		cam_sim_free(sim, /*free_devq*/TRUE);
154		sim = NULL;
155		goto fail;
156	}
157
158	if (xpt_create_path(&path, /*periph*/NULL,
159			    cam_sim_path(sim), CAM_TARGET_WILDCARD,
160			    CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
161		xpt_bus_deregister(cam_sim_path(sim));
162		cam_sim_free(sim, /*free_devq*/TRUE);
163		sim = NULL;
164		goto fail;
165	}
166
167	xpt_setup_ccb(&csa.ccb_h, path, /*priority*/5);
168	csa.ccb_h.func_code = XPT_SASYNC_CB;
169	csa.event_enable = AC_LOST_DEVICE;
170	csa.callback = ahc_async;
171	csa.callback_arg = sim;
172	xpt_action((union ccb *)&csa);
173	count++;
174
175	if (ahc->features & AHC_TWIN) {
176		sim2 = cam_sim_alloc(ahc_action, ahc_poll, "ahc",
177				    ahc, device_get_unit(ahc->dev_softc), 1,
178				    AHC_MAX_QUEUE, devq);
179
180		if (sim2 == NULL) {
181			printf("ahc_attach: Unable to attach second "
182			       "bus due to resource shortage");
183			goto fail;
184		}
185
186		if (xpt_bus_register(sim2, bus_id2) != CAM_SUCCESS) {
187			printf("ahc_attach: Unable to attach second "
188			       "bus due to resource shortage");
189			/*
190			 * We do not want to destroy the device queue
191			 * because the first bus is using it.
192			 */
193			cam_sim_free(sim2, /*free_devq*/FALSE);
194			goto fail;
195		}
196
197		if (xpt_create_path(&path2, /*periph*/NULL,
198				    cam_sim_path(sim2),
199				    CAM_TARGET_WILDCARD,
200				    CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
201			xpt_bus_deregister(cam_sim_path(sim2));
202			cam_sim_free(sim2, /*free_devq*/FALSE);
203			sim2 = NULL;
204			goto fail;
205		}
206		xpt_setup_ccb(&csa.ccb_h, path2, /*priority*/5);
207		csa.ccb_h.func_code = XPT_SASYNC_CB;
208		csa.event_enable = AC_LOST_DEVICE;
209		csa.callback = ahc_async;
210		csa.callback_arg = sim2;
211		xpt_action((union ccb *)&csa);
212		count++;
213	}
214
215fail:
216	if ((ahc->features & AHC_TWIN) != 0
217	 && (ahc->flags & AHC_PRIMARY_CHANNEL) != 0) {
218		ahc->platform_data->sim_b = sim;
219		ahc->platform_data->path_b = path;
220		ahc->platform_data->sim = sim2;
221		ahc->platform_data->path = path2;
222	} else {
223		ahc->platform_data->sim = sim;
224		ahc->platform_data->path = path;
225		ahc->platform_data->sim_b = sim2;
226		ahc->platform_data->path_b = path2;
227	}
228	ahc_unlock(ahc, &s);
229
230	if (count != 0)
231		/* We have to wait until after any system dumps... */
232		ahc->platform_data->eh =
233		    EVENTHANDLER_REGISTER(shutdown_final, ahc_shutdown,
234					  ahc, SHUTDOWN_PRI_DEFAULT);
235
236	return (count);
237}
238
/*
 * Catch an interrupt from the adapter: recover the softc registered
 * with bus_setup_intr() and hand control to the core interrupt code.
 */
void
ahc_platform_intr(void *arg)
{
	ahc_intr((struct ahc_softc *)arg);
}
250
/*
 * We have an scb which has been processed by the
 * adaptor, now we look to see how the operation
 * went.
 *
 * Tears down the transaction (pending list, untagged queue, timeout,
 * DMA mappings), fixes up the CAM status, and returns the CCB to CAM
 * via xpt_done().
 */
void
ahc_done(struct ahc_softc *ahc, struct scb *scb)
{
	union ccb *ccb;

	CAM_DEBUG(scb->io_ctx->ccb_h.path, CAM_DEBUG_TRACE,
		  ("ahc_done - scb %d\n", scb->hscb->tag));

	ccb = scb->io_ctx;
	/* The scb is no longer outstanding on the controller. */
	LIST_REMOVE(scb, pending_links);
	if ((scb->flags & SCB_UNTAGGEDQ) != 0) {
		struct scb_tailq *untagged_q;
		int target_offset;

		target_offset = SCB_GET_TARGET_OFFSET(ahc, scb);
		untagged_q = &ahc->untagged_queues[target_offset];
		TAILQ_REMOVE(untagged_q, scb, links.tqe);
		scb->flags &= ~SCB_UNTAGGEDQ;
		/* Allow the next untagged transaction for this target to run. */
		ahc_run_untagged_queue(ahc, untagged_q);
	}

	/* Cancel the watchdog armed when this command was queued. */
	untimeout(ahc_timeout, (caddr_t)scb, ccb->ccb_h.timeout_ch);

	if ((ccb->ccb_h.flags & CAM_DIR_MASK) != CAM_DIR_NONE) {
		bus_dmasync_op_t op;

		if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN)
			op = BUS_DMASYNC_POSTREAD;
		else
			op = BUS_DMASYNC_POSTWRITE;
		/* Sync then unload the data buffer mapping. */
		bus_dmamap_sync(ahc->buffer_dmat, scb->dmamap, op);
		bus_dmamap_unload(ahc->buffer_dmat, scb->dmamap);
	}

	/* Target mode continuations complete here; no sense handling. */
	if (ccb->ccb_h.func_code == XPT_CONT_TARGET_IO) {
		if (ahc_get_transaction_status(scb) == CAM_REQ_INPROG)
			ccb->ccb_h.status |= CAM_REQ_CMP;
		ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
		ahc_free_scb(ahc, scb);
		xpt_done(ccb);
		return;
	}

	/*
	 * If the recovery SCB completes, we have to be
	 * out of our timeout.
	 */
	if ((scb->flags & SCB_RECOVERY_SCB) != 0) {
		struct	scb *list_scb;

		/*
		 * We were able to complete the command successfully,
		 * so reinstate the timeouts for all other pending
		 * commands.
		 */
		LIST_FOREACH(list_scb, &ahc->pending_scbs, pending_links) {
			union ccb *ccb;	/* NOTE: shadows the outer 'ccb' */
			uint64_t time;

			ccb = list_scb->io_ctx;
			if (ccb->ccb_h.timeout == CAM_TIME_INFINITY)
				continue;

			/* Convert the CAM timeout (milliseconds) to ticks. */
			time = ccb->ccb_h.timeout;
			time *= hz;
			time /= 1000;
			ccb->ccb_h.timeout_ch =
			    timeout(ahc_timeout, list_scb, time);
		}

		/* A BDR/abort used for recovery really means a timeout. */
		if (ahc_get_transaction_status(scb) == CAM_BDR_SENT
		 || ahc_get_transaction_status(scb) == CAM_REQ_ABORTED)
			ahc_set_transaction_status(scb, CAM_CMD_TIMEOUT);
		ahc_print_path(ahc, scb);
		printf("no longer in timeout, status = %x\n",
		       ccb->ccb_h.status);
	}

	/* Don't clobber any existing error state */
	if (ahc_get_transaction_status(scb) == CAM_REQ_INPROG) {
		ccb->ccb_h.status |= CAM_REQ_CMP;
	} else if ((scb->flags & SCB_SENSE) != 0) {
		/*
		 * We performed autosense retrieval.
		 *
		 * Zero any sense not transferred by the
		 * device.  The SCSI spec mandates that any
		 * untransfered data should be assumed to be
		 * zero.  Complete the 'bounce' of sense information
		 * through buffers accessible via bus-space by
		 * copying it into the clients csio.
		 */
		memset(&ccb->csio.sense_data, 0, sizeof(ccb->csio.sense_data));
		memcpy(&ccb->csio.sense_data,
		       ahc_get_sense_buf(ahc, scb),
		       (scb->sg_list->len & AHC_SG_LEN_MASK)
		       - ccb->csio.sense_resid);
		scb->io_ctx->ccb_h.status |= CAM_AUTOSNS_VALID;
	}
	ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
	ahc_free_scb(ahc, scb);
	xpt_done(ccb);
}
359
360static void
361ahc_action(struct cam_sim *sim, union ccb *ccb)
362{
363	struct	ahc_softc *ahc;
364	struct	tmode_lstate *lstate;
365	u_int	target_id;
366	u_int	our_id;
367	long	s;
368
369	CAM_DEBUG(ccb->ccb_h.path, CAM_DEBUG_TRACE, ("ahc_action\n"));
370
371	ahc = (struct ahc_softc *)cam_sim_softc(sim);
372
373	target_id = ccb->ccb_h.target_id;
374	our_id = SIM_SCSI_ID(ahc, sim);
375
376	switch (ccb->ccb_h.func_code) {
377	/* Common cases first */
378	case XPT_ACCEPT_TARGET_IO:	/* Accept Host Target Mode CDB */
379	case XPT_CONT_TARGET_IO:/* Continue Host Target I/O Connection*/
380	{
381		struct	   tmode_tstate *tstate;
382		cam_status status;
383
384		status = ahc_find_tmode_devs(ahc, sim, ccb, &tstate,
385					     &lstate, TRUE);
386
387		if (status != CAM_REQ_CMP) {
388			if (ccb->ccb_h.func_code == XPT_CONT_TARGET_IO) {
389				/* Response from the black hole device */
390				tstate = NULL;
391				lstate = ahc->black_hole;
392			} else {
393				ccb->ccb_h.status = status;
394				xpt_done(ccb);
395				break;
396			}
397		}
398		if (ccb->ccb_h.func_code == XPT_ACCEPT_TARGET_IO) {
399
400			ahc_lock(ahc, &s);
401			SLIST_INSERT_HEAD(&lstate->accept_tios, &ccb->ccb_h,
402					  sim_links.sle);
403			ccb->ccb_h.status = CAM_REQ_INPROG;
404			if ((ahc->flags & AHC_TQINFIFO_BLOCKED) != 0)
405				ahc_run_tqinfifo(ahc, /*paused*/FALSE);
406			ahc_unlock(ahc, &s);
407			break;
408		}
409
410		/*
411		 * The target_id represents the target we attempt to
412		 * select.  In target mode, this is the initiator of
413		 * the original command.
414		 */
415		our_id = target_id;
416		target_id = ccb->csio.init_id;
417		/* FALLTHROUGH */
418	}
419	case XPT_SCSI_IO:	/* Execute the requested I/O operation */
420	case XPT_RESET_DEV:	/* Bus Device Reset the specified SCSI device */
421	{
422		struct	scb *scb;
423		struct	hardware_scb *hscb;
424
425		if ((ahc->flags & AHC_INITIATORROLE) == 0
426		 && (ccb->ccb_h.func_code == XPT_SCSI_IO
427		  || ccb->ccb_h.func_code == XPT_RESET_DEV)) {
428			ccb->ccb_h.status = CAM_PROVIDE_FAIL;
429			xpt_done(ccb);
430		}
431
432		/*
433		 * get an scb to use.
434		 */
435		ahc_lock(ahc, &s);
436		if ((scb = ahc_get_scb(ahc)) == NULL) {
437
438			xpt_freeze_simq(sim, /*count*/1);
439			ahc->flags |= AHC_RESOURCE_SHORTAGE;
440			ahc_unlock(ahc, &s);
441			ccb->ccb_h.status = CAM_REQUEUE_REQ;
442			xpt_done(ccb);
443			return;
444		}
445		ahc_unlock(ahc, &s);
446
447		hscb = scb->hscb;
448
449		CAM_DEBUG(ccb->ccb_h.path, CAM_DEBUG_SUBTRACE,
450			  ("start scb(%p)\n", scb));
451		scb->io_ctx = ccb;
452		/*
453		 * So we can find the SCB when an abort is requested
454		 */
455		ccb->ccb_h.ccb_scb_ptr = scb;
456
457		/*
458		 * Put all the arguments for the xfer in the scb
459		 */
460		hscb->control = 0;
461		hscb->scsiid = BUILD_SCSIID(ahc, sim, target_id, our_id);
462		hscb->lun = ccb->ccb_h.target_lun;
463		if (ccb->ccb_h.func_code == XPT_RESET_DEV) {
464			hscb->cdb_len = 0;
465			scb->flags |= SCB_DEVICE_RESET;
466			hscb->control |= MK_MESSAGE;
467			ahc_execute_scb(scb, NULL, 0, 0);
468		} else {
469			if (ccb->ccb_h.func_code == XPT_CONT_TARGET_IO) {
470				struct target_data *tdata;
471
472				tdata = &hscb->shared_data.tdata;
473				if (ahc->pending_device == lstate) {
474					scb->flags |= SCB_TARGET_IMMEDIATE;
475					ahc->pending_device = NULL;
476				}
477				hscb->control |= TARGET_SCB;
478				tdata->target_phases = IDENTIFY_SEEN;
479				if ((ccb->ccb_h.flags & CAM_SEND_STATUS) != 0) {
480					tdata->target_phases |= SPHASE_PENDING;
481					tdata->scsi_status =
482					    ccb->csio.scsi_status;
483				}
484				tdata->initiator_tag = ccb->csio.tag_id;
485			}
486			if (ccb->ccb_h.flags & CAM_TAG_ACTION_VALID)
487				hscb->control |= ccb->csio.tag_action;
488
489			ahc_setup_data(ahc, sim, &ccb->csio, scb);
490		}
491		break;
492	}
493	case XPT_NOTIFY_ACK:
494	case XPT_IMMED_NOTIFY:
495	{
496		struct	   tmode_tstate *tstate;
497		struct	   tmode_lstate *lstate;
498		cam_status status;
499
500		status = ahc_find_tmode_devs(ahc, sim, ccb, &tstate,
501					     &lstate, TRUE);
502
503		if (status != CAM_REQ_CMP) {
504			ccb->ccb_h.status = status;
505			xpt_done(ccb);
506			break;
507		}
508		SLIST_INSERT_HEAD(&lstate->immed_notifies, &ccb->ccb_h,
509				  sim_links.sle);
510		ccb->ccb_h.status = CAM_REQ_INPROG;
511		ahc_send_lstate_events(ahc, lstate);
512		break;
513	}
514	case XPT_EN_LUN:		/* Enable LUN as a target */
515		ahc_handle_en_lun(ahc, sim, ccb);
516		xpt_done(ccb);
517		break;
518	case XPT_ABORT:			/* Abort the specified CCB */
519	{
520		ahc_abort_ccb(ahc, sim, ccb);
521		break;
522	}
523	case XPT_SET_TRAN_SETTINGS:
524	{
525#ifdef AHC_NEW_TRAN_SETTINGS
526		struct	ahc_devinfo devinfo;
527		struct	ccb_trans_settings *cts;
528		struct	ccb_trans_settings_scsi *scsi;
529		struct	ccb_trans_settings_spi *spi;
530		struct	ahc_initiator_tinfo *tinfo;
531		struct	tmode_tstate *tstate;
532		uint16_t *discenable;
533		uint16_t *tagenable;
534		u_int	update_type;
535
536		cts = &ccb->cts;
537		scsi = &cts->proto_specific.scsi;
538		spi = &cts->xport_specific.spi;
539		ahc_compile_devinfo(&devinfo, SIM_SCSI_ID(ahc, sim),
540				    cts->ccb_h.target_id,
541				    cts->ccb_h.target_lun,
542				    SIM_CHANNEL(ahc, sim),
543				    ROLE_UNKNOWN);
544		tinfo = ahc_fetch_transinfo(ahc, devinfo.channel,
545					    devinfo.our_scsiid,
546					    devinfo.target, &tstate);
547		update_type = 0;
548		if (cts->type == CTS_TYPE_CURRENT_SETTINGS) {
549			update_type |= AHC_TRANS_GOAL;
550			discenable = &tstate->discenable;
551			tagenable = &tstate->tagenable;
552			tinfo->current.protocol_version =
553			    cts->protocol_version;
554			tinfo->current.transport_version =
555			    cts->transport_version;
556			tinfo->goal.protocol_version =
557			    cts->protocol_version;
558			tinfo->goal.transport_version =
559			    cts->transport_version;
560		} else if (cts->type == CTS_TYPE_USER_SETTINGS) {
561			update_type |= AHC_TRANS_USER;
562			discenable = &ahc->user_discenable;
563			tagenable = &ahc->user_tagenable;
564			tinfo->user.protocol_version =
565			    cts->protocol_version;
566			tinfo->user.transport_version =
567			    cts->transport_version;
568		} else {
569			ccb->ccb_h.status = CAM_REQ_INVALID;
570			xpt_done(ccb);
571			break;
572		}
573
574		ahc_lock(ahc, &s);
575
576		if ((spi->valid & CTS_SPI_VALID_DISC) != 0) {
577			if ((spi->flags & CTS_SPI_FLAGS_DISC_ENB) != 0)
578				*discenable |= devinfo.target_mask;
579			else
580				*discenable &= ~devinfo.target_mask;
581		}
582
583		if ((scsi->valid & CTS_SCSI_VALID_TQ) != 0) {
584			if ((scsi->flags & CTS_SCSI_FLAGS_TAG_ENB) != 0)
585				*tagenable |= devinfo.target_mask;
586			else
587				*tagenable &= ~devinfo.target_mask;
588		}
589
590		if ((spi->valid & CTS_SPI_VALID_BUS_WIDTH) != 0) {
591			ahc_validate_width(ahc, /*tinfo limit*/NULL,
592					   &spi->bus_width, ROLE_UNKNOWN);
593			ahc_set_width(ahc, &devinfo, spi->bus_width,
594				      update_type, /*paused*/FALSE);
595		}
596
597		if ((spi->valid & CTS_SPI_VALID_PPR_OPTIONS) == 0) {
598			if (update_type == AHC_TRANS_USER)
599				spi->ppr_options = tinfo->user.ppr_options;
600			else
601				spi->ppr_options = tinfo->goal.ppr_options;
602		}
603
604		if ((spi->valid & CTS_SPI_VALID_SYNC_OFFSET) == 0) {
605			if (update_type == AHC_TRANS_USER)
606				spi->sync_offset = tinfo->user.offset;
607			else
608				spi->sync_offset = tinfo->goal.offset;
609		}
610
611		if ((spi->valid & CTS_SPI_VALID_SYNC_RATE) == 0) {
612			if (update_type == AHC_TRANS_USER)
613				spi->sync_period = tinfo->user.period;
614			else
615				spi->sync_period = tinfo->goal.period;
616		}
617
618		if (((spi->valid & CTS_SPI_VALID_SYNC_RATE) != 0)
619		 || ((spi->valid & CTS_SPI_VALID_SYNC_OFFSET) != 0)) {
620			struct ahc_syncrate *syncrate;
621			u_int maxsync;
622
623			if ((ahc->features & AHC_ULTRA2) != 0)
624				maxsync = AHC_SYNCRATE_DT;
625			else if ((ahc->features & AHC_ULTRA) != 0)
626				maxsync = AHC_SYNCRATE_ULTRA;
627			else
628				maxsync = AHC_SYNCRATE_FAST;
629
630			syncrate = ahc_find_syncrate(ahc, &spi->sync_period,
631						     &spi->ppr_options,
632						     maxsync);
633			ahc_validate_offset(ahc, /*tinfo limit*/NULL,
634					    syncrate, &spi->sync_offset,
635					    spi->bus_width, ROLE_UNKNOWN);
636
637			/* We use a period of 0 to represent async */
638			if (spi->sync_offset == 0) {
639				spi->sync_period = 0;
640				spi->ppr_options = 0;
641			}
642
643			ahc_set_syncrate(ahc, &devinfo, syncrate,
644					 spi->sync_period, spi->sync_offset,
645					 spi->ppr_options, update_type,
646					 /*paused*/FALSE);
647		}
648		ahc_unlock(ahc, &s);
649		ccb->ccb_h.status = CAM_REQ_CMP;
650		xpt_done(ccb);
651#else
652		struct	  ahc_devinfo devinfo;
653		struct	  ccb_trans_settings *cts;
654		struct	  ahc_initiator_tinfo *tinfo;
655		struct	  tmode_tstate *tstate;
656		uint16_t *discenable;
657		uint16_t *tagenable;
658		u_int	  update_type;
659		long	  s;
660
661		cts = &ccb->cts;
662		ahc_compile_devinfo(&devinfo, SIM_SCSI_ID(ahc, sim),
663				    cts->ccb_h.target_id,
664				    cts->ccb_h.target_lun,
665				    SIM_CHANNEL(ahc, sim),
666				    ROLE_UNKNOWN);
667		tinfo = ahc_fetch_transinfo(ahc, devinfo.channel,
668					    devinfo.our_scsiid,
669					    devinfo.target, &tstate);
670		update_type = 0;
671		if ((cts->flags & CCB_TRANS_CURRENT_SETTINGS) != 0) {
672			update_type |= AHC_TRANS_GOAL;
673			discenable = &tstate->discenable;
674			tagenable = &tstate->tagenable;
675		} else if ((cts->flags & CCB_TRANS_USER_SETTINGS) != 0) {
676			update_type |= AHC_TRANS_USER;
677			discenable = &ahc->user_discenable;
678			tagenable = &ahc->user_tagenable;
679		} else {
680			ccb->ccb_h.status = CAM_REQ_INVALID;
681			xpt_done(ccb);
682			break;
683		}
684
685		ahc_lock(ahc, &s);
686
687		if ((cts->valid & CCB_TRANS_DISC_VALID) != 0) {
688			if ((cts->flags & CCB_TRANS_DISC_ENB) != 0)
689				*discenable |= devinfo.target_mask;
690			else
691				*discenable &= ~devinfo.target_mask;
692		}
693
694		if ((cts->valid & CCB_TRANS_TQ_VALID) != 0) {
695			if ((cts->flags & CCB_TRANS_TAG_ENB) != 0)
696				*tagenable |= devinfo.target_mask;
697			else
698				*tagenable &= ~devinfo.target_mask;
699		}
700
701		if ((cts->valid & CCB_TRANS_BUS_WIDTH_VALID) != 0) {
702			ahc_validate_width(ahc, /*tinfo limit*/NULL,
703					   &cts->bus_width, ROLE_UNKNOWN);
704			ahc_set_width(ahc, &devinfo, cts->bus_width,
705				      update_type, /*paused*/FALSE);
706		}
707
708		if ((cts->valid & CCB_TRANS_SYNC_OFFSET_VALID) == 0) {
709			if (update_type == AHC_TRANS_USER)
710				cts->sync_offset = tinfo->user.offset;
711			else
712				cts->sync_offset = tinfo->goal.offset;
713		}
714
715		if ((cts->valid & CCB_TRANS_SYNC_RATE_VALID) == 0) {
716			if (update_type == AHC_TRANS_USER)
717				cts->sync_period = tinfo->user.period;
718			else
719				cts->sync_period = tinfo->goal.period;
720		}
721
722		if (((cts->valid & CCB_TRANS_SYNC_RATE_VALID) != 0)
723		 || ((cts->valid & CCB_TRANS_SYNC_OFFSET_VALID) != 0)) {
724			struct ahc_syncrate *syncrate;
725			u_int ppr_options;
726			u_int maxsync;
727
728			if ((ahc->features & AHC_ULTRA2) != 0)
729				maxsync = AHC_SYNCRATE_DT;
730			else if ((ahc->features & AHC_ULTRA) != 0)
731				maxsync = AHC_SYNCRATE_ULTRA;
732			else
733				maxsync = AHC_SYNCRATE_FAST;
734
735			ppr_options = 0;
736			if (cts->sync_period <= 9)
737				ppr_options = MSG_EXT_PPR_DT_REQ;
738
739			syncrate = ahc_find_syncrate(ahc, &cts->sync_period,
740						     &ppr_options,
741						     maxsync);
742			ahc_validate_offset(ahc, /*tinfo limit*/NULL,
743					    syncrate, &cts->sync_offset,
744					    MSG_EXT_WDTR_BUS_8_BIT,
745					    ROLE_UNKNOWN);
746
747			/* We use a period of 0 to represent async */
748			if (cts->sync_offset == 0) {
749				cts->sync_period = 0;
750				ppr_options = 0;
751			}
752
753			if (ppr_options == MSG_EXT_PPR_DT_REQ
754			 && tinfo->user.transport_version >= 3) {
755				tinfo->goal.transport_version =
756				    tinfo->user.transport_version;
757				tinfo->current.transport_version =
758				    tinfo->user.transport_version;
759			}
760
761			ahc_set_syncrate(ahc, &devinfo, syncrate,
762					 cts->sync_period, cts->sync_offset,
763					 ppr_options, update_type,
764					 /*paused*/FALSE);
765		}
766		ahc_unlock(ahc, &s);
767		ccb->ccb_h.status = CAM_REQ_CMP;
768		xpt_done(ccb);
769#endif
770		break;
771	}
772	case XPT_GET_TRAN_SETTINGS:
773	/* Get default/user set transfer settings for the target */
774	{
775
776		ahc_lock(ahc, &s);
777		ahc_get_tran_settings(ahc, SIM_SCSI_ID(ahc, sim),
778				      SIM_CHANNEL(ahc, sim), &ccb->cts);
779		ahc_unlock(ahc, &s);
780		xpt_done(ccb);
781		break;
782	}
783	case XPT_CALC_GEOMETRY:
784	{
785		struct	  ccb_calc_geometry *ccg;
786		uint32_t size_mb;
787		uint32_t secs_per_cylinder;
788		int	  extended;
789
790		ccg = &ccb->ccg;
791		size_mb = ccg->volume_size
792			/ ((1024L * 1024L) / ccg->block_size);
793		extended = SIM_IS_SCSIBUS_B(ahc, sim)
794			? ahc->flags & AHC_EXTENDED_TRANS_B
795			: ahc->flags & AHC_EXTENDED_TRANS_A;
796
797		if (size_mb > 1024 && extended) {
798			ccg->heads = 255;
799			ccg->secs_per_track = 63;
800		} else {
801			ccg->heads = 64;
802			ccg->secs_per_track = 32;
803		}
804		secs_per_cylinder = ccg->heads * ccg->secs_per_track;
805		ccg->cylinders = ccg->volume_size / secs_per_cylinder;
806		ccb->ccb_h.status = CAM_REQ_CMP;
807		xpt_done(ccb);
808		break;
809	}
810	case XPT_RESET_BUS:		/* Reset the specified SCSI bus */
811	{
812		int  found;
813
814		ahc_lock(ahc, &s);
815		found = ahc_reset_channel(ahc, SIM_CHANNEL(ahc, sim),
816					  /*initiate reset*/TRUE);
817		ahc_unlock(ahc, &s);
818		if (bootverbose) {
819			xpt_print_path(SIM_PATH(ahc, sim));
820			printf("SCSI bus reset delivered. "
821			       "%d SCBs aborted.\n", found);
822		}
823		ccb->ccb_h.status = CAM_REQ_CMP;
824		xpt_done(ccb);
825		break;
826	}
827	case XPT_TERM_IO:		/* Terminate the I/O process */
828		/* XXX Implement */
829		ccb->ccb_h.status = CAM_REQ_INVALID;
830		xpt_done(ccb);
831		break;
832	case XPT_PATH_INQ:		/* Path routing inquiry */
833	{
834		struct ccb_pathinq *cpi = &ccb->cpi;
835
836		cpi->version_num = 1; /* XXX??? */
837		cpi->hba_inquiry = PI_SDTR_ABLE|PI_TAG_ABLE;
838		if ((ahc->features & AHC_WIDE) != 0)
839			cpi->hba_inquiry |= PI_WIDE_16;
840		if ((ahc->features & AHC_TARGETMODE) != 0) {
841			cpi->target_sprt = PIT_PROCESSOR
842					 | PIT_DISCONNECT
843					 | PIT_TERM_IO;
844		} else {
845			cpi->target_sprt = 0;
846		}
847		cpi->hba_misc = 0;
848		cpi->hba_eng_cnt = 0;
849		cpi->max_target = (ahc->features & AHC_WIDE) ? 15 : 7;
850		cpi->max_lun = AHC_NUM_LUNS - 1;
851		if (SIM_IS_SCSIBUS_B(ahc, sim)) {
852			cpi->initiator_id = ahc->our_id_b;
853			if ((ahc->flags & AHC_RESET_BUS_B) == 0)
854				cpi->hba_misc |= PIM_NOBUSRESET;
855		} else {
856			cpi->initiator_id = ahc->our_id;
857			if ((ahc->flags & AHC_RESET_BUS_A) == 0)
858				cpi->hba_misc |= PIM_NOBUSRESET;
859		}
860		cpi->bus_id = cam_sim_bus(sim);
861		cpi->base_transfer_speed = 3300;
862		strncpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN);
863		strncpy(cpi->hba_vid, "Adaptec", HBA_IDLEN);
864		strncpy(cpi->dev_name, cam_sim_name(sim), DEV_IDLEN);
865		cpi->unit_number = cam_sim_unit(sim);
866#ifdef AHC_NEW_TRAN_SETTINGS
867		cpi->protocol = PROTO_SCSI;
868		cpi->protocol_version = SCSI_REV_2;
869		cpi->transport = XPORT_SPI;
870		cpi->transport_version = 2;
871		cpi->xport_specific.spi.ppr_options = SID_SPI_CLOCK_ST;
872		if ((ahc->features & AHC_DT) != 0) {
873			cpi->transport_version = 3;
874			cpi->xport_specific.spi.ppr_options =
875			    SID_SPI_CLOCK_DT_ST;
876		}
877#endif
878		cpi->ccb_h.status = CAM_REQ_CMP;
879		xpt_done(ccb);
880		break;
881	}
882	default:
883		ccb->ccb_h.status = CAM_PROVIDE_FAIL;
884		xpt_done(ccb);
885		break;
886	}
887}
888
/*
 * Fill in a CCB_TRANS_SETTINGS request with the current or user
 * (default) negotiation state for the addressed target.  Two bodies
 * are provided: the new-style CTS_* interface and the legacy
 * CCB_TRANS_* interface, selected by AHC_NEW_TRAN_SETTINGS.
 */
static void
ahc_get_tran_settings(struct ahc_softc *ahc, int our_id, char channel,
		      struct ccb_trans_settings *cts)
{
#ifdef AHC_NEW_TRAN_SETTINGS
	struct	ahc_devinfo devinfo;
	struct	ccb_trans_settings_scsi *scsi;
	struct	ccb_trans_settings_spi *spi;
	struct	ahc_initiator_tinfo *targ_info;
	struct	tmode_tstate *tstate;
	struct	ahc_transinfo *tinfo;

	scsi = &cts->proto_specific.scsi;
	spi = &cts->xport_specific.spi;
	ahc_compile_devinfo(&devinfo, our_id,
			    cts->ccb_h.target_id,
			    cts->ccb_h.target_lun,
			    channel, ROLE_UNKNOWN);
	targ_info = ahc_fetch_transinfo(ahc, devinfo.channel,
					devinfo.our_scsiid,
					devinfo.target, &tstate);

	/* Choose the active ("current") or default ("user") settings. */
	if (cts->type == CTS_TYPE_CURRENT_SETTINGS)
		tinfo = &targ_info->current;
	else
		tinfo = &targ_info->user;

	scsi->flags &= ~CTS_SCSI_FLAGS_TAG_ENB;
	spi->flags &= ~CTS_SPI_FLAGS_DISC_ENB;
	if (cts->type == CTS_TYPE_USER_SETTINGS) {
		if ((ahc->user_discenable & devinfo.target_mask) != 0)
			spi->flags |= CTS_SPI_FLAGS_DISC_ENB;

		if ((ahc->user_tagenable & devinfo.target_mask) != 0)
			scsi->flags |= CTS_SCSI_FLAGS_TAG_ENB;
	} else {
		if ((tstate->discenable & devinfo.target_mask) != 0)
			spi->flags |= CTS_SPI_FLAGS_DISC_ENB;

		if ((tstate->tagenable & devinfo.target_mask) != 0)
			scsi->flags |= CTS_SCSI_FLAGS_TAG_ENB;
	}
	cts->protocol_version = tinfo->protocol_version;
	cts->transport_version = tinfo->transport_version;

	spi->sync_period = tinfo->period;
	spi->sync_offset = tinfo->offset;
	spi->bus_width = tinfo->width;
	spi->ppr_options = tinfo->ppr_options;

	cts->protocol = PROTO_SCSI;
	cts->transport = XPORT_SPI;
	spi->valid = CTS_SPI_VALID_SYNC_RATE
		   | CTS_SPI_VALID_SYNC_OFFSET
		   | CTS_SPI_VALID_BUS_WIDTH
		   | CTS_SPI_VALID_PPR_OPTIONS;

	/* Disconnect/tagging only make sense for a concrete lun. */
	if (cts->ccb_h.target_lun != CAM_LUN_WILDCARD) {
		scsi->valid = CTS_SCSI_VALID_TQ;
		spi->valid |= CTS_SPI_VALID_DISC;
	} else {
		scsi->valid = 0;
	}

	cts->ccb_h.status = CAM_REQ_CMP;
#else
	struct	ahc_devinfo devinfo;
	struct	ahc_initiator_tinfo *targ_info;
	struct	tmode_tstate *tstate;
	struct	ahc_transinfo *tinfo;
	long	s;

	ahc_compile_devinfo(&devinfo, our_id,
			    cts->ccb_h.target_id,
			    cts->ccb_h.target_lun,
			    channel, ROLE_UNKNOWN);
	targ_info = ahc_fetch_transinfo(ahc, devinfo.channel,
					devinfo.our_scsiid,
					devinfo.target, &tstate);

	/* Choose the active ("current") or default ("user") settings. */
	if ((cts->flags & CCB_TRANS_CURRENT_SETTINGS) != 0)
		tinfo = &targ_info->current;
	else
		tinfo = &targ_info->user;

	ahc_lock(ahc, &s);

	cts->flags &= ~(CCB_TRANS_DISC_ENB|CCB_TRANS_TAG_ENB);
	if ((cts->flags & CCB_TRANS_CURRENT_SETTINGS) == 0) {
		if ((ahc->user_discenable & devinfo.target_mask) != 0)
			cts->flags |= CCB_TRANS_DISC_ENB;

		if ((ahc->user_tagenable & devinfo.target_mask) != 0)
			cts->flags |= CCB_TRANS_TAG_ENB;
	} else {
		if ((tstate->discenable & devinfo.target_mask) != 0)
			cts->flags |= CCB_TRANS_DISC_ENB;

		if ((tstate->tagenable & devinfo.target_mask) != 0)
			cts->flags |= CCB_TRANS_TAG_ENB;
	}
	cts->sync_period = tinfo->period;
	cts->sync_offset = tinfo->offset;
	cts->bus_width = tinfo->width;

	ahc_unlock(ahc, &s);

	cts->valid = CCB_TRANS_SYNC_RATE_VALID
		   | CCB_TRANS_SYNC_OFFSET_VALID
		   | CCB_TRANS_BUS_WIDTH_VALID;

	/* Disconnect/tagging only make sense for a concrete lun. */
	if (cts->ccb_h.target_lun != CAM_LUN_WILDCARD)
		cts->valid |= CCB_TRANS_DISC_VALID|CCB_TRANS_TQ_VALID;

	cts->ccb_h.status = CAM_REQ_CMP;
#endif
}
1006
1007static void
1008ahc_async(void *callback_arg, uint32_t code, struct cam_path *path, void *arg)
1009{
1010	struct ahc_softc *ahc;
1011	struct cam_sim *sim;
1012
1013	sim = (struct cam_sim *)callback_arg;
1014	ahc = (struct ahc_softc *)cam_sim_softc(sim);
1015	switch (code) {
1016	case AC_LOST_DEVICE:
1017	{
1018		struct	ahc_devinfo devinfo;
1019		long	s;
1020
1021		ahc_compile_devinfo(&devinfo, SIM_SCSI_ID(ahc, sim),
1022				    xpt_path_target_id(path),
1023				    xpt_path_lun_id(path),
1024				    SIM_CHANNEL(ahc, sim),
1025				    ROLE_UNKNOWN);
1026
1027		/*
1028		 * Revert to async/narrow transfers
1029		 * for the next device.
1030		 */
1031		ahc_lock(ahc, &s);
1032		ahc_set_width(ahc, &devinfo, MSG_EXT_WDTR_BUS_8_BIT,
1033			      AHC_TRANS_GOAL|AHC_TRANS_CUR, /*paused*/FALSE);
1034		ahc_set_syncrate(ahc, &devinfo, /*syncrate*/NULL,
1035				 /*period*/0, /*offset*/0, /*ppr_options*/0,
1036				 AHC_TRANS_GOAL|AHC_TRANS_CUR,
1037				 /*paused*/FALSE);
1038		ahc_unlock(ahc, &s);
1039		break;
1040	}
1041	default:
1042		break;
1043	}
1044}
1045
/*
 * Completion callback for bus_dmamap_load() (also invoked directly by
 * ahc_setup_data() for pre-mapped or zero-length transfers).  Copies
 * the DMA segment list into the SCB's hardware S/G list, fills in the
 * hardware SCB's transfer-negotiation fields, arms the CAM timeout,
 * and queues the SCB to the controller.  On any failure path the SCB
 * is freed and the CCB completed via xpt_done().
 */
static void
ahc_execute_scb(void *arg, bus_dma_segment_t *dm_segs, int nsegments,
		int error)
{
	struct	scb *scb;
	union	ccb *ccb;
	struct	ahc_softc *ahc;
	struct	ahc_initiator_tinfo *tinfo;
	struct	tmode_tstate *tstate;
	u_int	mask;
	long	s;

	scb = (struct scb *)arg;
	ccb = scb->io_ctx;
	ahc = scb->ahc_softc;

	/* The DMA mapping failed; report the error and retire the SCB. */
	if (error != 0) {
		if (error == EFBIG)
			ahc_set_transaction_status(scb, CAM_REQ_TOO_BIG);
		else
			ahc_set_transaction_status(scb, CAM_REQ_CMP_ERR);
		if (nsegments != 0)
			bus_dmamap_unload(ahc->buffer_dmat, scb->dmamap);
		ahc_lock(ahc, &s);
		ahc_free_scb(ahc, scb);
		ahc_unlock(ahc, &s);
		xpt_done(ccb);
		return;
	}
	if (nsegments != 0) {
		struct	  ahc_dma_seg *sg;
		bus_dma_segment_t *end_seg;
		bus_dmasync_op_t op;

		end_seg = dm_segs + nsegments;

		/* Copy the segments into our SG list */
		sg = scb->sg_list;
		while (dm_segs < end_seg) {
			sg->addr = dm_segs->ds_addr;
/* XXX Add in the 5th byte of the address later. */
			sg->len = dm_segs->ds_len;
			sg++;
			dm_segs++;
		}

		/*
		 * Note where to find the SG entries in bus space.
		 * We also set the full residual flag which the
		 * sequencer will clear as soon as a data transfer
		 * occurs.
		 */
		scb->hscb->sgptr = scb->sg_list_phys | SG_FULL_RESID;

		if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN)
			op = BUS_DMASYNC_PREREAD;
		else
			op = BUS_DMASYNC_PREWRITE;

		bus_dmamap_sync(ahc->buffer_dmat, scb->dmamap, op);

		if (ccb->ccb_h.func_code == XPT_CONT_TARGET_IO) {
			struct target_data *tdata;

			/* Record the pending data phase for target mode. */
			tdata = &scb->hscb->shared_data.tdata;
			tdata->target_phases |= DPHASE_PENDING;
			if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_OUT)
				tdata->data_phase = P_DATAOUT;
			else
				tdata->data_phase = P_DATAIN;

			/*
			 * If the transfer is of an odd length and in the
			 * "in" direction (scsi->HostBus), then it may
			 * trigger a bug in the 'WideODD' feature of
			 * non-Ultra2 chips.  Force the total data-length
			 * to be even by adding an extra, 1 byte, SG,
			 * element.  We do this even if we are not currently
			 * negotiated wide as negotiation could occur before
			 * this command is executed.
			 */
			if ((ahc->bugs & AHC_TMODE_WIDEODD_BUG) != 0
			 && (ccb->csio.dxfer_len & 0x1) != 0
			 && (ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {

				nsegments++;
				if (nsegments > AHC_NSEG) {

					/* No room for the pad segment. */
					ahc_set_transaction_status(scb,
					    CAM_REQ_TOO_BIG);
					bus_dmamap_unload(ahc->buffer_dmat,
							  scb->dmamap);
					ahc_lock(ahc, &s);
					ahc_free_scb(ahc, scb);
					ahc_unlock(ahc, &s);
					xpt_done(ccb);
					return;
				}
				/* Pad with a one byte transfer to/from
				 * the dedicated bounce buffer. */
				sg->addr = ahc->dma_bug_buf;
				sg->len = 1;
				sg++;
			}
		}
		/* Mark the final segment so the sequencer knows when to stop. */
		sg--;
		sg->len |= AHC_DMA_LAST_SEG;

		/* Copy the first SG into the "current" data pointer area */
		scb->hscb->dataptr = scb->sg_list->addr;
		scb->hscb->datacnt = scb->sg_list->len;
	} else {
		/* No data phase for this command. */
		scb->hscb->sgptr = SG_LIST_NULL;
		scb->hscb->dataptr = 0;
		scb->hscb->datacnt = 0;
	}

	scb->sg_count = nsegments;

	ahc_lock(ahc, &s);

	/*
	 * Last time we need to check if this SCB needs to
	 * be aborted.
	 */
	if (ahc_get_transaction_status(scb) != CAM_REQ_INPROG) {
		if (nsegments != 0)
			bus_dmamap_unload(ahc->buffer_dmat,
					  scb->dmamap);
		ahc_free_scb(ahc, scb);
		ahc_unlock(ahc, &s);
		xpt_done(ccb);
		return;
	}

	/* Apply the currently negotiated transfer settings to the hscb. */
	tinfo = ahc_fetch_transinfo(ahc, SCSIID_CHANNEL(ahc, scb->hscb->scsiid),
				    SCSIID_OUR_ID(scb->hscb->scsiid),
				    SCSIID_TARGET(ahc, scb->hscb->scsiid),
				    &tstate);

	mask = SCB_GET_TARGET_MASK(ahc, scb);
	scb->hscb->scsirate = tinfo->scsirate;
	scb->hscb->scsioffset = tinfo->current.offset;
	if ((tstate->ultraenb & mask) != 0)
		scb->hscb->control |= ULTRAENB;

	if ((tstate->discenable & mask) != 0
	 && (ccb->ccb_h.flags & CAM_DIS_DISCONNECT) == 0)
		scb->hscb->control |= DISCENB;

	/* Kick off (re)negotiation if the caller asked for it. */
	if ((ccb->ccb_h.flags & CAM_NEGOTIATE) != 0
	 && (tinfo->goal.width != 0
	  || tinfo->goal.period != 0
	  || tinfo->goal.ppr_options != 0)) {
		scb->flags |= SCB_NEGOTIATE;
		scb->hscb->control |= MK_MESSAGE;
	}

	LIST_INSERT_HEAD(&ahc->pending_scbs, scb, pending_links);

	ccb->ccb_h.status |= CAM_SIM_QUEUED;

	if (ccb->ccb_h.timeout != CAM_TIME_INFINITY) {
		uint64_t time;

		if (ccb->ccb_h.timeout == CAM_TIME_DEFAULT)
			ccb->ccb_h.timeout = 5 * 1000;

		/* Convert from CAM milliseconds to system ticks. */
		time = ccb->ccb_h.timeout;
		time *= hz;
		time /= 1000;
		ccb->ccb_h.timeout_ch =
		    timeout(ahc_timeout, (caddr_t)scb, time);
	}

	/*
	 * We only allow one untagged transaction
	 * per target in the initiator role unless
	 * we are storing a full busy target *lun*
	 * table in SCB space.
	 */
	if ((scb->hscb->control & (TARGET_SCB|TAG_ENB)) == 0
	 && (ahc->flags & AHC_SCB_BTT) == 0) {
		struct scb_tailq *untagged_q;
		int target_offset;

		target_offset = SCB_GET_TARGET_OFFSET(ahc, scb);
		untagged_q = &(ahc->untagged_queues[target_offset]);
		TAILQ_INSERT_TAIL(untagged_q, scb, links.tqe);
		scb->flags |= SCB_UNTAGGEDQ;
		if (TAILQ_FIRST(untagged_q) != scb) {
			/*
			 * Another untagged command is already in flight;
			 * this SCB will be started when it reaches the
			 * head of the queue.
			 */
			ahc_unlock(ahc, &s);
			return;
		}
	}
	scb->flags |= SCB_ACTIVE;

	if ((scb->flags & SCB_TARGET_IMMEDIATE) != 0) {
		/*
		 * Target-mode immediate continuation: hand the SCB
		 * directly to the paused sequencer rather than through
		 * the normal queue.
		 */
		ahc_pause(ahc);
		if ((ahc->flags & AHC_PAGESCBS) == 0)
			ahc_outb(ahc, SCBPTR, scb->hscb->tag);
		ahc_outb(ahc, SCB_TAG, scb->hscb->tag);
		ahc_outb(ahc, RETURN_1, CONT_MSG_LOOP);
		ahc_unpause(ahc);
	} else {
		ahc_queue_scb(ahc, scb);
	}

	ahc_unlock(ahc, &s);
}
1254
/*
 * CAM polled-mode entry point: service any pending controller
 * events by calling the interrupt handler directly.
 */
static void
ahc_poll(struct cam_sim *sim)
{
	struct ahc_softc *ahc;

	ahc = (struct ahc_softc *)cam_sim_softc(sim);
	ahc_intr(ahc);
}
1260
/*
 * Prepare an SCB for an XPT_SCSI_IO or XPT_CONT_TARGET_IO request:
 * copy the CDB into the hardware SCB (SCSI I/O only) and arrange the
 * DMA mapping of the data buffer, handing off to ahc_execute_scb()
 * once the segment list is available.  ahc_execute_scb() completes
 * the CCB on every path from here, including errors.
 */
static void
ahc_setup_data(struct ahc_softc *ahc, struct cam_sim *sim,
	       struct ccb_scsiio *csio, struct scb *scb)
{
	struct hardware_scb *hscb;
	struct ccb_hdr *ccb_h;

	hscb = scb->hscb;
	ccb_h = &csio->ccb_h;

	if (ccb_h->func_code == XPT_SCSI_IO) {
		hscb->cdb_len = csio->cdb_len;
		if ((ccb_h->flags & CAM_CDB_POINTER) != 0) {

			/*
			 * Reject CDBs that are too large for the hscb
			 * or that are given as physical addresses.
			 */
			if (hscb->cdb_len > sizeof(hscb->cdb32)
			 || (ccb_h->flags & CAM_CDB_PHYS) != 0) {
				u_long s;

				ahc_set_transaction_status(scb,
							   CAM_REQ_INVALID);
				ahc_lock(ahc, &s);
				ahc_free_scb(ahc, scb);
				ahc_unlock(ahc, &s);
				xpt_done((union ccb *)csio);
				return;
			}
			/*
			 * CDBs longer than 12 bytes go in the separate
			 * cdb32 area; shorter ones share space with the
			 * hscb's shared_data union.
			 */
			if (hscb->cdb_len > 12) {
				memcpy(hscb->cdb32,
				       csio->cdb_io.cdb_ptr,
				       hscb->cdb_len);
				scb->flags |= SCB_CDB32_PTR;
			} else {
				memcpy(hscb->shared_data.cdb,
				       csio->cdb_io.cdb_ptr,
				       hscb->cdb_len);
			}
		} else {
			/* CDB bytes are embedded in the CCB itself. */
			if (hscb->cdb_len > 12) {
				memcpy(hscb->cdb32, csio->cdb_io.cdb_bytes,
				       hscb->cdb_len);
				scb->flags |= SCB_CDB32_PTR;
			} else {
				memcpy(hscb->shared_data.cdb,
				       csio->cdb_io.cdb_bytes,
				       hscb->cdb_len);
			}
		}
	}

	/* Only use S/G if there is a transfer */
	if ((ccb_h->flags & CAM_DIR_MASK) != CAM_DIR_NONE) {
		if ((ccb_h->flags & CAM_SCATTER_VALID) == 0) {
			/* We've been given a pointer to a single buffer */
			if ((ccb_h->flags & CAM_DATA_PHYS) == 0) {
				int s;
				int error;

				/* Virtual buffer: let busdma map it. */
				s = splsoftvm();
				error = bus_dmamap_load(ahc->buffer_dmat,
							scb->dmamap,
							csio->data_ptr,
							csio->dxfer_len,
							ahc_execute_scb,
							scb, /*flags*/0);
				if (error == EINPROGRESS) {
					/*
					 * So as to maintain ordering,
					 * freeze the controller queue
					 * until our mapping is
					 * returned.
					 */
					xpt_freeze_simq(sim,
							/*count*/1);
					scb->io_ctx->ccb_h.status |=
					    CAM_RELEASE_SIMQ;
				}
				splx(s);
			} else {
				struct bus_dma_segment seg;

				/* Pointer to physical buffer */
				/*
				 * NOTE(review): the panic message text is
				 * garbled ("larger than can device max").
				 */
				if (csio->dxfer_len > AHC_MAXTRANSFER_SIZE)
					panic("ahc_setup_data - Transfer size "
					      "larger than can device max");

				seg.ds_addr = (bus_addr_t)csio->data_ptr;
				seg.ds_len = csio->dxfer_len;
				ahc_execute_scb(scb, &seg, 1, 0);
			}
		} else {
			struct bus_dma_segment *segs;

			if ((ccb_h->flags & CAM_DATA_PHYS) != 0)
				panic("ahc_setup_data - Physical segment "
				      "pointers unsupported");

			if ((ccb_h->flags & CAM_SG_LIST_PHYS) == 0)
				panic("ahc_setup_data - Virtual segment "
				      "addresses unsupported");

			/* Just use the segments provided */
			segs = (struct bus_dma_segment *)csio->data_ptr;
			ahc_execute_scb(scb, segs, csio->sglist_cnt, 0);
		}
	} else {
		/* No data transfer; go straight to execution. */
		ahc_execute_scb(scb, NULL, 0, 0);
	}
}
1369
1370static void
1371ahc_set_recoveryscb(struct ahc_softc *ahc, struct scb *scb) {
1372
1373	if ((scb->flags & SCB_RECOVERY_SCB) == 0) {
1374		struct scb *list_scb;
1375
1376		scb->flags |= SCB_RECOVERY_SCB;
1377
1378		/*
1379		 * Take all queued, but not sent SCBs out of the equation.
1380		 * Also ensure that no new CCBs are queued to us while we
1381		 * try to fix this problem.
1382		 */
1383		if ((scb->io_ctx->ccb_h.status & CAM_RELEASE_SIMQ) == 0) {
1384			xpt_freeze_simq(SCB_GET_SIM(ahc, scb), /*count*/1);
1385			scb->io_ctx->ccb_h.status |= CAM_RELEASE_SIMQ;
1386		}
1387
1388		/*
1389		 * Go through all of our pending SCBs and remove
1390		 * any scheduled timeouts for them.  We will reschedule
1391		 * them after we've successfully fixed this problem.
1392		 */
1393		LIST_FOREACH(list_scb, &ahc->pending_scbs, pending_links) {
1394			union ccb *ccb;
1395
1396			ccb = list_scb->io_ctx;
1397			untimeout(ahc_timeout, list_scb, ccb->ccb_h.timeout_ch);
1398		}
1399	}
1400}
1401
/*
 * timeout(9) handler for a timed-out SCB.  Pauses the controller,
 * dumps diagnostic state, then escalates recovery: defer to a
 * longer-lived active SCB, attempt a bus device reset (BDR) message
 * to the offending target, or — if this SCB has already been through
 * recovery (SCB_DEVICE_RESET/SCB_ABORT set) — perform a full bus
 * reset.
 */
void
ahc_timeout(void *arg)
{
	struct	scb *scb;
	struct	ahc_softc *ahc;
	long	s;
	int	found;
	u_int	last_phase;
	int	target;
	int	lun;
	int	i;
	char	channel;

	scb = (struct scb *)arg;
	ahc = (struct ahc_softc *)scb->ahc_softc;

	ahc_lock(ahc, &s);

	/* Pause the sequencer and complete any outstanding work first. */
	ahc_pause_and_flushwork(ahc);

	if ((scb->flags & SCB_ACTIVE) == 0) {
		/* Previous timeout took care of me already */
		printf("%s: Timedout SCB already complete. "
		       "Interrupts may not be functioning.\n", ahc_name(ahc));
		ahc_unpause(ahc);
		ahc_unlock(ahc, &s);
		return;
	}

	target = SCB_GET_TARGET(ahc, scb);
	channel = SCB_GET_CHANNEL(ahc, scb);
	lun = SCB_GET_LUN(scb);

	ahc_print_path(ahc, scb);
	printf("SCB 0x%x - timed out ", scb->hscb->tag);
	/*
	 * Take a snapshot of the bus state and print out
	 * some information so we can track down driver bugs.
	 */
	last_phase = ahc_inb(ahc, LASTPHASE);

	printf("%s", ahc_lookup_phase_entry(last_phase)->phasemsg);

	printf(", SEQADDR == 0x%x\n",
	       ahc_inb(ahc, SEQADDR0) | (ahc_inb(ahc, SEQADDR1) << 8));

	/*
	 * NOTE(review): each STACK word is formed from two ahc_inb()
	 * calls combined with '|', and C leaves both the order of those
	 * two calls and the order of the four printf arguments
	 * unspecified, so the bytes/words may pair up differently
	 * depending on the compiler.  Reading STACK into an array first
	 * would make the output deterministic.
	 */
	printf("STACK == 0x%x, 0x%x, 0x%x, 0x%x\n",
		ahc_inb(ahc, STACK) | (ahc_inb(ahc, STACK) << 8),
		ahc_inb(ahc, STACK) | (ahc_inb(ahc, STACK) << 8),
		ahc_inb(ahc, STACK) | (ahc_inb(ahc, STACK) << 8),
		ahc_inb(ahc, STACK) | (ahc_inb(ahc, STACK) << 8));

	printf("SXFRCTL0 == 0x%x\n", ahc_inb(ahc, SXFRCTL0));

	ahc_dump_card_state(ahc);
	if (scb->sg_count > 0) {
		for (i = 0; i < scb->sg_count; i++) {
			printf("sg[%d] - Addr 0x%x : Length %d\n",
			       i,
			       scb->sg_list[i].addr,
			       scb->sg_list[i].len & AHC_SG_LEN_MASK);
		}
	}
	if (scb->flags & (SCB_DEVICE_RESET|SCB_ABORT)) {
		/*
		 * Been down this road before.
		 * Do a full bus reset.
		 */
bus_reset:
		ahc_set_transaction_status(scb, CAM_CMD_TIMEOUT);
		found = ahc_reset_channel(ahc, channel, /*Initiate Reset*/TRUE);
		printf("%s: Issued Channel %c Bus Reset. "
		       "%d SCBs aborted\n", ahc_name(ahc), channel, found);
	} else {
		/*
		 * If we are a target, transition to bus free and report
		 * the timeout.
		 *
		 * The target/initiator that is holding up the bus may not
		 * be the same as the one that triggered this timeout
		 * (different commands have different timeout lengths).
		 * If the bus is idle and we are acting as the initiator
		 * for this request, queue a BDR message to the timed out
		 * target.  Otherwise, if the timed out transaction is
		 * active:
		 *   Initiator transaction:
		 *	Stuff the message buffer with a BDR message and assert
		 *	ATN in the hopes that the target will let go of the bus
		 *	and go to the mesgout phase.  If this fails, we'll
		 *	get another timeout 2 seconds later which will attempt
		 *	a bus reset.
		 *
		 *   Target transaction:
		 *	Transition to BUS FREE and report the error.
		 *	It's good to be the target!
		 */
		u_int active_scb_index;
		u_int saved_scbptr;

		saved_scbptr = ahc_inb(ahc, SCBPTR);
		active_scb_index = ahc_inb(ahc, SCB_TAG);

		if (last_phase != P_BUSFREE
		  && (ahc_inb(ahc, SEQ_FLAGS) & IDENTIFY_SEEN) != 0
		  && (active_scb_index < ahc->scb_data->numscbs)) {
			struct scb *active_scb;

			/*
			 * If the active SCB is not us, assume that
			 * the active SCB has a longer timeout than
			 * the timedout SCB, and wait for the active
			 * SCB to timeout.
			 */
			active_scb = ahc_lookup_scb(ahc, active_scb_index);
			if (active_scb != scb) {
				struct	 ccb_hdr *ccbh;
				uint64_t newtimeout;

				ahc_print_path(ahc, scb);
				printf("Other SCB Timeout%s",
			 	       (scb->flags & SCB_OTHERTCL_TIMEOUT) != 0
				       ? " again\n" : "\n");
				scb->flags |= SCB_OTHERTCL_TIMEOUT;
				/*
				 * Re-arm our timeout for the longer of the
				 * two commands' timeouts (ms -> ticks).
				 */
				newtimeout =
				    MAX(active_scb->io_ctx->ccb_h.timeout,
					scb->io_ctx->ccb_h.timeout);
				newtimeout *= hz;
				newtimeout /= 1000;
				/* NOTE(review): ccbh is assigned but unused. */
				ccbh = &scb->io_ctx->ccb_h;
				scb->io_ctx->ccb_h.timeout_ch =
				    timeout(ahc_timeout, scb, newtimeout);
				ahc_unpause(ahc);
				ahc_unlock(ahc, &s);
				return;
			}

			/* It's us */
			if ((scb->hscb->control & TARGET_SCB) != 0) {

				/*
				 * Send back any queued up transactions
				 * and properly record the error condition.
				 */
				ahc_freeze_devq(ahc, scb);
				ahc_set_transaction_status(scb,
							   CAM_CMD_TIMEOUT);
				ahc_freeze_scb(scb);
				ahc_done(ahc, scb);

				/* Will clear us from the bus */
				ahc_restart(ahc);
				ahc_unlock(ahc, &s);
				return;
			}

			/*
			 * Initiator transaction that is still on the bus:
			 * queue a BDR message and assert ATN.
			 */
			ahc_set_recoveryscb(ahc, active_scb);
			ahc_outb(ahc, MSG_OUT, HOST_MSG);
			ahc_outb(ahc, SCSISIGO, last_phase|ATNO);
			ahc_print_path(ahc, active_scb);
			printf("BDR message in message buffer\n");
			active_scb->flags |= SCB_DEVICE_RESET;
			/* Give the BDR two seconds before escalating. */
			active_scb->io_ctx->ccb_h.timeout_ch =
			    timeout(ahc_timeout, (caddr_t)active_scb, 2 * hz);
			ahc_unpause(ahc);
		} else {
			int	 disconnected;

			/* XXX Shouldn't panic.  Just punt instead */
			if ((scb->hscb->control & TARGET_SCB) != 0)
				panic("Timed-out target SCB but bus idle");

			if (last_phase != P_BUSFREE
			 && (ahc_inb(ahc, SSTAT0) & TARGET) != 0) {
				/* XXX What happened to the SCB? */
				/* Hung target selection.  Goto busfree */
				printf("%s: Hung target selection\n",
				       ahc_name(ahc));
				ahc_restart(ahc);
				ahc_unlock(ahc, &s);
				return;
			}

			/*
			 * If the SCB is still in the QINFIFO it never made
			 * it onto the bus, so the device must be
			 * disconnected (or was never selected).
			 */
			if (ahc_search_qinfifo(ahc, target, channel, lun,
					       scb->hscb->tag, ROLE_INITIATOR,
					       /*status*/0, SEARCH_COUNT) > 0) {
				disconnected = FALSE;
			} else {
				disconnected = TRUE;
			}

			if (disconnected) {

				ahc_set_recoveryscb(ahc, scb);
				/*
				 * Actually re-queue this SCB in an attempt
				 * to select the device before it reconnects.
				 * In either case (selection or reselection),
				 * we will now issue a target reset to the
				 * timed-out device.
				 *
				 * Set the MK_MESSAGE control bit indicating
				 * that we desire to send a message.  We
				 * also set the disconnected flag since
				 * in the paging case there is no guarantee
				 * that our SCB control byte matches the
				 * version on the card.  We don't want the
				 * sequencer to abort the command thinking
				 * an unsolicited reselection occurred.
				 */
				scb->hscb->control |= MK_MESSAGE|DISCONNECTED;
				scb->flags |= SCB_DEVICE_RESET;

				/*
				 * Remove any cached copy of this SCB in the
				 * disconnected list in preparation for the
				 * queuing of our abort SCB.  We use the
				 * same element in the SCB, SCB_NEXT, for
				 * both the qinfifo and the disconnected list.
				 */
				ahc_search_disc_list(ahc, target, channel,
						     lun, scb->hscb->tag,
						     /*stop_on_first*/TRUE,
						     /*remove*/TRUE,
						     /*save_state*/FALSE);

				/*
				 * In the non-paging case, the sequencer will
				 * never re-reference the in-core SCB.
				 * To make sure we are notified during
				 * reselection, set the MK_MESSAGE flag in
				 * the card's copy of the SCB.
				 */
				if ((ahc->flags & AHC_PAGESCBS) == 0) {
					ahc_outb(ahc, SCBPTR, scb->hscb->tag);
					ahc_outb(ahc, SCB_CONTROL,
						 ahc_inb(ahc, SCB_CONTROL)
						| MK_MESSAGE);
				}

				/*
				 * Clear out any entries in the QINFIFO first
				 * so we are the next SCB for this target
				 * to run.
				 */
				ahc_search_qinfifo(ahc,
						   SCB_GET_TARGET(ahc, scb),
						   channel, SCB_GET_LUN(scb),
						   SCB_LIST_NULL,
						   ROLE_INITIATOR,
						   CAM_REQUEUE_REQ,
						   SEARCH_COMPLETE);
				ahc_print_path(ahc, scb);
				printf("Queuing a BDR SCB\n");
				ahc_qinfifo_requeue_tail(ahc, scb);
				ahc_outb(ahc, SCBPTR, saved_scbptr);
				/* Give the BDR two seconds before escalating. */
				scb->io_ctx->ccb_h.timeout_ch =
				    timeout(ahc_timeout, (caddr_t)scb, 2 * hz);
				ahc_unpause(ahc);
			} else {
				/* Go "immediately" to the bus reset */
				/* This shouldn't happen */
				ahc_set_recoveryscb(ahc, scb);
				ahc_print_path(ahc, scb);
				printf("SCB %d: Immediate reset.  "
					"Flags = 0x%x\n", scb->hscb->tag,
					scb->flags);
				goto bus_reset;
			}
		}
	}
	ahc_unlock(ahc, &s);
}
1674
/*
 * Service an XPT_ABORT request.  Target-mode CCBs (accept-TIO and
 * immediate-notify) that are still queued on our lists are unlinked
 * and completed as aborted.  Aborting an in-flight SCSI I/O is not
 * implemented and is reported as CAM_UA_ABORT.  The abort-request
 * CCB itself is always completed via xpt_done().
 */
static void
ahc_abort_ccb(struct ahc_softc *ahc, struct cam_sim *sim, union ccb *ccb)
{
	union ccb *abort_ccb;

	abort_ccb = ccb->cab.abort_ccb;
	switch (abort_ccb->ccb_h.func_code) {
	case XPT_ACCEPT_TARGET_IO:
	case XPT_IMMED_NOTIFY:
	case XPT_CONT_TARGET_IO:
	{
		struct tmode_tstate *tstate;
		struct tmode_lstate *lstate;
		struct ccb_hdr_slist *list;
		cam_status status;

		status = ahc_find_tmode_devs(ahc, sim, abort_ccb, &tstate,
					     &lstate, TRUE);

		if (status != CAM_REQ_CMP) {
			ccb->ccb_h.status = status;
			break;
		}

		/* Pick the queue the CCB would be waiting on, if any. */
		if (abort_ccb->ccb_h.func_code == XPT_ACCEPT_TARGET_IO)
			list = &lstate->accept_tios;
		else if (abort_ccb->ccb_h.func_code == XPT_IMMED_NOTIFY)
			list = &lstate->immed_notifies;
		else
			list = NULL;

		if (list != NULL) {
			struct ccb_hdr *curelm;
			int found;

			/*
			 * Walk the singly-linked list and unlink the
			 * CCB if present (special-casing the head).
			 */
			curelm = SLIST_FIRST(list);
			found = 0;
			if (curelm == &abort_ccb->ccb_h) {
				found = 1;
				SLIST_REMOVE_HEAD(list, sim_links.sle);
			} else {
				while(curelm != NULL) {
					struct ccb_hdr *nextelm;

					nextelm =
					    SLIST_NEXT(curelm, sim_links.sle);

					if (nextelm == &abort_ccb->ccb_h) {
						found = 1;
						SLIST_NEXT(curelm,
							   sim_links.sle) =
						    SLIST_NEXT(nextelm,
							       sim_links.sle);
						break;
					}
					curelm = nextelm;
				}
			}

			if (found) {
				abort_ccb->ccb_h.status = CAM_REQ_ABORTED;
				xpt_done(abort_ccb);
				ccb->ccb_h.status = CAM_REQ_CMP;
			} else {
				xpt_print_path(abort_ccb->ccb_h.path);
				printf("Not found\n");
				ccb->ccb_h.status = CAM_PATH_INVALID;
			}
			break;
		}
		/* FALLTHROUGH */
	}
	case XPT_SCSI_IO:
		/* XXX Fully implement the hard ones */
		ccb->ccb_h.status = CAM_UA_ABORT;
		break;
	default:
		ccb->ccb_h.status = CAM_REQ_INVALID;
		break;
	}
	xpt_done(ccb);
}
1757
1758void
1759ahc_send_async(struct ahc_softc *ahc, char channel, u_int target,
1760		u_int lun, ac_code code)
1761{
1762	struct	ccb_trans_settings cts;
1763	struct cam_path *path;
1764	void *arg;
1765	int error;
1766
1767	arg = NULL;
1768	error = ahc_create_path(ahc, channel, target, lun, &path);
1769
1770	if (error != CAM_REQ_CMP)
1771		return;
1772
1773	switch (code) {
1774	case AC_TRANSFER_NEG:
1775#ifdef AHC_NEW_TRAN_SETTINGS
1776		cts.type = CTS_TYPE_CURRENT_SETTINGS;
1777#else
1778		cts.flags = CCB_TRANS_CURRENT_SETTINGS;
1779#endif
1780		cts.ccb_h.path = path;
1781		cts.ccb_h.target_id = target;
1782		cts.ccb_h.target_lun = lun;
1783		ahc_get_tran_settings(ahc, channel == 'A' ? ahc->our_id
1784							  : ahc->our_id_b,
1785				      channel, &cts);
1786		arg = &cts;
1787		break;
1788	case AC_SENT_BDR:
1789	case AC_BUS_RESET:
1790		break;
1791	default:
1792		panic("ahc_send_async: Unexpected async event");
1793	}
1794	xpt_async(code, path, arg);
1795	xpt_free_path(path);
1796}
1797
/*
 * Platform hook called by the core when tagged queuing is enabled or
 * disabled for a device.  No platform-specific action is required on
 * FreeBSD, so this is intentionally a no-op.
 */
void
ahc_platform_set_tags(struct ahc_softc *ahc,
		      struct ahc_devinfo *devinfo, int enable)
{
}
1803
1804int
1805ahc_platform_alloc(struct ahc_softc *ahc, void *platform_arg)
1806{
1807	ahc->platform_data = malloc(sizeof(struct ahc_platform_data), M_DEVBUF,
1808	    M_NOWAIT | M_ZERO);
1809	if (ahc->platform_data == NULL)
1810		return (ENOMEM);
1811	return (0);
1812}
1813
1814void
1815ahc_platform_free(struct ahc_softc *ahc)
1816{
1817	struct ahc_platform_data *pdata;
1818
1819	pdata = ahc->platform_data;
1820	if (pdata != NULL) {
1821		device_printf(ahc->dev_softc, "Platform free\n");
1822		if (pdata->regs != NULL)
1823			bus_release_resource(ahc->dev_softc,
1824					     pdata->regs_res_type,
1825					     pdata->regs_res_id,
1826					     pdata->regs);
1827
1828		if (pdata->irq != NULL)
1829			bus_release_resource(ahc->dev_softc,
1830					     pdata->irq_res_type,
1831					     0, pdata->irq);
1832
1833		if (pdata->sim_b != NULL) {
1834			xpt_async(AC_LOST_DEVICE, pdata->path_b, NULL);
1835			xpt_free_path(pdata->path_b);
1836			xpt_bus_deregister(cam_sim_path(pdata->sim_b));
1837			cam_sim_free(pdata->sim_b, /*free_devq*/TRUE);
1838		}
1839		if (pdata->sim != NULL) {
1840			xpt_async(AC_LOST_DEVICE, pdata->path, NULL);
1841			xpt_free_path(pdata->path);
1842			xpt_bus_deregister(cam_sim_path(pdata->sim));
1843			cam_sim_free(pdata->sim, /*free_devq*/TRUE);
1844		}
1845		if (pdata->eh != NULL)
1846			EVENTHANDLER_DEREGISTER(shutdown_final, pdata->eh);
1847		free(ahc->platform_data, M_DEVBUF);
1848	}
1849}
1850
/*
 * Comparison hook used when ordering softc instances.  FreeBSD does
 * not sort softcs, so every pair compares as equal.
 */
int
ahc_softc_comp(struct ahc_softc *lahc, struct ahc_softc *rahc)
{
	return (0);
}
1857
1858int
1859ahc_detach(device_t dev)
1860{
1861	struct ahc_softc *ahc;
1862	u_long s;
1863
1864	device_printf(dev, "detaching device\n");
1865	ahc = device_get_softc(dev);
1866	ahc_lock(ahc, &s);
1867	bus_teardown_intr(dev, ahc->platform_data->irq, ahc->platform_data->ih);
1868	ahc_unlock(ahc, &s);
1869	ahc_free(ahc);
1870	return (0);
1871}
1872
#if UNUSED
/*
 * Debug helper: hex-dump the bytes of a received target-mode command,
 * eight values per line, starting at the initiator_channel field and
 * running to the start of the next command slot.
 */
static void
ahc_dump_targcmd(struct target_cmd *cmd)
{
	uint8_t *byte;
	uint8_t *last_byte;
	int col;

	byte = &cmd->initiator_channel;
	/* One past the last byte of this command's debugging info. */
	last_byte = &cmd[1].initiator_channel;

	for (col = 0; byte < last_byte; byte++) {
		if (col == 0)
			printf("\t");
		printf("%#x", *byte);
		if (++col == 8) {
			printf("\n");
			col = 0;
		} else {
			printf(", ");
		}
	}
}
#endif
1900