aic7xxx_osm.c revision 74972
1/*
2 * Bus independent FreeBSD shim for the aic7xxx based adaptec SCSI controllers
3 *
4 * Copyright (c) 1994-2001 Justin T. Gibbs.
5 * All rights reserved.
6 *
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
9 * are met:
10 * 1. Redistributions of source code must retain the above copyright
11 *    notice, this list of conditions, and the following disclaimer,
12 *    without modification.
13 * 2. The name of the author may not be used to endorse or promote products
14 *    derived from this software without specific prior written permission.
15 *
16 * Alternatively, this software may be distributed under the terms of the
17 * GNU Public License ("GPL").
18 *
19 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
20 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
21 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
22 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
23 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
24 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
25 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
26 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
27 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
28 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
29 * SUCH DAMAGE.
30 *
31 * $Id$
32 *
33 * $FreeBSD: head/sys/dev/aic7xxx/aic7xxx_osm.c 74972 2001-03-29 00:36:35Z gibbs $
34 */
35
36#include <dev/aic7xxx/aic7xxx_freebsd.h>
37#include <dev/aic7xxx/aic7xxx_inline.h>
38
39#ifndef AHC_TMODE_ENABLE
40#define AHC_TMODE_ENABLE 0
41#endif
42
43#define ccb_scb_ptr spriv_ptr0
44
45#ifdef AHC_DEBUG
46static int     ahc_debug = AHC_DEBUG;
47#endif
48
49#if UNUSED
50static void	ahc_dump_targcmd(struct target_cmd *cmd);
51#endif
52static void	ahc_action(struct cam_sim *sim, union ccb *ccb);
53static void	ahc_get_tran_settings(struct ahc_softc *ahc,
54				      int our_id, char channel,
55				      struct ccb_trans_settings *cts);
56static void	ahc_async(void *callback_arg, uint32_t code,
57			  struct cam_path *path, void *arg);
58static void	ahc_execute_scb(void *arg, bus_dma_segment_t *dm_segs,
59				int nsegments, int error);
60static void	ahc_poll(struct cam_sim *sim);
61static void	ahc_setup_data(struct ahc_softc *ahc, struct cam_sim *sim,
62			       struct ccb_scsiio *csio, struct scb *scb);
63static void	ahc_abort_ccb(struct ahc_softc *ahc, struct cam_sim *sim,
64			      union ccb *ccb);
65static int	ahc_create_path(struct ahc_softc *ahc,
66				char channel, u_int target, u_int lun,
67				struct cam_path **path);
68
69static void	ahc_set_recoveryscb(struct ahc_softc *ahc, struct scb *scb);
70
71static int
72ahc_create_path(struct ahc_softc *ahc, char channel, u_int target,
73	        u_int lun, struct cam_path **path)
74{
75	path_id_t path_id;
76
77	if (channel == 'B')
78		path_id = cam_sim_path(ahc->platform_data->sim_b);
79	else
80		path_id = cam_sim_path(ahc->platform_data->sim);
81
82	return (xpt_create_path(path, /*periph*/NULL,
83				path_id, target, lun));
84}
85
86/*
87 * Attach all the sub-devices we can find
88 */
89int
90ahc_attach(struct ahc_softc *ahc)
91{
92	char   ahc_info[256];
93	struct ccb_setasync csa;
94	struct cam_devq *devq;
95	int bus_id;
96	int bus_id2;
97	struct cam_sim *sim;
98	struct cam_sim *sim2;
99	struct cam_path *path;
100	struct cam_path *path2;
101	long s;
102	int count;
103	int error;
104
105	count = 0;
106	sim = NULL;
107	sim2 = NULL;
108
109	ahc_controller_info(ahc, ahc_info);
110	printf("%s\n", ahc_info);
111	ahc_lock(ahc, &s);
112	/* Hook up our interrupt handler */
113	if ((error = bus_setup_intr(ahc->dev_softc, ahc->platform_data->irq,
114				    INTR_TYPE_CAM|INTR_ENTROPY, ahc_platform_intr, ahc,
115				    &ahc->platform_data->ih)) != 0) {
116		device_printf(ahc->dev_softc, "bus_setup_intr() failed: %d\n",
117			      error);
118		goto fail;
119	}
120
121	/*
122	 * Attach secondary channel first if the user has
123	 * declared it the primary channel.
124	 */
125	if ((ahc->features & AHC_TWIN) != 0
126	 && (ahc->flags & AHC_PRIMARY_CHANNEL) != 0) {
127		bus_id = 1;
128		bus_id2 = 0;
129	} else {
130		bus_id = 0;
131		bus_id2 = 1;
132	}
133
134	/*
135	 * Create the device queue for our SIM(s).
136	 */
137	devq = cam_simq_alloc(AHC_MAX_QUEUE);
138	if (devq == NULL)
139		goto fail;
140
141	/*
142	 * Construct our first channel SIM entry
143	 */
144	sim = cam_sim_alloc(ahc_action, ahc_poll, "ahc", ahc,
145			    device_get_unit(ahc->dev_softc),
146			    1, AHC_MAX_QUEUE, devq);
147	if (sim == NULL) {
148		cam_simq_free(devq);
149		goto fail;
150	}
151
152	if (xpt_bus_register(sim, bus_id) != CAM_SUCCESS) {
153		cam_sim_free(sim, /*free_devq*/TRUE);
154		sim = NULL;
155		goto fail;
156	}
157
158	if (xpt_create_path(&path, /*periph*/NULL,
159			    cam_sim_path(sim), CAM_TARGET_WILDCARD,
160			    CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
161		xpt_bus_deregister(cam_sim_path(sim));
162		cam_sim_free(sim, /*free_devq*/TRUE);
163		sim = NULL;
164		goto fail;
165	}
166
167	xpt_setup_ccb(&csa.ccb_h, path, /*priority*/5);
168	csa.ccb_h.func_code = XPT_SASYNC_CB;
169	csa.event_enable = AC_LOST_DEVICE;
170	csa.callback = ahc_async;
171	csa.callback_arg = sim;
172	xpt_action((union ccb *)&csa);
173	count++;
174
175	if (ahc->features & AHC_TWIN) {
176		sim2 = cam_sim_alloc(ahc_action, ahc_poll, "ahc",
177				    ahc, device_get_unit(ahc->dev_softc), 1,
178				    AHC_MAX_QUEUE, devq);
179
180		if (sim2 == NULL) {
181			printf("ahc_attach: Unable to attach second "
182			       "bus due to resource shortage");
183			goto fail;
184		}
185
186		if (xpt_bus_register(sim2, bus_id2) != CAM_SUCCESS) {
187			printf("ahc_attach: Unable to attach second "
188			       "bus due to resource shortage");
189			/*
190			 * We do not want to destroy the device queue
191			 * because the first bus is using it.
192			 */
193			cam_sim_free(sim2, /*free_devq*/FALSE);
194			goto fail;
195		}
196
197		if (xpt_create_path(&path2, /*periph*/NULL,
198				    cam_sim_path(sim2),
199				    CAM_TARGET_WILDCARD,
200				    CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
201			xpt_bus_deregister(cam_sim_path(sim2));
202			cam_sim_free(sim2, /*free_devq*/FALSE);
203			sim2 = NULL;
204			goto fail;
205		}
206		xpt_setup_ccb(&csa.ccb_h, path2, /*priority*/5);
207		csa.ccb_h.func_code = XPT_SASYNC_CB;
208		csa.event_enable = AC_LOST_DEVICE;
209		csa.callback = ahc_async;
210		csa.callback_arg = sim2;
211		xpt_action((union ccb *)&csa);
212		count++;
213	}
214
215fail:
216	if ((ahc->features & AHC_TWIN) != 0
217	 && (ahc->flags & AHC_PRIMARY_CHANNEL) != 0) {
218		ahc->platform_data->sim_b = sim;
219		ahc->platform_data->path_b = path;
220		ahc->platform_data->sim = sim2;
221		ahc->platform_data->path = path2;
222	} else {
223		ahc->platform_data->sim = sim;
224		ahc->platform_data->path = path;
225		ahc->platform_data->sim_b = sim2;
226		ahc->platform_data->path_b = path2;
227	}
228	ahc_unlock(ahc, &s);
229
230	if (count != 0)
231		/* We have to wait until after any system dumps... */
232		ahc->platform_data->eh =
233		    EVENTHANDLER_REGISTER(shutdown_final, ahc_shutdown,
234					  ahc, SHUTDOWN_PRI_DEFAULT);
235
236	return (count);
237}
238
239/*
240 * Catch an interrupt from the adapter
241 */
/*
 * Interrupt handler registered with bus_setup_intr() in ahc_attach().
 * Simply forwards the interrupt to the bus-independent core.
 */
void
ahc_platform_intr(void *arg)
{
	ahc_intr((struct ahc_softc *)arg);
}
250
251/*
252 * We have an scb which has been processed by the
253 * adaptor, now we look to see how the operation
254 * went.
255 */
/*
 * We have an scb which has been processed by the
 * adaptor, now we look to see how the operation
 * went.
 *
 * Removes the SCB from the driver's pending structures, cancels its
 * timeout, syncs/unloads its DMA map, fixes up CAM status (including
 * autosense bounce-buffer copy-back), and hands the CCB back to CAM.
 */
void
ahc_done(struct ahc_softc *ahc, struct scb *scb)
{
	union ccb *ccb;

	CAM_DEBUG(scb->io_ctx->ccb_h.path, CAM_DEBUG_TRACE,
		  ("ahc_done - scb %d\n", scb->hscb->tag));

	ccb = scb->io_ctx;
	LIST_REMOVE(scb, pending_links);
	if ((scb->flags & SCB_UNTAGGEDQ) != 0) {
		struct scb_tailq *untagged_q;
		int target_offset;

		target_offset = SCB_GET_TARGET_OFFSET(ahc, scb);
		untagged_q = &ahc->untagged_queues[target_offset];
		TAILQ_REMOVE(untagged_q, scb, links.tqe);
		scb->flags &= ~SCB_UNTAGGEDQ;
		/* This completion may unblock the next untagged command. */
		ahc_run_untagged_queue(ahc, untagged_q);
	}

	/* Cancel the per-command timeout armed when the SCB was queued. */
	untimeout(ahc_timeout, (caddr_t)scb, ccb->ccb_h.timeout_ch);

	if ((ccb->ccb_h.flags & CAM_DIR_MASK) != CAM_DIR_NONE) {
		bus_dmasync_op_t op;

		if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN)
			op = BUS_DMASYNC_POSTREAD;
		else
			op = BUS_DMASYNC_POSTWRITE;
		bus_dmamap_sync(ahc->buffer_dmat, scb->dmamap, op);
		bus_dmamap_unload(ahc->buffer_dmat, scb->dmamap);
	}

	if (ccb->ccb_h.func_code == XPT_CONT_TARGET_IO) {
		struct cam_path *ccb_path;

		/*
		 * If we have finally disconnected, clean up our
		 * pending device state.
		 * XXX - There may be error states that cause where
		 *       we will remain connected.
		 */
		ccb_path = ccb->ccb_h.path;
		if (ahc->pending_device != NULL
		 && xpt_path_comp(ahc->pending_device->path, ccb_path) == 0) {

			if ((ccb->ccb_h.flags & CAM_SEND_STATUS) != 0) {
				ahc->pending_device = NULL;
			} else {
				xpt_print_path(ccb->ccb_h.path);
				printf("Still disconnected\n");
				ahc_freeze_ccb(ccb);
			}
		}

		if (ahc_get_transaction_status(scb) == CAM_REQ_INPROG)
			ccb->ccb_h.status |= CAM_REQ_CMP;
		ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
		ahc_free_scb(ahc, scb);
		xpt_done(ccb);
		/* Target-mode continuations take none of the code below. */
		return;
	}

	/*
	 * If the recovery SCB completes, we have to be
	 * out of our timeout.
	 */
	if ((scb->flags & SCB_RECOVERY_SCB) != 0) {
		struct	scb *list_scb;

		/*
		 * We were able to complete the command successfully,
		 * so reinstate the timeouts for all other pending
		 * commands.
		 */
		LIST_FOREACH(list_scb, &ahc->pending_scbs, pending_links) {
			/* NB: shadows the outer ccb for this loop only. */
			union ccb *ccb;
			uint64_t time;

			ccb = list_scb->io_ctx;
			if (ccb->ccb_h.timeout == CAM_TIME_INFINITY)
				continue;

			/* Convert the CCB timeout (ms) to ticks. */
			time = ccb->ccb_h.timeout;
			time *= hz;
			time /= 1000;
			ccb->ccb_h.timeout_ch =
			    timeout(ahc_timeout, list_scb, time);
		}

		/*
		 * Recovery actions (BDR/abort) on this SCB are reported
		 * to CAM as a command timeout on the original request.
		 */
		if (ahc_get_transaction_status(scb) == CAM_BDR_SENT
		 || ahc_get_transaction_status(scb) == CAM_REQ_ABORTED)
			ahc_set_transaction_status(scb, CAM_CMD_TIMEOUT);
		ahc_print_path(ahc, scb);
		printf("no longer in timeout, status = %x\n",
		       ccb->ccb_h.status);
	}

	/* Don't clobber any existing error state */
	if (ahc_get_transaction_status(scb) == CAM_REQ_INPROG) {
		ccb->ccb_h.status |= CAM_REQ_CMP;
	} else if ((scb->flags & SCB_SENSE) != 0) {
		/*
		 * We performed autosense retrieval.
		 *
		 * Zero any sense not transferred by the
		 * device.  The SCSI spec mandates that any
		 * untransfered data should be assumed to be
		 * zero.  Complete the 'bounce' of sense information
		 * through buffers accessible via bus-space by
		 * copying it into the clients csio.
		 */
		memset(&ccb->csio.sense_data, 0, sizeof(ccb->csio.sense_data));
		memcpy(&ccb->csio.sense_data,
		       ahc_get_sense_buf(ahc, scb),
		       (scb->sg_list->len & AHC_SG_LEN_MASK)
		       - ccb->csio.sense_resid);
		scb->io_ctx->ccb_h.status |= CAM_AUTOSNS_VALID;
	}
	ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
	ahc_free_scb(ahc, scb);
	xpt_done(ccb);
}
380
/*
 * Main CAM action routine for this SIM: dispatches on the CCB function
 * code to start I/O, service target-mode requests, change or report
 * transfer negotiation settings, reset the bus, and answer path
 * inquiries.  Registered with cam_sim_alloc() in ahc_attach().
 */
static void
ahc_action(struct cam_sim *sim, union ccb *ccb)
{
	struct	ahc_softc *ahc;
	struct	ahc_tmode_lstate *lstate;
	u_int	target_id;
	u_int	our_id;
	long	s;

	CAM_DEBUG(ccb->ccb_h.path, CAM_DEBUG_TRACE, ("ahc_action\n"));

	ahc = (struct ahc_softc *)cam_sim_softc(sim);

	target_id = ccb->ccb_h.target_id;
	our_id = SIM_SCSI_ID(ahc, sim);

	switch (ccb->ccb_h.func_code) {
	/* Common cases first */
	case XPT_ACCEPT_TARGET_IO:	/* Accept Host Target Mode CDB */
	case XPT_CONT_TARGET_IO:/* Continue Host Target I/O Connection*/
	{
		struct	   ahc_tmode_tstate *tstate;
		cam_status status;

		status = ahc_find_tmode_devs(ahc, sim, ccb, &tstate,
					     &lstate, TRUE);

		if (status != CAM_REQ_CMP) {
			if (ccb->ccb_h.func_code == XPT_CONT_TARGET_IO) {
				/* Response from the black hole device */
				tstate = NULL;
				lstate = ahc->black_hole;
			} else {
				ccb->ccb_h.status = status;
				xpt_done(ccb);
				break;
			}
		}
		if (ccb->ccb_h.func_code == XPT_ACCEPT_TARGET_IO) {

			/* Queue the ATIO for a future incoming command. */
			ahc_lock(ahc, &s);
			SLIST_INSERT_HEAD(&lstate->accept_tios, &ccb->ccb_h,
					  sim_links.sle);
			ccb->ccb_h.status = CAM_REQ_INPROG;
			if ((ahc->flags & AHC_TQINFIFO_BLOCKED) != 0)
				ahc_run_tqinfifo(ahc, /*paused*/FALSE);
			ahc_unlock(ahc, &s);
			break;
		}

		/*
		 * The target_id represents the target we attempt to
		 * select.  In target mode, this is the initiator of
		 * the original command.
		 */
		our_id = target_id;
		target_id = ccb->csio.init_id;
		/* FALLTHROUGH */
	}
	case XPT_SCSI_IO:	/* Execute the requested I/O operation */
	case XPT_RESET_DEV:	/* Bus Device Reset the specified SCSI device */
	{
		struct	scb *scb;
		struct	hardware_scb *hscb;

		/* Initiator-role requests require the initiator role. */
		if ((ahc->flags & AHC_INITIATORROLE) == 0
		 && (ccb->ccb_h.func_code == XPT_SCSI_IO
		  || ccb->ccb_h.func_code == XPT_RESET_DEV)) {
			ccb->ccb_h.status = CAM_PROVIDE_FAIL;
			xpt_done(ccb);
			return;
		}

		/*
		 * get an scb to use.
		 */
		ahc_lock(ahc, &s);
		if ((scb = ahc_get_scb(ahc)) == NULL) {

			/*
			 * Out of SCBs: freeze the SIM queue and ask CAM
			 * to requeue this request later.
			 */
			xpt_freeze_simq(sim, /*count*/1);
			ahc->flags |= AHC_RESOURCE_SHORTAGE;
			ahc_unlock(ahc, &s);
			ccb->ccb_h.status = CAM_REQUEUE_REQ;
			xpt_done(ccb);
			return;
		}
		ahc_unlock(ahc, &s);

		hscb = scb->hscb;

		CAM_DEBUG(ccb->ccb_h.path, CAM_DEBUG_SUBTRACE,
			  ("start scb(%p)\n", scb));
		scb->io_ctx = ccb;
		/*
		 * So we can find the SCB when an abort is requested
		 */
		ccb->ccb_h.ccb_scb_ptr = scb;

		/*
		 * Put all the arguments for the xfer in the scb
		 */
		hscb->control = 0;
		hscb->scsiid = BUILD_SCSIID(ahc, sim, target_id, our_id);
		hscb->lun = ccb->ccb_h.target_lun;
		if (ccb->ccb_h.func_code == XPT_RESET_DEV) {
			/* A BDR is a message-only SCB: no CDB, no data. */
			hscb->cdb_len = 0;
			scb->flags |= SCB_DEVICE_RESET;
			hscb->control |= MK_MESSAGE;
			ahc_execute_scb(scb, NULL, 0, 0);
		} else {
			if (ccb->ccb_h.func_code == XPT_CONT_TARGET_IO) {
				struct target_data *tdata;

				tdata = &hscb->shared_data.tdata;
				if (ahc->pending_device == lstate)
					scb->flags |= SCB_TARGET_IMMEDIATE;
				hscb->control |= TARGET_SCB;
				tdata->target_phases = IDENTIFY_SEEN;
				if ((ccb->ccb_h.flags & CAM_SEND_STATUS) != 0) {
					tdata->target_phases |= SPHASE_PENDING;
					tdata->scsi_status =
					    ccb->csio.scsi_status;
				}
	 			if (ccb->ccb_h.flags & CAM_DIS_DISCONNECT)
					tdata->target_phases |= NO_DISCONNECT;

				tdata->initiator_tag = ccb->csio.tag_id;
			}
			if (ccb->ccb_h.flags & CAM_TAG_ACTION_VALID)
				hscb->control |= ccb->csio.tag_action;

			/* Map the data; ahc_execute_scb runs as callback. */
			ahc_setup_data(ahc, sim, &ccb->csio, scb);
		}
		break;
	}
	case XPT_NOTIFY_ACK:
	case XPT_IMMED_NOTIFY:
	{
		struct	   ahc_tmode_tstate *tstate;
		struct	   ahc_tmode_lstate *lstate;
		cam_status status;

		status = ahc_find_tmode_devs(ahc, sim, ccb, &tstate,
					     &lstate, TRUE);

		if (status != CAM_REQ_CMP) {
			ccb->ccb_h.status = status;
			xpt_done(ccb);
			break;
		}
		SLIST_INSERT_HEAD(&lstate->immed_notifies, &ccb->ccb_h,
				  sim_links.sle);
		ccb->ccb_h.status = CAM_REQ_INPROG;
		ahc_send_lstate_events(ahc, lstate);
		break;
	}
	case XPT_EN_LUN:		/* Enable LUN as a target */
		ahc_handle_en_lun(ahc, sim, ccb);
		xpt_done(ccb);
		break;
	case XPT_ABORT:			/* Abort the specified CCB */
	{
		/* ahc_abort_ccb completes the CCB itself. */
		ahc_abort_ccb(ahc, sim, ccb);
		break;
	}
	case XPT_SET_TRAN_SETTINGS:
	{
#ifdef AHC_NEW_TRAN_SETTINGS
		struct	ahc_devinfo devinfo;
		struct	ccb_trans_settings *cts;
		struct	ccb_trans_settings_scsi *scsi;
		struct	ccb_trans_settings_spi *spi;
		struct	ahc_initiator_tinfo *tinfo;
		struct	ahc_tmode_tstate *tstate;
		uint16_t *discenable;
		uint16_t *tagenable;
		u_int	update_type;

		cts = &ccb->cts;
		scsi = &cts->proto_specific.scsi;
		spi = &cts->xport_specific.spi;
		ahc_compile_devinfo(&devinfo, SIM_SCSI_ID(ahc, sim),
				    cts->ccb_h.target_id,
				    cts->ccb_h.target_lun,
				    SIM_CHANNEL(ahc, sim),
				    ROLE_UNKNOWN);
		tinfo = ahc_fetch_transinfo(ahc, devinfo.channel,
					    devinfo.our_scsiid,
					    devinfo.target, &tstate);
		/*
		 * Select which settings (goal vs. user) this request
		 * modifies, and which disconnect/tag bitmasks apply.
		 */
		update_type = 0;
		if (cts->type == CTS_TYPE_CURRENT_SETTINGS) {
			update_type |= AHC_TRANS_GOAL;
			discenable = &tstate->discenable;
			tagenable = &tstate->tagenable;
			tinfo->current.protocol_version =
			    cts->protocol_version;
			tinfo->current.transport_version =
			    cts->transport_version;
			tinfo->goal.protocol_version =
			    cts->protocol_version;
			tinfo->goal.transport_version =
			    cts->transport_version;
		} else if (cts->type == CTS_TYPE_USER_SETTINGS) {
			update_type |= AHC_TRANS_USER;
			discenable = &ahc->user_discenable;
			tagenable = &ahc->user_tagenable;
			tinfo->user.protocol_version =
			    cts->protocol_version;
			tinfo->user.transport_version =
			    cts->transport_version;
		} else {
			ccb->ccb_h.status = CAM_REQ_INVALID;
			xpt_done(ccb);
			break;
		}

		ahc_lock(ahc, &s);

		if ((spi->valid & CTS_SPI_VALID_DISC) != 0) {
			if ((spi->flags & CTS_SPI_FLAGS_DISC_ENB) != 0)
				*discenable |= devinfo.target_mask;
			else
				*discenable &= ~devinfo.target_mask;
		}

		if ((scsi->valid & CTS_SCSI_VALID_TQ) != 0) {
			if ((scsi->flags & CTS_SCSI_FLAGS_TAG_ENB) != 0)
				*tagenable |= devinfo.target_mask;
			else
				*tagenable &= ~devinfo.target_mask;
		}

		if ((spi->valid & CTS_SPI_VALID_BUS_WIDTH) != 0) {
			ahc_validate_width(ahc, /*tinfo limit*/NULL,
					   &spi->bus_width, ROLE_UNKNOWN);
			ahc_set_width(ahc, &devinfo, spi->bus_width,
				      update_type, /*paused*/FALSE);
		}

		/*
		 * Fields the caller did not supply are backfilled from
		 * the stored settings before renegotiation below.
		 */
		if ((spi->valid & CTS_SPI_VALID_PPR_OPTIONS) == 0) {
			if (update_type == AHC_TRANS_USER)
				spi->ppr_options = tinfo->user.ppr_options;
			else
				spi->ppr_options = tinfo->goal.ppr_options;
		}

		if ((spi->valid & CTS_SPI_VALID_SYNC_OFFSET) == 0) {
			if (update_type == AHC_TRANS_USER)
				spi->sync_offset = tinfo->user.offset;
			else
				spi->sync_offset = tinfo->goal.offset;
		}

		if ((spi->valid & CTS_SPI_VALID_SYNC_RATE) == 0) {
			if (update_type == AHC_TRANS_USER)
				spi->sync_period = tinfo->user.period;
			else
				spi->sync_period = tinfo->goal.period;
		}

		if (((spi->valid & CTS_SPI_VALID_SYNC_RATE) != 0)
		 || ((spi->valid & CTS_SPI_VALID_SYNC_OFFSET) != 0)) {
			struct ahc_syncrate *syncrate;
			u_int maxsync;

			/* Clamp to the fastest rate this chip supports. */
			if ((ahc->features & AHC_ULTRA2) != 0)
				maxsync = AHC_SYNCRATE_DT;
			else if ((ahc->features & AHC_ULTRA) != 0)
				maxsync = AHC_SYNCRATE_ULTRA;
			else
				maxsync = AHC_SYNCRATE_FAST;

			syncrate = ahc_find_syncrate(ahc, &spi->sync_period,
						     &spi->ppr_options,
						     maxsync);
			ahc_validate_offset(ahc, /*tinfo limit*/NULL,
					    syncrate, &spi->sync_offset,
					    spi->bus_width, ROLE_UNKNOWN);

			/* We use a period of 0 to represent async */
			if (spi->sync_offset == 0) {
				spi->sync_period = 0;
				spi->ppr_options = 0;
			}

			ahc_set_syncrate(ahc, &devinfo, syncrate,
					 spi->sync_period, spi->sync_offset,
					 spi->ppr_options, update_type,
					 /*paused*/FALSE);
		}
		ahc_unlock(ahc, &s);
		ccb->ccb_h.status = CAM_REQ_CMP;
		xpt_done(ccb);
#else
		/* Legacy (pre-AHC_NEW_TRAN_SETTINGS) CCB layout. */
		struct	  ahc_devinfo devinfo;
		struct	  ccb_trans_settings *cts;
		struct	  ahc_initiator_tinfo *tinfo;
		struct	  ahc_tmode_tstate *tstate;
		uint16_t *discenable;
		uint16_t *tagenable;
		u_int	  update_type;
		long	  s;

		cts = &ccb->cts;
		ahc_compile_devinfo(&devinfo, SIM_SCSI_ID(ahc, sim),
				    cts->ccb_h.target_id,
				    cts->ccb_h.target_lun,
				    SIM_CHANNEL(ahc, sim),
				    ROLE_UNKNOWN);
		tinfo = ahc_fetch_transinfo(ahc, devinfo.channel,
					    devinfo.our_scsiid,
					    devinfo.target, &tstate);
		update_type = 0;
		if ((cts->flags & CCB_TRANS_CURRENT_SETTINGS) != 0) {
			update_type |= AHC_TRANS_GOAL;
			discenable = &tstate->discenable;
			tagenable = &tstate->tagenable;
		} else if ((cts->flags & CCB_TRANS_USER_SETTINGS) != 0) {
			update_type |= AHC_TRANS_USER;
			discenable = &ahc->user_discenable;
			tagenable = &ahc->user_tagenable;
		} else {
			ccb->ccb_h.status = CAM_REQ_INVALID;
			xpt_done(ccb);
			break;
		}

		ahc_lock(ahc, &s);

		if ((cts->valid & CCB_TRANS_DISC_VALID) != 0) {
			if ((cts->flags & CCB_TRANS_DISC_ENB) != 0)
				*discenable |= devinfo.target_mask;
			else
				*discenable &= ~devinfo.target_mask;
		}

		if ((cts->valid & CCB_TRANS_TQ_VALID) != 0) {
			if ((cts->flags & CCB_TRANS_TAG_ENB) != 0)
				*tagenable |= devinfo.target_mask;
			else
				*tagenable &= ~devinfo.target_mask;
		}

		if ((cts->valid & CCB_TRANS_BUS_WIDTH_VALID) != 0) {
			ahc_validate_width(ahc, /*tinfo limit*/NULL,
					   &cts->bus_width, ROLE_UNKNOWN);
			ahc_set_width(ahc, &devinfo, cts->bus_width,
				      update_type, /*paused*/FALSE);
		}

		if ((cts->valid & CCB_TRANS_SYNC_OFFSET_VALID) == 0) {
			if (update_type == AHC_TRANS_USER)
				cts->sync_offset = tinfo->user.offset;
			else
				cts->sync_offset = tinfo->goal.offset;
		}

		if ((cts->valid & CCB_TRANS_SYNC_RATE_VALID) == 0) {
			if (update_type == AHC_TRANS_USER)
				cts->sync_period = tinfo->user.period;
			else
				cts->sync_period = tinfo->goal.period;
		}

		if (((cts->valid & CCB_TRANS_SYNC_RATE_VALID) != 0)
		 || ((cts->valid & CCB_TRANS_SYNC_OFFSET_VALID) != 0)) {
			struct ahc_syncrate *syncrate;
			u_int ppr_options;
			u_int maxsync;

			if ((ahc->features & AHC_ULTRA2) != 0)
				maxsync = AHC_SYNCRATE_DT;
			else if ((ahc->features & AHC_ULTRA) != 0)
				maxsync = AHC_SYNCRATE_ULTRA;
			else
				maxsync = AHC_SYNCRATE_FAST;

			/* Periods of 9 (12.5ns) or faster imply DT. */
			ppr_options = 0;
			if (cts->sync_period <= 9)
				ppr_options = MSG_EXT_PPR_DT_REQ;

			syncrate = ahc_find_syncrate(ahc, &cts->sync_period,
						     &ppr_options,
						     maxsync);
			ahc_validate_offset(ahc, /*tinfo limit*/NULL,
					    syncrate, &cts->sync_offset,
					    MSG_EXT_WDTR_BUS_8_BIT,
					    ROLE_UNKNOWN);

			/* We use a period of 0 to represent async */
			if (cts->sync_offset == 0) {
				cts->sync_period = 0;
				ppr_options = 0;
			}

			/* DT requires at least SPI transport version 3. */
			if (ppr_options == MSG_EXT_PPR_DT_REQ
			 && tinfo->user.transport_version >= 3) {
				tinfo->goal.transport_version =
				    tinfo->user.transport_version;
				tinfo->current.transport_version =
				    tinfo->user.transport_version;
			}

			ahc_set_syncrate(ahc, &devinfo, syncrate,
					 cts->sync_period, cts->sync_offset,
					 ppr_options, update_type,
					 /*paused*/FALSE);
		}
		ahc_unlock(ahc, &s);
		ccb->ccb_h.status = CAM_REQ_CMP;
		xpt_done(ccb);
#endif
		break;
	}
	case XPT_GET_TRAN_SETTINGS:
	/* Get default/user set transfer settings for the target */
	{

		ahc_lock(ahc, &s);
		ahc_get_tran_settings(ahc, SIM_SCSI_ID(ahc, sim),
				      SIM_CHANNEL(ahc, sim), &ccb->cts);
		ahc_unlock(ahc, &s);
		xpt_done(ccb);
		break;
	}
	case XPT_CALC_GEOMETRY:
	{
		struct	  ccb_calc_geometry *ccg;
		uint32_t size_mb;
		uint32_t secs_per_cylinder;
		int	  extended;

		ccg = &ccb->ccg;
		size_mb = ccg->volume_size
			/ ((1024L * 1024L) / ccg->block_size);
		extended = SIM_IS_SCSIBUS_B(ahc, sim)
			? ahc->flags & AHC_EXTENDED_TRANS_B
			: ahc->flags & AHC_EXTENDED_TRANS_A;

		/*
		 * Extended translation (255 heads/63 sectors) for disks
		 * over 1GB when enabled; otherwise the classic 64/32.
		 */
		if (size_mb > 1024 && extended) {
			ccg->heads = 255;
			ccg->secs_per_track = 63;
		} else {
			ccg->heads = 64;
			ccg->secs_per_track = 32;
		}
		secs_per_cylinder = ccg->heads * ccg->secs_per_track;
		ccg->cylinders = ccg->volume_size / secs_per_cylinder;
		ccb->ccb_h.status = CAM_REQ_CMP;
		xpt_done(ccb);
		break;
	}
	case XPT_RESET_BUS:		/* Reset the specified SCSI bus */
	{
		int  found;

		ahc_lock(ahc, &s);
		found = ahc_reset_channel(ahc, SIM_CHANNEL(ahc, sim),
					  /*initiate reset*/TRUE);
		ahc_unlock(ahc, &s);
		if (bootverbose) {
			xpt_print_path(SIM_PATH(ahc, sim));
			printf("SCSI bus reset delivered. "
			       "%d SCBs aborted.\n", found);
		}
		ccb->ccb_h.status = CAM_REQ_CMP;
		xpt_done(ccb);
		break;
	}
	case XPT_TERM_IO:		/* Terminate the I/O process */
		/* XXX Implement */
		ccb->ccb_h.status = CAM_REQ_INVALID;
		xpt_done(ccb);
		break;
	case XPT_PATH_INQ:		/* Path routing inquiry */
	{
		struct ccb_pathinq *cpi = &ccb->cpi;

		cpi->version_num = 1; /* XXX??? */
		cpi->hba_inquiry = PI_SDTR_ABLE|PI_TAG_ABLE;
		if ((ahc->features & AHC_WIDE) != 0)
			cpi->hba_inquiry |= PI_WIDE_16;
		if ((ahc->features & AHC_TARGETMODE) != 0) {
			cpi->target_sprt = PIT_PROCESSOR
					 | PIT_DISCONNECT
					 | PIT_TERM_IO;
		} else {
			cpi->target_sprt = 0;
		}
		cpi->hba_misc = 0;
		cpi->hba_eng_cnt = 0;
		cpi->max_target = (ahc->features & AHC_WIDE) ? 15 : 7;
		cpi->max_lun = AHC_NUM_LUNS - 1;
		if (SIM_IS_SCSIBUS_B(ahc, sim)) {
			cpi->initiator_id = ahc->our_id_b;
			if ((ahc->flags & AHC_RESET_BUS_B) == 0)
				cpi->hba_misc |= PIM_NOBUSRESET;
		} else {
			cpi->initiator_id = ahc->our_id;
			if ((ahc->flags & AHC_RESET_BUS_A) == 0)
				cpi->hba_misc |= PIM_NOBUSRESET;
		}
		cpi->bus_id = cam_sim_bus(sim);
		cpi->base_transfer_speed = 3300;
		strncpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN);
		strncpy(cpi->hba_vid, "Adaptec", HBA_IDLEN);
		strncpy(cpi->dev_name, cam_sim_name(sim), DEV_IDLEN);
		cpi->unit_number = cam_sim_unit(sim);
#ifdef AHC_NEW_TRAN_SETTINGS
		cpi->protocol = PROTO_SCSI;
		cpi->protocol_version = SCSI_REV_2;
		cpi->transport = XPORT_SPI;
		cpi->transport_version = 2;
		cpi->xport_specific.spi.ppr_options = SID_SPI_CLOCK_ST;
		if ((ahc->features & AHC_DT) != 0) {
			cpi->transport_version = 3;
			cpi->xport_specific.spi.ppr_options =
			    SID_SPI_CLOCK_DT_ST;
		}
#endif
		cpi->ccb_h.status = CAM_REQ_CMP;
		xpt_done(ccb);
		break;
	}
	default:
		ccb->ccb_h.status = CAM_PROVIDE_FAIL;
		xpt_done(ccb);
		break;
	}
}
911
/*
 * Report the current or user-default transfer negotiation settings
 * (sync rate/offset, bus width, disconnect and tagged-queuing enables)
 * for the device addressed by cts into the caller's CCB.
 *
 * NOTE(review): in the AHC_NEW_TRAN_SETTINGS arm the caller is expected
 * to hold the ahc lock (ahc_action locks around the call); the legacy
 * arm takes the lock itself — confirm callers before changing locking.
 */
static void
ahc_get_tran_settings(struct ahc_softc *ahc, int our_id, char channel,
		      struct ccb_trans_settings *cts)
{
#ifdef AHC_NEW_TRAN_SETTINGS
	struct	ahc_devinfo devinfo;
	struct	ccb_trans_settings_scsi *scsi;
	struct	ccb_trans_settings_spi *spi;
	struct	ahc_initiator_tinfo *targ_info;
	struct	ahc_tmode_tstate *tstate;
	struct	ahc_transinfo *tinfo;

	scsi = &cts->proto_specific.scsi;
	spi = &cts->xport_specific.spi;
	ahc_compile_devinfo(&devinfo, our_id,
			    cts->ccb_h.target_id,
			    cts->ccb_h.target_lun,
			    channel, ROLE_UNKNOWN);
	targ_info = ahc_fetch_transinfo(ahc, devinfo.channel,
					devinfo.our_scsiid,
					devinfo.target, &tstate);

	/* Pick which stored settings the caller asked about. */
	if (cts->type == CTS_TYPE_CURRENT_SETTINGS)
		tinfo = &targ_info->current;
	else
		tinfo = &targ_info->user;

	scsi->flags &= ~CTS_SCSI_FLAGS_TAG_ENB;
	spi->flags &= ~CTS_SPI_FLAGS_DISC_ENB;
	if (cts->type == CTS_TYPE_USER_SETTINGS) {
		if ((ahc->user_discenable & devinfo.target_mask) != 0)
			spi->flags |= CTS_SPI_FLAGS_DISC_ENB;

		if ((ahc->user_tagenable & devinfo.target_mask) != 0)
			scsi->flags |= CTS_SCSI_FLAGS_TAG_ENB;
	} else {
		if ((tstate->discenable & devinfo.target_mask) != 0)
			spi->flags |= CTS_SPI_FLAGS_DISC_ENB;

		if ((tstate->tagenable & devinfo.target_mask) != 0)
			scsi->flags |= CTS_SCSI_FLAGS_TAG_ENB;
	}
	cts->protocol_version = tinfo->protocol_version;
	cts->transport_version = tinfo->transport_version;

	spi->sync_period = tinfo->period;
	spi->sync_offset = tinfo->offset;
	spi->bus_width = tinfo->width;
	spi->ppr_options = tinfo->ppr_options;

	cts->protocol = PROTO_SCSI;
	cts->transport = XPORT_SPI;
	spi->valid = CTS_SPI_VALID_SYNC_RATE
		   | CTS_SPI_VALID_SYNC_OFFSET
		   | CTS_SPI_VALID_BUS_WIDTH
		   | CTS_SPI_VALID_PPR_OPTIONS;

	/* Disconnect/TQ are per-LUN answers, meaningless for wildcards. */
	if (cts->ccb_h.target_lun != CAM_LUN_WILDCARD) {
		scsi->valid = CTS_SCSI_VALID_TQ;
		spi->valid |= CTS_SPI_VALID_DISC;
	} else {
		scsi->valid = 0;
	}

	cts->ccb_h.status = CAM_REQ_CMP;
#else
	/* Legacy (pre-AHC_NEW_TRAN_SETTINGS) CCB layout. */
	struct	ahc_devinfo devinfo;
	struct	ahc_initiator_tinfo *targ_info;
	struct	ahc_tmode_tstate *tstate;
	struct	ahc_transinfo *tinfo;
	long	s;

	ahc_compile_devinfo(&devinfo, our_id,
			    cts->ccb_h.target_id,
			    cts->ccb_h.target_lun,
			    channel, ROLE_UNKNOWN);
	targ_info = ahc_fetch_transinfo(ahc, devinfo.channel,
					devinfo.our_scsiid,
					devinfo.target, &tstate);

	if ((cts->flags & CCB_TRANS_CURRENT_SETTINGS) != 0)
		tinfo = &targ_info->current;
	else
		tinfo = &targ_info->user;

	ahc_lock(ahc, &s);

	cts->flags &= ~(CCB_TRANS_DISC_ENB|CCB_TRANS_TAG_ENB);
	if ((cts->flags & CCB_TRANS_CURRENT_SETTINGS) == 0) {
		if ((ahc->user_discenable & devinfo.target_mask) != 0)
			cts->flags |= CCB_TRANS_DISC_ENB;

		if ((ahc->user_tagenable & devinfo.target_mask) != 0)
			cts->flags |= CCB_TRANS_TAG_ENB;
	} else {
		if ((tstate->discenable & devinfo.target_mask) != 0)
			cts->flags |= CCB_TRANS_DISC_ENB;

		if ((tstate->tagenable & devinfo.target_mask) != 0)
			cts->flags |= CCB_TRANS_TAG_ENB;
	}
	cts->sync_period = tinfo->period;
	cts->sync_offset = tinfo->offset;
	cts->bus_width = tinfo->width;

	ahc_unlock(ahc, &s);

	cts->valid = CCB_TRANS_SYNC_RATE_VALID
		   | CCB_TRANS_SYNC_OFFSET_VALID
		   | CCB_TRANS_BUS_WIDTH_VALID;

	if (cts->ccb_h.target_lun != CAM_LUN_WILDCARD)
		cts->valid |= CCB_TRANS_DISC_VALID|CCB_TRANS_TQ_VALID;

	cts->ccb_h.status = CAM_REQ_CMP;
#endif
}
1029
1030static void
1031ahc_async(void *callback_arg, uint32_t code, struct cam_path *path, void *arg)
1032{
1033	struct ahc_softc *ahc;
1034	struct cam_sim *sim;
1035
1036	sim = (struct cam_sim *)callback_arg;
1037	ahc = (struct ahc_softc *)cam_sim_softc(sim);
1038	switch (code) {
1039	case AC_LOST_DEVICE:
1040	{
1041		struct	ahc_devinfo devinfo;
1042		long	s;
1043
1044		ahc_compile_devinfo(&devinfo, SIM_SCSI_ID(ahc, sim),
1045				    xpt_path_target_id(path),
1046				    xpt_path_lun_id(path),
1047				    SIM_CHANNEL(ahc, sim),
1048				    ROLE_UNKNOWN);
1049
1050		/*
1051		 * Revert to async/narrow transfers
1052		 * for the next device.
1053		 */
1054		ahc_lock(ahc, &s);
1055		ahc_set_width(ahc, &devinfo, MSG_EXT_WDTR_BUS_8_BIT,
1056			      AHC_TRANS_GOAL|AHC_TRANS_CUR, /*paused*/FALSE);
1057		ahc_set_syncrate(ahc, &devinfo, /*syncrate*/NULL,
1058				 /*period*/0, /*offset*/0, /*ppr_options*/0,
1059				 AHC_TRANS_GOAL|AHC_TRANS_CUR,
1060				 /*paused*/FALSE);
1061		ahc_unlock(ahc, &s);
1062		break;
1063	}
1064	default:
1065		break;
1066	}
1067}
1068
/*
 * Callback invoked by bus_dmamap_load() (or called directly from
 * ahc_setup_data()) once a transaction's data buffer has been mapped
 * for DMA.  Builds the hardware SCB's S/G list, fills in negotiated
 * transfer parameters, arms the CCB timeout, and hands the SCB to the
 * controller.
 *
 * arg       - the struct scb for this transaction.
 * dm_segs   - DMA segments describing the data buffer.
 * nsegments - number of valid entries in dm_segs (0 == no data phase).
 * error     - non-zero if the DMA load itself failed (e.g. EFBIG).
 */
static void
ahc_execute_scb(void *arg, bus_dma_segment_t *dm_segs, int nsegments,
		int error)
{
	struct	scb *scb;
	union	ccb *ccb;
	struct	ahc_softc *ahc;
	struct	ahc_initiator_tinfo *tinfo;
	struct	ahc_tmode_tstate *tstate;
	u_int	mask;
	long	s;

	scb = (struct scb *)arg;
	ccb = scb->io_ctx;
	ahc = scb->ahc_softc;

	/* DMA mapping failed; fail the CCB and release our resources. */
	if (error != 0) {
		if (error == EFBIG)
			ahc_set_transaction_status(scb, CAM_REQ_TOO_BIG);
		else
			ahc_set_transaction_status(scb, CAM_REQ_CMP_ERR);
		if (nsegments != 0)
			bus_dmamap_unload(ahc->buffer_dmat, scb->dmamap);
		ahc_lock(ahc, &s);
		ahc_free_scb(ahc, scb);
		ahc_unlock(ahc, &s);
		xpt_done(ccb);
		return;
	}
	if (nsegments != 0) {
		struct	  ahc_dma_seg *sg;
		bus_dma_segment_t *end_seg;
		bus_dmasync_op_t op;

		end_seg = dm_segs + nsegments;

		/* Copy the segments into our SG list */
		sg = scb->sg_list;
		while (dm_segs < end_seg) {
			sg->addr = dm_segs->ds_addr;
			/* XXX Add in the 5th byte of the address later. */
			sg->len = dm_segs->ds_len;
			sg++;
			dm_segs++;
		}

		/*
		 * Note where to find the SG entries in bus space.
		 * We also set the full residual flag which the
		 * sequencer will clear as soon as a data transfer
		 * occurs.
		 */
		scb->hscb->sgptr = scb->sg_list_phys | SG_FULL_RESID;

		if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN)
			op = BUS_DMASYNC_PREREAD;
		else
			op = BUS_DMASYNC_PREWRITE;

		bus_dmamap_sync(ahc->buffer_dmat, scb->dmamap, op);

		if (ccb->ccb_h.func_code == XPT_CONT_TARGET_IO) {
			struct target_data *tdata;

			tdata = &scb->hscb->shared_data.tdata;
			tdata->target_phases |= DPHASE_PENDING;
			if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_OUT)
				tdata->data_phase = P_DATAOUT;
			else
				tdata->data_phase = P_DATAIN;

			/*
			 * If the transfer is of an odd length and in the
			 * "in" direction (scsi->HostBus), then it may
			 * trigger a bug in the 'WideODD' feature of
			 * non-Ultra2 chips.  Force the total data-length
			 * to be even by adding an extra, 1 byte, SG,
			 * element.  We do this even if we are not currently
			 * negotiated wide as negotiation could occur before
			 * this command is executed.
			 */
			if ((ahc->bugs & AHC_TMODE_WIDEODD_BUG) != 0
			 && (ccb->csio.dxfer_len & 0x1) != 0
			 && (ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {

				nsegments++;
				if (nsegments > AHC_NSEG) {
					/* No room left for the pad segment. */
					ahc_set_transaction_status(scb,
					    CAM_REQ_TOO_BIG);
					bus_dmamap_unload(ahc->buffer_dmat,
							  scb->dmamap);
					ahc_lock(ahc, &s);
					ahc_free_scb(ahc, scb);
					ahc_unlock(ahc, &s);
					xpt_done(ccb);
					return;
				}
				sg->addr = ahc->dma_bug_buf;
				sg->len = 1;
				sg++;
			}
		}
		/* Flag the final segment so the sequencer knows to stop. */
		sg--;
		sg->len |= AHC_DMA_LAST_SEG;

		/* Copy the first SG into the "current" data pointer area */
		scb->hscb->dataptr = scb->sg_list->addr;
		scb->hscb->datacnt = scb->sg_list->len;
	} else {
		/* No data phase for this transaction. */
		scb->hscb->sgptr = SG_LIST_NULL;
		scb->hscb->dataptr = 0;
		scb->hscb->datacnt = 0;
	}

	scb->sg_count = nsegments;

	ahc_lock(ahc, &s);

	/*
	 * Last time we need to check if this SCB needs to
	 * be aborted.
	 */
	if (ahc_get_transaction_status(scb) != CAM_REQ_INPROG) {
		if (nsegments != 0)
			bus_dmamap_unload(ahc->buffer_dmat,
					  scb->dmamap);
		ahc_free_scb(ahc, scb);
		ahc_unlock(ahc, &s);
		xpt_done(ccb);
		return;
	}

	/* Fill in the negotiated transfer parameters for this device. */
	tinfo = ahc_fetch_transinfo(ahc, SCSIID_CHANNEL(ahc, scb->hscb->scsiid),
				    SCSIID_OUR_ID(scb->hscb->scsiid),
				    SCSIID_TARGET(ahc, scb->hscb->scsiid),
				    &tstate);

	mask = SCB_GET_TARGET_MASK(ahc, scb);
	scb->hscb->scsirate = tinfo->scsirate;
	scb->hscb->scsioffset = tinfo->current.offset;
	if ((tstate->ultraenb & mask) != 0)
		scb->hscb->control |= ULTRAENB;

	if ((tstate->discenable & mask) != 0
	 && (ccb->ccb_h.flags & CAM_DIS_DISCONNECT) == 0)
		scb->hscb->control |= DISCENB;

	/*
	 * Set MK_MESSAGE when a (re)negotiation is pending so the
	 * sequencer allows us to send the negotiation message.
	 */
	if ((ccb->ccb_h.flags & CAM_NEGOTIATE) != 0
	 && (tinfo->goal.width != 0
	  || tinfo->goal.period != 0
	  || tinfo->goal.ppr_options != 0)) {
		scb->flags |= SCB_NEGOTIATE;
		scb->hscb->control |= MK_MESSAGE;
	} else if ((tstate->auto_negotiate & mask) != 0) {
		scb->flags |= SCB_AUTO_NEGOTIATE;
		scb->hscb->control |= MK_MESSAGE;
	}

	LIST_INSERT_HEAD(&ahc->pending_scbs, scb, pending_links);

	ccb->ccb_h.status |= CAM_SIM_QUEUED;

	/* Arm the CCB timeout; CAM timeouts are in milliseconds. */
	if (ccb->ccb_h.timeout != CAM_TIME_INFINITY) {
		uint64_t time;

		if (ccb->ccb_h.timeout == CAM_TIME_DEFAULT)
			ccb->ccb_h.timeout = 5 * 1000;

		time = ccb->ccb_h.timeout;
		time *= hz;
		time /= 1000;
		ccb->ccb_h.timeout_ch =
		    timeout(ahc_timeout, (caddr_t)scb, time);
	}

	/*
	 * We only allow one untagged transaction
	 * per target in the initiator role unless
	 * we are storing a full busy target *lun*
	 * table in SCB space.
	 */
	if ((scb->hscb->control & (TARGET_SCB|TAG_ENB)) == 0
	 && (ahc->flags & AHC_SCB_BTT) == 0) {
		struct scb_tailq *untagged_q;
		int target_offset;

		target_offset = SCB_GET_TARGET_OFFSET(ahc, scb);
		untagged_q = &(ahc->untagged_queues[target_offset]);
		TAILQ_INSERT_TAIL(untagged_q, scb, links.tqe);
		scb->flags |= SCB_UNTAGGEDQ;
		/*
		 * If another untagged command is already outstanding for
		 * this target, leave the SCB queued; it will be started
		 * when it reaches the head of the untagged queue.
		 */
		if (TAILQ_FIRST(untagged_q) != scb) {
			ahc_unlock(ahc, &s);
			return;
		}
	}
	scb->flags |= SCB_ACTIVE;

	if ((scb->flags & SCB_TARGET_IMMEDIATE) != 0) {
		/* Define a mapping from our tag to the SCB. */
		ahc->scb_data->scbindex[scb->hscb->tag] = scb;
		/*
		 * Hand the SCB directly to the (paused) sequencer so it
		 * can continue the in-progress target-mode transaction.
		 */
		ahc_pause(ahc);
		if ((ahc->flags & AHC_PAGESCBS) == 0)
			ahc_outb(ahc, SCBPTR, scb->hscb->tag);
		ahc_outb(ahc, SCB_TAG, scb->hscb->tag);
		ahc_outb(ahc, RETURN_1, CONT_MSG_LOOP);
		ahc_unpause(ahc);
	} else {
		ahc_queue_scb(ahc, scb);
	}

	ahc_unlock(ahc, &s);
}
1282
/*
 * Polled-mode entry point: service the controller by invoking the
 * interrupt handler by hand.
 */
static void
ahc_poll(struct cam_sim *sim)
{
	struct ahc_softc *ahc;

	ahc = cam_sim_softc(sim);
	ahc_intr(ahc);
}
1288
/*
 * Prepare a transaction's data phase: copy the CDB into the hardware
 * SCB and map the data buffer for DMA.  The remainder of the setup and
 * the actual queuing happen in ahc_execute_scb(), invoked either
 * directly below or as the bus_dmamap_load() callback.
 */
static void
ahc_setup_data(struct ahc_softc *ahc, struct cam_sim *sim,
	       struct ccb_scsiio *csio, struct scb *scb)
{
	struct hardware_scb *hscb;
	struct ccb_hdr *ccb_h;

	hscb = scb->hscb;
	ccb_h = &csio->ccb_h;

	if (ccb_h->func_code == XPT_SCSI_IO) {
		hscb->cdb_len = csio->cdb_len;
		if ((ccb_h->flags & CAM_CDB_POINTER) != 0) {
			/*
			 * Reject CDBs larger than our cdb32 buffer and
			 * CDBs passed by physical address.
			 */
			if (hscb->cdb_len > sizeof(hscb->cdb32)
			 || (ccb_h->flags & CAM_CDB_PHYS) != 0) {
				u_long s;

				ahc_set_transaction_status(scb,
							   CAM_REQ_INVALID);
				ahc_lock(ahc, &s);
				ahc_free_scb(ahc, scb);
				ahc_unlock(ahc, &s);
				xpt_done((union ccb *)csio);
				return;
			}
			/* CDBs longer than 12 bytes go in the cdb32 area. */
			if (hscb->cdb_len > 12) {
				memcpy(hscb->cdb32,
				       csio->cdb_io.cdb_ptr,
				       hscb->cdb_len);
				scb->flags |= SCB_CDB32_PTR;
			} else {
				memcpy(hscb->shared_data.cdb,
				       csio->cdb_io.cdb_ptr,
				       hscb->cdb_len);
			}
		} else {
			/* CDB bytes are embedded in the CCB itself. */
			if (hscb->cdb_len > 12) {
				memcpy(hscb->cdb32, csio->cdb_io.cdb_bytes,
				       hscb->cdb_len);
				scb->flags |= SCB_CDB32_PTR;
			} else {
				memcpy(hscb->shared_data.cdb,
				       csio->cdb_io.cdb_bytes,
				       hscb->cdb_len);
			}
		}
	}

	/* Only use S/G if there is a transfer */
	if ((ccb_h->flags & CAM_DIR_MASK) != CAM_DIR_NONE) {
		if ((ccb_h->flags & CAM_SCATTER_VALID) == 0) {
			/* We've been given a pointer to a single buffer */
			if ((ccb_h->flags & CAM_DATA_PHYS) == 0) {
				int s;
				int error;

				/* Virtual address: map it for DMA. */
				s = splsoftvm();
				error = bus_dmamap_load(ahc->buffer_dmat,
							scb->dmamap,
							csio->data_ptr,
							csio->dxfer_len,
							ahc_execute_scb,
							scb, /*flags*/0);
				if (error == EINPROGRESS) {
					/*
					 * So as to maintain ordering,
					 * freeze the controller queue
					 * until our mapping is
					 * returned.
					 */
					xpt_freeze_simq(sim,
							/*count*/1);
					scb->io_ctx->ccb_h.status |=
					    CAM_RELEASE_SIMQ;
				}
				splx(s);
			} else {
				struct bus_dma_segment seg;

				/* Pointer to physical buffer */
				if (csio->dxfer_len > AHC_MAXTRANSFER_SIZE)
					panic("ahc_setup_data - Transfer size "
					      "larger than can device max");

				seg.ds_addr = (bus_addr_t)csio->data_ptr;
				seg.ds_len = csio->dxfer_len;
				ahc_execute_scb(scb, &seg, 1, 0);
			}
		} else {
			struct bus_dma_segment *segs;

			if ((ccb_h->flags & CAM_DATA_PHYS) != 0)
				panic("ahc_setup_data - Physical segment "
				      "pointers unsupported");

			if ((ccb_h->flags & CAM_SG_LIST_PHYS) == 0)
				panic("ahc_setup_data - Virtual segment "
				      "addresses unsupported");

			/* Just use the segments provided */
			segs = (struct bus_dma_segment *)csio->data_ptr;
			ahc_execute_scb(scb, segs, csio->sglist_cnt, 0);
		}
	} else {
		/* No data phase: run the completion path immediately. */
		ahc_execute_scb(scb, NULL, 0, 0);
	}
}
1397
1398static void
1399ahc_set_recoveryscb(struct ahc_softc *ahc, struct scb *scb) {
1400
1401	if ((scb->flags & SCB_RECOVERY_SCB) == 0) {
1402		struct scb *list_scb;
1403
1404		scb->flags |= SCB_RECOVERY_SCB;
1405
1406		/*
1407		 * Take all queued, but not sent SCBs out of the equation.
1408		 * Also ensure that no new CCBs are queued to us while we
1409		 * try to fix this problem.
1410		 */
1411		if ((scb->io_ctx->ccb_h.status & CAM_RELEASE_SIMQ) == 0) {
1412			xpt_freeze_simq(SCB_GET_SIM(ahc, scb), /*count*/1);
1413			scb->io_ctx->ccb_h.status |= CAM_RELEASE_SIMQ;
1414		}
1415
1416		/*
1417		 * Go through all of our pending SCBs and remove
1418		 * any scheduled timeouts for them.  We will reschedule
1419		 * them after we've successfully fixed this problem.
1420		 */
1421		LIST_FOREACH(list_scb, &ahc->pending_scbs, pending_links) {
1422			union ccb *ccb;
1423
1424			ccb = list_scb->io_ctx;
1425			untimeout(ahc_timeout, list_scb, ccb->ccb_h.timeout_ch);
1426		}
1427	}
1428}
1429
/*
 * Handle a CCB timeout.  Recovery escalates in steps: first attempt a
 * bus device reset (BDR) message to the timed-out target, and if the
 * SCB has already been through a reset/abort attempt (or no gentler
 * option exists), perform a full channel reset.  Runs as a timeout(9)
 * callout; the sequencer is paused while controller state is examined.
 */
void
ahc_timeout(void *arg)
{
	struct	scb *scb;
	struct	ahc_softc *ahc;
	long	s;
	int	found;
	u_int	last_phase;
	int	target;
	int	lun;
	int	i;
	char	channel;

	scb = (struct scb *)arg;
	ahc = (struct ahc_softc *)scb->ahc_softc;

	ahc_lock(ahc, &s);

	/* Pause the sequencer and drain any outstanding work first. */
	ahc_pause_and_flushwork(ahc);

	if ((scb->flags & SCB_ACTIVE) == 0) {
		/* Previous timeout took care of me already */
		printf("%s: Timedout SCB already complete. "
		       "Interrupts may not be functioning.\n", ahc_name(ahc));
		ahc_unpause(ahc);
		ahc_unlock(ahc, &s);
		return;
	}

	target = SCB_GET_TARGET(ahc, scb);
	channel = SCB_GET_CHANNEL(ahc, scb);
	lun = SCB_GET_LUN(scb);

	/* Log diagnostic state for the timed-out command. */
	ahc_print_path(ahc, scb);
	printf("SCB 0x%x - timed out\n", scb->hscb->tag);
	ahc_dump_card_state(ahc);
	last_phase = ahc_inb(ahc, LASTPHASE);
	if (scb->sg_count > 0) {
		for (i = 0; i < scb->sg_count; i++) {
			printf("sg[%d] - Addr 0x%x : Length %d\n",
			       i,
			       scb->sg_list[i].addr,
			       scb->sg_list[i].len & AHC_SG_LEN_MASK);
		}
	}
	if (scb->flags & (SCB_DEVICE_RESET|SCB_ABORT)) {
		/*
		 * Been down this road before.
		 * Do a full bus reset.
		 */
bus_reset:
		ahc_set_transaction_status(scb, CAM_CMD_TIMEOUT);
		found = ahc_reset_channel(ahc, channel, /*Initiate Reset*/TRUE);
		printf("%s: Issued Channel %c Bus Reset. "
		       "%d SCBs aborted\n", ahc_name(ahc), channel, found);
	} else {
		/*
		 * If we are a target, transition to bus free and report
		 * the timeout.
		 *
		 * The target/initiator that is holding up the bus may not
		 * be the same as the one that triggered this timeout
		 * (different commands have different timeout lengths).
		 * If the bus is idle and we are acting as the initiator
		 * for this request, queue a BDR message to the timed out
		 * target.  Otherwise, if the timed out transaction is
		 * active:
		 *   Initiator transaction:
		 *	Stuff the message buffer with a BDR message and assert
		 *	ATN in the hopes that the target will let go of the bus
		 *	and go to the mesgout phase.  If this fails, we'll
		 *	get another timeout 2 seconds later which will attempt
		 *	a bus reset.
		 *
		 *   Target transaction:
		 *	Transition to BUS FREE and report the error.
		 *	It's good to be the target!
		 */
		u_int active_scb_index;
		u_int saved_scbptr;

		saved_scbptr = ahc_inb(ahc, SCBPTR);
		active_scb_index = ahc_inb(ahc, SCB_TAG);

		if (last_phase != P_BUSFREE
		  && (ahc_inb(ahc, SEQ_FLAGS) & IDENTIFY_SEEN) != 0
		  && (active_scb_index < ahc->scb_data->numscbs)) {
			struct scb *active_scb;

			/*
			 * If the active SCB is not us, assume that
			 * the active SCB has a longer timeout than
			 * the timedout SCB, and wait for the active
			 * SCB to timeout.
			 */
			active_scb = ahc_lookup_scb(ahc, active_scb_index);
			if (active_scb != scb) {
				struct	 ccb_hdr *ccbh;
				uint64_t newtimeout;

				ahc_print_path(ahc, scb);
				printf("Other SCB Timeout%s",
			 	       (scb->flags & SCB_OTHERTCL_TIMEOUT) != 0
				       ? " again\n" : "\n");
				scb->flags |= SCB_OTHERTCL_TIMEOUT;
				/* Re-arm with the larger of the two timeouts. */
				newtimeout =
				    MAX(active_scb->io_ctx->ccb_h.timeout,
					scb->io_ctx->ccb_h.timeout);
				newtimeout *= hz;
				newtimeout /= 1000;
				ccbh = &scb->io_ctx->ccb_h;
				scb->io_ctx->ccb_h.timeout_ch =
				    timeout(ahc_timeout, scb, newtimeout);
				ahc_unpause(ahc);
				ahc_unlock(ahc, &s);
				return;
			}

			/* It's us */
			if ((scb->hscb->control & TARGET_SCB) != 0) {

				/*
				 * Send back any queued up transactions
				 * and properly record the error condition.
				 */
				ahc_freeze_devq(ahc, scb);
				ahc_set_transaction_status(scb,
							   CAM_CMD_TIMEOUT);
				ahc_freeze_scb(scb);
				ahc_done(ahc, scb);

				/* Will clear us from the bus */
				ahc_restart(ahc);
				ahc_unlock(ahc, &s);
				return;
			}

			/*
			 * Ask the target to release the bus: place a BDR
			 * message in the message buffer and assert ATN.
			 */
			ahc_set_recoveryscb(ahc, active_scb);
			ahc_outb(ahc, MSG_OUT, HOST_MSG);
			ahc_outb(ahc, SCSISIGO, last_phase|ATNO);
			ahc_print_path(ahc, active_scb);
			printf("BDR message in message buffer\n");
			active_scb->flags |= SCB_DEVICE_RESET;
			active_scb->io_ctx->ccb_h.timeout_ch =
			    timeout(ahc_timeout, (caddr_t)active_scb, 2 * hz);
			ahc_unpause(ahc);
		} else {
			int	 disconnected;

			/* XXX Shouldn't panic.  Just punt instead */
			if ((scb->hscb->control & TARGET_SCB) != 0)
				panic("Timed-out target SCB but bus idle");

			if (last_phase != P_BUSFREE
			 && (ahc_inb(ahc, SSTAT0) & TARGET) != 0) {
				/* XXX What happened to the SCB? */
				/* Hung target selection.  Goto busfree */
				printf("%s: Hung target selection\n",
				       ahc_name(ahc));
				ahc_restart(ahc);
				ahc_unlock(ahc, &s);
				return;
			}

			/* Still waiting in the qinfifo?  Then it never ran. */
			if (ahc_search_qinfifo(ahc, target, channel, lun,
					       scb->hscb->tag, ROLE_INITIATOR,
					       /*status*/0, SEARCH_COUNT) > 0) {
				disconnected = FALSE;
			} else {
				disconnected = TRUE;
			}

			if (disconnected) {

				ahc_set_recoveryscb(ahc, scb);
				/*
				 * Actually re-queue this SCB in an attempt
				 * to select the device before it reconnects.
				 * In either case (selection or reselection),
				 * we will now issue a target reset to the
				 * timed-out device.
				 *
				 * Set the MK_MESSAGE control bit indicating
				 * that we desire to send a message.  We
				 * also set the disconnected flag since
				 * in the paging case there is no guarantee
				 * that our SCB control byte matches the
				 * version on the card.  We don't want the
				 * sequencer to abort the command thinking
				 * an unsolicited reselection occurred.
				 */
				scb->hscb->control |= MK_MESSAGE|DISCONNECTED;
				scb->flags |= SCB_DEVICE_RESET;

				/*
				 * Remove any cached copy of this SCB in the
				 * disconnected list in preparation for the
				 * queuing of our abort SCB.  We use the
				 * same element in the SCB, SCB_NEXT, for
				 * both the qinfifo and the disconnected list.
				 */
				ahc_search_disc_list(ahc, target, channel,
						     lun, scb->hscb->tag,
						     /*stop_on_first*/TRUE,
						     /*remove*/TRUE,
						     /*save_state*/FALSE);

				/*
				 * In the non-paging case, the sequencer will
				 * never re-reference the in-core SCB.
				 * To make sure we are notified during
				 * reselection, set the MK_MESSAGE flag in
				 * the card's copy of the SCB.
				 */
				if ((ahc->flags & AHC_PAGESCBS) == 0) {
					ahc_outb(ahc, SCBPTR, scb->hscb->tag);
					ahc_outb(ahc, SCB_CONTROL,
						 ahc_inb(ahc, SCB_CONTROL)
						| MK_MESSAGE);
				}

				/*
				 * Clear out any entries in the QINFIFO first
				 * so we are the next SCB for this target
				 * to run.
				 */
				ahc_search_qinfifo(ahc,
						   SCB_GET_TARGET(ahc, scb),
						   channel, SCB_GET_LUN(scb),
						   SCB_LIST_NULL,
						   ROLE_INITIATOR,
						   CAM_REQUEUE_REQ,
						   SEARCH_COMPLETE);
				ahc_print_path(ahc, scb);
				printf("Queuing a BDR SCB\n");
				ahc_qinfifo_requeue_tail(ahc, scb);
				ahc_outb(ahc, SCBPTR, saved_scbptr);
				scb->io_ctx->ccb_h.timeout_ch =
				    timeout(ahc_timeout, (caddr_t)scb, 2 * hz);
				ahc_unpause(ahc);
			} else {
				/* Go "immediately" to the bus reset */
				/* This shouldn't happen */
				ahc_set_recoveryscb(ahc, scb);
				ahc_print_path(ahc, scb);
				printf("SCB %d: Immediate reset.  "
					"Flags = 0x%x\n", scb->hscb->tag,
					scb->flags);
				goto bus_reset;
			}
		}
	}
	ahc_unlock(ahc, &s);
}
1684
/*
 * Implement XPT_ABORT: attempt to abort the CCB referenced by
 * ccb->cab.abort_ccb.  Only target-mode CCBs still queued on one of
 * our internal lists can be aborted here; aborting active SCSI I/O is
 * not implemented.  Completes 'ccb' with the resulting status.
 */
static void
ahc_abort_ccb(struct ahc_softc *ahc, struct cam_sim *sim, union ccb *ccb)
{
	union ccb *abort_ccb;

	abort_ccb = ccb->cab.abort_ccb;
	switch (abort_ccb->ccb_h.func_code) {
	case XPT_ACCEPT_TARGET_IO:
	case XPT_IMMED_NOTIFY:
	case XPT_CONT_TARGET_IO:
	{
		struct ahc_tmode_tstate *tstate;
		struct ahc_tmode_lstate *lstate;
		struct ccb_hdr_slist *list;
		cam_status status;

		status = ahc_find_tmode_devs(ahc, sim, abort_ccb, &tstate,
					     &lstate, TRUE);

		if (status != CAM_REQ_CMP) {
			ccb->ccb_h.status = status;
			break;
		}

		/* Select the list the CCB-to-abort would be waiting on. */
		if (abort_ccb->ccb_h.func_code == XPT_ACCEPT_TARGET_IO)
			list = &lstate->accept_tios;
		else if (abort_ccb->ccb_h.func_code == XPT_IMMED_NOTIFY)
			list = &lstate->immed_notifies;
		else
			list = NULL;

		if (list != NULL) {
			struct ccb_hdr *curelm;
			int found;

			/*
			 * Walk the singly-linked list looking for the
			 * CCB and unlink it if found.
			 */
			curelm = SLIST_FIRST(list);
			found = 0;
			if (curelm == &abort_ccb->ccb_h) {
				found = 1;
				SLIST_REMOVE_HEAD(list, sim_links.sle);
			} else {
				while(curelm != NULL) {
					struct ccb_hdr *nextelm;

					nextelm =
					    SLIST_NEXT(curelm, sim_links.sle);

					if (nextelm == &abort_ccb->ccb_h) {
						found = 1;
						SLIST_NEXT(curelm,
							   sim_links.sle) =
						    SLIST_NEXT(nextelm,
							       sim_links.sle);
						break;
					}
					curelm = nextelm;
				}
			}

			if (found) {
				abort_ccb->ccb_h.status = CAM_REQ_ABORTED;
				xpt_done(abort_ccb);
				ccb->ccb_h.status = CAM_REQ_CMP;
			} else {
				xpt_print_path(abort_ccb->ccb_h.path);
				printf("Not found\n");
				ccb->ccb_h.status = CAM_PATH_INVALID;
			}
			break;
		}
		/* FALLTHROUGH */
	}
	case XPT_SCSI_IO:
		/* XXX Fully implement the hard ones */
		ccb->ccb_h.status = CAM_UA_ABORT;
		break;
	default:
		ccb->ccb_h.status = CAM_REQ_INVALID;
		break;
	}
	xpt_done(ccb);
}
1767
1768void
1769ahc_send_async(struct ahc_softc *ahc, char channel, u_int target,
1770		u_int lun, ac_code code)
1771{
1772	struct	ccb_trans_settings cts;
1773	struct cam_path *path;
1774	void *arg;
1775	int error;
1776
1777	arg = NULL;
1778	error = ahc_create_path(ahc, channel, target, lun, &path);
1779
1780	if (error != CAM_REQ_CMP)
1781		return;
1782
1783	switch (code) {
1784	case AC_TRANSFER_NEG:
1785#ifdef AHC_NEW_TRAN_SETTINGS
1786		cts.type = CTS_TYPE_CURRENT_SETTINGS;
1787#else
1788		cts.flags = CCB_TRANS_CURRENT_SETTINGS;
1789#endif
1790		cts.ccb_h.path = path;
1791		cts.ccb_h.target_id = target;
1792		cts.ccb_h.target_lun = lun;
1793		ahc_get_tran_settings(ahc, channel == 'A' ? ahc->our_id
1794							  : ahc->our_id_b,
1795				      channel, &cts);
1796		arg = &cts;
1797		break;
1798	case AC_SENT_BDR:
1799	case AC_BUS_RESET:
1800		break;
1801	default:
1802		panic("ahc_send_async: Unexpected async event");
1803	}
1804	xpt_async(code, path, arg);
1805	xpt_free_path(path);
1806}
1807
/*
 * Platform hook invoked when a device's tagged-queuing state changes.
 * Intentionally empty on FreeBSD: no OS-side bookkeeping is performed
 * here.
 */
void
ahc_platform_set_tags(struct ahc_softc *ahc,
		      struct ahc_devinfo *devinfo, int enable)
{
}
1813
1814int
1815ahc_platform_alloc(struct ahc_softc *ahc, void *platform_arg)
1816{
1817	ahc->platform_data = malloc(sizeof(struct ahc_platform_data), M_DEVBUF,
1818	    M_NOWAIT | M_ZERO);
1819	if (ahc->platform_data == NULL)
1820		return (ENOMEM);
1821	return (0);
1822}
1823
/*
 * Release all FreeBSD-specific resources held by this controller:
 * bus resources (register window, IRQ), CAM SIMs and paths for both
 * channels, and the shutdown event handler.  Safe against a partially
 * initialized softc — every step is guarded by a NULL check.
 */
void
ahc_platform_free(struct ahc_softc *ahc)
{
	struct ahc_platform_data *pdata;

	pdata = ahc->platform_data;
	if (pdata != NULL) {
		/* Register window mapped for this controller, if any. */
		if (pdata->regs != NULL)
			bus_release_resource(ahc->dev_softc,
					     pdata->regs_res_type,
					     pdata->regs_res_id,
					     pdata->regs);

		if (pdata->irq != NULL)
			bus_release_resource(ahc->dev_softc,
					     pdata->irq_res_type,
					     0, pdata->irq);

		/* Tear down the secondary channel's SIM before the primary. */
		if (pdata->sim_b != NULL) {
			xpt_async(AC_LOST_DEVICE, pdata->path_b, NULL);
			xpt_free_path(pdata->path_b);
			xpt_bus_deregister(cam_sim_path(pdata->sim_b));
			cam_sim_free(pdata->sim_b, /*free_devq*/TRUE);
		}
		if (pdata->sim != NULL) {
			xpt_async(AC_LOST_DEVICE, pdata->path, NULL);
			xpt_free_path(pdata->path);
			xpt_bus_deregister(cam_sim_path(pdata->sim));
			cam_sim_free(pdata->sim, /*free_devq*/TRUE);
		}
		if (pdata->eh != NULL)
			EVENTHANDLER_DEREGISTER(shutdown_final, pdata->eh);
		free(ahc->platform_data, M_DEVBUF);
	}
}
1859
/*
 * Comparison hook used by the core when ordering controllers.
 * Returns 0 for every pair since FreeBSD does not sort softcs.
 */
int
ahc_softc_comp(struct ahc_softc *lahc, struct ahc_softc *rahc)
{
	/* We don't sort softcs under FreeBSD so report equal always */
	return (0);
}
1866
1867int
1868ahc_detach(device_t dev)
1869{
1870	struct ahc_softc *ahc;
1871	u_long s;
1872
1873	device_printf(dev, "detaching device\n");
1874	ahc = device_get_softc(dev);
1875	ahc_lock(ahc, &s);
1876	bus_teardown_intr(dev, ahc->platform_data->irq, ahc->platform_data->ih);
1877	ahc_unlock(ahc, &s);
1878	ahc_free(ahc);
1879	return (0);
1880}
1881
1882#if UNUSED
1883static void
1884ahc_dump_targcmd(struct target_cmd *cmd)
1885{
1886	uint8_t *byte;
1887	uint8_t *last_byte;
1888	int i;
1889
1890	byte = &cmd->initiator_channel;
1891	/* Debugging info for received commands */
1892	last_byte = &cmd[1].initiator_channel;
1893
1894	i = 0;
1895	while (byte < last_byte) {
1896		if (i == 0)
1897			printf("\t");
1898		printf("%#x", *byte++);
1899		i++;
1900		if (i == 8) {
1901			printf("\n");
1902			i = 0;
1903		} else {
1904			printf(", ");
1905		}
1906	}
1907}
1908#endif
1909