aic7xxx_osm.c revision 114621
1/*
2 * Bus independent FreeBSD shim for the aic7xxx based adaptec SCSI controllers
3 *
4 * Copyright (c) 1994-2001 Justin T. Gibbs.
5 * All rights reserved.
6 *
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
9 * are met:
10 * 1. Redistributions of source code must retain the above copyright
11 *    notice, this list of conditions, and the following disclaimer,
12 *    without modification.
13 * 2. The name of the author may not be used to endorse or promote products
14 *    derived from this software without specific prior written permission.
15 *
16 * Alternatively, this software may be distributed under the terms of the
17 * GNU Public License ("GPL").
18 *
19 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
20 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
21 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
22 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
23 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
24 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
25 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
26 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
27 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
28 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
29 * SUCH DAMAGE.
30 *
31 * $Id: //depot/aic7xxx/freebsd/dev/aic7xxx/aic7xxx_osm.c#12 $
32 *
33 * $FreeBSD: head/sys/dev/aic7xxx/aic7xxx_osm.c 114621 2003-05-03 23:55:38Z gibbs $
34 */
35
36#include <dev/aic7xxx/aic7xxx_osm.h>
37#include <dev/aic7xxx/aic7xxx_inline.h>
38
39#ifndef AHC_TMODE_ENABLE
40#define AHC_TMODE_ENABLE 0
41#endif
42
43#define ccb_scb_ptr spriv_ptr0
44
45devclass_t ahc_devclass;
46
47#if UNUSED
48static void	ahc_dump_targcmd(struct target_cmd *cmd);
49#endif
50static int	ahc_modevent(module_t mod, int type, void *data);
51static void	ahc_action(struct cam_sim *sim, union ccb *ccb);
52static void	ahc_get_tran_settings(struct ahc_softc *ahc,
53				      int our_id, char channel,
54				      struct ccb_trans_settings *cts);
55static void	ahc_async(void *callback_arg, uint32_t code,
56			  struct cam_path *path, void *arg);
57static void	ahc_execute_scb(void *arg, bus_dma_segment_t *dm_segs,
58				int nsegments, int error);
59static void	ahc_poll(struct cam_sim *sim);
60static void	ahc_setup_data(struct ahc_softc *ahc, struct cam_sim *sim,
61			       struct ccb_scsiio *csio, struct scb *scb);
62static void	ahc_abort_ccb(struct ahc_softc *ahc, struct cam_sim *sim,
63			      union ccb *ccb);
64static int	ahc_create_path(struct ahc_softc *ahc,
65				char channel, u_int target, u_int lun,
66				struct cam_path **path);
67
68static void	ahc_set_recoveryscb(struct ahc_softc *ahc, struct scb *scb);
69
70static int
71ahc_create_path(struct ahc_softc *ahc, char channel, u_int target,
72	        u_int lun, struct cam_path **path)
73{
74	path_id_t path_id;
75
76	if (channel == 'B')
77		path_id = cam_sim_path(ahc->platform_data->sim_b);
78	else
79		path_id = cam_sim_path(ahc->platform_data->sim);
80
81	return (xpt_create_path(path, /*periph*/NULL,
82				path_id, target, lun));
83}
84
85int
86ahc_map_int(struct ahc_softc *ahc)
87{
88	int error;
89
90	/* Hook up our interrupt handler */
91	error = bus_setup_intr(ahc->dev_softc, ahc->platform_data->irq,
92			       INTR_TYPE_CAM, ahc_platform_intr, ahc,
93			       &ahc->platform_data->ih);
94
95	if (error != 0)
96		device_printf(ahc->dev_softc, "bus_setup_intr() failed: %d\n",
97			      error);
98	return (error);
99}
100
101/*
102 * Attach all the sub-devices we can find
103 */
104int
105ahc_attach(struct ahc_softc *ahc)
106{
107	char   ahc_info[256];
108	struct ccb_setasync csa;
109	struct cam_devq *devq;
110	int bus_id;
111	int bus_id2;
112	struct cam_sim *sim;
113	struct cam_sim *sim2;
114	struct cam_path *path;
115	struct cam_path *path2;
116	long s;
117	int count;
118
119	count = 0;
120	sim = NULL;
121	sim2 = NULL;
122
123	ahc_controller_info(ahc, ahc_info);
124	printf("%s\n", ahc_info);
125	ahc_lock(ahc, &s);
126	/*
127	 * Attach secondary channel first if the user has
128	 * declared it the primary channel.
129	 */
130	if ((ahc->features & AHC_TWIN) != 0
131	 && (ahc->flags & AHC_PRIMARY_CHANNEL) != 0) {
132		bus_id = 1;
133		bus_id2 = 0;
134	} else {
135		bus_id = 0;
136		bus_id2 = 1;
137	}
138
139	/*
140	 * Create the device queue for our SIM(s).
141	 */
142	devq = cam_simq_alloc(AHC_MAX_QUEUE);
143	if (devq == NULL)
144		goto fail;
145
146	/*
147	 * Construct our first channel SIM entry
148	 */
149	sim = cam_sim_alloc(ahc_action, ahc_poll, "ahc", ahc,
150			    device_get_unit(ahc->dev_softc),
151			    1, AHC_MAX_QUEUE, devq);
152	if (sim == NULL) {
153		cam_simq_free(devq);
154		goto fail;
155	}
156
157	if (xpt_bus_register(sim, bus_id) != CAM_SUCCESS) {
158		cam_sim_free(sim, /*free_devq*/TRUE);
159		sim = NULL;
160		goto fail;
161	}
162
163	if (xpt_create_path(&path, /*periph*/NULL,
164			    cam_sim_path(sim), CAM_TARGET_WILDCARD,
165			    CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
166		xpt_bus_deregister(cam_sim_path(sim));
167		cam_sim_free(sim, /*free_devq*/TRUE);
168		sim = NULL;
169		goto fail;
170	}
171
172	xpt_setup_ccb(&csa.ccb_h, path, /*priority*/5);
173	csa.ccb_h.func_code = XPT_SASYNC_CB;
174	csa.event_enable = AC_LOST_DEVICE;
175	csa.callback = ahc_async;
176	csa.callback_arg = sim;
177	xpt_action((union ccb *)&csa);
178	count++;
179
180	if (ahc->features & AHC_TWIN) {
181		sim2 = cam_sim_alloc(ahc_action, ahc_poll, "ahc",
182				    ahc, device_get_unit(ahc->dev_softc), 1,
183				    AHC_MAX_QUEUE, devq);
184
185		if (sim2 == NULL) {
186			printf("ahc_attach: Unable to attach second "
187			       "bus due to resource shortage");
188			goto fail;
189		}
190
191		if (xpt_bus_register(sim2, bus_id2) != CAM_SUCCESS) {
192			printf("ahc_attach: Unable to attach second "
193			       "bus due to resource shortage");
194			/*
195			 * We do not want to destroy the device queue
196			 * because the first bus is using it.
197			 */
198			cam_sim_free(sim2, /*free_devq*/FALSE);
199			goto fail;
200		}
201
202		if (xpt_create_path(&path2, /*periph*/NULL,
203				    cam_sim_path(sim2),
204				    CAM_TARGET_WILDCARD,
205				    CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
206			xpt_bus_deregister(cam_sim_path(sim2));
207			cam_sim_free(sim2, /*free_devq*/FALSE);
208			sim2 = NULL;
209			goto fail;
210		}
211		xpt_setup_ccb(&csa.ccb_h, path2, /*priority*/5);
212		csa.ccb_h.func_code = XPT_SASYNC_CB;
213		csa.event_enable = AC_LOST_DEVICE;
214		csa.callback = ahc_async;
215		csa.callback_arg = sim2;
216		xpt_action((union ccb *)&csa);
217		count++;
218	}
219
220fail:
221	if ((ahc->features & AHC_TWIN) != 0
222	 && (ahc->flags & AHC_PRIMARY_CHANNEL) != 0) {
223		ahc->platform_data->sim_b = sim;
224		ahc->platform_data->path_b = path;
225		ahc->platform_data->sim = sim2;
226		ahc->platform_data->path = path2;
227	} else {
228		ahc->platform_data->sim = sim;
229		ahc->platform_data->path = path;
230		ahc->platform_data->sim_b = sim2;
231		ahc->platform_data->path_b = path2;
232	}
233
234	if (count != 0) {
235		/* We have to wait until after any system dumps... */
236		ahc->platform_data->eh =
237		    EVENTHANDLER_REGISTER(shutdown_final, ahc_shutdown,
238					  ahc, SHUTDOWN_PRI_DEFAULT);
239		ahc_intr_enable(ahc, TRUE);
240	}
241
242	ahc_unlock(ahc, &s);
243	return (count);
244}
245
/*
 * Interrupt handler registered with bus_setup_intr(); the opaque
 * argument is the softc we passed in at registration time.
 */
void
ahc_platform_intr(void *arg)
{
	ahc_intr((struct ahc_softc *)arg);
}
257
/*
 * We have an scb which has been processed by the
 * adaptor, now we look to see how the operation
 * went.
 *
 * Tears down all per-transaction state (pending list, untagged
 * queue, timeout, DMA mappings), finalizes the CAM status, and
 * completes the CCB back to CAM via xpt_done().
 */
void
ahc_done(struct ahc_softc *ahc, struct scb *scb)
{
	union ccb *ccb;

	CAM_DEBUG(scb->io_ctx->ccb_h.path, CAM_DEBUG_TRACE,
		  ("ahc_done - scb %d\n", scb->hscb->tag));

	ccb = scb->io_ctx;
	LIST_REMOVE(scb, pending_links);
	if ((scb->flags & SCB_UNTAGGEDQ) != 0) {
		struct scb_tailq *untagged_q;
		int target_offset;

		/*
		 * This transaction occupied the per-target untagged
		 * slot; release it and kick the next untagged command.
		 */
		target_offset = SCB_GET_TARGET_OFFSET(ahc, scb);
		untagged_q = &ahc->untagged_queues[target_offset];
		TAILQ_REMOVE(untagged_q, scb, links.tqe);
		scb->flags &= ~SCB_UNTAGGEDQ;
		ahc_run_untagged_queue(ahc, untagged_q);
	}

	/* Cancel the watchdog armed when the command was queued. */
	untimeout(ahc_timeout, (caddr_t)scb, ccb->ccb_h.timeout_ch);

	if ((ccb->ccb_h.flags & CAM_DIR_MASK) != CAM_DIR_NONE) {
		/*XXX bus_dmasync_op_t*/int op;

		if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN)
			op = BUS_DMASYNC_POSTREAD;
		else
			op = BUS_DMASYNC_POSTWRITE;
		bus_dmamap_sync(ahc->buffer_dmat, scb->dmamap, op);
		bus_dmamap_unload(ahc->buffer_dmat, scb->dmamap);
	}

	if (ccb->ccb_h.func_code == XPT_CONT_TARGET_IO) {
		struct cam_path *ccb_path;

		/*
		 * If we have finally disconnected, clean up our
		 * pending device state.
		 * XXX - There may be error states that cause where
		 *       we will remain connected.
		 */
		ccb_path = ccb->ccb_h.path;
		if (ahc->pending_device != NULL
		 && xpt_path_comp(ahc->pending_device->path, ccb_path) == 0) {

			if ((ccb->ccb_h.flags & CAM_SEND_STATUS) != 0) {
				ahc->pending_device = NULL;
			} else {
				if (bootverbose) {
					xpt_print_path(ccb->ccb_h.path);
					printf("Still connected\n");
				}
				ahc_freeze_ccb(ccb);
			}
		}

		if (ahc_get_transaction_status(scb) == CAM_REQ_INPROG)
			ccb->ccb_h.status |= CAM_REQ_CMP;
		ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
		ahc_free_scb(ahc, scb);
		xpt_done(ccb);
		return;
	}

	/*
	 * If the recovery SCB completes, we have to be
	 * out of our timeout.
	 */
	if ((scb->flags & SCB_RECOVERY_SCB) != 0) {
		struct	scb *list_scb;

		/*
		 * We were able to complete the command successfully,
		 * so reinstate the timeouts for all other pending
		 * commands.
		 */
		LIST_FOREACH(list_scb, &ahc->pending_scbs, pending_links) {
			/* NOTE: shadows the outer ccb for this loop only. */
			union ccb *ccb;
			uint64_t time;

			ccb = list_scb->io_ctx;
			if (ccb->ccb_h.timeout == CAM_TIME_INFINITY)
				continue;

			/* Convert the CCB's timeout (ms) to ticks. */
			time = ccb->ccb_h.timeout;
			time *= hz;
			time /= 1000;
			ccb->ccb_h.timeout_ch =
			    timeout(ahc_timeout, list_scb, time);
		}

		if (ahc_get_transaction_status(scb) == CAM_BDR_SENT
		 || ahc_get_transaction_status(scb) == CAM_REQ_ABORTED)
			ahc_set_transaction_status(scb, CAM_CMD_TIMEOUT);
		ahc_print_path(ahc, scb);
		printf("no longer in timeout, status = %x\n",
		       ccb->ccb_h.status);
	}

	/* Don't clobber any existing error state */
	if (ahc_get_transaction_status(scb) == CAM_REQ_INPROG) {
		ccb->ccb_h.status |= CAM_REQ_CMP;
	} else if ((scb->flags & SCB_SENSE) != 0) {
		/*
		 * We performed autosense retrieval.
		 *
		 * Zero any sense not transferred by the
		 * device.  The SCSI spec mandates that any
		 * untransfered data should be assumed to be
		 * zero.  Complete the 'bounce' of sense information
		 * through buffers accessible via bus-space by
		 * copying it into the clients csio.
		 */
		memset(&ccb->csio.sense_data, 0, sizeof(ccb->csio.sense_data));
		memcpy(&ccb->csio.sense_data,
		       ahc_get_sense_buf(ahc, scb),
		       (ahc_le32toh(scb->sg_list->len) & AHC_SG_LEN_MASK)
		       - ccb->csio.sense_resid);
		scb->io_ctx->ccb_h.status |= CAM_AUTOSNS_VALID;
	}
	ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
	ahc_free_scb(ahc, scb);
	xpt_done(ccb);
}
389
/*
 * CAM SIM action entry point: dispatch on the CCB function code.
 * Handles target-mode ATIO/CTIO queuing, initiator I/O and device
 * reset, transfer-settings get/set (both old and new CAM transfer
 * settings ABIs), geometry calculation, bus reset, and path inquiry.
 */
static void
ahc_action(struct cam_sim *sim, union ccb *ccb)
{
	struct	ahc_softc *ahc;
	struct	ahc_tmode_lstate *lstate;
	u_int	target_id;
	u_int	our_id;
	long	s;

	CAM_DEBUG(ccb->ccb_h.path, CAM_DEBUG_TRACE, ("ahc_action\n"));

	ahc = (struct ahc_softc *)cam_sim_softc(sim);

	target_id = ccb->ccb_h.target_id;
	our_id = SIM_SCSI_ID(ahc, sim);

	switch (ccb->ccb_h.func_code) {
	/* Common cases first */
	case XPT_ACCEPT_TARGET_IO:	/* Accept Host Target Mode CDB */
	case XPT_CONT_TARGET_IO:/* Continue Host Target I/O Connection*/
	{
		struct	   ahc_tmode_tstate *tstate;
		cam_status status;

		status = ahc_find_tmode_devs(ahc, sim, ccb, &tstate,
					     &lstate, TRUE);

		if (status != CAM_REQ_CMP) {
			if (ccb->ccb_h.func_code == XPT_CONT_TARGET_IO) {
				/* Response from the black hole device */
				tstate = NULL;
				lstate = ahc->black_hole;
			} else {
				ccb->ccb_h.status = status;
				xpt_done(ccb);
				break;
			}
		}
		if (ccb->ccb_h.func_code == XPT_ACCEPT_TARGET_IO) {

			/* Queue the ATIO until a command arrives for it. */
			ahc_lock(ahc, &s);
			SLIST_INSERT_HEAD(&lstate->accept_tios, &ccb->ccb_h,
					  sim_links.sle);
			ccb->ccb_h.status = CAM_REQ_INPROG;
			if ((ahc->flags & AHC_TQINFIFO_BLOCKED) != 0)
				ahc_run_tqinfifo(ahc, /*paused*/FALSE);
			ahc_unlock(ahc, &s);
			break;
		}

		/*
		 * The target_id represents the target we attempt to
		 * select.  In target mode, this is the initiator of
		 * the original command.
		 */
		our_id = target_id;
		target_id = ccb->csio.init_id;
		/* FALLTHROUGH */
	}
	case XPT_SCSI_IO:	/* Execute the requested I/O operation */
	case XPT_RESET_DEV:	/* Bus Device Reset the specified SCSI device */
	{
		struct	scb *scb;
		struct	hardware_scb *hscb;

		/* Initiator-role requests require the initiator role. */
		if ((ahc->flags & AHC_INITIATORROLE) == 0
		 && (ccb->ccb_h.func_code == XPT_SCSI_IO
		  || ccb->ccb_h.func_code == XPT_RESET_DEV)) {
			ccb->ccb_h.status = CAM_PROVIDE_FAIL;
			xpt_done(ccb);
			return;
		}

		/*
		 * get an scb to use.
		 */
		ahc_lock(ahc, &s);
		if ((scb = ahc_get_scb(ahc)) == NULL) {

			/* Out of SCBs: freeze the queue and ask CAM to retry. */
			xpt_freeze_simq(sim, /*count*/1);
			ahc->flags |= AHC_RESOURCE_SHORTAGE;
			ahc_unlock(ahc, &s);
			ccb->ccb_h.status = CAM_REQUEUE_REQ;
			xpt_done(ccb);
			return;
		}
		ahc_unlock(ahc, &s);

		hscb = scb->hscb;

		CAM_DEBUG(ccb->ccb_h.path, CAM_DEBUG_SUBTRACE,
			  ("start scb(%p)\n", scb));
		scb->io_ctx = ccb;
		/*
		 * So we can find the SCB when an abort is requested
		 */
		ccb->ccb_h.ccb_scb_ptr = scb;

		/*
		 * Put all the arguments for the xfer in the scb
		 */
		hscb->control = 0;
		hscb->scsiid = BUILD_SCSIID(ahc, sim, target_id, our_id);
		hscb->lun = ccb->ccb_h.target_lun;
		if (ccb->ccb_h.func_code == XPT_RESET_DEV) {
			/* A BDR carries no CDB; it is delivered as a message. */
			hscb->cdb_len = 0;
			scb->flags |= SCB_DEVICE_RESET;
			hscb->control |= MK_MESSAGE;
			ahc_execute_scb(scb, NULL, 0, 0);
		} else {
			if (ccb->ccb_h.func_code == XPT_CONT_TARGET_IO) {
				struct target_data *tdata;

				tdata = &hscb->shared_data.tdata;
				if (ahc->pending_device == lstate)
					scb->flags |= SCB_TARGET_IMMEDIATE;
				hscb->control |= TARGET_SCB;
				scb->flags |= SCB_TARGET_SCB;
				tdata->target_phases = 0;
				if ((ccb->ccb_h.flags & CAM_SEND_STATUS) != 0) {
					tdata->target_phases |= SPHASE_PENDING;
					tdata->scsi_status =
					    ccb->csio.scsi_status;
				}
	 			if (ccb->ccb_h.flags & CAM_DIS_DISCONNECT)
					tdata->target_phases |= NO_DISCONNECT;

				tdata->initiator_tag = ccb->csio.tag_id;
			}
			if (ccb->ccb_h.flags & CAM_TAG_ACTION_VALID)
				hscb->control |= ccb->csio.tag_action;

			/* Map data buffers; completes via ahc_execute_scb(). */
			ahc_setup_data(ahc, sim, &ccb->csio, scb);
		}
		break;
	}
	case XPT_NOTIFY_ACK:
	case XPT_IMMED_NOTIFY:
	{
		struct	   ahc_tmode_tstate *tstate;
		struct	   ahc_tmode_lstate *lstate;
		cam_status status;

		status = ahc_find_tmode_devs(ahc, sim, ccb, &tstate,
					     &lstate, TRUE);

		if (status != CAM_REQ_CMP) {
			ccb->ccb_h.status = status;
			xpt_done(ccb);
			break;
		}
		SLIST_INSERT_HEAD(&lstate->immed_notifies, &ccb->ccb_h,
				  sim_links.sle);
		ccb->ccb_h.status = CAM_REQ_INPROG;
		ahc_send_lstate_events(ahc, lstate);
		break;
	}
	case XPT_EN_LUN:		/* Enable LUN as a target */
		ahc_handle_en_lun(ahc, sim, ccb);
		xpt_done(ccb);
		break;
	case XPT_ABORT:			/* Abort the specified CCB */
	{
		ahc_abort_ccb(ahc, sim, ccb);
		break;
	}
	case XPT_SET_TRAN_SETTINGS:
	{
#ifdef AHC_NEW_TRAN_SETTINGS
		/* New-style CAM transfer settings (proto/xport split). */
		struct	ahc_devinfo devinfo;
		struct	ccb_trans_settings *cts;
		struct	ccb_trans_settings_scsi *scsi;
		struct	ccb_trans_settings_spi *spi;
		struct	ahc_initiator_tinfo *tinfo;
		struct	ahc_tmode_tstate *tstate;
		uint16_t *discenable;
		uint16_t *tagenable;
		u_int	update_type;

		cts = &ccb->cts;
		scsi = &cts->proto_specific.scsi;
		spi = &cts->xport_specific.spi;
		ahc_compile_devinfo(&devinfo, SIM_SCSI_ID(ahc, sim),
				    cts->ccb_h.target_id,
				    cts->ccb_h.target_lun,
				    SIM_CHANNEL(ahc, sim),
				    ROLE_UNKNOWN);
		tinfo = ahc_fetch_transinfo(ahc, devinfo.channel,
					    devinfo.our_scsiid,
					    devinfo.target, &tstate);
		update_type = 0;
		if (cts->type == CTS_TYPE_CURRENT_SETTINGS) {
			update_type |= AHC_TRANS_GOAL;
			discenable = &tstate->discenable;
			tagenable = &tstate->tagenable;
			tinfo->curr.protocol_version =
			    cts->protocol_version;
			tinfo->curr.transport_version =
			    cts->transport_version;
			tinfo->goal.protocol_version =
			    cts->protocol_version;
			tinfo->goal.transport_version =
			    cts->transport_version;
		} else if (cts->type == CTS_TYPE_USER_SETTINGS) {
			update_type |= AHC_TRANS_USER;
			discenable = &ahc->user_discenable;
			tagenable = &ahc->user_tagenable;
			tinfo->user.protocol_version =
			    cts->protocol_version;
			tinfo->user.transport_version =
			    cts->transport_version;
		} else {
			ccb->ccb_h.status = CAM_REQ_INVALID;
			xpt_done(ccb);
			break;
		}

		ahc_lock(ahc, &s);

		if ((spi->valid & CTS_SPI_VALID_DISC) != 0) {
			if ((spi->flags & CTS_SPI_FLAGS_DISC_ENB) != 0)
				*discenable |= devinfo.target_mask;
			else
				*discenable &= ~devinfo.target_mask;
		}

		if ((scsi->valid & CTS_SCSI_VALID_TQ) != 0) {
			if ((scsi->flags & CTS_SCSI_FLAGS_TAG_ENB) != 0)
				*tagenable |= devinfo.target_mask;
			else
				*tagenable &= ~devinfo.target_mask;
		}

		if ((spi->valid & CTS_SPI_VALID_BUS_WIDTH) != 0) {
			ahc_validate_width(ahc, /*tinfo limit*/NULL,
					   &spi->bus_width, ROLE_UNKNOWN);
			ahc_set_width(ahc, &devinfo, spi->bus_width,
				      update_type, /*paused*/FALSE);
		}

		/*
		 * Fields the caller left invalid are backfilled from the
		 * stored user/goal settings before rate negotiation below.
		 */
		if ((spi->valid & CTS_SPI_VALID_PPR_OPTIONS) == 0) {
			if (update_type == AHC_TRANS_USER)
				spi->ppr_options = tinfo->user.ppr_options;
			else
				spi->ppr_options = tinfo->goal.ppr_options;
		}

		if ((spi->valid & CTS_SPI_VALID_SYNC_OFFSET) == 0) {
			if (update_type == AHC_TRANS_USER)
				spi->sync_offset = tinfo->user.offset;
			else
				spi->sync_offset = tinfo->goal.offset;
		}

		if ((spi->valid & CTS_SPI_VALID_SYNC_RATE) == 0) {
			if (update_type == AHC_TRANS_USER)
				spi->sync_period = tinfo->user.period;
			else
				spi->sync_period = tinfo->goal.period;
		}

		if (((spi->valid & CTS_SPI_VALID_SYNC_RATE) != 0)
		 || ((spi->valid & CTS_SPI_VALID_SYNC_OFFSET) != 0)) {
			struct ahc_syncrate *syncrate;
			u_int maxsync;

			if ((ahc->features & AHC_ULTRA2) != 0)
				maxsync = AHC_SYNCRATE_DT;
			else if ((ahc->features & AHC_ULTRA) != 0)
				maxsync = AHC_SYNCRATE_ULTRA;
			else
				maxsync = AHC_SYNCRATE_FAST;

			/* DT transfers require a 16-bit wide bus. */
			if (spi->bus_width != MSG_EXT_WDTR_BUS_16_BIT)
				spi->ppr_options &= ~MSG_EXT_PPR_DT_REQ;

			syncrate = ahc_find_syncrate(ahc, &spi->sync_period,
						     &spi->ppr_options,
						     maxsync);
			ahc_validate_offset(ahc, /*tinfo limit*/NULL,
					    syncrate, &spi->sync_offset,
					    spi->bus_width, ROLE_UNKNOWN);

			/* We use a period of 0 to represent async */
			if (spi->sync_offset == 0) {
				spi->sync_period = 0;
				spi->ppr_options = 0;
			}

			ahc_set_syncrate(ahc, &devinfo, syncrate,
					 spi->sync_period, spi->sync_offset,
					 spi->ppr_options, update_type,
					 /*paused*/FALSE);
		}
		ahc_unlock(ahc, &s);
		ccb->ccb_h.status = CAM_REQ_CMP;
		xpt_done(ccb);
#else
		/* Old-style CAM transfer settings (flat cts fields). */
		struct	  ahc_devinfo devinfo;
		struct	  ccb_trans_settings *cts;
		struct	  ahc_initiator_tinfo *tinfo;
		struct	  ahc_tmode_tstate *tstate;
		uint16_t *discenable;
		uint16_t *tagenable;
		u_int	  update_type;
		long	  s;	/* NOTE: shadows the function-scope 's'. */

		cts = &ccb->cts;
		ahc_compile_devinfo(&devinfo, SIM_SCSI_ID(ahc, sim),
				    cts->ccb_h.target_id,
				    cts->ccb_h.target_lun,
				    SIM_CHANNEL(ahc, sim),
				    ROLE_UNKNOWN);
		tinfo = ahc_fetch_transinfo(ahc, devinfo.channel,
					    devinfo.our_scsiid,
					    devinfo.target, &tstate);
		update_type = 0;
		if ((cts->flags & CCB_TRANS_CURRENT_SETTINGS) != 0) {
			update_type |= AHC_TRANS_GOAL;
			discenable = &tstate->discenable;
			tagenable = &tstate->tagenable;
		} else if ((cts->flags & CCB_TRANS_USER_SETTINGS) != 0) {
			update_type |= AHC_TRANS_USER;
			discenable = &ahc->user_discenable;
			tagenable = &ahc->user_tagenable;
		} else {
			ccb->ccb_h.status = CAM_REQ_INVALID;
			xpt_done(ccb);
			break;
		}

		ahc_lock(ahc, &s);

		if ((cts->valid & CCB_TRANS_DISC_VALID) != 0) {
			if ((cts->flags & CCB_TRANS_DISC_ENB) != 0)
				*discenable |= devinfo.target_mask;
			else
				*discenable &= ~devinfo.target_mask;
		}

		if ((cts->valid & CCB_TRANS_TQ_VALID) != 0) {
			if ((cts->flags & CCB_TRANS_TAG_ENB) != 0)
				*tagenable |= devinfo.target_mask;
			else
				*tagenable &= ~devinfo.target_mask;
		}

		if ((cts->valid & CCB_TRANS_BUS_WIDTH_VALID) != 0) {
			ahc_validate_width(ahc, /*tinfo limit*/NULL,
					   &cts->bus_width, ROLE_UNKNOWN);
			ahc_set_width(ahc, &devinfo, cts->bus_width,
				      update_type, /*paused*/FALSE);
		}

		if ((cts->valid & CCB_TRANS_SYNC_OFFSET_VALID) == 0) {
			if (update_type == AHC_TRANS_USER)
				cts->sync_offset = tinfo->user.offset;
			else
				cts->sync_offset = tinfo->goal.offset;
		}

		if ((cts->valid & CCB_TRANS_SYNC_RATE_VALID) == 0) {
			if (update_type == AHC_TRANS_USER)
				cts->sync_period = tinfo->user.period;
			else
				cts->sync_period = tinfo->goal.period;
		}

		if (((cts->valid & CCB_TRANS_SYNC_RATE_VALID) != 0)
		 || ((cts->valid & CCB_TRANS_SYNC_OFFSET_VALID) != 0)) {
			struct ahc_syncrate *syncrate;
			u_int ppr_options;
			u_int maxsync;

			if ((ahc->features & AHC_ULTRA2) != 0)
				maxsync = AHC_SYNCRATE_DT;
			else if ((ahc->features & AHC_ULTRA) != 0)
				maxsync = AHC_SYNCRATE_ULTRA;
			else
				maxsync = AHC_SYNCRATE_FAST;

			/* Periods <= 9 (sub-25ns) on a wide bus imply DT. */
			ppr_options = 0;
			if (cts->sync_period <= 9
			 && cts->bus_width == MSG_EXT_WDTR_BUS_16_BIT)
				ppr_options = MSG_EXT_PPR_DT_REQ;

			syncrate = ahc_find_syncrate(ahc, &cts->sync_period,
						     &ppr_options,
						     maxsync);
			ahc_validate_offset(ahc, /*tinfo limit*/NULL,
					    syncrate, &cts->sync_offset,
					    MSG_EXT_WDTR_BUS_8_BIT,
					    ROLE_UNKNOWN);

			/* We use a period of 0 to represent async */
			if (cts->sync_offset == 0) {
				cts->sync_period = 0;
				ppr_options = 0;
			}

			if (ppr_options == MSG_EXT_PPR_DT_REQ
			 && tinfo->user.transport_version >= 3) {
				tinfo->goal.transport_version =
				    tinfo->user.transport_version;
				tinfo->curr.transport_version =
				    tinfo->user.transport_version;
			}

			ahc_set_syncrate(ahc, &devinfo, syncrate,
					 cts->sync_period, cts->sync_offset,
					 ppr_options, update_type,
					 /*paused*/FALSE);
		}
		ahc_unlock(ahc, &s);
		ccb->ccb_h.status = CAM_REQ_CMP;
		xpt_done(ccb);
#endif
		break;
	}
	case XPT_GET_TRAN_SETTINGS:
	/* Get default/user set transfer settings for the target */
	{

		ahc_lock(ahc, &s);
		ahc_get_tran_settings(ahc, SIM_SCSI_ID(ahc, sim),
				      SIM_CHANNEL(ahc, sim), &ccb->cts);
		ahc_unlock(ahc, &s);
		xpt_done(ccb);
		break;
	}
	case XPT_CALC_GEOMETRY:
	{
		struct	  ccb_calc_geometry *ccg;
		uint32_t size_mb;
		uint32_t secs_per_cylinder;
		int	  extended;

		/*
		 * Pick a BIOS-compatible fake geometry: 255/63 for
		 * volumes over 1GB when extended translation is on,
		 * otherwise 64/32.
		 */
		ccg = &ccb->ccg;
		size_mb = ccg->volume_size
			/ ((1024L * 1024L) / ccg->block_size);
		extended = SIM_IS_SCSIBUS_B(ahc, sim)
			? ahc->flags & AHC_EXTENDED_TRANS_B
			: ahc->flags & AHC_EXTENDED_TRANS_A;

		if (size_mb > 1024 && extended) {
			ccg->heads = 255;
			ccg->secs_per_track = 63;
		} else {
			ccg->heads = 64;
			ccg->secs_per_track = 32;
		}
		secs_per_cylinder = ccg->heads * ccg->secs_per_track;
		ccg->cylinders = ccg->volume_size / secs_per_cylinder;
		ccb->ccb_h.status = CAM_REQ_CMP;
		xpt_done(ccb);
		break;
	}
	case XPT_RESET_BUS:		/* Reset the specified SCSI bus */
	{
		int  found;

		ahc_lock(ahc, &s);
		found = ahc_reset_channel(ahc, SIM_CHANNEL(ahc, sim),
					  /*initiate reset*/TRUE);
		ahc_unlock(ahc, &s);
		if (bootverbose) {
			xpt_print_path(SIM_PATH(ahc, sim));
			printf("SCSI bus reset delivered. "
			       "%d SCBs aborted.\n", found);
		}
		ccb->ccb_h.status = CAM_REQ_CMP;
		xpt_done(ccb);
		break;
	}
	case XPT_TERM_IO:		/* Terminate the I/O process */
		/* XXX Implement */
		ccb->ccb_h.status = CAM_REQ_INVALID;
		xpt_done(ccb);
		break;
	case XPT_PATH_INQ:		/* Path routing inquiry */
	{
		struct ccb_pathinq *cpi = &ccb->cpi;

		/* Report controller capabilities to CAM. */
		cpi->version_num = 1; /* XXX??? */
		cpi->hba_inquiry = PI_SDTR_ABLE|PI_TAG_ABLE;
		if ((ahc->features & AHC_WIDE) != 0)
			cpi->hba_inquiry |= PI_WIDE_16;
		if ((ahc->features & AHC_TARGETMODE) != 0) {
			cpi->target_sprt = PIT_PROCESSOR
					 | PIT_DISCONNECT
					 | PIT_TERM_IO;
		} else {
			cpi->target_sprt = 0;
		}
		cpi->hba_misc = 0;
		cpi->hba_eng_cnt = 0;
		cpi->max_target = (ahc->features & AHC_WIDE) ? 15 : 7;
		cpi->max_lun = AHC_NUM_LUNS - 1;
		if (SIM_IS_SCSIBUS_B(ahc, sim)) {
			cpi->initiator_id = ahc->our_id_b;
			if ((ahc->flags & AHC_RESET_BUS_B) == 0)
				cpi->hba_misc |= PIM_NOBUSRESET;
		} else {
			cpi->initiator_id = ahc->our_id;
			if ((ahc->flags & AHC_RESET_BUS_A) == 0)
				cpi->hba_misc |= PIM_NOBUSRESET;
		}
		cpi->bus_id = cam_sim_bus(sim);
		cpi->base_transfer_speed = 3300;
		strncpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN);
		strncpy(cpi->hba_vid, "Adaptec", HBA_IDLEN);
		strncpy(cpi->dev_name, cam_sim_name(sim), DEV_IDLEN);
		cpi->unit_number = cam_sim_unit(sim);
#ifdef AHC_NEW_TRAN_SETTINGS
		cpi->protocol = PROTO_SCSI;
		cpi->protocol_version = SCSI_REV_2;
		cpi->transport = XPORT_SPI;
		cpi->transport_version = 2;
		cpi->xport_specific.spi.ppr_options = SID_SPI_CLOCK_ST;
		if ((ahc->features & AHC_DT) != 0) {
			cpi->transport_version = 3;
			cpi->xport_specific.spi.ppr_options =
			    SID_SPI_CLOCK_DT_ST;
		}
#endif
		cpi->ccb_h.status = CAM_REQ_CMP;
		xpt_done(ccb);
		break;
	}
	default:
		ccb->ccb_h.status = CAM_PROVIDE_FAIL;
		xpt_done(ccb);
		break;
	}
}
925
/*
 * Fill in a CCB_TRANS_SETTINGS CCB with either the current or the
 * user (default) negotiation parameters for the addressed target.
 * Compiled for either the new (proto/xport split) or old (flat)
 * CAM transfer-settings ABI depending on AHC_NEW_TRAN_SETTINGS.
 * Always completes the CCB with CAM_REQ_CMP; caller does xpt_done().
 */
static void
ahc_get_tran_settings(struct ahc_softc *ahc, int our_id, char channel,
		      struct ccb_trans_settings *cts)
{
#ifdef AHC_NEW_TRAN_SETTINGS
	struct	ahc_devinfo devinfo;
	struct	ccb_trans_settings_scsi *scsi;
	struct	ccb_trans_settings_spi *spi;
	struct	ahc_initiator_tinfo *targ_info;
	struct	ahc_tmode_tstate *tstate;
	struct	ahc_transinfo *tinfo;

	scsi = &cts->proto_specific.scsi;
	spi = &cts->xport_specific.spi;
	ahc_compile_devinfo(&devinfo, our_id,
			    cts->ccb_h.target_id,
			    cts->ccb_h.target_lun,
			    channel, ROLE_UNKNOWN);
	targ_info = ahc_fetch_transinfo(ahc, devinfo.channel,
					devinfo.our_scsiid,
					devinfo.target, &tstate);

	/* Select the active or the stored-default parameter set. */
	if (cts->type == CTS_TYPE_CURRENT_SETTINGS)
		tinfo = &targ_info->curr;
	else
		tinfo = &targ_info->user;

	scsi->flags &= ~CTS_SCSI_FLAGS_TAG_ENB;
	spi->flags &= ~CTS_SPI_FLAGS_DISC_ENB;
	if (cts->type == CTS_TYPE_USER_SETTINGS) {
		if ((ahc->user_discenable & devinfo.target_mask) != 0)
			spi->flags |= CTS_SPI_FLAGS_DISC_ENB;

		if ((ahc->user_tagenable & devinfo.target_mask) != 0)
			scsi->flags |= CTS_SCSI_FLAGS_TAG_ENB;
	} else {
		if ((tstate->discenable & devinfo.target_mask) != 0)
			spi->flags |= CTS_SPI_FLAGS_DISC_ENB;

		if ((tstate->tagenable & devinfo.target_mask) != 0)
			scsi->flags |= CTS_SCSI_FLAGS_TAG_ENB;
	}
	cts->protocol_version = tinfo->protocol_version;
	cts->transport_version = tinfo->transport_version;

	spi->sync_period = tinfo->period;
	spi->sync_offset = tinfo->offset;
	spi->bus_width = tinfo->width;
	spi->ppr_options = tinfo->ppr_options;

	cts->protocol = PROTO_SCSI;
	cts->transport = XPORT_SPI;
	spi->valid = CTS_SPI_VALID_SYNC_RATE
		   | CTS_SPI_VALID_SYNC_OFFSET
		   | CTS_SPI_VALID_BUS_WIDTH
		   | CTS_SPI_VALID_PPR_OPTIONS;

	/* Disc/TQ are per-device; meaningless on a wildcard lun. */
	if (cts->ccb_h.target_lun != CAM_LUN_WILDCARD) {
		scsi->valid = CTS_SCSI_VALID_TQ;
		spi->valid |= CTS_SPI_VALID_DISC;
	} else {
		scsi->valid = 0;
	}

	cts->ccb_h.status = CAM_REQ_CMP;
#else
	struct	ahc_devinfo devinfo;
	struct	ahc_initiator_tinfo *targ_info;
	struct	ahc_tmode_tstate *tstate;
	struct	ahc_transinfo *tinfo;

	ahc_compile_devinfo(&devinfo, our_id,
			    cts->ccb_h.target_id,
			    cts->ccb_h.target_lun,
			    channel, ROLE_UNKNOWN);
	targ_info = ahc_fetch_transinfo(ahc, devinfo.channel,
					devinfo.our_scsiid,
					devinfo.target, &tstate);

	/* Select the active or the stored-default parameter set. */
	if ((cts->flags & CCB_TRANS_CURRENT_SETTINGS) != 0)
		tinfo = &targ_info->curr;
	else
		tinfo = &targ_info->user;

	cts->flags &= ~(CCB_TRANS_DISC_ENB|CCB_TRANS_TAG_ENB);
	if ((cts->flags & CCB_TRANS_CURRENT_SETTINGS) == 0) {
		if ((ahc->user_discenable & devinfo.target_mask) != 0)
			cts->flags |= CCB_TRANS_DISC_ENB;

		if ((ahc->user_tagenable & devinfo.target_mask) != 0)
			cts->flags |= CCB_TRANS_TAG_ENB;
	} else {
		if ((tstate->discenable & devinfo.target_mask) != 0)
			cts->flags |= CCB_TRANS_DISC_ENB;

		if ((tstate->tagenable & devinfo.target_mask) != 0)
			cts->flags |= CCB_TRANS_TAG_ENB;
	}
	cts->sync_period = tinfo->period;
	cts->sync_offset = tinfo->offset;
	cts->bus_width = tinfo->width;

	cts->valid = CCB_TRANS_SYNC_RATE_VALID
		   | CCB_TRANS_SYNC_OFFSET_VALID
		   | CCB_TRANS_BUS_WIDTH_VALID;

	/* Disc/TQ are per-device; meaningless on a wildcard lun. */
	if (cts->ccb_h.target_lun != CAM_LUN_WILDCARD)
		cts->valid |= CCB_TRANS_DISC_VALID|CCB_TRANS_TQ_VALID;

	cts->ccb_h.status = CAM_REQ_CMP;
#endif
}
1038
1039static void
1040ahc_async(void *callback_arg, uint32_t code, struct cam_path *path, void *arg)
1041{
1042	struct ahc_softc *ahc;
1043	struct cam_sim *sim;
1044
1045	sim = (struct cam_sim *)callback_arg;
1046	ahc = (struct ahc_softc *)cam_sim_softc(sim);
1047	switch (code) {
1048	case AC_LOST_DEVICE:
1049	{
1050		struct	ahc_devinfo devinfo;
1051		long	s;
1052
1053		ahc_compile_devinfo(&devinfo, SIM_SCSI_ID(ahc, sim),
1054				    xpt_path_target_id(path),
1055				    xpt_path_lun_id(path),
1056				    SIM_CHANNEL(ahc, sim),
1057				    ROLE_UNKNOWN);
1058
1059		/*
1060		 * Revert to async/narrow transfers
1061		 * for the next device.
1062		 */
1063		ahc_lock(ahc, &s);
1064		ahc_set_width(ahc, &devinfo, MSG_EXT_WDTR_BUS_8_BIT,
1065			      AHC_TRANS_GOAL|AHC_TRANS_CUR, /*paused*/FALSE);
1066		ahc_set_syncrate(ahc, &devinfo, /*syncrate*/NULL,
1067				 /*period*/0, /*offset*/0, /*ppr_options*/0,
1068				 AHC_TRANS_GOAL|AHC_TRANS_CUR,
1069				 /*paused*/FALSE);
1070		ahc_unlock(ahc, &s);
1071		break;
1072	}
1073	default:
1074		break;
1075	}
1076}
1077
/*
 * Completion callback for bus_dmamap_load() of a transaction's data
 * buffer (also called directly with pre-built segments).  Copies the
 * DMA segment list into the SCB's hardware SG list, finishes filling
 * out the hardware SCB (rate, offset, disconnect/tag/negotiation
 * control bits), arms the CCB timeout, and queues the SCB to the
 * controller or to the per-target untagged queue.
 */
static void
ahc_execute_scb(void *arg, bus_dma_segment_t *dm_segs, int nsegments,
		int error)
{
	struct	scb *scb;
	union	ccb *ccb;
	struct	ahc_softc *ahc;
	struct	ahc_initiator_tinfo *tinfo;
	struct	ahc_tmode_tstate *tstate;
	u_int	mask;
	long	s;

	scb = (struct scb *)arg;
	ccb = scb->io_ctx;
	ahc = scb->ahc_softc;

	if (error != 0) {
		/* Mapping failed: fail the CCB and release the SCB. */
		if (error == EFBIG)
			ahc_set_transaction_status(scb, CAM_REQ_TOO_BIG);
		else
			ahc_set_transaction_status(scb, CAM_REQ_CMP_ERR);
		if (nsegments != 0)
			bus_dmamap_unload(ahc->buffer_dmat, scb->dmamap);
		ahc_lock(ahc, &s);
		ahc_free_scb(ahc, scb);
		ahc_unlock(ahc, &s);
		xpt_done(ccb);
		return;
	}
	if (nsegments != 0) {
		struct	  ahc_dma_seg *sg;
		bus_dma_segment_t *end_seg;
		/*XXX bus_dmasync_op_t*/int op;

		end_seg = dm_segs + nsegments;

		/* Copy the segments into our SG list */
		sg = scb->sg_list;
		while (dm_segs < end_seg) {
			uint32_t len;

			sg->addr = ahc_htole32(dm_segs->ds_addr);
			/*
			 * High bits of the segment address are folded
			 * into the top byte of the length field.
			 */
			len = dm_segs->ds_len
			    | ((dm_segs->ds_addr >> 8) & 0x7F000000);
			sg->len = ahc_htole32(len);
			sg++;
			dm_segs++;
		}

		/*
		 * Note where to find the SG entries in bus space.
		 * We also set the full residual flag which the
		 * sequencer will clear as soon as a data transfer
		 * occurs.
		 */
		scb->hscb->sgptr = ahc_htole32(scb->sg_list_phys|SG_FULL_RESID);

		if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN)
			op = BUS_DMASYNC_PREREAD;
		else
			op = BUS_DMASYNC_PREWRITE;

		bus_dmamap_sync(ahc->buffer_dmat, scb->dmamap, op);

		if (ccb->ccb_h.func_code == XPT_CONT_TARGET_IO) {
			struct target_data *tdata;

			tdata = &scb->hscb->shared_data.tdata;
			tdata->target_phases |= DPHASE_PENDING;
			if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_OUT)
				tdata->data_phase = P_DATAOUT;
			else
				tdata->data_phase = P_DATAIN;

			/*
			 * If the transfer is of an odd length and in the
			 * "in" direction (scsi->HostBus), then it may
			 * trigger a bug in the 'WideODD' feature of
			 * non-Ultra2 chips.  Force the total data-length
			 * to be even by adding an extra, 1 byte, SG,
			 * element.  We do this even if we are not currently
			 * negotiated wide as negotiation could occur before
			 * this command is executed.
			 */
			if ((ahc->bugs & AHC_TMODE_WIDEODD_BUG) != 0
			 && (ccb->csio.dxfer_len & 0x1) != 0
			 && (ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {

				nsegments++;
				if (nsegments > AHC_NSEG) {

					/* No room for the pad segment. */
					ahc_set_transaction_status(scb,
					    CAM_REQ_TOO_BIG);
					bus_dmamap_unload(ahc->buffer_dmat,
							  scb->dmamap);
					ahc_lock(ahc, &s);
					ahc_free_scb(ahc, scb);
					ahc_unlock(ahc, &s);
					xpt_done(ccb);
					return;
				}
				sg->addr = ahc_htole32(ahc->dma_bug_buf);
				sg->len = ahc_htole32(1);
				sg++;
			}
		}
		/* Flag the final SG element so the sequencer stops there. */
		sg--;
		sg->len |= ahc_htole32(AHC_DMA_LAST_SEG);

		/* Copy the first SG into the "current" data pointer area */
		scb->hscb->dataptr = scb->sg_list->addr;
		scb->hscb->datacnt = scb->sg_list->len;
	} else {
		/* No data phase for this transaction. */
		scb->hscb->sgptr = ahc_htole32(SG_LIST_NULL);
		scb->hscb->dataptr = 0;
		scb->hscb->datacnt = 0;
	}

	scb->sg_count = nsegments;

	ahc_lock(ahc, &s);

	/*
	 * Last time we need to check if this SCB needs to
	 * be aborted.
	 */
	if (ahc_get_transaction_status(scb) != CAM_REQ_INPROG) {
		if (nsegments != 0)
			bus_dmamap_unload(ahc->buffer_dmat, scb->dmamap);
		ahc_free_scb(ahc, scb);
		ahc_unlock(ahc, &s);
		xpt_done(ccb);
		return;
	}

	/* Fetch the negotiated transfer settings for this I_T nexus. */
	tinfo = ahc_fetch_transinfo(ahc, SCSIID_CHANNEL(ahc, scb->hscb->scsiid),
				    SCSIID_OUR_ID(scb->hscb->scsiid),
				    SCSIID_TARGET(ahc, scb->hscb->scsiid),
				    &tstate);

	mask = SCB_GET_TARGET_MASK(ahc, scb);
	scb->hscb->scsirate = tinfo->scsirate;
	scb->hscb->scsioffset = tinfo->curr.offset;
	if ((tstate->ultraenb & mask) != 0)
		scb->hscb->control |= ULTRAENB;

	if ((tstate->discenable & mask) != 0
	 && (ccb->ccb_h.flags & CAM_DIS_DISCONNECT) == 0)
		scb->hscb->control |= DISCENB;

	/*
	 * Request a negotiation message if the caller asked for one or
	 * if the transfer-settings code has flagged this nexus for
	 * automatic (re)negotiation.
	 */
	if ((ccb->ccb_h.flags & CAM_NEGOTIATE) != 0
	 && (tinfo->goal.width != 0
	  || tinfo->goal.offset != 0
	  || tinfo->goal.ppr_options != 0)) {
		scb->flags |= SCB_NEGOTIATE;
		scb->hscb->control |= MK_MESSAGE;
	} else if ((tstate->auto_negotiate & mask) != 0) {
		scb->flags |= SCB_AUTO_NEGOTIATE;
		scb->hscb->control |= MK_MESSAGE;
	}

	LIST_INSERT_HEAD(&ahc->pending_scbs, scb, pending_links);

	ccb->ccb_h.status |= CAM_SIM_QUEUED;

	if (ccb->ccb_h.timeout != CAM_TIME_INFINITY) {
		uint64_t time;

		/* Default timeout is 5 seconds. */
		if (ccb->ccb_h.timeout == CAM_TIME_DEFAULT)
			ccb->ccb_h.timeout = 5 * 1000;

		/* Convert milliseconds to ticks. */
		time = ccb->ccb_h.timeout;
		time *= hz;
		time /= 1000;
		ccb->ccb_h.timeout_ch =
		    timeout(ahc_timeout, (caddr_t)scb, time);
	}

	/*
	 * We only allow one untagged transaction
	 * per target in the initiator role unless
	 * we are storing a full busy target *lun*
	 * table in SCB space.
	 */
	if ((scb->hscb->control & (TARGET_SCB|TAG_ENB)) == 0
	 && (ahc->flags & AHC_SCB_BTT) == 0) {
		struct scb_tailq *untagged_q;
		int target_offset;

		target_offset = SCB_GET_TARGET_OFFSET(ahc, scb);
		untagged_q = &(ahc->untagged_queues[target_offset]);
		TAILQ_INSERT_TAIL(untagged_q, scb, links.tqe);
		scb->flags |= SCB_UNTAGGEDQ;
		/*
		 * If another untagged command is already outstanding,
		 * leave ours queued; it will be issued when the head
		 * of the queue completes.
		 */
		if (TAILQ_FIRST(untagged_q) != scb) {
			ahc_unlock(ahc, &s);
			return;
		}
	}
	scb->flags |= SCB_ACTIVE;

	if ((scb->flags & SCB_TARGET_IMMEDIATE) != 0) {
		/* Define a mapping from our tag to the SCB. */
		ahc->scb_data->scbindex[scb->hscb->tag] = scb;
		ahc_pause(ahc);
		if ((ahc->flags & AHC_PAGESCBS) == 0)
			ahc_outb(ahc, SCBPTR, scb->hscb->tag);
		ahc_outb(ahc, TARG_IMMEDIATE_SCB, scb->hscb->tag);
		ahc_unpause(ahc);
	} else {
		ahc_queue_scb(ahc, scb);
	}

	ahc_unlock(ahc, &s);
}
1292
/*
 * CAM polling entry point: service any pending controller
 * interrupts by hand (used when interrupts are unavailable,
 * e.g. during crash dumps).
 */
static void
ahc_poll(struct cam_sim *sim)
{
	ahc_intr((struct ahc_softc *)cam_sim_softc(sim));
}
1301
/*
 * Prepare an SCB for a (CONT_)SCSI_IO CCB: copy the CDB into the
 * hardware SCB and kick off DMA mapping of the data buffer.  All
 * paths end in ahc_execute_scb(), either via the bus_dmamap_load()
 * callback or by calling it directly for pre-mapped/absent data.
 */
static void
ahc_setup_data(struct ahc_softc *ahc, struct cam_sim *sim,
	       struct ccb_scsiio *csio, struct scb *scb)
{
	struct hardware_scb *hscb;
	struct ccb_hdr *ccb_h;

	hscb = scb->hscb;
	ccb_h = &csio->ccb_h;

	csio->resid = 0;
	csio->sense_resid = 0;
	if (ccb_h->func_code == XPT_SCSI_IO) {
		hscb->cdb_len = csio->cdb_len;
		if ((ccb_h->flags & CAM_CDB_POINTER) != 0) {

			/*
			 * Reject CDBs we cannot embed: longer than the
			 * 32-byte area, or passed by physical address.
			 */
			if (hscb->cdb_len > sizeof(hscb->cdb32)
			 || (ccb_h->flags & CAM_CDB_PHYS) != 0) {
				u_long s;

				ahc_set_transaction_status(scb,
							   CAM_REQ_INVALID);
				ahc_lock(ahc, &s);
				ahc_free_scb(ahc, scb);
				ahc_unlock(ahc, &s);
				xpt_done((union ccb *)csio);
				return;
			}
			/*
			 * CDBs longer than 12 bytes go in the separate
			 * cdb32 area; shorter ones share space in the
			 * hardware SCB.
			 */
			if (hscb->cdb_len > 12) {
				memcpy(hscb->cdb32,
				       csio->cdb_io.cdb_ptr,
				       hscb->cdb_len);
				scb->flags |= SCB_CDB32_PTR;
			} else {
				memcpy(hscb->shared_data.cdb,
				       csio->cdb_io.cdb_ptr,
				       hscb->cdb_len);
			}
		} else {
			/* CDB bytes are embedded in the CCB itself. */
			if (hscb->cdb_len > 12) {
				memcpy(hscb->cdb32, csio->cdb_io.cdb_bytes,
				       hscb->cdb_len);
				scb->flags |= SCB_CDB32_PTR;
			} else {
				memcpy(hscb->shared_data.cdb,
				       csio->cdb_io.cdb_bytes,
				       hscb->cdb_len);
			}
		}
	}

	/* Only use S/G if there is a transfer */
	if ((ccb_h->flags & CAM_DIR_MASK) != CAM_DIR_NONE) {
		if ((ccb_h->flags & CAM_SCATTER_VALID) == 0) {
			/* We've been given a pointer to a single buffer */
			if ((ccb_h->flags & CAM_DATA_PHYS) == 0) {
				int s;
				int error;

				s = splsoftvm();
				error = bus_dmamap_load(ahc->buffer_dmat,
							scb->dmamap,
							csio->data_ptr,
							csio->dxfer_len,
							ahc_execute_scb,
							scb, /*flags*/0);
				if (error == EINPROGRESS) {
					/*
					 * So as to maintain ordering,
					 * freeze the controller queue
					 * until our mapping is
					 * returned.
					 */
					xpt_freeze_simq(sim,
							/*count*/1);
					scb->io_ctx->ccb_h.status |=
					    CAM_RELEASE_SIMQ;
				}
				splx(s);
			} else {
				struct bus_dma_segment seg;

				/* Pointer to physical buffer */
				if (csio->dxfer_len > AHC_MAXTRANSFER_SIZE)
					panic("ahc_setup_data - Transfer size "
					      "larger than can device max");

				seg.ds_addr =
				    (bus_addr_t)(vm_offset_t)csio->data_ptr;
				seg.ds_len = csio->dxfer_len;
				ahc_execute_scb(scb, &seg, 1, 0);
			}
		} else {
			struct bus_dma_segment *segs;

			if ((ccb_h->flags & CAM_DATA_PHYS) != 0)
				panic("ahc_setup_data - Physical segment "
				      "pointers unsupported");

			if ((ccb_h->flags & CAM_SG_LIST_PHYS) == 0)
				panic("ahc_setup_data - Virtual segment "
				      "addresses unsupported");

			/* Just use the segments provided */
			segs = (struct bus_dma_segment *)csio->data_ptr;
			ahc_execute_scb(scb, segs, csio->sglist_cnt, 0);
		}
	} else {
		/* No data phase: hand off immediately with no segments. */
		ahc_execute_scb(scb, NULL, 0, 0);
	}
}
1413
1414static void
1415ahc_set_recoveryscb(struct ahc_softc *ahc, struct scb *scb) {
1416
1417	if ((scb->flags & SCB_RECOVERY_SCB) == 0) {
1418		struct scb *list_scb;
1419
1420		scb->flags |= SCB_RECOVERY_SCB;
1421
1422		/*
1423		 * Take all queued, but not sent SCBs out of the equation.
1424		 * Also ensure that no new CCBs are queued to us while we
1425		 * try to fix this problem.
1426		 */
1427		if ((scb->io_ctx->ccb_h.status & CAM_RELEASE_SIMQ) == 0) {
1428			xpt_freeze_simq(SCB_GET_SIM(ahc, scb), /*count*/1);
1429			scb->io_ctx->ccb_h.status |= CAM_RELEASE_SIMQ;
1430		}
1431
1432		/*
1433		 * Go through all of our pending SCBs and remove
1434		 * any scheduled timeouts for them.  We will reschedule
1435		 * them after we've successfully fixed this problem.
1436		 */
1437		LIST_FOREACH(list_scb, &ahc->pending_scbs, pending_links) {
1438			union ccb *ccb;
1439
1440			ccb = list_scb->io_ctx;
1441			untimeout(ahc_timeout, list_scb, ccb->ccb_h.timeout_ch);
1442		}
1443	}
1444}
1445
/*
 * Handler for an expired CCB timeout.  Pauses the controller,
 * diagnoses the state of the bus, and escalates recovery: wait on a
 * longer-lived active SCB, queue a bus-device-reset message to the
 * timed-out target, or — if a BDR was already attempted — reset the
 * entire channel.
 */
void
ahc_timeout(void *arg)
{
	struct	scb *scb;
	struct	ahc_softc *ahc;
	long	s;
	int	found;
	u_int	last_phase;
	int	target;
	int	lun;
	int	i;
	char	channel;

	scb = (struct scb *)arg;
	ahc = (struct ahc_softc *)scb->ahc_softc;

	ahc_lock(ahc, &s);

	/* Stop the sequencer and drain any posted completions. */
	ahc_pause_and_flushwork(ahc);

	if ((scb->flags & SCB_ACTIVE) == 0) {
		/* Previous timeout took care of me already */
		printf("%s: Timedout SCB already complete. "
		       "Interrupts may not be functioning.\n", ahc_name(ahc));
		ahc_unpause(ahc);
		ahc_unlock(ahc, &s);
		return;
	}

	target = SCB_GET_TARGET(ahc, scb);
	channel = SCB_GET_CHANNEL(ahc, scb);
	lun = SCB_GET_LUN(scb);

	/* Dump diagnostic state before attempting recovery. */
	ahc_print_path(ahc, scb);
	printf("SCB 0x%x - timed out\n", scb->hscb->tag);
	ahc_dump_card_state(ahc);
	last_phase = ahc_inb(ahc, LASTPHASE);
	if (scb->sg_count > 0) {
		for (i = 0; i < scb->sg_count; i++) {
			printf("sg[%d] - Addr 0x%x : Length %d\n",
			       i,
			       scb->sg_list[i].addr,
			       scb->sg_list[i].len & AHC_SG_LEN_MASK);
		}
	}
	if (scb->flags & (SCB_DEVICE_RESET|SCB_ABORT)) {
		/*
		 * Been down this road before.
		 * Do a full bus reset.
		 */
bus_reset:
		ahc_set_transaction_status(scb, CAM_CMD_TIMEOUT);
		found = ahc_reset_channel(ahc, channel, /*Initiate Reset*/TRUE);
		printf("%s: Issued Channel %c Bus Reset. "
		       "%d SCBs aborted\n", ahc_name(ahc), channel, found);
	} else {
		/*
		 * If we are a target, transition to bus free and report
		 * the timeout.
		 *
		 * The target/initiator that is holding up the bus may not
		 * be the same as the one that triggered this timeout
		 * (different commands have different timeout lengths).
		 * If the bus is idle and we are acting as the initiator
		 * for this request, queue a BDR message to the timed out
		 * target.  Otherwise, if the timed out transaction is
		 * active:
		 *   Initiator transaction:
		 *	Stuff the message buffer with a BDR message and assert
		 *	ATN in the hopes that the target will let go of the bus
		 *	and go to the mesgout phase.  If this fails, we'll
		 *	get another timeout 2 seconds later which will attempt
		 *	a bus reset.
		 *
		 *   Target transaction:
		 *	Transition to BUS FREE and report the error.
		 *	It's good to be the target!
		 */
		u_int active_scb_index;
		u_int saved_scbptr;

		saved_scbptr = ahc_inb(ahc, SCBPTR);
		active_scb_index = ahc_inb(ahc, SCB_TAG);

		if ((ahc_inb(ahc, SEQ_FLAGS) & NOT_IDENTIFIED) == 0
		  && (active_scb_index < ahc->scb_data->numscbs)) {
			struct scb *active_scb;

			/*
			 * If the active SCB is not us, assume that
			 * the active SCB has a longer timeout than
			 * the timedout SCB, and wait for the active
			 * SCB to timeout.
			 */
			active_scb = ahc_lookup_scb(ahc, active_scb_index);
			if (active_scb != scb) {
				struct	 ccb_hdr *ccbh;
				uint64_t newtimeout;

				ahc_print_path(ahc, scb);
				printf("Other SCB Timeout%s",
				       (scb->flags & SCB_OTHERTCL_TIMEOUT) != 0
				       ? " again\n" : "\n");
				scb->flags |= SCB_OTHERTCL_TIMEOUT;
				/*
				 * Reschedule with the longer of the two
				 * timeouts, converted from ms to ticks.
				 */
				newtimeout =
				    MAX(active_scb->io_ctx->ccb_h.timeout,
					scb->io_ctx->ccb_h.timeout);
				newtimeout *= hz;
				newtimeout /= 1000;
				/* NOTE(review): ccbh is assigned but never
				 * used below. */
				ccbh = &scb->io_ctx->ccb_h;
				scb->io_ctx->ccb_h.timeout_ch =
				    timeout(ahc_timeout, scb, newtimeout);
				ahc_unpause(ahc);
				ahc_unlock(ahc, &s);
				return;
			}

			/* It's us */
			if ((scb->flags & SCB_TARGET_SCB) != 0) {

				/*
				 * Send back any queued up transactions
				 * and properly record the error condition.
				 */
				ahc_abort_scbs(ahc, SCB_GET_TARGET(ahc, scb),
					       SCB_GET_CHANNEL(ahc, scb),
					       SCB_GET_LUN(scb),
					       scb->hscb->tag,
					       ROLE_TARGET,
					       CAM_CMD_TIMEOUT);

				/* Will clear us from the bus */
				ahc_restart(ahc);
				ahc_unlock(ahc, &s);
				return;
			}

			/*
			 * Our command is active on the bus: stuff a BDR
			 * message into the message buffer and assert ATN
			 * so the target enters message-out phase.
			 */
			ahc_set_recoveryscb(ahc, active_scb);
			ahc_outb(ahc, MSG_OUT, HOST_MSG);
			ahc_outb(ahc, SCSISIGO, last_phase|ATNO);
			ahc_print_path(ahc, active_scb);
			printf("BDR message in message buffer\n");
			active_scb->flags |= SCB_DEVICE_RESET;
			active_scb->io_ctx->ccb_h.timeout_ch =
			    timeout(ahc_timeout, (caddr_t)active_scb, 2 * hz);
			ahc_unpause(ahc);
		} else {
			int	 disconnected;

			/* XXX Shouldn't panic.  Just punt instead? */
			if ((scb->flags & SCB_TARGET_SCB) != 0)
				panic("Timed-out target SCB but bus idle");

			if (last_phase != P_BUSFREE
			 && (ahc_inb(ahc, SSTAT0) & TARGET) != 0) {
				/* XXX What happened to the SCB? */
				/* Hung target selection.  Goto busfree */
				printf("%s: Hung target selection\n",
				       ahc_name(ahc));
				ahc_restart(ahc);
				ahc_unlock(ahc, &s);
				return;
			}

			/*
			 * If our SCB is still waiting in the qinfifo it
			 * never made it onto the bus; otherwise assume
			 * the device is disconnected holding our command.
			 */
			if (ahc_search_qinfifo(ahc, target, channel, lun,
					       scb->hscb->tag, ROLE_INITIATOR,
					       /*status*/0, SEARCH_COUNT) > 0) {
				disconnected = FALSE;
			} else {
				disconnected = TRUE;
			}

			if (disconnected) {

				ahc_set_recoveryscb(ahc, scb);
				/*
				 * Actually re-queue this SCB in an attempt
				 * to select the device before it reconnects.
				 * In either case (selection or reselection),
				 * we will now issue a target reset to the
				 * timed-out device.
				 *
				 * Set the MK_MESSAGE control bit indicating
				 * that we desire to send a message.  We
				 * also set the disconnected flag since
				 * in the paging case there is no guarantee
				 * that our SCB control byte matches the
				 * version on the card.  We don't want the
				 * sequencer to abort the command thinking
				 * an unsolicited reselection occurred.
				 */
				scb->hscb->control |= MK_MESSAGE|DISCONNECTED;
				scb->flags |= SCB_DEVICE_RESET;

				/*
				 * Remove any cached copy of this SCB in the
				 * disconnected list in preparation for the
				 * queuing of our abort SCB.  We use the
				 * same element in the SCB, SCB_NEXT, for
				 * both the qinfifo and the disconnected list.
				 */
				ahc_search_disc_list(ahc, target, channel,
						     lun, scb->hscb->tag,
						     /*stop_on_first*/TRUE,
						     /*remove*/TRUE,
						     /*save_state*/FALSE);

				/*
				 * In the non-paging case, the sequencer will
				 * never re-reference the in-core SCB.
				 * To make sure we are notified during
				 * reselection, set the MK_MESSAGE flag in
				 * the card's copy of the SCB.
				 */
				if ((ahc->flags & AHC_PAGESCBS) == 0) {
					ahc_outb(ahc, SCBPTR, scb->hscb->tag);
					ahc_outb(ahc, SCB_CONTROL,
						 ahc_inb(ahc, SCB_CONTROL)
						| MK_MESSAGE);
				}

				/*
				 * Clear out any entries in the QINFIFO first
				 * so we are the next SCB for this target
				 * to run.
				 */
				ahc_search_qinfifo(ahc,
						   SCB_GET_TARGET(ahc, scb),
						   channel, SCB_GET_LUN(scb),
						   SCB_LIST_NULL,
						   ROLE_INITIATOR,
						   CAM_REQUEUE_REQ,
						   SEARCH_COMPLETE);
				ahc_print_path(ahc, scb);
				printf("Queuing a BDR SCB\n");
				ahc_qinfifo_requeue_tail(ahc, scb);
				ahc_outb(ahc, SCBPTR, saved_scbptr);
				scb->io_ctx->ccb_h.timeout_ch =
				    timeout(ahc_timeout, (caddr_t)scb, 2 * hz);
				ahc_unpause(ahc);
			} else {
				/* Go "immediately" to the bus reset */
				/* This shouldn't happen */
				ahc_set_recoveryscb(ahc, scb);
				ahc_print_path(ahc, scb);
				printf("SCB %d: Immediate reset.  "
					"Flags = 0x%x\n", scb->hscb->tag,
					scb->flags);
				goto bus_reset;
			}
		}
	}
	ahc_unlock(ahc, &s);
}
1700
/*
 * Handle an XPT_ABORT request.  Target-mode CCBs that are still
 * sitting on our accept/notify lists are unlinked and completed with
 * CAM_REQ_ABORTED; aborting an in-flight SCSI I/O is not implemented
 * and reports CAM_UA_ABORT.
 */
static void
ahc_abort_ccb(struct ahc_softc *ahc, struct cam_sim *sim, union ccb *ccb)
{
	union ccb *abort_ccb;

	abort_ccb = ccb->cab.abort_ccb;
	switch (abort_ccb->ccb_h.func_code) {
	case XPT_ACCEPT_TARGET_IO:
	case XPT_IMMED_NOTIFY:
	case XPT_CONT_TARGET_IO:
	{
		struct ahc_tmode_tstate *tstate;
		struct ahc_tmode_lstate *lstate;
		struct ccb_hdr_slist *list;
		cam_status status;

		status = ahc_find_tmode_devs(ahc, sim, abort_ccb, &tstate,
					     &lstate, TRUE);

		if (status != CAM_REQ_CMP) {
			ccb->ccb_h.status = status;
			break;
		}

		/* Pick the queue the CCB would be waiting on, if any. */
		if (abort_ccb->ccb_h.func_code == XPT_ACCEPT_TARGET_IO)
			list = &lstate->accept_tios;
		else if (abort_ccb->ccb_h.func_code == XPT_IMMED_NOTIFY)
			list = &lstate->immed_notifies;
		else
			list = NULL;

		if (list != NULL) {
			struct ccb_hdr *curelm;
			int found;

			/*
			 * Manually unlink the CCB from the singly-linked
			 * list (SLIST has no O(1) arbitrary remove).
			 */
			curelm = SLIST_FIRST(list);
			found = 0;
			if (curelm == &abort_ccb->ccb_h) {
				found = 1;
				SLIST_REMOVE_HEAD(list, sim_links.sle);
			} else {
				while(curelm != NULL) {
					struct ccb_hdr *nextelm;

					nextelm =
					    SLIST_NEXT(curelm, sim_links.sle);

					if (nextelm == &abort_ccb->ccb_h) {
						found = 1;
						SLIST_NEXT(curelm,
							   sim_links.sle) =
						    SLIST_NEXT(nextelm,
							       sim_links.sle);
						break;
					}
					curelm = nextelm;
				}
			}

			if (found) {
				abort_ccb->ccb_h.status = CAM_REQ_ABORTED;
				xpt_done(abort_ccb);
				ccb->ccb_h.status = CAM_REQ_CMP;
			} else {
				xpt_print_path(abort_ccb->ccb_h.path);
				printf("Not found\n");
				ccb->ccb_h.status = CAM_PATH_INVALID;
			}
			break;
		}
		/* FALLTHROUGH */
	}
	case XPT_SCSI_IO:
		/* XXX Fully implement the hard ones */
		ccb->ccb_h.status = CAM_UA_ABORT;
		break;
	default:
		ccb->ccb_h.status = CAM_REQ_INVALID;
		break;
	}
	xpt_done(ccb);
}
1783
1784void
1785ahc_send_async(struct ahc_softc *ahc, char channel, u_int target,
1786		u_int lun, ac_code code, void *opt_arg)
1787{
1788	struct	ccb_trans_settings cts;
1789	struct cam_path *path;
1790	void *arg;
1791	int error;
1792
1793	arg = NULL;
1794	error = ahc_create_path(ahc, channel, target, lun, &path);
1795
1796	if (error != CAM_REQ_CMP)
1797		return;
1798
1799	switch (code) {
1800	case AC_TRANSFER_NEG:
1801	{
1802#ifdef AHC_NEW_TRAN_SETTINGS
1803		struct	ccb_trans_settings_scsi *scsi;
1804
1805		cts.type = CTS_TYPE_CURRENT_SETTINGS;
1806		scsi = &cts.proto_specific.scsi;
1807#else
1808		cts.flags = CCB_TRANS_CURRENT_SETTINGS;
1809#endif
1810		cts.ccb_h.path = path;
1811		cts.ccb_h.target_id = target;
1812		cts.ccb_h.target_lun = lun;
1813		ahc_get_tran_settings(ahc, channel == 'A' ? ahc->our_id
1814							  : ahc->our_id_b,
1815				      channel, &cts);
1816		arg = &cts;
1817#ifdef AHC_NEW_TRAN_SETTINGS
1818		scsi->valid &= ~CTS_SCSI_VALID_TQ;
1819		scsi->flags &= ~CTS_SCSI_FLAGS_TAG_ENB;
1820#else
1821		cts.valid &= ~CCB_TRANS_TQ_VALID;
1822		cts.flags &= ~CCB_TRANS_TAG_ENB;
1823#endif
1824		if (opt_arg == NULL)
1825			break;
1826		if (*((ahc_queue_alg *)opt_arg) == AHC_QUEUE_TAGGED)
1827#ifdef AHC_NEW_TRAN_SETTINGS
1828			scsi->flags |= ~CTS_SCSI_FLAGS_TAG_ENB;
1829		scsi->valid |= CTS_SCSI_VALID_TQ;
1830#else
1831			cts.flags |= CCB_TRANS_TAG_ENB;
1832		cts.valid |= CCB_TRANS_TQ_VALID;
1833#endif
1834		break;
1835	}
1836	case AC_SENT_BDR:
1837	case AC_BUS_RESET:
1838		break;
1839	default:
1840		panic("ahc_send_async: Unexpected async event");
1841	}
1842	xpt_async(code, path, arg);
1843	xpt_free_path(path);
1844}
1845
/*
 * Core-driver hook invoked when a device's tagged-queuing state
 * changes.  Intentionally a no-op on FreeBSD — presumably the CAM
 * layer handles any required bookkeeping; confirm against the core
 * driver's callers if extending.
 */
void
ahc_platform_set_tags(struct ahc_softc *ahc,
		      struct ahc_devinfo *devinfo, int enable)
{
}
1851
1852int
1853ahc_platform_alloc(struct ahc_softc *ahc, void *platform_arg)
1854{
1855	ahc->platform_data = malloc(sizeof(struct ahc_platform_data), M_DEVBUF,
1856	    M_NOWAIT | M_ZERO);
1857	if (ahc->platform_data == NULL)
1858		return (ENOMEM);
1859	return (0);
1860}
1861
/*
 * Release all FreeBSD-specific controller resources: register and
 * IRQ bus resources, both CAM SIMs (secondary channel first) with
 * their paths and devqs, the shutdown event handler, and finally the
 * platform data itself.
 */
void
ahc_platform_free(struct ahc_softc *ahc)
{
	struct ahc_platform_data *pdata;

	pdata = ahc->platform_data;
	if (pdata != NULL) {
		/* Release the register window resource. */
		if (pdata->regs != NULL)
			bus_release_resource(ahc->dev_softc,
					     pdata->regs_res_type,
					     pdata->regs_res_id,
					     pdata->regs);

		/* Release the interrupt resource. */
		if (pdata->irq != NULL)
			bus_release_resource(ahc->dev_softc,
					     pdata->irq_res_type,
					     0, pdata->irq);

		/*
		 * Tear down the CAM registration for each channel:
		 * announce device loss, free the path, deregister the
		 * bus, and free the SIM along with its device queue.
		 */
		if (pdata->sim_b != NULL) {
			xpt_async(AC_LOST_DEVICE, pdata->path_b, NULL);
			xpt_free_path(pdata->path_b);
			xpt_bus_deregister(cam_sim_path(pdata->sim_b));
			cam_sim_free(pdata->sim_b, /*free_devq*/TRUE);
		}
		if (pdata->sim != NULL) {
			xpt_async(AC_LOST_DEVICE, pdata->path, NULL);
			xpt_free_path(pdata->path);
			xpt_bus_deregister(cam_sim_path(pdata->sim));
			cam_sim_free(pdata->sim, /*free_devq*/TRUE);
		}
		if (pdata->eh != NULL)
			EVENTHANDLER_DEREGISTER(shutdown_final, pdata->eh);
		free(ahc->platform_data, M_DEVBUF);
	}
}
1897
/*
 * Softc ordering hook used by the core driver's sorted softc list.
 * FreeBSD does not sort softcs, so every pair compares as equal.
 */
int
ahc_softc_comp(struct ahc_softc *lahc, struct ahc_softc *rahc)
{
	return (0);
}
1904
/*
 * Newbus detach method.  Validates that the softc is still on the
 * global ahc list, disables and tears down the interrupt handler,
 * and frees the controller.  Returns ENOENT if the controller was
 * already detached.
 */
int
ahc_detach(device_t dev)
{
	struct ahc_softc *ahc;
	u_long l;
	u_long s;

	/* Hold the list lock across the entire teardown. */
	ahc_list_lock(&l);
	device_printf(dev, "detaching device\n");
	ahc = device_get_softc(dev);
	/* Confirm this softc is still registered with the core driver. */
	ahc = ahc_find_softc(ahc);
	if (ahc == NULL) {
		device_printf(dev, "aic7xxx already detached\n");
		ahc_list_unlock(&l);
		return (ENOENT);
	}
	ahc_lock(ahc, &s);
	ahc_intr_enable(ahc, FALSE);
	bus_teardown_intr(dev, ahc->platform_data->irq, ahc->platform_data->ih);
	ahc_unlock(ahc, &s);
	ahc_free(ahc);
	ahc_list_unlock(&l);
	return (0);
}
1929
1930#if UNUSED
1931static void
1932ahc_dump_targcmd(struct target_cmd *cmd)
1933{
1934	uint8_t *byte;
1935	uint8_t *last_byte;
1936	int i;
1937
1938	byte = &cmd->initiator_channel;
1939	/* Debugging info for received commands */
1940	last_byte = &cmd[1].initiator_channel;
1941
1942	i = 0;
1943	while (byte < last_byte) {
1944		if (i == 0)
1945			printf("\t");
1946		printf("%#x", *byte++);
1947		i++;
1948		if (i == 8) {
1949			printf("\n");
1950			i = 0;
1951		} else {
1952			printf(", ");
1953		}
1954	}
1955}
1956#endif
1957
1958static int
1959ahc_modevent(module_t mod, int type, void *data)
1960{
1961	/* XXX Deal with busy status on unload. */
1962	return 0;
1963}
1964
/*
 * Module glue: register the "ahc" core module with the kernel
 * loader, declare its dependency on CAM, and export its version.
 */
static moduledata_t ahc_mod = {
	"ahc",
	ahc_modevent,
	NULL
};

DECLARE_MODULE(ahc, ahc_mod, SI_SUB_DRIVERS, SI_ORDER_MIDDLE);
MODULE_DEPEND(ahc, cam, 1, 1, 1);
MODULE_VERSION(ahc, 1);
1974