aic7xxx_osm.c revision 66647
1/*
2 * Bus independent FreeBSD shim for the aic7xxx based adaptec SCSI controllers
3 *
4 * Copyright (c) 1994, 1995, 1996, 1997, 1998, 1999, 2000 Justin T. Gibbs.
5 * All rights reserved.
6 *
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
9 * are met:
10 * 1. Redistributions of source code must retain the above copyright
11 *    notice, this list of conditions, and the following disclaimer,
12 *    without modification.
13 * 2. The name of the author may not be used to endorse or promote products
14 *    derived from this software without specific prior written permission.
15 *
16 * Alternatively, this software may be distributed under the terms of the
17 * GNU Public License ("GPL").
18 *
19 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
20 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
21 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
22 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
23 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
24 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
25 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
26 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
27 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
28 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
29 * SUCH DAMAGE.
30 *
31 * $Id$
32 *
33 * $FreeBSD: head/sys/dev/aic7xxx/aic7xxx_osm.c 66647 2000-10-05 04:24:14Z gibbs $
34 */
35
36#include <dev/aic7xxx/aic7xxx_freebsd.h>
37#include <dev/aic7xxx/aic7xxx_inline.h>
38
39#include <sys/eventhandler.h>
40
41#ifndef AHC_TMODE_ENABLE
42#define AHC_TMODE_ENABLE 0
43#endif
44
45#define ccb_scb_ptr spriv_ptr0
46#define ccb_ahc_ptr spriv_ptr1
47
48#ifdef AHC_DEBUG
49static int     ahc_debug = AHC_DEBUG;
50#endif
51
52static void ahc_freebsd_intr(void *arg);
53
54#if UNUSED
55static void	ahc_dump_targcmd(struct target_cmd *cmd);
56#endif
57static void	ahc_action(struct cam_sim *sim, union ccb *ccb);
58static void	ahc_get_tran_settings(struct ahc_softc *ahc,
59				      int our_id, char channel,
60				      struct ccb_trans_settings *cts);
61static void	ahc_async(void *callback_arg, uint32_t code,
62			  struct cam_path *path, void *arg);
63static void	ahc_execute_scb(void *arg, bus_dma_segment_t *dm_segs,
64				int nsegments, int error);
65static void	ahc_poll(struct cam_sim *sim);
66static void	ahc_setup_data(struct ahc_softc *ahc, struct cam_sim *sim,
67			       struct ccb_scsiio *csio, struct scb *scb);
68static void	ahc_abort_ccb(struct ahc_softc *ahc, struct cam_sim *sim,
69			      union ccb *ccb);
70static int	ahc_create_path(struct ahc_softc *ahc,
71				char channel, u_int target, u_int lun,
72				struct cam_path **path);
73
74static void	ahc_set_recoveryscb(struct ahc_softc *ahc, struct scb *scb);
75
76static int
77ahc_create_path(struct ahc_softc *ahc, char channel, u_int target,
78	        u_int lun, struct cam_path **path)
79{
80	path_id_t path_id;
81
82	if (channel == 'B')
83		path_id = cam_sim_path(ahc->platform_data->sim_b);
84	else
85		path_id = cam_sim_path(ahc->platform_data->sim);
86
87	return (xpt_create_path(path, /*periph*/NULL,
88				path_id, target, lun));
89}
90
91/*
92 * Attach all the sub-devices we can find
93 */
94int
95ahc_attach(struct ahc_softc *ahc)
96{
97	char   ahc_info[256];
98	struct ccb_setasync csa;
99	struct cam_devq *devq;
100	int bus_id;
101	int bus_id2;
102	struct cam_sim *sim;
103	struct cam_sim *sim2;
104	struct cam_path *path;
105	struct cam_path *path2;
106	long s;
107	int count;
108	int error;
109
110	count = 0;
111	sim = NULL;
112	sim2 = NULL;
113
114	ahc_controller_info(ahc, ahc_info);
115	printf("%s\n", ahc_info);
116	ahc_lock(ahc, &s);
117	/* Hook up our interrupt handler */
118	if ((error = bus_setup_intr(ahc->dev_softc, ahc->platform_data->irq,
119				    INTR_TYPE_CAM, ahc_freebsd_intr, ahc,
120				    &ahc->platform_data->ih)) != 0) {
121		device_printf(ahc->dev_softc, "bus_setup_intr() failed: %d\n",
122			      error);
123		goto fail;
124	}
125
126	/*
127	 * Attach secondary channel first if the user has
128	 * declared it the primary channel.
129	 */
130	if ((ahc->flags & AHC_CHANNEL_B_PRIMARY) != 0) {
131		bus_id = 1;
132		bus_id2 = 0;
133	} else {
134		bus_id = 0;
135		bus_id2 = 1;
136	}
137
138	/*
139	 * Create the device queue for our SIM(s).
140	 */
141	devq = cam_simq_alloc(AHC_SCB_MAX - 1);
142	if (devq == NULL)
143		goto fail;
144
145	/*
146	 * Construct our first channel SIM entry
147	 */
148	sim = cam_sim_alloc(ahc_action, ahc_poll, "ahc", ahc,
149			    device_get_unit(ahc->dev_softc),
150			    1, AHC_SCB_MAX - 1, devq);
151	if (sim == NULL) {
152		cam_simq_free(devq);
153		goto fail;
154	}
155
156	if (xpt_bus_register(sim, bus_id) != CAM_SUCCESS) {
157		cam_sim_free(sim, /*free_devq*/TRUE);
158		sim = NULL;
159		goto fail;
160	}
161
162	if (xpt_create_path(&path, /*periph*/NULL,
163			    cam_sim_path(sim), CAM_TARGET_WILDCARD,
164			    CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
165		xpt_bus_deregister(cam_sim_path(sim));
166		cam_sim_free(sim, /*free_devq*/TRUE);
167		sim = NULL;
168		goto fail;
169	}
170
171	xpt_setup_ccb(&csa.ccb_h, path, /*priority*/5);
172	csa.ccb_h.func_code = XPT_SASYNC_CB;
173	csa.event_enable = AC_LOST_DEVICE;
174	csa.callback = ahc_async;
175	csa.callback_arg = sim;
176	xpt_action((union ccb *)&csa);
177	count++;
178
179	if (ahc->features & AHC_TWIN) {
180		sim2 = cam_sim_alloc(ahc_action, ahc_poll, "ahc",
181				    ahc, device_get_unit(ahc->dev_softc), 1,
182				    AHC_SCB_MAX - 1, devq);
183
184		if (sim2 == NULL) {
185			printf("ahc_attach: Unable to attach second "
186			       "bus due to resource shortage");
187			goto fail;
188		}
189
190		if (xpt_bus_register(sim2, bus_id2) != CAM_SUCCESS) {
191			printf("ahc_attach: Unable to attach second "
192			       "bus due to resource shortage");
193			/*
194			 * We do not want to destroy the device queue
195			 * because the first bus is using it.
196			 */
197			cam_sim_free(sim2, /*free_devq*/FALSE);
198			goto fail;
199		}
200
201		if (xpt_create_path(&path2, /*periph*/NULL,
202				    cam_sim_path(sim2),
203				    CAM_TARGET_WILDCARD,
204				    CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
205			xpt_bus_deregister(cam_sim_path(sim2));
206			cam_sim_free(sim2, /*free_devq*/FALSE);
207			sim2 = NULL;
208			goto fail;
209		}
210		xpt_setup_ccb(&csa.ccb_h, path2, /*priority*/5);
211		csa.ccb_h.func_code = XPT_SASYNC_CB;
212		csa.event_enable = AC_LOST_DEVICE;
213		csa.callback = ahc_async;
214		csa.callback_arg = sim2;
215		xpt_action((union ccb *)&csa);
216		count++;
217	}
218
219fail:
220	if ((ahc->flags & AHC_CHANNEL_B_PRIMARY) != 0) {
221		ahc->platform_data->sim_b = sim;
222		ahc->platform_data->path_b = path;
223		ahc->platform_data->sim = sim2;
224		ahc->platform_data->path = path2;
225	} else {
226		ahc->platform_data->sim = sim;
227		ahc->platform_data->path = path;
228		ahc->platform_data->sim_b = sim2;
229		ahc->platform_data->path_b = path2;
230	}
231	ahc_unlock(ahc, &s);
232
233	if (count != 0)
234		/* We have to wait until after any system dumps... */
235		EVENTHANDLER_REGISTER(shutdown_final, ahc_shutdown,
236				      ahc, SHUTDOWN_PRI_DEFAULT);
237
238	return (count);
239}
240
241/*
242 * Catch an interrupt from the adapter
243 */
/*
 * Catch an interrupt from the adapter.
 *
 * Thin shim registered via bus_setup_intr() in ahc_attach(); it simply
 * forwards to the bus independent interrupt handler.  Marked static to
 * match the forward declaration at the top of this file (the original
 * definition omitted the storage class, mismatching its prototype).
 */
static void
ahc_freebsd_intr(void *arg)
{
	struct	ahc_softc *ahc;

	ahc = (struct ahc_softc *)arg;
	ahc_intr(ahc);
}
252
253/*
254 * We have an scb which has been processed by the
255 * adaptor, now we look to see how the operation
256 * went.
257 */
/*
 * Complete a transaction: tear down the SCB's bookkeeping, propagate
 * status (including autosense data) into the CCB, release the SCB and
 * hand the CCB back to CAM via xpt_done().
 */
void
ahc_done(struct ahc_softc *ahc, struct scb *scb)
{
	union ccb *ccb;

	CAM_DEBUG(scb->io_ctx->ccb_h.path, CAM_DEBUG_TRACE,
		  ("ahc_done - scb %d\n", scb->hscb->tag));

	ccb = scb->io_ctx;
	LIST_REMOVE(scb, pending_links);
	/*
	 * Untagged initiator I/O on controllers without per-target SCB
	 * busy tables (no AHC_SCB_BTT) is serialized through a software
	 * queue; pull this SCB off and kick the next waiter.
	 */
	if (ccb->ccb_h.func_code == XPT_SCSI_IO
	  && ((ccb->ccb_h.flags & CAM_TAG_ACTION_VALID) == 0
	   || ccb->csio.tag_action == CAM_TAG_ACTION_NONE)
	  && (ahc->features & AHC_SCB_BTT) == 0) {
		struct scb_tailq *untagged_q;

		untagged_q = &ahc->untagged_queues[ccb->ccb_h.target_id];
		TAILQ_REMOVE(untagged_q, scb, links.tqe);
		ahc_run_untagged_queue(ahc, untagged_q);
	}

	/* The command is done; its watchdog must not fire. */
	untimeout(ahc_timeout, (caddr_t)scb, ccb->ccb_h.timeout_ch);

	if ((ccb->ccb_h.flags & CAM_DIR_MASK) != CAM_DIR_NONE) {
		bus_dmasync_op_t op;

		if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN)
			op = BUS_DMASYNC_POSTREAD;
		else
			op = BUS_DMASYNC_POSTWRITE;
		bus_dmamap_sync(ahc->buffer_dmat, scb->dmamap, op);
		bus_dmamap_unload(ahc->buffer_dmat, scb->dmamap);
	}

	/*
	 * Target-mode continue I/Os take a short-circuit completion
	 * path: no recovery/autosense handling applies to them.
	 */
	if (ccb->ccb_h.func_code == XPT_CONT_TARGET_IO) {
		if (ahc_get_transaction_status(scb) == CAM_REQ_INPROG)
			ccb->ccb_h.status |= CAM_REQ_CMP;
		ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
		ahc_free_scb(ahc, scb);
		xpt_done(ccb);
		return;
	}

	/*
	 * If the recovery SCB completes, we have to be
	 * out of our timeout.
	 */
	if ((scb->flags & SCB_RECOVERY_SCB) != 0) {
		struct	scb *list_scb;

		/*
		 * We were able to complete the command successfully,
		 * so reinstate the timeouts for all other pending
		 * commands.
		 */
		LIST_FOREACH(list_scb, &ahc->pending_scbs, pending_links) {
			union ccb *ccb;

			ccb = list_scb->io_ctx;
			ccb->ccb_h.timeout_ch =
			    timeout(ahc_timeout, list_scb,
				    (ccb->ccb_h.timeout * hz)/1000);
		}

		/*
		 * Ensure that we didn't put a second instance of this
		 * SCB into the QINFIFO.
		 */
		ahc_search_qinfifo(ahc, SCB_GET_TARGET(ahc, scb),
				   SCB_GET_CHANNEL(ahc, scb),
				   SCB_GET_LUN(scb), scb->hscb->tag,
				   ROLE_INITIATOR, /*status*/0,
				   SEARCH_REMOVE);
		/*
		 * A BDR/abort issued by the recovery machinery is
		 * reported to the peripheral driver as a timeout.
		 */
		if (ahc_get_transaction_status(scb) == CAM_BDR_SENT
		 || ahc_get_transaction_status(scb) == CAM_REQ_ABORTED)
			ahc_set_transaction_status(scb, CAM_CMD_TIMEOUT);
		ahc_print_path(ahc, scb);
		printf("no longer in timeout, status = %x\n",
		       ccb->ccb_h.status);
	}

	/* Don't clobber any existing error state */
	if (ahc_get_transaction_status(scb) == CAM_REQ_INPROG) {
		ccb->ccb_h.status |= CAM_REQ_CMP;
	} else if ((scb->flags & SCB_SENSE) != 0) {
		/*
		 * We performed autosense retrieval.
		 *
		 * Zero any sense not transferred by the
		 * device.  The SCSI spec mandates that any
		 * untransfered data should be assumed to be
		 * zero.  Complete the 'bounce' of sense information
		 * through buffers accessible via bus-space by
		 * copying it into the clients csio.
		 */
		memset(&ccb->csio.sense_data, 0, sizeof(ccb->csio.sense_data));
		memcpy(&ccb->csio.sense_data,
		       &ahc->scb_data->sense[scb->hscb->tag],
		       (scb->sg_list->len & AHC_SG_LEN_MASK)
		       - ccb->csio.sense_resid);
		scb->io_ctx->ccb_h.status |= CAM_AUTOSNS_VALID;
	}
	ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
	ahc_free_scb(ahc, scb);
	xpt_done(ccb);
}
364
/*
 * CAM SIM action entry point: dispatch a CCB from the transport layer.
 *
 * Handles initiator I/O (XPT_SCSI_IO, XPT_RESET_DEV), host target mode
 * CCBs (XPT_ACCEPT_TARGET_IO, XPT_CONT_TARGET_IO, notify/enable-lun),
 * transfer-settings get/set, geometry calculation, bus reset, and path
 * inquiry.  Note the deliberate fallthrough from the target-mode cases
 * into the initiator I/O case below.
 */
static void
ahc_action(struct cam_sim *sim, union ccb *ccb)
{
	struct	ahc_softc *ahc;
	struct	tmode_lstate *lstate;
	u_int	target_id;
	u_int	our_id;
	long	s;

	CAM_DEBUG(ccb->ccb_h.path, CAM_DEBUG_TRACE, ("ahc_action\n"));

	ahc = (struct ahc_softc *)cam_sim_softc(sim);

	target_id = ccb->ccb_h.target_id;
	our_id = SIM_SCSI_ID(ahc, sim);

	switch (ccb->ccb_h.func_code) {
	/* Common cases first */
	case XPT_ACCEPT_TARGET_IO:	/* Accept Host Target Mode CDB */
	case XPT_CONT_TARGET_IO:/* Continue Host Target I/O Connection*/
	{
		struct	   tmode_tstate *tstate;
		cam_status status;

		status = ahc_find_tmode_devs(ahc, sim, ccb, &tstate,
					     &lstate, TRUE);

		if (status != CAM_REQ_CMP) {
			if (ccb->ccb_h.func_code == XPT_CONT_TARGET_IO) {
				/* Response from the black hole device */
				tstate = NULL;
				lstate = ahc->black_hole;
			} else {
				ccb->ccb_h.status = status;
				xpt_done(ccb);
				break;
			}
		}
		if (ccb->ccb_h.func_code == XPT_ACCEPT_TARGET_IO) {

			/* Queue the ATIO for a future incoming command. */
			ahc_lock(ahc, &s);
			SLIST_INSERT_HEAD(&lstate->accept_tios, &ccb->ccb_h,
					  sim_links.sle);
			ccb->ccb_h.status = CAM_REQ_INPROG;
			if ((ahc->flags & AHC_TQINFIFO_BLOCKED) != 0)
				ahc_run_tqinfifo(ahc, /*paused*/FALSE);
			ahc_unlock(ahc, &s);
			break;
		}

		/*
		 * The target_id represents the target we attempt to
		 * select.  In target mode, this is the initiator of
		 * the original command.
		 */
		our_id = target_id;
		target_id = ccb->csio.init_id;
		/* FALLTHROUGH */
	}
	case XPT_SCSI_IO:	/* Execute the requested I/O operation */
	case XPT_RESET_DEV:	/* Bus Device Reset the specified SCSI device */
	{
		struct	   scb *scb;
		struct	   hardware_scb *hscb;
		struct	   ahc_initiator_tinfo *tinfo;
		struct	   tmode_tstate *tstate;
		uint16_t  mask;

		/*
		 * get an scb to use.
		 */
		if ((scb = ahc_get_scb(ahc)) == NULL) {

			/*
			 * Out of SCBs: freeze the SIM queue and ask CAM
			 * to requeue; the shortage flag prompts a queue
			 * release when an SCB frees up.
			 */
			ahc_lock(ahc, &s);
			ahc->flags |= AHC_RESOURCE_SHORTAGE;
			ahc_unlock(ahc, &s);
			xpt_freeze_simq(sim, /*count*/1);
			ccb->ccb_h.status = CAM_REQUEUE_REQ;
			xpt_done(ccb);
			return;
		}

		hscb = scb->hscb;

		CAM_DEBUG(ccb->ccb_h.path, CAM_DEBUG_SUBTRACE,
			  ("start scb(%p)\n", scb));
		scb->io_ctx = ccb;
		/*
		 * So we can find the SCB when an abort is requested
		 */
		ccb->ccb_h.ccb_scb_ptr = scb;
		ccb->ccb_h.ccb_ahc_ptr = ahc;

		/*
		 * Put all the arguments for the xfer in the scb
		 */
		hscb->control = 0;
		hscb->scsiid = BUILD_SCSIID(ahc, sim, target_id, our_id);
		hscb->lun = ccb->ccb_h.target_lun;
		mask = SCB_GET_TARGET_MASK(ahc, scb);
		tinfo = ahc_fetch_transinfo(ahc, SIM_CHANNEL(ahc, sim), our_id,
					    target_id, &tstate);

		/* Seed the hardware SCB with current negotiation state. */
		hscb->scsirate = tinfo->scsirate;
		hscb->scsioffset = tinfo->current.offset;
		if ((tstate->ultraenb & mask) != 0)
			hscb->control |= ULTRAENB;

		if ((tstate->discenable & mask) != 0
		 && (ccb->ccb_h.flags & CAM_DIS_DISCONNECT) == 0)
			hscb->control |= DISCENB;

		if ((ccb->ccb_h.flags & CAM_NEGOTIATE) != 0
		 && (tinfo->current.width != 0 || tinfo->current.period != 0)) {
			scb->flags |= SCB_NEGOTIATE;
			hscb->control |= MK_MESSAGE;
		}

		if (ccb->ccb_h.func_code == XPT_RESET_DEV) {
			/* BDR: no CDB, message-out only; queue immediately. */
			hscb->cdb_len = 0;
			scb->flags |= SCB_DEVICE_RESET;
			hscb->control |= MK_MESSAGE;
			ahc_execute_scb(scb, NULL, 0, 0);
		} else {
			if (ccb->ccb_h.func_code == XPT_CONT_TARGET_IO) {
				struct target_data *tdata;

				tdata = &hscb->shared_data.tdata;
				if (ahc->pending_device == lstate) {
					scb->flags |= SCB_TARGET_IMMEDIATE;
					ahc->pending_device = NULL;
				}
				hscb->control |= TARGET_SCB;
				tdata->target_phases = IDENTIFY_SEEN;
				if ((ccb->ccb_h.flags & CAM_SEND_STATUS) != 0) {
					tdata->target_phases |= SPHASE_PENDING;
					tdata->scsi_status =
					    ccb->csio.scsi_status;
				}
				tdata->initiator_tag = ccb->csio.tag_id;
			}
			if (ccb->ccb_h.flags & CAM_TAG_ACTION_VALID)
				hscb->control |= ccb->csio.tag_action;

			/* DMA-map the data; completion queues the SCB. */
			ahc_setup_data(ahc, sim, &ccb->csio, scb);
		}
		break;
	}
	case XPT_NOTIFY_ACK:
	case XPT_IMMED_NOTIFY:
	{
		struct	   tmode_tstate *tstate;
		struct	   tmode_lstate *lstate;
		cam_status status;

		status = ahc_find_tmode_devs(ahc, sim, ccb, &tstate,
					     &lstate, TRUE);

		if (status != CAM_REQ_CMP) {
			ccb->ccb_h.status = status;
			xpt_done(ccb);
			break;
		}
		SLIST_INSERT_HEAD(&lstate->immed_notifies, &ccb->ccb_h,
				  sim_links.sle);
		ccb->ccb_h.status = CAM_REQ_INPROG;
		ahc_send_lstate_events(ahc, lstate);
		break;
	}
	case XPT_EN_LUN:		/* Enable LUN as a target */
		ahc_handle_en_lun(ahc, sim, ccb);
		xpt_done(ccb);
		break;
	case XPT_ABORT:			/* Abort the specified CCB */
	{
		ahc_abort_ccb(ahc, sim, ccb);
		break;
	}
	case XPT_SET_TRAN_SETTINGS:
	{
#ifdef AHC_NEW_TRAN_SETTINGS
		/* New-style CTS: per-protocol/transport sub-structures. */
		struct	ahc_devinfo devinfo;
		struct	ccb_trans_settings *cts;
		struct	ccb_trans_settings_scsi *scsi;
		struct	ccb_trans_settings_spi *spi;
		struct	ahc_initiator_tinfo *tinfo;
		struct	tmode_tstate *tstate;
		uint16_t *discenable;
		uint16_t *tagenable;
		u_int	update_type;

		cts = &ccb->cts;
		scsi = &cts->proto_specific.scsi;
		spi = &cts->xport_specific.spi;
		ahc_compile_devinfo(&devinfo, SIM_SCSI_ID(ahc, sim),
				    cts->ccb_h.target_id,
				    cts->ccb_h.target_lun,
				    SIM_CHANNEL(ahc, sim),
				    ROLE_UNKNOWN);
		tinfo = ahc_fetch_transinfo(ahc, devinfo.channel,
					    devinfo.our_scsiid,
					    devinfo.target, &tstate);
		update_type = 0;
		if (cts->type == CTS_TYPE_CURRENT_SETTINGS) {
			update_type |= AHC_TRANS_GOAL;
			discenable = &tstate->discenable;
			tagenable = &tstate->tagenable;
			tinfo->current.protocol_version =
			    cts->protocol_version;
			tinfo->current.transport_version =
			    cts->transport_version;
			tinfo->goal.protocol_version =
			    cts->protocol_version;
			tinfo->goal.transport_version =
			    cts->transport_version;
		} else if (cts->type == CTS_TYPE_USER_SETTINGS) {
			update_type |= AHC_TRANS_USER;
			discenable = &ahc->user_discenable;
			tagenable = &ahc->user_tagenable;
			tinfo->user.protocol_version =
			    cts->protocol_version;
			tinfo->user.transport_version =
			    cts->transport_version;
		} else {
			ccb->ccb_h.status = CAM_REQ_INVALID;
			xpt_done(ccb);
			break;
		}

		ahc_lock(ahc, &s);

		if ((spi->valid & CTS_SPI_VALID_DISC) != 0) {
			if ((spi->flags & CTS_SPI_FLAGS_DISC_ENB) != 0)
				*discenable |= devinfo.target_mask;
			else
				*discenable &= ~devinfo.target_mask;
		}

		if ((scsi->valid & CTS_SCSI_VALID_TQ) != 0) {
			if ((scsi->flags & CTS_SCSI_FLAGS_TAG_ENB) != 0)
				*tagenable |= devinfo.target_mask;
			else
				*tagenable &= ~devinfo.target_mask;
		}

		if ((spi->valid & CTS_SPI_VALID_BUS_WIDTH) != 0) {
			ahc_validate_width(ahc, &spi->bus_width);
			ahc_set_width(ahc, &devinfo, spi->bus_width,
				      update_type, /*paused*/FALSE);
		}

		/*
		 * For fields the caller left unspecified, fill in the
		 * current user/goal value so the combined rate check
		 * below operates on complete information.
		 */
		if ((spi->valid & CTS_SPI_VALID_PPR_OPTIONS) == 0) {
			if (update_type == AHC_TRANS_USER)
				spi->ppr_options = tinfo->user.ppr_options;
			else
				spi->ppr_options = tinfo->goal.ppr_options;
		}

		if ((spi->valid & CTS_SPI_VALID_SYNC_OFFSET) == 0) {
			if (update_type == AHC_TRANS_USER)
				spi->sync_offset = tinfo->user.offset;
			else
				spi->sync_offset = tinfo->goal.offset;
		}

		if ((spi->valid & CTS_SPI_VALID_SYNC_RATE) == 0) {
			if (update_type == AHC_TRANS_USER)
				spi->sync_period = tinfo->user.period;
			else
				spi->sync_period = tinfo->goal.period;
		}

		if (((spi->valid & CTS_SPI_VALID_SYNC_RATE) != 0)
		 || ((spi->valid & CTS_SPI_VALID_SYNC_OFFSET) != 0)) {
			struct ahc_syncrate *syncrate;
			u_int maxsync;

			if ((ahc->features & AHC_ULTRA2) != 0)
				maxsync = AHC_SYNCRATE_DT;
			else if ((ahc->features & AHC_ULTRA) != 0)
				maxsync = AHC_SYNCRATE_ULTRA;
			else
				maxsync = AHC_SYNCRATE_FAST;

			syncrate = ahc_find_syncrate(ahc, &spi->sync_period,
						     &spi->ppr_options,
						     maxsync);
			ahc_validate_offset(ahc, syncrate, &spi->sync_offset,
					    spi->bus_width);

			/* We use a period of 0 to represent async */
			if (spi->sync_offset == 0) {
				spi->sync_period = 0;
				spi->ppr_options = 0;
			}

			ahc_set_syncrate(ahc, &devinfo, syncrate,
					 spi->sync_period, spi->sync_offset,
					 spi->ppr_options, update_type,
					 /*paused*/FALSE);
		}
		ahc_unlock(ahc, &s);
		ccb->ccb_h.status = CAM_REQ_CMP;
		xpt_done(ccb);
#else
		/* Legacy CTS: flat flags/valid bitmask interface. */
		struct	  ahc_devinfo devinfo;
		struct	  ccb_trans_settings *cts;
		struct	  ahc_initiator_tinfo *tinfo;
		struct	  tmode_tstate *tstate;
		uint16_t *discenable;
		uint16_t *tagenable;
		u_int	  update_type;
		long	  s;

		cts = &ccb->cts;
		ahc_compile_devinfo(&devinfo, SIM_SCSI_ID(ahc, sim),
				    cts->ccb_h.target_id,
				    cts->ccb_h.target_lun,
				    SIM_CHANNEL(ahc, sim),
				    ROLE_UNKNOWN);
		tinfo = ahc_fetch_transinfo(ahc, devinfo.channel,
					    devinfo.our_scsiid,
					    devinfo.target, &tstate);
		update_type = 0;
		if ((cts->flags & CCB_TRANS_CURRENT_SETTINGS) != 0) {
			update_type |= AHC_TRANS_GOAL;
			discenable = &tstate->discenable;
			tagenable = &tstate->tagenable;
		} else if ((cts->flags & CCB_TRANS_USER_SETTINGS) != 0) {
			update_type |= AHC_TRANS_USER;
			discenable = &ahc->user_discenable;
			tagenable = &ahc->user_tagenable;
		} else {
			ccb->ccb_h.status = CAM_REQ_INVALID;
			xpt_done(ccb);
			break;
		}

		ahc_lock(ahc, &s);

		if ((cts->valid & CCB_TRANS_DISC_VALID) != 0) {
			if ((cts->flags & CCB_TRANS_DISC_ENB) != 0)
				*discenable |= devinfo.target_mask;
			else
				*discenable &= ~devinfo.target_mask;
		}

		if ((cts->valid & CCB_TRANS_TQ_VALID) != 0) {
			if ((cts->flags & CCB_TRANS_TAG_ENB) != 0)
				*tagenable |= devinfo.target_mask;
			else
				*tagenable &= ~devinfo.target_mask;
		}

		if ((cts->valid & CCB_TRANS_BUS_WIDTH_VALID) != 0) {
			ahc_validate_width(ahc, &cts->bus_width);
			ahc_set_width(ahc, &devinfo, cts->bus_width,
				      update_type, /*paused*/FALSE);
		}

		if ((cts->valid & CCB_TRANS_SYNC_OFFSET_VALID) == 0) {
			if (update_type == AHC_TRANS_USER)
				cts->sync_offset = tinfo->user.offset;
			else
				cts->sync_offset = tinfo->goal.offset;
		}

		if ((cts->valid & CCB_TRANS_SYNC_RATE_VALID) == 0) {
			if (update_type == AHC_TRANS_USER)
				cts->sync_period = tinfo->user.period;
			else
				cts->sync_period = tinfo->goal.period;
		}

		if (((cts->valid & CCB_TRANS_SYNC_RATE_VALID) != 0)
		 || ((cts->valid & CCB_TRANS_SYNC_OFFSET_VALID) != 0)) {
			struct ahc_syncrate *syncrate;
			u_int ppr_options;
			u_int maxsync;

			if ((ahc->features & AHC_ULTRA2) != 0)
				maxsync = AHC_SYNCRATE_DT;
			else if ((ahc->features & AHC_ULTRA) != 0)
				maxsync = AHC_SYNCRATE_ULTRA;
			else
				maxsync = AHC_SYNCRATE_FAST;

			/*
			 * Periods <= 9 (faster than 25ns) imply DT;
			 * request the PPR DT option in that case.
			 */
			ppr_options = 0;
			if (cts->sync_period <= 9)
				ppr_options = MSG_EXT_PPR_DT_REQ;

			syncrate = ahc_find_syncrate(ahc, &cts->sync_period,
						     &ppr_options,
						     maxsync);
			ahc_validate_offset(ahc, syncrate, &cts->sync_offset,
					    MSG_EXT_WDTR_BUS_8_BIT);

			/* We use a period of 0 to represent async */
			if (cts->sync_offset == 0) {
				cts->sync_period = 0;
				ppr_options = 0;
			}

			if (ppr_options == MSG_EXT_PPR_DT_REQ
			 && tinfo->user.transport_version >= 3) {
				tinfo->goal.transport_version =
				    tinfo->user.transport_version;
				tinfo->current.transport_version =
				    tinfo->user.transport_version;
			}

			ahc_set_syncrate(ahc, &devinfo, syncrate,
					 cts->sync_period, cts->sync_offset,
					 ppr_options, update_type,
					 /*paused*/FALSE);
		}
		ahc_unlock(ahc, &s);
		ccb->ccb_h.status = CAM_REQ_CMP;
		xpt_done(ccb);
#endif
		break;
	}
	case XPT_GET_TRAN_SETTINGS:
	/* Get default/user set transfer settings for the target */
	{

		ahc_lock(ahc, &s);
		ahc_get_tran_settings(ahc, SIM_SCSI_ID(ahc, sim),
				      SIM_CHANNEL(ahc, sim), &ccb->cts);
		ahc_unlock(ahc, &s);
		xpt_done(ccb);
		break;
	}
	case XPT_CALC_GEOMETRY:
	{
		struct	  ccb_calc_geometry *ccg;
		uint32_t size_mb;
		uint32_t secs_per_cylinder;
		int	  extended;

		/*
		 * Standard BIOS-compatible geometry: 64/32 for small
		 * disks, 255/63 above 1GB when extended translation is
		 * enabled for this channel.
		 */
		ccg = &ccb->ccg;
		size_mb = ccg->volume_size
			/ ((1024L * 1024L) / ccg->block_size);
		extended = SIM_IS_SCSIBUS_B(ahc, sim)
			? ahc->flags & AHC_EXTENDED_TRANS_B
			: ahc->flags & AHC_EXTENDED_TRANS_A;

		if (size_mb > 1024 && extended) {
			ccg->heads = 255;
			ccg->secs_per_track = 63;
		} else {
			ccg->heads = 64;
			ccg->secs_per_track = 32;
		}
		secs_per_cylinder = ccg->heads * ccg->secs_per_track;
		ccg->cylinders = ccg->volume_size / secs_per_cylinder;
		ccb->ccb_h.status = CAM_REQ_CMP;
		xpt_done(ccb);
		break;
	}
	case XPT_RESET_BUS:		/* Reset the specified SCSI bus */
	{
		int  found;

		ahc_lock(ahc, &s);
		found = ahc_reset_channel(ahc, SIM_CHANNEL(ahc, sim),
					  /*initiate reset*/TRUE);
		ahc_unlock(ahc, &s);
		if (bootverbose) {
			xpt_print_path(SIM_PATH(ahc, sim));
			printf("SCSI bus reset delivered. "
			       "%d SCBs aborted.\n", found);
		}
		ccb->ccb_h.status = CAM_REQ_CMP;
		xpt_done(ccb);
		break;
	}
	case XPT_TERM_IO:		/* Terminate the I/O process */
		/* XXX Implement */
		ccb->ccb_h.status = CAM_REQ_INVALID;
		xpt_done(ccb);
		break;
	case XPT_PATH_INQ:		/* Path routing inquiry */
	{
		struct ccb_pathinq *cpi = &ccb->cpi;

		cpi->version_num = 1; /* XXX??? */
		cpi->hba_inquiry = PI_SDTR_ABLE|PI_TAG_ABLE;
		if ((ahc->features & AHC_WIDE) != 0)
			cpi->hba_inquiry |= PI_WIDE_16;
		if ((ahc->flags & AHC_TARGETMODE) != 0) {
			cpi->target_sprt = PIT_PROCESSOR
					 | PIT_DISCONNECT
					 | PIT_TERM_IO;
		} else {
			cpi->target_sprt = 0;
		}
		cpi->hba_misc = (ahc->flags & AHC_INITIATORMODE)
			      ? 0 : PIM_NOINITIATOR;
		cpi->hba_eng_cnt = 0;
		cpi->max_target = (ahc->features & AHC_WIDE) ? 15 : 7;
		cpi->max_lun = 64;
		if (SIM_IS_SCSIBUS_B(ahc, sim)) {
			cpi->initiator_id = ahc->our_id_b;
			if ((ahc->flags & AHC_RESET_BUS_B) == 0)
				cpi->hba_misc |= PIM_NOBUSRESET;
		} else {
			cpi->initiator_id = ahc->our_id;
			if ((ahc->flags & AHC_RESET_BUS_A) == 0)
				cpi->hba_misc |= PIM_NOBUSRESET;
		}
		cpi->bus_id = cam_sim_bus(sim);
		cpi->base_transfer_speed = 3300;
		strncpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN);
		strncpy(cpi->hba_vid, "Adaptec", HBA_IDLEN);
		strncpy(cpi->dev_name, cam_sim_name(sim), DEV_IDLEN);
		cpi->unit_number = cam_sim_unit(sim);
#ifdef AHC_NEW_TRAN_SETTINGS
		cpi->protocol = PROTO_SCSI;
		cpi->protocol_version = SCSI_REV_2;
		cpi->transport = XPORT_SPI;
		cpi->transport_version = 2;
		cpi->xport_specific.spi.ppr_options = SID_SPI_CLOCK_ST;
		if ((ahc->features & AHC_DT) != 0) {
			cpi->transport_version = 3;
			cpi->xport_specific.spi.ppr_options =
			    SID_SPI_CLOCK_DT_ST;
		}
#endif
		cpi->ccb_h.status = CAM_REQ_CMP;
		xpt_done(ccb);
		break;
	}
	default:
		ccb->ccb_h.status = CAM_REQ_INVALID;
		xpt_done(ccb);
		break;
	}
}
904
/*
 * Fill in a ccb_trans_settings for the requested device, reporting
 * either the current (active/goal) or the user-configured negotiation
 * parameters plus the disconnect/tag-queueing enables.  Two bodies:
 * the new-style CTS interface and the legacy flat-flags interface,
 * selected at compile time by AHC_NEW_TRAN_SETTINGS.
 */
static void
ahc_get_tran_settings(struct ahc_softc *ahc, int our_id, char channel,
		      struct ccb_trans_settings *cts)
{
#ifdef AHC_NEW_TRAN_SETTINGS
	struct	ahc_devinfo devinfo;
	struct	ccb_trans_settings_scsi *scsi;
	struct	ccb_trans_settings_spi *spi;
	struct	ahc_initiator_tinfo *targ_info;
	struct	tmode_tstate *tstate;
	struct	ahc_transinfo *tinfo;

	scsi = &cts->proto_specific.scsi;
	spi = &cts->xport_specific.spi;
	ahc_compile_devinfo(&devinfo, our_id,
			    cts->ccb_h.target_id,
			    cts->ccb_h.target_lun,
			    channel, ROLE_UNKNOWN);
	targ_info = ahc_fetch_transinfo(ahc, devinfo.channel,
					devinfo.our_scsiid,
					devinfo.target, &tstate);

	/* Select which negotiation record the caller asked about. */
	if (cts->type == CTS_TYPE_CURRENT_SETTINGS)
		tinfo = &targ_info->current;
	else
		tinfo = &targ_info->user;

	scsi->flags &= ~CTS_SCSI_FLAGS_TAG_ENB;
	spi->flags &= ~CTS_SPI_FLAGS_DISC_ENB;
	if (cts->type == CTS_TYPE_USER_SETTINGS) {
		if ((ahc->user_discenable & devinfo.target_mask) != 0)
			spi->flags |= CTS_SPI_FLAGS_DISC_ENB;

		if ((ahc->user_tagenable & devinfo.target_mask) != 0)
			scsi->flags |= CTS_SCSI_FLAGS_TAG_ENB;
	} else {
		if ((tstate->discenable & devinfo.target_mask) != 0)
			spi->flags |= CTS_SPI_FLAGS_DISC_ENB;

		if ((tstate->tagenable & devinfo.target_mask) != 0)
			scsi->flags |= CTS_SCSI_FLAGS_TAG_ENB;
	}
	cts->protocol_version = tinfo->protocol_version;
	cts->transport_version = tinfo->transport_version;

	spi->sync_period = tinfo->period;
	spi->sync_offset = tinfo->offset;
	spi->bus_width = tinfo->width;
	spi->ppr_options = tinfo->ppr_options;

	cts->protocol = PROTO_SCSI;
	cts->transport = XPORT_SPI;
	scsi->valid = CTS_SCSI_VALID_TQ;
	spi->valid = CTS_SPI_VALID_SYNC_RATE
		   | CTS_SPI_VALID_SYNC_OFFSET
		   | CTS_SPI_VALID_BUS_WIDTH
		   | CTS_SPI_VALID_DISC
		   | CTS_SPI_VALID_PPR_OPTIONS;

	cts->ccb_h.status = CAM_REQ_CMP;
#else
	struct	ahc_devinfo devinfo;
	struct	ahc_initiator_tinfo *targ_info;
	struct	tmode_tstate *tstate;
	struct	ahc_transinfo *tinfo;
	long	s;

	ahc_compile_devinfo(&devinfo, our_id,
			    cts->ccb_h.target_id,
			    cts->ccb_h.target_lun,
			    channel, ROLE_UNKNOWN);
	targ_info = ahc_fetch_transinfo(ahc, devinfo.channel,
					devinfo.our_scsiid,
					devinfo.target, &tstate);

	/* Select which negotiation record the caller asked about. */
	if ((cts->flags & CCB_TRANS_CURRENT_SETTINGS) != 0)
		tinfo = &targ_info->current;
	else
		tinfo = &targ_info->user;

	ahc_lock(ahc, &s);

	cts->flags &= ~(CCB_TRANS_DISC_ENB|CCB_TRANS_TAG_ENB);
	/*
	 * NOTE(review): this branch reads the user_* enables for
	 * CURRENT_SETTINGS, the inverse of the new-style body above —
	 * confirm whether the condition is intentionally inverted here.
	 */
	if ((cts->flags & CCB_TRANS_CURRENT_SETTINGS) != 0) {
		if ((ahc->user_discenable & devinfo.target_mask) != 0)
			cts->flags |= CCB_TRANS_DISC_ENB;

		if ((ahc->user_tagenable & devinfo.target_mask) != 0)
			cts->flags |= CCB_TRANS_TAG_ENB;
	} else {
		if ((tstate->discenable & devinfo.target_mask) != 0)
			cts->flags |= CCB_TRANS_DISC_ENB;

		if ((tstate->tagenable & devinfo.target_mask) != 0)
			cts->flags |= CCB_TRANS_TAG_ENB;
	}
	cts->sync_period = tinfo->period;
	cts->sync_offset = tinfo->offset;
	cts->bus_width = tinfo->width;

	ahc_unlock(ahc, &s);

	cts->valid = CCB_TRANS_SYNC_RATE_VALID
		   | CCB_TRANS_SYNC_OFFSET_VALID
		   | CCB_TRANS_BUS_WIDTH_VALID
		   | CCB_TRANS_DISC_VALID
		   | CCB_TRANS_TQ_VALID;

	cts->ccb_h.status = CAM_REQ_CMP;
#endif
}
1016
1017static void
1018ahc_async(void *callback_arg, uint32_t code, struct cam_path *path, void *arg)
1019{
1020	struct ahc_softc *ahc;
1021	struct cam_sim *sim;
1022
1023	sim = (struct cam_sim *)callback_arg;
1024	ahc = (struct ahc_softc *)cam_sim_softc(sim);
1025	switch (code) {
1026	case AC_LOST_DEVICE:
1027	{
1028		struct	ahc_devinfo devinfo;
1029		long	s;
1030
1031		ahc_compile_devinfo(&devinfo, SIM_SCSI_ID(ahc, sim),
1032				    xpt_path_target_id(path),
1033				    xpt_path_lun_id(path),
1034				    SIM_CHANNEL(ahc, sim),
1035				    ROLE_UNKNOWN);
1036
1037		/*
1038		 * Revert to async/narrow transfers
1039		 * for the next device.
1040		 */
1041		ahc_lock(ahc, &s);
1042		ahc_set_width(ahc, &devinfo, MSG_EXT_WDTR_BUS_8_BIT,
1043			      AHC_TRANS_GOAL|AHC_TRANS_CUR, /*paused*/FALSE);
1044		ahc_set_syncrate(ahc, &devinfo, /*syncrate*/NULL,
1045				 /*period*/0, /*offset*/0, /*ppr_options*/0,
1046				 AHC_TRANS_GOAL|AHC_TRANS_CUR,
1047				 /*paused*/FALSE);
1048		ahc_unlock(ahc, &s);
1049		break;
1050	}
1051	default:
1052		break;
1053	}
1054}
1055
/*
 * bus_dmamap_load() completion callback: translate the returned DMA
 * segments into the controller's hardware SG list, handle the
 * non-Ultra2 "WideODD" odd-length-transfer workaround, and queue the
 * SCB to the sequencer.  May run synchronously from ahc_setup_data()
 * or later via busdma's deferred-load path.
 */
static void
ahc_execute_scb(void *arg, bus_dma_segment_t *dm_segs, int nsegments,
		int error)
{
	struct	 scb *scb;
	union	 ccb *ccb;
	struct	 ahc_softc *ahc;
	long	 s;

	scb = (struct scb *)arg;
	ccb = scb->io_ctx;
	ahc = (struct ahc_softc *)ccb->ccb_h.ccb_ahc_ptr;

	if (error != 0) {
		/* Mapping failed: report the error and release the SCB. */
		if (error == EFBIG)
			ahc_set_transaction_status(scb, CAM_REQ_TOO_BIG);
		else
			ahc_set_transaction_status(scb, CAM_REQ_CMP_ERR);
		if (nsegments != 0)
			bus_dmamap_unload(ahc->buffer_dmat, scb->dmamap);
		ahc_lock(ahc, &s);
		ahc_free_scb(ahc, scb);
		ahc_unlock(ahc, &s);
		xpt_done(ccb);
		return;
	}
	if (nsegments != 0) {
		struct	  ahc_dma_seg *sg;
		bus_dma_segment_t *end_seg;
		bus_dmasync_op_t op;

		end_seg = dm_segs + nsegments;

		/* Copy the segments into our SG list */
		sg = scb->sg_list;
		while (dm_segs < end_seg) {
			sg->addr = dm_segs->ds_addr;
/* XXX Add in the 5th byte of the address later. */
			sg->len = dm_segs->ds_len;
			sg++;
			dm_segs++;
		}

		/*
		 * Note where to find the SG entries in bus space.
		 * We also set the full residual flag which the
		 * sequencer will clear as soon as a data transfer
		 * occurs.
		 */
		scb->hscb->sgptr = scb->sg_list_phys | SG_FULL_RESID;

		/* Sync the map for the DMA direction of this transfer. */
		if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN)
			op = BUS_DMASYNC_PREREAD;
		else
			op = BUS_DMASYNC_PREWRITE;

		bus_dmamap_sync(ahc->buffer_dmat, scb->dmamap, op);

		if (ccb->ccb_h.func_code == XPT_CONT_TARGET_IO) {
			struct target_data *tdata;

			/* Target-mode I/O: record the pending data phase. */
			tdata = &scb->hscb->shared_data.tdata;
			tdata->target_phases |= DPHASE_PENDING;
			if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_OUT)
				tdata->data_phase = P_DATAOUT;
			else
				tdata->data_phase = P_DATAIN;

			/*
			 * If the transfer is of an odd length and in the
			 * "in" direction (scsi->HostBus), then it may
			 * trigger a bug in the 'WideODD' feature of
			 * non-Ultra2 chips.  Force the total data-length
			 * to be even by adding an extra, 1 byte, SG,
			 * element.  We do this even if we are not currently
			 * negotiated wide as negotiation could occur before
			 * this command is executed.
			 */
			if ((ahc->bugs & AHC_TMODE_WIDEODD_BUG) != 0
			 && (ccb->csio.dxfer_len & 0x1) != 0
			 && (ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {

				nsegments++;
				if (nsegments > AHC_NSEG) {

					/* No room for the pad segment. */
					ahc_set_transaction_status(scb,
					    CAM_REQ_TOO_BIG);
					bus_dmamap_unload(ahc->buffer_dmat,
							  scb->dmamap);
					ahc_lock(ahc, &s);
					ahc_free_scb(ahc, scb);
					ahc_unlock(ahc, &s);
					xpt_done(ccb);
					return;
				}
				/* Pad with a 1-byte scratch-buffer segment. */
				sg->addr = ahc->dma_bug_buf;
				sg->len = 1;
				sg++;
			}
		}
		/* Mark the final SG element for the sequencer. */
		sg--;
		sg->len |= AHC_DMA_LAST_SEG;

		/* Copy the first SG into the "current" data pointer area */
		scb->hscb->dataptr = scb->sg_list->addr;
		scb->hscb->datacnt = scb->sg_list->len;
	} else {
		/* No data transfer: null out the SG/data fields. */
		scb->hscb->sgptr = SG_LIST_NULL;
		scb->hscb->dataptr = 0;
		scb->hscb->datacnt = 0;
	}

	scb->sg_count = nsegments;

	ahc_lock(ahc, &s);

	/*
	 * Last time we need to check if this SCB needs to
	 * be aborted.
	 */
	if (ahc_get_transaction_status(scb) != CAM_REQ_INPROG) {
		if (nsegments != 0)
			bus_dmamap_unload(ahc->buffer_dmat,
					  scb->dmamap);
		ahc_free_scb(ahc, scb);
		ahc_unlock(ahc, &s);
		xpt_done(ccb);
		return;
	}

	LIST_INSERT_HEAD(&ahc->pending_scbs, scb, pending_links);

	ccb->ccb_h.status |= CAM_SIM_QUEUED;

	/* Arm the command timeout unless the caller asked for none. */
	if (ccb->ccb_h.timeout != CAM_TIME_INFINITY) {
		if (ccb->ccb_h.timeout == CAM_TIME_DEFAULT)
			ccb->ccb_h.timeout = 5 * 1000;
		ccb->ccb_h.timeout_ch =
		    timeout(ahc_timeout, (caddr_t)scb,
			    (ccb->ccb_h.timeout * hz) / 1000);
	}

	/*
	 * We only allow one untagged transaction
	 * per target in the initiator role unless
	 * we are storing a full busy target *lun*
	 * table in SCB space.
	 */
	if ((scb->hscb->control & (TARGET_SCB|TAG_ENB)) == 0
	 && (ahc->features & AHC_SCB_BTT) == 0) {
		struct scb_tailq *untagged_q;

		untagged_q = &(ahc->untagged_queues[ccb->ccb_h.target_id]);
		TAILQ_INSERT_TAIL(untagged_q, scb, links.tqe);
		if (TAILQ_FIRST(untagged_q) != scb) {
			/* Another untagged command is in flight; this one
			 * stays queued until the head completes. */
			ahc_unlock(ahc, &s);
			return;
		}
	}
	scb->flags |= SCB_ACTIVE;

	if ((scb->flags & SCB_TARGET_IMMEDIATE) != 0) {
		/* Hand the SCB directly to the paused sequencer so it can
		 * continue a target-mode message loop. */
		pause_sequencer(ahc);
		if ((ahc->flags & AHC_PAGESCBS) == 0)
			ahc_outb(ahc, SCBPTR, scb->hscb->tag);
		ahc_outb(ahc, SCB_TAG, scb->hscb->tag);
		ahc_outb(ahc, RETURN_1, CONT_MSG_LOOP);
		unpause_sequencer(ahc);
	} else {
		ahc_queue_scb(ahc, scb);
	}

	ahc_unlock(ahc, &s);
}
1230
static void
ahc_poll(struct cam_sim *sim)
{
	struct ahc_softc *ahc;

	/* Polled operation: invoke the interrupt handler by hand. */
	ahc = (struct ahc_softc *)cam_sim_softc(sim);
	ahc_intr(ahc);
}
1236
/*
 * Fill in the hardware SCB's CDB from the CCB and start DMA mapping of
 * the data buffer.  ahc_execute_scb() completes the setup, either
 * synchronously (physical address, SG list, or no-data cases) or from
 * busdma's callback once the virtual buffer has been mapped.
 */
static void
ahc_setup_data(struct ahc_softc *ahc, struct cam_sim *sim,
	       struct ccb_scsiio *csio, struct scb *scb)
{
	struct hardware_scb *hscb;
	struct ccb_hdr *ccb_h;

	hscb = scb->hscb;
	ccb_h = &csio->ccb_h;

	if (ccb_h->func_code == XPT_SCSI_IO) {
		hscb->cdb_len = csio->cdb_len;
		if ((ccb_h->flags & CAM_CDB_POINTER) != 0) {

			/* Oversized or physical-address CDB pointers are
			 * not supported; fail the request. */
			if (hscb->cdb_len > sizeof(hscb->cdb32)
			 || (ccb_h->flags & CAM_CDB_PHYS) != 0) {
				u_long s;

				ahc_set_transaction_status(scb,
							   CAM_REQ_INVALID);
				ahc_lock(ahc, &s);
				ahc_free_scb(ahc, scb);
				ahc_unlock(ahc, &s);
				xpt_done((union ccb *)csio);
				return;
			}
			/* CDBs longer than 12 bytes live in a side buffer
			 * referenced by bus address; short ones are embedded
			 * in the hardware SCB itself. */
			if (hscb->cdb_len > 12) {
				memcpy(hscb->cdb32,
				       csio->cdb_io.cdb_ptr,
				       hscb->cdb_len);
				hscb->shared_data.cdb_ptr = scb->cdb32_busaddr;
			} else {
				memcpy(hscb->shared_data.cdb,
				       csio->cdb_io.cdb_ptr,
				       hscb->cdb_len);
			}
		} else {
			/* CDB bytes are inline in the CCB. */
			if (hscb->cdb_len > 12) {
				memcpy(hscb->cdb32, csio->cdb_io.cdb_bytes,
				       hscb->cdb_len);
				hscb->shared_data.cdb_ptr = scb->cdb32_busaddr;
			} else {
				memcpy(hscb->shared_data.cdb,
				       csio->cdb_io.cdb_bytes,
				       hscb->cdb_len);
			}
		}
	}

	/* Only use S/G if there is a transfer */
	if ((ccb_h->flags & CAM_DIR_MASK) != CAM_DIR_NONE) {
		if ((ccb_h->flags & CAM_SCATTER_VALID) == 0) {
			/* We've been given a pointer to a single buffer */
			if ((ccb_h->flags & CAM_DATA_PHYS) == 0) {
				int s;
				int error;

				/* Block busdma callbacks while loading so
				 * the EINPROGRESS bookkeeping below cannot
				 * race the deferred callback. */
				s = splsoftvm();
				error = bus_dmamap_load(ahc->buffer_dmat,
							scb->dmamap,
							csio->data_ptr,
							csio->dxfer_len,
							ahc_execute_scb,
							scb, /*flags*/0);
				if (error == EINPROGRESS) {
					/*
					 * So as to maintain ordering,
					 * freeze the controller queue
					 * until our mapping is
					 * returned.
					 */
					xpt_freeze_simq(sim,
							/*count*/1);
					scb->io_ctx->ccb_h.status |=
					    CAM_RELEASE_SIMQ;
				}
				splx(s);
			} else {
				struct bus_dma_segment seg;

				/* Pointer to physical buffer */
				if (csio->dxfer_len > AHC_MAXTRANSFER_SIZE)
					panic("ahc_setup_data - Transfer size "
					      "larger than can device max");

				seg.ds_addr = (bus_addr_t)csio->data_ptr;
				seg.ds_len = csio->dxfer_len;
				ahc_execute_scb(scb, &seg, 1, 0);
			}
		} else {
			struct bus_dma_segment *segs;

			if ((ccb_h->flags & CAM_DATA_PHYS) != 0)
				panic("ahc_setup_data - Physical segment "
				      "pointers unsupported");

			if ((ccb_h->flags & CAM_SG_LIST_PHYS) == 0)
				panic("ahc_setup_data - Virtual segment "
				      "addresses unsupported");

			/* Just use the segments provided */
			segs = (struct bus_dma_segment *)csio->data_ptr;
			ahc_execute_scb(scb, segs, csio->sglist_cnt, 0);
		}
	} else {
		/* No data phase at all. */
		ahc_execute_scb(scb, NULL, 0, 0);
	}
}
1345
/*
 * Mark an SCB as the recovery SCB: freeze the SIM queue so no new
 * commands reach us, and cancel the timeouts of all pending SCBs so
 * they do not fire while error recovery is in progress.  Idempotent —
 * a second call on an already-marked SCB does nothing.
 */
static void
ahc_set_recoveryscb(struct ahc_softc *ahc, struct scb *scb) {

	if ((scb->flags & SCB_RECOVERY_SCB) == 0) {
		struct scb *list_scb;

		scb->flags |= SCB_RECOVERY_SCB;

		/*
		 * Take all queued, but not sent SCBs out of the equation.
		 * Also ensure that no new CCBs are queued to us while we
		 * try to fix this problem.
		 */
		if ((scb->io_ctx->ccb_h.status & CAM_RELEASE_SIMQ) == 0) {
			xpt_freeze_simq(SCB_GET_SIM(ahc, scb), /*count*/1);
			scb->io_ctx->ccb_h.status |= CAM_RELEASE_SIMQ;
		}

		/*
		 * Go through all of our pending SCBs and remove
		 * any scheduled timeouts for them.  We will reschedule
		 * them after we've successfully fixed this problem.
		 */
		LIST_FOREACH(list_scb, &ahc->pending_scbs, pending_links) {
			union ccb *ccb;

			ccb = list_scb->io_ctx;
			untimeout(ahc_timeout, list_scb, ccb->ccb_h.timeout_ch);
		}
	}
}
1377
/*
 * Command timeout handler.  Snapshots bus state for diagnostics, then
 * escalates recovery: wait on another device's timeout, send a bus
 * device reset (BDR) to the offending target, or — if recovery has
 * already been attempted — reset the whole channel.
 */
void
ahc_timeout(void *arg)
{
	struct	scb *scb;
	struct	ahc_softc *ahc;
	long	s;
	int	found;
	u_int	last_phase;
	int	target;
	int	lun;
	int	i;
	char	channel;

	scb = (struct scb *)arg;
	ahc = (struct ahc_softc *)scb->io_ctx->ccb_h.ccb_ahc_ptr;

	ahc_lock(ahc, &s);

	/*
	 * Ensure that the card doesn't do anything
	 * behind our back.  Also make sure that we
	 * didn't "just" miss an interrupt that would
	 * affect this timeout.
	 */
	do {
		ahc_intr(ahc);
		pause_sequencer(ahc);
	} while (ahc_inb(ahc, INTSTAT) & INT_PEND);

	ahc_print_path(ahc, scb);
	if ((scb->flags & SCB_ACTIVE) == 0) {
		/* Previous timeout took care of me already */
		printf("Timedout SCB %d handled by another timeout\n",
		       scb->hscb->tag);
		unpause_sequencer(ahc);
		ahc_unlock(ahc, &s);
		return;
	}

	target = SCB_GET_TARGET(ahc, scb);
	channel = SCB_GET_CHANNEL(ahc, scb);
	lun = SCB_GET_LUN(scb);

	printf("SCB 0x%x - timed out ", scb->hscb->tag);
	/*
	 * Take a snapshot of the bus state and print out
	 * some information so we can track down driver bugs.
	 */
	last_phase = ahc_inb(ahc, LASTPHASE);

	for (i = 0; i < num_phases; i++) {
		if (last_phase == phase_table[i].phase)
			break;
	}
	printf("%s", phase_table[i].phasemsg);

	printf(", SEQADDR == 0x%x\n",
	       ahc_inb(ahc, SEQADDR0) | (ahc_inb(ahc, SEQADDR1) << 8));

	/* Dump the SG list of the timed-out command for diagnostics. */
	if (scb->sg_count > 0) {
		for (i = 0; i < scb->sg_count; i++) {
			printf("sg[%d] - Addr 0x%x : Length %d\n",
			       i,
			       scb->sg_list[i].addr,
			       scb->sg_list[i].len);
		}
	}
	if (scb->flags & (SCB_DEVICE_RESET|SCB_ABORT)) {
		/*
		 * Been down this road before.
		 * Do a full bus reset.
		 */
bus_reset:
		ahc_set_transaction_status(scb, CAM_CMD_TIMEOUT);
		found = ahc_reset_channel(ahc, channel, /*Initiate Reset*/TRUE);
		printf("%s: Issued Channel %c Bus Reset. "
		       "%d SCBs aborted\n", ahc_name(ahc), channel, found);
	} else {
		/*
		 * If we are a target, transition to bus free and report
		 * the timeout.
		 *
		 * The target/initiator that is holding up the bus may not
		 * be the same as the one that triggered this timeout
		 * (different commands have different timeout lengths).
		 * If the bus is idle and we are acting as the initiator
		 * for this request, queue a BDR message to the timed out
		 * target.  Otherwise, if the timed out transaction is
		 * active:
		 *   Initiator transaction:
		 *	Stuff the message buffer with a BDR message and assert
		 *	ATN in the hopes that the target will let go of the bus
		 *	and go to the mesgout phase.  If this fails, we'll
		 *	get another timeout 2 seconds later which will attempt
		 *	a bus reset.
		 *
		 *   Target transaction:
		 *	Transition to BUS FREE and report the error.
		 *	It's good to be the target!
		 */
		u_int active_scb_index;

		active_scb_index = ahc_inb(ahc, SCB_TAG);

		if (last_phase != P_BUSFREE
		  && (active_scb_index < ahc->scb_data->numscbs)) {
			struct scb *active_scb;

			/*
			 * If the active SCB is not from our device,
			 * assume that another device is hogging the bus
			 * and wait for its timeout to expire before
			 * taking additional action.
			 */
			active_scb = &ahc->scb_data->scbarray[active_scb_index];
			if (active_scb->hscb->scsiid != scb->hscb->scsiid
			 || active_scb->hscb->lun != scb->hscb->lun) {
				struct	ccb_hdr *ccbh;
				u_int	newtimeout;

				ahc_print_path(ahc, scb);
				printf("Other SCB Timeout%s",
			 	       (scb->flags & SCB_OTHERTCL_TIMEOUT) != 0
				       ? " again\n" : "\n");
				scb->flags |= SCB_OTHERTCL_TIMEOUT;
				/* Re-arm our timeout for the longer of the
				 * two commands' timeout periods. */
				newtimeout =
				    MAX(active_scb->io_ctx->ccb_h.timeout,
					scb->io_ctx->ccb_h.timeout);
				ccbh = &scb->io_ctx->ccb_h;
				scb->io_ctx->ccb_h.timeout_ch =
				    timeout(ahc_timeout, scb,
					    (newtimeout * hz) / 1000);
				ahc_unlock(ahc, &s);
				return;
			}

			/* It's us */
			if ((scb->hscb->control & TARGET_SCB) != 0) {

				/*
				 * Send back any queued up transactions
				 * and properly record the error condition.
				 */
				ahc_freeze_devq(ahc, scb);
				ahc_set_transaction_status(scb,
							   CAM_CMD_TIMEOUT);
				ahc_freeze_scb(scb);
				ahc_done(ahc, scb);

				/* Will clear us from the bus */
				restart_sequencer(ahc);
				/* NOTE(review): this path returns without
				 * ahc_unlock() — confirm restart_sequencer()
				 * is expected to leave the lock/spl state
				 * this way. */
				return;
			}

			/* Our initiator command is holding the bus: queue a
			 * BDR message and assert ATN to ask the target to
			 * enter message-out phase. */
			ahc_set_recoveryscb(ahc, active_scb);
			ahc_outb(ahc, MSG_OUT, MSG_BUS_DEV_RESET);
			ahc_outb(ahc, SCSISIGO, last_phase|ATNO);
			ahc_print_path(ahc, active_scb);
			printf("BDR message in message buffer\n");
			active_scb->flags |=  SCB_DEVICE_RESET;
			active_scb->io_ctx->ccb_h.timeout_ch =
			    timeout(ahc_timeout, (caddr_t)active_scb, 2 * hz);
			unpause_sequencer(ahc);
		} else {
			int	 disconnected;

			/* XXX Shouldn't panic.  Just punt instead */
			if ((scb->hscb->control & TARGET_SCB) != 0)
				panic("Timed-out target SCB but bus idle");

			if (last_phase != P_BUSFREE
			 && (ahc_inb(ahc, SSTAT0) & TARGET) != 0) {
				/* XXX What happened to the SCB? */
				/* Hung target selection.  Goto busfree */
				printf("%s: Hung target selection\n",
				       ahc_name(ahc));
				restart_sequencer(ahc);
				/* NOTE(review): returns without ahc_unlock()
				 * — see note above; verify intended. */
				return;
			}

			/* If the SCB is still waiting in the QINFIFO it was
			 * never sent, so the device cannot be disconnected
			 * holding it. */
			if (ahc_search_qinfifo(ahc, target, channel, lun,
					       scb->hscb->tag, ROLE_INITIATOR,
					       /*status*/0, SEARCH_COUNT) > 0) {
				disconnected = FALSE;
			} else {
				disconnected = TRUE;
			}

			if (disconnected) {
				u_int active_scb;

				ahc_set_recoveryscb(ahc, scb);
				/*
				 * Simply set the MK_MESSAGE control bit.
				 */
				scb->hscb->control |= MK_MESSAGE;
				scb->flags |= SCB_QUEUED_MSG
					   |  SCB_DEVICE_RESET;

				/*
				 * Mark the cached copy of this SCB in the
				 * disconnected list too, so that a reconnect
				 * at this point causes a BDR or abort.
				 */
				active_scb = ahc_inb(ahc, SCBPTR);
				if (ahc_search_disc_list(ahc, target,
							 channel, lun,
							 scb->hscb->tag,
							 /*stop_on_first*/TRUE,
							 /*remove*/FALSE,
							 /*save_state*/FALSE)) {
					u_int scb_control;

					scb_control = ahc_inb(ahc, SCB_CONTROL);
					scb_control |= MK_MESSAGE;
					ahc_outb(ahc, SCB_CONTROL, scb_control);
				}
				ahc_outb(ahc, SCBPTR, active_scb);

				/*
				 * Actually re-queue this SCB in case we can
				 * select the device before it reconnects.
				 * Clear out any entries in the QINFIFO first
				 * so we are the next SCB for this target
				 * to run.
				 */
				ahc_search_qinfifo(ahc,
						   SCB_GET_TARGET(ahc, scb),
						   channel, SCB_GET_LUN(scb),
						   SCB_LIST_NULL,
						   ROLE_INITIATOR,
						   CAM_REQUEUE_REQ,
						   SEARCH_COMPLETE);
				ahc_print_path(ahc, scb);
				printf("Queuing a BDR SCB\n");
				ahc->qinfifo[ahc->qinfifonext++] =
				    scb->hscb->tag;
				/* Tell the chip about the new QINFIFO entry
				 * via whichever queue register set it has. */
				if ((ahc->features & AHC_QUEUE_REGS) != 0) {
					ahc_outb(ahc, HNSCB_QOFF,
						 ahc->qinfifonext);
				} else {
					ahc_outb(ahc, KERNEL_QINPOS,
						 ahc->qinfifonext);
				}
				scb->io_ctx->ccb_h.timeout_ch =
				    timeout(ahc_timeout, (caddr_t)scb, 2 * hz);
				unpause_sequencer(ahc);
			} else {
				/* Go "immediately" to the bus reset */
				/* This shouldn't happen */
				ahc_set_recoveryscb(ahc, scb);
				ahc_print_path(ahc, scb);
				printf("SCB %d: Immediate reset.  "
					"Flags = 0x%x\n", scb->hscb->tag,
					scb->flags);
				goto bus_reset;
			}
		}
	}
	ahc_unlock(ahc, &s);
}
1639
/*
 * Service an XPT_ABORT request.  For target-mode CCBs still sitting on
 * our accept-TIO / immediate-notify lists, unlink and complete them
 * with CAM_REQ_ABORTED.  Aborting in-flight SCSI I/O is not yet
 * implemented and is reported as CAM_UA_ABORT.
 */
static void
ahc_abort_ccb(struct ahc_softc *ahc, struct cam_sim *sim, union ccb *ccb)
{
	union ccb *abort_ccb;

	abort_ccb = ccb->cab.abort_ccb;
	switch (abort_ccb->ccb_h.func_code) {
	case XPT_ACCEPT_TARGET_IO:
	case XPT_IMMED_NOTIFY:
	case XPT_CONT_TARGET_IO:
	{
		struct tmode_tstate *tstate;
		struct tmode_lstate *lstate;
		struct ccb_hdr_slist *list;
		cam_status status;

		status = ahc_find_tmode_devs(ahc, sim, abort_ccb, &tstate,
					     &lstate, TRUE);

		if (status != CAM_REQ_CMP) {
			ccb->ccb_h.status = status;
			break;
		}

		/* Pick the lstate list that could hold this CCB;
		 * XPT_CONT_TARGET_IO is not queued on either. */
		if (abort_ccb->ccb_h.func_code == XPT_ACCEPT_TARGET_IO)
			list = &lstate->accept_tios;
		else if (abort_ccb->ccb_h.func_code == XPT_IMMED_NOTIFY)
			list = &lstate->immed_notifies;
		else
			list = NULL;

		if (list != NULL) {
			struct ccb_hdr *curelm;
			int found;

			/* Walk the singly-linked list and unlink the CCB
			 * if present. */
			curelm = SLIST_FIRST(list);
			found = 0;
			if (curelm == &abort_ccb->ccb_h) {
				found = 1;
				SLIST_REMOVE_HEAD(list, sim_links.sle);
			} else {
				while(curelm != NULL) {
					struct ccb_hdr *nextelm;

					nextelm =
					    SLIST_NEXT(curelm, sim_links.sle);

					if (nextelm == &abort_ccb->ccb_h) {
						found = 1;
						SLIST_NEXT(curelm,
							   sim_links.sle) =
						    SLIST_NEXT(nextelm,
							       sim_links.sle);
						break;
					}
					curelm = nextelm;
				}
			}

			if (found) {
				abort_ccb->ccb_h.status = CAM_REQ_ABORTED;
				xpt_done(abort_ccb);
				ccb->ccb_h.status = CAM_REQ_CMP;
			} else {
				printf("Not found\n");
				ccb->ccb_h.status = CAM_PATH_INVALID;
			}
			break;
		}
		/* FALLTHROUGH */
	}
	case XPT_SCSI_IO:
		/* XXX Fully implement the hard ones */
		ccb->ccb_h.status = CAM_UA_ABORT;
		break;
	default:
		ccb->ccb_h.status = CAM_REQ_INVALID;
		break;
	}
	xpt_done(ccb);
}
1721
1722void
1723ahc_send_async(struct ahc_softc *ahc, char channel, u_int target,
1724		u_int lun, ac_code code)
1725{
1726	struct	ccb_trans_settings cts;
1727	struct cam_path *path;
1728	void *arg;
1729	int error;
1730
1731	arg = NULL;
1732	error = ahc_create_path(ahc, channel, target, lun, &path);
1733
1734	if (error != CAM_REQ_CMP)
1735		return;
1736
1737	switch (code) {
1738	case AC_TRANSFER_NEG:
1739#ifdef AHC_NEW_TRAN_SETTINGS
1740		cts.type = CTS_TYPE_CURRENT_SETTINGS;
1741#else
1742		cts.flags = CCB_TRANS_CURRENT_SETTINGS;
1743#endif
1744		cts.ccb_h.path = path;
1745		cts.ccb_h.target_id = target;
1746		cts.ccb_h.target_lun = lun;
1747		ahc_get_tran_settings(ahc, channel == 'A' ? ahc->our_id
1748							  : ahc->our_id_b,
1749				      channel, &cts);
1750		arg = &cts;
1751		break;
1752	case AC_SENT_BDR:
1753	case AC_BUS_RESET:
1754		break;
1755	default:
1756		panic("ahc_send_async: Unexpected async event");
1757	}
1758	xpt_async(code, path, arg);
1759}
1760
/*
 * Platform hook invoked when tagged queuing is enabled or disabled for
 * a device.  Intentionally empty: this FreeBSD shim keeps no
 * per-device tag state of its own.
 */
void
ahc_platform_set_tags(struct ahc_softc *ahc,
		      struct ahc_devinfo *devinfo, int enable)
{
}
1766
1767int
1768ahc_platform_alloc(struct ahc_softc *ahc, void *platform_arg)
1769{
1770	ahc->platform_data =
1771	    malloc(sizeof(struct ahc_platform_data), M_DEVBUF, M_NOWAIT);
1772	if (ahc->platform_data == NULL)
1773		return (ENOMEM);
1774	memset(ahc->platform_data, 0, sizeof(struct ahc_platform_data));
1775	return (0);
1776}
1777
/*
 * Release the FreeBSD-specific resources attached to the softc: the
 * register mapping, the IRQ resource, and the platform data block
 * itself.  Safe to call when platform_data was never allocated.
 */
void
ahc_platform_free(struct ahc_softc *ahc)
{
	if (ahc->platform_data != NULL) {
		if (ahc->platform_data->regs != NULL)
			bus_release_resource(ahc->dev_softc,
					     ahc->platform_data->regs_res_type,
					     ahc->platform_data->regs_res_id,
					     ahc->platform_data->regs);

		/* NOTE(review): the IRQ is released with rid 0 rather than a
		 * stored resource id — confirm this matches how the IRQ was
		 * allocated in the bus attachment code. */
		if (ahc->platform_data->irq != NULL)
			bus_release_resource(ahc->dev_softc,
					 ahc->platform_data->irq_res_type,
					 0, ahc->platform_data->irq);

		free(ahc->platform_data, M_DEVBUF);
	}
}
1796
/*
 * Softc ordering comparator.  Controller ordering is not meaningful
 * under FreeBSD, so every pair of softcs compares as equal.
 */
int
ahc_softc_comp(struct ahc_softc *lahc, struct ahc_softc *rahc)
{
	return (0);
}
1803
1804#if UNUSED
1805static void
1806ahc_dump_targcmd(struct target_cmd *cmd)
1807{
1808	uint8_t *byte;
1809	uint8_t *last_byte;
1810	int i;
1811
1812	byte = &cmd->initiator_channel;
1813	/* Debugging info for received commands */
1814	last_byte = &cmd[1].initiator_channel;
1815
1816	i = 0;
1817	while (byte < last_byte) {
1818		if (i == 0)
1819			printf("\t");
1820		printf("%#x", *byte++);
1821		i++;
1822		if (i == 8) {
1823			printf("\n");
1824			i = 0;
1825		} else {
1826			printf(", ");
1827		}
1828	}
1829}
1830#endif
1831