/* aic7xxx_osm.c revision 66760 */
/*
 * Bus independent FreeBSD shim for the aic7xxx based adaptec SCSI controllers
 *
 * Copyright (c) 1994, 1995, 1996, 1997, 1998, 1999, 2000 Justin T. Gibbs.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions, and the following disclaimer,
 *    without modification.
 * 2. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * Alternatively, this software may be distributed under the terms of the
 * GNU Public License ("GPL").
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $Id$
 *
 * $FreeBSD: head/sys/dev/aic7xxx/aic7xxx_osm.c 66760 2000-10-06 19:34:40Z gibbs $
 */
35
36#include <dev/aic7xxx/aic7xxx_freebsd.h>
37#include <dev/aic7xxx/aic7xxx_inline.h>
38
39#include <sys/eventhandler.h>
40
41#ifndef AHC_TMODE_ENABLE
42#define AHC_TMODE_ENABLE 0
43#endif
44
45#define ccb_scb_ptr spriv_ptr0
46#define ccb_ahc_ptr spriv_ptr1
47
48#ifdef AHC_DEBUG
49static int     ahc_debug = AHC_DEBUG;
50#endif
51
52static void ahc_freebsd_intr(void *arg);
53
54#if UNUSED
55static void	ahc_dump_targcmd(struct target_cmd *cmd);
56#endif
57static void	ahc_action(struct cam_sim *sim, union ccb *ccb);
58static void	ahc_get_tran_settings(struct ahc_softc *ahc,
59				      int our_id, char channel,
60				      struct ccb_trans_settings *cts);
61static void	ahc_async(void *callback_arg, uint32_t code,
62			  struct cam_path *path, void *arg);
63static void	ahc_execute_scb(void *arg, bus_dma_segment_t *dm_segs,
64				int nsegments, int error);
65static void	ahc_poll(struct cam_sim *sim);
66static void	ahc_setup_data(struct ahc_softc *ahc, struct cam_sim *sim,
67			       struct ccb_scsiio *csio, struct scb *scb);
68static void	ahc_abort_ccb(struct ahc_softc *ahc, struct cam_sim *sim,
69			      union ccb *ccb);
70static int	ahc_create_path(struct ahc_softc *ahc,
71				char channel, u_int target, u_int lun,
72				struct cam_path **path);
73
74static void	ahc_set_recoveryscb(struct ahc_softc *ahc, struct scb *scb);
75
76static int
77ahc_create_path(struct ahc_softc *ahc, char channel, u_int target,
78	        u_int lun, struct cam_path **path)
79{
80	path_id_t path_id;
81
82	if (channel == 'B')
83		path_id = cam_sim_path(ahc->platform_data->sim_b);
84	else
85		path_id = cam_sim_path(ahc->platform_data->sim);
86
87	return (xpt_create_path(path, /*periph*/NULL,
88				path_id, target, lun));
89}
90
/*
 * Attach all the sub-devices we can find
 *
 * Hooks up the interrupt handler, allocates a shared CAM device queue,
 * registers one SIM/path pair per channel (two on AHC_TWIN adapters),
 * enables AC_LOST_DEVICE async callbacks, and stashes the SIM/path
 * pointers in platform_data honoring the user's primary-channel choice.
 *
 * Returns the number of buses successfully attached (0 on complete
 * failure).  Partial success is possible on twin-channel controllers:
 * the 'fail' label is reached with whatever was built so far.
 */
int
ahc_attach(struct ahc_softc *ahc)
{
	char   ahc_info[256];
	struct ccb_setasync csa;
	struct cam_devq *devq;
	int bus_id;
	int bus_id2;
	struct cam_sim *sim;
	struct cam_sim *sim2;
	struct cam_path *path;
	struct cam_path *path2;
	long s;
	int count;
	int error;

	/*
	 * NOTE(review): 'path' and 'path2' are not initialized to NULL,
	 * yet both are unconditionally stored into platform_data at the
	 * 'fail' label.  On an early failure (e.g. bus_setup_intr) they
	 * are indeterminate -- TODO confirm no caller dereferences
	 * platform_data->path* when attach returns 0.
	 */
	count = 0;
	sim = NULL;
	sim2 = NULL;

	ahc_controller_info(ahc, ahc_info);
	printf("%s\n", ahc_info);
	ahc_lock(ahc, &s);
	/* Hook up our interrupt handler */
	if ((error = bus_setup_intr(ahc->dev_softc, ahc->platform_data->irq,
				    INTR_TYPE_CAM, ahc_freebsd_intr, ahc,
				    &ahc->platform_data->ih)) != 0) {
		device_printf(ahc->dev_softc, "bus_setup_intr() failed: %d\n",
			      error);
		goto fail;
	}

	/*
	 * Attach secondary channel first if the user has
	 * declared it the primary channel.
	 */
	if ((ahc->flags & AHC_CHANNEL_B_PRIMARY) != 0) {
		bus_id = 1;
		bus_id2 = 0;
	} else {
		bus_id = 0;
		bus_id2 = 1;
	}

	/*
	 * Create the device queue for our SIM(s).
	 * Both channels share this queue, sized to the SCB pool
	 * (one slot is reserved, hence AHC_SCB_MAX - 1).
	 */
	devq = cam_simq_alloc(AHC_SCB_MAX - 1);
	if (devq == NULL)
		goto fail;

	/*
	 * Construct our first channel SIM entry
	 */
	sim = cam_sim_alloc(ahc_action, ahc_poll, "ahc", ahc,
			    device_get_unit(ahc->dev_softc),
			    1, AHC_SCB_MAX - 1, devq);
	if (sim == NULL) {
		cam_simq_free(devq);
		goto fail;
	}

	/* From here on, freeing the SIM also frees the devq. */
	if (xpt_bus_register(sim, bus_id) != CAM_SUCCESS) {
		cam_sim_free(sim, /*free_devq*/TRUE);
		sim = NULL;
		goto fail;
	}

	if (xpt_create_path(&path, /*periph*/NULL,
			    cam_sim_path(sim), CAM_TARGET_WILDCARD,
			    CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
		xpt_bus_deregister(cam_sim_path(sim));
		cam_sim_free(sim, /*free_devq*/TRUE);
		sim = NULL;
		goto fail;
	}

	/* Ask CAM to tell us when a device departs this bus. */
	xpt_setup_ccb(&csa.ccb_h, path, /*priority*/5);
	csa.ccb_h.func_code = XPT_SASYNC_CB;
	csa.event_enable = AC_LOST_DEVICE;
	csa.callback = ahc_async;
	csa.callback_arg = sim;
	xpt_action((union ccb *)&csa);
	count++;

	if (ahc->features & AHC_TWIN) {
		/* Second channel shares the devq allocated above. */
		sim2 = cam_sim_alloc(ahc_action, ahc_poll, "ahc",
				    ahc, device_get_unit(ahc->dev_softc), 1,
				    AHC_SCB_MAX - 1, devq);

		if (sim2 == NULL) {
			printf("ahc_attach: Unable to attach second "
			       "bus due to resource shortage");
			goto fail;
		}

		if (xpt_bus_register(sim2, bus_id2) != CAM_SUCCESS) {
			printf("ahc_attach: Unable to attach second "
			       "bus due to resource shortage");
			/*
			 * We do not want to destroy the device queue
			 * because the first bus is using it.
			 */
			cam_sim_free(sim2, /*free_devq*/FALSE);
			goto fail;
		}

		if (xpt_create_path(&path2, /*periph*/NULL,
				    cam_sim_path(sim2),
				    CAM_TARGET_WILDCARD,
				    CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
			xpt_bus_deregister(cam_sim_path(sim2));
			cam_sim_free(sim2, /*free_devq*/FALSE);
			sim2 = NULL;
			goto fail;
		}
		xpt_setup_ccb(&csa.ccb_h, path2, /*priority*/5);
		csa.ccb_h.func_code = XPT_SASYNC_CB;
		csa.event_enable = AC_LOST_DEVICE;
		csa.callback = ahc_async;
		csa.callback_arg = sim2;
		xpt_action((union ccb *)&csa);
		count++;
	}

fail:
	/*
	 * Record whatever we managed to build.  When channel B is the
	 * primary channel, the first SIM/path pair built above actually
	 * belongs to B, so swap the slots accordingly.
	 */
	if ((ahc->flags & AHC_CHANNEL_B_PRIMARY) != 0) {
		ahc->platform_data->sim_b = sim;
		ahc->platform_data->path_b = path;
		ahc->platform_data->sim = sim2;
		ahc->platform_data->path = path2;
	} else {
		ahc->platform_data->sim = sim;
		ahc->platform_data->path = path;
		ahc->platform_data->sim_b = sim2;
		ahc->platform_data->path_b = path2;
	}
	ahc_unlock(ahc, &s);

	if (count != 0)
		/* We have to wait until after any system dumps... */
		EVENTHANDLER_REGISTER(shutdown_final, ahc_shutdown,
				      ahc, SHUTDOWN_PRI_DEFAULT);

	return (count);
}
240
/*
 * Catch an interrupt from the adapter
 *
 * Thin ISR shim registered via bus_setup_intr() in ahc_attach();
 * simply forwards to the bus-independent core interrupt handler.
 *
 * Fix: the forward declaration near the top of this file declares
 * this function 'static', but the definition omitted the storage
 * class.  Add 'static' so declaration and definition agree and the
 * symbol keeps internal linkage explicitly.
 */
static void
ahc_freebsd_intr(void *arg)
{
	struct	ahc_softc *ahc;

	ahc = (struct ahc_softc *)arg;
	ahc_intr(ahc);
}
252
/*
 * We have an scb which has been processed by the
 * adaptor, now we look to see how the operation
 * went.
 *
 * Completion path for an SCB: dequeues it, cancels its timeout,
 * finishes any DMA bookkeeping, handles recovery-SCB and autosense
 * special cases, then frees the SCB and completes the CCB to CAM.
 */
void
ahc_done(struct ahc_softc *ahc, struct scb *scb)
{
	union ccb *ccb;

	CAM_DEBUG(scb->io_ctx->ccb_h.path, CAM_DEBUG_TRACE,
		  ("ahc_done - scb %d\n", scb->hscb->tag));

	ccb = scb->io_ctx;
	LIST_REMOVE(scb, pending_links);
	/*
	 * Untagged commands on controllers without per-target SCB
	 * queues (AHC_SCB_BTT) are serialized through a software
	 * untagged queue; remove this command and start the next.
	 */
	if (ccb->ccb_h.func_code == XPT_SCSI_IO
	  && ((ccb->ccb_h.flags & CAM_TAG_ACTION_VALID) == 0
	   || ccb->csio.tag_action == CAM_TAG_ACTION_NONE)
	  && (ahc->features & AHC_SCB_BTT) == 0) {
		struct scb_tailq *untagged_q;

		untagged_q = &ahc->untagged_queues[ccb->ccb_h.target_id];
		TAILQ_REMOVE(untagged_q, scb, links.tqe);
		ahc_run_untagged_queue(ahc, untagged_q);
	}

	/* Cancel this command's pending timeout. */
	untimeout(ahc_timeout, (caddr_t)scb, ccb->ccb_h.timeout_ch);

	if ((ccb->ccb_h.flags & CAM_DIR_MASK) != CAM_DIR_NONE) {
		bus_dmasync_op_t op;

		if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN)
			op = BUS_DMASYNC_POSTREAD;
		else
			op = BUS_DMASYNC_POSTWRITE;
		bus_dmamap_sync(ahc->buffer_dmat, scb->dmamap, op);
		bus_dmamap_unload(ahc->buffer_dmat, scb->dmamap);
	}

	/* Target-mode continue I/O completes without further processing. */
	if (ccb->ccb_h.func_code == XPT_CONT_TARGET_IO) {
		if (ahc_get_transaction_status(scb) == CAM_REQ_INPROG)
			ccb->ccb_h.status |= CAM_REQ_CMP;
		ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
		ahc_free_scb(ahc, scb);
		xpt_done(ccb);
		return;
	}

	/*
	 * If the recovery SCB completes, we have to be
	 * out of our timeout.
	 */
	if ((scb->flags & SCB_RECOVERY_SCB) != 0) {
		struct	scb *list_scb;

		/*
		 * We were able to complete the command successfully,
		 * so reinstate the timeouts for all other pending
		 * commands.
		 */
		LIST_FOREACH(list_scb, &ahc->pending_scbs, pending_links) {
			/* NOTE(review): this 'ccb' shadows the outer one
			 * for the duration of the loop only. */
			union ccb *ccb;

			ccb = list_scb->io_ctx;
			ccb->ccb_h.timeout_ch =
			    timeout(ahc_timeout, list_scb,
				    (ccb->ccb_h.timeout * hz)/1000);
		}

		/*
		 * Ensure that we didn't put a second instance of this
		 * SCB into the QINFIFO.
		 */
		ahc_search_qinfifo(ahc, SCB_GET_TARGET(ahc, scb),
				   SCB_GET_CHANNEL(ahc, scb),
				   SCB_GET_LUN(scb), scb->hscb->tag,
				   ROLE_INITIATOR, /*status*/0,
				   SEARCH_REMOVE);
		/* Report recovery-induced terminations as timeouts. */
		if (ahc_get_transaction_status(scb) == CAM_BDR_SENT
		 || ahc_get_transaction_status(scb) == CAM_REQ_ABORTED)
			ahc_set_transaction_status(scb, CAM_CMD_TIMEOUT);
		ahc_print_path(ahc, scb);
		printf("no longer in timeout, status = %x\n",
		       ccb->ccb_h.status);
	}

	/* Don't clobber any existing error state */
	if (ahc_get_transaction_status(scb) == CAM_REQ_INPROG) {
		ccb->ccb_h.status |= CAM_REQ_CMP;
	} else if ((scb->flags & SCB_SENSE) != 0) {
		/*
		 * We performed autosense retrieval.
		 *
		 * Zero any sense not transferred by the
		 * device.  The SCSI spec mandates that any
		 * untransfered data should be assumed to be
		 * zero.  Complete the 'bounce' of sense information
		 * through buffers accessible via bus-space by
		 * copying it into the clients csio.
		 */
		memset(&ccb->csio.sense_data, 0, sizeof(ccb->csio.sense_data));
		memcpy(&ccb->csio.sense_data,
		       &ahc->scb_data->sense[scb->hscb->tag],
		       (scb->sg_list->len & AHC_SG_LEN_MASK)
		       - ccb->csio.sense_resid);
		scb->io_ctx->ccb_h.status |= CAM_AUTOSNS_VALID;
	}
	ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
	ahc_free_scb(ahc, scb);
	xpt_done(ccb);
}
364
/*
 * Main CAM SIM action entry point.
 *
 * Dispatches on the CCB function code: queues initiator and
 * target-mode I/O, handles transfer-setting get/set (both the new
 * and old CAM transport APIs via AHC_NEW_TRAN_SETTINGS), geometry
 * calculation, bus reset, path inquiry, and abort requests.
 * Immediate requests are completed inline with xpt_done(); queued
 * I/O is handed to the DMA/sequencer machinery via ahc_setup_data()
 * or ahc_execute_scb().
 */
static void
ahc_action(struct cam_sim *sim, union ccb *ccb)
{
	struct	ahc_softc *ahc;
	struct	tmode_lstate *lstate;
	u_int	target_id;
	u_int	our_id;
	long	s;

	CAM_DEBUG(ccb->ccb_h.path, CAM_DEBUG_TRACE, ("ahc_action\n"));

	ahc = (struct ahc_softc *)cam_sim_softc(sim);

	target_id = ccb->ccb_h.target_id;
	our_id = SIM_SCSI_ID(ahc, sim);

	switch (ccb->ccb_h.func_code) {
	/* Common cases first */
	case XPT_ACCEPT_TARGET_IO:	/* Accept Host Target Mode CDB */
	case XPT_CONT_TARGET_IO:/* Continue Host Target I/O Connection*/
	{
		struct	   tmode_tstate *tstate;
		cam_status status;

		status = ahc_find_tmode_devs(ahc, sim, ccb, &tstate,
					     &lstate, TRUE);

		if (status != CAM_REQ_CMP) {
			if (ccb->ccb_h.func_code == XPT_CONT_TARGET_IO) {
				/* Response from the black hole device */
				tstate = NULL;
				lstate = ahc->black_hole;
			} else {
				ccb->ccb_h.status = status;
				xpt_done(ccb);
				break;
			}
		}
		if (ccb->ccb_h.func_code == XPT_ACCEPT_TARGET_IO) {

			/* Park the ATIO until a command arrives for it. */
			ahc_lock(ahc, &s);
			SLIST_INSERT_HEAD(&lstate->accept_tios, &ccb->ccb_h,
					  sim_links.sle);
			ccb->ccb_h.status = CAM_REQ_INPROG;
			if ((ahc->flags & AHC_TQINFIFO_BLOCKED) != 0)
				ahc_run_tqinfifo(ahc, /*paused*/FALSE);
			ahc_unlock(ahc, &s);
			break;
		}

		/*
		 * The target_id represents the target we attempt to
		 * select.  In target mode, this is the initiator of
		 * the original command.
		 */
		our_id = target_id;
		target_id = ccb->csio.init_id;
		/* FALLTHROUGH */
	}
	case XPT_SCSI_IO:	/* Execute the requested I/O operation */
	case XPT_RESET_DEV:	/* Bus Device Reset the specified SCSI device */
	{
		struct	scb *scb;
		struct	hardware_scb *hscb;

		/*
		 * get an scb to use.
		 */
		ahc_lock(ahc, &s);
		if ((scb = ahc_get_scb(ahc)) == NULL) {

			/*
			 * Out of SCBs: freeze the SIM queue and ask CAM
			 * to requeue; the queue is released when SCBs
			 * become available again.
			 */
			ahc->flags |= AHC_RESOURCE_SHORTAGE;
			ahc_unlock(ahc, &s);
			xpt_freeze_simq(sim, /*count*/1);
			ccb->ccb_h.status = CAM_REQUEUE_REQ;
			xpt_done(ccb);
			return;
		}
		ahc_unlock(ahc, &s);

		hscb = scb->hscb;

		CAM_DEBUG(ccb->ccb_h.path, CAM_DEBUG_SUBTRACE,
			  ("start scb(%p)\n", scb));
		scb->io_ctx = ccb;
		/*
		 * So we can find the SCB when an abort is requested
		 */
		ccb->ccb_h.ccb_scb_ptr = scb;
		ccb->ccb_h.ccb_ahc_ptr = ahc;

		/*
		 * Put all the arguments for the xfer in the scb
		 */
		hscb->control = 0;
		hscb->scsiid = BUILD_SCSIID(ahc, sim, target_id, our_id);
		hscb->lun = ccb->ccb_h.target_lun;
		if (ccb->ccb_h.func_code == XPT_RESET_DEV) {
			/* BDR carries no CDB; dispatch immediately. */
			hscb->cdb_len = 0;
			scb->flags |= SCB_DEVICE_RESET;
			hscb->control |= MK_MESSAGE;
			ahc_execute_scb(scb, NULL, 0, 0);
		} else {
			if (ccb->ccb_h.func_code == XPT_CONT_TARGET_IO) {
				struct target_data *tdata;

				tdata = &hscb->shared_data.tdata;
				if (ahc->pending_device == lstate) {
					scb->flags |= SCB_TARGET_IMMEDIATE;
					ahc->pending_device = NULL;
				}
				hscb->control |= TARGET_SCB;
				tdata->target_phases = IDENTIFY_SEEN;
				if ((ccb->ccb_h.flags & CAM_SEND_STATUS) != 0) {
					tdata->target_phases |= SPHASE_PENDING;
					tdata->scsi_status =
					    ccb->csio.scsi_status;
				}
				tdata->initiator_tag = ccb->csio.tag_id;
			}
			if (ccb->ccb_h.flags & CAM_TAG_ACTION_VALID)
				hscb->control |= ccb->csio.tag_action;

			/* Map data buffers and queue the command. */
			ahc_setup_data(ahc, sim, &ccb->csio, scb);
		}
		break;
	}
	case XPT_NOTIFY_ACK:
	case XPT_IMMED_NOTIFY:
	{
		struct	   tmode_tstate *tstate;
		struct	   tmode_lstate *lstate;
		cam_status status;

		status = ahc_find_tmode_devs(ahc, sim, ccb, &tstate,
					     &lstate, TRUE);

		if (status != CAM_REQ_CMP) {
			ccb->ccb_h.status = status;
			xpt_done(ccb);
			break;
		}
		SLIST_INSERT_HEAD(&lstate->immed_notifies, &ccb->ccb_h,
				  sim_links.sle);
		ccb->ccb_h.status = CAM_REQ_INPROG;
		ahc_send_lstate_events(ahc, lstate);
		break;
	}
	case XPT_EN_LUN:		/* Enable LUN as a target */
		ahc_handle_en_lun(ahc, sim, ccb);
		xpt_done(ccb);
		break;
	case XPT_ABORT:			/* Abort the specified CCB */
	{
		ahc_abort_ccb(ahc, sim, ccb);
		break;
	}
	case XPT_SET_TRAN_SETTINGS:
	{
#ifdef AHC_NEW_TRAN_SETTINGS
		struct	ahc_devinfo devinfo;
		struct	ccb_trans_settings *cts;
		struct	ccb_trans_settings_scsi *scsi;
		struct	ccb_trans_settings_spi *spi;
		struct	ahc_initiator_tinfo *tinfo;
		struct	tmode_tstate *tstate;
		uint16_t *discenable;
		uint16_t *tagenable;
		u_int	update_type;

		cts = &ccb->cts;
		scsi = &cts->proto_specific.scsi;
		spi = &cts->xport_specific.spi;
		ahc_compile_devinfo(&devinfo, SIM_SCSI_ID(ahc, sim),
				    cts->ccb_h.target_id,
				    cts->ccb_h.target_lun,
				    SIM_CHANNEL(ahc, sim),
				    ROLE_UNKNOWN);
		tinfo = ahc_fetch_transinfo(ahc, devinfo.channel,
					    devinfo.our_scsiid,
					    devinfo.target, &tstate);
		/*
		 * Pick the negotiation table (goal vs. user) and the
		 * disconnect/tag-enable bitmaps to update, based on
		 * whether current or user settings are being set.
		 */
		update_type = 0;
		if (cts->type == CTS_TYPE_CURRENT_SETTINGS) {
			update_type |= AHC_TRANS_GOAL;
			discenable = &tstate->discenable;
			tagenable = &tstate->tagenable;
			tinfo->current.protocol_version =
			    cts->protocol_version;
			tinfo->current.transport_version =
			    cts->transport_version;
			tinfo->goal.protocol_version =
			    cts->protocol_version;
			tinfo->goal.transport_version =
			    cts->transport_version;
		} else if (cts->type == CTS_TYPE_USER_SETTINGS) {
			update_type |= AHC_TRANS_USER;
			discenable = &ahc->user_discenable;
			tagenable = &ahc->user_tagenable;
			tinfo->user.protocol_version =
			    cts->protocol_version;
			tinfo->user.transport_version =
			    cts->transport_version;
		} else {
			ccb->ccb_h.status = CAM_REQ_INVALID;
			xpt_done(ccb);
			break;
		}

		ahc_lock(ahc, &s);

		if ((spi->valid & CTS_SPI_VALID_DISC) != 0) {
			if ((spi->flags & CTS_SPI_FLAGS_DISC_ENB) != 0)
				*discenable |= devinfo.target_mask;
			else
				*discenable &= ~devinfo.target_mask;
		}

		if ((scsi->valid & CTS_SCSI_VALID_TQ) != 0) {
			if ((scsi->flags & CTS_SCSI_FLAGS_TAG_ENB) != 0)
				*tagenable |= devinfo.target_mask;
			else
				*tagenable &= ~devinfo.target_mask;
		}

		if ((spi->valid & CTS_SPI_VALID_BUS_WIDTH) != 0) {
			ahc_validate_width(ahc, &spi->bus_width);
			ahc_set_width(ahc, &devinfo, spi->bus_width,
				      update_type, /*paused*/FALSE);
		}

		/*
		 * Fields the caller left unspecified are backfilled
		 * from our stored settings so the rate validation
		 * below works with a complete set.
		 */
		if ((spi->valid & CTS_SPI_VALID_PPR_OPTIONS) == 0) {
			if (update_type == AHC_TRANS_USER)
				spi->ppr_options = tinfo->user.ppr_options;
			else
				spi->ppr_options = tinfo->goal.ppr_options;
		}

		if ((spi->valid & CTS_SPI_VALID_SYNC_OFFSET) == 0) {
			if (update_type == AHC_TRANS_USER)
				spi->sync_offset = tinfo->user.offset;
			else
				spi->sync_offset = tinfo->goal.offset;
		}

		if ((spi->valid & CTS_SPI_VALID_SYNC_RATE) == 0) {
			if (update_type == AHC_TRANS_USER)
				spi->sync_period = tinfo->user.period;
			else
				spi->sync_period = tinfo->goal.period;
		}

		if (((spi->valid & CTS_SPI_VALID_SYNC_RATE) != 0)
		 || ((spi->valid & CTS_SPI_VALID_SYNC_OFFSET) != 0)) {
			struct ahc_syncrate *syncrate;
			u_int maxsync;

			/* Cap the rate at what the chip generation supports. */
			if ((ahc->features & AHC_ULTRA2) != 0)
				maxsync = AHC_SYNCRATE_DT;
			else if ((ahc->features & AHC_ULTRA) != 0)
				maxsync = AHC_SYNCRATE_ULTRA;
			else
				maxsync = AHC_SYNCRATE_FAST;

			syncrate = ahc_find_syncrate(ahc, &spi->sync_period,
						     &spi->ppr_options,
						     maxsync);
			ahc_validate_offset(ahc, syncrate, &spi->sync_offset,
					    spi->bus_width);

			/* We use a period of 0 to represent async */
			if (spi->sync_offset == 0) {
				spi->sync_period = 0;
				spi->ppr_options = 0;
			}

			ahc_set_syncrate(ahc, &devinfo, syncrate,
					 spi->sync_period, spi->sync_offset,
					 spi->ppr_options, update_type,
					 /*paused*/FALSE);
		}
		ahc_unlock(ahc, &s);
		ccb->ccb_h.status = CAM_REQ_CMP;
		xpt_done(ccb);
#else
		/* Legacy (pre-transport-settings) CAM API variant. */
		struct	  ahc_devinfo devinfo;
		struct	  ccb_trans_settings *cts;
		struct	  ahc_initiator_tinfo *tinfo;
		struct	  tmode_tstate *tstate;
		uint16_t *discenable;
		uint16_t *tagenable;
		u_int	  update_type;
		long	  s;

		cts = &ccb->cts;
		ahc_compile_devinfo(&devinfo, SIM_SCSI_ID(ahc, sim),
				    cts->ccb_h.target_id,
				    cts->ccb_h.target_lun,
				    SIM_CHANNEL(ahc, sim),
				    ROLE_UNKNOWN);
		tinfo = ahc_fetch_transinfo(ahc, devinfo.channel,
					    devinfo.our_scsiid,
					    devinfo.target, &tstate);
		update_type = 0;
		if ((cts->flags & CCB_TRANS_CURRENT_SETTINGS) != 0) {
			update_type |= AHC_TRANS_GOAL;
			discenable = &tstate->discenable;
			tagenable = &tstate->tagenable;
		} else if ((cts->flags & CCB_TRANS_USER_SETTINGS) != 0) {
			update_type |= AHC_TRANS_USER;
			discenable = &ahc->user_discenable;
			tagenable = &ahc->user_tagenable;
		} else {
			ccb->ccb_h.status = CAM_REQ_INVALID;
			xpt_done(ccb);
			break;
		}

		ahc_lock(ahc, &s);

		if ((cts->valid & CCB_TRANS_DISC_VALID) != 0) {
			if ((cts->flags & CCB_TRANS_DISC_ENB) != 0)
				*discenable |= devinfo.target_mask;
			else
				*discenable &= ~devinfo.target_mask;
		}

		if ((cts->valid & CCB_TRANS_TQ_VALID) != 0) {
			if ((cts->flags & CCB_TRANS_TAG_ENB) != 0)
				*tagenable |= devinfo.target_mask;
			else
				*tagenable &= ~devinfo.target_mask;
		}

		if ((cts->valid & CCB_TRANS_BUS_WIDTH_VALID) != 0) {
			ahc_validate_width(ahc, &cts->bus_width);
			ahc_set_width(ahc, &devinfo, cts->bus_width,
				      update_type, /*paused*/FALSE);
		}

		if ((cts->valid & CCB_TRANS_SYNC_OFFSET_VALID) == 0) {
			if (update_type == AHC_TRANS_USER)
				cts->sync_offset = tinfo->user.offset;
			else
				cts->sync_offset = tinfo->goal.offset;
		}

		if ((cts->valid & CCB_TRANS_SYNC_RATE_VALID) == 0) {
			if (update_type == AHC_TRANS_USER)
				cts->sync_period = tinfo->user.period;
			else
				cts->sync_period = tinfo->goal.period;
		}

		if (((cts->valid & CCB_TRANS_SYNC_RATE_VALID) != 0)
		 || ((cts->valid & CCB_TRANS_SYNC_OFFSET_VALID) != 0)) {
			struct ahc_syncrate *syncrate;
			u_int ppr_options;
			u_int maxsync;

			if ((ahc->features & AHC_ULTRA2) != 0)
				maxsync = AHC_SYNCRATE_DT;
			else if ((ahc->features & AHC_ULTRA) != 0)
				maxsync = AHC_SYNCRATE_ULTRA;
			else
				maxsync = AHC_SYNCRATE_FAST;

			/* Periods of 9 (12.5ns) or faster imply DT. */
			ppr_options = 0;
			if (cts->sync_period <= 9)
				ppr_options = MSG_EXT_PPR_DT_REQ;

			syncrate = ahc_find_syncrate(ahc, &cts->sync_period,
						     &ppr_options,
						     maxsync);
			ahc_validate_offset(ahc, syncrate, &cts->sync_offset,
					    MSG_EXT_WDTR_BUS_8_BIT);

			/* We use a period of 0 to represent async */
			if (cts->sync_offset == 0) {
				cts->sync_period = 0;
				ppr_options = 0;
			}

			if (ppr_options == MSG_EXT_PPR_DT_REQ
			 && tinfo->user.transport_version >= 3) {
				tinfo->goal.transport_version =
				    tinfo->user.transport_version;
				tinfo->current.transport_version =
				    tinfo->user.transport_version;
			}

			ahc_set_syncrate(ahc, &devinfo, syncrate,
					 cts->sync_period, cts->sync_offset,
					 ppr_options, update_type,
					 /*paused*/FALSE);
		}
		ahc_unlock(ahc, &s);
		ccb->ccb_h.status = CAM_REQ_CMP;
		xpt_done(ccb);
#endif
		break;
	}
	case XPT_GET_TRAN_SETTINGS:
	/* Get default/user set transfer settings for the target */
	{

		ahc_lock(ahc, &s);
		ahc_get_tran_settings(ahc, SIM_SCSI_ID(ahc, sim),
				      SIM_CHANNEL(ahc, sim), &ccb->cts);
		ahc_unlock(ahc, &s);
		xpt_done(ccb);
		break;
	}
	case XPT_CALC_GEOMETRY:
	{
		struct	  ccb_calc_geometry *ccg;
		uint32_t size_mb;
		uint32_t secs_per_cylinder;
		int	  extended;

		/*
		 * Classic BIOS geometry: 64H/32S normally, 255H/63S
		 * for >1GB volumes when extended translation is
		 * enabled for this channel.
		 */
		ccg = &ccb->ccg;
		size_mb = ccg->volume_size
			/ ((1024L * 1024L) / ccg->block_size);
		extended = SIM_IS_SCSIBUS_B(ahc, sim)
			? ahc->flags & AHC_EXTENDED_TRANS_B
			: ahc->flags & AHC_EXTENDED_TRANS_A;

		if (size_mb > 1024 && extended) {
			ccg->heads = 255;
			ccg->secs_per_track = 63;
		} else {
			ccg->heads = 64;
			ccg->secs_per_track = 32;
		}
		secs_per_cylinder = ccg->heads * ccg->secs_per_track;
		ccg->cylinders = ccg->volume_size / secs_per_cylinder;
		ccb->ccb_h.status = CAM_REQ_CMP;
		xpt_done(ccb);
		break;
	}
	case XPT_RESET_BUS:		/* Reset the specified SCSI bus */
	{
		int  found;

		ahc_lock(ahc, &s);
		found = ahc_reset_channel(ahc, SIM_CHANNEL(ahc, sim),
					  /*initiate reset*/TRUE);
		ahc_unlock(ahc, &s);
		if (bootverbose) {
			xpt_print_path(SIM_PATH(ahc, sim));
			printf("SCSI bus reset delivered. "
			       "%d SCBs aborted.\n", found);
		}
		ccb->ccb_h.status = CAM_REQ_CMP;
		xpt_done(ccb);
		break;
	}
	case XPT_TERM_IO:		/* Terminate the I/O process */
		/* XXX Implement */
		ccb->ccb_h.status = CAM_REQ_INVALID;
		xpt_done(ccb);
		break;
	case XPT_PATH_INQ:		/* Path routing inquiry */
	{
		struct ccb_pathinq *cpi = &ccb->cpi;

		cpi->version_num = 1; /* XXX??? */
		cpi->hba_inquiry = PI_SDTR_ABLE|PI_TAG_ABLE;
		if ((ahc->features & AHC_WIDE) != 0)
			cpi->hba_inquiry |= PI_WIDE_16;
		if ((ahc->flags & AHC_TARGETMODE) != 0) {
			cpi->target_sprt = PIT_PROCESSOR
					 | PIT_DISCONNECT
					 | PIT_TERM_IO;
		} else {
			cpi->target_sprt = 0;
		}
		cpi->hba_misc = (ahc->flags & AHC_INITIATORMODE)
			      ? 0 : PIM_NOINITIATOR;
		cpi->hba_eng_cnt = 0;
		cpi->max_target = (ahc->features & AHC_WIDE) ? 15 : 7;
		cpi->max_lun = 64;
		if (SIM_IS_SCSIBUS_B(ahc, sim)) {
			cpi->initiator_id = ahc->our_id_b;
			if ((ahc->flags & AHC_RESET_BUS_B) == 0)
				cpi->hba_misc |= PIM_NOBUSRESET;
		} else {
			cpi->initiator_id = ahc->our_id;
			if ((ahc->flags & AHC_RESET_BUS_A) == 0)
				cpi->hba_misc |= PIM_NOBUSRESET;
		}
		cpi->bus_id = cam_sim_bus(sim);
		cpi->base_transfer_speed = 3300;
		strncpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN);
		strncpy(cpi->hba_vid, "Adaptec", HBA_IDLEN);
		strncpy(cpi->dev_name, cam_sim_name(sim), DEV_IDLEN);
		cpi->unit_number = cam_sim_unit(sim);
#ifdef AHC_NEW_TRAN_SETTINGS
		cpi->protocol = PROTO_SCSI;
		cpi->protocol_version = SCSI_REV_2;
		cpi->transport = XPORT_SPI;
		cpi->transport_version = 2;
		cpi->xport_specific.spi.ppr_options = SID_SPI_CLOCK_ST;
		if ((ahc->features & AHC_DT) != 0) {
			cpi->transport_version = 3;
			cpi->xport_specific.spi.ppr_options =
			    SID_SPI_CLOCK_DT_ST;
		}
#endif
		cpi->ccb_h.status = CAM_REQ_CMP;
		xpt_done(ccb);
		break;
	}
	default:
		ccb->ccb_h.status = CAM_REQ_INVALID;
		xpt_done(ccb);
		break;
	}
}
883
/*
 * Fill a ccb_trans_settings with this target's negotiated (current)
 * or user-configured transfer parameters: disconnect/tag enables,
 * sync period/offset, bus width, and (new-API only) PPR options and
 * protocol/transport versions.  Two bodies are provided, selected by
 * AHC_NEW_TRAN_SETTINGS, for the new and legacy CAM transport APIs.
 * The new-API variant expects the caller to hold the ahc lock; the
 * legacy variant takes it internally.
 */
static void
ahc_get_tran_settings(struct ahc_softc *ahc, int our_id, char channel,
		      struct ccb_trans_settings *cts)
{
#ifdef AHC_NEW_TRAN_SETTINGS
	struct	ahc_devinfo devinfo;
	struct	ccb_trans_settings_scsi *scsi;
	struct	ccb_trans_settings_spi *spi;
	struct	ahc_initiator_tinfo *targ_info;
	struct	tmode_tstate *tstate;
	struct	ahc_transinfo *tinfo;

	scsi = &cts->proto_specific.scsi;
	spi = &cts->xport_specific.spi;
	ahc_compile_devinfo(&devinfo, our_id,
			    cts->ccb_h.target_id,
			    cts->ccb_h.target_lun,
			    channel, ROLE_UNKNOWN);
	targ_info = ahc_fetch_transinfo(ahc, devinfo.channel,
					devinfo.our_scsiid,
					devinfo.target, &tstate);

	/* Choose the active-negotiation or user-default table. */
	if (cts->type == CTS_TYPE_CURRENT_SETTINGS)
		tinfo = &targ_info->current;
	else
		tinfo = &targ_info->user;

	scsi->flags &= ~CTS_SCSI_FLAGS_TAG_ENB;
	spi->flags &= ~CTS_SPI_FLAGS_DISC_ENB;
	if (cts->type == CTS_TYPE_USER_SETTINGS) {
		if ((ahc->user_discenable & devinfo.target_mask) != 0)
			spi->flags |= CTS_SPI_FLAGS_DISC_ENB;

		if ((ahc->user_tagenable & devinfo.target_mask) != 0)
			scsi->flags |= CTS_SCSI_FLAGS_TAG_ENB;
	} else {
		if ((tstate->discenable & devinfo.target_mask) != 0)
			spi->flags |= CTS_SPI_FLAGS_DISC_ENB;

		if ((tstate->tagenable & devinfo.target_mask) != 0)
			scsi->flags |= CTS_SCSI_FLAGS_TAG_ENB;
	}
	cts->protocol_version = tinfo->protocol_version;
	cts->transport_version = tinfo->transport_version;

	spi->sync_period = tinfo->period;
	spi->sync_offset = tinfo->offset;
	spi->bus_width = tinfo->width;
	spi->ppr_options = tinfo->ppr_options;

	cts->protocol = PROTO_SCSI;
	cts->transport = XPORT_SPI;
	spi->valid = CTS_SPI_VALID_SYNC_RATE
		   | CTS_SPI_VALID_SYNC_OFFSET
		   | CTS_SPI_VALID_BUS_WIDTH
		   | CTS_SPI_VALID_PPR_OPTIONS;

	/* Disconnect/TQ settings are per-lun concepts; not valid
	 * for wildcard lun queries. */
	if (cts->ccb_h.target_lun != CAM_LUN_WILDCARD) {
		scsi->valid = CTS_SCSI_VALID_TQ;
		spi->valid |= CTS_SPI_VALID_DISC;
	} else {
		scsi->valid = 0;
	}

	cts->ccb_h.status = CAM_REQ_CMP;
#else
	struct	ahc_devinfo devinfo;
	struct	ahc_initiator_tinfo *targ_info;
	struct	tmode_tstate *tstate;
	struct	ahc_transinfo *tinfo;
	long	s;

	ahc_compile_devinfo(&devinfo, our_id,
			    cts->ccb_h.target_id,
			    cts->ccb_h.target_lun,
			    channel, ROLE_UNKNOWN);
	targ_info = ahc_fetch_transinfo(ahc, devinfo.channel,
					devinfo.our_scsiid,
					devinfo.target, &tstate);

	if ((cts->flags & CCB_TRANS_CURRENT_SETTINGS) != 0)
		tinfo = &targ_info->current;
	else
		tinfo = &targ_info->user;

	ahc_lock(ahc, &s);

	cts->flags &= ~(CCB_TRANS_DISC_ENB|CCB_TRANS_TAG_ENB);
	if ((cts->flags & CCB_TRANS_CURRENT_SETTINGS) == 0) {
		if ((ahc->user_discenable & devinfo.target_mask) != 0)
			cts->flags |= CCB_TRANS_DISC_ENB;

		if ((ahc->user_tagenable & devinfo.target_mask) != 0)
			cts->flags |= CCB_TRANS_TAG_ENB;
	} else {
		if ((tstate->discenable & devinfo.target_mask) != 0)
			cts->flags |= CCB_TRANS_DISC_ENB;

		if ((tstate->tagenable & devinfo.target_mask) != 0)
			cts->flags |= CCB_TRANS_TAG_ENB;
	}
	cts->sync_period = tinfo->period;
	cts->sync_offset = tinfo->offset;
	cts->bus_width = tinfo->width;

	ahc_unlock(ahc, &s);

	cts->valid = CCB_TRANS_SYNC_RATE_VALID
		   | CCB_TRANS_SYNC_OFFSET_VALID
		   | CCB_TRANS_BUS_WIDTH_VALID;

	if (cts->ccb_h.target_lun != CAM_LUN_WILDCARD)
		cts->valid |= CCB_TRANS_DISC_VALID|CCB_TRANS_TQ_VALID;

	cts->ccb_h.status = CAM_REQ_CMP;
#endif
}
1001
1002static void
1003ahc_async(void *callback_arg, uint32_t code, struct cam_path *path, void *arg)
1004{
1005	struct ahc_softc *ahc;
1006	struct cam_sim *sim;
1007
1008	sim = (struct cam_sim *)callback_arg;
1009	ahc = (struct ahc_softc *)cam_sim_softc(sim);
1010	switch (code) {
1011	case AC_LOST_DEVICE:
1012	{
1013		struct	ahc_devinfo devinfo;
1014		long	s;
1015
1016		ahc_compile_devinfo(&devinfo, SIM_SCSI_ID(ahc, sim),
1017				    xpt_path_target_id(path),
1018				    xpt_path_lun_id(path),
1019				    SIM_CHANNEL(ahc, sim),
1020				    ROLE_UNKNOWN);
1021
1022		/*
1023		 * Revert to async/narrow transfers
1024		 * for the next device.
1025		 */
1026		ahc_lock(ahc, &s);
1027		ahc_set_width(ahc, &devinfo, MSG_EXT_WDTR_BUS_8_BIT,
1028			      AHC_TRANS_GOAL|AHC_TRANS_CUR, /*paused*/FALSE);
1029		ahc_set_syncrate(ahc, &devinfo, /*syncrate*/NULL,
1030				 /*period*/0, /*offset*/0, /*ppr_options*/0,
1031				 AHC_TRANS_GOAL|AHC_TRANS_CUR,
1032				 /*paused*/FALSE);
1033		ahc_unlock(ahc, &s);
1034		break;
1035	}
1036	default:
1037		break;
1038	}
1039}
1040
/*
 * bus_dmamap_load() callback that completes SCB setup once the data
 * buffer's DMA segments are known.  Copies the segments into the SCB's
 * S/G list, applies the current per-target transfer settings, arms the
 * command timeout, and queues the SCB to the controller.  On a mapping
 * error the SCB is freed and the CCB completed with an error status.
 *
 * arg is the SCB under construction; dm_segs/nsegments describe the
 * mapping; error is non-zero if bus_dmamap_load() failed.
 */
static void
ahc_execute_scb(void *arg, bus_dma_segment_t *dm_segs, int nsegments,
		int error)
{
	struct	scb *scb;
	union	ccb *ccb;
	struct	ahc_softc *ahc;
	struct	ahc_initiator_tinfo *tinfo;
	struct	tmode_tstate *tstate;
	u_int	mask;
	long	s;

	scb = (struct scb *)arg;
	ccb = scb->io_ctx;
	ahc = (struct ahc_softc *)ccb->ccb_h.ccb_ahc_ptr;

	if (error != 0) {
		/* EFBIG: the transfer needed more segments than available. */
		if (error == EFBIG)
			ahc_set_transaction_status(scb, CAM_REQ_TOO_BIG);
		else
			ahc_set_transaction_status(scb, CAM_REQ_CMP_ERR);
		if (nsegments != 0)
			bus_dmamap_unload(ahc->buffer_dmat, scb->dmamap);
		ahc_lock(ahc, &s);
		ahc_free_scb(ahc, scb);
		ahc_unlock(ahc, &s);
		xpt_done(ccb);
		return;
	}
	if (nsegments != 0) {
		struct	  ahc_dma_seg *sg;
		bus_dma_segment_t *end_seg;
		bus_dmasync_op_t op;

		end_seg = dm_segs + nsegments;

		/* Copy the segments into our SG list */
		sg = scb->sg_list;
		while (dm_segs < end_seg) {
			sg->addr = dm_segs->ds_addr;
/* XXX Add in the 5th byte of the address later. */
			sg->len = dm_segs->ds_len;
			sg++;
			dm_segs++;
		}

		/*
		 * Note where to find the SG entries in bus space.
		 * We also set the full residual flag which the
		 * sequencer will clear as soon as a data transfer
		 * occurs.
		 */
		scb->hscb->sgptr = scb->sg_list_phys | SG_FULL_RESID;

		if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN)
			op = BUS_DMASYNC_PREREAD;
		else
			op = BUS_DMASYNC_PREWRITE;

		bus_dmamap_sync(ahc->buffer_dmat, scb->dmamap, op);

		if (ccb->ccb_h.func_code == XPT_CONT_TARGET_IO) {
			struct target_data *tdata;

			/* Record the pending data phase for target mode. */
			tdata = &scb->hscb->shared_data.tdata;
			tdata->target_phases |= DPHASE_PENDING;
			if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_OUT)
				tdata->data_phase = P_DATAOUT;
			else
				tdata->data_phase = P_DATAIN;

			/*
			 * If the transfer is of an odd length and in the
			 * "in" direction (scsi->HostBus), then it may
			 * trigger a bug in the 'WideODD' feature of
			 * non-Ultra2 chips.  Force the total data-length
			 * to be even by adding an extra, 1 byte, SG,
			 * element.  We do this even if we are not currently
			 * negotiated wide as negotiation could occur before
			 * this command is executed.
			 */
			if ((ahc->bugs & AHC_TMODE_WIDEODD_BUG) != 0
			 && (ccb->csio.dxfer_len & 0x1) != 0
			 && (ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {

				nsegments++;
				if (nsegments > AHC_NSEG) {

					/* No room left for the pad segment. */
					ahc_set_transaction_status(scb,
					    CAM_REQ_TOO_BIG);
					bus_dmamap_unload(ahc->buffer_dmat,
							  scb->dmamap);
					ahc_lock(ahc, &s);
					ahc_free_scb(ahc, scb);
					ahc_unlock(ahc, &s);
					xpt_done(ccb);
					return;
				}
				/* One pad byte taken from the scratch buffer. */
				sg->addr = ahc->dma_bug_buf;
				sg->len = 1;
				sg++;
			}
		}
		/* Flag the final S/G element so the sequencer stops there. */
		sg--;
		sg->len |= AHC_DMA_LAST_SEG;

		/* Copy the first SG into the "current" data pointer area */
		scb->hscb->dataptr = scb->sg_list->addr;
		scb->hscb->datacnt = scb->sg_list->len;
	} else {
		/* No data transfer for this command. */
		scb->hscb->sgptr = SG_LIST_NULL;
		scb->hscb->dataptr = 0;
		scb->hscb->datacnt = 0;
	}

	scb->sg_count = nsegments;

	ahc_lock(ahc, &s);

	/*
	 * Last time we need to check if this SCB needs to
	 * be aborted.
	 */
	if (ahc_get_transaction_status(scb) != CAM_REQ_INPROG) {
		if (nsegments != 0)
			bus_dmamap_unload(ahc->buffer_dmat,
					  scb->dmamap);
		ahc_free_scb(ahc, scb);
		ahc_unlock(ahc, &s);
		xpt_done(ccb);
		return;
	}

	/* Apply the currently negotiated transfer settings for the target. */
	tinfo = ahc_fetch_transinfo(ahc, SCSIID_CHANNEL(ahc, scb->hscb->scsiid),
				    SCSIID_OUR_ID(scb->hscb->scsiid),
				    ccb->ccb_h.target_id, &tstate);

	mask = SCB_GET_TARGET_MASK(ahc, scb);
	scb->hscb->scsirate = tinfo->scsirate;
	scb->hscb->scsioffset = tinfo->current.offset;
	if ((tstate->ultraenb & mask) != 0)
		scb->hscb->control |= ULTRAENB;

	if ((tstate->discenable & mask) != 0
	 && (ccb->ccb_h.flags & CAM_DIS_DISCONNECT) == 0)
		scb->hscb->control |= DISCENB;

	/*
	 * Honor an explicit negotiation request unless the device is
	 * already running async and narrow.
	 */
	if ((ccb->ccb_h.flags & CAM_NEGOTIATE) != 0
	 && (tinfo->current.width != 0 || tinfo->current.period != 0)) {
		scb->flags |= SCB_NEGOTIATE;
		scb->hscb->control |= MK_MESSAGE;
	}

	LIST_INSERT_HEAD(&ahc->pending_scbs, scb, pending_links);

	ccb->ccb_h.status |= CAM_SIM_QUEUED;

	/* Arm the command timeout; CAM_TIME_DEFAULT maps to 5 seconds. */
	if (ccb->ccb_h.timeout != CAM_TIME_INFINITY) {
		if (ccb->ccb_h.timeout == CAM_TIME_DEFAULT)
			ccb->ccb_h.timeout = 5 * 1000;
		ccb->ccb_h.timeout_ch =
		    timeout(ahc_timeout, (caddr_t)scb,
			    (ccb->ccb_h.timeout * hz) / 1000);
	}

	/*
	 * We only allow one untagged transaction
	 * per target in the initiator role unless
	 * we are storing a full busy target *lun*
	 * table in SCB space.
	 */
	if ((scb->hscb->control & (TARGET_SCB|TAG_ENB)) == 0
	 && (ahc->features & AHC_SCB_BTT) == 0) {
		struct scb_tailq *untagged_q;

		untagged_q = &(ahc->untagged_queues[ccb->ccb_h.target_id]);
		TAILQ_INSERT_TAIL(untagged_q, scb, links.tqe);
		if (TAILQ_FIRST(untagged_q) != scb) {
			/*
			 * Another untagged command is already outstanding;
			 * this SCB starts when it reaches the queue head.
			 */
			ahc_unlock(ahc, &s);
			return;
		}
	}
	scb->flags |= SCB_ACTIVE;

	if ((scb->flags & SCB_TARGET_IMMEDIATE) != 0) {
		/*
		 * Hand the SCB straight to the paused sequencer so it can
		 * resume the in-progress target-mode message loop.
		 */
		pause_sequencer(ahc);
		if ((ahc->flags & AHC_PAGESCBS) == 0)
			ahc_outb(ahc, SCBPTR, scb->hscb->tag);
		ahc_outb(ahc, SCB_TAG, scb->hscb->tag);
		ahc_outb(ahc, RETURN_1, CONT_MSG_LOOP);
		unpause_sequencer(ahc);
	} else {
		ahc_queue_scb(ahc, scb);
	}

	ahc_unlock(ahc, &s);
}
1238
/*
 * Polled-mode entry point: invoke the interrupt handler by hand.
 */
static void
ahc_poll(struct cam_sim *sim)
{
	struct ahc_softc *ahc;

	ahc = (struct ahc_softc *)cam_sim_softc(sim);
	ahc_intr(ahc);
}
1244
/*
 * Prepare a SCSI I/O or continue-target-I/O CCB for execution: stage
 * the CDB bytes into the hardware SCB and map the data buffer for DMA.
 * ahc_execute_scb() finishes queuing -- either called directly here or
 * later from the bus_dmamap_load() callback.
 */
static void
ahc_setup_data(struct ahc_softc *ahc, struct cam_sim *sim,
	       struct ccb_scsiio *csio, struct scb *scb)
{
	struct hardware_scb *hscb;
	struct ccb_hdr *ccb_h;

	hscb = scb->hscb;
	ccb_h = &csio->ccb_h;

	if (ccb_h->func_code == XPT_SCSI_IO) {
		hscb->cdb_len = csio->cdb_len;
		if ((ccb_h->flags & CAM_CDB_POINTER) != 0) {

			/*
			 * Reject CDBs larger than our embedded storage
			 * and physical CDB pointers -- neither is
			 * supported.
			 */
			if (hscb->cdb_len > sizeof(hscb->cdb32)
			 || (ccb_h->flags & CAM_CDB_PHYS) != 0) {
				u_long s;

				ahc_set_transaction_status(scb,
							   CAM_REQ_INVALID);
				ahc_lock(ahc, &s);
				ahc_free_scb(ahc, scb);
				ahc_unlock(ahc, &s);
				xpt_done((union ccb *)csio);
				return;
			}
			/*
			 * CDBs longer than 12 bytes go into the cdb32
			 * area; the sequencer reaches them through the
			 * bus-address pointer in the shared data area.
			 */
			if (hscb->cdb_len > 12) {
				memcpy(hscb->cdb32,
				       csio->cdb_io.cdb_ptr,
				       hscb->cdb_len);
				hscb->shared_data.cdb_ptr = scb->cdb32_busaddr;
			} else {
				memcpy(hscb->shared_data.cdb,
				       csio->cdb_io.cdb_ptr,
				       hscb->cdb_len);
			}
		} else {
			/* CDB bytes are embedded in the CCB itself. */
			if (hscb->cdb_len > 12) {
				memcpy(hscb->cdb32, csio->cdb_io.cdb_bytes,
				       hscb->cdb_len);
				hscb->shared_data.cdb_ptr = scb->cdb32_busaddr;
			} else {
				memcpy(hscb->shared_data.cdb,
				       csio->cdb_io.cdb_bytes,
				       hscb->cdb_len);
			}
		}
	}

	/* Only use S/G if there is a transfer */
	if ((ccb_h->flags & CAM_DIR_MASK) != CAM_DIR_NONE) {
		if ((ccb_h->flags & CAM_SCATTER_VALID) == 0) {
			/* We've been given a pointer to a single buffer */
			if ((ccb_h->flags & CAM_DATA_PHYS) == 0) {
				int s;
				int error;

				/*
				 * Map the virtual buffer; the callback
				 * (possibly deferred) completes setup.
				 */
				s = splsoftvm();
				error = bus_dmamap_load(ahc->buffer_dmat,
							scb->dmamap,
							csio->data_ptr,
							csio->dxfer_len,
							ahc_execute_scb,
							scb, /*flags*/0);
				if (error == EINPROGRESS) {
					/*
					 * So as to maintain ordering,
					 * freeze the controller queue
					 * until our mapping is
					 * returned.
					 */
					xpt_freeze_simq(sim,
							/*count*/1);
					scb->io_ctx->ccb_h.status |=
					    CAM_RELEASE_SIMQ;
				}
				splx(s);
			} else {
				struct bus_dma_segment seg;

				/* Pointer to physical buffer */
				if (csio->dxfer_len > AHC_MAXTRANSFER_SIZE)
					panic("ahc_setup_data - Transfer size "
					      "larger than can device max");

				seg.ds_addr = (bus_addr_t)csio->data_ptr;
				seg.ds_len = csio->dxfer_len;
				ahc_execute_scb(scb, &seg, 1, 0);
			}
		} else {
			struct bus_dma_segment *segs;

			if ((ccb_h->flags & CAM_DATA_PHYS) != 0)
				panic("ahc_setup_data - Physical segment "
				      "pointers unsupported");

			if ((ccb_h->flags & CAM_SG_LIST_PHYS) == 0)
				panic("ahc_setup_data - Virtual segment "
				      "addresses unsupported");

			/* Just use the segments provided */
			segs = (struct bus_dma_segment *)csio->data_ptr;
			ahc_execute_scb(scb, segs, csio->sglist_cnt, 0);
		}
	} else {
		/* No data phase: queue immediately with no segments. */
		ahc_execute_scb(scb, NULL, 0, 0);
	}
}
1353
1354static void
1355ahc_set_recoveryscb(struct ahc_softc *ahc, struct scb *scb) {
1356
1357	if ((scb->flags & SCB_RECOVERY_SCB) == 0) {
1358		struct scb *list_scb;
1359
1360		scb->flags |= SCB_RECOVERY_SCB;
1361
1362		/*
1363		 * Take all queued, but not sent SCBs out of the equation.
1364		 * Also ensure that no new CCBs are queued to us while we
1365		 * try to fix this problem.
1366		 */
1367		if ((scb->io_ctx->ccb_h.status & CAM_RELEASE_SIMQ) == 0) {
1368			xpt_freeze_simq(SCB_GET_SIM(ahc, scb), /*count*/1);
1369			scb->io_ctx->ccb_h.status |= CAM_RELEASE_SIMQ;
1370		}
1371
1372		/*
1373		 * Go through all of our pending SCBs and remove
1374		 * any scheduled timeouts for them.  We will reschedule
1375		 * them after we've successfully fixed this problem.
1376		 */
1377		LIST_FOREACH(list_scb, &ahc->pending_scbs, pending_links) {
1378			union ccb *ccb;
1379
1380			ccb = list_scb->io_ctx;
1381			untimeout(ahc_timeout, list_scb, ccb->ccb_h.timeout_ch);
1382		}
1383	}
1384}
1385
/*
 * Handle expiration of a command's timeout (scheduled via timeout(9)
 * in ahc_execute_scb).  The interrupt handler is run first in case a
 * just-missed completion makes the timeout moot.  Recovery escalates:
 * a bus device reset (BDR) is attempted against the offending target
 * first; if this SCB has already been through a recovery attempt
 * (SCB_DEVICE_RESET/SCB_ABORT set), a full channel reset is issued.
 */
void
ahc_timeout(void *arg)
{
	struct	scb *scb;
	struct	ahc_softc *ahc;
	long	s;
	int	found;
	u_int	last_phase;
	int	target;
	int	lun;
	int	i;
	char	channel;

	scb = (struct scb *)arg;
	ahc = (struct ahc_softc *)scb->io_ctx->ccb_h.ccb_ahc_ptr;

	ahc_lock(ahc, &s);

	/*
	 * Ensure that the card doesn't do anything
	 * behind our back.  Also make sure that we
	 * didn't "just" miss an interrupt that would
	 * affect this timeout.
	 */
	do {
		ahc_intr(ahc);
		pause_sequencer(ahc);
	} while (ahc_inb(ahc, INTSTAT) & INT_PEND);

	ahc_print_path(ahc, scb);
	if ((scb->flags & SCB_ACTIVE) == 0) {
		/* Previous timeout took care of me already */
		printf("Timedout SCB %d handled by another timeout\n",
		       scb->hscb->tag);
		unpause_sequencer(ahc);
		ahc_unlock(ahc, &s);
		return;
	}

	target = SCB_GET_TARGET(ahc, scb);
	channel = SCB_GET_CHANNEL(ahc, scb);
	lun = SCB_GET_LUN(scb);

	printf("SCB 0x%x - timed out ", scb->hscb->tag);
	/*
	 * Take a snapshot of the bus state and print out
	 * some information so we can track down driver bugs.
	 */
	last_phase = ahc_inb(ahc, LASTPHASE);

	/* Map the raw phase value to a printable description. */
	for (i = 0; i < num_phases; i++) {
		if (last_phase == phase_table[i].phase)
			break;
	}
	printf("%s", phase_table[i].phasemsg);

	printf(", SEQADDR == 0x%x\n",
	       ahc_inb(ahc, SEQADDR0) | (ahc_inb(ahc, SEQADDR1) << 8));

	/* Dump the S/G list of the timed-out command for debugging. */
	if (scb->sg_count > 0) {
		for (i = 0; i < scb->sg_count; i++) {
			printf("sg[%d] - Addr 0x%x : Length %d\n",
			       i,
			       scb->sg_list[i].addr,
			       scb->sg_list[i].len);
		}
	}
	if (scb->flags & (SCB_DEVICE_RESET|SCB_ABORT)) {
		/*
		 * Been down this road before.
		 * Do a full bus reset.
		 */
bus_reset:
		ahc_set_transaction_status(scb, CAM_CMD_TIMEOUT);
		found = ahc_reset_channel(ahc, channel, /*Initiate Reset*/TRUE);
		printf("%s: Issued Channel %c Bus Reset. "
		       "%d SCBs aborted\n", ahc_name(ahc), channel, found);
	} else {
		/*
		 * If we are a target, transition to bus free and report
		 * the timeout.
		 *
		 * The target/initiator that is holding up the bus may not
		 * be the same as the one that triggered this timeout
		 * (different commands have different timeout lengths).
		 * If the bus is idle and we are acting as the initiator
		 * for this request, queue a BDR message to the timed out
		 * target.  Otherwise, if the timed out transaction is
		 * active:
		 *   Initiator transaction:
		 *	Stuff the message buffer with a BDR message and assert
		 *	ATN in the hopes that the target will let go of the bus
		 *	and go to the mesgout phase.  If this fails, we'll
		 *	get another timeout 2 seconds later which will attempt
		 *	a bus reset.
		 *
		 *   Target transaction:
		 *	Transition to BUS FREE and report the error.
		 *	It's good to be the target!
		 */
		u_int active_scb_index;

		active_scb_index = ahc_inb(ahc, SCB_TAG);

		if (last_phase != P_BUSFREE
		  && (active_scb_index < ahc->scb_data->numscbs)) {
			struct scb *active_scb;

			/*
			 * If the active SCB is not from our device,
			 * assume that another device is hogging the bus
			 * and wait for its timeout to expire before
			 * taking additional action.
			 */
			active_scb = &ahc->scb_data->scbarray[active_scb_index];
			if (active_scb->hscb->scsiid != scb->hscb->scsiid
			 || active_scb->hscb->lun != scb->hscb->lun) {
				struct	ccb_hdr *ccbh;
				u_int	newtimeout;

				ahc_print_path(ahc, scb);
				printf("Other SCB Timeout%s",
			 	       (scb->flags & SCB_OTHERTCL_TIMEOUT) != 0
				       ? " again\n" : "\n");
				scb->flags |= SCB_OTHERTCL_TIMEOUT;
				/*
				 * Reschedule our timeout for the longer of
				 * the two commands' timeout periods.
				 * NOTE(review): ccbh is assigned below but
				 * never used -- candidate for removal.
				 */
				newtimeout =
				    MAX(active_scb->io_ctx->ccb_h.timeout,
					scb->io_ctx->ccb_h.timeout);
				ccbh = &scb->io_ctx->ccb_h;
				scb->io_ctx->ccb_h.timeout_ch =
				    timeout(ahc_timeout, scb,
					    (newtimeout * hz) / 1000);
				ahc_unlock(ahc, &s);
				return;
			}

			/* It's us */
			if ((scb->hscb->control & TARGET_SCB) != 0) {

				/*
				 * Send back any queued up transactions
				 * and properly record the error condition.
				 */
				ahc_freeze_devq(ahc, scb);
				ahc_set_transaction_status(scb,
							   CAM_CMD_TIMEOUT);
				ahc_freeze_scb(scb);
				ahc_done(ahc, scb);

				/* Will clear us from the bus */
				restart_sequencer(ahc);
				ahc_unlock(ahc, &s);
				return;
			}

			/*
			 * Queue a BDR for the connected device and assert
			 * ATN so the target enters message-out phase.
			 */
			ahc_set_recoveryscb(ahc, active_scb);
			ahc_outb(ahc, MSG_OUT, MSG_BUS_DEV_RESET);
			ahc_outb(ahc, SCSISIGO, last_phase|ATNO);
			ahc_print_path(ahc, active_scb);
			printf("BDR message in message buffer\n");
			active_scb->flags |=  SCB_DEVICE_RESET;
			active_scb->io_ctx->ccb_h.timeout_ch =
			    timeout(ahc_timeout, (caddr_t)active_scb, 2 * hz);
			unpause_sequencer(ahc);
		} else {
			int	 disconnected;

			/* XXX Shouldn't panic.  Just punt instead */
			if ((scb->hscb->control & TARGET_SCB) != 0)
				panic("Timed-out target SCB but bus idle");

			if (last_phase != P_BUSFREE
			 && (ahc_inb(ahc, SSTAT0) & TARGET) != 0) {
				/* XXX What happened to the SCB? */
				/* Hung target selection.  Goto busfree */
				printf("%s: Hung target selection\n",
				       ahc_name(ahc));
				restart_sequencer(ahc);
				ahc_unlock(ahc, &s);
				return;
			}

			/*
			 * If the SCB is still waiting in the QINFIFO it has
			 * not been sent yet and cannot be disconnected.
			 */
			if (ahc_search_qinfifo(ahc, target, channel, lun,
					       scb->hscb->tag, ROLE_INITIATOR,
					       /*status*/0, SEARCH_COUNT) > 0) {
				disconnected = FALSE;
			} else {
				disconnected = TRUE;
			}

			if (disconnected) {
				u_int active_scb;

				ahc_set_recoveryscb(ahc, scb);
				/*
				 * Simply set the MK_MESSAGE control bit.
				 */
				scb->hscb->control |= MK_MESSAGE;
				scb->flags |= SCB_QUEUED_MSG
					   |  SCB_DEVICE_RESET;

				/*
				 * Mark the cached copy of this SCB in the
				 * disconnected list too, so that a reconnect
				 * at this point causes a BDR or abort.
				 */
				active_scb = ahc_inb(ahc, SCBPTR);
				if (ahc_search_disc_list(ahc, target,
							 channel, lun,
							 scb->hscb->tag,
							 /*stop_on_first*/TRUE,
							 /*remove*/FALSE,
							 /*save_state*/FALSE)) {
					u_int scb_control;

					scb_control = ahc_inb(ahc, SCB_CONTROL);
					scb_control |= MK_MESSAGE;
					ahc_outb(ahc, SCB_CONTROL, scb_control);
				}
				ahc_outb(ahc, SCBPTR, active_scb);

				/*
				 * Actually re-queue this SCB in case we can
				 * select the device before it reconnects.
				 * Clear out any entries in the QINFIFO first
				 * so we are the next SCB for this target
				 * to run.
				 */
				ahc_search_qinfifo(ahc,
						   SCB_GET_TARGET(ahc, scb),
						   channel, SCB_GET_LUN(scb),
						   SCB_LIST_NULL,
						   ROLE_INITIATOR,
						   CAM_REQUEUE_REQ,
						   SEARCH_COMPLETE);
				ahc_print_path(ahc, scb);
				printf("Queuing a BDR SCB\n");
				ahc->qinfifo[ahc->qinfifonext++] =
				    scb->hscb->tag;
				if ((ahc->features & AHC_QUEUE_REGS) != 0) {
					ahc_outb(ahc, HNSCB_QOFF,
						 ahc->qinfifonext);
				} else {
					ahc_outb(ahc, KERNEL_QINPOS,
						 ahc->qinfifonext);
				}
				/* Give the BDR two seconds to complete. */
				scb->io_ctx->ccb_h.timeout_ch =
				    timeout(ahc_timeout, (caddr_t)scb, 2 * hz);
				unpause_sequencer(ahc);
			} else {
				/* Go "immediately" to the bus reset */
				/* This shouldn't happen */
				ahc_set_recoveryscb(ahc, scb);
				ahc_print_path(ahc, scb);
				printf("SCB %d: Immediate reset.  "
					"Flags = 0x%x\n", scb->hscb->tag,
					scb->flags);
				goto bus_reset;
			}
		}
	}
	ahc_unlock(ahc, &s);
}
1649
/*
 * Service an abort request for another CCB.  Target-mode CCBs
 * (accept TIO / immediate notify) that are still waiting on their
 * lstate queue are unlinked and completed with CAM_REQ_ABORTED.
 * Aborting an in-flight SCSI I/O is not implemented; such requests
 * are failed with CAM_UA_ABORT.
 */
static void
ahc_abort_ccb(struct ahc_softc *ahc, struct cam_sim *sim, union ccb *ccb)
{
	union ccb *abort_ccb;

	abort_ccb = ccb->cab.abort_ccb;
	switch (abort_ccb->ccb_h.func_code) {
	case XPT_ACCEPT_TARGET_IO:
	case XPT_IMMED_NOTIFY:
	case XPT_CONT_TARGET_IO:
	{
		struct tmode_tstate *tstate;
		struct tmode_lstate *lstate;
		struct ccb_hdr_slist *list;
		cam_status status;

		status = ahc_find_tmode_devs(ahc, sim, abort_ccb, &tstate,
					     &lstate, TRUE);

		if (status != CAM_REQ_CMP) {
			ccb->ccb_h.status = status;
			break;
		}

		/* Select the queue the CCB would still be waiting on. */
		if (abort_ccb->ccb_h.func_code == XPT_ACCEPT_TARGET_IO)
			list = &lstate->accept_tios;
		else if (abort_ccb->ccb_h.func_code == XPT_IMMED_NOTIFY)
			list = &lstate->immed_notifies;
		else
			list = NULL;

		if (list != NULL) {
			struct ccb_hdr *curelm;
			int found;

			/*
			 * Unlink the CCB from the singly-linked list,
			 * handling the head element separately.
			 */
			curelm = SLIST_FIRST(list);
			found = 0;
			if (curelm == &abort_ccb->ccb_h) {
				found = 1;
				SLIST_REMOVE_HEAD(list, sim_links.sle);
			} else {
				while(curelm != NULL) {
					struct ccb_hdr *nextelm;

					nextelm =
					    SLIST_NEXT(curelm, sim_links.sle);

					if (nextelm == &abort_ccb->ccb_h) {
						found = 1;
						SLIST_NEXT(curelm,
							   sim_links.sle) =
						    SLIST_NEXT(nextelm,
							       sim_links.sle);
						break;
					}
					curelm = nextelm;
				}
			}

			if (found) {
				abort_ccb->ccb_h.status = CAM_REQ_ABORTED;
				xpt_done(abort_ccb);
				ccb->ccb_h.status = CAM_REQ_CMP;
			} else {
				printf("Not found\n");
				ccb->ccb_h.status = CAM_PATH_INVALID;
			}
			break;
		}
		/* FALLTHROUGH */
	}
	case XPT_SCSI_IO:
		/* XXX Fully implement the hard ones */
		ccb->ccb_h.status = CAM_UA_ABORT;
		break;
	default:
		ccb->ccb_h.status = CAM_REQ_INVALID;
		break;
	}
	xpt_done(ccb);
}
1731
1732void
1733ahc_send_async(struct ahc_softc *ahc, char channel, u_int target,
1734		u_int lun, ac_code code)
1735{
1736	struct	ccb_trans_settings cts;
1737	struct cam_path *path;
1738	void *arg;
1739	int error;
1740
1741	arg = NULL;
1742	error = ahc_create_path(ahc, channel, target, lun, &path);
1743
1744	if (error != CAM_REQ_CMP)
1745		return;
1746
1747	switch (code) {
1748	case AC_TRANSFER_NEG:
1749#ifdef AHC_NEW_TRAN_SETTINGS
1750		cts.type = CTS_TYPE_CURRENT_SETTINGS;
1751#else
1752		cts.flags = CCB_TRANS_CURRENT_SETTINGS;
1753#endif
1754		cts.ccb_h.path = path;
1755		cts.ccb_h.target_id = target;
1756		cts.ccb_h.target_lun = lun;
1757		ahc_get_tran_settings(ahc, channel == 'A' ? ahc->our_id
1758							  : ahc->our_id_b,
1759				      channel, &cts);
1760		arg = &cts;
1761		break;
1762	case AC_SENT_BDR:
1763	case AC_BUS_RESET:
1764		break;
1765	default:
1766		panic("ahc_send_async: Unexpected async event");
1767	}
1768	xpt_async(code, path, arg);
1769}
1770
/*
 * Platform hook invoked when the tagged-queuing setting for a device
 * changes.  Intentionally an empty stub: no FreeBSD-specific action
 * is taken here.
 */
void
ahc_platform_set_tags(struct ahc_softc *ahc,
		      struct ahc_devinfo *devinfo, int enable)
{
}
1776
1777int
1778ahc_platform_alloc(struct ahc_softc *ahc, void *platform_arg)
1779{
1780	ahc->platform_data =
1781	    malloc(sizeof(struct ahc_platform_data), M_DEVBUF, M_NOWAIT);
1782	if (ahc->platform_data == NULL)
1783		return (ENOMEM);
1784	memset(ahc->platform_data, 0, sizeof(struct ahc_platform_data));
1785	return (0);
1786}
1787
1788void
1789ahc_platform_free(struct ahc_softc *ahc)
1790{
1791	if (ahc->platform_data != NULL) {
1792		if (ahc->platform_data->regs != NULL)
1793			bus_release_resource(ahc->dev_softc,
1794					     ahc->platform_data->regs_res_type,
1795					     ahc->platform_data->regs_res_id,
1796					     ahc->platform_data->regs);
1797
1798		if (ahc->platform_data->irq != NULL)
1799			bus_release_resource(ahc->dev_softc,
1800					 ahc->platform_data->irq_res_type,
1801					 0, ahc->platform_data->irq);
1802
1803		free(ahc->platform_data, M_DEVBUF);
1804	}
1805}
1806
/*
 * Softc ordering comparator used by the core driver when sorting
 * controllers.  FreeBSD does not sort softcs, so every pair of
 * controllers compares as equal.
 */
int
ahc_softc_comp(struct ahc_softc *lahc, struct ahc_softc *rahc)
{
	/* No platform-defined ordering: always equal. */
	return (0);
}
1813
1814#if UNUSED
1815static void
1816ahc_dump_targcmd(struct target_cmd *cmd)
1817{
1818	uint8_t *byte;
1819	uint8_t *last_byte;
1820	int i;
1821
1822	byte = &cmd->initiator_channel;
1823	/* Debugging info for received commands */
1824	last_byte = &cmd[1].initiator_channel;
1825
1826	i = 0;
1827	while (byte < last_byte) {
1828		if (i == 0)
1829			printf("\t");
1830		printf("%#x", *byte++);
1831		i++;
1832		if (i == 8) {
1833			printf("\n");
1834			i = 0;
1835		} else {
1836			printf(", ");
1837		}
1838	}
1839}
1840#endif
1841