aic7xxx_osm.c revision 168752
1/*-
2 * Bus independent FreeBSD shim for the aic7xxx based Adaptec SCSI controllers
3 *
4 * Copyright (c) 1994-2001 Justin T. Gibbs.
5 * All rights reserved.
6 *
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
9 * are met:
10 * 1. Redistributions of source code must retain the above copyright
11 *    notice, this list of conditions, and the following disclaimer,
12 *    without modification.
13 * 2. The name of the author may not be used to endorse or promote products
14 *    derived from this software without specific prior written permission.
15 *
16 * Alternatively, this software may be distributed under the terms of the
17 * GNU Public License ("GPL").
18 *
19 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
20 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
21 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
22 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
23 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
24 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
25 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
26 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
27 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
28 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
29 * SUCH DAMAGE.
30 *
31 * $Id: //depot/aic7xxx/freebsd/dev/aic7xxx/aic7xxx_osm.c#20 $
32 */
33
34#include <sys/cdefs.h>
35__FBSDID("$FreeBSD: head/sys/dev/aic7xxx/aic7xxx_osm.c 168752 2007-04-15 08:49:19Z scottl $");
36
37#include <dev/aic7xxx/aic7xxx_osm.h>
38#include <dev/aic7xxx/aic7xxx_inline.h>
39
40#include <sys/kthread.h>
41
42#ifndef AHC_TMODE_ENABLE
43#define AHC_TMODE_ENABLE 0
44#endif
45
46#include <dev/aic7xxx/aic_osm_lib.c>
47
48#define ccb_scb_ptr spriv_ptr0
49
50devclass_t ahc_devclass;
51
52#if 0
53static void	ahc_dump_targcmd(struct target_cmd *cmd);
54#endif
55static int	ahc_modevent(module_t mod, int type, void *data);
56static void	ahc_action(struct cam_sim *sim, union ccb *ccb);
57static void	ahc_get_tran_settings(struct ahc_softc *ahc,
58				      int our_id, char channel,
59				      struct ccb_trans_settings *cts);
60static void	ahc_async(void *callback_arg, uint32_t code,
61			  struct cam_path *path, void *arg);
62static void	ahc_execute_scb(void *arg, bus_dma_segment_t *dm_segs,
63				int nsegments, int error);
64static void	ahc_poll(struct cam_sim *sim);
65static void	ahc_setup_data(struct ahc_softc *ahc, struct cam_sim *sim,
66			       struct ccb_scsiio *csio, struct scb *scb);
67static void	ahc_abort_ccb(struct ahc_softc *ahc, struct cam_sim *sim,
68			      union ccb *ccb);
69static int	ahc_create_path(struct ahc_softc *ahc,
70				char channel, u_int target, u_int lun,
71				struct cam_path **path);
72
73
74static int
75ahc_create_path(struct ahc_softc *ahc, char channel, u_int target,
76	        u_int lun, struct cam_path **path)
77{
78	path_id_t path_id;
79
80	if (channel == 'B')
81		path_id = cam_sim_path(ahc->platform_data->sim_b);
82	else
83		path_id = cam_sim_path(ahc->platform_data->sim);
84
85	return (xpt_create_path(path, /*periph*/NULL,
86				path_id, target, lun));
87}
88
89int
90ahc_map_int(struct ahc_softc *ahc)
91{
92	int error;
93	int zero;
94	int shareable;
95
96	zero = 0;
97	shareable = (ahc->flags & AHC_EDGE_INTERRUPT) ? 0: RF_SHAREABLE;
98	ahc->platform_data->irq =
99	    bus_alloc_resource_any(ahc->dev_softc, SYS_RES_IRQ, &zero,
100				   RF_ACTIVE | shareable);
101	if (ahc->platform_data->irq == NULL) {
102		device_printf(ahc->dev_softc,
103			      "bus_alloc_resource() failed to allocate IRQ\n");
104		return (ENOMEM);
105	}
106	ahc->platform_data->irq_res_type = SYS_RES_IRQ;
107
108	/* Hook up our interrupt handler */
109	error = bus_setup_intr(ahc->dev_softc, ahc->platform_data->irq,
110			       INTR_TYPE_CAM, NULL, ahc_platform_intr, ahc,
111			       &ahc->platform_data->ih);
112
113	if (error != 0)
114		device_printf(ahc->dev_softc, "bus_setup_intr() failed: %d\n",
115			      error);
116	return (error);
117}
118
119int
120aic7770_map_registers(struct ahc_softc *ahc, u_int unused_ioport_arg)
121{
122	struct	resource *regs;
123	int	rid;
124
125	rid = 0;
126	regs = bus_alloc_resource_any(ahc->dev_softc, SYS_RES_IOPORT, &rid,
127				      RF_ACTIVE);
128	if (regs == NULL) {
129		device_printf(ahc->dev_softc, "Unable to map I/O space?!\n");
130		return ENOMEM;
131	}
132	ahc->platform_data->regs_res_type = SYS_RES_IOPORT;
133	ahc->platform_data->regs_res_id = rid,
134	ahc->platform_data->regs = regs;
135	ahc->tag = rman_get_bustag(regs);
136	ahc->bsh = rman_get_bushandle(regs);
137	return (0);
138}
139
140/*
141 * Attach all the sub-devices we can find
142 */
int
ahc_attach(struct ahc_softc *ahc)
{
	char   ahc_info[256];
	struct ccb_setasync csa;
	struct cam_devq *devq;
	int bus_id;
	int bus_id2;
	struct cam_sim *sim;
	struct cam_sim *sim2;
	struct cam_path *path;
	struct cam_path *path2;
	long s;
	int count;

	/*
	 * count tracks how many busses were successfully registered and
	 * is also this function's return value.  The sim/path pointers
	 * start NULL so the fail: path can tell what was allocated.
	 */
	count = 0;
	sim = NULL;
	sim2 = NULL;
	path = NULL;
	path2 = NULL;

	/*
	 * Create a thread to perform all recovery.
	 */
	if (ahc_spawn_recovery_thread(ahc) != 0)
		goto fail;

	ahc_controller_info(ahc, ahc_info);
	printf("%s\n", ahc_info);
	ahc_lock(ahc, &s);

	/*
	 * Attach secondary channel first if the user has
	 * declared it the primary channel.
	 */
	if ((ahc->features & AHC_TWIN) != 0
	 && (ahc->flags & AHC_PRIMARY_CHANNEL) != 0) {
		bus_id = 1;
		bus_id2 = 0;
	} else {
		bus_id = 0;
		bus_id2 = 1;
	}

	/*
	 * Create the device queue for our SIM(s).
	 */
	devq = cam_simq_alloc(AHC_MAX_QUEUE);
	if (devq == NULL)
		goto fail;

	/*
	 * Construct our first channel SIM entry
	 */
	sim = cam_sim_alloc(ahc_action, ahc_poll, "ahc", ahc,
			    device_get_unit(ahc->dev_softc),
			    &Giant, 1, AHC_MAX_QUEUE, devq);
	if (sim == NULL) {
		cam_simq_free(devq);
		goto fail;
	}

	/* From here on devq is owned by sim; free_devq on teardown. */
	if (xpt_bus_register(sim, bus_id) != CAM_SUCCESS) {
		cam_sim_free(sim, /*free_devq*/TRUE);
		sim = NULL;
		goto fail;
	}

	if (xpt_create_path(&path, /*periph*/NULL,
			    cam_sim_path(sim), CAM_TARGET_WILDCARD,
			    CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
		xpt_bus_deregister(cam_sim_path(sim));
		cam_sim_free(sim, /*free_devq*/TRUE);
		sim = NULL;
		goto fail;
	}

	/* Ask CAM for AC_LOST_DEVICE callbacks on this bus (ahc_async). */
	xpt_setup_ccb(&csa.ccb_h, path, /*priority*/5);
	csa.ccb_h.func_code = XPT_SASYNC_CB;
	csa.event_enable = AC_LOST_DEVICE;
	csa.callback = ahc_async;
	csa.callback_arg = sim;
	xpt_action((union ccb *)&csa);
	count++;

	if (ahc->features & AHC_TWIN) {
		/* Second channel shares the devq allocated above. */
		sim2 = cam_sim_alloc(ahc_action, ahc_poll, "ahc",
				    ahc, device_get_unit(ahc->dev_softc),
				    &Giant, 1,
				    AHC_MAX_QUEUE, devq);

		if (sim2 == NULL) {
			printf("ahc_attach: Unable to attach second "
			       "bus due to resource shortage");
			goto fail;
		}

		if (xpt_bus_register(sim2, bus_id2) != CAM_SUCCESS) {
			printf("ahc_attach: Unable to attach second "
			       "bus due to resource shortage");
			/*
			 * We do not want to destroy the device queue
			 * because the first bus is using it.
			 */
			cam_sim_free(sim2, /*free_devq*/FALSE);
			goto fail;
		}

		if (xpt_create_path(&path2, /*periph*/NULL,
				    cam_sim_path(sim2),
				    CAM_TARGET_WILDCARD,
				    CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
			xpt_bus_deregister(cam_sim_path(sim2));
			cam_sim_free(sim2, /*free_devq*/FALSE);
			sim2 = NULL;
			goto fail;
		}
		xpt_setup_ccb(&csa.ccb_h, path2, /*priority*/5);
		csa.ccb_h.func_code = XPT_SASYNC_CB;
		csa.event_enable = AC_LOST_DEVICE;
		csa.callback = ahc_async;
		csa.callback_arg = sim2;
		xpt_action((union ccb *)&csa);
		count++;
	}

fail:
	/*
	 * Record whatever was successfully created, even on partial
	 * failure.  If the user made channel B primary, sim/sim2 map to
	 * the B/A slots respectively; otherwise A/B.
	 */
	if ((ahc->features & AHC_TWIN) != 0
	 && (ahc->flags & AHC_PRIMARY_CHANNEL) != 0) {
		ahc->platform_data->sim_b = sim;
		ahc->platform_data->path_b = path;
		ahc->platform_data->sim = sim2;
		ahc->platform_data->path = path2;
	} else {
		ahc->platform_data->sim = sim;
		ahc->platform_data->path = path;
		ahc->platform_data->sim_b = sim2;
		ahc->platform_data->path_b = path2;
	}

	if (count != 0) {
		/* We have to wait until after any system dumps... */
		ahc->platform_data->eh =
		    EVENTHANDLER_REGISTER(shutdown_final, ahc_shutdown,
					  ahc, SHUTDOWN_PRI_DEFAULT);
		ahc_intr_enable(ahc, TRUE);
	}

	ahc_unlock(ahc, &s);
	/* Number of busses attached (0, 1, or 2). */
	return (count);
}
294
295/*
296 * Catch an interrupt from the adapter
297 */
void
ahc_platform_intr(void *arg)
{
	/*
	 * The interrupt argument registered in ahc_map_int() is our
	 * softc; hand it straight to the core interrupt handler.
	 */
	ahc_intr((struct ahc_softc *)arg);
}
306
307/*
308 * We have an scb which has been processed by the
309 * adaptor, now we look to see how the operation
310 * went.
311 */
void
ahc_done(struct ahc_softc *ahc, struct scb *scb)
{
	union ccb *ccb;

	CAM_DEBUG(scb->io_ctx->ccb_h.path, CAM_DEBUG_TRACE,
		  ("ahc_done - scb %d\n", scb->hscb->tag));

	/* Unlink the SCB from every queue it may still be on. */
	ccb = scb->io_ctx;
	LIST_REMOVE(scb, pending_links);
	if ((scb->flags & SCB_TIMEDOUT) != 0)
		LIST_REMOVE(scb, timedout_links);
	if ((scb->flags & SCB_UNTAGGEDQ) != 0) {
		struct scb_tailq *untagged_q;
		int target_offset;

		target_offset = SCB_GET_TARGET_OFFSET(ahc, scb);
		untagged_q = &ahc->untagged_queues[target_offset];
		TAILQ_REMOVE(untagged_q, scb, links.tqe);
		scb->flags &= ~SCB_UNTAGGEDQ;
		/* Kick the next untagged command for this target, if any. */
		ahc_run_untagged_queue(ahc, untagged_q);
	}

	/* This transaction can no longer time out. */
	untimeout(ahc_platform_timeout, (caddr_t)scb, ccb->ccb_h.timeout_ch);

	if ((ccb->ccb_h.flags & CAM_DIR_MASK) != CAM_DIR_NONE) {
		bus_dmasync_op_t op;

		/* Finish the DMA transaction before releasing the map. */
		if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN)
			op = BUS_DMASYNC_POSTREAD;
		else
			op = BUS_DMASYNC_POSTWRITE;
		bus_dmamap_sync(ahc->buffer_dmat, scb->dmamap, op);
		bus_dmamap_unload(ahc->buffer_dmat, scb->dmamap);
	}

	if (ccb->ccb_h.func_code == XPT_CONT_TARGET_IO) {
		struct cam_path *ccb_path;

		/*
		 * If we have finally disconnected, clean up our
		 * pending device state.
		 * XXX - There may be error states in which
		 *       we will remain connected.
		 */
		ccb_path = ccb->ccb_h.path;
		if (ahc->pending_device != NULL
		 && xpt_path_comp(ahc->pending_device->path, ccb_path) == 0) {

			if ((ccb->ccb_h.flags & CAM_SEND_STATUS) != 0) {
				ahc->pending_device = NULL;
			} else {
				if (bootverbose) {
					xpt_print_path(ccb->ccb_h.path);
					printf("Still connected\n");
				}
				aic_freeze_ccb(ccb);
			}
		}

		/* Target-mode CCBs complete here; skip initiator handling. */
		if (aic_get_transaction_status(scb) == CAM_REQ_INPROG)
			ccb->ccb_h.status |= CAM_REQ_CMP;
		ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
		ahc_free_scb(ahc, scb);
		xpt_done(ccb);
		return;
	}

	/*
	 * If the recovery SCB completes, we have to be
	 * out of our timeout.
	 */
	if ((scb->flags & SCB_RECOVERY_SCB) != 0) {
		struct	scb *list_scb;

		ahc->scb_data->recovery_scbs--;

		/* Recovery-induced aborts are reported as timeouts. */
		if (aic_get_transaction_status(scb) == CAM_BDR_SENT
		 || aic_get_transaction_status(scb) == CAM_REQ_ABORTED)
			aic_set_transaction_status(scb, CAM_CMD_TIMEOUT);

		if (ahc->scb_data->recovery_scbs == 0) {
			/*
			 * All recovery actions have completed successfully,
			 * so reinstate the timeouts for all other pending
			 * commands.
			 */
			LIST_FOREACH(list_scb, &ahc->pending_scbs,
				     pending_links) {

				aic_scb_timer_reset(list_scb,
						    aic_get_timeout(scb));
			}

			ahc_print_path(ahc, scb);
			printf("no longer in timeout, status = %x\n",
			       ccb->ccb_h.status);
		}
	}

	/* Don't clobber any existing error state */
	if (aic_get_transaction_status(scb) == CAM_REQ_INPROG) {
		ccb->ccb_h.status |= CAM_REQ_CMP;
	} else if ((scb->flags & SCB_SENSE) != 0) {
		/*
		 * We performed autosense retrieval.
		 *
		 * Zero any sense not transferred by the
		 * device.  The SCSI spec mandates that any
		 * untransfered data should be assumed to be
		 * zero.  Complete the 'bounce' of sense information
		 * through buffers accessible via bus-space by
		 * copying it into the clients csio.
		 */
		memset(&ccb->csio.sense_data, 0, sizeof(ccb->csio.sense_data));
		memcpy(&ccb->csio.sense_data,
		       ahc_get_sense_buf(ahc, scb),
		       (aic_le32toh(scb->sg_list->len) & AHC_SG_LEN_MASK)
		       - ccb->csio.sense_resid);
		scb->io_ctx->ccb_h.status |= CAM_AUTOSNS_VALID;
	}
	/* Release the SCB and hand the CCB back to CAM. */
	ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
	ahc_free_scb(ahc, scb);
	xpt_done(ccb);
}
437
/*
 * CAM SIM action entry point.  Dispatches every CCB function code that
 * CAM can deliver to this controller: I/O submission, target-mode
 * accept/continue, transfer-settings get/set, bus reset, path inquiry,
 * and aborts.  Completed or rejected CCBs are returned via xpt_done().
 */
static void
ahc_action(struct cam_sim *sim, union ccb *ccb)
{
	struct	ahc_softc *ahc;
	struct	ahc_tmode_lstate *lstate;
	u_int	target_id;
	u_int	our_id;
	long	s;

	CAM_DEBUG(ccb->ccb_h.path, CAM_DEBUG_TRACE, ("ahc_action\n"));

	ahc = (struct ahc_softc *)cam_sim_softc(sim);

	target_id = ccb->ccb_h.target_id;
	our_id = SIM_SCSI_ID(ahc, sim);

	switch (ccb->ccb_h.func_code) {
	/* Common cases first */
	case XPT_ACCEPT_TARGET_IO:	/* Accept Host Target Mode CDB */
	case XPT_CONT_TARGET_IO:/* Continue Host Target I/O Connection*/
	{
		struct	   ahc_tmode_tstate *tstate;
		cam_status status;

		status = ahc_find_tmode_devs(ahc, sim, ccb, &tstate,
					     &lstate, TRUE);

		if (status != CAM_REQ_CMP) {
			if (ccb->ccb_h.func_code == XPT_CONT_TARGET_IO) {
				/* Response from the black hole device */
				tstate = NULL;
				lstate = ahc->black_hole;
			} else {
				ccb->ccb_h.status = status;
				xpt_done(ccb);
				break;
			}
		}
		if (ccb->ccb_h.func_code == XPT_ACCEPT_TARGET_IO) {

			/* Queue the ATIO for a future incoming command. */
			ahc_lock(ahc, &s);
			SLIST_INSERT_HEAD(&lstate->accept_tios, &ccb->ccb_h,
					  sim_links.sle);
			ccb->ccb_h.status = CAM_REQ_INPROG;
			if ((ahc->flags & AHC_TQINFIFO_BLOCKED) != 0)
				ahc_run_tqinfifo(ahc, /*paused*/FALSE);
			ahc_unlock(ahc, &s);
			break;
		}

		/*
		 * The target_id represents the target we attempt to
		 * select.  In target mode, this is the initiator of
		 * the original command.
		 */
		our_id = target_id;
		target_id = ccb->csio.init_id;
		/* FALLTHROUGH */
	}
	case XPT_SCSI_IO:	/* Execute the requested I/O operation */
	case XPT_RESET_DEV:	/* Bus Device Reset the specified SCSI device */
	{
		struct	scb *scb;
		struct	hardware_scb *hscb;

		/* Initiator-role requests require the initiator role. */
		if ((ahc->flags & AHC_INITIATORROLE) == 0
		 && (ccb->ccb_h.func_code == XPT_SCSI_IO
		  || ccb->ccb_h.func_code == XPT_RESET_DEV)) {
			ccb->ccb_h.status = CAM_PROVIDE_FAIL;
			xpt_done(ccb);
			return;
		}

		/*
		 * get an scb to use.
		 */
		ahc_lock(ahc, &s);
		if ((scb = ahc_get_scb(ahc)) == NULL) {

			/* Out of SCBs: freeze the queue and ask CAM to retry. */
			xpt_freeze_simq(sim, /*count*/1);
			ahc->flags |= AHC_RESOURCE_SHORTAGE;
			ahc_unlock(ahc, &s);
			ccb->ccb_h.status = CAM_REQUEUE_REQ;
			xpt_done(ccb);
			return;
		}
		ahc_unlock(ahc, &s);

		hscb = scb->hscb;

		CAM_DEBUG(ccb->ccb_h.path, CAM_DEBUG_SUBTRACE,
			  ("start scb(%p)\n", scb));
		scb->io_ctx = ccb;
		/*
		 * So we can find the SCB when an abort is requested
		 */
		ccb->ccb_h.ccb_scb_ptr = scb;

		/*
		 * Put all the arguments for the xfer in the scb
		 */
		hscb->control = 0;
		hscb->scsiid = BUILD_SCSIID(ahc, sim, target_id, our_id);
		hscb->lun = ccb->ccb_h.target_lun;
		if (ccb->ccb_h.func_code == XPT_RESET_DEV) {
			/* A BDR carries no CDB; it is delivered as a message. */
			hscb->cdb_len = 0;
			scb->flags |= SCB_DEVICE_RESET;
			hscb->control |= MK_MESSAGE;
			ahc_execute_scb(scb, NULL, 0, 0);
		} else {
			if (ccb->ccb_h.func_code == XPT_CONT_TARGET_IO) {
				struct target_data *tdata;

				/* Fill in the target-mode half of the hscb. */
				tdata = &hscb->shared_data.tdata;
				if (ahc->pending_device == lstate)
					scb->flags |= SCB_TARGET_IMMEDIATE;
				hscb->control |= TARGET_SCB;
				scb->flags |= SCB_TARGET_SCB;
				tdata->target_phases = 0;
				if ((ccb->ccb_h.flags & CAM_SEND_STATUS) != 0) {
					tdata->target_phases |= SPHASE_PENDING;
					tdata->scsi_status =
					    ccb->csio.scsi_status;
				}
	 			if (ccb->ccb_h.flags & CAM_DIS_DISCONNECT)
					tdata->target_phases |= NO_DISCONNECT;

				tdata->initiator_tag = ccb->csio.tag_id;
			}
			if (ccb->ccb_h.flags & CAM_TAG_ACTION_VALID)
				hscb->control |= ccb->csio.tag_action;

			/* Map the data buffer; completion continues in
			 * ahc_execute_scb() via the busdma callback. */
			ahc_setup_data(ahc, sim, &ccb->csio, scb);
		}
		break;
	}
	case XPT_NOTIFY_ACK:
	case XPT_IMMED_NOTIFY:
	{
		struct	   ahc_tmode_tstate *tstate;
		struct	   ahc_tmode_lstate *lstate;
		cam_status status;

		status = ahc_find_tmode_devs(ahc, sim, ccb, &tstate,
					     &lstate, TRUE);

		if (status != CAM_REQ_CMP) {
			ccb->ccb_h.status = status;
			xpt_done(ccb);
			break;
		}
		/* Queue the notify CCB and push any pending events. */
		SLIST_INSERT_HEAD(&lstate->immed_notifies, &ccb->ccb_h,
				  sim_links.sle);
		ccb->ccb_h.status = CAM_REQ_INPROG;
		ahc_send_lstate_events(ahc, lstate);
		break;
	}
	case XPT_EN_LUN:		/* Enable LUN as a target */
		ahc_handle_en_lun(ahc, sim, ccb);
		xpt_done(ccb);
		break;
	case XPT_ABORT:			/* Abort the specified CCB */
	{
		ahc_abort_ccb(ahc, sim, ccb);
		break;
	}
	case XPT_SET_TRAN_SETTINGS:
	{
		struct	ahc_devinfo devinfo;
		struct	ccb_trans_settings *cts;
		struct	ccb_trans_settings_scsi *scsi;
		struct	ccb_trans_settings_spi *spi;
		struct	ahc_initiator_tinfo *tinfo;
		struct	ahc_tmode_tstate *tstate;
		uint16_t *discenable;
		uint16_t *tagenable;
		u_int	update_type;

		cts = &ccb->cts;
		scsi = &cts->proto_specific.scsi;
		spi = &cts->xport_specific.spi;
		ahc_compile_devinfo(&devinfo, SIM_SCSI_ID(ahc, sim),
				    cts->ccb_h.target_id,
				    cts->ccb_h.target_lun,
				    SIM_CHANNEL(ahc, sim),
				    ROLE_UNKNOWN);
		tinfo = ahc_fetch_transinfo(ahc, devinfo.channel,
					    devinfo.our_scsiid,
					    devinfo.target, &tstate);
		/*
		 * CURRENT updates the negotiation goal; USER updates the
		 * sticky per-target user defaults.  Anything else is an
		 * invalid request.
		 */
		update_type = 0;
		if (cts->type == CTS_TYPE_CURRENT_SETTINGS) {
			update_type |= AHC_TRANS_GOAL;
			discenable = &tstate->discenable;
			tagenable = &tstate->tagenable;
			tinfo->curr.protocol_version =
			    cts->protocol_version;
			tinfo->curr.transport_version =
			    cts->transport_version;
			tinfo->goal.protocol_version =
			    cts->protocol_version;
			tinfo->goal.transport_version =
			    cts->transport_version;
		} else if (cts->type == CTS_TYPE_USER_SETTINGS) {
			update_type |= AHC_TRANS_USER;
			discenable = &ahc->user_discenable;
			tagenable = &ahc->user_tagenable;
			tinfo->user.protocol_version =
			    cts->protocol_version;
			tinfo->user.transport_version =
			    cts->transport_version;
		} else {
			ccb->ccb_h.status = CAM_REQ_INVALID;
			xpt_done(ccb);
			break;
		}

		ahc_lock(ahc, &s);

		/* Apply disconnect/tag-queueing enables per target mask. */
		if ((spi->valid & CTS_SPI_VALID_DISC) != 0) {
			if ((spi->flags & CTS_SPI_FLAGS_DISC_ENB) != 0)
				*discenable |= devinfo.target_mask;
			else
				*discenable &= ~devinfo.target_mask;
		}

		if ((scsi->valid & CTS_SCSI_VALID_TQ) != 0) {
			if ((scsi->flags & CTS_SCSI_FLAGS_TAG_ENB) != 0)
				*tagenable |= devinfo.target_mask;
			else
				*tagenable &= ~devinfo.target_mask;
		}

		if ((spi->valid & CTS_SPI_VALID_BUS_WIDTH) != 0) {
			ahc_validate_width(ahc, /*tinfo limit*/NULL,
					   &spi->bus_width, ROLE_UNKNOWN);
			ahc_set_width(ahc, &devinfo, spi->bus_width,
				      update_type, /*paused*/FALSE);
		}

		/* Fill unspecified sync parameters from stored settings. */
		if ((spi->valid & CTS_SPI_VALID_PPR_OPTIONS) == 0) {
			if (update_type == AHC_TRANS_USER)
				spi->ppr_options = tinfo->user.ppr_options;
			else
				spi->ppr_options = tinfo->goal.ppr_options;
		}

		if ((spi->valid & CTS_SPI_VALID_SYNC_OFFSET) == 0) {
			if (update_type == AHC_TRANS_USER)
				spi->sync_offset = tinfo->user.offset;
			else
				spi->sync_offset = tinfo->goal.offset;
		}

		if ((spi->valid & CTS_SPI_VALID_SYNC_RATE) == 0) {
			if (update_type == AHC_TRANS_USER)
				spi->sync_period = tinfo->user.period;
			else
				spi->sync_period = tinfo->goal.period;
		}

		if (((spi->valid & CTS_SPI_VALID_SYNC_RATE) != 0)
		 || ((spi->valid & CTS_SPI_VALID_SYNC_OFFSET) != 0)) {
			struct ahc_syncrate *syncrate;
			u_int maxsync;

			/* Chip generation bounds the fastest legal rate. */
			if ((ahc->features & AHC_ULTRA2) != 0)
				maxsync = AHC_SYNCRATE_DT;
			else if ((ahc->features & AHC_ULTRA) != 0)
				maxsync = AHC_SYNCRATE_ULTRA;
			else
				maxsync = AHC_SYNCRATE_FAST;

			/* DT transfers require a 16-bit bus. */
			if (spi->bus_width != MSG_EXT_WDTR_BUS_16_BIT)
				spi->ppr_options &= ~MSG_EXT_PPR_DT_REQ;

			syncrate = ahc_find_syncrate(ahc, &spi->sync_period,
						     &spi->ppr_options,
						     maxsync);
			ahc_validate_offset(ahc, /*tinfo limit*/NULL,
					    syncrate, &spi->sync_offset,
					    spi->bus_width, ROLE_UNKNOWN);

			/* We use a period of 0 to represent async */
			if (spi->sync_offset == 0) {
				spi->sync_period = 0;
				spi->ppr_options = 0;
			}

			ahc_set_syncrate(ahc, &devinfo, syncrate,
					 spi->sync_period, spi->sync_offset,
					 spi->ppr_options, update_type,
					 /*paused*/FALSE);
		}
		ahc_unlock(ahc, &s);
		ccb->ccb_h.status = CAM_REQ_CMP;
		xpt_done(ccb);
		break;
	}
	case XPT_GET_TRAN_SETTINGS:
	/* Get default/user set transfer settings for the target */
	{

		ahc_lock(ahc, &s);
		ahc_get_tran_settings(ahc, SIM_SCSI_ID(ahc, sim),
				      SIM_CHANNEL(ahc, sim), &ccb->cts);
		ahc_unlock(ahc, &s);
		xpt_done(ccb);
		break;
	}
	case XPT_CALC_GEOMETRY:
	{
		int extended;

		/* Extended translation is a per-channel setting. */
		extended = SIM_IS_SCSIBUS_B(ahc, sim)
			 ? ahc->flags & AHC_EXTENDED_TRANS_B
			 : ahc->flags & AHC_EXTENDED_TRANS_A;
		aic_calc_geometry(&ccb->ccg, extended);
		xpt_done(ccb);
		break;
	}
	case XPT_RESET_BUS:		/* Reset the specified SCSI bus */
	{
		int  found;

		ahc_lock(ahc, &s);
		found = ahc_reset_channel(ahc, SIM_CHANNEL(ahc, sim),
					  /*initiate reset*/TRUE);
		ahc_unlock(ahc, &s);
		if (bootverbose) {
			xpt_print_path(SIM_PATH(ahc, sim));
			printf("SCSI bus reset delivered. "
			       "%d SCBs aborted.\n", found);
		}
		ccb->ccb_h.status = CAM_REQ_CMP;
		xpt_done(ccb);
		break;
	}
	case XPT_TERM_IO:		/* Terminate the I/O process */
		/* XXX Implement */
		ccb->ccb_h.status = CAM_REQ_INVALID;
		xpt_done(ccb);
		break;
	case XPT_PATH_INQ:		/* Path routing inquiry */
	{
		struct ccb_pathinq *cpi = &ccb->cpi;

		/* Advertise this HBA's capabilities to CAM. */
		cpi->version_num = 1; /* XXX??? */
		cpi->hba_inquiry = PI_SDTR_ABLE|PI_TAG_ABLE;
		if ((ahc->features & AHC_WIDE) != 0)
			cpi->hba_inquiry |= PI_WIDE_16;
		if ((ahc->features & AHC_TARGETMODE) != 0) {
			cpi->target_sprt = PIT_PROCESSOR
					 | PIT_DISCONNECT
					 | PIT_TERM_IO;
		} else {
			cpi->target_sprt = 0;
		}
		cpi->hba_misc = 0;
		cpi->hba_eng_cnt = 0;
		cpi->max_target = (ahc->features & AHC_WIDE) ? 15 : 7;
		cpi->max_lun = AHC_NUM_LUNS - 1;
		if (SIM_IS_SCSIBUS_B(ahc, sim)) {
			cpi->initiator_id = ahc->our_id_b;
			if ((ahc->flags & AHC_RESET_BUS_B) == 0)
				cpi->hba_misc |= PIM_NOBUSRESET;
		} else {
			cpi->initiator_id = ahc->our_id;
			if ((ahc->flags & AHC_RESET_BUS_A) == 0)
				cpi->hba_misc |= PIM_NOBUSRESET;
		}
		cpi->bus_id = cam_sim_bus(sim);
		cpi->base_transfer_speed = 3300;
		strncpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN);
		strncpy(cpi->hba_vid, "Adaptec", HBA_IDLEN);
		strncpy(cpi->dev_name, cam_sim_name(sim), DEV_IDLEN);
		cpi->unit_number = cam_sim_unit(sim);
		cpi->protocol = PROTO_SCSI;
		cpi->protocol_version = SCSI_REV_2;
		cpi->transport = XPORT_SPI;
		cpi->transport_version = 2;
		cpi->xport_specific.spi.ppr_options = SID_SPI_CLOCK_ST;
		if ((ahc->features & AHC_DT) != 0) {
			/* DT-capable chips speak SPI-4 style PPR. */
			cpi->transport_version = 3;
			cpi->xport_specific.spi.ppr_options =
			    SID_SPI_CLOCK_DT_ST;
		}
		cpi->ccb_h.status = CAM_REQ_CMP;
		xpt_done(ccb);
		break;
	}
	default:
		ccb->ccb_h.status = CAM_PROVIDE_FAIL;
		xpt_done(ccb);
		break;
	}
}
834
835static void
836ahc_get_tran_settings(struct ahc_softc *ahc, int our_id, char channel,
837		      struct ccb_trans_settings *cts)
838{
839	struct	ahc_devinfo devinfo;
840	struct	ccb_trans_settings_scsi *scsi;
841	struct	ccb_trans_settings_spi *spi;
842	struct	ahc_initiator_tinfo *targ_info;
843	struct	ahc_tmode_tstate *tstate;
844	struct	ahc_transinfo *tinfo;
845
846	scsi = &cts->proto_specific.scsi;
847	spi = &cts->xport_specific.spi;
848	ahc_compile_devinfo(&devinfo, our_id,
849			    cts->ccb_h.target_id,
850			    cts->ccb_h.target_lun,
851			    channel, ROLE_UNKNOWN);
852	targ_info = ahc_fetch_transinfo(ahc, devinfo.channel,
853					devinfo.our_scsiid,
854					devinfo.target, &tstate);
855
856	if (cts->type == CTS_TYPE_CURRENT_SETTINGS)
857		tinfo = &targ_info->curr;
858	else
859		tinfo = &targ_info->user;
860
861	scsi->flags &= ~CTS_SCSI_FLAGS_TAG_ENB;
862	spi->flags &= ~CTS_SPI_FLAGS_DISC_ENB;
863	if (cts->type == CTS_TYPE_USER_SETTINGS) {
864		if ((ahc->user_discenable & devinfo.target_mask) != 0)
865			spi->flags |= CTS_SPI_FLAGS_DISC_ENB;
866
867		if ((ahc->user_tagenable & devinfo.target_mask) != 0)
868			scsi->flags |= CTS_SCSI_FLAGS_TAG_ENB;
869	} else {
870		if ((tstate->discenable & devinfo.target_mask) != 0)
871			spi->flags |= CTS_SPI_FLAGS_DISC_ENB;
872
873		if ((tstate->tagenable & devinfo.target_mask) != 0)
874			scsi->flags |= CTS_SCSI_FLAGS_TAG_ENB;
875	}
876	cts->protocol_version = tinfo->protocol_version;
877	cts->transport_version = tinfo->transport_version;
878
879	spi->sync_period = tinfo->period;
880	spi->sync_offset = tinfo->offset;
881	spi->bus_width = tinfo->width;
882	spi->ppr_options = tinfo->ppr_options;
883
884	cts->protocol = PROTO_SCSI;
885	cts->transport = XPORT_SPI;
886	spi->valid = CTS_SPI_VALID_SYNC_RATE
887		   | CTS_SPI_VALID_SYNC_OFFSET
888		   | CTS_SPI_VALID_BUS_WIDTH
889		   | CTS_SPI_VALID_PPR_OPTIONS;
890
891	if (cts->ccb_h.target_lun != CAM_LUN_WILDCARD) {
892		scsi->valid = CTS_SCSI_VALID_TQ;
893		spi->valid |= CTS_SPI_VALID_DISC;
894	} else {
895		scsi->valid = 0;
896	}
897
898	cts->ccb_h.status = CAM_REQ_CMP;
899}
900
901static void
902ahc_async(void *callback_arg, uint32_t code, struct cam_path *path, void *arg)
903{
904	struct ahc_softc *ahc;
905	struct cam_sim *sim;
906
907	sim = (struct cam_sim *)callback_arg;
908	ahc = (struct ahc_softc *)cam_sim_softc(sim);
909	switch (code) {
910	case AC_LOST_DEVICE:
911	{
912		struct	ahc_devinfo devinfo;
913		long	s;
914
915		ahc_compile_devinfo(&devinfo, SIM_SCSI_ID(ahc, sim),
916				    xpt_path_target_id(path),
917				    xpt_path_lun_id(path),
918				    SIM_CHANNEL(ahc, sim),
919				    ROLE_UNKNOWN);
920
921		/*
922		 * Revert to async/narrow transfers
923		 * for the next device.
924		 */
925		ahc_lock(ahc, &s);
926		ahc_set_width(ahc, &devinfo, MSG_EXT_WDTR_BUS_8_BIT,
927			      AHC_TRANS_GOAL|AHC_TRANS_CUR, /*paused*/FALSE);
928		ahc_set_syncrate(ahc, &devinfo, /*syncrate*/NULL,
929				 /*period*/0, /*offset*/0, /*ppr_options*/0,
930				 AHC_TRANS_GOAL|AHC_TRANS_CUR,
931				 /*paused*/FALSE);
932		ahc_unlock(ahc, &s);
933		break;
934	}
935	default:
936		break;
937	}
938}
939
940static void
941ahc_execute_scb(void *arg, bus_dma_segment_t *dm_segs, int nsegments,
942		int error)
943{
944	struct	scb *scb;
945	union	ccb *ccb;
946	struct	ahc_softc *ahc;
947	struct	ahc_initiator_tinfo *tinfo;
948	struct	ahc_tmode_tstate *tstate;
949	u_int	mask;
950	long	s;
951
952	scb = (struct scb *)arg;
953	ccb = scb->io_ctx;
954	ahc = scb->ahc_softc;
955
956	if (error != 0) {
957		if (error == EFBIG)
958			aic_set_transaction_status(scb, CAM_REQ_TOO_BIG);
959		else
960			aic_set_transaction_status(scb, CAM_REQ_CMP_ERR);
961		if (nsegments != 0)
962			bus_dmamap_unload(ahc->buffer_dmat, scb->dmamap);
963		ahc_lock(ahc, &s);
964		ahc_free_scb(ahc, scb);
965		ahc_unlock(ahc, &s);
966		xpt_done(ccb);
967		return;
968	}
969	if (nsegments != 0) {
970		struct	  ahc_dma_seg *sg;
971		bus_dma_segment_t *end_seg;
972		bus_dmasync_op_t op;
973
974		end_seg = dm_segs + nsegments;
975
976		/* Copy the segments into our SG list */
977		sg = scb->sg_list;
978		while (dm_segs < end_seg) {
979			uint32_t len;
980
981			sg->addr = aic_htole32(dm_segs->ds_addr);
982			len = dm_segs->ds_len
983			    | ((dm_segs->ds_addr >> 8) & 0x7F000000);
984			sg->len = aic_htole32(len);
985			sg++;
986			dm_segs++;
987		}
988
989		/*
990		 * Note where to find the SG entries in bus space.
991		 * We also set the full residual flag which the
992		 * sequencer will clear as soon as a data transfer
993		 * occurs.
994		 */
995		scb->hscb->sgptr = aic_htole32(scb->sg_list_phys|SG_FULL_RESID);
996
997		if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN)
998			op = BUS_DMASYNC_PREREAD;
999		else
1000			op = BUS_DMASYNC_PREWRITE;
1001
1002		bus_dmamap_sync(ahc->buffer_dmat, scb->dmamap, op);
1003
1004		if (ccb->ccb_h.func_code == XPT_CONT_TARGET_IO) {
1005			struct target_data *tdata;
1006
1007			tdata = &scb->hscb->shared_data.tdata;
1008			tdata->target_phases |= DPHASE_PENDING;
1009			/*
1010			 * CAM data direction is relative to the initiator.
1011			 */
1012			if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_OUT)
1013				tdata->data_phase = P_DATAOUT;
1014			else
1015				tdata->data_phase = P_DATAIN;
1016
1017			/*
1018			 * If the transfer is of an odd length and in the
1019			 * "in" direction (scsi->HostBus), then it may
1020			 * trigger a bug in the 'WideODD' feature of
1021			 * non-Ultra2 chips.  Force the total data-length
1022			 * to be even by adding an extra, 1 byte, SG,
1023			 * element.  We do this even if we are not currently
1024			 * negotiated wide as negotiation could occur before
1025			 * this command is executed.
1026			 */
1027			if ((ahc->bugs & AHC_TMODE_WIDEODD_BUG) != 0
1028			 && (ccb->csio.dxfer_len & 0x1) != 0
1029			 && (ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_OUT) {
1030
1031				nsegments++;
1032				if (nsegments > AHC_NSEG) {
1033
1034					aic_set_transaction_status(scb,
1035					    CAM_REQ_TOO_BIG);
1036					bus_dmamap_unload(ahc->buffer_dmat,
1037							  scb->dmamap);
1038					ahc_lock(ahc, &s);
1039					ahc_free_scb(ahc, scb);
1040					ahc_unlock(ahc, &s);
1041					xpt_done(ccb);
1042					return;
1043				}
1044				sg->addr = aic_htole32(ahc->dma_bug_buf);
1045				sg->len = aic_htole32(1);
1046				sg++;
1047			}
1048		}
1049		sg--;
1050		sg->len |= aic_htole32(AHC_DMA_LAST_SEG);
1051
1052		/* Copy the first SG into the "current" data pointer area */
1053		scb->hscb->dataptr = scb->sg_list->addr;
1054		scb->hscb->datacnt = scb->sg_list->len;
1055	} else {
1056		scb->hscb->sgptr = aic_htole32(SG_LIST_NULL);
1057		scb->hscb->dataptr = 0;
1058		scb->hscb->datacnt = 0;
1059	}
1060
1061	scb->sg_count = nsegments;
1062
1063	ahc_lock(ahc, &s);
1064
1065	/*
1066	 * Last time we need to check if this SCB needs to
1067	 * be aborted.
1068	 */
1069	if (aic_get_transaction_status(scb) != CAM_REQ_INPROG) {
1070		if (nsegments != 0)
1071			bus_dmamap_unload(ahc->buffer_dmat, scb->dmamap);
1072		ahc_free_scb(ahc, scb);
1073		ahc_unlock(ahc, &s);
1074		xpt_done(ccb);
1075		return;
1076	}
1077
1078	tinfo = ahc_fetch_transinfo(ahc, SCSIID_CHANNEL(ahc, scb->hscb->scsiid),
1079				    SCSIID_OUR_ID(scb->hscb->scsiid),
1080				    SCSIID_TARGET(ahc, scb->hscb->scsiid),
1081				    &tstate);
1082
1083	mask = SCB_GET_TARGET_MASK(ahc, scb);
1084	scb->hscb->scsirate = tinfo->scsirate;
1085	scb->hscb->scsioffset = tinfo->curr.offset;
1086	if ((tstate->ultraenb & mask) != 0)
1087		scb->hscb->control |= ULTRAENB;
1088
1089	if ((tstate->discenable & mask) != 0
1090	 && (ccb->ccb_h.flags & CAM_DIS_DISCONNECT) == 0)
1091		scb->hscb->control |= DISCENB;
1092
1093	if ((ccb->ccb_h.flags & CAM_NEGOTIATE) != 0
1094	 && (tinfo->goal.width != 0
1095	  || tinfo->goal.offset != 0
1096	  || tinfo->goal.ppr_options != 0)) {
1097		scb->flags |= SCB_NEGOTIATE;
1098		scb->hscb->control |= MK_MESSAGE;
1099	} else if ((tstate->auto_negotiate & mask) != 0) {
1100		scb->flags |= SCB_AUTO_NEGOTIATE;
1101		scb->hscb->control |= MK_MESSAGE;
1102	}
1103
1104	LIST_INSERT_HEAD(&ahc->pending_scbs, scb, pending_links);
1105
1106	ccb->ccb_h.status |= CAM_SIM_QUEUED;
1107
1108	/*
1109	 * We only allow one untagged transaction
1110	 * per target in the initiator role unless
1111	 * we are storing a full busy target *lun*
1112	 * table in SCB space.
1113	 */
1114	if ((scb->hscb->control & (TARGET_SCB|TAG_ENB)) == 0
1115	 && (ahc->flags & AHC_SCB_BTT) == 0) {
1116		struct scb_tailq *untagged_q;
1117		int target_offset;
1118
1119		target_offset = SCB_GET_TARGET_OFFSET(ahc, scb);
1120		untagged_q = &(ahc->untagged_queues[target_offset]);
1121		TAILQ_INSERT_TAIL(untagged_q, scb, links.tqe);
1122		scb->flags |= SCB_UNTAGGEDQ;
1123		if (TAILQ_FIRST(untagged_q) != scb) {
1124			ahc_unlock(ahc, &s);
1125			return;
1126		}
1127	}
1128	scb->flags |= SCB_ACTIVE;
1129
1130	/*
1131	 * Timers are disabled while recovery is in progress.
1132	 */
1133	aic_scb_timer_start(scb);
1134
1135	if ((scb->flags & SCB_TARGET_IMMEDIATE) != 0) {
1136		/* Define a mapping from our tag to the SCB. */
1137		ahc->scb_data->scbindex[scb->hscb->tag] = scb;
1138		ahc_pause(ahc);
1139		if ((ahc->flags & AHC_PAGESCBS) == 0)
1140			ahc_outb(ahc, SCBPTR, scb->hscb->tag);
1141		ahc_outb(ahc, TARG_IMMEDIATE_SCB, scb->hscb->tag);
1142		ahc_unpause(ahc);
1143	} else {
1144		ahc_queue_scb(ahc, scb);
1145	}
1146
1147	ahc_unlock(ahc, &s);
1148}
1149
/*
 * Polled-mode entry point used by CAM (e.g. during crash dumps):
 * service any pending controller interrupts synchronously.
 */
static void
ahc_poll(struct cam_sim *sim)
{
	ahc_intr((struct ahc_softc *)cam_sim_softc(sim));
}
1158
/*
 * Copy the CDB into the hardware SCB and kick off data-buffer mapping
 * for a CAM SCSI I/O request.  Mapping completion (immediate or
 * deferred) always funnels into ahc_execute_scb().
 */
static void
ahc_setup_data(struct ahc_softc *ahc, struct cam_sim *sim,
	       struct ccb_scsiio *csio, struct scb *scb)
{
	struct hardware_scb *hscb;
	struct ccb_hdr *ccb_h;

	hscb = scb->hscb;
	ccb_h = &csio->ccb_h;

	csio->resid = 0;
	csio->sense_resid = 0;
	if (ccb_h->func_code == XPT_SCSI_IO) {
		hscb->cdb_len = csio->cdb_len;
		if ((ccb_h->flags & CAM_CDB_POINTER) != 0) {

			/*
			 * Reject CDBs we cannot copy: longer than our
			 * 32-byte buffer, or supplied as a physical
			 * address (we only handle virtual pointers).
			 */
			if (hscb->cdb_len > sizeof(hscb->cdb32)
			 || (ccb_h->flags & CAM_CDB_PHYS) != 0) {
				u_long s;

				aic_set_transaction_status(scb,
							   CAM_REQ_INVALID);
				ahc_lock(ahc, &s);
				ahc_free_scb(ahc, scb);
				ahc_unlock(ahc, &s);
				xpt_done((union ccb *)csio);
				return;
			}
			/*
			 * CDBs longer than 12 bytes do not fit in the
			 * shared_data area; stash them in cdb32 and
			 * flag the SCB so the sequencer uses a pointer.
			 */
			if (hscb->cdb_len > 12) {
				memcpy(hscb->cdb32,
				       csio->cdb_io.cdb_ptr,
				       hscb->cdb_len);
				scb->flags |= SCB_CDB32_PTR;
			} else {
				memcpy(hscb->shared_data.cdb,
				       csio->cdb_io.cdb_ptr,
				       hscb->cdb_len);
			}
		} else {
			/* CDB bytes are embedded in the CCB itself. */
			if (hscb->cdb_len > 12) {
				memcpy(hscb->cdb32, csio->cdb_io.cdb_bytes,
				       hscb->cdb_len);
				scb->flags |= SCB_CDB32_PTR;
			} else {
				memcpy(hscb->shared_data.cdb,
				       csio->cdb_io.cdb_bytes,
				       hscb->cdb_len);
			}
		}
	}

	/* Only use S/G if there is a transfer */
	if ((ccb_h->flags & CAM_DIR_MASK) != CAM_DIR_NONE) {
		if ((ccb_h->flags & CAM_SCATTER_VALID) == 0) {
			/* We've been given a pointer to a single buffer */
			if ((ccb_h->flags & CAM_DATA_PHYS) == 0) {
				int s;
				int error;

				/*
				 * busdma may defer the callback; block
				 * soft VM interrupts so the load and the
				 * EINPROGRESS handling below are atomic.
				 */
				s = splsoftvm();
				error = bus_dmamap_load(ahc->buffer_dmat,
							scb->dmamap,
							csio->data_ptr,
							csio->dxfer_len,
							ahc_execute_scb,
							scb, /*flags*/0);
				if (error == EINPROGRESS) {
					/*
					 * So as to maintain ordering,
					 * freeze the controller queue
					 * until our mapping is
					 * returned.
					 */
					xpt_freeze_simq(sim,
							/*count*/1);
					scb->io_ctx->ccb_h.status |=
					    CAM_RELEASE_SIMQ;
				}
				splx(s);
			} else {
				struct bus_dma_segment seg;

				/* Pointer to physical buffer */
				if (csio->dxfer_len > AHC_MAXTRANSFER_SIZE)
					panic("ahc_setup_data - Transfer size "
					      "larger than can device max");

				/*
				 * Build a one-element segment list and
				 * invoke the completion path directly.
				 */
				seg.ds_addr =
				    (bus_addr_t)(vm_offset_t)csio->data_ptr;
				seg.ds_len = csio->dxfer_len;
				ahc_execute_scb(scb, &seg, 1, 0);
			}
		} else {
			struct bus_dma_segment *segs;

			if ((ccb_h->flags & CAM_DATA_PHYS) != 0)
				panic("ahc_setup_data - Physical segment "
				      "pointers unsupported");

			if ((ccb_h->flags & CAM_SG_LIST_PHYS) == 0)
				panic("ahc_setup_data - Virtual segment "
				      "addresses unsupported");

			/* Just use the segments provided */
			segs = (struct bus_dma_segment *)csio->data_ptr;
			ahc_execute_scb(scb, segs, csio->sglist_cnt, 0);
		}
	} else {
		/* No data phase: go straight to execution. */
		ahc_execute_scb(scb, NULL, 0, 0);
	}
}
1270
/*
 * Handle an XPT_ABORT request.  Target-mode CCBs that are still parked
 * on our accept/notify queues can be aborted directly; anything already
 * in flight (including initiator XPT_SCSI_IO) is not yet supported.
 * The abort-request CCB itself is always completed via xpt_done().
 */
static void
ahc_abort_ccb(struct ahc_softc *ahc, struct cam_sim *sim, union ccb *ccb)
{
	union ccb *abort_ccb;

	abort_ccb = ccb->cab.abort_ccb;
	switch (abort_ccb->ccb_h.func_code) {
	case XPT_ACCEPT_TARGET_IO:
	case XPT_IMMED_NOTIFY:
	case XPT_CONT_TARGET_IO:
	{
		struct ahc_tmode_tstate *tstate;
		struct ahc_tmode_lstate *lstate;
		struct ccb_hdr_slist *list;
		cam_status status;

		status = ahc_find_tmode_devs(ahc, sim, abort_ccb, &tstate,
					     &lstate, TRUE);

		if (status != CAM_REQ_CMP) {
			ccb->ccb_h.status = status;
			break;
		}

		/* Pick the pending-CCB queue this CCB type lives on. */
		if (abort_ccb->ccb_h.func_code == XPT_ACCEPT_TARGET_IO)
			list = &lstate->accept_tios;
		else if (abort_ccb->ccb_h.func_code == XPT_IMMED_NOTIFY)
			list = &lstate->immed_notifies;
		else
			list = NULL;

		if (list != NULL) {
			struct ccb_hdr *curelm;
			int found;

			/*
			 * Manually unlink the CCB from the singly-linked
			 * list: special-case the head, otherwise walk
			 * until the next element is the target.
			 */
			curelm = SLIST_FIRST(list);
			found = 0;
			if (curelm == &abort_ccb->ccb_h) {
				found = 1;
				SLIST_REMOVE_HEAD(list, sim_links.sle);
			} else {
				while(curelm != NULL) {
					struct ccb_hdr *nextelm;

					nextelm =
					    SLIST_NEXT(curelm, sim_links.sle);

					if (nextelm == &abort_ccb->ccb_h) {
						found = 1;
						SLIST_NEXT(curelm,
							   sim_links.sle) =
						    SLIST_NEXT(nextelm,
							       sim_links.sle);
						break;
					}
					curelm = nextelm;
				}
			}

			if (found) {
				abort_ccb->ccb_h.status = CAM_REQ_ABORTED;
				xpt_done(abort_ccb);
				ccb->ccb_h.status = CAM_REQ_CMP;
			} else {
				xpt_print_path(abort_ccb->ccb_h.path);
				printf("Not found\n");
				ccb->ccb_h.status = CAM_PATH_INVALID;
			}
			break;
		}
		/* FALLTHROUGH */
	}
	case XPT_SCSI_IO:
		/* XXX Fully implement the hard ones */
		ccb->ccb_h.status = CAM_UA_ABORT;
		break;
	default:
		ccb->ccb_h.status = CAM_REQ_INVALID;
		break;
	}
	xpt_done(ccb);
}
1353
1354void
1355ahc_send_async(struct ahc_softc *ahc, char channel, u_int target,
1356		u_int lun, ac_code code, void *opt_arg)
1357{
1358	struct	ccb_trans_settings cts;
1359	struct cam_path *path;
1360	void *arg;
1361	int error;
1362
1363	arg = NULL;
1364	error = ahc_create_path(ahc, channel, target, lun, &path);
1365
1366	if (error != CAM_REQ_CMP)
1367		return;
1368
1369	switch (code) {
1370	case AC_TRANSFER_NEG:
1371	{
1372		struct	ccb_trans_settings_scsi *scsi;
1373
1374		cts.type = CTS_TYPE_CURRENT_SETTINGS;
1375		scsi = &cts.proto_specific.scsi;
1376		cts.ccb_h.path = path;
1377		cts.ccb_h.target_id = target;
1378		cts.ccb_h.target_lun = lun;
1379		ahc_get_tran_settings(ahc, channel == 'A' ? ahc->our_id
1380							  : ahc->our_id_b,
1381				      channel, &cts);
1382		arg = &cts;
1383		scsi->valid &= ~CTS_SCSI_VALID_TQ;
1384		scsi->flags &= ~CTS_SCSI_FLAGS_TAG_ENB;
1385		if (opt_arg == NULL)
1386			break;
1387		if (*((ahc_queue_alg *)opt_arg) == AHC_QUEUE_TAGGED)
1388			scsi->flags |= ~CTS_SCSI_FLAGS_TAG_ENB;
1389		scsi->valid |= CTS_SCSI_VALID_TQ;
1390		break;
1391	}
1392	case AC_SENT_BDR:
1393	case AC_BUS_RESET:
1394		break;
1395	default:
1396		panic("ahc_send_async: Unexpected async event");
1397	}
1398	xpt_async(code, path, arg);
1399	xpt_free_path(path);
1400}
1401
/*
 * Platform hook invoked by the core when tagged queuing is enabled or
 * disabled for a device.  FreeBSD tracks tag state through CAM, so no
 * platform-specific work is required here.
 */
void
ahc_platform_set_tags(struct ahc_softc *ahc,
		      struct ahc_devinfo *devinfo, int enable)
{
}
1407
1408int
1409ahc_platform_alloc(struct ahc_softc *ahc, void *platform_arg)
1410{
1411	ahc->platform_data = malloc(sizeof(struct ahc_platform_data), M_DEVBUF,
1412	    M_NOWAIT | M_ZERO);
1413	if (ahc->platform_data == NULL)
1414		return (ENOMEM);
1415	return (0);
1416}
1417
/*
 * Release all FreeBSD-specific controller resources: register and IRQ
 * bus resources, both CAM SIMs (secondary channel first), the shutdown
 * event handler, and finally the platform_data structure itself.
 */
void
ahc_platform_free(struct ahc_softc *ahc)
{
	struct ahc_platform_data *pdata;

	pdata = ahc->platform_data;
	if (pdata != NULL) {
		if (pdata->regs != NULL)
			bus_release_resource(ahc->dev_softc,
					     pdata->regs_res_type,
					     pdata->regs_res_id,
					     pdata->regs);

		/*
		 * NOTE(review): rid 0 is hard-coded here while the regs
		 * release above uses pdata->regs_res_id — presumably the
		 * IRQ is always allocated with rid 0; confirm against
		 * the attach path (pdata->irq_res_type is stored, a
		 * matching irq_res_id is not used).
		 */
		if (pdata->irq != NULL)
			bus_release_resource(ahc->dev_softc,
					     pdata->irq_res_type,
					     0, pdata->irq);

		/* Tear down the secondary-channel SIM, if registered. */
		if (pdata->sim_b != NULL) {
			xpt_async(AC_LOST_DEVICE, pdata->path_b, NULL);
			xpt_free_path(pdata->path_b);
			xpt_bus_deregister(cam_sim_path(pdata->sim_b));
			cam_sim_free(pdata->sim_b, /*free_devq*/TRUE);
		}
		/* Tear down the primary-channel SIM. */
		if (pdata->sim != NULL) {
			xpt_async(AC_LOST_DEVICE, pdata->path, NULL);
			xpt_free_path(pdata->path);
			xpt_bus_deregister(cam_sim_path(pdata->sim));
			cam_sim_free(pdata->sim, /*free_devq*/TRUE);
		}
		if (pdata->eh != NULL)
			EVENTHANDLER_DEREGISTER(shutdown_final, pdata->eh);
		free(ahc->platform_data, M_DEVBUF);
	}
}
1453
/*
 * Comparison hook used by the core when ordering controller softcs.
 * FreeBSD does not sort softcs, so every pair compares as equal.
 */
int
ahc_softc_comp(struct ahc_softc *lahc, struct ahc_softc *rahc)
{
	return (0);
}
1460
/*
 * Device detach method.  Removes the softc from the global controller
 * list (under the list lock), quiesces interrupts (under the softc
 * lock), tears down the interrupt handler, and frees the controller.
 * Returns ENOENT if the softc was already detached.
 */
int
ahc_detach(device_t dev)
{
	struct ahc_softc *ahc;
	u_long l;
	u_long s;

	ahc_list_lock(&l);
	device_printf(dev, "detaching device\n");
	ahc = device_get_softc(dev);
	/* Verify the softc is still on the global list before touching it. */
	ahc = ahc_find_softc(ahc);
	if (ahc == NULL) {
		device_printf(dev, "aic7xxx already detached\n");
		ahc_list_unlock(&l);
		return (ENOENT);
	}
	TAILQ_REMOVE(&ahc_tailq, ahc, links);
	/* Drop the list lock before taking the per-softc lock. */
	ahc_list_unlock(&l);
	ahc_lock(ahc, &s);
	ahc_intr_enable(ahc, FALSE);
	bus_teardown_intr(dev, ahc->platform_data->irq, ahc->platform_data->ih);
	ahc_unlock(ahc, &s);
	ahc_free(ahc);
	return (0);
}
1486
#if 0
/*
 * Disabled debug helper: hex-dump the raw bytes of a received target
 * command, eight values per line.  The byte range spans from
 * initiator_channel of this command up to (but not including) the
 * same field of the next command in the array.
 */
static void
ahc_dump_targcmd(struct target_cmd *cmd)
{
	uint8_t *byte;
	uint8_t *last_byte;
	int i;

	byte = &cmd->initiator_channel;
	/* Debugging info for received commands */
	last_byte = &cmd[1].initiator_channel;

	i = 0;
	while (byte < last_byte) {
		if (i == 0)
			printf("\t");
		printf("%#x", *byte++);
		i++;
		if (i == 8) {
			printf("\n");
			i = 0;
		} else {
			printf(", ");
		}
	}
}
#endif
1514
1515static int
1516ahc_modevent(module_t mod, int type, void *data)
1517{
1518	/* XXX Deal with busy status on unload. */
1519	/* XXX Deal with unknown events */
1520	return 0;
1521}
1522
/* Kernel module glue for the shared aic7xxx core. */
static moduledata_t ahc_mod = {
	"ahc",		/* module name */
	ahc_modevent,	/* event handler */
	NULL		/* extra data */
};

/* Register at driver-init time; depends on the CAM transport layer. */
DECLARE_MODULE(ahc, ahc_mod, SI_SUB_DRIVERS, SI_ORDER_MIDDLE);
MODULE_DEPEND(ahc, cam, 1, 1, 1);
MODULE_VERSION(ahc, 1);
1532