aic79xx_osm.c revision 113296
1/*
2 * Bus independent FreeBSD shim for the aic7xxx based adaptec SCSI controllers
3 *
4 * Copyright (c) 1994-2002 Justin T. Gibbs.
5 * Copyright (c) 2001-2002 Adaptec Inc.
6 * All rights reserved.
7 *
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions
10 * are met:
11 * 1. Redistributions of source code must retain the above copyright
12 *    notice, this list of conditions, and the following disclaimer,
13 *    without modification.
14 * 2. The name of the author may not be used to endorse or promote products
15 *    derived from this software without specific prior written permission.
16 *
17 * Alternatively, this software may be distributed under the terms of the
18 * GNU Public License ("GPL").
19 *
20 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
21 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
24 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
25 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
26 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
27 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
28 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
29 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
30 * SUCH DAMAGE.
31 *
32 * $Id: //depot/aic7xxx/freebsd/dev/aic7xxx/aic79xx_osm.c#26 $
33 *
34 * $FreeBSD: head/sys/dev/aic7xxx/aic79xx_osm.c 113296 2003-04-09 14:58:02Z jake $
35 */
36
37#include <dev/aic7xxx/aic79xx_osm.h>
38#include <dev/aic7xxx/aic79xx_inline.h>
39
40#include "opt_ddb.h"
41#ifdef DDB
42#include <ddb/ddb.h>
43#endif
44
45#ifndef AHD_TMODE_ENABLE
46#define AHD_TMODE_ENABLE 0
47#endif
48
49#define ccb_scb_ptr spriv_ptr0
50
51#if UNUSED
52static void	ahd_dump_targcmd(struct target_cmd *cmd);
53#endif
54static int	ahd_modevent(module_t mod, int type, void *data);
55static void	ahd_action(struct cam_sim *sim, union ccb *ccb);
56static void	ahd_set_tran_settings(struct ahd_softc *ahd,
57				      int our_id, char channel,
58				      struct ccb_trans_settings *cts);
59static void	ahd_get_tran_settings(struct ahd_softc *ahd,
60				      int our_id, char channel,
61				      struct ccb_trans_settings *cts);
62static void	ahd_async(void *callback_arg, uint32_t code,
63			  struct cam_path *path, void *arg);
64static void	ahd_execute_scb(void *arg, bus_dma_segment_t *dm_segs,
65				int nsegments, int error);
66static void	ahd_poll(struct cam_sim *sim);
67static void	ahd_setup_data(struct ahd_softc *ahd, struct cam_sim *sim,
68			       struct ccb_scsiio *csio, struct scb *scb);
69static void	ahd_abort_ccb(struct ahd_softc *ahd, struct cam_sim *sim,
70			      union ccb *ccb);
71static int	ahd_create_path(struct ahd_softc *ahd,
72				char channel, u_int target, u_int lun,
73				struct cam_path **path);
74
75#if NOT_YET
76static void	ahd_set_recoveryscb(struct ahd_softc *ahd, struct scb *scb);
77#endif
78
79static int
80ahd_create_path(struct ahd_softc *ahd, char channel, u_int target,
81	        u_int lun, struct cam_path **path)
82{
83	path_id_t path_id;
84
85	if (channel == 'B')
86		path_id = cam_sim_path(ahd->platform_data->sim_b);
87	else
88		path_id = cam_sim_path(ahd->platform_data->sim);
89
90	return (xpt_create_path(path, /*periph*/NULL,
91				path_id, target, lun));
92}
93
94int
95ahd_map_int(struct ahd_softc *ahd)
96{
97	int error;
98
99	/* Hook up our interrupt handler */
100	error = bus_setup_intr(ahd->dev_softc, ahd->platform_data->irq,
101			       INTR_TYPE_CAM, ahd_platform_intr, ahd,
102			       &ahd->platform_data->ih);
103	if (error != 0)
104		device_printf(ahd->dev_softc, "bus_setup_intr() failed: %d\n",
105			      error);
106	return (error);
107}
108
109/*
110 * Attach all the sub-devices we can find
111 */
112int
113ahd_attach(struct ahd_softc *ahd)
114{
115	char   ahd_info[256];
116	struct ccb_setasync csa;
117	struct cam_devq *devq;
118	struct cam_sim *sim;
119	struct cam_path *path;
120	long s;
121	int count;
122
123	count = 0;
124	sim = NULL;
125
126	ahd_controller_info(ahd, ahd_info);
127	printf("%s\n", ahd_info);
128	ahd_lock(ahd, &s);
129
130	/*
131	 * Create the device queue for our SIM(s).
132	 */
133	devq = cam_simq_alloc(AHD_MAX_QUEUE);
134	if (devq == NULL)
135		goto fail;
136
137	/*
138	 * Construct our SIM entry
139	 */
140	sim = cam_sim_alloc(ahd_action, ahd_poll, "ahd", ahd,
141			    device_get_unit(ahd->dev_softc),
142			    1, /*XXX*/256, devq);
143	if (sim == NULL) {
144		cam_simq_free(devq);
145		goto fail;
146	}
147
148	if (xpt_bus_register(sim, /*bus_id*/0) != CAM_SUCCESS) {
149		cam_sim_free(sim, /*free_devq*/TRUE);
150		sim = NULL;
151		goto fail;
152	}
153
154	if (xpt_create_path(&path, /*periph*/NULL,
155			    cam_sim_path(sim), CAM_TARGET_WILDCARD,
156			    CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
157		xpt_bus_deregister(cam_sim_path(sim));
158		cam_sim_free(sim, /*free_devq*/TRUE);
159		sim = NULL;
160		goto fail;
161	}
162
163	xpt_setup_ccb(&csa.ccb_h, path, /*priority*/5);
164	csa.ccb_h.func_code = XPT_SASYNC_CB;
165	csa.event_enable = AC_LOST_DEVICE;
166	csa.callback = ahd_async;
167	csa.callback_arg = sim;
168	xpt_action((union ccb *)&csa);
169	count++;
170
171fail:
172	ahd->platform_data->sim = sim;
173	ahd->platform_data->path = path;
174	if (count != 0) {
175		/* We have to wait until after any system dumps... */
176		ahd->platform_data->eh =
177		    EVENTHANDLER_REGISTER(shutdown_final, ahd_shutdown,
178					  ahd, SHUTDOWN_PRI_DEFAULT);
179		ahd_intr_enable(ahd, TRUE);
180	}
181
182	ahd_unlock(ahd, &s);
183
184	return (count);
185}
186
187/*
188 * Catch an interrupt from the adapter
189 */
190void
191ahd_platform_intr(void *arg)
192{
193	struct	ahd_softc *ahd;
194
195	ahd = (struct ahd_softc *)arg;
196	ahd_intr(ahd);
197}
198
199/*
200 * We have an SCB which has been processed by the
201 * adapter; now we look to see how the operation
202 * went.
203 */
204void
205ahd_done(struct ahd_softc *ahd, struct scb *scb)
206{
207	union ccb *ccb;
208
209	CAM_DEBUG(scb->io_ctx->ccb_h.path, CAM_DEBUG_TRACE,
210		  ("ahd_done - scb %d\n", SCB_GET_TAG(scb)));
211
212	ccb = scb->io_ctx;
213	LIST_REMOVE(scb, pending_links);
214
215	untimeout(ahd_timeout, (caddr_t)scb, ccb->ccb_h.timeout_ch);
216
217	if ((ccb->ccb_h.flags & CAM_DIR_MASK) != CAM_DIR_NONE) {
218		bus_dmasync_op_t op;
219
220		if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN)
221			op = BUS_DMASYNC_POSTREAD;
222		else
223			op = BUS_DMASYNC_POSTWRITE;
224		bus_dmamap_sync(ahd->buffer_dmat, scb->dmamap, op);
225		bus_dmamap_unload(ahd->buffer_dmat, scb->dmamap);
226	}
227
228#ifdef AHD_TARGET_MODE
229	if (ccb->ccb_h.func_code == XPT_CONT_TARGET_IO) {
230		struct cam_path *ccb_path;
231
232		/*
233		 * If we have finally disconnected, clean up our
234		 * pending device state.
235		 * XXX - There may be error states in which
236		 *       we will remain connected.
237		 */
238		ccb_path = ccb->ccb_h.path;
239		if (ahd->pending_device != NULL
240		 && xpt_path_comp(ahd->pending_device->path, ccb_path) == 0) {
241
242			if ((ccb->ccb_h.flags & CAM_SEND_STATUS) != 0) {
243				ahd->pending_device = NULL;
244			} else {
245				xpt_print_path(ccb->ccb_h.path);
246				printf("Still disconnected\n");
247				ahd_freeze_ccb(ccb);
248			}
249		}
250
251		if (ahd_get_transaction_status(scb) == CAM_REQ_INPROG)
252			ccb->ccb_h.status |= CAM_REQ_CMP;
253		ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
254		ahd_free_scb(ahd, scb);
255		xpt_done(ccb);
256		return;
257	}
258#endif
259
260	/*
261	 * If the recovery SCB completes, we have to be
262	 * out of our timeout.
263	 */
264	if ((scb->flags & SCB_RECOVERY_SCB) != 0) {
265		struct	scb *list_scb;
266
267		/*
268		 * We were able to complete the command successfully,
269		 * so reinstate the timeouts for all other pending
270		 * commands.
271		 */
272		LIST_FOREACH(list_scb, &ahd->pending_scbs, pending_links) {
273			union ccb *ccb;
274			uint64_t time;
275
276			ccb = list_scb->io_ctx;
277			if (ccb->ccb_h.timeout == CAM_TIME_INFINITY)
278				continue;
279
280			time = ccb->ccb_h.timeout;
281			time *= hz;
282			time /= 1000;
283			ccb->ccb_h.timeout_ch =
284			    timeout(ahd_timeout, list_scb, time);
285		}
286
287		if (ahd_get_transaction_status(scb) == CAM_BDR_SENT
288		 || ahd_get_transaction_status(scb) == CAM_REQ_ABORTED)
289			ahd_set_transaction_status(scb, CAM_CMD_TIMEOUT);
290		ahd_print_path(ahd, scb);
291		printf("no longer in timeout, status = %x\n",
292		       ccb->ccb_h.status);
293	}
294
295	/* Don't clobber any existing error state */
296	if (ahd_get_transaction_status(scb) == CAM_REQ_INPROG) {
297		ccb->ccb_h.status |= CAM_REQ_CMP;
298	} else if ((scb->flags & SCB_SENSE) != 0) {
299		/*
300		 * We performed autosense retrieval.
301		 *
302		 * Zero any sense not transferred by the
303		 * device.  The SCSI spec mandates that any
304		 * untransferred data should be assumed to be
305		 * zero.  Complete the 'bounce' of sense information
306		 * through buffers accessible via bus-space by
307		 * copying it into the client's csio.
308		 */
309		memset(&ccb->csio.sense_data, 0, sizeof(ccb->csio.sense_data));
310		memcpy(&ccb->csio.sense_data,
311		       ahd_get_sense_buf(ahd, scb),
312/* XXX What size do we want to use??? */
313		       sizeof(ccb->csio.sense_data)
314		       - ccb->csio.sense_resid);
315		scb->io_ctx->ccb_h.status |= CAM_AUTOSNS_VALID;
316	} else if ((scb->flags & SCB_PKT_SENSE) != 0) {
317		struct scsi_status_iu_header *siu;
318		u_int sense_len;
319		int i;
320
321		/*
322		 * Copy only the sense data into the provided buffer.
323		 */
324		siu = (struct scsi_status_iu_header *)scb->sense_data;
325		sense_len = MIN(scsi_4btoul(siu->sense_length),
326				sizeof(ccb->csio.sense_data));
327		memset(&ccb->csio.sense_data, 0, sizeof(ccb->csio.sense_data));
328		memcpy(&ccb->csio.sense_data,
329		       ahd_get_sense_buf(ahd, scb) + SIU_SENSE_OFFSET(siu),
330		       sense_len);
331		printf("Copied %d bytes of sense data at offset %d:", sense_len,
332		       SIU_SENSE_OFFSET(siu));
333		for (i = 0; i < sense_len; i++)
334			printf(" 0x%x", ((uint8_t *)&ccb->csio.sense_data)[i]);
335		printf("\n");
336		scb->io_ctx->ccb_h.status |= CAM_AUTOSNS_VALID;
337	}
338	ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
339	ahd_free_scb(ahd, scb);
340	xpt_done(ccb);
341}
342
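/*
 * Entry point for CCBs queued to our SIM by the CAM transport layer.
 * Dispatch on the CCB function code, either starting the requested
 * operation or completing the CCB immediately.
 */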
343static void
344ahd_action(struct cam_sim *sim, union ccb *ccb)
345{
346	struct	ahd_softc *ahd;
347#ifdef AHD_TARGET_MODE
348	struct	ahd_tmode_lstate *lstate;
349#endif
350	u_int	target_id;
351	u_int	our_id;
352	long	s;
353
354	CAM_DEBUG(ccb->ccb_h.path, CAM_DEBUG_TRACE, ("ahd_action\n"));
355
356	ahd = (struct ahd_softc *)cam_sim_softc(sim);
357
358	target_id = ccb->ccb_h.target_id;
359	our_id = SIM_SCSI_ID(ahd, sim);
360
361	switch (ccb->ccb_h.func_code) {
362	/* Common cases first */
363#ifdef AHD_TARGET_MODE
364	case XPT_ACCEPT_TARGET_IO:	/* Accept Host Target Mode CDB */
365	case XPT_CONT_TARGET_IO:/* Continue Host Target I/O Connection*/
366	{
367		struct	   ahd_tmode_tstate *tstate;
368		cam_status status;
369
370		status = ahd_find_tmode_devs(ahd, sim, ccb, &tstate,
371					     &lstate, TRUE);
372
373		if (status != CAM_REQ_CMP) {
374			if (ccb->ccb_h.func_code == XPT_CONT_TARGET_IO) {
375				/* Response from the black hole device */
376				tstate = NULL;
377				lstate = ahd->black_hole;
378			} else {
379				ccb->ccb_h.status = status;
380				xpt_done(ccb);
381				break;
382			}
383		}
384		if (ccb->ccb_h.func_code == XPT_ACCEPT_TARGET_IO) {
385
386			ahd_lock(ahd, &s);
387			SLIST_INSERT_HEAD(&lstate->accept_tios, &ccb->ccb_h,
388					  sim_links.sle);
389			ccb->ccb_h.status = CAM_REQ_INPROG;
390			if ((ahd->flags & AHD_TQINFIFO_BLOCKED) != 0)
391				ahd_run_tqinfifo(ahd, /*paused*/FALSE);
392			ahd_unlock(ahd, &s);
393			break;
394		}
395
396		/*
397		 * The target_id represents the target we attempt to
398		 * select.  In target mode, this is the initiator of
399		 * the original command.
400		 */
401		our_id = target_id;
402		target_id = ccb->csio.init_id;
403		/* FALLTHROUGH */
404	}
405#endif
406	case XPT_SCSI_IO:	/* Execute the requested I/O operation */
407	case XPT_RESET_DEV:	/* Bus Device Reset the specified SCSI device */
408	{
409		struct	scb *scb;
410		struct	hardware_scb *hscb;
411		struct	ahd_initiator_tinfo *tinfo;
412		struct	ahd_tmode_tstate *tstate;
413		u_int	col_idx;
414
415		if ((ahd->flags & AHD_INITIATORROLE) == 0
416		 && (ccb->ccb_h.func_code == XPT_SCSI_IO
417		  || ccb->ccb_h.func_code == XPT_RESET_DEV)) {
418			ccb->ccb_h.status = CAM_PROVIDE_FAIL;
419			xpt_done(ccb);
420			return;
421		}
422
423		/*
424		 * Get an SCB to use.
425		 */
426		ahd_lock(ahd, &s);
427		tinfo = ahd_fetch_transinfo(ahd, 'A', our_id,
428					    target_id, &tstate);
429		if ((ccb->ccb_h.flags & CAM_TAG_ACTION_VALID) == 0
430		 || (tinfo->curr.ppr_options & MSG_EXT_PPR_IU_REQ) != 0
431		 || ccb->ccb_h.func_code == XPT_CONT_TARGET_IO) {
432			col_idx = AHD_NEVER_COL_IDX;
433		} else {
434			col_idx = AHD_BUILD_COL_IDX(target_id,
435						    ccb->ccb_h.target_lun);
436		}
437		if ((scb = ahd_get_scb(ahd, col_idx)) == NULL) {
438
439			xpt_freeze_simq(sim, /*count*/1);
440			ahd->flags |= AHD_RESOURCE_SHORTAGE;
441			ahd_unlock(ahd, &s);
442			ccb->ccb_h.status = CAM_REQUEUE_REQ;
443			xpt_done(ccb);
444			return;
445		}
446		ahd_unlock(ahd, &s);
447
448		hscb = scb->hscb;
449
450		CAM_DEBUG(ccb->ccb_h.path, CAM_DEBUG_SUBTRACE,
451			  ("start scb(%p)\n", scb));
452		scb->io_ctx = ccb;
453		/*
454		 * So we can find the SCB when an abort is requested
455		 */
456		ccb->ccb_h.ccb_scb_ptr = scb;
457
458		/*
459		 * Put all the arguments for the xfer in the scb
460		 */
461		hscb->control = 0;
462		hscb->scsiid = BUILD_SCSIID(ahd, sim, target_id, our_id);
463		hscb->lun = ccb->ccb_h.target_lun;
464		if (ccb->ccb_h.func_code == XPT_RESET_DEV) {
465			hscb->cdb_len = 0;
466			scb->flags |= SCB_DEVICE_RESET;
467			hscb->control |= MK_MESSAGE;
468			hscb->task_management = SIU_TASKMGMT_LUN_RESET;
469			ahd_execute_scb(scb, NULL, 0, 0);
470		} else {
471#ifdef AHD_TARGET_MODE
472			if (ccb->ccb_h.func_code == XPT_CONT_TARGET_IO) {
473				struct target_data *tdata;
474
475				tdata = &hscb->shared_data.tdata;
476				if (ahd->pending_device == lstate)
477					scb->flags |= SCB_TARGET_IMMEDIATE;
478				hscb->control |= TARGET_SCB;
479				tdata->target_phases = 0;
480				if ((ccb->ccb_h.flags & CAM_SEND_STATUS) != 0) {
481					tdata->target_phases |= SPHASE_PENDING;
482					tdata->scsi_status =
483					    ccb->csio.scsi_status;
484				}
485	 			if (ccb->ccb_h.flags & CAM_DIS_DISCONNECT)
486					tdata->target_phases |= NO_DISCONNECT;
487
488				tdata->initiator_tag =
489				    ahd_htole16(ccb->csio.tag_id);
490			}
491#endif
492			hscb->task_management = 0;
493			if (ccb->ccb_h.flags & CAM_TAG_ACTION_VALID)
494				hscb->control |= ccb->csio.tag_action;
495
496			ahd_setup_data(ahd, sim, &ccb->csio, scb);
497		}
498		break;
499	}
500#ifdef AHD_TARGET_MODE
501	case XPT_NOTIFY_ACK:
502	case XPT_IMMED_NOTIFY:
503	{
504		struct	   ahd_tmode_tstate *tstate;
505		struct	   ahd_tmode_lstate *lstate;
506		cam_status status;
507
508		status = ahd_find_tmode_devs(ahd, sim, ccb, &tstate,
509					     &lstate, TRUE);
510
511		if (status != CAM_REQ_CMP) {
512			ccb->ccb_h.status = status;
513			xpt_done(ccb);
514			break;
515		}
516		SLIST_INSERT_HEAD(&lstate->immed_notifies, &ccb->ccb_h,
517				  sim_links.sle);
518		ccb->ccb_h.status = CAM_REQ_INPROG;
519		ahd_send_lstate_events(ahd, lstate);
520		break;
521	}
522	case XPT_EN_LUN:		/* Enable LUN as a target */
523		ahd_handle_en_lun(ahd, sim, ccb);
524		xpt_done(ccb);
525		break;
526#endif
527	case XPT_ABORT:			/* Abort the specified CCB */
528	{
529		ahd_abort_ccb(ahd, sim, ccb);
530		break;
531	}
532	case XPT_SET_TRAN_SETTINGS:
533	{
534		ahd_lock(ahd, &s);
535		ahd_set_tran_settings(ahd, SIM_SCSI_ID(ahd, sim),
536				      SIM_CHANNEL(ahd, sim), &ccb->cts);
537		ahd_unlock(ahd, &s);
538		xpt_done(ccb);
539		break;
540	}
541	case XPT_GET_TRAN_SETTINGS:
542	/* Get default/user set transfer settings for the target */
543	{
544		ahd_lock(ahd, &s);
545		ahd_get_tran_settings(ahd, SIM_SCSI_ID(ahd, sim),
546				      SIM_CHANNEL(ahd, sim), &ccb->cts);
547		ahd_unlock(ahd, &s);
548		xpt_done(ccb);
549		break;
550	}
551	case XPT_CALC_GEOMETRY:
552	{
553		struct	  ccb_calc_geometry *ccg;
554		uint32_t size_mb;
555		uint32_t secs_per_cylinder;
556		int	  extended;
557
558		ccg = &ccb->ccg;
559		size_mb = ccg->volume_size
560			/ ((1024L * 1024L) / ccg->block_size);
561		extended = ahd->flags & AHD_EXTENDED_TRANS_A;
562
563		if (size_mb > 1024 && extended) {
564			ccg->heads = 255;
565			ccg->secs_per_track = 63;
566		} else {
567			ccg->heads = 64;
568			ccg->secs_per_track = 32;
569		}
570		secs_per_cylinder = ccg->heads * ccg->secs_per_track;
571		ccg->cylinders = ccg->volume_size / secs_per_cylinder;
572		ccb->ccb_h.status = CAM_REQ_CMP;
573		xpt_done(ccb);
574		break;
575	}
576	case XPT_RESET_BUS:		/* Reset the specified SCSI bus */
577	{
578		int  found;
579
580		ahd_lock(ahd, &s);
581		found = ahd_reset_channel(ahd, SIM_CHANNEL(ahd, sim),
582					  /*initiate reset*/TRUE);
583		ahd_unlock(ahd, &s);
584		if (bootverbose) {
585			xpt_print_path(SIM_PATH(ahd, sim));
586			printf("SCSI bus reset delivered. "
587			       "%d SCBs aborted.\n", found);
588		}
589		ccb->ccb_h.status = CAM_REQ_CMP;
590		xpt_done(ccb);
591		break;
592	}
593	case XPT_TERM_IO:		/* Terminate the I/O process */
594		/* XXX Implement */
595		ccb->ccb_h.status = CAM_REQ_INVALID;
596		xpt_done(ccb);
597		break;
598	case XPT_PATH_INQ:		/* Path routing inquiry */
599	{
600		struct ccb_pathinq *cpi = &ccb->cpi;
601
602		cpi->version_num = 1; /* XXX??? */
603		cpi->hba_inquiry = PI_SDTR_ABLE|PI_TAG_ABLE;
604		if ((ahd->features & AHD_WIDE) != 0)
605			cpi->hba_inquiry |= PI_WIDE_16;
606		if ((ahd->features & AHD_TARGETMODE) != 0) {
607			cpi->target_sprt = PIT_PROCESSOR
608					 | PIT_DISCONNECT
609					 | PIT_TERM_IO;
610		} else {
611			cpi->target_sprt = 0;
612		}
613		cpi->hba_misc = 0;
614		cpi->hba_eng_cnt = 0;
615		cpi->max_target = (ahd->features & AHD_WIDE) ? 15 : 7;
616		cpi->max_lun = AHD_NUM_LUNS - 1;
617		cpi->initiator_id = ahd->our_id;
618		if ((ahd->flags & AHD_RESET_BUS_A) == 0) {
619			cpi->hba_misc |= PIM_NOBUSRESET;
620		}
621		cpi->bus_id = cam_sim_bus(sim);
622		cpi->base_transfer_speed = 3300;
623		strncpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN);
624		strncpy(cpi->hba_vid, "Adaptec", HBA_IDLEN);
625		strncpy(cpi->dev_name, cam_sim_name(sim), DEV_IDLEN);
626		cpi->unit_number = cam_sim_unit(sim);
627#ifdef AHD_NEW_TRAN_SETTINGS
628		cpi->protocol = PROTO_SCSI;
629		cpi->protocol_version = SCSI_REV_2;
630		cpi->transport = XPORT_SPI;
631		cpi->transport_version = 2;
632		cpi->xport_specific.spi.ppr_options = SID_SPI_CLOCK_ST;
633		cpi->transport_version = 4;
634		cpi->xport_specific.spi.ppr_options = SID_SPI_CLOCK_DT_ST;
635#endif
636		cpi->ccb_h.status = CAM_REQ_CMP;
637		xpt_done(ccb);
638		break;
639	}
640	default:
641		ccb->ccb_h.status = CAM_PROVIDE_FAIL;
642		xpt_done(ccb);
643		break;
644	}
645}
646
647
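/*
 * Apply the transfer settings requested in an XPT_SET_TRAN_SETTINGS CCB
 * to the goal or user negotiation state for the addressed target,
 * validating the requested width, period, and offset against our limits.
 */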
648static void
649ahd_set_tran_settings(struct ahd_softc *ahd, int our_id, char channel,
650		      struct ccb_trans_settings *cts)
651{
652#ifdef AHD_NEW_TRAN_SETTINGS
653	struct	  ahd_devinfo devinfo;
654	struct	  ccb_trans_settings_scsi *scsi;
655	struct	  ccb_trans_settings_spi *spi;
656	struct	  ahd_initiator_tinfo *tinfo;
657	struct	  ahd_tmode_tstate *tstate;
658	uint16_t *discenable;
659	uint16_t *tagenable;
660	u_int	  update_type;
661
662	scsi = &cts->proto_specific.scsi;
663	spi = &cts->xport_specific.spi;
664	ahd_compile_devinfo(&devinfo, SIM_SCSI_ID(ahd, sim),
665			    cts->ccb_h.target_id,
666			    cts->ccb_h.target_lun,
667			    SIM_CHANNEL(ahd, sim),
668			    ROLE_UNKNOWN);
669	tinfo = ahd_fetch_transinfo(ahd, devinfo.channel,
670				    devinfo.our_scsiid,
671				    devinfo.target, &tstate);
672	update_type = 0;
673	if (cts->type == CTS_TYPE_CURRENT_SETTINGS) {
674		update_type |= AHD_TRANS_GOAL;
675		discenable = &tstate->discenable;
676		tagenable = &tstate->tagenable;
677		tinfo->curr.protocol_version = cts->protocol_version;
678		tinfo->curr.transport_version = cts->transport_version;
679		tinfo->goal.protocol_version = cts->protocol_version;
680		tinfo->goal.transport_version = cts->transport_version;
681	} else if (cts->type == CTS_TYPE_USER_SETTINGS) {
682		update_type |= AHD_TRANS_USER;
683		discenable = &ahd->user_discenable;
684		tagenable = &ahd->user_tagenable;
685		tinfo->user.protocol_version = cts->protocol_version;
686		tinfo->user.transport_version = cts->transport_version;
687	} else {
688		cts->ccb_h.status = CAM_REQ_INVALID;
689		return;
690	}
691
692	if ((spi->valid & CTS_SPI_VALID_DISC) != 0) {
693		if ((spi->flags & CTS_SPI_FLAGS_DISC_ENB) != 0)
694			*discenable |= devinfo.target_mask;
695		else
696			*discenable &= ~devinfo.target_mask;
697	}
698
699	if ((scsi->valid & CTS_SCSI_VALID_TQ) != 0) {
700		if ((scsi->flags & CTS_SCSI_FLAGS_TAG_ENB) != 0)
701			*tagenable |= devinfo.target_mask;
702		else
703			*tagenable &= ~devinfo.target_mask;
704	}
705
706	if ((spi->valid & CTS_SPI_VALID_BUS_WIDTH) != 0) {
707		ahd_validate_width(ahd, /*tinfo limit*/NULL,
708				   &spi->bus_width, ROLE_UNKNOWN);
709		ahd_set_width(ahd, &devinfo, spi->bus_width,
710			      update_type, /*paused*/FALSE);
711	}
712
713	if ((spi->valid & CTS_SPI_VALID_PPR_OPTIONS) == 0) {
714		if (update_type == AHD_TRANS_USER)
715			spi->ppr_options = tinfo->user.ppr_options;
716		else
717			spi->ppr_options = tinfo->goal.ppr_options;
718	}
719
720	if ((spi->valid & CTS_SPI_VALID_SYNC_OFFSET) == 0) {
721		if (update_type == AHD_TRANS_USER)
722			spi->sync_offset = tinfo->user.offset;
723		else
724			spi->sync_offset = tinfo->goal.offset;
725	}
726
727	if ((spi->valid & CTS_SPI_VALID_SYNC_RATE) == 0) {
728		if (update_type == AHD_TRANS_USER)
729			spi->sync_period = tinfo->user.period;
730		else
731			spi->sync_period = tinfo->goal.period;
732	}
733
734	if (((spi->valid & CTS_SPI_VALID_SYNC_RATE) != 0)
735	 || ((spi->valid & CTS_SPI_VALID_SYNC_OFFSET) != 0)) {
736		u_int	maxsync;
737
738		maxsync = AHD_SYNCRATE_MAX;
739
740		if (spi->bus_width != MSG_EXT_WDTR_BUS_16_BIT)
741			spi->ppr_options &= ~MSG_EXT_PPR_DT_REQ;
742
743		if ((*discenable & devinfo.target_mask) == 0)
744			spi->ppr_options &= ~MSG_EXT_PPR_IU_REQ;
745
746		ahd_find_syncrate(ahd, &spi->sync_period,
747				  &spi->ppr_options, maxsync);
748		ahd_validate_offset(ahd, /*tinfo limit*/NULL,
749				    spi->sync_period, &spi->sync_offset,
750				    spi->bus_width, ROLE_UNKNOWN);
751
752		/* We use a period of 0 to represent async */
753		if (spi->sync_offset == 0) {
754			spi->sync_period = 0;
755			spi->ppr_options = 0;
756		}
757
758		ahd_set_syncrate(ahd, &devinfo, spi->sync_period,
759				 spi->sync_offset, spi->ppr_options,
760				 update_type, /*paused*/FALSE);
761	}
762	cts->ccb_h.status = CAM_REQ_CMP;
763#else
764	struct	  ahd_devinfo devinfo;
765	struct	  ahd_initiator_tinfo *tinfo;
766	struct	  ahd_tmode_tstate *tstate;
767	uint16_t *discenable;
768	uint16_t *tagenable;
769	u_int	  update_type;
770
771	ahd_compile_devinfo(&devinfo, SIM_SCSI_ID(ahd, sim),
772			    cts->ccb_h.target_id,
773			    cts->ccb_h.target_lun,
774			    SIM_CHANNEL(ahd, sim),
775			    ROLE_UNKNOWN);
776	tinfo = ahd_fetch_transinfo(ahd, devinfo.channel,
777				    devinfo.our_scsiid,
778				    devinfo.target, &tstate);
779	update_type = 0;
780	if ((cts->flags & CCB_TRANS_CURRENT_SETTINGS) != 0) {
781		update_type |= AHD_TRANS_GOAL;
782		discenable = &tstate->discenable;
783		tagenable = &tstate->tagenable;
784	} else if ((cts->flags & CCB_TRANS_USER_SETTINGS) != 0) {
785		update_type |= AHD_TRANS_USER;
786		discenable = &ahd->user_discenable;
787		tagenable = &ahd->user_tagenable;
788	} else {
789		cts->ccb_h.status = CAM_REQ_INVALID;
790		return;
791	}
792
793	if ((cts->valid & CCB_TRANS_DISC_VALID) != 0) {
794		if ((cts->flags & CCB_TRANS_DISC_ENB) != 0)
795			*discenable |= devinfo.target_mask;
796		else
797			*discenable &= ~devinfo.target_mask;
798	}
799
800	if ((cts->valid & CCB_TRANS_TQ_VALID) != 0) {
801		if ((cts->flags & CCB_TRANS_TAG_ENB) != 0)
802			*tagenable |= devinfo.target_mask;
803		else
804			*tagenable &= ~devinfo.target_mask;
805	}
806
807	if ((cts->valid & CCB_TRANS_BUS_WIDTH_VALID) != 0) {
808		ahd_validate_width(ahd, /*tinfo limit*/NULL,
809				   &cts->bus_width, ROLE_UNKNOWN);
810		ahd_set_width(ahd, &devinfo, cts->bus_width,
811			      update_type, /*paused*/FALSE);
812	}
813
814	if ((cts->valid & CCB_TRANS_SYNC_OFFSET_VALID) == 0) {
815		if (update_type == AHD_TRANS_USER)
816			cts->sync_offset = tinfo->user.offset;
817		else
818			cts->sync_offset = tinfo->goal.offset;
819	}
820
821	if ((cts->valid & CCB_TRANS_SYNC_RATE_VALID) == 0) {
822		if (update_type == AHD_TRANS_USER)
823			cts->sync_period = tinfo->user.period;
824		else
825			cts->sync_period = tinfo->goal.period;
826	}
827
828	if (((cts->valid & CCB_TRANS_SYNC_RATE_VALID) != 0)
829	 || ((cts->valid & CCB_TRANS_SYNC_OFFSET_VALID) != 0)
830	 || ((cts->valid & CCB_TRANS_TQ_VALID) != 0)
831	 || ((cts->valid & CCB_TRANS_DISC_VALID) != 0)) {
832		u_int ppr_options;
833		u_int maxsync;
834
835		maxsync = AHD_SYNCRATE_MAX;
836		ppr_options = 0;
837		if (cts->sync_period <= AHD_SYNCRATE_DT
838		 && cts->bus_width == MSG_EXT_WDTR_BUS_16_BIT) {
839			ppr_options = tinfo->user.ppr_options
840				    | MSG_EXT_PPR_DT_REQ;
841		}
842
843		if ((*tagenable & devinfo.target_mask) == 0
844		 || (*discenable & devinfo.target_mask) == 0)
845			ppr_options &= ~MSG_EXT_PPR_IU_REQ;
846
847		ahd_find_syncrate(ahd, &cts->sync_period,
848				  &ppr_options, maxsync);
849		ahd_validate_offset(ahd, /*tinfo limit*/NULL,
850				    cts->sync_period, &cts->sync_offset,
851				    MSG_EXT_WDTR_BUS_8_BIT,
852				    ROLE_UNKNOWN);
853
854		/* We use a period of 0 to represent async */
855		if (cts->sync_offset == 0) {
856			cts->sync_period = 0;
857			ppr_options = 0;
858		}
859
860		if (ppr_options != 0
861		 && tinfo->user.transport_version >= 3) {
862			tinfo->goal.transport_version =
863			    tinfo->user.transport_version;
864			tinfo->curr.transport_version =
865			    tinfo->user.transport_version;
866		}
867
868		ahd_set_syncrate(ahd, &devinfo, cts->sync_period,
869				 cts->sync_offset, ppr_options,
870				 update_type, /*paused*/FALSE);
871	}
872	cts->ccb_h.status = CAM_REQ_CMP;
873#endif
874}
875
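/*
 * Fill an XPT_GET_TRAN_SETTINGS CCB with the current or user default
 * transfer settings for the addressed target.
 */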
876static void
877ahd_get_tran_settings(struct ahd_softc *ahd, int our_id, char channel,
878		      struct ccb_trans_settings *cts)
879{
880#ifdef AHD_NEW_TRAN_SETTINGS
881	struct	ahd_devinfo devinfo;
882	struct	ccb_trans_settings_scsi *scsi;
883	struct	ccb_trans_settings_spi *spi;
884	struct	ahd_initiator_tinfo *targ_info;
885	struct	ahd_tmode_tstate *tstate;
886	struct	ahd_transinfo *tinfo;
887
888	scsi = &cts->proto_specific.scsi;
889	spi = &cts->xport_specific.spi;
890	ahd_compile_devinfo(&devinfo, our_id,
891			    cts->ccb_h.target_id,
892			    cts->ccb_h.target_lun,
893			    channel, ROLE_UNKNOWN);
894	targ_info = ahd_fetch_transinfo(ahd, devinfo.channel,
895					devinfo.our_scsiid,
896					devinfo.target, &tstate);
897
898	if (cts->type == CTS_TYPE_CURRENT_SETTINGS)
899		tinfo = &targ_info->curr;
900	else
901		tinfo = &targ_info->user;
902
903	scsi->flags &= ~CTS_SCSI_FLAGS_TAG_ENB;
904	spi->flags &= ~CTS_SPI_FLAGS_DISC_ENB;
905	if (cts->type == CTS_TYPE_USER_SETTINGS) {
906		if ((ahd->user_discenable & devinfo.target_mask) != 0)
907			spi->flags |= CTS_SPI_FLAGS_DISC_ENB;
908
909		if ((ahd->user_tagenable & devinfo.target_mask) != 0)
910			scsi->flags |= CTS_SCSI_FLAGS_TAG_ENB;
911	} else {
912		if ((tstate->discenable & devinfo.target_mask) != 0)
913			spi->flags |= CTS_SPI_FLAGS_DISC_ENB;
914
915		if ((tstate->tagenable & devinfo.target_mask) != 0)
916			scsi->flags |= CTS_SCSI_FLAGS_TAG_ENB;
917	}
918	cts->protocol_version = tinfo->protocol_version;
919	cts->transport_version = tinfo->transport_version;
920
921	spi->sync_period = tinfo->period;
922	spi->sync_offset = tinfo->offset;
923	spi->bus_width = tinfo->width;
924	spi->ppr_options = tinfo->ppr_options;
925
926	cts->protocol = PROTO_SCSI;
927	cts->transport = XPORT_SPI;
928	spi->valid = CTS_SPI_VALID_SYNC_RATE
929		   | CTS_SPI_VALID_SYNC_OFFSET
930		   | CTS_SPI_VALID_BUS_WIDTH
931		   | CTS_SPI_VALID_PPR_OPTIONS;
932
933	if (cts->ccb_h.target_lun != CAM_LUN_WILDCARD) {
934		scsi->valid = CTS_SCSI_VALID_TQ;
935		spi->valid |= CTS_SPI_VALID_DISC;
936	} else {
937		scsi->valid = 0;
938	}
939
940	cts->ccb_h.status = CAM_REQ_CMP;
941#else
942	struct	ahd_devinfo devinfo;
943	struct	ahd_initiator_tinfo *targ_info;
944	struct	ahd_tmode_tstate *tstate;
945	struct	ahd_transinfo *tinfo;
946
947	ahd_compile_devinfo(&devinfo, our_id,
948			    cts->ccb_h.target_id,
949			    cts->ccb_h.target_lun,
950			    channel, ROLE_UNKNOWN);
951	targ_info = ahd_fetch_transinfo(ahd, devinfo.channel,
952					devinfo.our_scsiid,
953					devinfo.target, &tstate);
954
955	if ((cts->flags & CCB_TRANS_CURRENT_SETTINGS) != 0)
956		tinfo = &targ_info->curr;
957	else
958		tinfo = &targ_info->user;
959
960	cts->flags &= ~(CCB_TRANS_DISC_ENB|CCB_TRANS_TAG_ENB);
961	if ((cts->flags & CCB_TRANS_CURRENT_SETTINGS) == 0) {
962		if ((ahd->user_discenable & devinfo.target_mask) != 0)
963			cts->flags |= CCB_TRANS_DISC_ENB;
964
965		if ((ahd->user_tagenable & devinfo.target_mask) != 0)
966			cts->flags |= CCB_TRANS_TAG_ENB;
967	} else {
968		if ((tstate->discenable & devinfo.target_mask) != 0)
969			cts->flags |= CCB_TRANS_DISC_ENB;
970
971		if ((tstate->tagenable & devinfo.target_mask) != 0)
972			cts->flags |= CCB_TRANS_TAG_ENB;
973	}
974	cts->sync_period = tinfo->period;
975	cts->sync_offset = tinfo->offset;
976	cts->bus_width = tinfo->width;
977
978	cts->valid = CCB_TRANS_SYNC_RATE_VALID
979		   | CCB_TRANS_SYNC_OFFSET_VALID
980		   | CCB_TRANS_BUS_WIDTH_VALID;
981
982	if (cts->ccb_h.target_lun != CAM_LUN_WILDCARD)
983		cts->valid |= CCB_TRANS_DISC_VALID|CCB_TRANS_TQ_VALID;
984
985	cts->ccb_h.status = CAM_REQ_CMP;
986#endif
987}
988
989static void
990ahd_async(void *callback_arg, uint32_t code, struct cam_path *path, void *arg)
991{
992	struct ahd_softc *ahd;
993	struct cam_sim *sim;
994
995	sim = (struct cam_sim *)callback_arg;
996	ahd = (struct ahd_softc *)cam_sim_softc(sim);
997	switch (code) {
998	case AC_LOST_DEVICE:
999	{
1000		struct	ahd_devinfo devinfo;
1001		long	s;
1002
1003		ahd_compile_devinfo(&devinfo, SIM_SCSI_ID(ahd, sim),
1004				    xpt_path_target_id(path),
1005				    xpt_path_lun_id(path),
1006				    SIM_CHANNEL(ahd, sim),
1007				    ROLE_UNKNOWN);
1008
1009		/*
1010		 * Revert to async/narrow transfers
1011		 * for the next device.
1012		 */
1013		ahd_lock(ahd, &s);
1014		ahd_set_width(ahd, &devinfo, MSG_EXT_WDTR_BUS_8_BIT,
1015			      AHD_TRANS_GOAL|AHD_TRANS_CUR, /*paused*/FALSE);
1016		ahd_set_syncrate(ahd, &devinfo, /*period*/0, /*offset*/0,
1017				 /*ppr_options*/0, AHD_TRANS_GOAL|AHD_TRANS_CUR,
1018				 /*paused*/FALSE);
1019		ahd_unlock(ahd, &s);
1020		break;
1021	}
1022	default:
1023		break;
1024	}
1025}
1026
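/*
 * Callback invoked by bus_dmamap_load() once the data buffer for an SCB
 * has been mapped (and called directly for requests without data).
 * Build the S/G list, finalize the hardware SCB, start the CCB timeout,
 * and queue the SCB to the controller.
 */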
1027static void
1028ahd_execute_scb(void *arg, bus_dma_segment_t *dm_segs, int nsegments,
1029		int error)
1030{
1031	struct	scb *scb;
1032	union	ccb *ccb;
1033	struct	ahd_softc *ahd;
1034	struct	ahd_initiator_tinfo *tinfo;
1035	struct	ahd_tmode_tstate *tstate;
1036	u_int	mask;
1037	u_long	s;
1038
1039	scb = (struct scb *)arg;
1040	ccb = scb->io_ctx;
1041	ahd = scb->ahd_softc;
1042
1043	if (error != 0) {
1044		if (error == EFBIG)
1045			ahd_set_transaction_status(scb, CAM_REQ_TOO_BIG);
1046		else
1047			ahd_set_transaction_status(scb, CAM_REQ_CMP_ERR);
1048		if (nsegments != 0)
1049			bus_dmamap_unload(ahd->buffer_dmat, scb->dmamap);
1050		ahd_lock(ahd, &s);
1051		ahd_free_scb(ahd, scb);
1052		ahd_unlock(ahd, &s);
1053		xpt_done(ccb);
1054		return;
1055	}
1056	scb->sg_count = 0;
1057	if (nsegments != 0) {
1058		void *sg;
1059		bus_dmasync_op_t op;
1060		u_int i;
1061
1062		/* Copy the segments into our SG list */
1063		for (i = nsegments, sg = scb->sg_list; i > 0; i--) {
1064
1065			sg = ahd_sg_setup(ahd, scb, sg, dm_segs->ds_addr,
1066					  dm_segs->ds_len,
1067					  /*last*/i == 1);
1068			dm_segs++;
1069		}
1070
1071		if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN)
1072			op = BUS_DMASYNC_PREREAD;
1073		else
1074			op = BUS_DMASYNC_PREWRITE;
1075
1076		bus_dmamap_sync(ahd->buffer_dmat, scb->dmamap, op);
1077
1078		if (ccb->ccb_h.func_code == XPT_CONT_TARGET_IO) {
1079			struct target_data *tdata;
1080
1081			tdata = &scb->hscb->shared_data.tdata;
1082			tdata->target_phases |= DPHASE_PENDING;
1083			if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_OUT)
1084				tdata->data_phase = P_DATAOUT;
1085			else
1086				tdata->data_phase = P_DATAIN;
1087		}
1088	}
1089
1090	ahd_lock(ahd, &s);
1091
1092	/*
1093	 * Last chance to check whether this SCB needs to
1094	 * be aborted.
1095	 */
1096	if (ahd_get_transaction_status(scb) != CAM_REQ_INPROG) {
1097		if (nsegments != 0)
1098			bus_dmamap_unload(ahd->buffer_dmat,
1099					  scb->dmamap);
1100		ahd_free_scb(ahd, scb);
1101		ahd_unlock(ahd, &s);
1102		xpt_done(ccb);
1103		return;
1104	}
1105
1106	tinfo = ahd_fetch_transinfo(ahd, SCSIID_CHANNEL(ahd, scb->hscb->scsiid),
1107				    SCSIID_OUR_ID(scb->hscb->scsiid),
1108				    SCSIID_TARGET(ahd, scb->hscb->scsiid),
1109				    &tstate);
1110
1111	mask = SCB_GET_TARGET_MASK(ahd, scb);
1112
1113	if ((tstate->discenable & mask) != 0
1114	 && (ccb->ccb_h.flags & CAM_DIS_DISCONNECT) == 0)
1115		scb->hscb->control |= DISCENB;
1116
1117	if ((tinfo->curr.ppr_options & MSG_EXT_PPR_IU_REQ) != 0) {
1118		scb->flags |= SCB_PACKETIZED;
1119		if (scb->hscb->task_management != 0)
1120			scb->hscb->control &= ~MK_MESSAGE;
1121	}
1122
1123	if ((ccb->ccb_h.flags & CAM_NEGOTIATE) != 0
1124	 && (tinfo->goal.width != 0
1125	  || tinfo->goal.period != 0
1126	  || tinfo->goal.ppr_options != 0)) {
1127		scb->flags |= SCB_NEGOTIATE;
1128		scb->hscb->control |= MK_MESSAGE;
1129	} else if ((tstate->auto_negotiate & mask) != 0) {
1130		scb->flags |= SCB_AUTO_NEGOTIATE;
1131		scb->hscb->control |= MK_MESSAGE;
1132	}
1133
1134	LIST_INSERT_HEAD(&ahd->pending_scbs, scb, pending_links);
1135
1136	ccb->ccb_h.status |= CAM_SIM_QUEUED;
1137
1138	if (ccb->ccb_h.timeout != CAM_TIME_INFINITY) {
1139		uint64_t time;
1140
1141		if (ccb->ccb_h.timeout == CAM_TIME_DEFAULT)
1142			ccb->ccb_h.timeout = 5 * 1000;
1143
1144		time = ccb->ccb_h.timeout;
1145		time *= hz;
1146		time /= 1000;
1147		ccb->ccb_h.timeout_ch =
1148		    timeout(ahd_timeout, (caddr_t)scb, time);
1149	}
1150
1151	if ((scb->flags & SCB_TARGET_IMMEDIATE) != 0) {
1152		/* Define a mapping from our tag to the SCB. */
1153		ahd->scb_data.scbindex[SCB_GET_TAG(scb)] = scb;
1154		ahd_pause(ahd);
1155		ahd_set_scbptr(ahd, SCB_GET_TAG(scb));
1156		ahd_outb(ahd, RETURN_1, CONT_MSG_LOOP_TARG);
1157		ahd_unpause(ahd);
1158	} else {
1159		ahd_queue_scb(ahd, scb);
1160	}
1161
1162	ahd_unlock(ahd, &s);
1163}
1164
1165static void
1166ahd_poll(struct cam_sim *sim)
1167{
1168	ahd_intr(cam_sim_softc(sim));
1169}
1170
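/*
 * Copy the CDB into the hardware SCB and map the request's data buffer,
 * deferring to ahd_execute_scb() once the mapping is complete.
 */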
1171static void
1172ahd_setup_data(struct ahd_softc *ahd, struct cam_sim *sim,
1173	       struct ccb_scsiio *csio, struct scb *scb)
1174{
1175	struct hardware_scb *hscb;
1176	struct ccb_hdr *ccb_h;
1177
1178	hscb = scb->hscb;
1179	ccb_h = &csio->ccb_h;
1180
1181	csio->resid = 0;
1182	csio->sense_resid = 0;
1183	if (ccb_h->func_code == XPT_SCSI_IO) {
1184		hscb->cdb_len = csio->cdb_len;
1185		if ((ccb_h->flags & CAM_CDB_POINTER) != 0) {
1186
1187			if (hscb->cdb_len > MAX_CDB_LEN
1188			 && (ccb_h->flags & CAM_CDB_PHYS) == 0) {
1189				u_long s;
1190
1191				/*
1192				 * Should CAM start to support CDB sizes
1193				 * greater than 16 bytes, we could use
1194				 * the sense buffer to store the CDB.
1195				 */
1196				ahd_set_transaction_status(scb,
1197							   CAM_REQ_INVALID);
1198				ahd_lock(ahd, &s);
1199				ahd_free_scb(ahd, scb);
1200				ahd_unlock(ahd, &s);
1201				xpt_done((union ccb *)csio);
1202				return;
1203			}
1204			if ((ccb_h->flags & CAM_CDB_PHYS) != 0) {
1205				hscb->shared_data.idata.cdb_from_host.cdbptr =
1206				   ahd_htole64((uintptr_t)csio->cdb_io.cdb_ptr);
1207				hscb->shared_data.idata.cdb_from_host.cdblen =
1208				   csio->cdb_len;
1209				hscb->cdb_len |= SCB_CDB_LEN_PTR;
1210			} else {
1211				memcpy(hscb->shared_data.idata.cdb,
1212				       csio->cdb_io.cdb_ptr,
1213				       hscb->cdb_len);
1214			}
1215		} else {
1216			if (hscb->cdb_len > MAX_CDB_LEN) {
1217				u_long s;
1218
1219				ahd_set_transaction_status(scb,
1220							   CAM_REQ_INVALID);
1221				ahd_lock(ahd, &s);
1222				ahd_free_scb(ahd, scb);
1223				ahd_unlock(ahd, &s);
1224				xpt_done((union ccb *)csio);
1225				return;
1226			}
1227			memcpy(hscb->shared_data.idata.cdb,
1228			       csio->cdb_io.cdb_bytes, hscb->cdb_len);
1229		}
1230	}
1231
1232	/* Only use S/G if there is a transfer */
1233	if ((ccb_h->flags & CAM_DIR_MASK) != CAM_DIR_NONE) {
1234		if ((ccb_h->flags & CAM_SCATTER_VALID) == 0) {
1235			/* We've been given a pointer to a single buffer */
1236			if ((ccb_h->flags & CAM_DATA_PHYS) == 0) {
1237				int s;
1238				int error;
1239
1240				s = splsoftvm();
1241				error = bus_dmamap_load(ahd->buffer_dmat,
1242							scb->dmamap,
1243							csio->data_ptr,
1244							csio->dxfer_len,
1245							ahd_execute_scb,
1246							scb, /*flags*/0);
1247				if (error == EINPROGRESS) {
1248					/*
1249					 * So as to maintain ordering,
1250					 * freeze the controller queue
1251					 * until our mapping is
1252					 * returned.
1253					 */
1254					xpt_freeze_simq(sim,
1255							/*count*/1);
1256					scb->io_ctx->ccb_h.status |=
1257					    CAM_RELEASE_SIMQ;
1258				}
1259				splx(s);
1260			} else {
1261				struct bus_dma_segment seg;
1262
1263				/* Pointer to physical buffer */
1264				if (csio->dxfer_len > AHD_MAXTRANSFER_SIZE)
1265					panic("ahd_setup_data - Transfer size "
1266					      "larger than device max");
1267
1268				seg.ds_addr =
1269				    (bus_addr_t)(vm_offset_t)csio->data_ptr;
1270				seg.ds_len = csio->dxfer_len;
1271				ahd_execute_scb(scb, &seg, 1, 0);
1272			}
1273		} else {
1274			struct bus_dma_segment *segs;
1275
1276			if ((ccb_h->flags & CAM_DATA_PHYS) != 0)
1277				panic("ahd_setup_data - Physical segment "
1278				      "pointers unsupported");
1279
1280			if ((ccb_h->flags & CAM_SG_LIST_PHYS) == 0)
1281				panic("ahd_setup_data - Virtual segment "
1282				      "addresses unsupported");
1283
1284			/* Just use the segments provided */
1285			segs = (struct bus_dma_segment *)csio->data_ptr;
1286			ahd_execute_scb(scb, segs, csio->sglist_cnt, 0);
1287		}
1288	} else {
1289		ahd_execute_scb(scb, NULL, 0, 0);
1290	}
1291}
1292
1293#if NOT_YET
1294static void
1295ahd_set_recoveryscb(struct ahd_softc *ahd, struct scb *scb) {
1296
1297	if ((scb->flags & SCB_RECOVERY_SCB) == 0) {
1298		struct scb *list_scb;
1299
1300		scb->flags |= SCB_RECOVERY_SCB;
1301
1302		/*
1303		 * Take all queued, but not sent SCBs out of the equation.
1304		 * Also ensure that no new CCBs are queued to us while we
1305		 * try to fix this problem.
1306		 */
1307		if ((scb->io_ctx->ccb_h.status & CAM_RELEASE_SIMQ) == 0) {
1308			xpt_freeze_simq(SCB_GET_SIM(ahd, scb), /*count*/1);
1309			scb->io_ctx->ccb_h.status |= CAM_RELEASE_SIMQ;
1310		}
1311
1312		/*
1313		 * Go through all of our pending SCBs and remove
1314		 * any scheduled timeouts for them.  We will reschedule
1315		 * them after we've successfully fixed this problem.
1316		 */
1317		LIST_FOREACH(list_scb, &ahd->pending_scbs, pending_links) {
1318			union ccb *ccb;
1319
1320			ccb = list_scb->io_ctx;
1321			untimeout(ahd_timeout, list_scb, ccb->ccb_h.timeout_ch);
1322		}
1323	}
1324}
1325#endif
1326
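/*
 * Handler for SCB timeouts scheduled via timeout().  The staged recovery
 * code below is not yet enabled, so for now we dump the card state and
 * reset the channel.
 */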
1327void
1328ahd_timeout(void *arg)
1329{
1330	struct	scb	  *scb;
1331	struct	ahd_softc *ahd;
1332	ahd_mode_state	   saved_modes;
1333	long		   s;
1334	int		   target;
1335	int		   lun;
1336	char		   channel;
1337
1338#if NOT_YET
1339	int		   i;
1340	int		   found;
1341	u_int		   last_phase;
1342#endif
1343
1344	scb = (struct scb *)arg;
1345	ahd = (struct ahd_softc *)scb->ahd_softc;
1346
1347	ahd_lock(ahd, &s);
1348
1349	ahd_pause_and_flushwork(ahd);
1350
1351	saved_modes = ahd_save_modes(ahd);
1352#if 0
1353	ahd_set_modes(ahd, AHD_MODE_SCSI, AHD_MODE_SCSI);
1354	ahd_outb(ahd, SCSISIGO, ACKO);
1355	printf("set ACK\n");
1356	ahd_outb(ahd, SCSISIGO, 0);
1357	printf("clearing Ack\n");
1358	ahd_restore_modes(ahd, saved_modes);
1359#endif
1360	if ((scb->flags & SCB_ACTIVE) == 0) {
1361		/* Previous timeout took care of me already */
1362		printf("%s: Timedout SCB already complete. "
1363		       "Interrupts may not be functioning.\n", ahd_name(ahd));
1364		ahd_unpause(ahd);
1365		ahd_unlock(ahd, &s);
1366		return;
1367	}
1368
1369	target = SCB_GET_TARGET(ahd, scb);
1370	channel = SCB_GET_CHANNEL(ahd, scb);
1371	lun = SCB_GET_LUN(scb);
1372
1373	ahd_print_path(ahd, scb);
1374	printf("SCB 0x%x - timed out\n", SCB_GET_TAG(scb));
1375	ahd_dump_card_state(ahd);
1376	ahd_reset_channel(ahd, SIM_CHANNEL(ahd, sim),
1377			  /*initiate reset*/TRUE);
1378	ahd_unlock(ahd, &s);
1379	return;
1380#if NOT_YET
1381	last_phase = ahd_inb(ahd, LASTPHASE);
1382	if (scb->sg_count > 0) {
1383		for (i = 0; i < scb->sg_count; i++) {
1384			printf("sg[%d] - Addr 0x%x : Length %d\n",
1385			       i,
1386			       ((struct ahd_dma_seg *)scb->sg_list)[i].addr,
1387			       ((struct ahd_dma_seg *)scb->sg_list)[i].len
1388				& AHD_SG_LEN_MASK);
1389		}
1390	}
1391	if (scb->flags & (SCB_DEVICE_RESET|SCB_ABORT)) {
1392		/*
1393		 * Been down this road before.
1394		 * Do a full bus reset.
1395		 */
1396bus_reset:
1397		ahd_set_transaction_status(scb, CAM_CMD_TIMEOUT);
1398		found = ahd_reset_channel(ahd, channel, /*Initiate Reset*/TRUE);
1399		printf("%s: Issued Channel %c Bus Reset. "
1400		       "%d SCBs aborted\n", ahd_name(ahd), channel, found);
1401	} else {
1402		/*
1403		 * If we are a target, transition to bus free and report
1404		 * the timeout.
1405		 *
1406		 * The target/initiator that is holding up the bus may not
1407		 * be the same as the one that triggered this timeout
1408		 * (different commands have different timeout lengths).
1409		 * If the bus is idle and we are acting as the initiator
1410		 * for this request, queue a BDR message to the timed out
1411		 * target.  Otherwise, if the timed out transaction is
1412		 * active:
1413		 *   Initiator transaction:
1414		 *	Stuff the message buffer with a BDR message and assert
1415		 *	ATN in the hopes that the target will let go of the bus
1416		 *	and go to the mesgout phase.  If this fails, we'll
1417		 *	get another timeout 2 seconds later which will attempt
1418		 *	a bus reset.
1419		 *
1420		 *   Target transaction:
1421		 *	Transition to BUS FREE and report the error.
1422		 *	It's good to be the target!
1423		 */
1424		u_int active_scb_index;
1425		u_int saved_scbptr;
1426
1427		saved_scbptr = ahd_get_scbptr(ahd);
1428		active_scb_index = saved_scbptr;
1429
1430		if (last_phase != P_BUSFREE
1431		  && (ahd_inb(ahd, SEQ_FLAGS) & NOT_IDENTIFIED) == 0
1432		  && (active_scb_index < ahd->scb_data.numscbs)) {
1433			struct scb *active_scb;
1434
1435			/*
1436			 * If the active SCB is not us, assume that
1437			 * the active SCB has a longer timeout than
1438			 * the timedout SCB, and wait for the active
1439			 * SCB to timeout.
1440			 */
1441			active_scb = ahd_lookup_scb(ahd, active_scb_index);
1442			if (active_scb != scb) {
1443				struct	 ccb_hdr *ccbh;
1444				uint64_t newtimeout;
1445
1446				ahd_print_path(ahd, scb);
1447				printf("Other SCB Timeout%s",
1448			 	       (scb->flags & SCB_OTHERTCL_TIMEOUT) != 0
1449				       ? " again\n" : "\n");
1450				scb->flags |= SCB_OTHERTCL_TIMEOUT;
1451				newtimeout =
1452				    MAX(active_scb->io_ctx->ccb_h.timeout,
1453					scb->io_ctx->ccb_h.timeout);
1454				newtimeout *= hz;
1455				newtimeout /= 1000;
1456				ccbh = &scb->io_ctx->ccb_h;
1457				scb->io_ctx->ccb_h.timeout_ch =
1458				    timeout(ahd_timeout, scb, newtimeout);
1459				ahd_unpause(ahd);
1460				ahd_unlock(ahd, &s);
1461				return;
1462			}
1463
1464			/* It's us */
1465			if ((scb->hscb->control & TARGET_SCB) != 0) {
1466
1467				/*
1468				 * Send back any queued up transactions
1469				 * and properly record the error condition.
1470				 */
1471				ahd_abort_scbs(ahd, SCB_GET_TARGET(ahd, scb),
1472					       SCB_GET_CHANNEL(ahd, scb),
1473					       SCB_GET_LUN(scb),
1474					       SCB_GET_TAG(scb),
1475					       ROLE_TARGET,
1476					       CAM_CMD_TIMEOUT);
1477
1478				/* Will clear us from the bus */
1479				ahd_restart(ahd);
1480				ahd_unlock(ahd, &s);
1481				return;
1482			}
1483
1484			ahd_set_recoveryscb(ahd, active_scb);
1485			ahd_outb(ahd, MSG_OUT, HOST_MSG);
1486			ahd_outb(ahd, SCSISIGO, last_phase|ATNO);
1487			ahd_print_path(ahd, active_scb);
1488			printf("BDR message in message buffer\n");
1489			active_scb->flags |= SCB_DEVICE_RESET;
1490			active_scb->io_ctx->ccb_h.timeout_ch =
1491			    timeout(ahd_timeout, (caddr_t)active_scb, 2 * hz);
1492			ahd_unpause(ahd);
1493		} else {
1494			int	 disconnected;
1495
1496			/* XXX Shouldn't panic.  Just punt instead? */
1497			if ((scb->hscb->control & TARGET_SCB) != 0)
1498				panic("Timed-out target SCB but bus idle");
1499
1500			if (last_phase != P_BUSFREE
1501			 && (ahd_inb(ahd, SSTAT0) & TARGET) != 0) {
1502				/* XXX What happened to the SCB? */
1503				/* Hung target selection.  Goto busfree */
1504				printf("%s: Hung target selection\n",
1505				       ahd_name(ahd));
1506				ahd_restart(ahd);
1507				ahd_unlock(ahd, &s);
1508				return;
1509			}
1510
1511			if (ahd_search_qinfifo(ahd, target, channel, lun,
1512					       SCB_GET_TAG(scb), ROLE_INITIATOR,
1513					       /*status*/0, SEARCH_COUNT) > 0) {
1514				disconnected = FALSE;
1515			} else {
1516				disconnected = TRUE;
1517			}
1518
1519			if (disconnected) {
1520
1521				ahd_set_recoveryscb(ahd, scb);
1522				/*
1523				 * Actually re-queue this SCB in an attempt
1524				 * to select the device before it reconnects.
1525				 * In either case (selection or reselection),
1526				 * we will now issue a target reset to the
1527				 * timed-out device.
1528				 *
1529				 * Set the MK_MESSAGE control bit indicating
1530				 * that we desire to send a message.  We
1531				 * also set the disconnected flag since
1532				 * in the paging case there is no guarantee
1533				 * that our SCB control byte matches the
1534				 * version on the card.  We don't want the
1535				 * sequencer to abort the command thinking
1536				 * an unsolicited reselection occurred.
1537				 */
1538				scb->hscb->control |= MK_MESSAGE|DISCONNECTED;
1539				scb->flags |= SCB_DEVICE_RESET;
1540
1541				/*
1542				 * The sequencer will never re-reference the
1543				 * in-core SCB.  To make sure we are notified
1544				 * during reselection, set the MK_MESSAGE flag
1545				 * in the card's copy of the SCB.
1546				 */
1547				ahd_set_scbptr(ahd, SCB_GET_TAG(scb));
1548				ahd_outb(ahd, SCB_CONTROL,
1549					 ahd_inb(ahd, SCB_CONTROL)|MK_MESSAGE);
1550
1551				/*
1552				 * Clear out any entries in the QINFIFO first
1553				 * so we are the next SCB for this target
1554				 * to run.
1555				 */
1556				ahd_search_qinfifo(ahd,
1557						   SCB_GET_TARGET(ahd, scb),
1558						   channel, SCB_GET_LUN(scb),
1559						   SCB_LIST_NULL,
1560						   ROLE_INITIATOR,
1561						   CAM_REQUEUE_REQ,
1562						   SEARCH_COMPLETE);
1563				ahd_print_path(ahd, scb);
1564				printf("Queuing a BDR SCB\n");
1565				ahd_qinfifo_requeue_tail(ahd, scb);
1566				ahd_set_scbptr(ahd, saved_scbptr);
1567				scb->io_ctx->ccb_h.timeout_ch =
1568				    timeout(ahd_timeout, (caddr_t)scb, 2 * hz);
1569				ahd_unpause(ahd);
1570			} else {
1571				/* Go "immediately" to the bus reset */
1572				/* This shouldn't happen */
1573				ahd_set_recoveryscb(ahd, scb);
1574				ahd_print_path(ahd, scb);
1575				printf("SCB %d: Immediate reset.  "
1576					"Flags = 0x%x\n", SCB_GET_TAG(scb),
1577					scb->flags);
1578				goto bus_reset;
1579			}
1580		}
1581	}
1582	ahd_unlock(ahd, &s);
1583#endif
1584}
1585
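/*
 * Service an XPT_ABORT request.  Only CCBs still waiting on one of our
 * target mode queues can be aborted here; aborting active SCSI I/O is
 * not yet implemented.
 */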
1586static void
1587ahd_abort_ccb(struct ahd_softc *ahd, struct cam_sim *sim, union ccb *ccb)
1588{
1589	union ccb *abort_ccb;
1590
1591	abort_ccb = ccb->cab.abort_ccb;
1592	switch (abort_ccb->ccb_h.func_code) {
1593#ifdef AHD_TARGET_MODE
1594	case XPT_ACCEPT_TARGET_IO:
1595	case XPT_IMMED_NOTIFY:
1596	case XPT_CONT_TARGET_IO:
1597	{
1598		struct ahd_tmode_tstate *tstate;
1599		struct ahd_tmode_lstate *lstate;
1600		struct ccb_hdr_slist *list;
1601		cam_status status;
1602
1603		status = ahd_find_tmode_devs(ahd, sim, abort_ccb, &tstate,
1604					     &lstate, TRUE);
1605
1606		if (status != CAM_REQ_CMP) {
1607			ccb->ccb_h.status = status;
1608			break;
1609		}
1610
1611		if (abort_ccb->ccb_h.func_code == XPT_ACCEPT_TARGET_IO)
1612			list = &lstate->accept_tios;
1613		else if (abort_ccb->ccb_h.func_code == XPT_IMMED_NOTIFY)
1614			list = &lstate->immed_notifies;
1615		else
1616			list = NULL;
1617
1618		if (list != NULL) {
1619			struct ccb_hdr *curelm;
1620			int found;
1621
1622			curelm = SLIST_FIRST(list);
1623			found = 0;
1624			if (curelm == &abort_ccb->ccb_h) {
1625				found = 1;
1626				SLIST_REMOVE_HEAD(list, sim_links.sle);
1627			} else {
1628				while(curelm != NULL) {
1629					struct ccb_hdr *nextelm;
1630
1631					nextelm =
1632					    SLIST_NEXT(curelm, sim_links.sle);
1633
1634					if (nextelm == &abort_ccb->ccb_h) {
1635						found = 1;
1636						SLIST_NEXT(curelm,
1637							   sim_links.sle) =
1638						    SLIST_NEXT(nextelm,
1639							       sim_links.sle);
1640						break;
1641					}
1642					curelm = nextelm;
1643				}
1644			}
1645
1646			if (found) {
1647				abort_ccb->ccb_h.status = CAM_REQ_ABORTED;
1648				xpt_done(abort_ccb);
1649				ccb->ccb_h.status = CAM_REQ_CMP;
1650			} else {
1651				xpt_print_path(abort_ccb->ccb_h.path);
1652				printf("Not found\n");
1653				ccb->ccb_h.status = CAM_PATH_INVALID;
1654			}
1655			break;
1656		}
1657		/* FALLTHROUGH */
1658	}
1659#endif
1660	case XPT_SCSI_IO:
1661		/* XXX Fully implement the hard ones */
1662		ccb->ccb_h.status = CAM_UA_ABORT;
1663		break;
1664	default:
1665		ccb->ccb_h.status = CAM_REQ_INVALID;
1666		break;
1667	}
1668	xpt_done(ccb);
1669}
1670
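/*
 * Report an asynchronous event (transfer negotiation change, BDR, or
 * bus reset) to CAM for the given channel/target/lun.
 */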
1671void
1672ahd_send_async(struct ahd_softc *ahd, char channel, u_int target,
1673		u_int lun, ac_code code, void *opt_arg)
1674{
1675	struct	ccb_trans_settings cts;
1676	struct cam_path *path;
1677	void *arg;
1678	int error;
1679
1680	arg = NULL;
1681	error = ahd_create_path(ahd, channel, target, lun, &path);
1682
1683	if (error != CAM_REQ_CMP)
1684		return;
1685
1686	switch (code) {
1687	case AC_TRANSFER_NEG:
1688	{
1689#ifdef AHD_NEW_TRAN_SETTINGS
1690		struct	ccb_trans_settings_scsi *scsi;
1691
1692		cts.type = CTS_TYPE_CURRENT_SETTINGS;
1693		scsi = &cts.proto_specific.scsi;
1694#else
1695		cts.flags = CCB_TRANS_CURRENT_SETTINGS;
1696#endif
1697		cts.ccb_h.path = path;
1698		cts.ccb_h.target_id = target;
1699		cts.ccb_h.target_lun = lun;
1700		ahd_get_tran_settings(ahd, ahd->our_id, channel, &cts);
1701		arg = &cts;
1702#ifdef AHD_NEW_TRAN_SETTINGS
1703		scsi->valid &= ~CTS_SCSI_VALID_TQ;
1704		scsi->flags &= ~CTS_SCSI_FLAGS_TAG_ENB;
1705#else
1706		cts.valid &= ~CCB_TRANS_TQ_VALID;
1707		cts.flags &= ~CCB_TRANS_TAG_ENB;
1708#endif
1709		if (opt_arg == NULL)
1710			break;
1711		if (*((ahd_queue_alg *)opt_arg) == AHD_QUEUE_TAGGED)
1712#ifdef AHD_NEW_TRAN_SETTINGS
1713			scsi->flags |= CTS_SCSI_FLAGS_TAG_ENB;
1714		scsi->valid |= CTS_SCSI_VALID_TQ;
1715#else
1716			cts.flags |= CCB_TRANS_TAG_ENB;
1717		cts.valid |= CCB_TRANS_TQ_VALID;
1718#endif
1719		break;
1720	}
1721	case AC_SENT_BDR:
1722	case AC_BUS_RESET:
1723		break;
1724	default:
1725		panic("ahd_send_async: Unexpected async event");
1726	}
1727	xpt_async(code, path, arg);
1728	xpt_free_path(path);
1729}
1730
1731void
1732ahd_platform_set_tags(struct ahd_softc *ahd,
1733		      struct ahd_devinfo *devinfo, int enable)
1734{
1735}
1736
1737int
1738ahd_platform_alloc(struct ahd_softc *ahd, void *platform_arg)
1739{
1740	ahd->platform_data = malloc(sizeof(struct ahd_platform_data), M_DEVBUF,
1741	    M_NOWAIT | M_ZERO);
1742	if (ahd->platform_data == NULL)
1743		return (ENOMEM);
1744	return (0);
1745}
1746
1747void
1748ahd_platform_free(struct ahd_softc *ahd)
1749{
1750	struct ahd_platform_data *pdata;
1751
1752	pdata = ahd->platform_data;
1753	if (pdata != NULL) {
1754		if (pdata->regs[0] != NULL)
1755			bus_release_resource(ahd->dev_softc,
1756					     pdata->regs_res_type[0],
1757					     pdata->regs_res_id[0],
1758					     pdata->regs[0]);
1759
1760		if (pdata->regs[1] != NULL)
1761			bus_release_resource(ahd->dev_softc,
1762					     pdata->regs_res_type[1],
1763					     pdata->regs_res_id[1],
1764					     pdata->regs[1]);
1765
1766		if (pdata->irq != NULL)
1767			bus_release_resource(ahd->dev_softc,
1768					     pdata->irq_res_type,
1769					     0, pdata->irq);
1770
1771		if (pdata->sim_b != NULL) {
1772			xpt_async(AC_LOST_DEVICE, pdata->path_b, NULL);
1773			xpt_free_path(pdata->path_b);
1774			xpt_bus_deregister(cam_sim_path(pdata->sim_b));
1775			cam_sim_free(pdata->sim_b, /*free_devq*/TRUE);
1776		}
1777		if (pdata->sim != NULL) {
1778			xpt_async(AC_LOST_DEVICE, pdata->path, NULL);
1779			xpt_free_path(pdata->path);
1780			xpt_bus_deregister(cam_sim_path(pdata->sim));
1781			cam_sim_free(pdata->sim, /*free_devq*/TRUE);
1782		}
1783		if (pdata->eh != NULL)
1784			EVENTHANDLER_DEREGISTER(shutdown_final, pdata->eh);
1785		free(ahd->platform_data, M_DEVBUF);
1786	}
1787}
1788
1789int
1790ahd_softc_comp(struct ahd_softc *lahd, struct ahd_softc *rahd)
1791{
1792	/* We don't sort softcs under FreeBSD, so always report equal. */
1793	return (0);
1794}
1795
1796int
1797ahd_detach(device_t dev)
1798{
1799	struct ahd_softc *ahd;
1800	u_long l;
1801	u_long s;
1802
1803	ahd_list_lock(&l);
1804	device_printf(dev, "detaching device\n");
1805	ahd = device_get_softc(dev);
1806	ahd = ahd_find_softc(ahd);
1807	if (ahd == NULL) {
1808		device_printf(dev, "aic7xxx already detached\n");
1809		ahd_list_unlock(&l);
1810		return (ENOENT);
1811	}
1812	ahd_lock(ahd, &s);
1813	ahd_intr_enable(ahd, FALSE);
1814	bus_teardown_intr(dev, ahd->platform_data->irq, ahd->platform_data->ih);
1815	ahd_unlock(ahd, &s);
1816	ahd_free(ahd);
1817	ahd_list_unlock(&l);
1818	return (0);
1819}
1820
1821#if UNUSED
1822static void
1823ahd_dump_targcmd(struct target_cmd *cmd)
1824{
1825	uint8_t *byte;
1826	uint8_t *last_byte;
1827	int i;
1828
1829	byte = &cmd->initiator_channel;
1830	/* Debugging info for received commands */
1831	last_byte = &cmd[1].initiator_channel;
1832
1833	i = 0;
1834	while (byte < last_byte) {
1835		if (i == 0)
1836			printf("\t");
1837		printf("%#x", *byte++);
1838		i++;
1839		if (i == 8) {
1840			printf("\n");
1841			i = 0;
1842		} else {
1843			printf(", ");
1844		}
1845	}
1846}
1847#endif
1848
1849static int
1850ahd_modevent(module_t mod, int type, void *data)
1851{
1852	/* XXX Deal with busy status on unload. */
1853	return 0;
1854}
1855
1856static moduledata_t ahd_mod = {
1857	"ahd",
1858	ahd_modevent,
1859	NULL
1860};
1861
1862/********************************** DDB Hooks *********************************/
1863#ifdef DDB
1864static struct ahd_softc *ahd_ddb_softc;
1865static int ahd_ddb_paused;
1866static int ahd_ddb_paused_on_entry;
1867DB_COMMAND(ahd_set_unit, ahd_ddb_set_unit)
1868{
1869	struct ahd_softc *list_ahd;
1870
1871	ahd_ddb_softc = NULL;
1872	TAILQ_FOREACH(list_ahd, &ahd_tailq, links) {
1873		if (list_ahd->unit == addr)
1874			ahd_ddb_softc = list_ahd;
1875	}
1876	if (ahd_ddb_softc == NULL)
1877		db_error("No matching softc found!\n");
1878}
1879
1880DB_COMMAND(ahd_pause, ahd_ddb_pause)
1881{
1882	if (ahd_ddb_softc == NULL) {
1883		db_error("Must set unit with ahd_set_unit first!\n");
1884		return;
1885	}
1886	if (ahd_ddb_paused == 0) {
1887		ahd_ddb_paused++;
1888		if (ahd_is_paused(ahd_ddb_softc)) {
1889			ahd_ddb_paused_on_entry++;
1890			return;
1891		}
1892		ahd_pause(ahd_ddb_softc);
1893	}
1894}
1895
1896DB_COMMAND(ahd_unpause, ahd_ddb_unpause)
1897{
1898	if (ahd_ddb_softc == NULL) {
1899		db_error("Must set unit with ahd_set_unit first!\n");
1900		return;
1901	}
1902	if (ahd_ddb_paused != 0) {
1903		ahd_ddb_paused = 0;
1904		if (ahd_ddb_paused_on_entry)
1905			return;
1906		ahd_unpause(ahd_ddb_softc);
1907	} else if (ahd_ddb_paused_on_entry != 0) {
1908		/* Two unpauses to clear a paused on entry. */
1909		ahd_ddb_paused_on_entry = 0;
1910		ahd_unpause(ahd_ddb_softc);
1911	}
1912}
1913
1914DB_COMMAND(ahd_in, ahd_ddb_in)
1915{
1916	int c;
1917	int size;
1918
1919	if (ahd_ddb_softc == NULL) {
1920		db_error("Must set unit with ahd_set_unit first!\n");
1921		return;
1922	}
1923	if (have_addr == 0)
1924		return;
1925
1926	size = 1;
1927	while ((c = *modif++) != '\0') {
1928		switch (c) {
1929		case 'b':
1930			size = 1;
1931			break;
1932		case 'w':
1933			size = 2;
1934			break;
1935		case 'l':
1936			size = 4;
1937			break;
1938		}
1939	}
1940
1941	if (count <= 0)
1942		count = 1;
1943	while (--count >= 0) {
1944		db_printf("%04lx (M)%x: \t", (u_long)addr,
1945			  ahd_inb(ahd_ddb_softc, MODE_PTR));
1946		switch (size) {
1947		case 1:
1948			db_printf("%02x\n", ahd_inb(ahd_ddb_softc, addr));
1949			break;
1950		case 2:
1951			db_printf("%04x\n", ahd_inw(ahd_ddb_softc, addr));
1952			break;
1953		case 4:
1954			db_printf("%08x\n", ahd_inl(ahd_ddb_softc, addr));
1955			break;
1956		}
1957	}
1958}
1959
1960DB_SET(ahd_out, ahd_ddb_out, db_cmd_set, CS_MORE, NULL)
1961{
1962	db_expr_t old_value;
1963	db_expr_t new_value;
1964	int	  size;
1965
1966	if (ahd_ddb_softc == NULL) {
1967		db_error("Must set unit with ahd_set_unit first!\n");
1968		return;
1969	}
1970
1971	switch (modif[0]) {
1972	case '\0':
1973	case 'b':
1974		size = 1;
1975		break;
1976	case 'h':
1977		size = 2;
1978		break;
1979	case 'l':
1980		size = 4;
1981		break;
1982	default:
1983		db_error("Unknown size\n");
1984		return;
1985	}
1986
1987	while (db_expression(&new_value)) {
1988		switch (size) {
1989		default:
1990		case 1:
1991			old_value = ahd_inb(ahd_ddb_softc, addr);
1992			ahd_outb(ahd_ddb_softc, addr, new_value);
1993			break;
1994		case 2:
1995			old_value = ahd_inw(ahd_ddb_softc, addr);
1996			ahd_outw(ahd_ddb_softc, addr, new_value);
1997			break;
1998		case 4:
1999			old_value = ahd_inl(ahd_ddb_softc, addr);
2000			ahd_outl(ahd_ddb_softc, addr, new_value);
2001			break;
2002		}
2003		db_printf("%04lx (M)%x: \t0x%lx\t=\t0x%lx",
2004			  (u_long)addr, ahd_inb(ahd_ddb_softc, MODE_PTR),
2005			  (u_long)old_value, (u_long)new_value);
2006		addr += size;
2007	}
2008	db_skip_to_eol();
2009}
2010
2011#endif
2012
2013
2014DECLARE_MODULE(ahd, ahd_mod, SI_SUB_DRIVERS, SI_ORDER_MIDDLE);
2015MODULE_DEPEND(ahd, cam, 1, 1, 1);
2016MODULE_VERSION(ahd, 1);
2017