1/*
2 * Core routines and tables shareable across OS platforms.
3 *
4 * Copyright (c) 1994, 1995, 1996, 1997, 1998, 1999, 2000 Justin T. Gibbs.
5 * All rights reserved.
6 *
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
9 * are met:
10 * 1. Redistributions of source code must retain the above copyright
11 *    notice, this list of conditions, and the following disclaimer,
12 *    without modification.
13 * 2. The name of the author may not be used to endorse or promote products
14 *    derived from this software without specific prior written permission.
15 *
16 * Alternatively, this software may be distributed under the terms of the
17 * GNU Public License ("GPL").
18 *
19 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
20 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
21 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
22 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
23 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
24 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
25 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
26 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
27 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
28 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
29 * SUCH DAMAGE.
30 *
31 * $Id: //depot/src/aic7xxx/aic7xxx.c#26 $
32 *
33 * $FreeBSD: head/sys/dev/aic7xxx/aic7xxx.c 71473 2001-01-23 22:17:03Z gibbs $
34 */
35
36#ifdef	__linux__
37#include "aic7xxx_linux.h"
38#include "aic7xxx_inline.h"
39#include "aicasm/aicasm_insformat.h"
40#endif
41
42#ifdef	__FreeBSD__
43#include <dev/aic7xxx/aic7xxx_freebsd.h>
44#include <dev/aic7xxx/aic7xxx_inline.h>
45#include <dev/aic7xxx/aicasm/aicasm_insformat.h>
46#endif
47
48/****************************** Softc Data ************************************/
49struct ahc_softc_tailq ahc_tailq = TAILQ_HEAD_INITIALIZER(ahc_tailq);
50
51/***************************** Lookup Tables **********************************/
52char *ahc_chip_names[] =
53{
54	"NONE",
55	"aic7770",
56	"aic7850",
57	"aic7855",
58	"aic7859",
59	"aic7860",
60	"aic7870",
61	"aic7880",
62	"aic7895",
63	"aic7895C",
64	"aic7890/91",
65	"aic7896/97",
66	"aic7892",
67	"aic7899"
68};
69const u_int num_chip_names = NUM_ELEMENTS(ahc_chip_names);
70
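/*
 * Hardware error codes reported via BRKADRINT, paired with the
 * descriptions printed by ahc_handle_brkadrint().
 */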
struct hard_error_entry hard_error[] = {
	{ ILLHADDR,	"Illegal Host Access" },
	{ ILLSADDR,	"Illegal Sequencer Address referenced" },
	{ ILLOPCODE,	"Illegal Opcode in sequencer program" },
	{ SQPARERR,	"Sequencer Parity Error" },
	{ DPARERR,	"Data-path Parity Error" },
	{ MPARERR,	"Scratch or SCB Memory Parity Error" },
	{ PCIERRSTAT,	"PCI Error detected" },
	{ CIOPARERR,	"CIOBUS Parity Error" },
};
81const u_int num_errors = NUM_ELEMENTS(hard_error);
82
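/*
 * Maps each SCSI bus phase to the message we should send the target
 * if a parity error is detected in that phase and to a description
 * string used in diagnostic output.
 */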
83struct phase_table_entry phase_table[] =
84{
85	{ P_DATAOUT,	MSG_NOOP,		"in Data-out phase"	},
86	{ P_DATAIN,	MSG_INITIATOR_DET_ERR,	"in Data-in phase"	},
87	{ P_DATAOUT_DT,	MSG_NOOP,		"in DT Data-out phase"	},
88	{ P_DATAIN_DT,	MSG_INITIATOR_DET_ERR,	"in DT Data-in phase"	},
89	{ P_COMMAND,	MSG_NOOP,		"in Command phase"	},
90	{ P_MESGOUT,	MSG_NOOP,		"in Message-out phase"	},
91	{ P_STATUS,	MSG_INITIATOR_DET_ERR,	"in Status phase"	},
92	{ P_MESGIN,	MSG_PARITY_ERROR,	"in Message-in phase"	},
93	{ P_BUSFREE,	MSG_NOOP,		"while idle"		},
94	{ 0,		MSG_NOOP,		"in unknown phase"	}
95};
96
/*
 * In most cases we only wish to iterate over real phases, so
 * exclude the last element from the count.
 */
101const u_int num_phases = NUM_ELEMENTS(phase_table) - 1;
102
/*
 * Valid SCSIRATE values.  (p. 3-17)
 * Provides a mapping of transfer periods in ns to the proper value to
 * stick in the SCSIRATE register.
 */
108struct ahc_syncrate ahc_syncrates[] =
109{
110      /* ultra2    fast/ultra  period     rate */
111	{ 0x42,      0x000,      9,      "80.0" },
112	{ 0x03,      0x000,     10,      "40.0" },
113	{ 0x04,      0x000,     11,      "33.0" },
114	{ 0x05,      0x100,     12,      "20.0" },
115	{ 0x06,      0x110,     15,      "16.0" },
116	{ 0x07,      0x120,     18,      "13.4" },
117	{ 0x08,      0x000,     25,      "10.0" },
118	{ 0x19,      0x010,     31,      "8.0"  },
119	{ 0x1a,      0x020,     37,      "6.67" },
120	{ 0x1b,      0x030,     43,      "5.7"  },
121	{ 0x1c,      0x040,     50,      "5.0"  },
122	{ 0x00,      0x050,     56,      "4.4"  },
123	{ 0x00,      0x060,     62,      "4.0"  },
124	{ 0x00,      0x070,     68,      "3.6"  },
125	{ 0x00,      0x000,      0,      NULL   }
126};
127
128/* Our Sequencer Program */
129#include "aic7xxx_seq.h"
130
131/**************************** Function Declarations ***************************/
132static struct tmode_tstate*
133			ahc_alloc_tstate(struct ahc_softc *ahc,
134					 u_int scsi_id, char channel);
135#ifdef AHC_TARGET_MODE
136static void		ahc_free_tstate(struct ahc_softc *ahc,
137					u_int scsi_id, char channel, int force);
138#endif
139static struct ahc_syncrate*
140			ahc_devlimited_syncrate(struct ahc_softc *ahc,
141					        struct ahc_initiator_tinfo *,
142						u_int *period,
143						u_int *ppr_options,
144						role_t role);
145static void		ahc_update_pending_syncrates(struct ahc_softc *ahc);
146static void		ahc_fetch_devinfo(struct ahc_softc *ahc,
147					  struct ahc_devinfo *devinfo);
148static void		ahc_scb_devinfo(struct ahc_softc *ahc,
149					struct ahc_devinfo *devinfo,
150					struct scb *scb);
151static void		ahc_setup_initiator_msgout(struct ahc_softc *ahc,
152						   struct ahc_devinfo *devinfo,
153						   struct scb *scb);
154static void		ahc_build_transfer_msg(struct ahc_softc *ahc,
155					       struct ahc_devinfo *devinfo);
156static void		ahc_construct_sdtr(struct ahc_softc *ahc,
157					   struct ahc_devinfo *devinfo,
158					   u_int period, u_int offset);
159static void		ahc_construct_wdtr(struct ahc_softc *ahc,
160					   struct ahc_devinfo *devinfo,
161					   u_int bus_width);
162static void		ahc_construct_ppr(struct ahc_softc *ahc,
163					  struct ahc_devinfo *devinfo,
164					  u_int period, u_int offset,
165					  u_int bus_width, u_int ppr_options);
166static void		ahc_clear_msg_state(struct ahc_softc *ahc);
167static void		ahc_handle_message_phase(struct ahc_softc *ahc);
168typedef enum {
169	AHCMSG_1B,
170	AHCMSG_2B,
171	AHCMSG_EXT
172} ahc_msgtype;
173static int		ahc_sent_msg(struct ahc_softc *ahc, ahc_msgtype type,
174				     u_int msgval, int full);
175static int		ahc_parse_msg(struct ahc_softc *ahc,
176				      struct ahc_devinfo *devinfo);
177static int		ahc_handle_msg_reject(struct ahc_softc *ahc,
178					      struct ahc_devinfo *devinfo);
179static void		ahc_handle_ign_wide_residue(struct ahc_softc *ahc,
180						struct ahc_devinfo *devinfo);
181static void		ahc_handle_devreset(struct ahc_softc *ahc,
182					    struct ahc_devinfo *devinfo,
183					    cam_status status, char *message,
184					    int verbose_level);
185
186static bus_dmamap_callback_t	ahc_dmamap_cb;
187static void			ahc_build_free_scb_list(struct ahc_softc *ahc);
188static int			ahc_init_scbdata(struct ahc_softc *ahc);
189static void			ahc_fini_scbdata(struct ahc_softc *ahc);
190static void		ahc_qinfifo_requeue(struct ahc_softc *ahc,
191					    struct scb *prev_scb,
192					    struct scb *scb);
193static int		ahc_qinfifo_count(struct ahc_softc *ahc);
194static u_int		ahc_rem_scb_from_disc_list(struct ahc_softc *ahc,
195						   u_int prev, u_int scbptr);
196static void		ahc_add_curscb_to_free_list(struct ahc_softc *ahc);
197static u_int		ahc_rem_wscb(struct ahc_softc *ahc,
198				     u_int scbpos, u_int prev);
199static int		ahc_abort_scbs(struct ahc_softc *ahc, int target,
200				       char channel, int lun, u_int tag,
201				       role_t role, uint32_t status);
202static void		ahc_reset_current_bus(struct ahc_softc *ahc);
203static void		ahc_calc_residual(struct scb *scb);
204#ifdef AHC_DUMP_SEQ
205static void		ahc_dumpseq(struct ahc_softc *ahc);
206#endif
207static void		ahc_loadseq(struct ahc_softc *ahc);
208static int		ahc_check_patch(struct ahc_softc *ahc,
209					struct patch **start_patch,
210					u_int start_instr, u_int *skip_addr);
211static void		ahc_download_instr(struct ahc_softc *ahc,
212					   u_int instrptr, uint8_t *dconsts);
213#ifdef AHC_TARGET_MODE
214static void		ahc_queue_lstate_event(struct ahc_softc *ahc,
215					       struct tmode_lstate *lstate,
216					       u_int initiator_id,
217					       u_int event_type,
218					       u_int event_arg);
219static void		ahc_update_scsiid(struct ahc_softc *ahc,
220					  u_int targid_mask);
221static int		ahc_handle_target_cmd(struct ahc_softc *ahc,
222					      struct target_cmd *cmd);
223#endif
224/************************* Sequencer Execution Control ************************/
225/*
226 * Restart the sequencer program from address zero
227 */
228void
229restart_sequencer(struct ahc_softc *ahc)
230{
231
232	pause_sequencer(ahc);
233	ahc_outb(ahc, SCSISIGO, 0);		/* De-assert BSY */
234	ahc_outb(ahc, MSG_OUT, MSG_NOOP);	/* No message to send */
235	ahc_outb(ahc, SXFRCTL1, ahc_inb(ahc, SXFRCTL1) & ~BITBUCKET);
236
	/*
	 * Ensure that the sequencer's idea of TQINPOS
	 * matches our own.  The sequencer increments TQINPOS
	 * only after it sees a DMA complete, so a reset could
	 * occur before the increment, leaving the kernel believing
	 * that the command arrived while the sequencer does not.
	 */
244	ahc_outb(ahc, TQINPOS, ahc->tqinfifonext);
245
246	/* Always allow reselection */
247	ahc_outb(ahc, SCSISEQ,
248		 ahc_inb(ahc, SCSISEQ_TEMPLATE) & (ENSELI|ENRSELI|ENAUTOATNP));
249	if ((ahc->features & AHC_CMD_CHAN) != 0) {
250		/* Ensure that no DMA operations are in progress */
251		ahc_outb(ahc, CCSCBCNT, 0);
252		ahc_outb(ahc, CCSGCTL, 0);
253		ahc_outb(ahc, CCSCBCTL, 0);
254	}
255	ahc_outb(ahc, MWI_RESIDUAL, 0);
256	ahc_outb(ahc, SEQCTL, FASTMODE);
257	ahc_outb(ahc, SEQADDR0, 0);
258	ahc_outb(ahc, SEQADDR1, 0);
259	unpause_sequencer(ahc);
260}
261
262/************************* Input/Output Queues ********************************/
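/*
 * Drain the queue of completed commands (the "queue out" FIFO)
 * posted by the sequencer, compute any residuals, and hand each
 * SCB to ahc_done() for completion processing.
 */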
263void
264ahc_run_qoutfifo(struct ahc_softc *ahc)
265{
266	struct scb *scb;
267	u_int  scb_index;
268
269	while (ahc->qoutfifo[ahc->qoutfifonext] != SCB_LIST_NULL) {
270
271		scb_index = ahc->qoutfifo[ahc->qoutfifonext];
272		if ((ahc->qoutfifonext & 0x03) == 0x03) {
273			u_int modnext;
274
			/*
			 * Clear 32 bits of the QOUTFIFO at a time
			 * so that we don't clobber an incoming
			 * byte DMA to the array on architectures
			 * that only support 32 bit load and store
			 * operations.
			 */
282			modnext = ahc->qoutfifonext & ~0x3;
283			*((uint32_t *)(&ahc->qoutfifo[modnext])) = 0xFFFFFFFFUL;
284		}
285		ahc->qoutfifonext++;
286
287		scb = ahc_lookup_scb(ahc, scb_index);
288		if (scb == NULL) {
289			printf("%s: WARNING no command for scb %d "
290			       "(cmdcmplt)\nQOUTPOS = %d\n",
291			       ahc_name(ahc), scb_index,
292			       ahc->qoutfifonext - 1);
293			continue;
294		}
295
296		/*
297		 * Save off the residual
298		 * if there is one.
299		 */
300		if (ahc_check_residual(scb) != 0)
301			ahc_calc_residual(scb);
302		else
303			ahc_set_residual(scb, 0);
304		ahc_done(ahc, scb);
305	}
306}
307
308void
309ahc_run_untagged_queues(struct ahc_softc *ahc)
310{
311	int i;
312
313	for (i = 0; i < 16; i++)
314		ahc_run_untagged_queue(ahc, &ahc->untagged_queues[i]);
315}
316
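/*
 * Start the SCB at the head of the given untagged queue, provided the
 * untagged queues are not locked and no untagged command is already
 * active on that queue.  Untagged transactions are run one at a time.
 */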
317void
318ahc_run_untagged_queue(struct ahc_softc *ahc, struct scb_tailq *queue)
319{
320	struct scb *scb;
321
322	if (ahc->untagged_queue_lock != 0)
323		return;
324
325	if ((scb = TAILQ_FIRST(queue)) != NULL
326	 && (scb->flags & SCB_ACTIVE) == 0) {
327		scb->flags |= SCB_ACTIVE;
328		ahc_queue_scb(ahc, scb);
329	}
330}
331
332/************************* Interrupt Handling *********************************/
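/*
 * A BRKADRINT indicates a catastrophic controller or sequencer error.
 * Report the error, fail all outstanding transactions, and shut the
 * controller down.
 */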
333void
334ahc_handle_brkadrint(struct ahc_softc *ahc)
335{
336	/*
337	 * We upset the sequencer :-(
338	 * Lookup the error message
339	 */
340	int i, error, num_errors;
341
342	error = ahc_inb(ahc, ERROR);
	num_errors = NUM_ELEMENTS(hard_error);
344	for (i = 0; error != 1 && i < num_errors; i++)
345		error >>= 1;
346	printf("%s: brkadrint, %s at seqaddr = 0x%x\n",
347	       ahc_name(ahc), hard_error[i].errmesg,
348	       ahc_inb(ahc, SEQADDR0) |
349	       (ahc_inb(ahc, SEQADDR1) << 8));
350
351	ahc_dump_card_state(ahc);
352
	/* Tell everyone that this HBA is no longer available */
354	ahc_abort_scbs(ahc, CAM_TARGET_WILDCARD, ALL_CHANNELS,
355		       CAM_LUN_WILDCARD, SCB_LIST_NULL, ROLE_UNKNOWN,
356		       CAM_NO_HBA);
357
358	/* Disable all interrupt sources by resetting the controller */
359	ahc_shutdown(ahc);
360}
361
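/*
 * Service an interrupt issued by the sequencer (SEQINT).  The
 * interrupt code is embedded in intstat; unless a handler below
 * restarts the sequencer or returns early, the sequencer is
 * unpaused once the interrupt has been serviced.
 */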
362void
363ahc_handle_seqint(struct ahc_softc *ahc, u_int intstat)
364{
365	struct scb *scb;
366	struct ahc_devinfo devinfo;
367
368	ahc_fetch_devinfo(ahc, &devinfo);
369
370	/*
371	 * Clear the upper byte that holds SEQINT status
372	 * codes and clear the SEQINT bit. We will unpause
373	 * the sequencer, if appropriate, after servicing
374	 * the request.
375	 */
376	ahc_outb(ahc, CLRINT, CLRSEQINT);
377	switch (intstat & SEQINT_MASK) {
378	case BAD_STATUS:
379	{
380		u_int  scb_index;
381		struct hardware_scb *hscb;
382
383		/*
384		 * Set the default return value to 0 (don't
385		 * send sense).  The sense code will change
386		 * this if needed.
387		 */
388		ahc_outb(ahc, RETURN_1, 0);
389
390		/*
391		 * The sequencer will notify us when a command
392		 * has an error that would be of interest to
393		 * the kernel.  This allows us to leave the sequencer
394		 * running in the common case of command completes
395		 * without error.  The sequencer will already have
396		 * dma'd the SCB back up to us, so we can reference
397		 * the in kernel copy directly.
398		 */
399		scb_index = ahc_inb(ahc, SCB_TAG);
400		scb = ahc_lookup_scb(ahc, scb_index);
401		if (scb == NULL) {
402			printf("%s:%c:%d: ahc_intr - referenced scb "
403			       "not valid during seqint 0x%x scb(%d)\n",
404			       ahc_name(ahc), devinfo.channel,
405			       devinfo.target, intstat, scb_index);
406			ahc_dump_card_state(ahc);
407			panic("for safety");
408			goto unpause;
409		}
410
411		hscb = scb->hscb;
412
413		/* Don't want to clobber the original sense code */
414		if ((scb->flags & SCB_SENSE) != 0) {
415			/*
416			 * Clear the SCB_SENSE Flag and have
417			 * the sequencer do a normal command
418			 * complete.
419			 */
420			scb->flags &= ~SCB_SENSE;
421			ahc_set_transaction_status(scb, CAM_AUTOSENSE_FAIL);
422			break;
423		}
424		ahc_set_transaction_status(scb, CAM_SCSI_STATUS_ERROR);
425		/* Freeze the queue until the client sees the error. */
426		ahc_freeze_devq(ahc, scb);
427		ahc_freeze_scb(scb);
428		ahc_set_scsi_status(scb, hscb->shared_data.status.scsi_status);
429		switch (hscb->shared_data.status.scsi_status) {
430		case SCSI_STATUS_OK:
			printf("%s: Interrupted for status of 0???\n",
432			       ahc_name(ahc));
433			break;
434		case SCSI_STATUS_CMD_TERMINATED:
435		case SCSI_STATUS_CHECK_COND:
436#ifdef AHC_DEBUG
437			if (ahc_debug & AHC_SHOWSENSE) {
438				ahc_print_path(ahc, scb);
439				printf("SCB %d: requests Check Status\n",
440				       scb->hscb->tag);
441			}
442#endif
443
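			/*
			 * Reuse this SCB to issue a REQUEST SENSE
			 * command to the target.  The CDB is built
			 * in the hardware SCB's shared data area and
			 * a single S/G element is pointed at the
			 * sense buffer supplied for this command.
			 */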
444			if (ahc_perform_autosense(scb)) {
445				struct ahc_dma_seg *sg;
446				struct scsi_sense *sc;
447				struct ahc_initiator_tinfo *targ_info;
448				struct tmode_tstate *tstate;
449				struct ahc_transinfo *tinfo;
450
451				targ_info =
452				    ahc_fetch_transinfo(ahc,
453							devinfo.channel,
454							devinfo.our_scsiid,
455							devinfo.target,
456							&tstate);
457				tinfo = &targ_info->current;
458				sg = scb->sg_list;
459				sc = (struct scsi_sense *)
460				     (&hscb->shared_data.cdb);
461				/*
462				 * Save off the residual if there is one.
463				 */
464				if (ahc_check_residual(scb))
465					ahc_calc_residual(scb);
466				else
467					ahc_set_residual(scb, 0);
468#ifdef AHC_DEBUG
469				if (ahc_debug & AHC_SHOWSENSE) {
470					ahc_print_path(ahc, scb);
471					printf("Sending Sense\n");
472				}
473#endif
474				sg->addr = ahc_get_sense_bufaddr(ahc, scb);
475				sg->len = ahc_get_sense_bufsize(ahc, scb);
476				sg->len |= AHC_DMA_LAST_SEG;
477
478				/* Fixup byte order */
479				sg->addr = ahc_htole32(sg->addr);
480				sg->len = ahc_htole32(sg->len);
481
482				sc->opcode = REQUEST_SENSE;
483				sc->byte2 = 0;
484				if (tinfo->protocol_version <= SCSI_REV_2
485				 && SCB_GET_LUN(scb) < 8)
486					sc->byte2 = SCB_GET_LUN(scb) << 5;
487				sc->unused[0] = 0;
488				sc->unused[1] = 0;
489				sc->length = sg->len;
490				sc->control = 0;
491
492				/*
493				 * XXX Still true???
494				 * Would be nice to preserve DISCENB here,
495				 * but due to the way we manage busy targets,
496				 * we can't.
497				 */
498				hscb->control = 0;
499
500				/*
				 * This request sense could be because the
				 * device lost power or in some other
503				 * way has lost our transfer negotiations.
504				 * Renegotiate if appropriate.  Unit attention
505				 * errors will be reported before any data
506				 * phases occur.
507				 */
508				if (ahc_get_residual(scb)
509				 == ahc_get_transfer_length(scb)) {
510					ahc_update_target_msg_request(ahc,
511							      &devinfo,
512							      targ_info,
513							      /*force*/TRUE,
514							      /*paused*/TRUE);
515				}
516				hscb->cdb_len = sizeof(*sc);
517				hscb->dataptr = sg->addr;
518				hscb->datacnt = sg->len;
519				hscb->sgptr = scb->sg_list_phys | SG_FULL_RESID;
520				hscb->sgptr = ahc_htole32(hscb->sgptr);
521				scb->sg_count = 1;
522				scb->flags |= SCB_SENSE;
523				ahc_qinfifo_requeue_tail(ahc, scb);
524				ahc_outb(ahc, RETURN_1, SEND_SENSE);
525#ifdef __FreeBSD__
526				/*
527				 * Ensure we have enough time to actually
528				 * retrieve the sense.
529				 */
530				untimeout(ahc_timeout, (caddr_t)scb,
531					  scb->io_ctx->ccb_h.timeout_ch);
532				scb->io_ctx->ccb_h.timeout_ch =
533				    timeout(ahc_timeout, (caddr_t)scb, 5 * hz);
534#endif
535			}
536			break;
537		default:
538			break;
539		}
540		break;
541	}
542	case NO_MATCH:
543	{
544		/* Ensure we don't leave the selection hardware on */
545		ahc_outb(ahc, SCSISEQ,
546			 ahc_inb(ahc, SCSISEQ) & (ENSELI|ENRSELI|ENAUTOATNP));
547
548		printf("%s:%c:%d: no active SCB for reconnecting "
549		       "target - issuing BUS DEVICE RESET\n",
550		       ahc_name(ahc), devinfo.channel, devinfo.target);
551		printf("SAVED_SCSIID == 0x%x, SAVED_LUN == 0x%x, "
552		       "ARG_1 == 0x%x ACCUM = 0x%x\n",
553		       ahc_inb(ahc, SAVED_SCSIID), ahc_inb(ahc, SAVED_LUN),
554		       ahc_inb(ahc, ARG_1), ahc_inb(ahc, ACCUM));
555		printf("SEQ_FLAGS == 0x%x, SCBPTR == 0x%x, BTT == 0x%x, "
556		       "SINDEX == 0x%x\n",
557		       ahc_inb(ahc, SEQ_FLAGS), ahc_inb(ahc, SCBPTR),
558		       ahc_index_busy_tcl(ahc,
559			    BUILD_TCL(ahc_inb(ahc, SAVED_SCSIID),
560				      ahc_inb(ahc, SAVED_LUN))),
561		       ahc_inb(ahc, SINDEX));
562		printf("SCSIID == 0x%x, SCB_SCSIID == 0x%x, SCB_LUN == 0x%x, "
563		       "SCB_TAG == 0x%x, SCB_CONTROL == 0x%x\n",
564		       ahc_inb(ahc, SCSIID), ahc_inb(ahc, SCB_SCSIID),
565		       ahc_inb(ahc, SCB_LUN), ahc_inb(ahc, SCB_TAG),
566		       ahc_inb(ahc, SCB_CONTROL));
567		printf("SCSIBUSL == 0x%x, SCSISIGI == 0x%x\n",
568		       ahc_inb(ahc, SCSIBUSL), ahc_inb(ahc, SCSISIGI));
569		printf("SXFRCTL0 == 0x%x\n", ahc_inb(ahc, SXFRCTL0));
570		printf("SEQCTL == 0x%x\n", ahc_inb(ahc, SEQCTL));
571		ahc_dump_card_state(ahc);
572		ahc->msgout_buf[0] = MSG_BUS_DEV_RESET;
573		ahc->msgout_len = 1;
574		ahc->msgout_index = 0;
575		ahc->msg_type = MSG_TYPE_INITIATOR_MSGOUT;
576		ahc_outb(ahc, MSG_OUT, HOST_MSG);
577		ahc_outb(ahc, SCSISIGO, ahc_inb(ahc, LASTPHASE) | ATNO);
578		break;
579	}
580	case SEND_REJECT:
581	{
582		u_int rejbyte = ahc_inb(ahc, ACCUM);
583		printf("%s:%c:%d: Warning - unknown message received from "
584		       "target (0x%x).  Rejecting\n",
585		       ahc_name(ahc), devinfo.channel, devinfo.target, rejbyte);
586		break;
587	}
588	case NO_IDENT:
589	{
590		/*
591		 * The reconnecting target either did not send an identify
592		 * message, or did, but we didn't find an SCB to match and
593		 * before it could respond to our ATN/abort, it hit a dataphase.
594		 * The only safe thing to do is to blow it away with a bus
595		 * reset.
596		 */
597		int found;
598
599		printf("%s:%c:%d: Target did not send an IDENTIFY message. "
600		       "LASTPHASE = 0x%x, SAVED_SCSIID == 0x%x\n",
601		       ahc_name(ahc), devinfo.channel, devinfo.target,
602		       ahc_inb(ahc, LASTPHASE), ahc_inb(ahc, SAVED_SCSIID));
603		found = ahc_reset_channel(ahc, devinfo.channel,
604					  /*initiate reset*/TRUE);
605		printf("%s: Issued Channel %c Bus Reset. "
606		       "%d SCBs aborted\n", ahc_name(ahc), devinfo.channel,
607		       found);
608		return;
609	}
610	case IGN_WIDE_RES:
611		ahc_handle_ign_wide_residue(ahc, &devinfo);
612		break;
613	case BAD_PHASE:
614	{
615		u_int lastphase;
616
617		lastphase = ahc_inb(ahc, LASTPHASE);
		printf("%s:%c:%d: unknown scsi bus phase %x, "
		       "SCSISIGI == 0x%x.  Attempting to continue\n",
		       ahc_name(ahc), devinfo.channel, devinfo.target,
		       lastphase, ahc_inb(ahc, SCSISIGI));
622		break;
623	}
624	case MISSED_BUSFREE:
625	{
626		u_int lastphase;
627
628		lastphase = ahc_inb(ahc, LASTPHASE);
629		printf("%s:%c:%d: Missed busfree. "
630		       "Lastphase = 0x%x, Curphase = 0x%x\n",
631		       ahc_name(ahc), devinfo.channel, devinfo.target,
632		       lastphase, ahc_inb(ahc, SCSISIGI));
633		restart_sequencer(ahc);
634		return;
635	}
636	case HOST_MSG_LOOP:
637	{
638		/*
639		 * The sequencer has encountered a message phase
640		 * that requires host assistance for completion.
641		 * While handling the message phase(s), we will be
642		 * notified by the sequencer after each byte is
		 * transferred so we can track bus phase changes.
644		 *
645		 * If this is the first time we've seen a HOST_MSG_LOOP
646		 * interrupt, initialize the state of the host message
647		 * loop.
648		 */
649		if (ahc->msg_type == MSG_TYPE_NONE) {
650			u_int bus_phase;
651
652			bus_phase = ahc_inb(ahc, SCSISIGI) & PHASE_MASK;
653			if (bus_phase != P_MESGIN
654			 && bus_phase != P_MESGOUT) {
655				printf("ahc_intr: HOST_MSG_LOOP bad "
656				       "phase 0x%x\n",
657				      bus_phase);
658				/*
659				 * Probably transitioned to bus free before
660				 * we got here.  Just punt the message.
661				 */
662				ahc_clear_intstat(ahc);
663				restart_sequencer(ahc);
664				return;
665			}
666
667			if (devinfo.role == ROLE_INITIATOR) {
668				struct scb *scb;
669				u_int scb_index;
670
671				scb_index = ahc_inb(ahc, SCB_TAG);
672				scb = ahc_lookup_scb(ahc, scb_index);
673
674				if (scb == NULL)
675					panic("HOST_MSG_LOOP with "
676					      "invalid SCB %x\n", scb_index);
677
678				if (bus_phase == P_MESGOUT)
679					ahc_setup_initiator_msgout(ahc,
680								   &devinfo,
681								   scb);
682				else {
683					ahc->msg_type =
684					    MSG_TYPE_INITIATOR_MSGIN;
685					ahc->msgin_index = 0;
686				}
687			} else {
688				if (bus_phase == P_MESGOUT) {
689					ahc->msg_type =
690					    MSG_TYPE_TARGET_MSGOUT;
691					ahc->msgin_index = 0;
692				}
#ifdef AHC_TARGET_MODE
694				else
695					ahc_setup_target_msgin(ahc, &devinfo);
696#endif
697			}
698		}
699
700		ahc_handle_message_phase(ahc);
701		break;
702	}
703	case PERR_DETECTED:
704	{
705		/*
706		 * If we've cleared the parity error interrupt
707		 * but the sequencer still believes that SCSIPERR
708		 * is true, it must be that the parity error is
709		 * for the currently presented byte on the bus,
710		 * and we are not in a phase (data-in) where we will
711		 * eventually ack this byte.  Ack the byte and
712		 * throw it away in the hope that the target will
713		 * take us to message out to deliver the appropriate
714		 * error message.
715		 */
716		if ((intstat & SCSIINT) == 0
717		 && (ahc_inb(ahc, SSTAT1) & SCSIPERR) != 0) {
718			u_int curphase;
719
720			/*
721			 * The hardware will only let you ack bytes
722			 * if the expected phase in SCSISIGO matches
723			 * the current phase.  Make sure this is
724			 * currently the case.
725			 */
726			curphase = ahc_inb(ahc, SCSISIGI) & PHASE_MASK;
727			ahc_outb(ahc, LASTPHASE, curphase);
728			ahc_outb(ahc, SCSISIGO, curphase);
729			ahc_inb(ahc, SCSIDATL);
730		}
731		break;
732	}
733	case DATA_OVERRUN:
734	{
735		/*
736		 * When the sequencer detects an overrun, it
737		 * places the controller in "BITBUCKET" mode
738		 * and allows the target to complete its transfer.
739		 * Unfortunately, none of the counters get updated
740		 * when the controller is in this mode, so we have
741		 * no way of knowing how large the overrun was.
742		 */
743		u_int scbindex = ahc_inb(ahc, SCB_TAG);
744		u_int lastphase = ahc_inb(ahc, LASTPHASE);
745		u_int i;
746
747		scb = ahc_lookup_scb(ahc, scbindex);
748		for (i = 0; i < num_phases; i++) {
749			if (lastphase == phase_table[i].phase)
750				break;
751		}
752		ahc_print_path(ahc, scb);
753		printf("data overrun detected %s."
754		       "  Tag == 0x%x.\n",
755		       phase_table[i].phasemsg,
		       scb->hscb->tag);
757		ahc_print_path(ahc, scb);
758		printf("%s seen Data Phase.  Length = %ld.  NumSGs = %d.\n",
759		       ahc_inb(ahc, SEQ_FLAGS) & DPHASE ? "Have" : "Haven't",
760		       ahc_get_transfer_length(scb), scb->sg_count);
761		if (scb->sg_count > 0) {
762			for (i = 0; i < scb->sg_count; i++) {
763				printf("sg[%d] - Addr 0x%x : Length %d\n",
764				       i,
765				       ahc_le32toh(scb->sg_list[i].addr),
766				       ahc_le32toh(scb->sg_list[i].len)
767				       & AHC_SG_LEN_MASK);
768			}
769		}
770		/*
771		 * Set this and it will take effect when the
772		 * target does a command complete.
773		 */
774		ahc_freeze_devq(ahc, scb);
775		ahc_set_transaction_status(scb, CAM_DATA_RUN_ERR);
776		ahc_freeze_scb(scb);
777		break;
778	}
779	case MKMSG_FAILED:
780	{
781		u_int scbindex;
782
783		printf("%s:%c:%d:%d: Attempt to issue message failed\n",
784		       ahc_name(ahc), devinfo.channel, devinfo.target,
785		       devinfo.lun);
786		scbindex = ahc_inb(ahc, SCB_TAG);
787		scb = ahc_lookup_scb(ahc, scbindex);
788		if (scb != NULL
789		 && (scb->flags & SCB_RECOVERY_SCB) != 0)
790			/*
791			 * Ensure that we didn't put a second instance of this
792			 * SCB into the QINFIFO.
793			 */
794			ahc_search_qinfifo(ahc, SCB_GET_TARGET(ahc, scb),
795					   SCB_GET_CHANNEL(ahc, scb),
796					   SCB_GET_LUN(scb), scb->hscb->tag,
797					   ROLE_INITIATOR, /*status*/0,
798					   SEARCH_REMOVE);
799		break;
800	}
801	case NO_FREE_SCB:
802	{
803		printf("%s: No free or disconnected SCBs\n", ahc_name(ahc));
804		ahc_dump_card_state(ahc);
805		panic("for safety");
806		break;
807	}
808	case SCB_MISMATCH:
809	{
810		u_int scbptr;
811
812		scbptr = ahc_inb(ahc, SCBPTR);
813		printf("Bogus TAG after DMA.  SCBPTR %d, tag %d, our tag %d\n",
814		       scbptr, ahc_inb(ahc, ARG_1),
815		       ahc->scb_data->hscbs[scbptr].tag);
816		ahc_dump_card_state(ahc);
		panic("for safety");
818		break;
819	}
820	case OUT_OF_RANGE:
821	{
822		printf("%s: BTT calculation out of range\n", ahc_name(ahc));
823		printf("SAVED_SCSIID == 0x%x, SAVED_LUN == 0x%x, "
824		       "ARG_1 == 0x%x ACCUM = 0x%x\n",
825		       ahc_inb(ahc, SAVED_SCSIID), ahc_inb(ahc, SAVED_LUN),
826		       ahc_inb(ahc, ARG_1), ahc_inb(ahc, ACCUM));
827		printf("SEQ_FLAGS == 0x%x, SCBPTR == 0x%x, BTT == 0x%x, "
		       "SINDEX == 0x%x, A == 0x%x\n",
829		       ahc_inb(ahc, SEQ_FLAGS), ahc_inb(ahc, SCBPTR),
830		       ahc_index_busy_tcl(ahc,
831			    BUILD_TCL(ahc_inb(ahc, SAVED_SCSIID),
832				      ahc_inb(ahc, SAVED_LUN))),
833		       ahc_inb(ahc, SINDEX),
834		       ahc_inb(ahc, ACCUM));
835		printf("SCSIID == 0x%x, SCB_SCSIID == 0x%x, SCB_LUN == 0x%x, "
836		       "SCB_TAG == 0x%x, SCB_CONTROL == 0x%x\n",
837		       ahc_inb(ahc, SCSIID), ahc_inb(ahc, SCB_SCSIID),
838		       ahc_inb(ahc, SCB_LUN), ahc_inb(ahc, SCB_TAG),
839		       ahc_inb(ahc, SCB_CONTROL));
840		printf("SCSIBUSL == 0x%x, SCSISIGI == 0x%x\n",
841		       ahc_inb(ahc, SCSIBUSL), ahc_inb(ahc, SCSISIGI));
842		ahc_dump_card_state(ahc);
843		panic("for safety");
844		break;
845	}
846	default:
847		printf("ahc_intr: seqint, "
848		       "intstat == 0x%x, scsisigi = 0x%x\n",
849		       intstat, ahc_inb(ahc, SCSISIGI));
850		break;
851	}
852unpause:
853	/*
854	 *  The sequencer is paused immediately on
855	 *  a SEQINT, so we should restart it when
856	 *  we're done.
857	 */
858	unpause_sequencer(ahc);
859}
860
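/*
 * Service a SCSI bus related interrupt (SCSIINT): transceiver mode
 * changes, bus resets, parity errors, unexpected bus free events,
 * and selection timeouts.
 */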
861void
862ahc_handle_scsiint(struct ahc_softc *ahc, u_int intstat)
863{
864	u_int	scb_index;
865	u_int	status0;
866	u_int	status;
867	struct	scb *scb;
868	char	cur_channel;
869	char	intr_channel;
870
871	/* Make sure the sequencer is in a safe location. */
872	ahc_clear_critical_section(ahc);
873
874	if ((ahc->features & AHC_TWIN) != 0
875	 && ((ahc_inb(ahc, SBLKCTL) & SELBUSB) != 0))
876		cur_channel = 'B';
877	else
878		cur_channel = 'A';
879	intr_channel = cur_channel;
880
881	if ((ahc->features & AHC_ULTRA2) != 0)
882		status0 = ahc_inb(ahc, SSTAT0) & IOERR;
883	else
884		status0 = 0;
885	status = ahc_inb(ahc, SSTAT1) & (SELTO|SCSIRSTI|BUSFREE|SCSIPERR);
886	if (status == 0 && status0 == 0) {
887		if ((ahc->features & AHC_TWIN) != 0) {
888			/* Try the other channel */
889		 	ahc_outb(ahc, SBLKCTL, ahc_inb(ahc, SBLKCTL) ^ SELBUSB);
890			status = ahc_inb(ahc, SSTAT1);
891		 	ahc_outb(ahc, SBLKCTL, ahc_inb(ahc, SBLKCTL) ^ SELBUSB);
892			intr_channel = (cur_channel == 'A') ? 'B' : 'A';
893		}
894		if (status == 0) {
895			printf("%s: Spurious SCSI interrupt\n", ahc_name(ahc));
896			ahc_outb(ahc, CLRINT, CLRSCSIINT);
897			unpause_sequencer(ahc);
898			return;
899		}
900	}
901
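	/*
	 * Only trust SCB_TAG if the sequencer saw an IDENTIFY for
	 * this connection; otherwise it may not refer to the
	 * transaction that triggered this interrupt.
	 */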
902	scb_index = ahc_inb(ahc, SCB_TAG);
903	scb = ahc_lookup_scb(ahc, scb_index);
904	if (scb != NULL
905	 && (ahc_inb(ahc, SEQ_FLAGS) & IDENTIFY_SEEN) == 0)
906		scb = NULL;
907
908	if ((ahc->features & AHC_ULTRA2) != 0
909		&& (status0 & IOERR) != 0) {
910		int now_lvd;
911
912		now_lvd = ahc_inb(ahc, SBLKCTL) & ENAB40;
913		printf("%s: Transceiver State Has Changed to %s mode\n",
914		       ahc_name(ahc), now_lvd ? "LVD" : "SE");
915		ahc_outb(ahc, CLRSINT0, CLRIOERR);
916		/*
917		 * When transitioning to SE mode, the reset line
918		 * glitches, triggering an arbitration bug in some
919		 * Ultra2 controllers.  This bug is cleared when we
920		 * assert the reset line.  Since a reset glitch has
921		 * already occurred with this transition and a
922		 * transceiver state change is handled just like
923		 * a bus reset anyway, asserting the reset line
924		 * ourselves is safe.
925		 */
926		ahc_reset_channel(ahc, intr_channel,
927				 /*Initiate Reset*/now_lvd == 0);
928	} else if ((status & SCSIRSTI) != 0) {
929		printf("%s: Someone reset channel %c\n",
930			ahc_name(ahc), intr_channel);
931		ahc_reset_channel(ahc, intr_channel, /*Initiate Reset*/FALSE);
932	} else if ((status & SCSIPERR) != 0) {
933		/*
934		 * Determine the bus phase and queue an appropriate message.
935		 * SCSIPERR is latched true as soon as a parity error
936		 * occurs.  If the sequencer acked the transfer that
937		 * caused the parity error and the currently presented
938		 * transfer on the bus has correct parity, SCSIPERR will
939		 * be cleared by CLRSCSIPERR.  Use this to determine if
940		 * we should look at the last phase the sequencer recorded,
941		 * or the current phase presented on the bus.
942		 */
943		u_int mesg_out;
944		u_int curphase;
945		u_int errorphase;
946		u_int lastphase;
947		u_int scsirate;
948		u_int i;
949		u_int sstat2;
950
951		lastphase = ahc_inb(ahc, LASTPHASE);
952		curphase = ahc_inb(ahc, SCSISIGI) & PHASE_MASK;
953		sstat2 = ahc_inb(ahc, SSTAT2);
954		ahc_outb(ahc, CLRSINT1, CLRSCSIPERR);
955		/*
956		 * For all phases save DATA, the sequencer won't
957		 * automatically ack a byte that has a parity error
958		 * in it.  So the only way that the current phase
959		 * could be 'data-in' is if the parity error is for
960		 * an already acked byte in the data phase.  During
961		 * synchronous data-in transfers, we may actually
962		 * ack bytes before latching the current phase in
963		 * LASTPHASE, leading to the discrepancy between
964		 * curphase and lastphase.
965		 */
966		if ((ahc_inb(ahc, SSTAT1) & SCSIPERR) != 0
967		 || curphase == P_DATAIN || curphase == P_DATAIN_DT)
968			errorphase = curphase;
969		else
970			errorphase = lastphase;
971
972		for (i = 0; i < num_phases; i++) {
973			if (errorphase == phase_table[i].phase)
974				break;
975		}
976		mesg_out = phase_table[i].mesg_out;
977		if (scb != NULL)
978			ahc_print_path(ahc, scb);
979		else
980			printf("%s:%c:%d: ", ahc_name(ahc),
981			       intr_channel,
982			       SCSIID_TARGET(ahc, ahc_inb(ahc, SAVED_SCSIID)));
983		scsirate = ahc_inb(ahc, SCSIRATE);
984		printf("parity error detected %s. "
985		       "SEQADDR(0x%x) SCSIRATE(0x%x)\n",
986		       phase_table[i].phasemsg,
987		       ahc_inb(ahc, SEQADDR0) | (ahc_inb(ahc, SEQADDR1) << 8),
988		       scsirate);
989
990		if ((ahc->features & AHC_DT) != 0) {
991
992			if ((sstat2 & CRCVALERR) != 0)
993				printf("\tCRC Value Mismatch\n");
994			if ((sstat2 & CRCENDERR) != 0)
				printf("\tNo terminal CRC packet received\n");
996			if ((sstat2 & CRCREQERR) != 0)
997				printf("\tIllegal CRC packet request\n");
998			if ((sstat2 & DUAL_EDGE_ERR) != 0)
999				printf("\tUnexpected %sDT Data Phase\n",
1000				       (scsirate & SINGLE_EDGE) ? "" : "non-");
1001		}
1002
1003		/*
1004		 * We've set the hardware to assert ATN if we
1005		 * get a parity error on "in" phases, so all we
1006		 * need to do is stuff the message buffer with
1007		 * the appropriate message.  "In" phases have set
		 * mesg_out to something other than MSG_NOOP.
1009		 */
1010		if (mesg_out != MSG_NOOP) {
1011			if (ahc->msg_type != MSG_TYPE_NONE)
1012				ahc->send_msg_perror = TRUE;
1013			else
1014				ahc_outb(ahc, MSG_OUT, mesg_out);
1015		}
1016		ahc_outb(ahc, CLRINT, CLRSCSIINT);
1017		unpause_sequencer(ahc);
1018	} else if ((status & BUSFREE) != 0
1019		&& (ahc_inb(ahc, SIMODE1) & ENBUSFREE) != 0) {
1020		/*
1021		 * First look at what phase we were last in.
		 * If it was message out, chances are pretty good
1023		 * that the busfree was in response to one of
1024		 * our abort requests.
1025		 */
1026		u_int lastphase = ahc_inb(ahc, LASTPHASE);
1027		u_int saved_scsiid = ahc_inb(ahc, SAVED_SCSIID);
1028		u_int saved_lun = ahc_inb(ahc, SAVED_LUN);
1029		u_int target = SCSIID_TARGET(ahc, saved_scsiid);
1030		u_int initiator_role_id = SCSIID_OUR_ID(saved_scsiid);
1031		char channel = SCSIID_CHANNEL(ahc, saved_scsiid);
1032		int printerror = 1;
1033
1034		ahc_outb(ahc, SCSISEQ,
1035			 ahc_inb(ahc, SCSISEQ) & (ENSELI|ENRSELI|ENAUTOATNP));
1036		if (lastphase == P_MESGOUT) {
1037			struct ahc_devinfo devinfo;
1038			u_int tag;
1039
1040			ahc_fetch_devinfo(ahc, &devinfo);
1041			tag = SCB_LIST_NULL;
1042			if (ahc_sent_msg(ahc, AHCMSG_1B, MSG_ABORT_TAG, TRUE)
1043			 || ahc_sent_msg(ahc, AHCMSG_1B, MSG_ABORT, TRUE)) {
1044				if (ahc->msgout_buf[ahc->msgout_index - 1]
1045				 == MSG_ABORT_TAG)
1046					tag = scb->hscb->tag;
1047				ahc_print_path(ahc, scb);
1048				printf("SCB %d - Abort %s Completed.\n",
1049				       scb->hscb->tag, tag == SCB_LIST_NULL ?
1050				       "" : "Tag");
1051				ahc_abort_scbs(ahc, target, channel,
1052					       saved_lun, tag,
1053					       ROLE_INITIATOR,
1054					       CAM_REQ_ABORTED);
1055				printerror = 0;
1056			} else if (ahc_sent_msg(ahc, AHCMSG_1B,
1057						MSG_BUS_DEV_RESET, TRUE)) {
1058				struct ahc_devinfo devinfo;
1059#ifdef __FreeBSD__
1060				/*
1061				 * Don't mark the user's request for this BDR
1062				 * as completing with CAM_BDR_SENT.  CAM3
1063				 * specifies CAM_REQ_CMP.
1064				 */
1065				if (scb != NULL
1066				 && scb->io_ctx->ccb_h.func_code== XPT_RESET_DEV
1067				 && ahc_match_scb(ahc, scb, target, channel,
1068						  CAM_LUN_WILDCARD,
1069						  SCB_LIST_NULL,
1070						  ROLE_INITIATOR)) {
1071					ahc_set_transaction_status(scb, CAM_REQ_CMP);
1072				}
1073#endif
1074				ahc_compile_devinfo(&devinfo,
1075						    initiator_role_id,
1076						    target,
1077						    CAM_LUN_WILDCARD,
1078						    channel,
1079						    ROLE_INITIATOR);
1080				ahc_handle_devreset(ahc, &devinfo,
1081						    CAM_BDR_SENT,
1082						    "Bus Device Reset",
1083						    /*verbose_level*/0);
1084				printerror = 0;
1085			} else if (ahc_sent_msg(ahc, AHCMSG_EXT,
1086						MSG_EXT_PPR, FALSE)) {
1087				struct ahc_initiator_tinfo *tinfo;
1088				struct tmode_tstate *tstate;
1089
1090				/*
1091				 * PPR Rejected.  Try non-ppr negotiation
1092				 * and retry command.
1093				 */
1094				tinfo = ahc_fetch_transinfo(ahc,
1095							    devinfo.channel,
1096							    devinfo.our_scsiid,
1097							    devinfo.target,
1098							    &tstate);
1099				tinfo->current.transport_version = 2;
1100				tinfo->goal.transport_version = 2;
1101				tinfo->goal.ppr_options = 0;
1102				ahc_qinfifo_requeue_tail(ahc, scb);
1103				printerror = 0;
1104			} else if (ahc_sent_msg(ahc, AHCMSG_EXT,
1105						MSG_EXT_WDTR, FALSE)
1106				|| ahc_sent_msg(ahc, AHCMSG_EXT,
1107						MSG_EXT_SDTR, FALSE)) {
1108				/*
1109				 * Negotiation Rejected.  Go-async and
1110				 * retry command.
1111				 */
1112				ahc_set_width(ahc, &devinfo,
1113					      MSG_EXT_WDTR_BUS_8_BIT,
1114					      AHC_TRANS_CUR|AHC_TRANS_GOAL,
1115					      /*paused*/TRUE);
1116				ahc_set_syncrate(ahc, &devinfo,
1117						/*syncrate*/NULL,
1118						/*period*/0, /*offset*/0,
1119						/*ppr_options*/0,
1120						AHC_TRANS_CUR|AHC_TRANS_GOAL,
1121						/*paused*/TRUE);
1122				ahc_qinfifo_requeue_tail(ahc, scb);
1123				printerror = 0;
1124			}
1125		}
1126		if (printerror != 0) {
1127			u_int i;
1128
1129			if (scb != NULL) {
1130				u_int tag;
1131
1132				if ((scb->hscb->control & TAG_ENB) != 0)
1133					tag = scb->hscb->tag;
1134				else
1135					tag = SCB_LIST_NULL;
1136				ahc_print_path(ahc, scb);
1137				ahc_abort_scbs(ahc, target, channel,
1138					       SCB_GET_LUN(scb), tag,
1139					       ROLE_INITIATOR,
1140					       CAM_UNEXP_BUSFREE);
1141			} else {
1142				/*
1143				 * We had not fully identified this connection,
1144				 * so we cannot abort anything.
1145				 */
1146				printf("%s: ", ahc_name(ahc));
1147			}
1148			for (i = 0; i < num_phases; i++) {
1149				if (lastphase == phase_table[i].phase)
1150					break;
1151			}
1152			printf("Unexpected busfree %s\n"
1153			       "SEQADDR == 0x%x\n",
1154			       phase_table[i].phasemsg, ahc_inb(ahc, SEQADDR0)
1155				| (ahc_inb(ahc, SEQADDR1) << 8));
1156		}
1157		ahc_clear_msg_state(ahc);
1158		ahc_outb(ahc, SIMODE1, ahc_inb(ahc, SIMODE1) & ~ENBUSFREE);
1159		ahc_outb(ahc, CLRSINT1, CLRBUSFREE|CLRSCSIPERR);
1160		ahc_outb(ahc, CLRINT, CLRSCSIINT);
1161		restart_sequencer(ahc);
1162	} else if ((status & SELTO) != 0) {
1163		u_int scbptr;
1164
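		/*
		 * A selection timed out.  The SCB at the head of the
		 * waiting list is the one that failed to be selected.
		 */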
1165		scbptr = ahc_inb(ahc, WAITING_SCBH);
1166		ahc_outb(ahc, SCBPTR, scbptr);
1167		scb_index = ahc_inb(ahc, SCB_TAG);
1168
1169		scb = ahc_lookup_scb(ahc, scb_index);
1170		if (scb == NULL) {
1171			printf("%s: ahc_intr - referenced scb not "
1172			       "valid during SELTO scb(%d, %d)\n",
1173			       ahc_name(ahc), scbptr, scb_index);
1174		} else {
1175			ahc_set_transaction_status(scb, CAM_SEL_TIMEOUT);
1176			ahc_freeze_devq(ahc, scb);
1177		}
1178		/* Stop the selection */
1179		ahc_outb(ahc, SCSISEQ, 0);
1180
1181		/* No more pending messages */
1182		ahc_clear_msg_state(ahc);
1183
1184		/* Clear interrupt state */
1185		ahc_outb(ahc, CLRSINT1, CLRSELTIMEO|CLRBUSFREE|CLRSCSIPERR);
1186
1187		/*
1188		 * Although the driver does not care about the
1189		 * 'Selection in Progress' status bit, the busy
		 * LED does.  SELINGO is only cleared by a successful
		 * selection, so we must manually clear it to ensure
		 * the LED turns off in case no future successful
1193		 * selections occur (e.g. no devices on the bus).
1194		 */
1195		ahc_outb(ahc, CLRSINT0, CLRSELINGO);
1196
1197		ahc_outb(ahc, CLRINT, CLRSCSIINT);
1198		restart_sequencer(ahc);
1199	} else {
1200		panic("%s: Missing case in ahc_handle_scsiint. status = %x\n",
1201		      ahc_name(ahc), status);
1202	}
1203}
1204
1205#define AHC_MAX_STEPS 2000
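/*
 * If the sequencer is paused within one of the firmware's critical
 * sections, single step it (with all pausing interrupt sources
 * masked) until it is safely outside the section.
 */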
1206void
1207ahc_clear_critical_section(struct ahc_softc *ahc)
1208{
1209	int	stepping;
1210	int	steps;
1211	u_int	simode0;
1212	u_int	simode1;
1213
1214	if (ahc->num_critical_sections == 0)
1215		return;
1216
1217	stepping = FALSE;
1218	steps = 0;
1219	simode0 = 0;
1220	simode1 = 0;
1221	for (;;) {
1222		struct	cs *cs;
1223		u_int	seqaddr;
1224		u_int	i;
1225
1226		seqaddr = ahc_inb(ahc, SEQADDR0)
1227			| (ahc_inb(ahc, SEQADDR1) << 8);
1228
1229		cs = ahc->critical_sections;
1230		for (i = 0; i < ahc->num_critical_sections; i++, cs++) {
1231
1232			if (cs->begin < seqaddr && cs->end >= seqaddr)
1233				break;
1234		}
1235
1236		if (i == ahc->num_critical_sections)
1237			break;
1238
1239		if (steps > AHC_MAX_STEPS) {
1240			printf("%s: Infinite loop in critical section\n",
1241			       ahc_name(ahc));
1242			ahc_dump_card_state(ahc);
1243			panic("critical section loop");
1244		}
1245
1246		steps++;
1247		if (stepping == FALSE) {
1248
1249			/*
1250			 * Disable all interrupt sources so that the
1251			 * sequencer will not be stuck by a pausing
1252			 * interrupt condition while we attempt to
1253			 * leave a critical section.
1254			 */
1255			simode0 = ahc_inb(ahc, SIMODE0);
1256			ahc_outb(ahc, SIMODE0, 0);
1257			simode1 = ahc_inb(ahc, SIMODE1);
1258			ahc_outb(ahc, SIMODE1, 0);
1259			ahc_outb(ahc, CLRINT, CLRSCSIINT);
1260			ahc_outb(ahc, SEQCTL, ahc_inb(ahc, SEQCTL) | STEP);
1261			stepping = TRUE;
1262		}
1263		ahc_outb(ahc, HCNTRL, ahc->unpause);
1264		do {
1265			ahc_delay(200);
1266		} while (!sequencer_paused(ahc));
1267	}
1268	if (stepping) {
1269		ahc_outb(ahc, SIMODE0, simode0);
1270		ahc_outb(ahc, SIMODE1, simode1);
1271		ahc_outb(ahc, SEQCTL, ahc_inb(ahc, SEQCTL) & ~STEP);
1272	}
1273}
1274
1275/*
1276 * Clear any pending interrupt status.
1277 */
1278void
1279ahc_clear_intstat(struct ahc_softc *ahc)
1280{
1281	/* Clear any interrupt conditions this may have caused */
1282	ahc_outb(ahc, CLRSINT1, CLRSELTIMEO|CLRATNO|CLRSCSIRSTI
1283				|CLRBUSFREE|CLRSCSIPERR|CLRPHASECHG|
1284				CLRREQINIT);
1285	ahc_outb(ahc, CLRSINT0, CLRSELDO|CLRSELDI|CLRSELINGO);
1286	ahc_outb(ahc, CLRINT, CLRSCSIINT);
1287}
1288
1289/**************************** Debugging Routines ******************************/
1290void
1291ahc_print_scb(struct scb *scb)
1292{
1293	int i;
1294
1295	struct hardware_scb *hscb = scb->hscb;
1296
1297	printf("scb:%p control:0x%x scsiid:0x%x lun:%d cdb_len:%d\n",
1298	       scb,
1299	       hscb->control,
1300	       hscb->scsiid,
1301	       hscb->lun,
1302	       hscb->cdb_len);
1303	i = 0;
1304	printf("Shared Data: %#02x %#02x %#02x %#02x\n",
1305	       hscb->shared_data.cdb[i++],
1306	       hscb->shared_data.cdb[i++],
1307	       hscb->shared_data.cdb[i++],
1308	       hscb->shared_data.cdb[i++]);
1309	printf("             %#02x %#02x %#02x %#02x\n",
1310	       hscb->shared_data.cdb[i++],
1311	       hscb->shared_data.cdb[i++],
1312	       hscb->shared_data.cdb[i++],
1313	       hscb->shared_data.cdb[i++]);
1314	printf("             %#02x %#02x %#02x %#02x\n",
1315	       hscb->shared_data.cdb[i++],
1316	       hscb->shared_data.cdb[i++],
1317	       hscb->shared_data.cdb[i++],
1318	       hscb->shared_data.cdb[i++]);
1319	printf("        dataptr:%#x datacnt:%#x sgptr:%#x tag:%#x\n",
1320		ahc_le32toh(hscb->dataptr),
1321		ahc_le32toh(hscb->datacnt),
1322		ahc_le32toh(hscb->sgptr),
1323		hscb->tag);
1324	if (scb->sg_count > 0) {
1325		for (i = 0; i < scb->sg_count; i++) {
1326			printf("sg[%d] - Addr 0x%x : Length %d\n",
1327			       i,
1328			       ahc_le32toh(scb->sg_list[i].addr),
1329			       ahc_le32toh(scb->sg_list[i].len));
1330		}
1331	}
1332}
1333
1334/************************* Transfer Negotiation *******************************/
1335/*
1336 * Allocate per target mode instance (ID we respond to as a target)
1337 * transfer negotiation data structures.
1338 */
1339static struct tmode_tstate *
1340ahc_alloc_tstate(struct ahc_softc *ahc, u_int scsi_id, char channel)
1341{
1342	struct tmode_tstate *master_tstate;
1343	struct tmode_tstate *tstate;
1344	int i;
1345
1346	master_tstate = ahc->enabled_targets[ahc->our_id];
1347	if (channel == 'B') {
1348		scsi_id += 8;
1349		master_tstate = ahc->enabled_targets[ahc->our_id_b + 8];
1350	}
1351	if (ahc->enabled_targets[scsi_id] != NULL
1352	 && ahc->enabled_targets[scsi_id] != master_tstate)
1353		panic("%s: ahc_alloc_tstate - Target already allocated",
1354		      ahc_name(ahc));
1355	tstate = malloc(sizeof(*tstate), M_DEVBUF, M_NOWAIT);
1356	if (tstate == NULL)
1357		return (NULL);
1358
1359	/*
1360	 * If we have allocated a master tstate, copy user settings from
1361	 * the master tstate (taken from SRAM or the EEPROM) for this
1362	 * channel, but reset our current and goal settings to async/narrow
1363	 * until an initiator talks to us.
1364	 */
1365	if (master_tstate != NULL) {
1366		memcpy(tstate, master_tstate, sizeof(*tstate));
1367		memset(tstate->enabled_luns, 0, sizeof(tstate->enabled_luns));
1368		tstate->ultraenb = 0;
1369		for (i = 0; i < 16; i++) {
1370			memset(&tstate->transinfo[i].current, 0,
1371			      sizeof(tstate->transinfo[i].current));
1372			memset(&tstate->transinfo[i].goal, 0,
1373			      sizeof(tstate->transinfo[i].goal));
1374		}
1375	} else
1376		memset(tstate, 0, sizeof(*tstate));
1377	ahc->enabled_targets[scsi_id] = tstate;
1378	return (tstate);
1379}
1380
1381/*
1382 * Free per target mode instance (ID we respond to as a target)
1383 * transfer negotiation data structures.
1384 */
1385static void
1386ahc_free_tstate(struct ahc_softc *ahc, u_int scsi_id, char channel, int force)
1387{
1388	struct tmode_tstate *tstate;
1389
1390	/* Don't clean up the entry for our initiator role */
1391	if ((ahc->flags & AHC_INITIATORROLE) != 0
1392	 && ((channel == 'B' && scsi_id == ahc->our_id_b)
1393	  || (channel == 'A' && scsi_id == ahc->our_id))
1394	 && force == FALSE)
1395		return;
1396
1397	if (channel == 'B')
1398		scsi_id += 8;
1399	tstate = ahc->enabled_targets[scsi_id];
1400	if (tstate != NULL)
1401		free(tstate, M_DEVBUF);
1402	ahc->enabled_targets[scsi_id] = NULL;
1403}
1404
/*
 * Called when we have an active connection to a target on the bus,
 * this function finds the nearest syncrate to the input period limited
 * by the capabilities of the bus connectivity and the sync settings for
 * the target.
 */
1411struct ahc_syncrate *
1412ahc_devlimited_syncrate(struct ahc_softc *ahc,
1413			struct ahc_initiator_tinfo *tinfo,
1414			u_int *period, u_int *ppr_options, role_t role) {
1415	struct	ahc_transinfo *transinfo;
1416	u_int	maxsync;
1417
1418	if ((ahc->features & AHC_ULTRA2) != 0) {
1419		if ((ahc_inb(ahc, SBLKCTL) & ENAB40) != 0
1420		 && (ahc_inb(ahc, SSTAT2) & EXP_ACTIVE) == 0) {
1421			maxsync = AHC_SYNCRATE_DT;
1422		} else {
1423			maxsync = AHC_SYNCRATE_ULTRA;
1424			/* Can't do DT on an SE bus */
1425			*ppr_options &= ~MSG_EXT_PPR_DT_REQ;
1426		}
1427	} else if ((ahc->features & AHC_ULTRA) != 0) {
1428		maxsync = AHC_SYNCRATE_ULTRA;
1429	} else {
1430		maxsync = AHC_SYNCRATE_FAST;
1431	}
1432	/*
1433	 * Never allow a value higher than our current goal
1434	 * period otherwise we may allow a target initiated
1435	 * negotiation to go above the limit as set by the
1436	 * user.  In the case of an initiator initiated
1437	 * sync negotiation, we limit based on the user
1438	 * setting.  This allows the system to still accept
1439	 * incoming negotiations even if target initiated
1440	 * negotiation is not performed.
1441	 */
1442	if (role == ROLE_TARGET)
1443		transinfo = &tinfo->user;
1444	else
1445		transinfo = &tinfo->goal;
1446	*ppr_options &= transinfo->ppr_options;
1447	if (transinfo->period == 0) {
1448		*period = 0;
1449		*ppr_options = 0;
1450		return (NULL);
1451	}
1452	*period = MAX(*period, transinfo->period);
1453	return (ahc_find_syncrate(ahc, period, ppr_options, maxsync));
1454}
1455
1456/*
1457 * Look up the valid period to SCSIRATE conversion in our table.
1458 * Return the period and offset that should be sent to the target
1459 * if this was the beginning of an SDTR.
1460 */
1461struct ahc_syncrate *
1462ahc_find_syncrate(struct ahc_softc *ahc, u_int *period,
1463		  u_int *ppr_options, u_int maxsync)
1464{
1465	struct ahc_syncrate *syncrate;
1466
1467	if ((ahc->features & AHC_DT) == 0)
1468		*ppr_options &= ~MSG_EXT_PPR_DT_REQ;
1469
1470	for (syncrate = &ahc_syncrates[maxsync];
1471	     syncrate->rate != NULL;
1472	     syncrate++) {
1473
1474		/*
		 * The Ultra2 table doesn't go as low
		 * as the Fast/Ultra table does.
1477		 */
1478		if ((ahc->features & AHC_ULTRA2) != 0
1479		 && (syncrate->sxfr_u2 == 0))
1480			break;
1481
1482		/* Skip any DT entries if DT is not available */
1483		if ((*ppr_options & MSG_EXT_PPR_DT_REQ) == 0
1484		 && (syncrate->sxfr_u2 & DT_SXFR) != 0)
1485			continue;
1486
1487		if (*period <= syncrate->period) {
1488			/*
1489			 * When responding to a target that requests
1490			 * sync, the requested rate may fall between
1491			 * two rates that we can output, but still be
1492			 * a rate that we can receive.  Because of this,
1493			 * we want to respond to the target with
1494			 * the same rate that it sent to us even
1495			 * if the period we use to send data to it
1496			 * is lower.  Only lower the response period
1497			 * if we must.
1498			 */
1499			if (syncrate == &ahc_syncrates[maxsync])
1500				*period = syncrate->period;
1501
1502			/*
1503			 * At some speeds, we only support
1504			 * ST transfers.
1505			 */
1506		 	if ((syncrate->sxfr_u2 & ST_SXFR) != 0)
1507				*ppr_options &= ~MSG_EXT_PPR_DT_REQ;
1508			break;
1509		}
1510	}
1511
1512	if ((*period == 0)
1513	 || (syncrate->rate == NULL)
1514	 || ((ahc->features & AHC_ULTRA2) != 0
1515	  && (syncrate->sxfr_u2 == 0))) {
1516		/* Use asynchronous transfers. */
1517		*period = 0;
1518		syncrate = NULL;
1519		*ppr_options &= ~MSG_EXT_PPR_DT_REQ;
1520	}
1521	return (syncrate);
1522}
1523
1524/*
1525 * Convert from an entry in our syncrate table to the SCSI equivalent
1526 * sync "period" factor.
1527 */
1528u_int
1529ahc_find_period(struct ahc_softc *ahc, u_int scsirate, u_int maxsync)
1530{
1531	struct ahc_syncrate *syncrate;
1532
1533	if ((ahc->features & AHC_ULTRA2) != 0)
1534		scsirate &= SXFR_ULTRA2;
1535	else
1536		scsirate &= SXFR;
1537
1538	syncrate = &ahc_syncrates[maxsync];
1539	while (syncrate->rate != NULL) {
1540
1541		if ((ahc->features & AHC_ULTRA2) != 0) {
1542			if (syncrate->sxfr_u2 == 0)
1543				break;
1544			else if (scsirate == (syncrate->sxfr_u2 & SXFR_ULTRA2))
1545				return (syncrate->period);
1546		} else if (scsirate == (syncrate->sxfr & SXFR)) {
1547				return (syncrate->period);
1548		}
1549		syncrate++;
1550	}
1551	return (0); /* async */
1552}
1553
1554/*
1555 * Truncate the given synchronous offset to a value the
1556 * current adapter type and syncrate are capable of.
1557 */
1558void
1559ahc_validate_offset(struct ahc_softc *ahc,
1560		    struct ahc_initiator_tinfo *tinfo,
1561		    struct ahc_syncrate *syncrate,
1562		    u_int *offset, int wide, role_t role)
1563{
1564	u_int maxoffset;
1565
1566	/* Limit offset to what we can do */
1567	if (syncrate == NULL) {
1568		maxoffset = 0;
1569	} else if ((ahc->features & AHC_ULTRA2) != 0) {
1570		maxoffset = MAX_OFFSET_ULTRA2;
1571	} else {
1572		if (wide)
1573			maxoffset = MAX_OFFSET_16BIT;
1574		else
1575			maxoffset = MAX_OFFSET_8BIT;
1576	}
1577	*offset = MIN(*offset, maxoffset);
1578	if (tinfo != NULL) {
1579		if (role == ROLE_TARGET)
1580			*offset = MIN(*offset, tinfo->user.offset);
1581		else
1582			*offset = MIN(*offset, tinfo->goal.offset);
1583	}
1584}
1585
1586/*
1587 * Truncate the given transfer width parameter to a value the
1588 * current adapter type is capable of.
1589 */
1590void
1591ahc_validate_width(struct ahc_softc *ahc, struct ahc_initiator_tinfo *tinfo,
1592		   u_int *bus_width, role_t role)
1593{
1594	switch (*bus_width) {
1595	default:
1596		if (ahc->features & AHC_WIDE) {
1597			/* Respond Wide */
1598			*bus_width = MSG_EXT_WDTR_BUS_16_BIT;
1599			break;
1600		}
1601		/* FALLTHROUGH */
1602	case MSG_EXT_WDTR_BUS_8_BIT:
1603		*bus_width = MSG_EXT_WDTR_BUS_8_BIT;
1604		break;
1605	}
1606	if (tinfo != NULL) {
1607		if (role == ROLE_TARGET)
1608			*bus_width = MIN(tinfo->user.width, *bus_width);
1609		else
1610			*bus_width = MIN(tinfo->goal.width, *bus_width);
1611	}
1612}
1613
1614/*
 * Update the bitmask of targets with which the controller should
 * negotiate at the next convenient opportunity.  This currently
1617 * means the next time we send the initial identify messages for
1618 * a new transaction.
1619 */
1620void
1621ahc_update_target_msg_request(struct ahc_softc *ahc,
1622			      struct ahc_devinfo *devinfo,
1623			      struct ahc_initiator_tinfo *tinfo,
1624			      int force, int paused)
1625{
1626	u_int targ_msg_req_orig;
1627
1628	targ_msg_req_orig = ahc->targ_msg_req;
1629	if (tinfo->current.period != tinfo->goal.period
1630	 || tinfo->current.width != tinfo->goal.width
1631	 || tinfo->current.offset != tinfo->goal.offset
1632	 || tinfo->current.ppr_options != tinfo->goal.ppr_options
1633	 || (force
1634	  && (tinfo->goal.period != 0
1635	   || tinfo->goal.width != MSG_EXT_WDTR_BUS_8_BIT
1636	   || tinfo->goal.ppr_options != 0)))
1637		ahc->targ_msg_req |= devinfo->target_mask;
1638	else
1639		ahc->targ_msg_req &= ~devinfo->target_mask;
1640
1641	if (ahc->targ_msg_req != targ_msg_req_orig) {
1642		/* Update the message request bit for this target */
1643		if (!paused)
1644			pause_sequencer(ahc);
1645
1646		ahc_outb(ahc, TARGET_MSG_REQUEST,
1647			 ahc->targ_msg_req & 0xFF);
1648		ahc_outb(ahc, TARGET_MSG_REQUEST + 1,
1649			 (ahc->targ_msg_req >> 8) & 0xFF);
1650
1651		if (!paused)
1652			unpause_sequencer(ahc);
1653	}
1654}
1655
1656/*
1657 * Update the user/goal/current tables of synchronous negotiation
1658 * parameters as well as, in the case of a current or active update,
1659 * any data structures on the host controller.  In the case of an
1660 * active update, the specified target is currently talking to us on
1661 * the bus, so the transfer parameter update must take effect
1662 * immediately.
1663 */
1664void
1665ahc_set_syncrate(struct ahc_softc *ahc, struct ahc_devinfo *devinfo,
1666		 struct ahc_syncrate *syncrate, u_int period,
1667		 u_int offset, u_int ppr_options, u_int type, int paused)
1668{
1669	struct	ahc_initiator_tinfo *tinfo;
1670	struct	tmode_tstate *tstate;
1671	u_int	old_period;
1672	u_int	old_offset;
1673	u_int	old_ppr;
1674	int	active = (type & AHC_TRANS_ACTIVE) == AHC_TRANS_ACTIVE;
1675
1676	if (syncrate == NULL) {
1677		period = 0;
1678		offset = 0;
1679	}
1680
1681	tinfo = ahc_fetch_transinfo(ahc, devinfo->channel, devinfo->our_scsiid,
1682				    devinfo->target, &tstate);
1683	old_period = tinfo->current.period;
1684	old_offset = tinfo->current.offset;
1685	old_ppr	   = tinfo->current.ppr_options;
1686
1687	if ((type & AHC_TRANS_CUR) != 0
1688	 && (old_period != period
1689	  || old_offset != offset
1690	  || old_ppr != ppr_options)) {
1691		u_int	scsirate;
1692
1693		scsirate = tinfo->scsirate;
1694		if ((ahc->features & AHC_ULTRA2) != 0) {
1695
1696			scsirate &= ~(SXFR_ULTRA2|SINGLE_EDGE|ENABLE_CRC);
1697			if (syncrate != NULL) {
1698				scsirate |= syncrate->sxfr_u2;
1699				if ((ppr_options & MSG_EXT_PPR_DT_REQ) != 0)
1700					scsirate |= ENABLE_CRC;
1701				else
1702					scsirate |= SINGLE_EDGE;
1703			}
1704		} else {
1705
1706			scsirate &= ~(SXFR|SOFS);
1707			/*
1708			 * Ensure Ultra mode is set properly for
1709			 * this target.
1710			 */
1711			tstate->ultraenb &= ~devinfo->target_mask;
1712			if (syncrate != NULL) {
1713				if (syncrate->sxfr & ULTRA_SXFR) {
1714					tstate->ultraenb |=
1715						devinfo->target_mask;
1716				}
1717				scsirate |= syncrate->sxfr & SXFR;
1718				scsirate |= offset & SOFS;
1719			}
1720			if (active) {
1721				u_int sxfrctl0;
1722
1723				sxfrctl0 = ahc_inb(ahc, SXFRCTL0);
1724				sxfrctl0 &= ~FAST20;
1725				if (tstate->ultraenb & devinfo->target_mask)
1726					sxfrctl0 |= FAST20;
1727				ahc_outb(ahc, SXFRCTL0, sxfrctl0);
1728			}
1729		}
1730		if (active) {
1731			ahc_outb(ahc, SCSIRATE, scsirate);
1732			if ((ahc->features & AHC_ULTRA2) != 0)
1733				ahc_outb(ahc, SCSIOFFSET, offset);
1734		}
1735
1736		tinfo->scsirate = scsirate;
1737		tinfo->current.period = period;
1738		tinfo->current.offset = offset;
1739		tinfo->current.ppr_options = ppr_options;
1740
1741		/* Update the syncrates in any pending scbs */
1742		ahc_update_pending_syncrates(ahc);
1743
1744		ahc_send_async(ahc, devinfo->channel, devinfo->target,
1745			       CAM_LUN_WILDCARD, AC_TRANSFER_NEG);
1746		if (bootverbose) {
1747			if (offset != 0) {
1748				printf("%s: target %d synchronous at %sMHz%s, "
1749				       "offset = 0x%x\n", ahc_name(ahc),
1750				       devinfo->target, syncrate->rate,
1751				       (ppr_options & MSG_EXT_PPR_DT_REQ)
1752				       ? " DT" : "", offset);
1753			} else {
1754				printf("%s: target %d using "
1755				       "asynchronous transfers\n",
1756				       ahc_name(ahc), devinfo->target);
1757			}
1758		}
1759	}
1760
1761	if ((type & AHC_TRANS_GOAL) != 0) {
1762		tinfo->goal.period = period;
1763		tinfo->goal.offset = offset;
1764		tinfo->goal.ppr_options = ppr_options;
1765	}
1766
1767	if ((type & AHC_TRANS_USER) != 0) {
1768		tinfo->user.period = period;
1769		tinfo->user.offset = offset;
1770		tinfo->user.ppr_options = ppr_options;
1771	}
1772
1773	ahc_update_target_msg_request(ahc, devinfo, tinfo,
1774				      /*force*/FALSE,
1775				      paused);
1776}
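
/*
 * A minimal usage sketch (devinfo and the paused flag are assumed to
 * come from the caller's context): forcing a device back to asynchronous
 * transfers in every table passes a NULL syncrate, which zeroes period
 * and offset above:
 *
 *	ahc_set_syncrate(ahc, &devinfo, NULL, 0, 0, 0,
 *			 AHC_TRANS_CUR|AHC_TRANS_GOAL|AHC_TRANS_USER,
 *			 TRUE);
 */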
1777
1778/*
1779 * Update the user/goal/current tables of wide negotiation
1780 * parameters as well as, in the case of a current or active update,
1781 * any data structures on the host controller.  In the case of an
1782 * active update, the specified target is currently talking to us on
1783 * the bus, so the transfer parameter update must take effect
1784 * immediately.
1785 */
1786void
1787ahc_set_width(struct ahc_softc *ahc, struct ahc_devinfo *devinfo,
1788	      u_int width, u_int type, int paused)
1789{
1790	struct ahc_initiator_tinfo *tinfo;
1791	struct tmode_tstate *tstate;
1792	u_int  oldwidth;
1793	int    active = (type & AHC_TRANS_ACTIVE) == AHC_TRANS_ACTIVE;
1794
1795	tinfo = ahc_fetch_transinfo(ahc, devinfo->channel, devinfo->our_scsiid,
1796				    devinfo->target, &tstate);
1797	oldwidth = tinfo->current.width;
1798
1799	if ((type & AHC_TRANS_CUR) != 0 && oldwidth != width) {
1800		u_int	scsirate;
1801
1802		scsirate =  tinfo->scsirate;
1803		scsirate &= ~WIDEXFER;
1804		if (width == MSG_EXT_WDTR_BUS_16_BIT)
1805			scsirate |= WIDEXFER;
1806
1807		tinfo->scsirate = scsirate;
1808
1809		if (active)
1810			ahc_outb(ahc, SCSIRATE, scsirate);
1811
1812		tinfo->current.width = width;
1813
1814		ahc_send_async(ahc, devinfo->channel, devinfo->target,
1815			       CAM_LUN_WILDCARD, AC_TRANSFER_NEG);
1816		if (bootverbose) {
1817			printf("%s: target %d using %dbit transfers\n",
1818			       ahc_name(ahc), devinfo->target,
1819			       8 * (0x01 << width));
1820		}
1821	}
1822	if ((type & AHC_TRANS_GOAL) != 0)
1823		tinfo->goal.width = width;
1824	if ((type & AHC_TRANS_USER) != 0)
1825		tinfo->user.width = width;
1826
1827	ahc_update_target_msg_request(ahc, devinfo, tinfo,
1828				      /*force*/FALSE, paused);
1829}
1830
1831/*
1832 * Update the current state of tagged queuing for a given target.
1833 */
1834void
1835ahc_set_tags(struct ahc_softc *ahc, struct ahc_devinfo *devinfo, int enable)
1836{
1837	struct ahc_initiator_tinfo *tinfo;
1838	struct tmode_tstate *tstate;
1839	uint16_t orig_tagenable;
1840
1841	tinfo = ahc_fetch_transinfo(ahc, devinfo->channel, devinfo->our_scsiid,
1842				    devinfo->target, &tstate);
1843
1844	orig_tagenable = tstate->tagenable;
1845	if (enable)
1846		tstate->tagenable |= devinfo->target_mask;
1847	else
1848		tstate->tagenable &= ~devinfo->target_mask;
1849
1850	if (orig_tagenable != tstate->tagenable) {
1851		ahc_platform_set_tags(ahc, devinfo, enable);
1852		ahc_send_async(ahc, devinfo->channel, devinfo->target,
1853			       devinfo->lun, AC_TRANSFER_NEG);
1854	}
1855
1856}
1857
1858/*
1859 * When the transfer settings for a connection change, update any
1860 * in-transit SCBs to contain the new data so the hardware will
1861 * be set correctly during future (re)selections.
1862 */
1863static void
1864ahc_update_pending_syncrates(struct ahc_softc *ahc)
1865{
1866	struct	scb *pending_scb;
1867	int	pending_scb_count;
1868	int	i;
1869	u_int	saved_scbptr;
1870
1871	/*
1872	 * Traverse the pending SCB list and ensure that all of the
1873	 * SCBs there have the proper settings.
1874	 */
1875	pending_scb_count = 0;
1876	LIST_FOREACH(pending_scb, &ahc->pending_scbs, pending_links) {
1877		struct ahc_devinfo devinfo;
1878		struct hardware_scb *pending_hscb;
1879		struct ahc_initiator_tinfo *tinfo;
1880		struct tmode_tstate *tstate;
1881
1882		ahc_scb_devinfo(ahc, &devinfo, pending_scb);
1883		tinfo = ahc_fetch_transinfo(ahc, devinfo.channel,
1884					    devinfo.our_scsiid,
1885					    devinfo.target, &tstate);
1886		pending_hscb = pending_scb->hscb;
1887		pending_hscb->control &= ~ULTRAENB;
1888		if ((tstate->ultraenb & devinfo.target_mask) != 0)
1889			pending_hscb->control |= ULTRAENB;
1890		pending_hscb->scsirate = tinfo->scsirate;
1891		pending_hscb->scsioffset = tinfo->current.offset;
1892		pending_scb_count++;
1893	}
1894
1895	if (pending_scb_count == 0)
1896		return;
1897
1898	saved_scbptr = ahc_inb(ahc, SCBPTR);
1899	/* Ensure that the hscbs down on the card match the new information */
1900	for (i = 0; i < ahc->scb_data->maxhscbs; i++) {
1901		struct	hardware_scb *pending_hscb;
1902		u_int	control;
1903		u_int	scb_tag;
1904
1905		ahc_outb(ahc, SCBPTR, i);
1906		scb_tag = ahc_inb(ahc, SCB_TAG);
1907		pending_scb = ahc_lookup_scb(ahc, scb_tag);
1908		if (pending_scb == NULL)
1909			continue;
1910
1911		pending_hscb = pending_scb->hscb;
1912		control = ahc_inb(ahc, SCB_CONTROL);
1913		control &= ~ULTRAENB;
1914		if ((pending_hscb->control & ULTRAENB) != 0)
1915			control |= ULTRAENB;
1916		ahc_outb(ahc, SCB_CONTROL, control);
1917		ahc_outb(ahc, SCB_SCSIRATE, pending_hscb->scsirate);
1918		ahc_outb(ahc, SCB_SCSIOFFSET, pending_hscb->scsioffset);
1919	}
1920	ahc_outb(ahc, SCBPTR, saved_scbptr);
1921}
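
/*
 * Note that two passes are needed above: the first fixes up the host
 * copies of the hardware SCBs so that any future download is correct,
 * while the second patches the copies already resident in SCB RAM on
 * the card, restoring SCBPTR afterward since the sequencer depends on
 * its value.
 */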
1922
1923/**************************** Pathing Information *****************************/
1924static void
1925ahc_fetch_devinfo(struct ahc_softc *ahc, struct ahc_devinfo *devinfo)
1926{
1927	u_int	saved_scsiid;
1928	role_t	role;
1929	int	our_id;
1930
1931	if (ahc_inb(ahc, SSTAT0) & TARGET)
1932		role = ROLE_TARGET;
1933	else
1934		role = ROLE_INITIATOR;
1935
1936	if (role == ROLE_TARGET
1937	 && (ahc->features & AHC_MULTI_TID) != 0
1938	 && (ahc_inb(ahc, SEQ_FLAGS) & CMDPHASE_PENDING) != 0) {
1939		/* We were selected, so pull our id from TARGIDIN */
1940		our_id = ahc_inb(ahc, TARGIDIN) & OID;
1941	} else if ((ahc->features & AHC_ULTRA2) != 0)
1942		our_id = ahc_inb(ahc, SCSIID_ULTRA2) & OID;
1943	else
1944		our_id = ahc_inb(ahc, SCSIID) & OID;
1945
1946	saved_scsiid = ahc_inb(ahc, SAVED_SCSIID);
1947	ahc_compile_devinfo(devinfo,
1948			    our_id,
1949			    SCSIID_TARGET(ahc, saved_scsiid),
1950			    ahc_inb(ahc, SAVED_LUN),
1951			    SCSIID_CHANNEL(ahc, saved_scsiid),
1952			    role);
1953}
1954
1955void
1956ahc_compile_devinfo(struct ahc_devinfo *devinfo, u_int our_id, u_int target,
1957		    u_int lun, char channel, role_t role)
1958{
1959	devinfo->our_scsiid = our_id;
1960	devinfo->target = target;
1961	devinfo->lun = lun;
1962	devinfo->target_offset = target;
1963	devinfo->channel = channel;
1964	devinfo->role = role;
1965	if (channel == 'B')
1966		devinfo->target_offset += 8;
1967	devinfo->target_mask = (0x01 << devinfo->target_offset);
1968}
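
/*
 * For example, target 3 on channel 'B' compiles to a target_offset of 11
 * and a target_mask of 0x0800 (1 << 11), the bit used in per-target
 * bitfields such as targ_msg_req, ultraenb and tagenable.
 */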
1969
1970static void
1971ahc_scb_devinfo(struct ahc_softc *ahc, struct ahc_devinfo *devinfo,
1972		struct scb *scb)
1973{
1974	role_t	role;
1975	int	our_id;
1976
1977	our_id = SCSIID_OUR_ID(scb->hscb->scsiid);
1978	role = ROLE_INITIATOR;
1979	if ((scb->hscb->control & TARGET_SCB) != 0)
1980		role = ROLE_TARGET;
1981	ahc_compile_devinfo(devinfo, our_id, SCB_GET_TARGET(ahc, scb),
1982			    SCB_GET_LUN(scb), SCB_GET_CHANNEL(ahc, scb), role);
1983}
1984
1985
1986/************************ Message Phase Processing ****************************/
1987/*
1988 * When an initiator transaction with the MK_MESSAGE flag either reconnects
1989 * or enters the initial message out phase, we are interrupted.  Fill our
1990 * outgoing message buffer with the appropriate message and beging handing
1991 * the message phase(s) manually.
1992 */
1993static void
1994ahc_setup_initiator_msgout(struct ahc_softc *ahc, struct ahc_devinfo *devinfo,
1995			   struct scb *scb)
1996{
1997	/*
1998	 * To facilitate adding multiple messages together,
1999	 * each routine should increment the index and len
2000	 * variables instead of setting them explicitly.
2001	 */
2002	ahc->msgout_index = 0;
2003	ahc->msgout_len = 0;
2004
2005	if ((scb->flags & SCB_DEVICE_RESET) == 0
2006	 && ahc_inb(ahc, MSG_OUT) == MSG_IDENTIFYFLAG) {
2007		u_int identify_msg;
2008
2009		identify_msg = MSG_IDENTIFYFLAG | SCB_GET_LUN(scb);
2010		if ((scb->hscb->control & DISCENB) != 0)
2011			identify_msg |= MSG_IDENTIFY_DISCFLAG;
2012		ahc->msgout_buf[ahc->msgout_index++] = identify_msg;
2013		ahc->msgout_len++;
2014
2015		if ((scb->hscb->control & TAG_ENB) != 0) {
2016			ahc->msgout_buf[ahc->msgout_index++] =
2017			    scb->hscb->control & (TAG_ENB|SCB_TAG_TYPE);
2018			ahc->msgout_buf[ahc->msgout_index++] = scb->hscb->tag;
2019			ahc->msgout_len += 2;
2020		}
2021	}
2022
2023	if (scb->flags & SCB_DEVICE_RESET) {
2024		ahc->msgout_buf[ahc->msgout_index++] = MSG_BUS_DEV_RESET;
2025		ahc->msgout_len++;
2026		ahc_print_path(ahc, scb);
2027		printf("Bus Device Reset Message Sent\n");
2028	} else if ((scb->flags & SCB_ABORT) != 0) {
2029		if ((scb->hscb->control & TAG_ENB) != 0)
2030			ahc->msgout_buf[ahc->msgout_index++] = MSG_ABORT_TAG;
2031		else
2032			ahc->msgout_buf[ahc->msgout_index++] = MSG_ABORT;
2033		ahc->msgout_len++;
2034		ahc_print_path(ahc, scb);
2035		printf("Abort Message Sent\n");
2036	} else if ((ahc->targ_msg_req & devinfo->target_mask) != 0
2037		|| (scb->flags & SCB_NEGOTIATE) != 0) {
2038		ahc_build_transfer_msg(ahc, devinfo);
2039	} else {
2040		printf("ahc_intr: AWAITING_MSG for an SCB that "
2041		       "does not have a waiting message\n");
2042		printf("SCSIID = %x, target_mask = %x\n", scb->hscb->scsiid,
2043		       devinfo->target_mask);
2044		panic("SCB = %d, SCB Control = %x, MSG_OUT = %x "
2045		      "SCB flags = %x", scb->hscb->tag, scb->hscb->control,
2046		      ahc_inb(ahc, MSG_OUT), scb->flags);
2047	}
2048
2049	/*
2050	 * Clear the MK_MESSAGE flag from the SCB so we aren't
2051	 * asked to send this message again.
2052	 */
2053	ahc_outb(ahc, SCB_CONTROL, ahc_inb(ahc, SCB_CONTROL) & ~MK_MESSAGE);
2054	ahc->msgout_index = 0;
2055	ahc->msg_type = MSG_TYPE_INITIATOR_MSGOUT;
2056}
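
/*
 * As a rough illustration, a tagged, disconnection-enabled command to
 * LUN 2 with a negotiation pending leaves msgout_buf looking like:
 *
 *	{ MSG_IDENTIFYFLAG|MSG_IDENTIFY_DISCFLAG|2,
 *	  <queue tag message type>, <tag>,
 *	  MSG_EXTENDED, ... }
 *
 * where the trailing bytes come from ahc_build_transfer_msg() and
 * msgout_index is reset to 0 before the message phase begins.
 */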
2057
2058/*
2059 * Build an appropriate transfer negotiation message for the
2060 * currently active target.
2061 */
2062static void
2063ahc_build_transfer_msg(struct ahc_softc *ahc, struct ahc_devinfo *devinfo)
2064{
2065	/*
2066	 * We need to initiate transfer negotiations.
2067	 * If our current and goal settings are identical,
2068	 * we want to renegotiate due to a check condition.
2069	 */
2070	struct	ahc_initiator_tinfo *tinfo;
2071	struct	tmode_tstate *tstate;
2072	struct	ahc_syncrate *rate;
2073	int	dowide;
2074	int	dosync;
2075	int	doppr;
2076	int	use_ppr;
2077	u_int	period;
2078	u_int	ppr_options;
2079	u_int	offset;
2080
2081	tinfo = ahc_fetch_transinfo(ahc, devinfo->channel, devinfo->our_scsiid,
2082				    devinfo->target, &tstate);
2083	dowide = tinfo->current.width != tinfo->goal.width;
2084	dosync = tinfo->current.period != tinfo->goal.period;
2085	doppr = tinfo->current.ppr_options != tinfo->goal.ppr_options;
2086
2087	if (!dowide && !dosync && !doppr) {
2088		dowide = tinfo->goal.width != MSG_EXT_WDTR_BUS_8_BIT;
2089		dosync = tinfo->goal.period != 0;
2090		doppr = tinfo->goal.ppr_options != 0;
2091	}
2092
2093	if (!dowide && !dosync && !doppr) {
2094		panic("ahc_intr: AWAITING_MSG for negotiation, "
2095		      "but no negotiation needed\n");
2096	}
2097
2098	use_ppr = (tinfo->current.transport_version >= 3) || doppr;
2099	/* Target initiated PPR is not allowed in the SCSI spec */
2100	if (devinfo->role == ROLE_TARGET)
2101		use_ppr = 0;
2102
2103	/*
2104	 * Both the PPR message and SDTR message require the
2105	 * goal syncrate to be limited to what the target device
2106	 * is capable of handling (based on whether an LVD->SE
2107	 * expander is on the bus), so combine these two cases.
2108	 * Regardless, guarantee that if we are using both WDTR and SDTR
2109	 * messages, WDTR comes first.
2110	 */
2111	if (use_ppr || (dosync && !dowide)) {
2112
2113		period = tinfo->goal.period;
2114		ppr_options = tinfo->goal.ppr_options;
2115		if (use_ppr == 0)
2116			ppr_options = 0;
2117		rate = ahc_devlimited_syncrate(ahc, tinfo, &period,
2118					       &ppr_options, devinfo->role);
2119		offset = tinfo->goal.offset;
2120		ahc_validate_offset(ahc, tinfo, rate, &offset,
2121				    use_ppr ? tinfo->goal.width
2122					    : tinfo->current.width,
2123				    devinfo->role);
2124		if (use_ppr) {
2125			ahc_construct_ppr(ahc, devinfo, period, offset,
2126					  tinfo->goal.width, ppr_options);
2127		} else {
2128			ahc_construct_sdtr(ahc, devinfo, period, offset);
2129		}
2130	} else {
2131		ahc_construct_wdtr(ahc, devinfo, tinfo->goal.width);
2132	}
2133}
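
/*
 * In practice this means that a SPI-2 device (transport_version < 3, no
 * PPR options) with both wide and sync goals is sent a WDTR first; once
 * that exchange completes, the WDTR handling in ahc_parse_msg() notices
 * the outstanding sync goal and rebuilds the transfer message, so the
 * SDTR follows in a subsequent message-out phase.
 */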
2134
2135/*
2136 * Build a synchronous negotiation message in our message
2137 * buffer based on the input parameters.
2138 */
2139static void
2140ahc_construct_sdtr(struct ahc_softc *ahc, struct ahc_devinfo *devinfo,
2141		   u_int period, u_int offset)
2142{
2143	ahc->msgout_buf[ahc->msgout_index++] = MSG_EXTENDED;
2144	ahc->msgout_buf[ahc->msgout_index++] = MSG_EXT_SDTR_LEN;
2145	ahc->msgout_buf[ahc->msgout_index++] = MSG_EXT_SDTR;
2146	ahc->msgout_buf[ahc->msgout_index++] = period;
2147	ahc->msgout_buf[ahc->msgout_index++] = offset;
2148	ahc->msgout_len += 5;
2149	if (bootverbose) {
2150		printf("(%s:%c:%d:%d): Sending SDTR period %x, offset %x\n",
2151		       ahc_name(ahc), devinfo->channel, devinfo->target,
2152		       devinfo->lun, period, offset);
2153	}
2154}
2155
2156/*
2157 * Build a wide negotiation message in our message
2158 * buffer based on the input parameters.
2159 */
2160static void
2161ahc_construct_wdtr(struct ahc_softc *ahc, struct ahc_devinfo *devinfo,
2162		   u_int bus_width)
2163{
2164	ahc->msgout_buf[ahc->msgout_index++] = MSG_EXTENDED;
2165	ahc->msgout_buf[ahc->msgout_index++] = MSG_EXT_WDTR_LEN;
2166	ahc->msgout_buf[ahc->msgout_index++] = MSG_EXT_WDTR;
2167	ahc->msgout_buf[ahc->msgout_index++] = bus_width;
2168	ahc->msgout_len += 4;
2169	if (bootverbose) {
2170		printf("(%s:%c:%d:%d): Sending WDTR %x\n",
2171		       ahc_name(ahc), devinfo->channel, devinfo->target,
2172		       devinfo->lun, bus_width);
2173	}
2174}
2175
2176/*
2177 * Build a parallel protocol request message in our message
2178 * buffer based on the input parameters.
2179 */
2180static void
2181ahc_construct_ppr(struct ahc_softc *ahc, struct ahc_devinfo *devinfo,
2182		  u_int period, u_int offset, u_int bus_width,
2183		  u_int ppr_options)
2184{
2185	ahc->msgout_buf[ahc->msgout_index++] = MSG_EXTENDED;
2186	ahc->msgout_buf[ahc->msgout_index++] = MSG_EXT_PPR_LEN;
2187	ahc->msgout_buf[ahc->msgout_index++] = MSG_EXT_PPR;
2188	ahc->msgout_buf[ahc->msgout_index++] = period;
2189	ahc->msgout_buf[ahc->msgout_index++] = 0;
2190	ahc->msgout_buf[ahc->msgout_index++] = offset;
2191	ahc->msgout_buf[ahc->msgout_index++] = bus_width;
2192	ahc->msgout_buf[ahc->msgout_index++] = ppr_options;
2193	ahc->msgout_len += 8;
2194	if (bootverbose) {
2195		printf("(%s:%c:%d:%d): Sending PPR bus_width %x, period %x, "
2196		       "offset %x, ppr_options %x\n", ahc_name(ahc),
2197		       devinfo->channel, devinfo->target, devinfo->lun,
2198		       bus_width, period, offset, ppr_options);
2199	}
2200}
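
/*
 * The eight bytes above follow the parallel protocol request layout:
 * extended-message header (MSG_EXTENDED, MSG_EXT_PPR_LEN, MSG_EXT_PPR),
 * transfer period factor, a reserved byte (the zero above), REQ/ACK
 * offset, transfer width exponent, and the protocol options byte.
 */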
2201
2202/*
2203 * Clear any active message state.
2204 */
2205static void
2206ahc_clear_msg_state(struct ahc_softc *ahc)
2207{
2208	ahc->msgout_len = 0;
2209	ahc->msgin_index = 0;
2210	ahc->msg_type = MSG_TYPE_NONE;
2211	if ((ahc_inb(ahc, SCSISIGI) & ATNI) == 0) {
2212		/*
2213		 * The target didn't care to respond to our
2214		 * message request, so clear ATN.
2215		 */
2216		ahc_outb(ahc, CLRSINT1, CLRATNO);
2217	}
2218	ahc_outb(ahc, MSG_OUT, MSG_NOOP);
2219}
2220
2221/*
2222 * Manual message loop handler.
2223 */
2224static void
2225ahc_handle_message_phase(struct ahc_softc *ahc)
2226{
2227	struct	ahc_devinfo devinfo;
2228	u_int	bus_phase;
2229	int	end_session;
2230
2231	ahc_fetch_devinfo(ahc, &devinfo);
2232	end_session = FALSE;
2233	bus_phase = ahc_inb(ahc, SCSISIGI) & PHASE_MASK;
2234
2235reswitch:
2236	switch (ahc->msg_type) {
2237	case MSG_TYPE_INITIATOR_MSGOUT:
2238	{
2239		int lastbyte;
2240		int phasemis;
2241		int msgdone;
2242
2243		if (ahc->msgout_len == 0)
2244			panic("HOST_MSG_LOOP interrupt with no active message");
2245
2246		phasemis = bus_phase != P_MESGOUT;
2247		if (phasemis) {
2248			if (bus_phase == P_MESGIN) {
2249				/*
2250				 * Change gears and see if
2251				 * this message is of interest to
2252				 * us or should be passed back to
2253				 * the sequencer.
2254				 */
2255				ahc_outb(ahc, CLRSINT1, CLRATNO);
2256				ahc->send_msg_perror = FALSE;
2257				ahc->msg_type = MSG_TYPE_INITIATOR_MSGIN;
2258				ahc->msgin_index = 0;
2259				goto reswitch;
2260			}
2261			end_session = TRUE;
2262			break;
2263		}
2264
2265		if (ahc->send_msg_perror) {
2266			ahc_outb(ahc, CLRSINT1, CLRATNO);
2267			ahc_outb(ahc, CLRSINT1, CLRREQINIT);
2268			ahc_outb(ahc, SCSIDATL, MSG_PARITY_ERROR);
2269			break;
2270		}
2271
2272		msgdone	= ahc->msgout_index == ahc->msgout_len;
2273		if (msgdone) {
2274			/*
2275			 * The target has requested a retry.
2276			 * Re-assert ATN, reset our message index to
2277			 * 0, and try again.
2278			 */
2279			ahc->msgout_index = 0;
2280			ahc_outb(ahc, SCSISIGO, ahc_inb(ahc, SCSISIGO) | ATNO);
2281		}
2282
2283		lastbyte = ahc->msgout_index == (ahc->msgout_len - 1);
2284		if (lastbyte) {
2285			/* Last byte is signified by dropping ATN */
2286			ahc_outb(ahc, CLRSINT1, CLRATNO);
2287		}
2288
2289		/*
2290		 * Clear our interrupt status and present
2291		 * the next byte on the bus.
2292		 */
2293		ahc_outb(ahc, CLRSINT1, CLRREQINIT);
2294		ahc_outb(ahc, SCSIDATL, ahc->msgout_buf[ahc->msgout_index++]);
2295		break;
2296	}
2297	case MSG_TYPE_INITIATOR_MSGIN:
2298	{
2299		int phasemis;
2300		int message_done;
2301
2302		phasemis = bus_phase != P_MESGIN;
2303
2304		if (phasemis) {
2305			ahc->msgin_index = 0;
2306			if (bus_phase == P_MESGOUT
2307			 && (ahc->send_msg_perror == TRUE
2308			  || (ahc->msgout_len != 0
2309			   && ahc->msgout_index == 0))) {
2310				ahc->msg_type = MSG_TYPE_INITIATOR_MSGOUT;
2311				goto reswitch;
2312			}
2313			end_session = TRUE;
2314			break;
2315		}
2316
2317		/* Pull the byte in without acking it */
2318		ahc->msgin_buf[ahc->msgin_index] = ahc_inb(ahc, SCSIBUSL);
2319
2320		message_done = ahc_parse_msg(ahc, &devinfo);
2321
2322		if (message_done) {
2323			/*
2324			 * Clear our incoming message buffer in case there
2325			 * is another message following this one.
2326			 */
2327			ahc->msgin_index = 0;
2328
2329			/*
2330			 * If this message elicited a response,
2331			 * assert ATN so the target takes us to the
2332			 * message out phase.
2333			 */
2334			if (ahc->msgout_len != 0)
2335				ahc_outb(ahc, SCSISIGO,
2336					 ahc_inb(ahc, SCSISIGO) | ATNO);
2337		} else
2338			ahc->msgin_index++;
2339
2340		/* Ack the byte */
2341		ahc_outb(ahc, CLRSINT1, CLRREQINIT);
2342		ahc_inb(ahc, SCSIDATL);
2343		break;
2344	}
2345	case MSG_TYPE_TARGET_MSGIN:
2346	{
2347		int msgdone;
2348		int msgout_request;
2349
2350		if (ahc->msgout_len == 0)
2351			panic("Target MSGIN with no active message");
2352
2353		/*
2354		 * If we interrupted a mesgout session, the initiator
2355		 * will not know this until our first REQ.  So, we
2356		 * only honor mesgout requests after we've sent our
2357		 * first byte.
2358		 */
2359		if ((ahc_inb(ahc, SCSISIGI) & ATNI) != 0
2360		 && ahc->msgout_index > 0)
2361			msgout_request = TRUE;
2362		else
2363			msgout_request = FALSE;
2364
2365		if (msgout_request) {
2366
2367			/*
2368			 * Change gears and see if
2369			 * this message is of interest to
2370			 * us or should be passed back to
2371			 * the sequencer.
2372			 */
2373			ahc->msg_type = MSG_TYPE_TARGET_MSGOUT;
2374			ahc_outb(ahc, SCSISIGO, P_MESGOUT | BSYO);
2375			ahc->msgin_index = 0;
2376			/* Dummy read to REQ for first byte */
2377			ahc_inb(ahc, SCSIDATL);
2378			ahc_outb(ahc, SXFRCTL0,
2379				 ahc_inb(ahc, SXFRCTL0) | SPIOEN);
2380			break;
2381		}
2382
2383		msgdone = ahc->msgout_index == ahc->msgout_len;
2384		if (msgdone) {
2385			ahc_outb(ahc, SXFRCTL0,
2386				 ahc_inb(ahc, SXFRCTL0) & ~SPIOEN);
2387			end_session = TRUE;
2388			break;
2389		}
2390
2391		/*
2392		 * Present the next byte on the bus.
2393		 */
2394		ahc_outb(ahc, SXFRCTL0, ahc_inb(ahc, SXFRCTL0) | SPIOEN);
2395		ahc_outb(ahc, SCSIDATL, ahc->msgout_buf[ahc->msgout_index++]);
2396		break;
2397	}
2398	case MSG_TYPE_TARGET_MSGOUT:
2399	{
2400		int lastbyte;
2401		int msgdone;
2402
2403		/*
2404		 * The initiator signals that this is
2405		 * the last byte by dropping ATN.
2406		 */
2407		lastbyte = (ahc_inb(ahc, SCSISIGI) & ATNI) == 0;
2408
2409		/*
2410		 * Read the latched byte, but turn off SPIOEN first
2411		 * so that we don't inadvertently cause a REQ for the
2412		 * next byte.
2413		 */
2414		ahc_outb(ahc, SXFRCTL0, ahc_inb(ahc, SXFRCTL0) & ~SPIOEN);
2415		ahc->msgin_buf[ahc->msgin_index] = ahc_inb(ahc, SCSIDATL);
2416		msgdone = ahc_parse_msg(ahc, &devinfo);
2417		if (msgdone == MSGLOOP_TERMINATED) {
2418			/*
2419			 * The message is *really* done in that it caused
2420			 * us to go to bus free.  The sequencer has already
2421			 * been reset at this point, so pull the ejection
2422			 * handle.
2423			 */
2424			return;
2425		}
2426
2427		ahc->msgin_index++;
2428
2429		/*
2430		 * XXX Read spec about initiator dropping ATN too soon
2431		 *     and use msgdone to detect it.
2432		 */
2433		if (msgdone == MSGLOOP_MSGCOMPLETE) {
2434			ahc->msgin_index = 0;
2435
2436			/*
2437			 * If this message elicited a response, transition
2438			 * to the Message in phase and send it.
2439			 */
2440			if (ahc->msgout_len != 0) {
2441				ahc_outb(ahc, SCSISIGO, P_MESGIN | BSYO);
2442				ahc_outb(ahc, SXFRCTL0,
2443					 ahc_inb(ahc, SXFRCTL0) | SPIOEN);
2444				ahc->msg_type = MSG_TYPE_TARGET_MSGIN;
2445				ahc->msgin_index = 0;
2446				break;
2447			}
2448		}
2449
2450		if (lastbyte)
2451			end_session = TRUE;
2452		else {
2453			/* Ask for the next byte. */
2454			ahc_outb(ahc, SXFRCTL0,
2455				 ahc_inb(ahc, SXFRCTL0) | SPIOEN);
2456		}
2457
2458		break;
2459	}
2460	default:
2461		panic("Unknown REQINIT message type");
2462	}
2463
2464	if (end_session) {
2465		ahc_clear_msg_state(ahc);
2466		ahc_outb(ahc, RETURN_1, EXIT_MSG_LOOP);
2467	} else
2468		ahc_outb(ahc, RETURN_1, CONT_MSG_LOOP);
2469}
2470
2471/*
2472 * See if we sent a particular extended message to the target.
2473 * If "full" is true, return true only if the target saw the full
2474 * message.  If "full" is false, return true if the target saw at
2475 * least the first byte of the message.
2476 */
2477static int
2478ahc_sent_msg(struct ahc_softc *ahc, ahc_msgtype type, u_int msgval, int full)
2479{
2480	int found;
2481	u_int index;
2482
2483	found = FALSE;
2484	index = 0;
2485
2486	while (index < ahc->msgout_len) {
2487		if (ahc->msgout_buf[index] == MSG_EXTENDED) {
2488			u_int end_index;
2489
2490			end_index = index + 1 + ahc->msgout_buf[index + 1];
2491			if (ahc->msgout_buf[index+2] == msgval
2492			 && type == AHCMSG_EXT) {
2493
2494				if (full) {
2495					if (ahc->msgout_index > end_index)
2496						found = TRUE;
2497				} else if (ahc->msgout_index > index)
2498					found = TRUE;
2499			}
2500			index = end_index;
2501		} else if (ahc->msgout_buf[index] >= MSG_SIMPLE_Q_TAG
2502			&& ahc->msgout_buf[index] <= MSG_IGN_WIDE_RESIDUE) {
2503
2504			/* Skip tag type and tag id or residue param */
2505			index += 2;
2506		} else {
2507			/* Single byte message */
2508			if (type == AHCMSG_1B
2509			 && ahc->msgout_buf[index] == msgval
2510			 && ahc->msgout_index > index)
2511				found = TRUE;
2512			index++;
2513		}
2514
2515		if (found)
2516			break;
2517	}
2518	return (found);
2519}
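
/*
 * To make the index math concrete: for an SDTR that starts at
 * msgout_buf[0], end_index is 0 + 1 + MSG_EXT_SDTR_LEN (3) = 4, the
 * index of the last message byte.  A "full" match therefore requires
 * msgout_index to have advanced past 4 (all five bytes presented on the
 * bus), while a partial match only requires it to have advanced past
 * the MSG_EXTENDED byte at index 0.
 */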
2520
2521/*
2522 * Wait for a complete incoming message, parse it, and respond accordingly.
2523 */
2524static int
2525ahc_parse_msg(struct ahc_softc *ahc, struct ahc_devinfo *devinfo)
2526{
2527	struct	ahc_initiator_tinfo *tinfo;
2528	struct	tmode_tstate *tstate;
2529	int	reject;
2530	int	done;
2531	int	response;
2532	u_int	targ_scsirate;
2533
2534	done = MSGLOOP_IN_PROG;
2535	response = FALSE;
2536	reject = FALSE;
2537	tinfo = ahc_fetch_transinfo(ahc, devinfo->channel, devinfo->our_scsiid,
2538				    devinfo->target, &tstate);
2539	targ_scsirate = tinfo->scsirate;
2540
2541	/*
2542	 * Parse as much of the message as is available,
2543	 * rejecting it if we don't support it.  When
2544	 * the entire message is available and has been
2545	 * handled, return MSGLOOP_MSGCOMPLETE, indicating
2546	 * that we have parsed an entire message.
2547	 *
2548	 * In the case of extended messages, we accept the length
2549	 * byte outright and perform more checking once we know the
2550	 * extended message type.
2551	 */
2552	switch (ahc->msgin_buf[0]) {
2553	case MSG_MESSAGE_REJECT:
2554		response = ahc_handle_msg_reject(ahc, devinfo);
2555		/* FALLTHROUGH */
2556	case MSG_NOOP:
2557		done = MSGLOOP_MSGCOMPLETE;
2558		break;
2559	case MSG_EXTENDED:
2560	{
2561		/* Wait for enough of the message to begin validation */
2562		if (ahc->msgin_index < 2)
2563			break;
2564		switch (ahc->msgin_buf[2]) {
2565		case MSG_EXT_SDTR:
2566		{
2567			struct	 ahc_syncrate *syncrate;
2568			u_int	 period;
2569			u_int	 ppr_options;
2570			u_int	 offset;
2571			u_int	 saved_offset;
2572
2573			if (ahc->msgin_buf[1] != MSG_EXT_SDTR_LEN) {
2574				reject = TRUE;
2575				break;
2576			}
2577
2578			/*
2579			 * Wait until we have both args before validating
2580			 * and acting on this message.
2581			 *
2582			 * Add one to MSG_EXT_SDTR_LEN to account for
2583			 * the extended message preamble.
2584			 */
2585			if (ahc->msgin_index < (MSG_EXT_SDTR_LEN + 1))
2586				break;
2587
2588			period = ahc->msgin_buf[3];
2589			ppr_options = 0;
2590			saved_offset = offset = ahc->msgin_buf[4];
2591			syncrate = ahc_devlimited_syncrate(ahc, tinfo, &period,
2592							   &ppr_options,
2593							   devinfo->role);
2594			ahc_validate_offset(ahc, tinfo, syncrate, &offset,
2595					    targ_scsirate & WIDEXFER,
2596					    devinfo->role);
2597			if (bootverbose) {
2598				printf("(%s:%c:%d:%d): Received "
2599				       "SDTR period %x, offset %x\n\t"
2600				       "Filtered to period %x, offset %x\n",
2601				       ahc_name(ahc), devinfo->channel,
2602				       devinfo->target, devinfo->lun,
2603				       ahc->msgin_buf[3], saved_offset,
2604				       period, offset);
2605			}
2606			ahc_set_syncrate(ahc, devinfo,
2607					 syncrate, period,
2608					 offset, ppr_options,
2609					 AHC_TRANS_ACTIVE|AHC_TRANS_GOAL,
2610					 /*paused*/TRUE);
2611
2612			/*
2613			 * See if we initiated Sync Negotiation
2614			 * and didn't have to fall down to async
2615			 * transfers.
2616			 */
2617			if (ahc_sent_msg(ahc, AHCMSG_EXT, MSG_EXT_SDTR, TRUE)) {
2618				/* We started it */
2619				if (saved_offset != offset) {
2620					/* Went too low - force async */
2621					reject = TRUE;
2622				}
2623			} else {
2624				/*
2625				 * Send our own SDTR in reply
2626				 */
2627				if (bootverbose) {
2628					printf("(%s:%c:%d:%d): Target "
2629					       "Initiated SDTR\n",
2630					       ahc_name(ahc), devinfo->channel,
2631					       devinfo->target, devinfo->lun);
2632				}
2633				ahc->msgout_index = 0;
2634				ahc->msgout_len = 0;
2635				ahc_construct_sdtr(ahc, devinfo,
2636						   period, offset);
2637				ahc->msgout_index = 0;
2638				response = TRUE;
2639			}
2640			done = MSGLOOP_MSGCOMPLETE;
2641			break;
2642		}
2643		case MSG_EXT_WDTR:
2644		{
2645			u_int bus_width;
2646			u_int saved_width;
2647			u_int sending_reply;
2648
2649			sending_reply = FALSE;
2650			if (ahc->msgin_buf[1] != MSG_EXT_WDTR_LEN) {
2651				reject = TRUE;
2652				break;
2653			}
2654
2655			/*
2656			 * Wait until we have our arg before validating
2657			 * and acting on this message.
2658			 *
2659			 * Add one to MSG_EXT_WDTR_LEN to account for
2660			 * the extended message preamble.
2661			 */
2662			if (ahc->msgin_index < (MSG_EXT_WDTR_LEN + 1))
2663				break;
2664
2665			bus_width = ahc->msgin_buf[3];
2666			saved_width = bus_width;
2667			ahc_validate_width(ahc, tinfo, &bus_width,
2668					   devinfo->role);
2669			if (bootverbose) {
2670				printf("(%s:%c:%d:%d): Received WDTR "
2671				       "%x filtered to %x\n",
2672				       ahc_name(ahc), devinfo->channel,
2673				       devinfo->target, devinfo->lun,
2674				       saved_width, bus_width);
2675			}
2676
2677			if (ahc_sent_msg(ahc, AHCMSG_EXT, MSG_EXT_WDTR, TRUE)) {
2678				/*
2679				 * Don't send a WDTR back to the
2680				 * target, since we asked first.
2681				 * If the width went higher than our
2682				 * request, reject it.
2683				 */
2684				if (saved_width > bus_width) {
2685					reject = TRUE;
2686					printf("(%s:%c:%d:%d): requested %dBit "
2687					       "transfers.  Rejecting...\n",
2688					       ahc_name(ahc), devinfo->channel,
2689					       devinfo->target, devinfo->lun,
2690					       8 * (0x01 << bus_width));
2691					bus_width = 0;
2692				}
2693			} else {
2694				/*
2695				 * Send our own WDTR in reply
2696				 */
2697				if (bootverbose) {
2698					printf("(%s:%c:%d:%d): Target "
2699					       "Initiated WDTR\n",
2700					       ahc_name(ahc), devinfo->channel,
2701					       devinfo->target, devinfo->lun);
2702				}
2703				ahc->msgout_index = 0;
2704				ahc->msgout_len = 0;
2705				ahc_construct_wdtr(ahc, devinfo, bus_width);
2706				ahc->msgout_index = 0;
2707				response = TRUE;
2708				sending_reply = TRUE;
2709			}
2710			ahc_set_width(ahc, devinfo, bus_width,
2711				      AHC_TRANS_ACTIVE|AHC_TRANS_GOAL,
2712				      /*paused*/TRUE);
2713			/* After a wide message, we are async */
2714			ahc_set_syncrate(ahc, devinfo,
2715					 /*syncrate*/NULL, /*period*/0,
2716					 /*offset*/0, /*ppr_options*/0,
2717					 AHC_TRANS_ACTIVE, /*paused*/TRUE);
2718			if (sending_reply == FALSE && reject == FALSE) {
2719
2720				if (tinfo->goal.period) {
2721					ahc->msgout_index = 0;
2722					ahc->msgout_len = 0;
2723					ahc_build_transfer_msg(ahc, devinfo);
2724					ahc->msgout_index = 0;
2725					response = TRUE;
2726				}
2727			}
2728			done = MSGLOOP_MSGCOMPLETE;
2729			break;
2730		}
2731		case MSG_EXT_PPR:
2732		{
2733			struct	ahc_syncrate *syncrate;
2734			u_int	period;
2735			u_int	offset;
2736			u_int	bus_width;
2737			u_int	ppr_options;
2738			u_int	saved_width;
2739			u_int	saved_offset;
2740			u_int	saved_ppr_options;
2741
2742			if (ahc->msgin_buf[1] != MSG_EXT_PPR_LEN) {
2743				reject = TRUE;
2744				break;
2745			}
2746
2747			/*
2748			 * Wait until we have all args before validating
2749			 * and acting on this message.
2750			 *
2751			 * Add one to MSG_EXT_PPR_LEN to account for
2752			 * the extended message preamble.
2753			 */
2754			if (ahc->msgin_index < (MSG_EXT_PPR_LEN + 1))
2755				break;
2756
2757			period = ahc->msgin_buf[3];
2758			offset = ahc->msgin_buf[5];
2759			bus_width = ahc->msgin_buf[6];
2760			saved_width = bus_width;
2761			ppr_options = ahc->msgin_buf[7];
2762			/*
2763			 * According to the spec, a DT-only
2764			 * period factor with no DT option
2765			 * set implies async.
2766			 */
2767			if ((ppr_options & MSG_EXT_PPR_DT_REQ) == 0
2768			 && period == 9)
2769				offset = 0;
2770			saved_ppr_options = ppr_options;
2771			saved_offset = offset;
2772
2773			/*
2774			 * Mask out any options we don't support
2775			 * on any controller.  Transfer options are
2776			 * only available if we are negotiating wide.
2777			 */
2778			ppr_options &= MSG_EXT_PPR_DT_REQ;
2779			if (bus_width == 0)
2780				ppr_options = 0;
2781
2782			ahc_validate_width(ahc, tinfo, &bus_width,
2783					   devinfo->role);
2784			syncrate = ahc_devlimited_syncrate(ahc, tinfo, &period,
2785							   &ppr_options,
2786							   devinfo->role);
2787			ahc_validate_offset(ahc, tinfo, syncrate,
2788					    &offset, bus_width,
2789					    devinfo->role);
2790
2791			if (ahc_sent_msg(ahc, AHCMSG_EXT, MSG_EXT_PPR, TRUE)) {
2792				/*
2793				 * If we are unable to do any of the
2794				 * requested options (we went too low),
2795				 * then we'll have to reject the message.
2796				 */
2797				if (saved_width > bus_width
2798				 || saved_offset != offset
2799				 || saved_ppr_options != ppr_options) {
2800					reject = TRUE;
2801					period = 0;
2802					offset = 0;
2803					bus_width = 0;
2804					ppr_options = 0;
2805					syncrate = NULL;
2806				}
2807			} else {
2808				if (devinfo->role != ROLE_TARGET)
2809					printf("(%s:%c:%d:%d): Target "
2810					       "Initiated PPR\n",
2811					       ahc_name(ahc), devinfo->channel,
2812					       devinfo->target, devinfo->lun);
2813				else
2814					printf("(%s:%c:%d:%d): Initiator "
2815					       "Initiated PPR\n",
2816					       ahc_name(ahc), devinfo->channel,
2817					       devinfo->target, devinfo->lun);
2818				ahc->msgout_index = 0;
2819				ahc->msgout_len = 0;
2820				ahc_construct_ppr(ahc, devinfo, period, offset,
2821						  bus_width, ppr_options);
2822				ahc->msgout_index = 0;
2823				response = TRUE;
2824			}
2825			if (bootverbose) {
2826				printf("(%s:%c:%d:%d): Received PPR width %x, "
2827				       "period %x, offset %x, options %x\n"
2828				       "\tFiltered to width %x, period %x, "
2829				       "offset %x, options %x\n",
2830				       ahc_name(ahc), devinfo->channel,
2831				       devinfo->target, devinfo->lun,
2832				       ahc->msgin_buf[3], saved_width,
2833				       saved_offset, saved_ppr_options,
2834				       bus_width, period, offset, ppr_options);
2835			}
2836			ahc_set_width(ahc, devinfo, bus_width,
2837				      AHC_TRANS_ACTIVE|AHC_TRANS_GOAL,
2838				      /*paused*/TRUE);
2839			ahc_set_syncrate(ahc, devinfo,
2840					 syncrate, period,
2841					 offset, ppr_options,
2842					 AHC_TRANS_ACTIVE|AHC_TRANS_GOAL,
2843					 /*paused*/TRUE);
2844			done = MSGLOOP_MSGCOMPLETE;
2845			break;
2846		}
2847		default:
2848			/* Unknown extended message.  Reject it. */
2849			reject = TRUE;
2850			break;
2851		}
2852		break;
2853	}
2854	case MSG_BUS_DEV_RESET:
2855		ahc_handle_devreset(ahc, devinfo,
2856				    CAM_BDR_SENT,
2857				    "Bus Device Reset Received",
2858				    /*verbose_level*/0);
2859		restart_sequencer(ahc);
2860		done = MSGLOOP_TERMINATED;
2861		break;
2862	case MSG_ABORT_TAG:
2863	case MSG_ABORT:
2864	case MSG_CLEAR_QUEUE:
2865#ifdef AHC_TARGET_MODE
2866		/* Target mode messages */
2867		if (devinfo->role != ROLE_TARGET) {
2868			reject = TRUE;
2869			break;
2870		}
2871		ahc_abort_scbs(ahc, devinfo->target, devinfo->channel,
2872			       devinfo->lun,
2873			       ahc->msgin_buf[0] == MSG_ABORT_TAG
2874						  ? SCB_LIST_NULL
2875						  : ahc_inb(ahc, INITIATOR_TAG),
2876			       ROLE_TARGET, CAM_REQ_ABORTED);
2877
2878		tstate = ahc->enabled_targets[devinfo->our_scsiid];
2879		if (tstate != NULL) {
2880			struct tmode_lstate* lstate;
2881
2882			lstate = tstate->enabled_luns[devinfo->lun];
2883			if (lstate != NULL) {
2884				ahc_queue_lstate_event(ahc, lstate,
2885						       devinfo->our_scsiid,
2886						       ahc->msgin_buf[0],
2887						       /*arg*/0);
2888				ahc_send_lstate_events(ahc, lstate);
2889			}
2890		}
2891		done = MSGLOOP_MSGCOMPLETE;
2892		break;
2893#endif
2894	case MSG_TERM_IO_PROC:
2895	default:
2896		reject = TRUE;
2897		break;
2898	}
2899
2900	if (reject) {
2901		/*
2902		 * Setup to reject the message.
2903		 */
2904		ahc->msgout_index = 0;
2905		ahc->msgout_len = 1;
2906		ahc->msgout_buf[0] = MSG_MESSAGE_REJECT;
2907		done = MSGLOOP_MSGCOMPLETE;
2908		response = TRUE;
2909	}
2910
2911	if (done != MSGLOOP_IN_PROG && !response)
2912		/* Clear the outgoing message buffer */
2913		ahc->msgout_len = 0;
2914
2915	return (done);
2916}
2917
2918/*
2919 * Process a message reject message.
2920 */
2921static int
2922ahc_handle_msg_reject(struct ahc_softc *ahc, struct ahc_devinfo *devinfo)
2923{
2924	/*
2925	 * What we care about here is if we had an
2926	 * outstanding SDTR or WDTR message for this
2927	 * target.  If we did, this is a signal that
2928	 * the target is refusing negotiation.
2929	 */
2930	struct scb *scb;
2931	struct ahc_initiator_tinfo *tinfo;
2932	struct tmode_tstate *tstate;
2933	u_int scb_index;
2934	u_int last_msg;
2935	int   response = 0;
2936
2937	scb_index = ahc_inb(ahc, SCB_TAG);
2938	scb = ahc_lookup_scb(ahc, scb_index);
2939	tinfo = ahc_fetch_transinfo(ahc, devinfo->channel,
2940				    devinfo->our_scsiid,
2941				    devinfo->target, &tstate);
2942	/* Might be necessary */
2943	last_msg = ahc_inb(ahc, LAST_MSG);
2944
2945	if (ahc_sent_msg(ahc, AHCMSG_EXT, MSG_EXT_PPR, /*full*/FALSE)) {
2946		/*
2947		 * Target does not support the PPR message.
2948		 * Attempt to negotiate SPI-2 style.
2949		 */
2950		if (bootverbose) {
2951			printf("(%s:%c:%d:%d): PPR Rejected. "
2952			       "Trying WDTR/SDTR\n",
2953			       ahc_name(ahc), devinfo->channel,
2954			       devinfo->target, devinfo->lun);
2955		}
2956		tinfo->goal.ppr_options = 0;
2957		tinfo->current.transport_version = 2;
2958		tinfo->goal.transport_version = 2;
2959		ahc->msgout_index = 0;
2960		ahc->msgout_len = 0;
2961		ahc_build_transfer_msg(ahc, devinfo);
2962		ahc->msgout_index = 0;
2963		response = 1;
2964	} else if (ahc_sent_msg(ahc, AHCMSG_EXT, MSG_EXT_WDTR, /*full*/FALSE)) {
2965
2966		/* note 8bit xfers */
2967		printf("(%s:%c:%d:%d): refuses WIDE negotiation.  Using "
2968		       "8bit transfers\n", ahc_name(ahc),
2969		       devinfo->channel, devinfo->target, devinfo->lun);
2970		ahc_set_width(ahc, devinfo, MSG_EXT_WDTR_BUS_8_BIT,
2971			      AHC_TRANS_ACTIVE|AHC_TRANS_GOAL,
2972			      /*paused*/TRUE);
2973		/*
2974		 * No need to clear the sync rate.  If the target
2975		 * did not accept the command, our syncrate is
2976		 * unaffected.  If the target started the negotiation,
2977		 * but rejected our response, we already cleared the
2978		 * sync rate before sending our WDTR.
2979		 */
2980		if (tinfo->goal.period) {
2981
2982			/* Start the sync negotiation */
2983			ahc->msgout_index = 0;
2984			ahc->msgout_len = 0;
2985			ahc_build_transfer_msg(ahc, devinfo);
2986			ahc->msgout_index = 0;
2987			response = 1;
2988		}
2989	} else if (ahc_sent_msg(ahc, AHCMSG_EXT, MSG_EXT_SDTR, /*full*/FALSE)) {
2990		/* note asynch xfers and clear flag */
2991		ahc_set_syncrate(ahc, devinfo, /*syncrate*/NULL, /*period*/0,
2992				 /*offset*/0, /*ppr_options*/0,
2993				 AHC_TRANS_ACTIVE|AHC_TRANS_GOAL,
2994				 /*paused*/TRUE);
2995		printf("(%s:%c:%d:%d): refuses synchronous negotiation. "
2996		       "Using asynchronous transfers\n",
2997		       ahc_name(ahc), devinfo->channel,
2998		       devinfo->target, devinfo->lun);
2999	} else if ((scb->hscb->control & MSG_SIMPLE_Q_TAG) != 0) {
3000
3001		printf("(%s:%c:%d:%d): refuses tagged commands.  Performing "
3002		       "non-tagged I/O\n", ahc_name(ahc),
3003		       devinfo->channel, devinfo->target, devinfo->lun);
3004		ahc_set_tags(ahc, devinfo, FALSE);
3005
3006		/*
3007		 * Resend the identify for this CCB as the target
3008		 * may believe that the selection is invalid otherwise.
3009		 */
3010		ahc_outb(ahc, SCB_CONTROL,
3011			 ahc_inb(ahc, SCB_CONTROL) & ~MSG_SIMPLE_Q_TAG);
3012	 	scb->hscb->control &= ~MSG_SIMPLE_Q_TAG;
3013		ahc_set_transaction_tag(scb, /*enabled*/FALSE,
3014					/*type*/MSG_SIMPLE_Q_TAG);
3015		ahc_outb(ahc, MSG_OUT, MSG_IDENTIFYFLAG);
3016		ahc_outb(ahc, SCSISIGO, ahc_inb(ahc, SCSISIGO) | ATNO);
3017
3018		/*
3019		 * This transaction is now at the head of
3020		 * the untagged queue for this target.
3021		 */
3022		if ((ahc->flags & AHC_SCB_BTT) == 0) {
3023			struct scb_tailq *untagged_q;
3024
3025			untagged_q = &(ahc->untagged_queues[devinfo->target]);
3026			TAILQ_INSERT_HEAD(untagged_q, scb, links.tqe);
3027			scb->flags |= SCB_UNTAGGEDQ;
3028		}
3029		ahc_busy_tcl(ahc, BUILD_TCL(scb->hscb->scsiid, devinfo->lun),
3030			     scb->hscb->tag);
3031
3032		/*
3033		 * Requeue all tagged commands for this target
3034		 * currently in our possession so they can be
3035		 * converted to untagged commands.
3036		 */
3037		ahc_search_qinfifo(ahc, SCB_GET_TARGET(ahc, scb),
3038				   SCB_GET_CHANNEL(ahc, scb),
3039				   SCB_GET_LUN(scb), /*tag*/SCB_LIST_NULL,
3040				   ROLE_INITIATOR, CAM_REQUEUE_REQ,
3041				   SEARCH_COMPLETE);
3042	} else {
3043		/*
3044		 * Otherwise, we ignore it.
3045		 */
3046		printf("%s:%c:%d: Message reject for %x -- ignored\n",
3047		       ahc_name(ahc), devinfo->channel, devinfo->target,
3048		       last_msg);
3049	}
3050	return (response);
3051}
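
/*
 * In summary, a reject steps the negotiation down one level at a time:
 * a rejected PPR falls back to SPI-2 style WDTR/SDTR, a rejected WDTR
 * falls back to 8bit transfers (with sync retried if still a goal), a
 * rejected SDTR falls back to async, and a rejected tag message falls
 * back to untagged I/O; anything else is logged and ignored.
 */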
3052
3053/*
3054 * Process an ignore wide residue message.
3055 */
3056static void
3057ahc_handle_ign_wide_residue(struct ahc_softc *ahc, struct ahc_devinfo *devinfo)
3058{
3059	u_int scb_index;
3060	struct scb *scb;
3061
3062	scb_index = ahc_inb(ahc, SCB_TAG);
3063	scb = ahc_lookup_scb(ahc, scb_index);
3064	/*
3065	 * XXX Actually check data direction in the sequencer?
3066	 * Perhaps add datadir to some spare bits in the hscb?
3067	 */
3068	if ((ahc_inb(ahc, SEQ_FLAGS) & DPHASE) == 0
3069	 || ahc_get_transfer_dir(scb) != CAM_DIR_IN) {
3070		/*
3071		 * Ignore the message if we haven't
3072		 * seen an appropriate data phase yet.
3073		 */
3074	} else {
3075		/*
3076		 * If the residual occurred on the last
3077		 * transfer and the transfer request was
3078		 * expected to end on an odd count, do
3079		 * nothing.  Otherwise, subtract a byte
3080		 * and update the residual count accordingly.
3081		 */
3082		uint32_t sgptr;
3083
3084		sgptr = ahc_inb(ahc, SCB_RESIDUAL_SGPTR);
3085		if ((sgptr & SG_LIST_NULL) != 0
3086		 && ahc_inb(ahc, DATA_COUNT_ODD) == 1) {
3087			/*
3088			 * If the residual occurred on the last
3089			 * transfer and the transfer request was
3090			 * expected to end on an odd count, do
3091			 * nothing.
3092			 */
3093		} else {
3094			struct ahc_dma_seg *sg;
3095			uint32_t data_cnt;
3096			uint32_t data_addr;
3097
3098			/* Pull in the rest of the sgptr */
3099			sgptr |= (ahc_inb(ahc, SCB_RESIDUAL_SGPTR + 3) << 24)
3100			      | (ahc_inb(ahc, SCB_RESIDUAL_SGPTR + 2) << 16)
3101			      | (ahc_inb(ahc, SCB_RESIDUAL_SGPTR + 1) << 8);
3102			sgptr &= SG_PTR_MASK;
3103			data_cnt = (ahc_inb(ahc, SCB_RESIDUAL_DATACNT+2) << 16)
3104				 | (ahc_inb(ahc, SCB_RESIDUAL_DATACNT+1) << 8)
3105				 | (ahc_inb(ahc, SCB_RESIDUAL_DATACNT));
3106
3107			data_addr = (ahc_inb(ahc, SHADDR + 3) << 24)
3108				  | (ahc_inb(ahc, SHADDR + 2) << 16)
3109				  | (ahc_inb(ahc, SHADDR + 1) << 8)
3110				  | (ahc_inb(ahc, SHADDR));
3111
3112			data_cnt += 1;
3113			data_addr -= 1;
3114
3115			sg = ahc_sg_bus_to_virt(scb, sgptr);
3116			/*
3117			 * The residual sg ptr points to the next S/G
3118			 * to load so we must go back one.
3119			 */
3120			sg--;
3121			if (sg != scb->sg_list
3122			 && (sg->len & AHC_SG_LEN_MASK) < data_cnt) {
3123
3124				sg--;
3125				data_cnt = 1 | (sg->len & AHC_DMA_LAST_SEG);
3126				data_addr = sg->addr
3127					  + (sg->len & AHC_SG_LEN_MASK) - 1;
3128
3129				/*
3130				 * Increment sg so it points to the
3131				 * "next" sg.
3132				 */
3133				sg++;
3134				sgptr = ahc_sg_virt_to_bus(scb, sg);
3135				ahc_outb(ahc, SCB_RESIDUAL_SGPTR + 3,
3136					 sgptr >> 24);
3137				ahc_outb(ahc, SCB_RESIDUAL_SGPTR + 2,
3138					 sgptr >> 16);
3139				ahc_outb(ahc, SCB_RESIDUAL_SGPTR + 1,
3140					 sgptr >> 8);
3141				ahc_outb(ahc, SCB_RESIDUAL_SGPTR, sgptr);
3142			}
3143
3144/* XXX What about high address byte??? */
3145			ahc_outb(ahc, SCB_RESIDUAL_DATACNT + 3, data_cnt >> 24);
3146			ahc_outb(ahc, SCB_RESIDUAL_DATACNT + 2, data_cnt >> 16);
3147			ahc_outb(ahc, SCB_RESIDUAL_DATACNT + 1, data_cnt >> 8);
3148			ahc_outb(ahc, SCB_RESIDUAL_DATACNT, data_cnt);
3149
3150/* XXX Perhaps better to just keep the saved address in sram */
3151			if ((ahc->features & AHC_ULTRA2) != 0) {
3152				ahc_outb(ahc, HADDR + 3, data_addr >> 24);
3153				ahc_outb(ahc, HADDR + 2, data_addr >> 16);
3154				ahc_outb(ahc, HADDR + 1, data_addr >> 8);
3155				ahc_outb(ahc, HADDR, data_addr);
3156				ahc_outb(ahc, DFCNTRL, PRELOADEN);
3157				ahc_outb(ahc, SXFRCTL0,
3158					 ahc_inb(ahc, SXFRCTL0) | CLRCHN);
3159			} else {
3160				ahc_outb(ahc, HADDR + 3, data_addr >> 24);
3161				ahc_outb(ahc, HADDR + 2, data_addr >> 16);
3162				ahc_outb(ahc, HADDR + 1, data_addr >> 8);
3163				ahc_outb(ahc, HADDR, data_addr);
3164			}
3165		}
3166	}
3167}
3168
3169/*
3170 * Handle the effects of issuing a bus device reset message.
3171 */
3172static void
3173ahc_handle_devreset(struct ahc_softc *ahc, struct ahc_devinfo *devinfo,
3174		    cam_status status, char *message, int verbose_level)
3175{
3176#ifdef AHC_TARGET_MODE
3177	struct tmode_tstate* tstate;
3178	u_int lun;
3179#endif
3180	int found;
3181
3182	found = ahc_abort_scbs(ahc, devinfo->target, devinfo->channel,
3183			       CAM_LUN_WILDCARD, SCB_LIST_NULL, devinfo->role,
3184			       status);
3185
3186#ifdef AHC_TARGET_MODE
3187	/*
3188	 * Send an immediate notify ccb to all target mode peripheral
3189	 * drivers affected by this action.
3190	 */
3191	tstate = ahc->enabled_targets[devinfo->our_scsiid];
3192	if (tstate != NULL) {
3193		for (lun = 0; lun < AHC_NUM_LUNS; lun++) {
3194			struct tmode_lstate* lstate;
3195
3196			lstate = tstate->enabled_luns[lun];
3197			if (lstate == NULL)
3198				continue;
3199
3200			ahc_queue_lstate_event(ahc, lstate, devinfo->our_scsiid,
3201					       MSG_BUS_DEV_RESET, /*arg*/0);
3202			ahc_send_lstate_events(ahc, lstate);
3203		}
3204	}
3205#endif
3206
3207	/*
3208	 * Go back to async/narrow transfers and renegotiate.
3209	 */
3210	ahc_set_width(ahc, devinfo, MSG_EXT_WDTR_BUS_8_BIT,
3211		      AHC_TRANS_CUR, /*paused*/TRUE);
3212	ahc_set_syncrate(ahc, devinfo, /*syncrate*/NULL,
3213			 /*period*/0, /*offset*/0, /*ppr_options*/0,
3214			 AHC_TRANS_CUR, /*paused*/TRUE);
3215
3216	ahc_send_async(ahc, devinfo->channel, devinfo->target,
3217		       CAM_LUN_WILDCARD, AC_SENT_BDR);
3218
3219	if (message != NULL
3220	 && (verbose_level <= bootverbose))
3221		printf("%s: %s on %c:%d. %d SCBs aborted\n", ahc_name(ahc),
3222		       message, devinfo->channel, devinfo->target, found);
3223}
3224
3225#ifdef AHC_TARGET_MODE
3226void
3227ahc_setup_target_msgin(struct ahc_softc *ahc, struct ahc_devinfo *devinfo)
3228{
3229	/*
3230	 * To facilitate adding multiple messages together,
3231	 * each routine should increment the index and len
3232	 * variables instead of setting them explicitly.
3233	 */
3234	ahc->msgout_index = 0;
3235	ahc->msgout_len = 0;
3236
3237	if ((ahc->targ_msg_req & devinfo->target_mask) != 0)
3238		ahc_build_transfer_msg(ahc, devinfo);
3239	else
3240		panic("ahc_intr: AWAITING target message with no message");
3241
3242	ahc->msgout_index = 0;
3243	ahc->msg_type = MSG_TYPE_TARGET_MSGIN;
3244}
3245#endif
3246/**************************** Initialization **********************************/
3247/*
3248 * Allocate a controller structure for a new device
3249 * and perform initial initialization.
3250 */
3251struct ahc_softc *
3252ahc_alloc(void *platform_arg, char *name)
3253{
3254	struct  ahc_softc *ahc;
3255	int	i;
3256
3257#ifndef	__FreeBSD__
3258	ahc = malloc(sizeof(*ahc), M_DEVBUF, M_NOWAIT);
3259	if (!ahc) {
3260		printf("aic7xxx: cannot malloc softc!\n");
3261		free(name, M_DEVBUF);
3262		return NULL;
3263	}
3264#else
3265	ahc = device_get_softc((device_t)platform_arg);
3266#endif
3267	memset(ahc, 0, sizeof(*ahc));
3268	LIST_INIT(&ahc->pending_scbs);
3269	/* We don't know our unit number until the OSM sets it */
3270	ahc->name = name;
3271	for (i = 0; i < 16; i++)
3272		TAILQ_INIT(&ahc->untagged_queues[i]);
3273	if (ahc_platform_alloc(ahc, platform_arg) != 0) {
3274		ahc_free(ahc);
3275		ahc = NULL;
3276	}
3277	return (ahc);
3278}
3279
3280int
3281ahc_softc_init(struct ahc_softc *ahc, struct ahc_probe_config *config)
3282{
3283
3284	ahc->chip = config->chip;
3285	ahc->features = config->features;
3286	ahc->bugs = config->bugs;
3287	ahc->flags = config->flags;
3288	ahc->channel = config->channel;
3289	ahc->unpause = (ahc_inb(ahc, HCNTRL) & IRQMS) | INTEN;
3290	ahc->description = config->description;
3291	/* The IRQMS bit is only valid on VL and EISA chips */
3292	if ((ahc->chip & AHC_PCI) != 0)
3293		ahc->unpause &= ~IRQMS;
3294	ahc->pause = ahc->unpause | PAUSE;
3295	/* XXX The shared scb data stuff should be deprecated */
3296	if (ahc->scb_data == NULL) {
3297		ahc->scb_data = malloc(sizeof(*ahc->scb_data),
3298				       M_DEVBUF, M_NOWAIT);
3299		if (ahc->scb_data == NULL)
3300			return (ENOMEM);
3301		memset(ahc->scb_data, 0, sizeof(*ahc->scb_data));
3302	}
3303
3304	return (0);
3305}
3306
3307void
3308ahc_softc_insert(struct ahc_softc *ahc)
3309{
3310	struct ahc_softc *list_ahc;
3311
3312#ifdef AHC_SUPPORT_PCI
3313	/*
3314	 * Second Function PCI devices need to inherit some
3315	 * settings from function 0.  We assume that function 0
3316	 * will always be found prior to function 1.
3317	 */
3318	if ((ahc->chip & AHC_BUS_MASK) == AHC_PCI
3319	 && ahc_get_pci_function(ahc->dev_softc) == 1) {
3320		TAILQ_FOREACH(list_ahc, &ahc_tailq, links) {
3321			ahc_dev_softc_t list_pci;
3322			ahc_dev_softc_t pci;
3323
3324			list_pci = list_ahc->dev_softc;
3325			pci = ahc->dev_softc;
3326			if (ahc_get_pci_bus(list_pci) == ahc_get_pci_bus(pci)
3327			 && ahc_get_pci_slot(list_pci) == ahc_get_pci_slot(pci)
3328			 && ahc_get_pci_function(list_pci) == 0) {
3329				ahc->flags &= ~AHC_BIOS_ENABLED;
3330				ahc->flags |=
3331				    list_ahc->flags & AHC_BIOS_ENABLED;
3332				ahc->flags &= ~AHC_CHANNEL_B_PRIMARY;
3333				ahc->flags |=
3334				    list_ahc->flags & AHC_CHANNEL_B_PRIMARY;
3335				break;
3336			}
3337		}
3338	}
3339#endif
3340
3341	/*
3342	 * Insertion sort into our list of softcs.
3343	 */
3344	list_ahc = TAILQ_FIRST(&ahc_tailq);
3345	while (list_ahc != NULL
3346	    && ahc_softc_comp(list_ahc, ahc) <= 0)
3347		list_ahc = TAILQ_NEXT(list_ahc, links);
3348	if (list_ahc != NULL)
3349		TAILQ_INSERT_BEFORE(list_ahc, ahc, links);
3350	else
3351		TAILQ_INSERT_TAIL(&ahc_tailq, ahc, links);
3352	ahc->init_level++;
3353}
3354
3355void
3356ahc_set_unit(struct ahc_softc *ahc, int unit)
3357{
3358	ahc->unit = unit;
3359}
3360
3361void
3362ahc_set_name(struct ahc_softc *ahc, char *name)
3363{
3364	if (ahc->name != NULL)
3365		free(ahc->name, M_DEVBUF);
3366	ahc->name = name;
3367}
3368
3369void
3370ahc_free(struct ahc_softc *ahc)
3371{
3372	int i;
3373
3374	ahc_fini_scbdata(ahc);
3375	switch (ahc->init_level) {
3376	default:
3377	case 5:
3378		ahc_shutdown(ahc);
3379		TAILQ_REMOVE(&ahc_tailq, ahc, links);
3380		/* FALLTHROUGH */
3381	case 4:
3382		ahc_dmamap_unload(ahc, ahc->shared_data_dmat,
3383				  ahc->shared_data_dmamap);
3384		/* FALLTHROUGH */
3385	case 3:
3386		ahc_dmamem_free(ahc, ahc->shared_data_dmat, ahc->qoutfifo,
3387				ahc->shared_data_dmamap);
3388		ahc_dmamap_destroy(ahc, ahc->shared_data_dmat,
3389				   ahc->shared_data_dmamap);
3390		/* FALLTHROUGH */
3391	case 2:
3392		ahc_dma_tag_destroy(ahc, ahc->shared_data_dmat);
3393	case 1:
3394#ifndef __linux__
3395		ahc_dma_tag_destroy(ahc, ahc->buffer_dmat);
3396#endif
3397		break;
3398	}
3399
3400	ahc_platform_free(ahc);
3401	for (i = 0; i < AHC_NUM_TARGETS; i++) {
3402		struct tmode_tstate *tstate;
3403
3404		tstate = ahc->enabled_targets[i];
3405		if (tstate != NULL) {
3406#ifdef AHC_TARGET_MODE
3407			int j;
3408
3409			for (j = 0; j < AHC_NUM_LUNS; j++) {
3410				struct tmode_lstate *lstate;
3411
3412				lstate = tstate->enabled_luns[j];
3413				if (lstate != NULL) {
3414					xpt_free_path(lstate->path);
3415					free(lstate, M_DEVBUF);
3416				}
3417			}
3418#endif
3419			free(tstate, M_DEVBUF);
3420		}
3421	}
3422#ifdef AHC_TARGET_MODE
3423	if (ahc->black_hole != NULL) {
3424		xpt_free_path(ahc->black_hole->path);
3425		free(ahc->black_hole, M_DEVBUF);
3426	}
3427#endif
3428	if (ahc->name != NULL)
3429		free(ahc->name, M_DEVBUF);
3430#ifndef __FreeBSD__
3431	free(ahc, M_DEVBUF);
3432#endif
3433	return;
3434}
3435
3436void
3437ahc_shutdown(void *arg)
3438{
3439	struct	ahc_softc *ahc;
3440	int	i;
3441
3442	ahc = (struct ahc_softc *)arg;
3443
3444	/* This will reset most registers to 0, but not all */
3445	ahc_reset(ahc);
3446	ahc_outb(ahc, SCSISEQ, 0);
3447	ahc_outb(ahc, SXFRCTL0, 0);
3448	ahc_outb(ahc, DSPCISTATUS, 0);
3449
3450	for (i = TARG_SCSIRATE; i < HA_274_BIOSCTRL; i++)
3451		ahc_outb(ahc, i, 0);
3452}
3453
3454/*
3455 * Reset the controller and record some information about it
3456 * that is only available just after a reset.
3457 */
3458int
3459ahc_reset(struct ahc_softc *ahc)
3460{
3461	u_int	sblkctl;
3462	u_int	sxfrctl1_a, sxfrctl1_b;
3463	int	wait;
3464
3465	/*
3466	 * Preserve the value of the SXFRCTL1 register for all channels.
3467	 * It contains settings that affect termination and we don't want
3468	 * to disturb the integrity of the bus.
3469	 */
3470	pause_sequencer(ahc);
3471	sxfrctl1_b = 0;
3472	if ((ahc->chip & AHC_CHIPID_MASK) == AHC_AIC7770) {
3473		u_int sblkctl;
3474
3475		/*
3476		 * Save channel B's settings in case this chip
3477		 * is setup for TWIN channel operation.
3478		 */
3479		sblkctl = ahc_inb(ahc, SBLKCTL);
3480		ahc_outb(ahc, SBLKCTL, sblkctl | SELBUSB);
3481		sxfrctl1_b = ahc_inb(ahc, SXFRCTL1);
3482		ahc_outb(ahc, SBLKCTL, sblkctl & ~SELBUSB);
3483	}
3484	sxfrctl1_a = ahc_inb(ahc, SXFRCTL1);
3485
3486	ahc_outb(ahc, HCNTRL, CHIPRST | ahc->pause);
3487
3488	/*
3489	 * Ensure that the reset has finished
3490	 */
3491	wait = 1000;
3492	do {
3493		ahc_delay(1000);
3494	} while (--wait && !(ahc_inb(ahc, HCNTRL) & CHIPRSTACK));
3495
3496	if (wait == 0) {
3497		printf("%s: WARNING - Failed chip reset!  "
3498		       "Trying to initialize anyway.\n", ahc_name(ahc));
3499	}
3500	ahc_outb(ahc, HCNTRL, ahc->pause);
3501
3502	/* Determine channel configuration */
3503	sblkctl = ahc_inb(ahc, SBLKCTL) & (SELBUSB|SELWIDE);
3504	/* No Twin Channel PCI cards */
3505	if ((ahc->chip & AHC_PCI) != 0)
3506		sblkctl &= ~SELBUSB;
3507	switch (sblkctl) {
3508	case 0:
3509		/* Single Narrow Channel */
3510		break;
3511	case 2:
3512		/* Wide Channel */
3513		ahc->features |= AHC_WIDE;
3514		break;
3515	case 8:
3516		/* Twin Channel */
3517		ahc->features |= AHC_TWIN;
3518		break;
3519	default:
3520		printf(" Unsupported adapter type.  Ignoring\n");
3521		return(-1);
3522	}
3523
3524	/*
3525	 * Reload sxfrctl1.
3526	 *
3527	 * We must always initialize STPWEN to 1 before we
3528	 * restore the saved values.  STPWEN is initialized
3529	 * to a tri-state condition which can only be cleared
3530	 * by turning it on.
3531	 */
3532	if ((ahc->features & AHC_TWIN) != 0) {
3533		u_int sblkctl;
3534
3535		sblkctl = ahc_inb(ahc, SBLKCTL);
3536		ahc_outb(ahc, SBLKCTL, sblkctl | SELBUSB);
3537		ahc_outb(ahc, SXFRCTL1, sxfrctl1_b);
3538		ahc_outb(ahc, SBLKCTL, sblkctl & ~SELBUSB);
3539	}
3540	ahc_outb(ahc, SXFRCTL1, sxfrctl1_a);
3541
3542#ifdef AHC_DUMP_SEQ
3543	if (ahc->init_level == 0)
3544		ahc_dumpseq(ahc);
3545#endif
3546
3547	return (0);
3548}
3549
3550/*
3551 * Determine the number of SCBs available on the controller
3552 */
3553int
3554ahc_probe_scbs(struct ahc_softc *ahc) {
3555	int i;
3556
3557	for (i = 0; i < AHC_SCB_MAX; i++) {
3558
3559		ahc_outb(ahc, SCBPTR, i);
3560		ahc_outb(ahc, SCB_BASE, i);
3561		if (ahc_inb(ahc, SCB_BASE) != i)
3562			break;
3563		ahc_outb(ahc, SCBPTR, 0);
3564		if (ahc_inb(ahc, SCB_BASE) != 0)
3565			break;
3566	}
3567	return (i);
3568}
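
/*
 * The probe writes each candidate SCB number into the first byte of SCB
 * storage (SCB_BASE) and reads it back.  The second readback re-selects
 * SCB 0, which held 0 after the first iteration; if the write to SCB i
 * wrapped around and aliased SCB 0, that readback is nonzero and the
 * loop stops, leaving i equal to the number of real hardware SCBs.
 */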
3569
3570void
3571ahc_init_probe_config(struct ahc_probe_config *probe_config)
3572{
3573	probe_config->description = NULL;
3574	probe_config->channel = 'A';
3575	probe_config->channel_b = 'B';
3576	probe_config->chip = AHC_NONE;
3577	probe_config->features = AHC_FENONE;
3578	probe_config->bugs = AHC_BUGNONE;
3579	probe_config->flags = AHC_FNONE;
3580}
3581
3582static void
3583ahc_dmamap_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error)
3584{
3585	bus_addr_t *baddr;
3586
3587	baddr = (bus_addr_t *)arg;
3588	*baddr = segs->ds_addr;
3589}
3590
3591static void
3592ahc_build_free_scb_list(struct ahc_softc *ahc)
3593{
3594	int i;
3595
3596	for (i = 0; i < ahc->scb_data->maxhscbs; i++) {
3597		ahc_outb(ahc, SCBPTR, i);
3598
3599		/* Clear the control byte. */
3600		ahc_outb(ahc, SCB_CONTROL, 0);
3601
3602		/* Set the next pointer */
3603		if ((ahc->flags & AHC_PAGESCBS) != 0)
3604			ahc_outb(ahc, SCB_NEXT, i+1);
3605		else
3606			ahc_outb(ahc, SCB_NEXT, SCB_LIST_NULL);
3607
3608		/* Make the tag number invalid */
3609		ahc_outb(ahc, SCB_TAG, SCB_LIST_NULL);
3610	}
3611
3612	/* Make sure that the last SCB terminates the free list */
3613	ahc_outb(ahc, SCBPTR, i-1);
3614	ahc_outb(ahc, SCB_NEXT, SCB_LIST_NULL);
3615
3616	/* Ensure we clear the 0 SCB's control byte. */
3617	ahc_outb(ahc, SCBPTR, 0);
3618	ahc_outb(ahc, SCB_CONTROL, 0);
3619}
3620
3621static int
3622ahc_init_scbdata(struct ahc_softc *ahc)
3623{
3624	struct scb_data *scb_data;
3625
3626	scb_data = ahc->scb_data;
3627	SLIST_INIT(&scb_data->free_scbs);
3628	SLIST_INIT(&scb_data->sg_maps);
3629
3630	/* Allocate SCB resources */
3631	scb_data->scbarray =
3632	    (struct scb *)malloc(sizeof(struct scb) * AHC_SCB_MAX,
3633				 M_DEVBUF, M_NOWAIT);
3634	if (scb_data->scbarray == NULL)
3635		return (ENOMEM);
3636	memset(scb_data->scbarray, 0, sizeof(struct scb) * AHC_SCB_MAX);
3637
3638	/* Determine the number of hardware SCBs and initialize them */
3639
3640	scb_data->maxhscbs = ahc_probe_scbs(ahc);
3641	if ((ahc->flags & AHC_PAGESCBS) != 0) {
3642		/* SCB 0 heads the free list */
3643		ahc_outb(ahc, FREE_SCBH, 0);
3644	} else {
3645		ahc_outb(ahc, FREE_SCBH, SCB_LIST_NULL);
3646	}
3647
3648	if (ahc->scb_data->maxhscbs == 0) {
3649		printf("%s: No SCB space found\n", ahc_name(ahc));
3650		return (ENXIO);
3651	}
3652
3653	ahc_build_free_scb_list(ahc);
3654
3655	/*
3656	 * Create our DMA tags.  These tags define the kinds of device
3657	 * accessible memory allocations and memory mappings we will
3658	 * need to perform during normal operation.
3659	 *
3660	 * Unless we need to further restrict the allocation, we rely
3661	 * on the restrictions of the parent dmat, hence the common
3662	 * use of MAXADDR and MAXSIZE.
3663	 */
3664
3665	/* DMA tag for our hardware scb structures */
3666	if (ahc_dma_tag_create(ahc, ahc->parent_dmat, /*alignment*/1,
3667			       /*boundary*/0, /*lowaddr*/BUS_SPACE_MAXADDR,
3668			       /*highaddr*/BUS_SPACE_MAXADDR,
3669			       /*filter*/NULL, /*filterarg*/NULL,
3670			       AHC_SCB_MAX * sizeof(struct hardware_scb),
3671			       /*nsegments*/1,
3672			       /*maxsegsz*/BUS_SPACE_MAXSIZE_32BIT,
3673			       /*flags*/0, &scb_data->hscb_dmat) != 0) {
3674		goto error_exit;
3675	}
3676
3677	scb_data->init_level++;
3678
3679	/* Allocation for our hardware SCBs */
3680	if (ahc_dmamem_alloc(ahc, scb_data->hscb_dmat,
3681			     (void **)&scb_data->hscbs,
3682			     BUS_DMA_NOWAIT, &scb_data->hscb_dmamap) != 0) {
3683		goto error_exit;
3684	}
3685
3686	scb_data->init_level++;
3687
3688	/* And permanently map them */
3689	ahc_dmamap_load(ahc, scb_data->hscb_dmat, scb_data->hscb_dmamap,
3690			scb_data->hscbs,
3691			AHC_SCB_MAX * sizeof(struct hardware_scb),
3692			ahc_dmamap_cb, &scb_data->hscb_busaddr, /*flags*/0);
3693
3694	scb_data->init_level++;
3695
3696	/* DMA tag for our sense buffers */
3697	if (ahc_dma_tag_create(ahc, ahc->parent_dmat, /*alignment*/1,
3698			       /*boundary*/0, /*lowaddr*/BUS_SPACE_MAXADDR,
3699			       /*highaddr*/BUS_SPACE_MAXADDR,
3700			       /*filter*/NULL, /*filterarg*/NULL,
3701			       AHC_SCB_MAX * sizeof(struct scsi_sense_data),
3702			       /*nsegments*/1,
3703			       /*maxsegsz*/BUS_SPACE_MAXSIZE_32BIT,
3704			       /*flags*/0, &scb_data->sense_dmat) != 0) {
3705		goto error_exit;
3706	}
3707
3708	scb_data->init_level++;
3709
3710	/* Allocate them */
3711	if (ahc_dmamem_alloc(ahc, scb_data->sense_dmat,
3712			     (void **)&scb_data->sense,
3713			     BUS_DMA_NOWAIT, &scb_data->sense_dmamap) != 0) {
3714		goto error_exit;
3715	}
3716
3717	scb_data->init_level++;
3718
3719	/* And permanently map them */
3720	ahc_dmamap_load(ahc, scb_data->sense_dmat, scb_data->sense_dmamap,
3721			scb_data->sense,
3722			AHC_SCB_MAX * sizeof(struct scsi_sense_data),
3723			ahc_dmamap_cb, &scb_data->sense_busaddr, /*flags*/0);
3724
3725	scb_data->init_level++;
3726
3727	/* DMA tag for our S/G structures.  We allocate in page sized chunks */
3728	if (ahc_dma_tag_create(ahc, ahc->parent_dmat, /*alignment*/1,
3729			       /*boundary*/0, /*lowaddr*/BUS_SPACE_MAXADDR,
3730			       /*highaddr*/BUS_SPACE_MAXADDR,
3731			       /*filter*/NULL, /*filterarg*/NULL,
3732			       PAGE_SIZE, /*nsegments*/1,
3733			       /*maxsegsz*/BUS_SPACE_MAXSIZE_32BIT,
3734			       /*flags*/0, &scb_data->sg_dmat) != 0) {
3735		goto error_exit;
3736	}
3737
3738	scb_data->init_level++;
3739
3740	/* Perform initial SCB allocation */
3741	memset(scb_data->hscbs, 0, AHC_SCB_MAX * sizeof(struct hardware_scb));
3742	ahc_alloc_scbs(ahc);
3743
3744	if (scb_data->numscbs == 0) {
3745		printf("%s: ahc_init_scbdata - "
3746		       "Unable to allocate initial scbs\n",
3747		       ahc_name(ahc));
3748		goto error_exit;
3749	}
3750
3751	/*
3752	 * Tell the sequencer which SCB will be the next one it receives.
3753	 */
3754	ahc->next_queued_scb = ahc_get_scb(ahc);
3755	ahc_outb(ahc, NEXT_QUEUED_SCB, ahc->next_queued_scb->hscb->tag);
3756
3757	/*
3758	 * Note that we were successful
3759	 */
3760	return (0);
3761
3762error_exit:
3763
3764	return (ENOMEM);
3765}
3766
3767static void
3768ahc_fini_scbdata(struct ahc_softc *ahc)
3769{
3770	struct scb_data *scb_data;
3771
3772	scb_data = ahc->scb_data;
3773	if (scb_data == NULL)
3774		return;
3775
3776	switch (scb_data->init_level) {
3777	default:
3778	case 7:
3779	{
3780		struct sg_map_node *sg_map;
3781
3782		while ((sg_map = SLIST_FIRST(&scb_data->sg_maps))!= NULL) {
3783			SLIST_REMOVE_HEAD(&scb_data->sg_maps, links);
3784			ahc_dmamap_unload(ahc, scb_data->sg_dmat,
3785					  sg_map->sg_dmamap);
3786			ahc_dmamem_free(ahc, scb_data->sg_dmat,
3787					sg_map->sg_vaddr,
3788					sg_map->sg_dmamap);
3789			free(sg_map, M_DEVBUF);
3790		}
3791		ahc_dma_tag_destroy(ahc, scb_data->sg_dmat);
3792	}
3793	case 6:
3794		ahc_dmamap_unload(ahc, scb_data->sense_dmat,
3795				  scb_data->sense_dmamap);
3796	case 5:
3797		ahc_dmamem_free(ahc, scb_data->sense_dmat, scb_data->sense,
3798				scb_data->sense_dmamap);
3799		ahc_dmamap_destroy(ahc, scb_data->sense_dmat,
3800				   scb_data->sense_dmamap);
3801	case 4:
3802		ahc_dma_tag_destroy(ahc, scb_data->sense_dmat);
3803	case 3:
3804		ahc_dmamap_unload(ahc, scb_data->hscb_dmat,
3805				  scb_data->hscb_dmamap);
3806	case 2:
3807		ahc_dmamem_free(ahc, scb_data->hscb_dmat, scb_data->hscbs,
3808				scb_data->hscb_dmamap);
3809		ahc_dmamap_destroy(ahc, scb_data->hscb_dmat,
3810				   scb_data->hscb_dmamap);
3811	case 1:
3812		ahc_dma_tag_destroy(ahc, scb_data->hscb_dmat);
3813		break;
3814	case 0:
3815		break;
3816	}
3817	if (scb_data->scbarray != NULL)
3818		free(scb_data->scbarray, M_DEVBUF);
3819}
3820
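/*
 * Allocate another batch of kernel SCBs, carving the per-SCB S/G lists
 * out of a freshly mapped page of DMA-able memory.  New SCBs are placed
 * on the free list until either the page or AHC_SCB_MAX is exhausted.
 */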
3821void
3822ahc_alloc_scbs(struct ahc_softc *ahc)
3823{
3824	struct scb_data *scb_data;
3825	struct scb *next_scb;
3826	struct sg_map_node *sg_map;
3827	bus_addr_t physaddr;
3828	struct ahc_dma_seg *segs;
3829	int newcount;
3830	int i;
3831
3832	scb_data = ahc->scb_data;
3833	if (scb_data->numscbs >= AHC_SCB_MAX)
3834		/* Can't allocate any more */
3835		return;
3836
3837	next_scb = &scb_data->scbarray[scb_data->numscbs];
3838
3839	sg_map = malloc(sizeof(*sg_map), M_DEVBUF, M_NOWAIT);
3840
3841	if (sg_map == NULL)
3842		return;
3843
3844	/* Allocate S/G space for the next batch of SCBS */
3845	if (ahc_dmamem_alloc(ahc, scb_data->sg_dmat,
3846			     (void **)&sg_map->sg_vaddr,
3847			     BUS_DMA_NOWAIT, &sg_map->sg_dmamap) != 0) {
3848		free(sg_map, M_DEVBUF);
3849		return;
3850	}
3851
3852	SLIST_INSERT_HEAD(&scb_data->sg_maps, sg_map, links);
3853
3854	ahc_dmamap_load(ahc, scb_data->sg_dmat, sg_map->sg_dmamap,
3855			sg_map->sg_vaddr, PAGE_SIZE, ahc_dmamap_cb,
3856			&sg_map->sg_physaddr, /*flags*/0);
3857
3858	segs = sg_map->sg_vaddr;
3859	physaddr = sg_map->sg_physaddr;
3860
3861	newcount = (PAGE_SIZE / (AHC_NSEG * sizeof(struct ahc_dma_seg)));
3862	for (i = 0; scb_data->numscbs < AHC_SCB_MAX && i < newcount; i++) {
3863		struct scb_platform_data *pdata;
3864#ifndef __linux__
3865		int error;
3866#endif
3867		pdata = (struct scb_platform_data *)malloc(sizeof(*pdata),
3868							   M_DEVBUF, M_NOWAIT);
3869		if (pdata == NULL)
3870			break;
3871		next_scb->platform_data = pdata;
3872		next_scb->sg_list = segs;
3873		/*
3874		 * The sequencer always starts with the second entry.
3875		 * The first entry is embedded in the scb.
3876		 */
3877		next_scb->sg_list_phys = physaddr + sizeof(struct ahc_dma_seg);
3878		next_scb->ahc_softc = ahc;
3879		next_scb->flags = SCB_FREE;
3880#ifndef __linux__
3881		error = ahc_dmamap_create(ahc, ahc->buffer_dmat, /*flags*/0,
3882					  &next_scb->dmamap);
3883		if (error != 0)
3884			break;
3885#endif
3886		next_scb->hscb = &scb_data->hscbs[scb_data->numscbs];
3887		next_scb->hscb->tag = ahc->scb_data->numscbs;
3888		SLIST_INSERT_HEAD(&ahc->scb_data->free_scbs,
3889				  next_scb, links.sle);
3890		segs += AHC_NSEG;
3891		physaddr += (AHC_NSEG * sizeof(struct ahc_dma_seg));
3892		next_scb++;
3893		ahc->scb_data->numscbs++;
3894	}
3895}
3896
3897void
3898ahc_controller_info(struct ahc_softc *ahc, char *buf)
3899{
3900	int len;
3901
3902	len = sprintf(buf, "%s: ", ahc_chip_names[ahc->chip & AHC_CHIPID_MASK]);
3903	buf += len;
3904	if ((ahc->features & AHC_TWIN) != 0)
3905 		len = sprintf(buf, "Twin Channel, A SCSI Id=%d, "
3906			      "B SCSI Id=%d, primary %c, ",
3907			      ahc->our_id, ahc->our_id_b,
3908			      ahc->flags & AHC_CHANNEL_B_PRIMARY ? 'B': 'A');
3909	else {
3910		const char *type;
3911
3912		if ((ahc->features & AHC_WIDE) != 0) {
3913			type = "Wide";
3914		} else {
3915			type = "Single";
3916		}
3917		len = sprintf(buf, "%s Channel %c, SCSI Id=%d, ",
3918			      type, ahc->channel, ahc->our_id);
3919	}
3920	buf += len;
3921
3922	if ((ahc->flags & AHC_PAGESCBS) != 0)
3923		sprintf(buf, "%d/%d SCBs",
3924			ahc->scb_data->maxhscbs, AHC_SCB_MAX);
3925	else
3926		sprintf(buf, "%d SCBs", ahc->scb_data->maxhscbs);
3927}
3928
3929/*
3930 * Start the board, ready for normal operation
3931 */
3932int
3933ahc_init(struct ahc_softc *ahc)
3934{
3935	int	 max_targ;
3936	int	 i;
3937	int	 term;
3938	u_int	 scsi_conf;
3939	u_int	 scsiseq_template;
3940	u_int	 ultraenb;
3941	u_int	 discenable;
3942	u_int	 tagenable;
3943	size_t	 driver_data_size;
3944	uint32_t physaddr;
3945
3946#ifdef AHC_DEBUG_SEQUENCER
3947	ahc->flags |= AHC_SEQUENCER_DEBUG;
3948#endif
3949
3950#ifdef AHC_PRINT_SRAM
3951	printf("Scratch Ram:");
3952	for (i = 0x20; i < 0x5f; i++) {
3953		if (((i % 8) == 0) && (i != 0)) {
3954			printf ("\n              ");
3955		}
3956		printf (" 0x%x", ahc_inb(ahc, i));
3957	}
3958	if ((ahc->features & AHC_MORE_SRAM) != 0) {
3959		for (i = 0x70; i < 0x7f; i++) {
3960			if (((i % 8) == 0) && (i != 0)) {
3961				printf ("\n              ");
3962			}
3963			printf (" 0x%x", ahc_inb(ahc, i));
3964		}
3965	}
3966	printf ("\n");
3967#endif
3968	max_targ = 15;
3969
3970	/*
3971	 * Assume we have a board at this stage and it has been reset.
3972	 */
3973	if ((ahc->flags & AHC_USEDEFAULTS) != 0)
3974		ahc->our_id = ahc->our_id_b = 7;
3975
3976	/*
3977	 * Default to allowing initiator operations.
3978	 */
3979	ahc->flags |= AHC_INITIATORROLE;
3980
3981	/*
3982	 * Only allow target mode features if this unit has them enabled.
3983	 */
3984	if ((AHC_TMODE_ENABLE & (0x1 << ahc->unit)) == 0)
3985		ahc->features &= ~AHC_TARGETMODE;
3986
3987#ifndef __linux__
3988	/* DMA tag for mapping buffers into device visible space. */
3989	if (ahc_dma_tag_create(ahc, ahc->parent_dmat, /*alignment*/1,
3990			       /*boundary*/0, /*lowaddr*/BUS_SPACE_MAXADDR,
3991			       /*highaddr*/BUS_SPACE_MAXADDR,
3992			       /*filter*/NULL, /*filterarg*/NULL,
3993			       /*maxsize*/MAXBSIZE, /*nsegments*/AHC_NSEG,
3994			       /*maxsegsz*/AHC_MAXTRANSFER_SIZE,
3995			       /*flags*/BUS_DMA_ALLOCNOW,
3996			       &ahc->buffer_dmat) != 0) {
3997		return (ENOMEM);
3998	}
3999#endif
4000
4001	ahc->init_level++;
4002
4003	/*
4004	 * DMA tag for our command fifos and other data in system memory
4005	 * the card's sequencer must be able to access.  For initiator
4006	 * roles, we need to allocate space for the qinfifo and qoutfifo.
4007	 * The qinfifo and qoutfifo are composed of 256 1-byte elements.
4008	 * When providing for the target mode role, we must additionally
4009	 * provide space for the incoming target command fifo and an extra
4010	 * byte to deal with a dma bug in some chip versions.
4011	 */
4012	driver_data_size = 2 * 256 * sizeof(uint8_t);
4013	if ((ahc->features & AHC_TARGETMODE) != 0)
4014		driver_data_size += AHC_TMODE_CMDS * sizeof(struct target_cmd)
4015				 + /*DMA WideOdd Bug Buffer*/1;
4016	if (ahc_dma_tag_create(ahc, ahc->parent_dmat, /*alignment*/1,
4017			       /*boundary*/0, /*lowaddr*/BUS_SPACE_MAXADDR,
4018			       /*highaddr*/BUS_SPACE_MAXADDR,
4019			       /*filter*/NULL, /*filterarg*/NULL,
4020			       driver_data_size,
4021			       /*nsegments*/1,
4022			       /*maxsegsz*/BUS_SPACE_MAXSIZE_32BIT,
4023			       /*flags*/0, &ahc->shared_data_dmat) != 0) {
4024		return (ENOMEM);
4025	}
4026
4027	ahc->init_level++;
4028
4029	/* Allocation of driver data */
4030	if (ahc_dmamem_alloc(ahc, ahc->shared_data_dmat,
4031			     (void **)&ahc->qoutfifo,
4032			     BUS_DMA_NOWAIT, &ahc->shared_data_dmamap) != 0) {
4033		return (ENOMEM);
4034	}
4035
4036	ahc->init_level++;
4037
4038	/* And permanently map it in */
4039	ahc_dmamap_load(ahc, ahc->shared_data_dmat, ahc->shared_data_dmamap,
4040			ahc->qoutfifo, driver_data_size, ahc_dmamap_cb,
4041			&ahc->shared_data_busaddr, /*flags*/0);
4042
4043	if ((ahc->features & AHC_TARGETMODE) != 0) {
4044		ahc->targetcmds = (struct target_cmd *)ahc->qoutfifo;
4045		ahc->qoutfifo = (uint8_t *)&ahc->targetcmds[AHC_TMODE_CMDS];
4046		ahc->dma_bug_buf = ahc->shared_data_busaddr
4047				 + driver_data_size - 1;
4048		/* All target command blocks start out invalid. */
4049		for (i = 0; i < AHC_TMODE_CMDS; i++)
4050			ahc->targetcmds[i].cmd_valid = 0;
4051		ahc->tqinfifonext = 1;
4052		ahc_outb(ahc, KERNEL_TQINPOS, ahc->tqinfifonext - 1);
4053		ahc_outb(ahc, TQINPOS, ahc->tqinfifonext);
4054		ahc->qoutfifo = (uint8_t *)&ahc->targetcmds[256];
4055	}
4056	ahc->qinfifo = &ahc->qoutfifo[256];
4057
4058	ahc->init_level++;
4059
4060	/* Allocate SCB data now that buffer_dmat is initialized */
4061	if (ahc->scb_data->maxhscbs == 0)
4062		if (ahc_init_scbdata(ahc) != 0)
4063			return (ENOMEM);
4064
4065	/*
4066	 * Allocate a tstate to house information for our
4067	 * initiator presence on the bus as well as the user
4068	 * data for any target mode initiator.
4069	 */
4070	if (ahc_alloc_tstate(ahc, ahc->our_id, 'A') == NULL) {
4071		printf("%s: unable to allocate tmode_tstate.  "
4072		       "Failing attach\n", ahc_name(ahc));
4073		return (-1);
4074	}
4075
4076	if ((ahc->features & AHC_TWIN) != 0) {
4077		if (ahc_alloc_tstate(ahc, ahc->our_id_b, 'B') == NULL) {
4078			printf("%s: unable to allocate tmode_tstate.  "
4079			       "Failing attach\n", ahc_name(ahc));
4080			return (-1);
4081		}
4082	}
4083
4084	ahc_outb(ahc, SEQ_FLAGS, 0);
4085
4086	if (ahc->scb_data->maxhscbs < AHC_SCB_MAX) {
4087		ahc->flags |= AHC_PAGESCBS;
4088	} else {
4089		ahc->flags &= ~AHC_PAGESCBS;
4090	}
4091
4092#ifdef AHC_DEBUG
4093	if (ahc_debug & AHC_SHOWMISC) {
4094		printf("%s: hardware scb %d bytes; kernel scb %d bytes; "
4095		       "ahc_dma %d bytes\n",
4096			ahc_name(ahc),
4097			sizeof(struct hardware_scb),
4098			sizeof(struct scb),
4099			sizeof(struct ahc_dma_seg));
4100	}
4101#endif /* AHC_DEBUG */
4102
4103	/* Set the SCSI Id, SXFRCTL0, SXFRCTL1, and SIMODE1, for both channels*/
4104	if (ahc->features & AHC_TWIN) {
4105
4106		/*
4107		 * The device is gated to channel B after a chip reset,
4108		 * so set those values first
4109		 */
4110		ahc_outb(ahc, SBLKCTL, ahc_inb(ahc, SBLKCTL) | SELBUSB);
4111		term = (ahc->flags & AHC_TERM_ENB_B) != 0 ? STPWEN : 0;
4112		ahc_outb(ahc, SCSIID, ahc->our_id_b);
4113		scsi_conf = ahc_inb(ahc, SCSICONF + 1);
4114		ahc_outb(ahc, SXFRCTL1, (scsi_conf & (ENSPCHK|STIMESEL))
4115					|term|ahc->seltime_b|ENSTIMER|ACTNEGEN);
4116		if ((ahc->features & AHC_ULTRA2) != 0)
4117			ahc_outb(ahc, SIMODE0, ahc_inb(ahc, SIMODE0)|ENIOERR);
4118		ahc_outb(ahc, SIMODE1, ENSELTIMO|ENSCSIRST|ENSCSIPERR);
4119		ahc_outb(ahc, SXFRCTL0, DFON|SPIOEN);
4120
4121		if ((scsi_conf & RESET_SCSI) != 0
4122		 && (ahc->flags & AHC_INITIATORROLE) != 0)
4123			ahc->flags |= AHC_RESET_BUS_B;
4124
4125		/* Select Channel A */
4126		ahc_outb(ahc, SBLKCTL, ahc_inb(ahc, SBLKCTL) & ~SELBUSB);
4127	}
4128	term = (ahc->flags & AHC_TERM_ENB_A) != 0 ? STPWEN : 0;
4129	if ((ahc->features & AHC_ULTRA2) != 0)
4130		ahc_outb(ahc, SCSIID_ULTRA2, ahc->our_id);
4131	else
4132		ahc_outb(ahc, SCSIID, ahc->our_id);
4133	scsi_conf = ahc_inb(ahc, SCSICONF);
4134	ahc_outb(ahc, SXFRCTL1, (scsi_conf & (ENSPCHK|STIMESEL))
4135				|term|ahc->seltime
4136				|ENSTIMER|ACTNEGEN);
4137	if ((ahc->features & AHC_ULTRA2) != 0)
4138		ahc_outb(ahc, SIMODE0, ahc_inb(ahc, SIMODE0)|ENIOERR);
4139	ahc_outb(ahc, SIMODE1, ENSELTIMO|ENSCSIRST|ENSCSIPERR);
4140	ahc_outb(ahc, SXFRCTL0, DFON|SPIOEN);
4141
4142	if ((scsi_conf & RESET_SCSI) != 0
4143	 && (ahc->flags & AHC_INITIATORROLE) != 0)
4144		ahc->flags |= AHC_RESET_BUS_A;
4145
4146	/*
4147	 * Look at the information that board initialization or
4148	 * the board bios has left us.
4149	 */
4150	ultraenb = 0;
4151	tagenable = ALL_TARGETS_MASK;
4152
4153	/* Grab the disconnection disable table and invert it for our needs */
4154	if (ahc->flags & AHC_USEDEFAULTS) {
4155		printf("%s: Host Adapter Bios disabled.  Using default SCSI "
4156			"device parameters\n", ahc_name(ahc));
4157		ahc->flags |= AHC_EXTENDED_TRANS_A|AHC_EXTENDED_TRANS_B|
4158			      AHC_TERM_ENB_A|AHC_TERM_ENB_B;
4159		discenable = ALL_TARGETS_MASK;
4160		if ((ahc->features & AHC_ULTRA) != 0)
4161			ultraenb = ALL_TARGETS_MASK;
4162	} else {
4163		discenable = ~((ahc_inb(ahc, DISC_DSB + 1) << 8)
4164			   | ahc_inb(ahc, DISC_DSB));
4165		if ((ahc->features & (AHC_ULTRA|AHC_ULTRA2)) != 0)
4166			ultraenb = (ahc_inb(ahc, ULTRA_ENB + 1) << 8)
4167				      | ahc_inb(ahc, ULTRA_ENB);
4168	}
4169
4170	if ((ahc->features & (AHC_WIDE|AHC_TWIN)) == 0)
4171		max_targ = 7;
4172
4173	for (i = 0; i <= max_targ; i++) {
4174		struct ahc_initiator_tinfo *tinfo;
4175		struct tmode_tstate *tstate;
4176		u_int our_id;
4177		u_int target_id;
4178		char channel;
4179
4180		channel = 'A';
4181		our_id = ahc->our_id;
4182		target_id = i;
4183		if (i > 7 && (ahc->features & AHC_TWIN) != 0) {
4184			channel = 'B';
4185			our_id = ahc->our_id_b;
4186			target_id = i % 8;
4187		}
4188		tinfo = ahc_fetch_transinfo(ahc, channel, our_id,
4189					    target_id, &tstate);
4190		/* Default to async narrow across the board */
4191		memset(tinfo, 0, sizeof(*tinfo));
4192		if (ahc->flags & AHC_USEDEFAULTS) {
4193			if ((ahc->features & AHC_WIDE) != 0)
4194				tinfo->user.width = MSG_EXT_WDTR_BUS_16_BIT;
4195
4196			/*
4197			 * These will be truncated when we determine the
4198			 * connection type we have with the target.
4199			 */
4200			tinfo->user.period = ahc_syncrates->period;
4201			tinfo->user.offset = ~0;
4202		} else {
4203			u_int scsirate;
4204			uint16_t mask;
4205
4206			/* Take the settings leftover in scratch RAM. */
4207			scsirate = ahc_inb(ahc, TARG_SCSIRATE + i);
4208			mask = (0x01 << i);
4209			if ((ahc->features & AHC_ULTRA2) != 0) {
4210				u_int offset;
4211				u_int maxsync;
4212
4213				if ((scsirate & SOFS) == 0x0F) {
4214					/*
4215					 * Haven't negotiated yet,
4216					 * so the format is different.
4217					 */
4218				scsirate = (scsirate & SXFR) >> 4
4219					 | ((ultraenb & mask)
4220					  ? 0x08 : 0x0)
4221					 | (scsirate & WIDEXFER);
4222					offset = MAX_OFFSET_ULTRA2;
4223				} else
4224					offset = ahc_inb(ahc, TARG_OFFSET + i);
4225				maxsync = AHC_SYNCRATE_ULTRA2;
4226				if ((ahc->features & AHC_DT) != 0)
4227					maxsync = AHC_SYNCRATE_DT;
4228				tinfo->user.period =
4229				    ahc_find_period(ahc, scsirate, maxsync);
4230				if (offset == 0)
4231					tinfo->user.period = 0;
4232				else
4233					tinfo->user.offset = ~0;
4234				if ((scsirate & SXFR_ULTRA2) <= 8/*10MHz*/
4235				 && (ahc->features & AHC_DT) != 0)
4236					tinfo->user.ppr_options =
4237					    MSG_EXT_PPR_DT_REQ;
4238			} else if ((scsirate & SOFS) != 0) {
4239				if ((scsirate & SXFR) == 0x40
4240				 && (ultraenb & mask) != 0) {
4241					/* Treat 10MHz as a non-ultra speed */
4242					scsirate &= ~SXFR;
4243				 	ultraenb &= ~mask;
4244				}
4245				tinfo->user.period =
4246				    ahc_find_period(ahc, scsirate,
4247						    (ultraenb & mask)
4248						   ? AHC_SYNCRATE_ULTRA
4249						   : AHC_SYNCRATE_FAST);
4250				if (tinfo->user.period != 0)
4251					tinfo->user.offset = ~0;
4252			}
4253			if ((scsirate & WIDEXFER) != 0
4254			 && (ahc->features & AHC_WIDE) != 0)
4255				tinfo->user.width = MSG_EXT_WDTR_BUS_16_BIT;
4256			tinfo->user.protocol_version = 4;
4257			if ((ahc->features & AHC_DT) != 0)
4258				tinfo->user.transport_version = 3;
4259			else
4260				tinfo->user.transport_version = 2;
4261			tinfo->goal.protocol_version = 2;
4262			tinfo->goal.transport_version = 2;
4263			tinfo->current.protocol_version = 2;
4264			tinfo->current.transport_version = 2;
4265		}
4266		tstate->ultraenb = ultraenb;
4267		tstate->discenable = discenable;
4268		tstate->tagenable = 0; /* Wait until the XPT says it's okay */
4269	}
4270	ahc->user_discenable = discenable;
4271	ahc->user_tagenable = tagenable;
4272
4273	/* There are no untagged SCBs active yet. */
4274	for (i = 0; i < 16; i++) {
4275		ahc_unbusy_tcl(ahc, BUILD_TCL(i << 4, 0));
4276		if ((ahc->flags & AHC_SCB_BTT) != 0) {
4277			int lun;
4278
4279			/*
4280			 * The SCB based BTT allows an entry per
4281			 * target and lun pair.
4282			 */
4283			for (lun = 1; lun < AHC_NUM_LUNS; lun++)
4284				ahc_unbusy_tcl(ahc, BUILD_TCL(i << 4, lun));
4285		}
4286	}
4287
4288	/* All of our queues are empty */
4289	for (i = 0; i < 256; i++)
4290		ahc->qoutfifo[i] = SCB_LIST_NULL;
4291
4292	for (i = 0; i < 256; i++)
4293		ahc->qinfifo[i] = SCB_LIST_NULL;
4294
4295	if ((ahc->features & AHC_MULTI_TID) != 0) {
4296		ahc_outb(ahc, TARGID, 0);
4297		ahc_outb(ahc, TARGID + 1, 0);
4298	}
4299
4300	/*
4301	 * Tell the sequencer where it can find our arrays in memory.
4302	 */
4303	physaddr = ahc->scb_data->hscb_busaddr;
4304	ahc_outb(ahc, HSCB_ADDR, physaddr & 0xFF);
4305	ahc_outb(ahc, HSCB_ADDR + 1, (physaddr >> 8) & 0xFF);
4306	ahc_outb(ahc, HSCB_ADDR + 2, (physaddr >> 16) & 0xFF);
4307	ahc_outb(ahc, HSCB_ADDR + 3, (physaddr >> 24) & 0xFF);
4308
4309	physaddr = ahc->shared_data_busaddr;
4310	ahc_outb(ahc, SHARED_DATA_ADDR, physaddr & 0xFF);
4311	ahc_outb(ahc, SHARED_DATA_ADDR + 1, (physaddr >> 8) & 0xFF);
4312	ahc_outb(ahc, SHARED_DATA_ADDR + 2, (physaddr >> 16) & 0xFF);
4313	ahc_outb(ahc, SHARED_DATA_ADDR + 3, (physaddr >> 24) & 0xFF);
4314
4315	/*
4316	 * Initialize the group code to command length table.
4317	 * This overrides the values in TARG_SCSIRATE, so only
4318	 * set up the table after we have processed that information.
4319	 */
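	/*
	 * Each entry appears to give the number of CDB bytes that follow
	 * the opcode for commands in that group (e.g. 5 for the 6-byte
	 * group 0 commands); reserved groups are left at zero.
	 */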
4320	ahc_outb(ahc, CMDSIZE_TABLE, 5);
4321	ahc_outb(ahc, CMDSIZE_TABLE + 1, 9);
4322	ahc_outb(ahc, CMDSIZE_TABLE + 2, 9);
4323	ahc_outb(ahc, CMDSIZE_TABLE + 3, 0);
4324	ahc_outb(ahc, CMDSIZE_TABLE + 4, 15);
4325	ahc_outb(ahc, CMDSIZE_TABLE + 5, 11);
4326	ahc_outb(ahc, CMDSIZE_TABLE + 6, 0);
4327	ahc_outb(ahc, CMDSIZE_TABLE + 7, 0);
4328
4329	/* Tell the sequencer of our initial queue positions */
4330	ahc_outb(ahc, KERNEL_QINPOS, 0);
4331	ahc_outb(ahc, QINPOS, 0);
4332	ahc_outb(ahc, QOUTPOS, 0);
4333
4334	/* Don't have any special messages to send to targets */
4335	ahc_outb(ahc, TARGET_MSG_REQUEST, 0);
4336	ahc_outb(ahc, TARGET_MSG_REQUEST + 1, 0);
4337
4338	/*
4339	 * Use the built in queue management registers
4340	 * if they are available.
4341	 */
4342	if ((ahc->features & AHC_QUEUE_REGS) != 0) {
4343		ahc_outb(ahc, QOFF_CTLSTA, SCB_QSIZE_256);
4344		ahc_outb(ahc, SDSCB_QOFF, 0);
4345		ahc_outb(ahc, SNSCB_QOFF, 0);
4346		ahc_outb(ahc, HNSCB_QOFF, 0);
4347	}
4348
4349
4350	/* We don't have any waiting selections */
4351	ahc_outb(ahc, WAITING_SCBH, SCB_LIST_NULL);
4352
4353	/* Our disconnection list is empty too */
4354	ahc_outb(ahc, DISCONNECTED_SCBH, SCB_LIST_NULL);
4355
4356	/* Message out buffer starts empty */
4357	ahc_outb(ahc, MSG_OUT, MSG_NOOP);
4358
4359	/*
4360	 * Set up the allowed SCSI sequences based on operational mode.
4361	 * If we are a target, we'll enable selection-in operations once
4362	 * we've had a lun enabled.
4363	 */
4364	scsiseq_template = ENSELO|ENAUTOATNO|ENAUTOATNP;
4365	if ((ahc->flags & AHC_INITIATORROLE) != 0)
4366		scsiseq_template |= ENRSELI;
4367	ahc_outb(ahc, SCSISEQ_TEMPLATE, scsiseq_template);
4368
4369	/*
4370	 * Load the Sequencer program and Enable the adapter
4371	 * in "fast" mode.
4372	 */
4373	if (bootverbose)
4374		printf("%s: Downloading Sequencer Program...",
4375		       ahc_name(ahc));
4376
4377	ahc_loadseq(ahc);
4378
4379	if ((ahc->features & AHC_ULTRA2) != 0) {
4380		int wait;
4381
4382		/*
4383		 * Wait for up to 500ms for our transceivers
4384		 * to settle.  If the adapter does not have
4385		 * a cable attached, the transceivers may
4386		 * never settle, so don't complain if we
4387		 * fail here.
4388		 */
4389		pause_sequencer(ahc);
4390		for (wait = 5000;
4391		     (ahc_inb(ahc, SBLKCTL) & (ENAB40|ENAB20)) == 0 && wait;
4392		     wait--)
4393			ahc_delay(100);
4394		unpause_sequencer(ahc);
4395	}
4396	return (0);
4397}
4398
4399/*
4400 * Ensure that the card is paused in a location
4401 * outside of all critical sections and that all
4402 * pending work is completed prior to returning.
4403 * This routine should only be called from outside
4404 * an interrupt context.
4405 */
4406void
4407ahc_pause_and_flushwork(struct ahc_softc *ahc)
4408{
4409	int intstat;
4410	int maxloops;
4411
4412	maxloops = 1000;
4413	ahc->flags |= AHC_ALL_INTERRUPTS;
4414	intstat = 0;
4415	do {
4416		ahc_intr(ahc);
4417		pause_sequencer(ahc);
4418		ahc_clear_critical_section(ahc);
4419		if (intstat == 0xFF && (ahc->features & AHC_REMOVABLE) != 0)
4420			break;
4421		maxloops--;
4422	} while (((intstat = ahc_inb(ahc, INTSTAT)) & INT_PEND) && --maxloops);
4423	if (maxloops == 0) {
4424		printf("Infinite interrupt loop, INTSTAT = %x\n",
4425		      ahc_inb(ahc, INTSTAT));
4426	}
4427	ahc_platform_flushwork(ahc);
4428	ahc->flags &= ~AHC_ALL_INTERRUPTS;
4429}
4430
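/*
 * Save the volatile controller state needed to re-initialize the chip
 * after a power-down.  Returns EBUSY if transactions are still pending.
 */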
4431int
4432ahc_suspend(struct ahc_softc *ahc)
4433{
4434	uint8_t *ptr;
4435	int	 i;
4436
4437	ahc_pause_and_flushwork(ahc);
4438
4439	if (LIST_FIRST(&ahc->pending_scbs) != NULL)
4440		return (EBUSY);
4441
4442#ifdef AHC_TARGET_MODE
4443	/*
4444	 * XXX What about ATIOs that have not yet been serviced?
4445	 * Perhaps we should just refuse to be suspended if we
4446	 * are acting in a target role.
4447	 */
4448	if (ahc->pending_device != NULL)
4449		return (EBUSY);
4450#endif
4451
4452	/* Save volatile registers */
4453	if ((ahc->features & AHC_TWIN) != 0) {
4454		ahc_outb(ahc, SBLKCTL, ahc_inb(ahc, SBLKCTL) | SELBUSB);
4455		ahc->suspend_state.channel[1].scsiseq = ahc_inb(ahc, SCSISEQ);
4456		ahc->suspend_state.channel[1].sxfrctl0 = ahc_inb(ahc, SXFRCTL0);
4457		ahc->suspend_state.channel[1].sxfrctl1 = ahc_inb(ahc, SXFRCTL1);
4458		ahc->suspend_state.channel[1].simode0 = ahc_inb(ahc, SIMODE0);
4459		ahc->suspend_state.channel[1].simode1 = ahc_inb(ahc, SIMODE1);
4460		ahc->suspend_state.channel[1].seltimer = ahc_inb(ahc, SELTIMER);
4461		ahc->suspend_state.channel[1].seqctl = ahc_inb(ahc, SEQCTL);
4462		ahc_outb(ahc, SBLKCTL, ahc_inb(ahc, SBLKCTL) & ~SELBUSB);
4463	}
4464	ahc->suspend_state.channel[0].scsiseq = ahc_inb(ahc, SCSISEQ);
4465	ahc->suspend_state.channel[0].sxfrctl0 = ahc_inb(ahc, SXFRCTL0);
4466	ahc->suspend_state.channel[0].sxfrctl1 = ahc_inb(ahc, SXFRCTL1);
4467	ahc->suspend_state.channel[0].simode0 = ahc_inb(ahc, SIMODE0);
4468	ahc->suspend_state.channel[0].simode1 = ahc_inb(ahc, SIMODE1);
4469	ahc->suspend_state.channel[0].seltimer = ahc_inb(ahc, SELTIMER);
4470	ahc->suspend_state.channel[0].seqctl = ahc_inb(ahc, SEQCTL);
4471
4472	if ((ahc->chip & AHC_PCI) != 0) {
4473		ahc->suspend_state.dscommand0 = ahc_inb(ahc, DSCOMMAND0);
4474		ahc->suspend_state.dspcistatus = ahc_inb(ahc, DSPCISTATUS);
4475	}
4476
4477	if ((ahc->features & AHC_DT) != 0) {
4478		u_int sfunct;
4479
4480		sfunct = ahc_inb(ahc, SFUNCT) & ~ALT_MODE;
4481		ahc_outb(ahc, SFUNCT, sfunct | ALT_MODE);
4482		ahc->suspend_state.optionmode = ahc_inb(ahc, OPTIONMODE);
4483		ahc_outb(ahc, SFUNCT, sfunct);
4484		ahc->suspend_state.crccontrol1 = ahc_inb(ahc, CRCCONTROL1);
4485	}
4486
4487	if ((ahc->features & AHC_MULTI_FUNC) != 0)
4488		ahc->suspend_state.scbbaddr = ahc_inb(ahc, SCBBADDR);
4489
4490	if ((ahc->features & AHC_ULTRA2) != 0)
4491		ahc->suspend_state.dff_thrsh = ahc_inb(ahc, DFF_THRSH);
4492
4493	ptr = ahc->suspend_state.scratch_ram;
4494	for (i = 0; i < 64; i++)
4495		*ptr++ = ahc_inb(ahc, SRAM_BASE + i);
4496
4497	if ((ahc->features & AHC_MORE_SRAM) != 0) {
4498		for (i = 0; i < 16; i++)
4499			*ptr++ = ahc_inb(ahc, TARG_OFFSET + i);
4500	}
4501
4502	ptr = ahc->suspend_state.btt;
4503	if ((ahc->flags & AHC_SCB_BTT) != 0) {
4504		for (i = 0;i < AHC_NUM_TARGETS; i++) {
4505			int j;
4506
4507			for (j = 0;j < AHC_NUM_LUNS; j++) {
4508				u_int tcl;
4509
4510				tcl = BUILD_TCL(i << 4, j);
4511				*ptr++ = ahc_index_busy_tcl(ahc, tcl);
4512			}
4513		}
4514	}
4515	ahc_shutdown(ahc);
4516	return (0);
4517}
4518
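/*
 * Restore the controller state saved by ahc_suspend() after the chip
 * has been reset and its free SCB list rebuilt.
 */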
4519int
4520ahc_resume(struct ahc_softc *ahc)
4521{
4522	uint8_t *ptr;
4523	int	 i;
4524
4525	ahc_reset(ahc);
4526
4527	ahc_build_free_scb_list(ahc);
4528
4529	/* Restore volatile registers */
4530	if ((ahc->features & AHC_TWIN) != 0) {
4531		ahc_outb(ahc, SBLKCTL, ahc_inb(ahc, SBLKCTL) | SELBUSB);
4532		ahc_outb(ahc, SCSIID, ahc->our_id);
4533		ahc_outb(ahc, SCSISEQ, ahc->suspend_state.channel[1].scsiseq);
4534		ahc_outb(ahc, SXFRCTL0, ahc->suspend_state.channel[1].sxfrctl0);
4535		ahc_outb(ahc, SXFRCTL1, ahc->suspend_state.channel[1].sxfrctl1);
4536		ahc_outb(ahc, SIMODE0, ahc->suspend_state.channel[1].simode0);
4537		ahc_outb(ahc, SIMODE1, ahc->suspend_state.channel[1].simode1);
4538		ahc_outb(ahc, SELTIMER, ahc->suspend_state.channel[1].seltimer);
4539		ahc_outb(ahc, SEQCTL, ahc->suspend_state.channel[1].seqctl);
4540		ahc_outb(ahc, SBLKCTL, ahc_inb(ahc, SBLKCTL) & ~SELBUSB);
4541	}
4542	ahc_outb(ahc, SCSISEQ, ahc->suspend_state.channel[0].scsiseq);
4543	ahc_outb(ahc, SXFRCTL0, ahc->suspend_state.channel[0].sxfrctl0);
4544	ahc_outb(ahc, SXFRCTL1, ahc->suspend_state.channel[0].sxfrctl1);
4545	ahc_outb(ahc, SIMODE0, ahc->suspend_state.channel[0].simode0);
4546	ahc_outb(ahc, SIMODE1, ahc->suspend_state.channel[0].simode1);
4547	ahc_outb(ahc, SELTIMER, ahc->suspend_state.channel[0].seltimer);
4548	ahc_outb(ahc, SEQCTL, ahc->suspend_state.channel[0].seqctl);
4549	if ((ahc->features & AHC_ULTRA2) != 0)
4550		ahc_outb(ahc, SCSIID_ULTRA2, ahc->our_id);
4551	else
4552		ahc_outb(ahc, SCSIID, ahc->our_id);
4553
4554	if ((ahc->chip & AHC_PCI) != 0) {
4555		ahc_outb(ahc, DSCOMMAND0, ahc->suspend_state.dscommand0);
4556		ahc_outb(ahc, DSPCISTATUS, ahc->suspend_state.dspcistatus);
4557	}
4558
4559	if ((ahc->features & AHC_DT) != 0) {
4560		u_int sfunct;
4561
4562		sfunct = ahc_inb(ahc, SFUNCT) & ~ALT_MODE;
4563		ahc_outb(ahc, SFUNCT, sfunct | ALT_MODE);
4564		ahc_outb(ahc, OPTIONMODE, ahc->suspend_state.optionmode);
4565		ahc_outb(ahc, SFUNCT, sfunct);
4566		ahc_outb(ahc, CRCCONTROL1, ahc->suspend_state.crccontrol1);
4567	}
4568
4569	if ((ahc->features & AHC_MULTI_FUNC) != 0)
4570		ahc_outb(ahc, SCBBADDR, ahc->suspend_state.scbbaddr);
4571
4572	if ((ahc->features & AHC_ULTRA2) != 0)
4573		ahc_outb(ahc, DFF_THRSH, ahc->suspend_state.dff_thrsh);
4574
4575	ptr = ahc->suspend_state.scratch_ram;
4576	for (i = 0; i < 64; i++)
4577		ahc_outb(ahc, SRAM_BASE + i, *ptr++);
4578
4579	if ((ahc->features & AHC_MORE_SRAM) != 0) {
4580		for (i = 0; i < 16; i++)
4581			ahc_outb(ahc, TARG_OFFSET + i, *ptr++);
4582	}
4583
4584	ptr = ahc->suspend_state.btt;
4585	if ((ahc->flags & AHC_SCB_BTT) != 0) {
4586		for (i = 0;i < AHC_NUM_TARGETS; i++) {
4587			int j;
4588
4589			for (j = 0;j < AHC_NUM_LUNS; j++) {
4590				u_int tcl;
4591
4592				tcl = BUILD_TCL(i << 4, j);
4593				ahc_busy_tcl(ahc, tcl, *ptr++);
4594			}
4595		}
4596	}
4597	return (0);
4598}
4599
4600/************************** Busy Target Table *********************************/
4601/*
4602 * Return the untagged transaction id for a given target/channel/lun.
4604 */
4605u_int
4606ahc_index_busy_tcl(struct ahc_softc *ahc, u_int tcl)
4607{
4608	u_int scbid;
4609	u_int target_offset;
4610
4611	if ((ahc->flags & AHC_SCB_BTT) != 0) {
4612		u_int saved_scbptr;
4613
4614		saved_scbptr = ahc_inb(ahc, SCBPTR);
4615		ahc_outb(ahc, SCBPTR, TCL_LUN(tcl));
4616		scbid = ahc_inb(ahc, SCB_64_BTT + TCL_TARGET_OFFSET(tcl));
4617		ahc_outb(ahc, SCBPTR, saved_scbptr);
4618	} else {
4619		target_offset = TCL_TARGET_OFFSET(tcl);
4620		scbid = ahc_inb(ahc, BUSY_TARGETS + target_offset);
4621	}
4622
4623	return (scbid);
4624}
4625
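/*
 * Clear the busy target table entry for the given target/channel/lun.
 */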
4626void
4627ahc_unbusy_tcl(struct ahc_softc *ahc, u_int tcl)
4628{
4629	u_int target_offset;
4630
4631	if ((ahc->flags & AHC_SCB_BTT) != 0) {
4632		u_int saved_scbptr;
4633
4634		saved_scbptr = ahc_inb(ahc, SCBPTR);
4635		ahc_outb(ahc, SCBPTR, TCL_LUN(tcl));
4636		ahc_outb(ahc, SCB_64_BTT+TCL_TARGET_OFFSET(tcl), SCB_LIST_NULL);
4637		ahc_outb(ahc, SCBPTR, saved_scbptr);
4638	} else {
4639		target_offset = TCL_TARGET_OFFSET(tcl);
4640		ahc_outb(ahc, BUSY_TARGETS + target_offset, SCB_LIST_NULL);
4641	}
4642}
4643
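/*
 * Record the given untagged SCB id as the busy transaction for this
 * target/channel/lun.
 */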
4644void
4645ahc_busy_tcl(struct ahc_softc *ahc, u_int tcl, u_int scbid)
4646{
4647	u_int target_offset;
4648
4649	if ((ahc->flags & AHC_SCB_BTT) != 0) {
4650		u_int saved_scbptr;
4651
4652		saved_scbptr = ahc_inb(ahc, SCBPTR);
4653		ahc_outb(ahc, SCBPTR, TCL_LUN(tcl));
4654		ahc_outb(ahc, SCB_64_BTT + TCL_TARGET_OFFSET(tcl), scbid);
4655		ahc_outb(ahc, SCBPTR, saved_scbptr);
4656	} else {
4657		target_offset = TCL_TARGET_OFFSET(tcl);
4658		ahc_outb(ahc, BUSY_TARGETS + target_offset, scbid);
4659	}
4660}
4661
4662/************************** SCB and SCB queue management **********************/
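/*
 * Return non-zero if the given SCB matches the target/channel/lun/tag
 * description.  Wildcard values (ALL_CHANNELS, CAM_TARGET_WILDCARD,
 * CAM_LUN_WILDCARD, SCB_LIST_NULL) match any value for their field.
 */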
4663int
4664ahc_match_scb(struct ahc_softc *ahc, struct scb *scb, int target,
4665	      char channel, int lun, u_int tag, role_t role)
4666{
4667	int targ = SCB_GET_TARGET(ahc, scb);
4668	char chan = SCB_GET_CHANNEL(ahc, scb);
4669	int slun = SCB_GET_LUN(scb);
4670	int match;
4671
4672	match = ((chan == channel) || (channel == ALL_CHANNELS));
4673	if (match != 0)
4674		match = ((targ == target) || (target == CAM_TARGET_WILDCARD));
4675	if (match != 0)
4676		match = ((lun == slun) || (lun == CAM_LUN_WILDCARD));
4677	if (match != 0) {
4678#ifdef AHC_TARGET_MODE
4679		int group;
4680
4681		group = XPT_FC_GROUP(scb->io_ctx->ccb_h.func_code);
4682		if (role == ROLE_INITIATOR) {
4683			match = (group == XPT_FC_GROUP_COMMON)
4684			      && ((tag == scb->hscb->tag)
4685			       || (tag == SCB_LIST_NULL));
4686		} else if (role == ROLE_TARGET) {
4687			match = (group == XPT_FC_GROUP_TMODE)
4688			      && ((tag == scb->io_ctx->csio.tag_id)
4689			       || (tag == SCB_LIST_NULL));
4690		}
4691#else /* !AHC_TARGET_MODE */
4692		match = ((tag == scb->hscb->tag) || (tag == SCB_LIST_NULL));
4693#endif /* AHC_TARGET_MODE */
4694	}
4695
4696	return match;
4697}
4698
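/*
 * Return any qinfifo entries queued to the same device as this SCB to
 * CAM with CAM_REQUEUE_REQ status and ask the platform code to freeze
 * the device's queue.
 */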
4699void
4700ahc_freeze_devq(struct ahc_softc *ahc, struct scb *scb)
4701{
4702	int	target;
4703	char	channel;
4704	int	lun;
4705
4706	target = SCB_GET_TARGET(ahc, scb);
4707	lun = SCB_GET_LUN(scb);
4708	channel = SCB_GET_CHANNEL(ahc, scb);
4709
4710	ahc_search_qinfifo(ahc, target, channel, lun,
4711			   /*tag*/SCB_LIST_NULL, ROLE_UNKNOWN,
4712			   CAM_REQUEUE_REQ, SEARCH_COMPLETE);
4713
4714	ahc_platform_freeze_devq(ahc, scb);
4715}
4716
4717void
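/*
 * Place an SCB at the tail of the qinfifo and notify the sequencer of
 * the new queue position.
 */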
4718ahc_qinfifo_requeue_tail(struct ahc_softc *ahc, struct scb *scb)
4719{
4720	struct scb *prev_scb;
4721
4722	prev_scb = NULL;
4723	if (ahc_qinfifo_count(ahc) != 0) {
4724		u_int prev_tag;
4725		uint8_t prev_pos;
4726
4727		prev_pos = ahc->qinfifonext - 1;
4728		prev_tag = ahc->qinfifo[prev_pos];
4729		prev_scb = ahc_lookup_scb(ahc, prev_tag);
4730	}
4731	ahc_qinfifo_requeue(ahc, prev_scb, scb);
4732	if ((ahc->features & AHC_QUEUE_REGS) != 0) {
4733		ahc_outb(ahc, HNSCB_QOFF, ahc->qinfifonext);
4734	} else {
4735		ahc_outb(ahc, KERNEL_QINPOS, ahc->qinfifonext);
4736	}
4737}
4738
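/*
 * Link an SCB into the qinfifo behind prev_scb (or at the head if
 * prev_scb is NULL), updating both the kernel queue array and the
 * hardware SCB "next" pointers the sequencer follows.
 */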
4739static void
4740ahc_qinfifo_requeue(struct ahc_softc *ahc, struct scb *prev_scb,
4741		    struct scb *scb)
4742{
4743	if (prev_scb == NULL)
4744		ahc_outb(ahc, NEXT_QUEUED_SCB, scb->hscb->tag);
4745	else
4746		prev_scb->hscb->next = scb->hscb->tag;
4747	ahc->qinfifo[ahc->qinfifonext++] = scb->hscb->tag;
4748	scb->hscb->next = ahc->next_queued_scb->hscb->tag;
4749}
4750
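/*
 * Return the number of qinfifo entries the sequencer has yet to
 * consume: the difference between our next insertion point and the
 * sequencer's current read position.
 */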
4751static int
4752ahc_qinfifo_count(struct ahc_softc *ahc)
4753{
4754	uint8_t qinpos;
4755	uint8_t diff;
4756
4757	if ((ahc->features & AHC_QUEUE_REGS) != 0) {
4758		qinpos = ahc_inb(ahc, SNSCB_QOFF);
4759		ahc_outb(ahc, SNSCB_QOFF, qinpos);
4760	} else
4761		qinpos = ahc_inb(ahc, QINPOS);
4762	diff = ahc->qinfifonext - qinpos;
4763	return (diff);
4764}
4765
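/*
 * Scan the qinfifo, the waiting-for-selection list, and the untagged
 * holding queues for SCBs matching the given description, completing,
 * removing, or merely counting them according to "action".  Returns
 * the number of matching SCBs found.
 */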
4766int
4767ahc_search_qinfifo(struct ahc_softc *ahc, int target, char channel,
4768		   int lun, u_int tag, role_t role, uint32_t status,
4769		   ahc_search_action action)
4770{
4771	struct	scb *scb;
4772	struct	scb *prev_scb;
4773	uint8_t qinstart;
4774	uint8_t qinpos;
4775	uint8_t qintail;
4776	uint8_t next, prev;
4777	uint8_t curscbptr;
4778	int	found;
4779	int	maxtarget;
4780	int	i;
4781	int	have_qregs;
4782
4783	qintail = ahc->qinfifonext;
4784	have_qregs = (ahc->features & AHC_QUEUE_REGS) != 0;
4785	if (have_qregs) {
4786		qinstart = ahc_inb(ahc, SNSCB_QOFF);
4787		ahc_outb(ahc, SNSCB_QOFF, qinstart);
4788	} else
4789		qinstart = ahc_inb(ahc, QINPOS);
4790	qinpos = qinstart;
4791	next = ahc_inb(ahc, NEXT_QUEUED_SCB);
4792	found = 0;
4793	prev_scb = NULL;
4794
4795	if (action == SEARCH_COMPLETE) {
4796		/*
4797		 * Don't attempt to run any queued untagged transactions
4798		 * until we are done with the abort process.
4799		 */
4800		ahc_freeze_untagged_queues(ahc);
4801	}
4802
4803	/*
4804	 * Start with an empty queue.  Entries that are not chosen
4805	 * for removal will be re-added to the queue as we go.
4806	 */
4807	ahc->qinfifonext = qinpos;
4808	ahc_outb(ahc, NEXT_QUEUED_SCB, ahc->next_queued_scb->hscb->tag);
4809
4810	while (qinpos != qintail) {
4811		scb = ahc_lookup_scb(ahc, ahc->qinfifo[qinpos]);
4812		if (ahc_match_scb(ahc, scb, target, channel, lun, tag, role)) {
4813			/*
4814			 * We found an scb that needs to be acted on.
4815			 */
4816			found++;
4817			switch (action) {
4818			case SEARCH_COMPLETE:
4819			{
4820				cam_status ostat;
4821
4822				ostat = ahc_get_transaction_status(scb);
4823				if (ostat == CAM_REQ_INPROG)
4824					ahc_set_transaction_status(scb,
4825								   status);
4826				ahc_freeze_scb(scb);
4827				if ((scb->flags & SCB_ACTIVE) == 0)
4828					printf("Inactive SCB in qinfifo\n");
4829				ahc_done(ahc, scb);
4830
4831				/* FALLTHROUGH */
4832			}
4833			case SEARCH_REMOVE:
4834				break;
4835			case SEARCH_COUNT:
4836				ahc_qinfifo_requeue(ahc, prev_scb, scb);
4837				prev_scb = scb;
4838				break;
4839			}
4840		} else {
4841			ahc_qinfifo_requeue(ahc, prev_scb, scb);
4842			prev_scb = scb;
4843		}
4844		qinpos++;
4845	}
4846
4847	if ((ahc->features & AHC_QUEUE_REGS) != 0) {
4848		ahc_outb(ahc, HNSCB_QOFF, ahc->qinfifonext);
4849	} else {
4850		ahc_outb(ahc, KERNEL_QINPOS, ahc->qinfifonext);
4851	}
4852
4853	if (action != SEARCH_COUNT
4854	 && (found != 0)
4855	 && (qinstart != ahc->qinfifonext)) {
4856		/*
4857		 * The sequencer may be in the process of dmaing
4858		 * down the SCB at the beginning of the queue.
4859		 * This could be problematic if either the first,
4860		 * or the second SCB is removed from the queue
4861		 * (the first SCB includes a pointer to the "next"
4862		 * SCB to dma). If we have removed any entries, swap
4863		 * the first element in the queue with the next HSCB
4864		 * so the sequencer will notice that NEXT_QUEUED_SCB
4865		 * has changed during its dma attempt and will retry
4866		 * the DMA.
4867		 */
4868		scb = ahc_lookup_scb(ahc, ahc->qinfifo[qinstart]);
4869
4870		/*
4871		 * ahc_swap_with_next_hscb forces our next pointer to
4872		 * point to the reserved SCB for future commands.  Save
4873		 * and restore our original next pointer to maintain
4874		 * queue integrity.
4875		 */
4876		next = scb->hscb->next;
4877		ahc->scb_data->scbindex[scb->hscb->tag] = NULL;
4878		ahc_swap_with_next_hscb(ahc, scb);
4879		scb->hscb->next = next;
4880		ahc->qinfifo[qinstart] = scb->hscb->tag;
4881
4882		/* Tell the card about the new head of the qinfifo. */
4883		ahc_outb(ahc, NEXT_QUEUED_SCB, scb->hscb->tag);
4884
4885		/* Fixup the tail "next" pointer. */
4886		qintail = ahc->qinfifonext - 1;
4887		scb = ahc_lookup_scb(ahc, ahc->qinfifo[qintail]);
4888		scb->hscb->next = ahc->next_queued_scb->hscb->tag;
4889	}
4890
4891	/*
4892	 * Search waiting for selection list.
4893	 */
4894	curscbptr = ahc_inb(ahc, SCBPTR);
4895	next = ahc_inb(ahc, WAITING_SCBH);  /* Start at head of list. */
4896	prev = SCB_LIST_NULL;
4897
4898	while (next != SCB_LIST_NULL) {
4899		uint8_t scb_index;
4900
4901		ahc_outb(ahc, SCBPTR, next);
4902		scb_index = ahc_inb(ahc, SCB_TAG);
4903		if (scb_index >= ahc->scb_data->numscbs) {
4904			printf("Waiting List inconsistency. "
4905			       "SCB index == %d, yet numscbs == %d.",
4906			       scb_index, ahc->scb_data->numscbs);
4907			ahc_dump_card_state(ahc);
4908			panic("for safety");
4909		}
4910		scb = ahc_lookup_scb(ahc, scb_index);
4911		if (ahc_match_scb(ahc, scb, target, channel,
4912				  lun, SCB_LIST_NULL, role)) {
4913			/*
4914			 * We found an scb that needs to be acted on.
4915			 */
4916			found++;
4917			switch (action) {
4918			case SEARCH_COMPLETE:
4919			{
4920				cam_status ostat;
4921
4922				ostat = ahc_get_transaction_status(scb);
4923				if (ostat == CAM_REQ_INPROG)
4924					ahc_set_transaction_status(scb,
4925								   status);
4926				ahc_freeze_scb(scb);
4927				if ((scb->flags & SCB_ACTIVE) == 0)
4928					printf("Inactive SCB in Waiting List\n");
4929				ahc_done(ahc, scb);
4930				/* FALLTHROUGH */
4931			}
4932			case SEARCH_REMOVE:
4933				next = ahc_rem_wscb(ahc, next, prev);
4934				break;
4935			case SEARCH_COUNT:
4936				prev = next;
4937				next = ahc_inb(ahc, SCB_NEXT);
4938				break;
4939			}
4940		} else {
4941
4942			prev = next;
4943			next = ahc_inb(ahc, SCB_NEXT);
4944		}
4945	}
4946	ahc_outb(ahc, SCBPTR, curscbptr);
4947
4948	/*
4949	 * And lastly, the untagged holding queues.
4950	 */
4951	i = 0;
4952	if ((ahc->flags & AHC_SCB_BTT) == 0) {
4953
4954		maxtarget = 16;
4955		if (target != CAM_TARGET_WILDCARD) {
4956
4957			i = target;
4958			if (channel == 'B')
4959				i += 8;
4960			maxtarget = i + 1;
4961		}
4962	} else {
4963		maxtarget = 0;
4964	}
4965
4966	for (; i < maxtarget; i++) {
4967		struct scb_tailq *untagged_q;
4968		struct scb *next_scb;
4969
4970		untagged_q = &(ahc->untagged_queues[i]);
4971		next_scb = TAILQ_FIRST(untagged_q);
4972		while (next_scb != NULL) {
4973
4974			scb = next_scb;
4975			next_scb = TAILQ_NEXT(scb, links.tqe);
4976
4977			/*
4978			 * The head of the list may be the currently
4979			 * active untagged command for a device.
4980			 * We're only searching for commands that
4981			 * have not been started.  A transaction
4982			 * marked active but still in the qinfifo
4983			 * is removed by the qinfifo scanning code
4984			 * above.
4985			 */
4986			if ((scb->flags & SCB_ACTIVE) != 0)
4987				continue;
4988
4989			if (ahc_match_scb(ahc, scb, target, channel,
4990					  lun, SCB_LIST_NULL, role)) {
4991				/*
4992				 * We found an scb that needs to be acted on.
4993				 */
4994				found++;
4995				switch (action) {
4996				case SEARCH_COMPLETE:
4997				{
4998					cam_status ostat;
4999
5000					ostat = ahc_get_transaction_status(scb);
5001					if (ostat == CAM_REQ_INPROG)
5002						ahc_set_transaction_status(scb,
5003								   status);
5004					ahc_freeze_scb(scb);
5005					if ((scb->flags & SCB_ACTIVE) == 0)
5006						printf("Inactive SCB in untaggedQ\n");
5007					ahc_done(ahc, scb);
5008					break;
5009				}
5010				case SEARCH_REMOVE:
5011					TAILQ_REMOVE(untagged_q, scb,
5012						     links.tqe);
5013					break;
5014				case SEARCH_COUNT:
5015					break;
5016				}
5017			}
5018		}
5019	}
5020
5021	if (action == SEARCH_COMPLETE)
5022		ahc_release_untagged_queues(ahc);
5023	return (found);
5024}
5025
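/*
 * Walk the on-chip disconnected SCB list looking for entries matching
 * the given target/channel/lun/tag, optionally removing them and/or
 * stopping at the first match.  Returns the number of matches found.
 */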
5026int
5027ahc_search_disc_list(struct ahc_softc *ahc, int target, char channel,
5028		     int lun, u_int tag, int stop_on_first, int remove,
5029		     int save_state)
5030{
5031	struct	scb *scbp;
5032	u_int	next;
5033	u_int	prev;
5034	u_int	count;
5035	u_int	active_scb;
5036
5037	count = 0;
5038	next = ahc_inb(ahc, DISCONNECTED_SCBH);
5039	prev = SCB_LIST_NULL;
5040
5041	if (save_state) {
5042		/* restore this when we're done */
5043		active_scb = ahc_inb(ahc, SCBPTR);
5044	} else
5045		/* Silence compiler */
5046		active_scb = SCB_LIST_NULL;
5047
5048	while (next != SCB_LIST_NULL) {
5049		u_int scb_index;
5050
5051		ahc_outb(ahc, SCBPTR, next);
5052		scb_index = ahc_inb(ahc, SCB_TAG);
5053		if (scb_index >= ahc->scb_data->numscbs) {
5054			printf("Disconnected List inconsistency. "
5055			       "SCB index == %d, yet numscbs == %d.",
5056			       scb_index, ahc->scb_data->numscbs);
5057			ahc_dump_card_state(ahc);
5058			panic("for safety");
5059		}
5060
5061		if (next == prev) {
5062			panic("Disconnected List Loop. "
5063			      "cur SCBPTR == %x, prev SCBPTR == %x.",
5064			      next, prev);
5065		}
5066		scbp = ahc_lookup_scb(ahc, scb_index);
5067		if (ahc_match_scb(ahc, scbp, target, channel, lun,
5068				  tag, ROLE_INITIATOR)) {
5069			count++;
5070			if (remove) {
5071				next =
5072				    ahc_rem_scb_from_disc_list(ahc, prev, next);
5073			} else {
5074				prev = next;
5075				next = ahc_inb(ahc, SCB_NEXT);
5076			}
5077			if (stop_on_first)
5078				break;
5079		} else {
5080			prev = next;
5081			next = ahc_inb(ahc, SCB_NEXT);
5082		}
5083	}
5084	if (save_state)
5085		ahc_outb(ahc, SCBPTR, active_scb);
5086	return (count);
5087}
5088
5089/*
5090 * Remove an SCB from the on chip list of disconnected transactions.
5091 * This is empty/unused if we are not performing SCB paging.
5092 */
5093static u_int
5094ahc_rem_scb_from_disc_list(struct ahc_softc *ahc, u_int prev, u_int scbptr)
5095{
5096	u_int next;
5097
5098	ahc_outb(ahc, SCBPTR, scbptr);
5099	next = ahc_inb(ahc, SCB_NEXT);
5100
5101	ahc_outb(ahc, SCB_CONTROL, 0);
5102
5103	ahc_add_curscb_to_free_list(ahc);
5104
5105	if (prev != SCB_LIST_NULL) {
5106		ahc_outb(ahc, SCBPTR, prev);
5107		ahc_outb(ahc, SCB_NEXT, next);
5108	} else
5109		ahc_outb(ahc, DISCONNECTED_SCBH, next);
5110
5111	return (next);
5112}
5113
5114/*
5115 * Add the SCB as selected by SCBPTR onto the on chip list of
5116 * free hardware SCBs.  This list is empty/unused if we are not
5117 * performing SCB paging.
5118 */
5119static void
5120ahc_add_curscb_to_free_list(struct ahc_softc *ahc)
5121{
5122	/*
5123	 * Invalidate the tag so that our abort
5124	 * routines don't think it's active.
5125	 */
5126	ahc_outb(ahc, SCB_TAG, SCB_LIST_NULL);
5127
5128	if ((ahc->flags & AHC_PAGESCBS) != 0) {
5129		ahc_outb(ahc, SCB_NEXT, ahc_inb(ahc, FREE_SCBH));
5130		ahc_outb(ahc, FREE_SCBH, ahc_inb(ahc, SCBPTR));
5131	}
5132}
5133
5134/*
5135 * Manipulate the waiting for selection list and return the
5136 * scb that follows the one that we remove.
5137 */
5138static u_int
5139ahc_rem_wscb(struct ahc_softc *ahc, u_int scbpos, u_int prev)
5140{
5141	u_int curscb, next;
5142
5143	/*
5144	 * Select the SCB we want to abort and
5145	 * pull the next pointer out of it.
5146	 */
5147	curscb = ahc_inb(ahc, SCBPTR);
5148	ahc_outb(ahc, SCBPTR, scbpos);
5149	next = ahc_inb(ahc, SCB_NEXT);
5150
5151	/* Clear the necessary fields */
5152	ahc_outb(ahc, SCB_CONTROL, 0);
5153
5154	ahc_add_curscb_to_free_list(ahc);
5155
5156	/* update the waiting list */
5157	if (prev == SCB_LIST_NULL) {
5158		/* First in the list */
5159		ahc_outb(ahc, WAITING_SCBH, next);
5160
5161		/*
5162		 * Ensure we aren't attempting to perform
5163		 * selection for this entry.
5164		 */
5165		ahc_outb(ahc, SCSISEQ, (ahc_inb(ahc, SCSISEQ) & ~ENSELO));
5166	} else {
5167		/*
5168		 * Select the scb that pointed to us
5169		 * and update its next pointer.
5170		 */
5171		ahc_outb(ahc, SCBPTR, prev);
5172		ahc_outb(ahc, SCB_NEXT, next);
5173	}
5174
5175	/*
5176	 * Point us back at the original scb position.
5177	 */
5178	ahc_outb(ahc, SCBPTR, curscb);
5179	return next;
5180}
5181
5182/******************************** Error Handling ******************************/
5183/*
5184 * Abort all SCBs that match the given description (target/channel/lun/tag),
5185 * setting their status to the passed in status if the status has not already
5186 * been modified from CAM_REQ_INPROG.  This routine assumes that the sequencer
5187 * is paused before it is called.
5188 */
5189int
5190ahc_abort_scbs(struct ahc_softc *ahc, int target, char channel,
5191	       int lun, u_int tag, role_t role, uint32_t status)
5192{
5193	struct	scb *scbp;
5194	struct	scb *scbp_next;
5195	u_int	active_scb;
5196	int	i, j;
5197	int	maxtarget;
5198	int	minlun;
5199	int	maxlun;
5200
5201	int	found;
5202
5203	/*
5204	 * Don't attempt to run any queued untagged transactions
5205	 * until we are done with the abort process.
5206	 */
5207	ahc_freeze_untagged_queues(ahc);
5208
5209	/* restore this when we're done */
5210	active_scb = ahc_inb(ahc, SCBPTR);
5211
5212	found = ahc_search_qinfifo(ahc, target, channel, lun, SCB_LIST_NULL,
5213				   role, CAM_REQUEUE_REQ, SEARCH_COMPLETE);
5214
5215	/*
5216	 * Clean out the busy target table for any untagged commands.
5217	 */
5218	i = 0;
5219	maxtarget = 16;
5220	if (target != CAM_TARGET_WILDCARD) {
5221		i = target;
5222		if (channel == 'B')
5223			i += 8;
5224		maxtarget = i + 1;
5225	}
5226
5227	if (lun == CAM_LUN_WILDCARD) {
5228
5229		/*
5230		 * Unless we are using an SCB based
5231		 * busy targets table, there is only
5232		 * one table entry for all luns of
5233		 * a target.
5234		 */
5235		minlun = 0;
5236		maxlun = 1;
5237		if ((ahc->flags & AHC_SCB_BTT) != 0)
5238			maxlun = AHC_NUM_LUNS;
5239	} else {
5240		minlun = lun;
5241		maxlun = lun + 1;
5242	}
5243
5244	for (;i < maxtarget; i++) {
5245		for (j = minlun;j < maxlun; j++)
5246			ahc_unbusy_tcl(ahc, BUILD_TCL(i << 4, j));
5247	}
5248
5249	/*
5250	 * Go through the disconnected list and remove any entries we
5251	 * have queued for completion, 0'ing their control byte too.
5252	 * We save the active SCB and restore it ourselves, so there
5253	 * is no reason for this search to restore it too.
5254	 */
5255	ahc_search_disc_list(ahc, target, channel, lun, tag,
5256			     /*stop_on_first*/FALSE, /*remove*/TRUE,
5257			     /*save_state*/FALSE);
5258
5259	/*
5260	 * Go through the hardware SCB array looking for commands that
5261	 * were active but not on any list.
5262	 */
5263	for (i = 0; i < ahc->scb_data->maxhscbs; i++) {
5264		u_int scbid;
5265
5266		ahc_outb(ahc, SCBPTR, i);
5267		scbid = ahc_inb(ahc, SCB_TAG);
5268		scbp = ahc_lookup_scb(ahc, scbid);
5269		if (scbp != NULL
5270		 && ahc_match_scb(ahc, scbp, target, channel, lun, tag, role))
5271			ahc_add_curscb_to_free_list(ahc);
5272	}
5273
5274	/*
5275	 * Go through the pending CCB list and look for
5276	 * commands for this target that are still active.
5277	 * These are other tagged commands that were
5278	 * disconnected when the reset occurred.
5279	 */
5280	scbp_next = LIST_FIRST(&ahc->pending_scbs);
5281	while (scbp_next != NULL) {
5282		scbp = scbp_next;
5283		scbp_next = LIST_NEXT(scbp, pending_links);
5284		if (ahc_match_scb(ahc, scbp, target, channel, lun, tag, role)) {
5285			cam_status ostat;
5286
5287			ostat = ahc_get_transaction_status(scbp);
5288			if (ostat == CAM_REQ_INPROG)
5289				ahc_set_transaction_status(scbp, status);
5290			ahc_freeze_scb(scbp);
5291			if ((scbp->flags & SCB_ACTIVE) == 0)
5292				printf("Inactive SCB on pending list\n");
5293			ahc_done(ahc, scbp);
5294			found++;
5295		}
5296	}
5297	ahc_outb(ahc, SCBPTR, active_scb);
5298	ahc_platform_abort_scbs(ahc, target, channel, lun, tag, role, status);
5299	ahc_release_untagged_queues(ahc);
5300	return found;
5301}
5302
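/*
 * Assert the SCSI bus reset signal on the currently selected channel,
 * with SCSI reset interrupts masked so we don't service an interrupt
 * for a reset we generated ourselves.
 */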
5303static void
5304ahc_reset_current_bus(struct ahc_softc *ahc)
5305{
5306	uint8_t scsiseq;
5307
5308	ahc_outb(ahc, SIMODE1, ahc_inb(ahc, SIMODE1) & ~ENSCSIRST);
5309	scsiseq = ahc_inb(ahc, SCSISEQ);
5310	ahc_outb(ahc, SCSISEQ, scsiseq | SCSIRSTO);
5311	ahc_delay(AHC_BUSRESET_DELAY);
5312	/* Turn off the bus reset */
5313	ahc_outb(ahc, SCSISEQ, scsiseq & ~SCSIRSTO);
5314
5315	ahc_clear_intstat(ahc);
5316
5317	/* Re-enable reset interrupts */
5318	ahc_outb(ahc, SIMODE1, ahc_inb(ahc, SIMODE1) | ENSCSIRST);
5319}
5320
5321int
5322ahc_reset_channel(struct ahc_softc *ahc, char channel, int initiate_reset)
5323{
5324	struct	ahc_devinfo devinfo;
5325	u_int	initiator, target, max_scsiid;
5326	u_int	sblkctl;
5327	int	found;
5328	int	restart_needed;
5329	char	cur_channel;
5330
5331	ahc->pending_device = NULL;
5332
5333	ahc_compile_devinfo(&devinfo,
5334			    CAM_TARGET_WILDCARD,
5335			    CAM_TARGET_WILDCARD,
5336			    CAM_LUN_WILDCARD,
5337			    channel, ROLE_UNKNOWN);
5338	pause_sequencer(ahc);
5339
5340	/* Make sure the sequencer is in a safe location. */
5341	ahc_clear_critical_section(ahc);
5342
5343	/*
5344	 * Run our command complete fifos to ensure that we perform
5345	 * completion processing on any commands that 'completed'
5346	 * before the reset occurred.
5347	 */
5348	ahc_run_qoutfifo(ahc);
5349#ifdef AHC_TARGET_MODE
5350	if ((ahc->flags & AHC_TARGETROLE) != 0) {
5351		ahc_run_tqinfifo(ahc, /*paused*/TRUE);
5352	}
5353#endif
5354
5355	/*
5356	 * Reset the bus if we are initiating this reset
5357	 */
5358	sblkctl = ahc_inb(ahc, SBLKCTL);
5359	cur_channel = 'A';
5360	if ((ahc->features & AHC_TWIN) != 0
5361	 && ((sblkctl & SELBUSB) != 0))
5362	    cur_channel = 'B';
5363	if (cur_channel != channel) {
5364		/* Case 1: Command for another bus is active
5365		 * Stealthily reset the other bus without
5366		 * upsetting the current bus.
5367		 */
5368		ahc_outb(ahc, SBLKCTL, sblkctl ^ SELBUSB);
5369		ahc_outb(ahc, SIMODE1,
5370			 ahc_inb(ahc, SIMODE1) & ~(ENBUSFREE|ENSCSIRST));
5371		ahc_outb(ahc, SCSISEQ,
5372			 ahc_inb(ahc, SCSISEQ) & (ENSELI|ENRSELI|ENAUTOATNP));
5373		if (initiate_reset)
5374			ahc_reset_current_bus(ahc);
5375		ahc_clear_intstat(ahc);
5376		ahc_outb(ahc, SBLKCTL, sblkctl);
5377		restart_needed = FALSE;
5378	} else {
5379		/* Case 2: A command from this bus is active or we're idle */
5380		ahc_clear_msg_state(ahc);
5381		ahc_outb(ahc, SIMODE1,
5382			 ahc_inb(ahc, SIMODE1) & ~(ENBUSFREE|ENSCSIRST));
5383		ahc_outb(ahc, SCSISEQ,
5384			 ahc_inb(ahc, SCSISEQ) & (ENSELI|ENRSELI|ENAUTOATNP));
5385		if (initiate_reset)
5386			ahc_reset_current_bus(ahc);
5387		ahc_clear_intstat(ahc);
5388		restart_needed = TRUE;
5389	}
5390
5391	/*
5392	 * Clean up all the state information for the
5393	 * pending transactions on this bus.
5394	 */
5395	found = ahc_abort_scbs(ahc, CAM_TARGET_WILDCARD, channel,
5396			       CAM_LUN_WILDCARD, SCB_LIST_NULL,
5397			       ROLE_UNKNOWN, CAM_SCSI_BUS_RESET);
5398
5399	max_scsiid = (ahc->features & AHC_WIDE) ? 15 : 7;
5400
5401#ifdef AHC_TARGET_MODE
5402	/*
5403	 * Send an immediate notify ccb to all target mode peripheral
5404	 * drivers affected by this action.
5405	 */
5406	for (target = 0; target <= max_scsiid; target++) {
5407		struct tmode_tstate* tstate;
5408		u_int lun;
5409
5410		tstate = ahc->enabled_targets[target];
5411		if (tstate == NULL)
5412			continue;
5413		for (lun = 0; lun < AHC_NUM_LUNS; lun++) {
5414			struct tmode_lstate* lstate;
5415
5416			lstate = tstate->enabled_luns[lun];
5417			if (lstate == NULL)
5418				continue;
5419
5420			ahc_queue_lstate_event(ahc, lstate, CAM_TARGET_WILDCARD,
5421					       EVENT_TYPE_BUS_RESET, /*arg*/0);
5422			ahc_send_lstate_events(ahc, lstate);
5423		}
5424	}
5425#endif
5426	/* Notify the XPT that a bus reset occurred */
5427	ahc_send_async(ahc, devinfo.channel, CAM_TARGET_WILDCARD,
5428		       CAM_LUN_WILDCARD, AC_BUS_RESET);
5429
5430	/*
5431	 * Revert to async/narrow transfers until we renegotiate.
5432	 */
5433	for (target = 0; target <= max_scsiid; target++) {
5434
5435		if (ahc->enabled_targets[target] == NULL)
5436			continue;
5437		for (initiator = 0; initiator <= max_scsiid; initiator++) {
5438			struct ahc_devinfo devinfo;
5439
5440			ahc_compile_devinfo(&devinfo, target, initiator,
5441					    CAM_LUN_WILDCARD,
5442					    channel, ROLE_UNKNOWN);
5443			ahc_set_width(ahc, &devinfo, MSG_EXT_WDTR_BUS_8_BIT,
5444				      AHC_TRANS_CUR, /*paused*/TRUE);
5445			ahc_set_syncrate(ahc, &devinfo, /*syncrate*/NULL,
5446					 /*period*/0, /*offset*/0,
5447					 /*ppr_options*/0, AHC_TRANS_CUR,
5448					 /*paused*/TRUE);
5449		}
5450	}
5451
5452	if (restart_needed)
5453		restart_sequencer(ahc);
5454	else
5455		unpause_sequencer(ahc);
5456	return found;
5457}
5458
5459
5460/***************************** Residual Processing ****************************/
5461/*
5462 * Calculate the residual for a just completed SCB.
5463 */
5464static void
5465ahc_calc_residual(struct scb *scb)
5466{
5467	struct hardware_scb *hscb;
5468	struct status_pkt *spkt;
5469	uint32_t sgptr;
5470	uint32_t resid_sgptr;
5471	uint32_t resid;
5472
5473	/*
5474	 * 5 cases.
5475	 * 1) No residual.
5476	 *    SG_RESID_VALID clear in sgptr.
5477	 * 2) Transferless command
5478	 * 3) Never performed any transfers.
5479	 *    sgptr has SG_FULL_RESID set.
5480	 * 4) No residual but target did not
5481	 *    save data pointers after the
5482	 *    last transfer, so sgptr was
5483	 *    never updated.
5484	 * 5) We have a partial residual.
5485	 *    Use residual_sgptr to determine
5486	 *    where we are.
5487	 */
5488
5489	hscb = scb->hscb;
5490	sgptr = ahc_le32toh(hscb->sgptr);
5491	if ((sgptr & SG_RESID_VALID) == 0)
5492		/* Case 1 */
5493		return;
5494	sgptr &= ~SG_RESID_VALID;
5495
5496	if ((sgptr & SG_LIST_NULL) != 0)
5497		/* Case 2 */
5498		return;
5499
5500	spkt = &hscb->shared_data.status;
5501	resid_sgptr = ahc_le32toh(spkt->residual_sg_ptr);
5502	if ((sgptr & SG_FULL_RESID) != 0) {
5503		/* Case 3 */
5504		resid = ahc_get_transfer_length(scb);
5505	} else if ((resid_sgptr & SG_LIST_NULL) != 0) {
5506		/* Case 4 */
5507		return;
5508	} else if ((resid_sgptr & ~SG_PTR_MASK) != 0) {
5509		panic("Bogus resid sgptr value 0x%x\n", resid_sgptr);
5510	} else {
5511		struct ahc_dma_seg *sg;
5512
5513		/*
5514		 * Remainder of the SG where the transfer
5515		 * stopped.
5516		 */
5517		resid = ahc_le32toh(spkt->residual_datacnt) & AHC_SG_LEN_MASK;
5518		sg = ahc_sg_bus_to_virt(scb, resid_sgptr & SG_PTR_MASK);
5519
5520		/* The residual sg_ptr always points to the next sg */
5521		sg--;
5522
5523		/*
5524		 * Add up the contents of all residual
5525		 * SG segments that are after the SG where
5526		 * the transfer stopped.
5527		 */
5528		while ((ahc_le32toh(sg->len) & AHC_DMA_LAST_SEG) == 0) {
5529			sg++;
5530			resid += ahc_le32toh(sg->len) & AHC_SG_LEN_MASK;
5531		}
5532	}
5533	if ((scb->flags & SCB_SENSE) == 0)
5534		ahc_set_residual(scb, resid);
5535	else
5536		ahc_set_sense_residual(scb, resid);
5537
5538#ifdef AHC_DEBUG
5539	if (ahc_debug & AHC_SHOWMISC) {
5540		ahc_print_path(ahc, scb);
5541		printf("Handled Residual of %d bytes\n", resid);
5542	}
5543#endif
5544}
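
#if 0
/*
 * Illustrative sketch only (never built): a simplified rendering of the
 * "case 5" walk above, showing how a partial residual is accumulated from
 * the remainder of the S/G element where the transfer stopped plus every
 * segment that follows it.  The seg_t type, LAST_SEG flag, and LEN_MASK
 * below are hypothetical stand-ins for struct ahc_dma_seg,
 * AHC_DMA_LAST_SEG, and AHC_SG_LEN_MASK; the real code must also
 * byte-swap each length with ahc_le32toh().
 */
typedef struct { uint32_t addr; uint32_t len; } seg_t;
#define	LAST_SEG	0x80000000u
#define	LEN_MASK	0x00ffffffu

static uint32_t
sketch_partial_resid(const seg_t *stopped_sg, uint32_t resid_in_current)
{
	uint32_t resid = resid_in_current & LEN_MASK;

	/* Add the full length of every segment after the one we stopped in. */
	while ((stopped_sg->len & LAST_SEG) == 0) {
		stopped_sg++;
		resid += stopped_sg->len & LEN_MASK;
	}
	return (resid);
}
#endif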
5545
5546/******************************* Target Mode **********************************/
5547#ifdef AHC_TARGET_MODE
5548/*
5549 * Add a target mode event to this lun's queue
5550 */
5551static void
5552ahc_queue_lstate_event(struct ahc_softc *ahc, struct tmode_lstate *lstate,
5553		       u_int initiator_id, u_int event_type, u_int event_arg)
5554{
5555	struct ahc_tmode_event *event;
5556	int pending;
5557
5558	xpt_freeze_devq(lstate->path, /*count*/1);
5559	if (lstate->event_w_idx >= lstate->event_r_idx)
5560		pending = lstate->event_w_idx - lstate->event_r_idx;
5561	else
5562		pending = AHC_TMODE_EVENT_BUFFER_SIZE + 1
5563			- (lstate->event_r_idx - lstate->event_w_idx);
5564
5565	if (event_type == EVENT_TYPE_BUS_RESET
5566	 || event_type == MSG_BUS_DEV_RESET) {
5567		/*
5568		 * Any earlier events are irrelevant, so reset our buffer.
5569		 * This has the effect of allowing us to deal with reset
5570		 * floods (an external device holding down the reset line)
5571		 * without losing the event that is really interesting.
5572		 */
5573		lstate->event_r_idx = 0;
5574		lstate->event_w_idx = 0;
5575		xpt_release_devq(lstate->path, pending, /*runqueue*/FALSE);
5576	}
5577
5578	if (pending == AHC_TMODE_EVENT_BUFFER_SIZE) {
5579		xpt_print_path(lstate->path);
5580		printf("immediate event %x:%x lost\n",
5581		       lstate->event_buffer[lstate->event_r_idx].event_type,
5582		       lstate->event_buffer[lstate->event_r_idx].event_arg);
5583		lstate->event_r_idx++;
5584		if (lstate->event_r_idx == AHC_TMODE_EVENT_BUFFER_SIZE)
5585			lstate->event_r_idx = 0;
5586		xpt_release_devq(lstate->path, /*count*/1, /*runqueue*/FALSE);
5587	}
5588
5589	event = &lstate->event_buffer[lstate->event_w_idx];
5590	event->initiator_id = initiator_id;
5591	event->event_type = event_type;
5592	event->event_arg = event_arg;
5593	lstate->event_w_idx++;
5594	if (lstate->event_w_idx == AHC_TMODE_EVENT_BUFFER_SIZE)
5595		lstate->event_w_idx = 0;
5596}
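
#if 0
/*
 * Illustrative sketch only (never built): the event ring bookkeeping used
 * above.  The write index leads the read index and both wrap to zero at
 * AHC_TMODE_EVENT_BUFFER_SIZE.  ring_pending() is a hypothetical helper
 * that simply mirrors the "pending" computation in
 * ahc_queue_lstate_event().
 */
static u_int
ring_pending(u_int w_idx, u_int r_idx)
{
	if (w_idx >= r_idx)
		return (w_idx - r_idx);
	/* The write index has wrapped around the end of the buffer. */
	return (AHC_TMODE_EVENT_BUFFER_SIZE + 1 - (r_idx - w_idx));
}
#endif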
5597
5598/*
5599 * Send any target mode events queued up waiting
5600 * for immediate notify resources.
5601 */
5602void
5603ahc_send_lstate_events(struct ahc_softc *ahc, struct tmode_lstate *lstate)
5604{
5605	struct ccb_hdr *ccbh;
5606	struct ccb_immed_notify *inot;
5607
5608	while (lstate->event_r_idx != lstate->event_w_idx
5609	    && (ccbh = SLIST_FIRST(&lstate->immed_notifies)) != NULL) {
5610		struct ahc_tmode_event *event;
5611
5612		event = &lstate->event_buffer[lstate->event_r_idx];
5613		SLIST_REMOVE_HEAD(&lstate->immed_notifies, sim_links.sle);
5614		inot = (struct ccb_immed_notify *)ccbh;
5615		switch (event->event_type) {
5616		case EVENT_TYPE_BUS_RESET:
5617			ccbh->status = CAM_SCSI_BUS_RESET|CAM_DEV_QFRZN;
5618			break;
5619		default:
5620			ccbh->status = CAM_MESSAGE_RECV|CAM_DEV_QFRZN;
5621			inot->message_args[0] = event->event_type;
5622			inot->message_args[1] = event->event_arg;
5623			break;
5624		}
5625		inot->initiator_id = event->initiator_id;
5626		inot->sense_len = 0;
5627		xpt_done((union ccb *)inot);
5628		lstate->event_r_idx++;
5629		if (lstate->event_r_idx == AHC_TMODE_EVENT_BUFFER_SIZE)
5630			lstate->event_r_idx = 0;
5631	}
5632}
5633#endif
5634
5635/******************** Sequencer Program Patching/Download *********************/
5636
5637#ifdef AHC_DUMP_SEQ
5638void
5639ahc_dumpseq(struct ahc_softc* ahc)
5640{
5641	int i;
5642	int max_prog;
5643
5644	if ((ahc->chip & AHC_BUS_MASK) < AHC_PCI)
5645		max_prog = 448;
5646	else if ((ahc->features & AHC_ULTRA2) != 0)
5647		max_prog = 768;
5648	else
5649		max_prog = 512;
5650
5651	ahc_outb(ahc, SEQCTL, PERRORDIS|FAILDIS|FASTMODE|LOADRAM);
5652	ahc_outb(ahc, SEQADDR0, 0);
5653	ahc_outb(ahc, SEQADDR1, 0);
5654	for (i = 0; i < max_prog; i++) {
5655		uint8_t ins_bytes[4];
5656
5657		ahc_insb(ahc, SEQRAM, ins_bytes, 4);
5658		printf("0x%08x\n", ins_bytes[0] << 24
5659				 | ins_bytes[1] << 16
5660				 | ins_bytes[2] << 8
5661				 | ins_bytes[3]);
5662	}
5663}
5664#endif
5665
5666static void
5667ahc_loadseq(struct ahc_softc *ahc)
5668{
5669	struct	cs cs_table[num_critical_sections];
5670	u_int	begin_set[num_critical_sections];
5671	u_int	end_set[num_critical_sections];
5672	struct	patch *cur_patch;
5673	u_int	cs_count;
5674	u_int	cur_cs;
5675	u_int	i;
5676	int	downloaded;
5677	u_int	skip_addr;
5678	u_int	sg_prefetch_cnt;
5679	uint8_t	download_consts[7];
5680
5681	/*
5682	 * Start out with 0 critical sections
5683	 * that apply to this firmware load.
5684	 */
5685	cs_count = 0;
5686	cur_cs = 0;
5687	memset(begin_set, 0, sizeof(begin_set));
5688	memset(end_set, 0, sizeof(end_set));
5689
5690	/* Setup downloadable constant table */
5691	download_consts[QOUTFIFO_OFFSET] = 0;
5692	if (ahc->targetcmds != NULL)
5693		download_consts[QOUTFIFO_OFFSET] += 32;
5694	download_consts[QINFIFO_OFFSET] = download_consts[QOUTFIFO_OFFSET] + 1;
5695	download_consts[CACHESIZE_MASK] = ahc->pci_cachesize - 1;
5696	download_consts[INVERTED_CACHESIZE_MASK] = ~(ahc->pci_cachesize - 1);
5697	sg_prefetch_cnt = ahc->pci_cachesize;
5698	if (sg_prefetch_cnt < (2 * sizeof(struct ahc_dma_seg)))
5699		sg_prefetch_cnt = 2 * sizeof(struct ahc_dma_seg);
5700	download_consts[SG_PREFETCH_CNT] = sg_prefetch_cnt;
5701	download_consts[SG_PREFETCH_ALIGN_MASK] = ~(sg_prefetch_cnt - 1);
5702	download_consts[SG_PREFETCH_ADDR_MASK] = (sg_prefetch_cnt - 1);
5703
5704	cur_patch = patches;
5705	downloaded = 0;
5706	skip_addr = 0;
5707	ahc_outb(ahc, SEQCTL, PERRORDIS|FAILDIS|FASTMODE|LOADRAM);
5708	ahc_outb(ahc, SEQADDR0, 0);
5709	ahc_outb(ahc, SEQADDR1, 0);
5710
5711	for (i = 0; i < sizeof(seqprog)/4; i++) {
5712		if (ahc_check_patch(ahc, &cur_patch, i, &skip_addr) == 0) {
5713			/*
5714			 * Don't download this instruction as it
5715			 * is in a patch that was removed.
5716			 */
5717			continue;
5718		}
5719		/*
5720		 * Move through the CS table until we find a CS
5721		 * that might apply to this instruction.
5722		 */
5723		for (; cur_cs < num_critical_sections; cur_cs++) {
5724			if (critical_sections[cur_cs].end <= i) {
5725				if (begin_set[cs_count] == TRUE
5726				 && end_set[cs_count] == FALSE) {
5727					cs_table[cs_count].end = downloaded;
5728				 	end_set[cs_count] = TRUE;
5729					cs_count++;
5730				}
5731				continue;
5732			}
5733			if (critical_sections[cur_cs].begin <= i
5734			 && begin_set[cs_count] == FALSE) {
5735				cs_table[cs_count].begin = downloaded;
5736				begin_set[cs_count] = TRUE;
5737			}
5738			break;
5739		}
5740		ahc_download_instr(ahc, i, download_consts);
5741		downloaded++;
5742	}
5743
5744	ahc->num_critical_sections = cs_count;
5745	if (cs_count != 0) {
5746
5747		cs_count *= sizeof(struct cs);
5748		ahc->critical_sections = malloc(cs_count, M_DEVBUF, M_NOWAIT);
5749		if (ahc->critical_sections == NULL)
5750			panic("ahc_loadseq: Could not malloc");
5751		memcpy(ahc->critical_sections, cs_table, cs_count);
5752	}
5753	ahc_outb(ahc, SEQCTL, PERRORDIS|FAILDIS|FASTMODE);
5754	restart_sequencer(ahc);
5755
5756	if (bootverbose)
5757		printf(" %d instructions downloaded\n", downloaded);
5758}
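
#if 0
/*
 * Illustrative sketch only (never built): how the S/G prefetch download
 * constants are derived above.  Given the PCI cache line size in bytes
 * (assumed to be a power of two) the prefetch count is clamped to at
 * least two S/G elements, and the alignment/address masks fall out of
 * that count.  sketch_sg_prefetch() and sg_seg_size are hypothetical
 * stand-ins; the real values are stored in download_consts[] (a uint8_t
 * table, so only the low byte of each value is downloaded).
 */
static void
sketch_sg_prefetch(u_int pci_cachesize, u_int sg_seg_size,
		   uint8_t *cnt, uint8_t *align_mask, uint8_t *addr_mask)
{
	u_int prefetch = pci_cachesize;

	if (prefetch < 2 * sg_seg_size)
		prefetch = 2 * sg_seg_size;
	*cnt = prefetch;			/* SG_PREFETCH_CNT	  */
	*align_mask = ~(prefetch - 1);		/* SG_PREFETCH_ALIGN_MASK */
	*addr_mask = prefetch - 1;		/* SG_PREFETCH_ADDR_MASK  */
}
#endif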
5759
5760static int
5761ahc_check_patch(struct ahc_softc *ahc, struct patch **start_patch,
5762		u_int start_instr, u_int *skip_addr)
5763{
5764	struct	patch *cur_patch;
5765	struct	patch *last_patch;
5766	u_int	num_patches;
5767
5768	num_patches = sizeof(patches)/sizeof(struct patch);
5769	last_patch = &patches[num_patches];
5770	cur_patch = *start_patch;
5771
5772	while (cur_patch < last_patch && start_instr == cur_patch->begin) {
5773
5774		if (cur_patch->patch_func(ahc) == 0) {
5775
5776			/* Start rejecting code */
5777			*skip_addr = start_instr + cur_patch->skip_instr;
5778			cur_patch += cur_patch->skip_patch;
5779		} else {
5780			/* Accepted this patch.  Advance to the next
5781			 * one and wait for our instruction pointer to
5782			 * hit this point.
5783			 */
5784			cur_patch++;
5785		}
5786	}
5787
5788	*start_patch = cur_patch;
5789	if (start_instr < *skip_addr)
5790		/* Still skipping */
5791		return (0);
5792
5793	return (1);
5794}
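
#if 0
/*
 * Illustrative sketch only (never built): the bookkeeping performed above
 * when a patch's predicate rejects it.  start_instr is the sequencer
 * address being considered; the patch entry names how many instructions
 * and how many dependent patch table entries to step over.  The helper
 * below is hypothetical and only mirrors the two assignments made in
 * ahc_check_patch().
 */
static void
sketch_reject_patch(struct patch **cur_patch, u_int start_instr,
		    u_int *skip_addr)
{
	/* Skip the instructions covered by this patch... */
	*skip_addr = start_instr + (*cur_patch)->skip_instr;
	/* ...and any dependent patch table entries. */
	*cur_patch += (*cur_patch)->skip_patch;
}
#endif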
5795
5796static void
5797ahc_download_instr(struct ahc_softc *ahc, u_int instrptr, uint8_t *dconsts)
5798{
5799	union	ins_formats instr;
5800	struct	ins_format1 *fmt1_ins;
5801	struct	ins_format3 *fmt3_ins;
5802	u_int	opcode;
5803
5804	/*
5805	 * The firmware is always compiled into a little endian format.
5806	 */
5807	instr.integer = ahc_le32toh(*(uint32_t*)&seqprog[instrptr * 4]);
5808
5809	fmt1_ins = &instr.format1;
5810	fmt3_ins = NULL;
5811
5812	/* Pull the opcode */
5813	opcode = instr.format1.opcode;
5814	switch (opcode) {
5815	case AIC_OP_JMP:
5816	case AIC_OP_JC:
5817	case AIC_OP_JNC:
5818	case AIC_OP_CALL:
5819	case AIC_OP_JNE:
5820	case AIC_OP_JNZ:
5821	case AIC_OP_JE:
5822	case AIC_OP_JZ:
5823	{
5824		struct patch *cur_patch;
5825		int address_offset;
5826		u_int address;
5827		u_int skip_addr;
5828		u_int i;
5829
5830		fmt3_ins = &instr.format3;
5831		address_offset = 0;
5832		address = fmt3_ins->address;
5833		cur_patch = patches;
5834		skip_addr = 0;
5835
5836		for (i = 0; i < address;) {
5837
5838			ahc_check_patch(ahc, &cur_patch, i, &skip_addr);
5839
5840			if (skip_addr > i) {
5841				int end_addr;
5842
5843				end_addr = MIN(address, skip_addr);
5844				address_offset += end_addr - i;
5845				i = skip_addr;
5846			} else {
5847				i++;
5848			}
5849		}
5850		address -= address_offset;
5851		fmt3_ins->address = address;
5852		/* FALLTHROUGH */
5853	}
5854	case AIC_OP_OR:
5855	case AIC_OP_AND:
5856	case AIC_OP_XOR:
5857	case AIC_OP_ADD:
5858	case AIC_OP_ADC:
5859	case AIC_OP_BMOV:
5860		if (fmt1_ins->parity != 0) {
5861			fmt1_ins->immediate = dconsts[fmt1_ins->immediate];
5862		}
5863		fmt1_ins->parity = 0;
5864		if ((ahc->features & AHC_CMD_CHAN) == 0
5865		 && opcode == AIC_OP_BMOV) {
5866			/*
5867			 * Block move was added at the same time
5868			 * as the command channel.  Verify that
5869			 * this is only a move of a single element
5870			 * and convert the BMOV to a MOV
5871			 * (AND with an immediate of FF).
5872			 */
5873			if (fmt1_ins->immediate != 1)
5874				panic("%s: BMOV not supported\n",
5875				      ahc_name(ahc));
5876			fmt1_ins->opcode = AIC_OP_AND;
5877			fmt1_ins->immediate = 0xff;
5878		}
5879		/* FALLTHROUGH */
5880	case AIC_OP_ROL:
5881		if ((ahc->features & AHC_ULTRA2) != 0) {
5882			int i, count;
5883
5884			/* Calculate odd parity for the instruction */
5885			for (i = 0, count = 0; i < 31; i++) {
5886				uint32_t mask;
5887
5888				mask = 0x01 << i;
5889				if ((instr.integer & mask) != 0)
5890					count++;
5891			}
5892			if ((count & 0x01) == 0)
5893				instr.format1.parity = 1;
5894		} else {
5895			/* Compress the instruction for older sequencers */
5896			if (fmt3_ins != NULL) {
5897				instr.integer =
5898					fmt3_ins->immediate
5899				      | (fmt3_ins->source << 8)
5900				      | (fmt3_ins->address << 16)
5901				      |	(fmt3_ins->opcode << 25);
5902			} else {
5903				instr.integer =
5904					fmt1_ins->immediate
5905				      | (fmt1_ins->source << 8)
5906				      | (fmt1_ins->destination << 16)
5907				      |	(fmt1_ins->ret << 24)
5908				      |	(fmt1_ins->opcode << 25);
5909			}
5910		}
5911		/* The sequencer is a little endian cpu */
5912		instr.integer = ahc_htole32(instr.integer);
5913		ahc_outsb(ahc, SEQRAM, instr.bytes, 4);
5914		break;
5915	default:
5916		panic("Unknown opcode encountered in seq program");
5917		break;
5918	}
5919}
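
#if 0
/*
 * Illustrative sketch only (never built): the odd-parity calculation used
 * above for Ultra2 sequencers.  The low 31 bits of the instruction word
 * are counted; if an even number of them are set, the parity bit must be
 * set so the full 32-bit word always carries an odd number of ones.
 * sketch_odd_parity() is a hypothetical helper mirroring that loop.
 */
static uint32_t
sketch_odd_parity(uint32_t instr)
{
	int i, count;

	for (i = 0, count = 0; i < 31; i++) {
		if ((instr & (0x01u << i)) != 0)
			count++;
	}
	return ((count & 0x01) == 0 ? 1 : 0);
}
#endif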
5920
5921void
5922ahc_dump_card_state(struct ahc_softc *ahc)
5923{
5924	struct scb *scb;
5925	struct scb_tailq *untagged_q;
5926	int target;
5927	int maxtarget;
5928	int i;
5929	uint8_t qinpos;
5930	uint8_t qintail;
5931	uint8_t qoutpos;
5932	uint8_t scb_index;
5933	uint8_t saved_scbptr;
5934
5935	saved_scbptr = ahc_inb(ahc, SCBPTR);
5936
5937	printf("%s: Dumping Card State at SEQADDR 0x%x\n",
5938	       ahc_name(ahc),
5939	       ahc_inb(ahc, SEQADDR0) | (ahc_inb(ahc, SEQADDR1) << 8));
5940
5941	printf("SCB count = %d\n", ahc->scb_data->numscbs);
5942	printf("Kernel NEXTQSCB = %d\n", ahc->next_queued_scb->hscb->tag);
5943	printf("Card NEXTQSCB = %d\n", ahc_inb(ahc, NEXT_QUEUED_SCB));
5944	/* QINFIFO */
5945	printf("QINFIFO entries: ");
5946	if ((ahc->features & AHC_QUEUE_REGS) != 0) {
5947		qinpos = ahc_inb(ahc, SNSCB_QOFF);
5948		ahc_outb(ahc, SNSCB_QOFF, qinpos);
5949	} else
5950		qinpos = ahc_inb(ahc, QINPOS);
5951	qintail = ahc->qinfifonext;
5952	while (qinpos != qintail) {
5953		printf("%d ", ahc->qinfifo[qinpos]);
5954		qinpos++;
5955	}
5956	printf("\n");
5957
5958	printf("Waiting Queue entries: ");
5959	scb_index = ahc_inb(ahc, WAITING_SCBH);
5960	i = 0;
5961	while (scb_index != SCB_LIST_NULL && i++ < 256) {
5962		ahc_outb(ahc, SCBPTR, scb_index);
5963		printf("%d:%d ", scb_index, ahc_inb(ahc, SCB_TAG));
5964		scb_index = ahc_inb(ahc, SCB_NEXT);
5965	}
5966	printf("\n");
5967
5968	printf("Disconnected Queue entries: ");
5969	scb_index = ahc_inb(ahc, DISCONNECTED_SCBH);
5970	i = 0;
5971	while (scb_index != SCB_LIST_NULL && i++ < 256) {
5972		ahc_outb(ahc, SCBPTR, scb_index);
5973		printf("%d:%d ", scb_index, ahc_inb(ahc, SCB_TAG));
5974		scb_index = ahc_inb(ahc, SCB_NEXT);
5975	}
5976	printf("\n");
5977
5978	printf("QOUTFIFO entries: ");
5979	qoutpos = ahc->qoutfifonext;
5980	i = 0;
5981	while (ahc->qoutfifo[qoutpos] != SCB_LIST_NULL && i++ < 256) {
5982		printf("%d ", ahc->qoutfifo[qoutpos]);
5983		qoutpos++;
5984	}
5985	printf("\n");
5986
5987	printf("Sequencer Free SCB List: ");
5988	scb_index = ahc_inb(ahc, FREE_SCBH);
5989	i = 0;
5990	while (scb_index != SCB_LIST_NULL && i++ < 256) {
5991		ahc_outb(ahc, SCBPTR, scb_index);
5992		printf("%d ", scb_index);
5993		scb_index = ahc_inb(ahc, SCB_NEXT);
5994	}
5995	printf("\n");
5996
5997	printf("Pending list: ");
5998	i = 0;
5999	LIST_FOREACH(scb, &ahc->pending_scbs, pending_links) {
6000		if (i++ > 256)
6001			break;
6002		printf("%d ", scb->hscb->tag);
6003	}
6004	printf("\n");
6005
6006	printf("Kernel Free SCB list: ");
6007	i = 0;
6008	SLIST_FOREACH(scb, &ahc->scb_data->free_scbs, links.sle) {
6009		if (i++ > 256)
6010			break;
6011		printf("%d ", scb->hscb->tag);
6012	}
6013	printf("\n");
6014
6015	maxtarget = (ahc->features & (AHC_WIDE|AHC_TWIN)) ? 15 : 7;
6016	for (target = 0; target <= maxtarget; target++) {
6017		untagged_q = &ahc->untagged_queues[target];
6018		if (TAILQ_FIRST(untagged_q) == NULL)
6019			continue;
6020		printf("Untagged Q(%d): ", target);
6021		i = 0;
6022		TAILQ_FOREACH(scb, untagged_q, links.tqe) {
6023			if (i++ > 256)
6024				break;
6025			printf("%d ", scb->hscb->tag);
6026		}
6027		printf("\n");
6028	}
6029
6030	ahc_platform_dump_card_state(ahc);
6031	ahc_outb(ahc, SCBPTR, saved_scbptr);
6032}
6033
6034/************************* Target Mode ****************************************/
6035#ifdef AHC_TARGET_MODE
6036cam_status
6037ahc_find_tmode_devs(struct ahc_softc *ahc, struct cam_sim *sim, union ccb *ccb,
6038		    struct tmode_tstate **tstate, struct tmode_lstate **lstate,
6039		    int notfound_failure)
6040{
6041
6042	if ((ahc->features & AHC_TARGETMODE) == 0)
6043		return (CAM_REQ_INVALID);
6044
6045	/*
6046	 * Handle the 'black hole' device that sucks up
6047	 * requests to unattached luns on enabled targets.
6048	 */
6049	if (ccb->ccb_h.target_id == CAM_TARGET_WILDCARD
6050	 && ccb->ccb_h.target_lun == CAM_LUN_WILDCARD) {
6051		*tstate = NULL;
6052		*lstate = ahc->black_hole;
6053	} else {
6054		u_int max_id;
6055
6056		max_id = (ahc->features & AHC_WIDE) ? 15 : 7;
6057		if (ccb->ccb_h.target_id > max_id)
6058			return (CAM_TID_INVALID);
6059
6060		if (ccb->ccb_h.target_lun >= AHC_NUM_LUNS)
6061			return (CAM_LUN_INVALID);
6062
6063		*tstate = ahc->enabled_targets[ccb->ccb_h.target_id];
6064		*lstate = NULL;
6065		if (*tstate != NULL)
6066			*lstate =
6067			    (*tstate)->enabled_luns[ccb->ccb_h.target_lun];
6068	}
6069
6070	if (notfound_failure != 0 && *lstate == NULL)
6071		return (CAM_PATH_INVALID);
6072
6073	return (CAM_REQ_CMP);
6074}
6075
6076void
6077ahc_handle_en_lun(struct ahc_softc *ahc, struct cam_sim *sim, union ccb *ccb)
6078{
6079	struct	   tmode_tstate *tstate;
6080	struct	   tmode_lstate *lstate;
6081	struct	   ccb_en_lun *cel;
6082	cam_status status;
6083	u_int	   target;
6084	u_int	   lun;
6085	u_int	   target_mask;
6086	u_long	   s;
6087	char	   channel;
6088
6089	status = ahc_find_tmode_devs(ahc, sim, ccb, &tstate, &lstate,
6090				     /*notfound_failure*/FALSE);
6091
6092	if (status != CAM_REQ_CMP) {
6093		ccb->ccb_h.status = status;
6094		return;
6095	}
6096
6097	if ((ahc->features & AHC_MULTIROLE) != 0) {
6098		u_int	   our_id;
6099
6100		if (cam_sim_bus(sim) == 0)
6101			our_id = ahc->our_id;
6102		else
6103			our_id = ahc->our_id_b;
6104
6105		if (ccb->ccb_h.target_id != our_id) {
6106			if ((ahc->features & AHC_MULTI_TID) != 0
6107		   	 && (ahc->flags & AHC_INITIATORROLE) != 0) {
6108				/*
6109				 * Only allow additional targets if
6110				 * the initiator role is disabled.
6111				 * The hardware cannot handle a re-select-in
6112				 * on the initiator id during a re-select-out
6113				 * on a different target id.
6114				 */
6115				status = CAM_TID_INVALID;
6116			} else if ((ahc->flags & AHC_INITIATORROLE) != 0
6117				|| ahc->enabled_luns > 0) {
6118				/*
6119				 * Only allow our target id to change
6120				 * if the initiator role is not configured
6121				 * and there are no enabled luns which
6122				 * are attached to the currently registered
6123				 * scsi id.
6124				 */
6125				status = CAM_TID_INVALID;
6126			}
6127		}
6128	}
6129
6130	if (status != CAM_REQ_CMP) {
6131		ccb->ccb_h.status = status;
6132		return;
6133	}
6134
6135	/*
6136	 * We now have an id that is valid.
6137	 * If we aren't in target mode, switch modes.
6138	 */
6139	if ((ahc->flags & AHC_TARGETROLE) == 0
6140	 && ccb->ccb_h.target_id != CAM_TARGET_WILDCARD) {
6141		u_long	s;
6142
6143		printf("Configuring Target Mode\n");
6144		ahc_lock(ahc, &s);
6145		if (LIST_FIRST(&ahc->pending_scbs) != NULL) {
6146			ccb->ccb_h.status = CAM_BUSY;
6147			ahc_unlock(ahc, &s);
6148			return;
6149		}
6150		ahc->flags |= AHC_TARGETROLE;
6151		if ((ahc->features & AHC_MULTIROLE) == 0)
6152			ahc->flags &= ~AHC_INITIATORROLE;
6153		pause_sequencer(ahc);
6154		ahc_loadseq(ahc);
6155		ahc_unlock(ahc, &s);
6156	}
6157	cel = &ccb->cel;
6158	target = ccb->ccb_h.target_id;
6159	lun = ccb->ccb_h.target_lun;
6160	channel = SIM_CHANNEL(ahc, sim);
6161	target_mask = 0x01 << target;
6162	if (channel == 'B')
6163		target_mask <<= 8;
6164
6165	if (cel->enable != 0) {
6166		u_int scsiseq;
6167
6168		/* Are we already enabled?? */
6169		if (lstate != NULL) {
6170			xpt_print_path(ccb->ccb_h.path);
6171			printf("Lun already enabled\n");
6172			ccb->ccb_h.status = CAM_LUN_ALRDY_ENA;
6173			return;
6174		}
6175
6176		if (cel->grp6_len != 0
6177		 || cel->grp7_len != 0) {
6178			/*
6179			 * Don't (yet?) support vendor
6180			 * specific commands.
6181			 */
6182			ccb->ccb_h.status = CAM_REQ_INVALID;
6183			printf("Non-zero Group Codes\n");
6184			return;
6185		}
6186
6187		/*
6188		 * Seems to be okay.
6189		 * Setup our data structures.
6190		 */
6191		if (target != CAM_TARGET_WILDCARD && tstate == NULL) {
6192			tstate = ahc_alloc_tstate(ahc, target, channel);
6193			if (tstate == NULL) {
6194				xpt_print_path(ccb->ccb_h.path);
6195				printf("Couldn't allocate tstate\n");
6196				ccb->ccb_h.status = CAM_RESRC_UNAVAIL;
6197				return;
6198			}
6199		}
6200		lstate = malloc(sizeof(*lstate), M_DEVBUF, M_NOWAIT);
6201		if (lstate == NULL) {
6202			xpt_print_path(ccb->ccb_h.path);
6203			printf("Couldn't allocate lstate\n");
6204			ccb->ccb_h.status = CAM_RESRC_UNAVAIL;
6205			return;
6206		}
6207		memset(lstate, 0, sizeof(*lstate));
6208		status = xpt_create_path(&lstate->path, /*periph*/NULL,
6209					 xpt_path_path_id(ccb->ccb_h.path),
6210					 xpt_path_target_id(ccb->ccb_h.path),
6211					 xpt_path_lun_id(ccb->ccb_h.path));
6212		if (status != CAM_REQ_CMP) {
6213			free(lstate, M_DEVBUF);
6214			xpt_print_path(ccb->ccb_h.path);
6215			printf("Couldn't allocate path\n");
6216			ccb->ccb_h.status = CAM_RESRC_UNAVAIL;
6217			return;
6218		}
6219		SLIST_INIT(&lstate->accept_tios);
6220		SLIST_INIT(&lstate->immed_notifies);
6221		ahc_lock(ahc, &s);
6222		pause_sequencer(ahc);
6223		if (target != CAM_TARGET_WILDCARD) {
6224			tstate->enabled_luns[lun] = lstate;
6225			ahc->enabled_luns++;
6226
6227			if ((ahc->features & AHC_MULTI_TID) != 0) {
6228				u_int targid_mask;
6229
6230				targid_mask = ahc_inb(ahc, TARGID)
6231					    | (ahc_inb(ahc, TARGID + 1) << 8);
6232
6233				targid_mask |= target_mask;
6234				ahc_outb(ahc, TARGID, targid_mask);
6235				ahc_outb(ahc, TARGID+1, (targid_mask >> 8));
6236
6237				ahc_update_scsiid(ahc, targid_mask);
6238			} else {
6239				u_int our_id;
6240				char  channel;
6241
6242				channel = SIM_CHANNEL(ahc, sim);
6243				our_id = SIM_SCSI_ID(ahc, sim);
6244
6245				/*
6246				 * This can only happen if selections
6247				 * are not enabled
6248				 */
6249				if (target != our_id) {
6250					u_int sblkctl;
6251					char  cur_channel;
6252					int   swap;
6253
6254					sblkctl = ahc_inb(ahc, SBLKCTL);
6255					cur_channel = (sblkctl & SELBUSB)
6256						    ? 'B' : 'A';
6257					if ((ahc->features & AHC_TWIN) == 0)
6258						cur_channel = 'A';
6259					swap = cur_channel != channel;
6260					if (channel == 'A')
6261						ahc->our_id = target;
6262					else
6263						ahc->our_id_b = target;
6264
6265					if (swap)
6266						ahc_outb(ahc, SBLKCTL,
6267							 sblkctl ^ SELBUSB);
6268
6269					ahc_outb(ahc, SCSIID, target);
6270
6271					if (swap)
6272						ahc_outb(ahc, SBLKCTL, sblkctl);
6273				}
6274			}
6275		} else
6276			ahc->black_hole = lstate;
6277		/* Allow select-in operations */
6278		if (ahc->black_hole != NULL && ahc->enabled_luns > 0) {
6279			scsiseq = ahc_inb(ahc, SCSISEQ_TEMPLATE);
6280			scsiseq |= ENSELI;
6281			ahc_outb(ahc, SCSISEQ_TEMPLATE, scsiseq);
6282			scsiseq = ahc_inb(ahc, SCSISEQ);
6283			scsiseq |= ENSELI;
6284			ahc_outb(ahc, SCSISEQ, scsiseq);
6285		}
6286		unpause_sequencer(ahc);
6287		ahc_unlock(ahc, &s);
6288		ccb->ccb_h.status = CAM_REQ_CMP;
6289		xpt_print_path(ccb->ccb_h.path);
6290		printf("Lun now enabled for target mode\n");
6291	} else {
6292		struct scb *scb;
6293		int i, empty;
6294
6295		if (lstate == NULL) {
6296			ccb->ccb_h.status = CAM_LUN_INVALID;
6297			return;
6298		}
6299
6300		ahc_lock(ahc, &s);
6301
6302		ccb->ccb_h.status = CAM_REQ_CMP;
6303		LIST_FOREACH(scb, &ahc->pending_scbs, pending_links) {
6304			struct ccb_hdr *ccbh;
6305
6306			ccbh = &scb->io_ctx->ccb_h;
6307			if (ccbh->func_code == XPT_CONT_TARGET_IO
6308			 && !xpt_path_comp(ccbh->path, ccb->ccb_h.path)){
6309				printf("CTIO pending\n");
6310				ccb->ccb_h.status = CAM_REQ_INVALID;
6311				ahc_unlock(ahc, &s);
6312				return;
6313			}
6314		}
6315
6316		if (SLIST_FIRST(&lstate->accept_tios) != NULL) {
6317			printf("ATIOs pending\n");
6318			ccb->ccb_h.status = CAM_REQ_INVALID;
6319		}
6320
6321		if (SLIST_FIRST(&lstate->immed_notifies) != NULL) {
6322			printf("INOTs pending\n");
6323			ccb->ccb_h.status = CAM_REQ_INVALID;
6324		}
6325
6326		if (ccb->ccb_h.status != CAM_REQ_CMP) {
6327			ahc_unlock(ahc, &s);
6328			return;
6329		}
6330
6331		xpt_print_path(ccb->ccb_h.path);
6332		printf("Target mode disabled\n");
6333		xpt_free_path(lstate->path);
6334		free(lstate, M_DEVBUF);
6335
6336		pause_sequencer(ahc);
6337		/* Can we clean up the target too? */
6338		if (target != CAM_TARGET_WILDCARD) {
6339			tstate->enabled_luns[lun] = NULL;
6340			ahc->enabled_luns--;
6341			for (empty = 1, i = 0; i < 8; i++)
6342				if (tstate->enabled_luns[i] != NULL) {
6343					empty = 0;
6344					break;
6345				}
6346
6347			if (empty) {
6348				ahc_free_tstate(ahc, target, channel,
6349						/*force*/FALSE);
6350				if (ahc->features & AHC_MULTI_TID) {
6351					u_int targid_mask;
6352
6353					targid_mask = ahc_inb(ahc, TARGID)
6354						    | (ahc_inb(ahc, TARGID + 1)
6355						       << 8);
6356
6357					targid_mask &= ~target_mask;
6358					ahc_outb(ahc, TARGID, targid_mask);
6359					ahc_outb(ahc, TARGID+1,
6360					 	 (targid_mask >> 8));
6361					ahc_update_scsiid(ahc, targid_mask);
6362				}
6363			}
6364		} else {
6365
6366			ahc->black_hole = NULL;
6367
6368			/*
6369			 * We can't allow selections without
6370			 * our black hole device.
6371			 */
6372			empty = TRUE;
6373		}
6374		if (ahc->enabled_luns == 0) {
6375			/* Disallow select-in */
6376			u_int scsiseq;
6377
6378			scsiseq = ahc_inb(ahc, SCSISEQ_TEMPLATE);
6379			scsiseq &= ~ENSELI;
6380			ahc_outb(ahc, SCSISEQ_TEMPLATE, scsiseq);
6381			scsiseq = ahc_inb(ahc, SCSISEQ);
6382			scsiseq &= ~ENSELI;
6383			ahc_outb(ahc, SCSISEQ, scsiseq);
6384
6385			if ((ahc->features & AHC_MULTIROLE) == 0) {
6386				printf("Configuring Initiator Mode\n");
6387				ahc->flags &= ~AHC_TARGETROLE;
6388				ahc->flags |= AHC_INITIATORROLE;
6389				pause_sequencer(ahc);
6390				ahc_loadseq(ahc);
6391			}
6392		}
6393		unpause_sequencer(ahc);
6394		ahc_unlock(ahc, &s);
6395	}
6396}
6397
6398static void
6399ahc_update_scsiid(struct ahc_softc *ahc, u_int targid_mask)
6400{
6401	u_int scsiid_mask;
6402	u_int scsiid;
6403
6404	if ((ahc->features & AHC_MULTI_TID) == 0)
6405		panic("ahc_update_scsiid called on non-multitid unit\n");
6406
6407	/*
6408	 * Since we will rely on the TARGID mask
6409	 * for selection enables, ensure that OID
6410	 * in SCSIID is not set to some other ID
6411	 * that we don't want to allow selections on.
6412	 */
6413	if ((ahc->features & AHC_ULTRA2) != 0)
6414		scsiid = ahc_inb(ahc, SCSIID_ULTRA2);
6415	else
6416		scsiid = ahc_inb(ahc, SCSIID);
6417	scsiid_mask = 0x1 << (scsiid & OID);
6418	if ((targid_mask & scsiid_mask) == 0) {
6419		u_int our_id;
6420
6421		/* ffs counts from 1 */
6422		our_id = ffs(targid_mask);
6423		if (our_id == 0)
6424			our_id = ahc->our_id;
6425		else
6426			our_id--;
6427		scsiid &= TID;
6428		scsiid |= our_id;
6429	}
6430	if ((ahc->features & AHC_ULTRA2) != 0)
6431		ahc_outb(ahc, SCSIID_ULTRA2, scsiid);
6432	else
6433		ahc_outb(ahc, SCSIID, scsiid);
6434}
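
#if 0
/*
 * Illustrative sketch only (never built): how a fallback "our id" is
 * picked above when the current OID is not part of the TARGID selection
 * mask.  ffs() counts bits from 1, so the result is decremented; if the
 * mask is empty we fall back to a caller-supplied default id.
 * sketch_pick_oid() is a hypothetical helper mirroring that logic.
 */
static u_int
sketch_pick_oid(u_int targid_mask, u_int default_id)
{
	u_int id = ffs(targid_mask);

	if (id == 0)
		return (default_id);
	return (id - 1);
}
#endif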
6435
6436void
6437ahc_run_tqinfifo(struct ahc_softc *ahc, int paused)
6438{
6439	struct target_cmd *cmd;
6440
6441	/*
6442	 * If the card supports auto-access pause,
6443	 * we can access the card directly regardless
6444	 * of whether it is paused or not.
6445	 */
6446	if ((ahc->features & AHC_AUTOPAUSE) != 0)
6447		paused = TRUE;
6448
6449	while ((cmd = &ahc->targetcmds[ahc->tqinfifonext])->cmd_valid != 0) {
6450
6451		/*
6452		 * Only advance through the queue if we
6453		 * have the resources to process the command.
6454		 */
6455		if (ahc_handle_target_cmd(ahc, cmd) != 0)
6456			break;
6457
6458		ahc->tqinfifonext++;
6459		cmd->cmd_valid = 0;
6460
6461		/*
6462	 * Lazily update our position in the target mode incoming
6463		 * command queue as seen by the sequencer.
6464		 */
6465		if ((ahc->tqinfifonext & (HOST_TQINPOS - 1)) == 1) {
6466			if ((ahc->features & AHC_HS_MAILBOX) != 0) {
6467				u_int hs_mailbox;
6468
6469				hs_mailbox = ahc_inb(ahc, HS_MAILBOX);
6470				hs_mailbox &= ~HOST_TQINPOS;
6471				hs_mailbox |= ahc->tqinfifonext & HOST_TQINPOS;
6472				ahc_outb(ahc, HS_MAILBOX, hs_mailbox);
6473			} else {
6474				if (!paused)
6475					pause_sequencer(ahc);
6476				ahc_outb(ahc, KERNEL_TQINPOS,
6477					 ahc->tqinfifonext & HOST_TQINPOS);
6478				if (!paused)
6479					unpause_sequencer(ahc);
6480			}
6481		}
6482	}
6483}
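
#if 0
/*
 * Illustrative sketch only (never built): the lazy index publication used
 * above.  The kernel's position in the target command ring is written
 * back to the sequencer only when the low-order bits of the index wrap to
 * 1, so the register access happens once per batch of commands rather
 * than once per command.  should_publish() is a hypothetical helper that
 * mirrors the test in ahc_run_tqinfifo().
 */
static int
should_publish(u_int tqinfifonext)
{
	return ((tqinfifonext & (HOST_TQINPOS - 1)) == 1);
}
#endif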
6484
6485static int
6486ahc_handle_target_cmd(struct ahc_softc *ahc, struct target_cmd *cmd)
6487{
6488	struct	  tmode_tstate *tstate;
6489	struct	  tmode_lstate *lstate;
6490	struct	  ccb_accept_tio *atio;
6491	uint8_t *byte;
6492	int	  initiator;
6493	int	  target;
6494	int	  lun;
6495
6496	initiator = SCSIID_TARGET(ahc, cmd->scsiid);
6497	target = SCSIID_OUR_ID(cmd->scsiid);
6498	lun    = (cmd->identify & MSG_IDENTIFY_LUNMASK);
6499
6500	byte = cmd->bytes;
6501	tstate = ahc->enabled_targets[target];
6502	lstate = NULL;
6503	if (tstate != NULL)
6504		lstate = tstate->enabled_luns[lun];
6505
6506	/*
6507	 * Commands for disabled luns go to the black hole driver.
6508	 */
6509	if (lstate == NULL)
6510		lstate = ahc->black_hole;
6511
6512	atio = (struct ccb_accept_tio*)SLIST_FIRST(&lstate->accept_tios);
6513	if (atio == NULL) {
6514		ahc->flags |= AHC_TQINFIFO_BLOCKED;
6515		/*
6516		 * Wait for more ATIOs from the peripheral driver for this lun.
6517		 */
6518		return (1);
6519	} else
6520		ahc->flags &= ~AHC_TQINFIFO_BLOCKED;
6521#if 0
6522	printf("Incoming command from %d for %d:%d%s\n",
6523	       initiator, target, lun,
6524	       lstate == ahc->black_hole ? "(Black Holed)" : "");
6525#endif
6526	SLIST_REMOVE_HEAD(&lstate->accept_tios, sim_links.sle);
6527
6528	if (lstate == ahc->black_hole) {
6529		/* Fill in the wildcards */
6530		atio->ccb_h.target_id = target;
6531		atio->ccb_h.target_lun = lun;
6532	}
6533
6534	/*
6535	 * Package it up and send it off to
6536	 * whoever has this lun enabled.
6537	 */
6538	atio->sense_len = 0;
6539	atio->init_id = initiator;
6540	if (byte[0] != 0xFF) {
6541		/* Tag was included */
6542		atio->tag_action = *byte++;
6543		atio->tag_id = *byte++;
6544		atio->ccb_h.flags = CAM_TAG_ACTION_VALID;
6545	} else {
6546		atio->ccb_h.flags = 0;
6547	}
6548	byte++;
6549
6550	/* Okay.  Now determine the cdb size based on the command code */
6551	switch (*byte >> CMD_GROUP_CODE_SHIFT) {
6552	case 0:
6553		atio->cdb_len = 6;
6554		break;
6555	case 1:
6556	case 2:
6557		atio->cdb_len = 10;
6558		break;
6559	case 4:
6560		atio->cdb_len = 16;
6561		break;
6562	case 5:
6563		atio->cdb_len = 12;
6564		break;
6565	case 3:
6566	default:
6567		/* Only copy the opcode. */
6568		atio->cdb_len = 1;
6569		printf("Reserved or VU command code type encountered\n");
6570		break;
6571	}
6572
6573	memcpy(atio->cdb_io.cdb_bytes, byte, atio->cdb_len);
6574
6575	atio->ccb_h.status |= CAM_CDB_RECVD;
6576
6577	if ((cmd->identify & MSG_IDENTIFY_DISCFLAG) == 0) {
6578		/*
6579		 * We weren't allowed to disconnect.
6580		 * We're hanging on the bus until a
6581		 * continue target I/O comes in response
6582		 * to this accept tio.
6583		 */
6584#if 0
6585		printf("Received Immediate Command %d:%d:%d - %p\n",
6586		       initiator, target, lun, ahc->pending_device);
6587#endif
6588		ahc->pending_device = lstate;
6589		ahc_freeze_ccb((union ccb *)atio);
6590		atio->ccb_h.flags |= CAM_DIS_DISCONNECT;
6591	}
6592	xpt_done((union ccb*)atio);
6593	return (0);
6594}
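
#if 0
/*
 * Illustrative sketch only (never built): the SCSI CDB length decode used
 * above.  The top three bits of the opcode (the group code) determine the
 * command length; the reserved and vendor-unique groups fall back to a
 * single byte.  sketch_cdb_len() is a hypothetical helper mirroring the
 * switch statement in ahc_handle_target_cmd().
 */
static int
sketch_cdb_len(uint8_t opcode)
{
	switch (opcode >> CMD_GROUP_CODE_SHIFT) {
	case 0:
		return (6);		/* Group 0: six-byte commands */
	case 1:
	case 2:
		return (10);		/* Groups 1 and 2: ten-byte commands */
	case 4:
		return (16);		/* Group 4: sixteen-byte commands */
	case 5:
		return (12);		/* Group 5: twelve-byte commands */
	case 3:				/* Reserved */
	default:			/* Vendor unique */
		return (1);		/* Only the opcode is copied */
	}
}
#endif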
6595
6596#endif
6597