aic7xxx.c revision 104021
1132497Stjr/*
2132497Stjr * Core routines and tables shareable across OS platforms.
3132497Stjr *
4132497Stjr * Copyright (c) 1994-2002 Justin T. Gibbs.
5132497Stjr * Copyright (c) 2000-2002 Adaptec Inc.
6132497Stjr * All rights reserved.
7132497Stjr *
8132497Stjr * Redistribution and use in source and binary forms, with or without
9132497Stjr * modification, are permitted provided that the following conditions
10132497Stjr * are met:
11132497Stjr * 1. Redistributions of source code must retain the above copyright
12132497Stjr *    notice, this list of conditions, and the following disclaimer,
13132497Stjr *    without modification.
14132497Stjr * 2. Redistributions in binary form must reproduce at minimum a disclaimer
15132497Stjr *    substantially similar to the "NO WARRANTY" disclaimer below
16132497Stjr *    ("Disclaimer") and any redistribution must be conditioned upon
17132497Stjr *    including a substantially similar Disclaimer requirement for further
18132497Stjr *    binary redistribution.
19132497Stjr * 3. Neither the names of the above-listed copyright holders nor the names
20132497Stjr *    of any contributors may be used to endorse or promote products derived
21132497Stjr *    from this software without specific prior written permission.
22132497Stjr *
23132497Stjr * Alternatively, this software may be distributed under the terms of the
24132497Stjr * GNU General Public License ("GPL") version 2 as published by the Free
25132497Stjr * Software Foundation.
26132497Stjr *
27132497Stjr * NO WARRANTY
28132497Stjr * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
29132497Stjr * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
30132497Stjr * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
31132497Stjr * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
32132497Stjr * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
33132497Stjr * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
34132497Stjr * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
35132497Stjr * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
36132497Stjr * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
37132497Stjr * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
38132497Stjr * POSSIBILITY OF SUCH DAMAGES.
39132497Stjr *
40132497Stjr * $Id: //depot/aic7xxx/aic7xxx/aic7xxx.c#79 $
41132497Stjr *
42132497Stjr * $FreeBSD: head/sys/dev/aic7xxx/aic7xxx.c 104021 2002-09-26 21:50:03Z gibbs $
43132497Stjr */
44132497Stjr
45132497Stjr#ifdef __linux__
46132497Stjr#include "aic7xxx_osm.h"
47132497Stjr#include "aic7xxx_inline.h"
48132497Stjr#include "aicasm/aicasm_insformat.h"
49132497Stjr#else
50132497Stjr#include <dev/aic7xxx/aic7xxx_osm.h>
51132497Stjr#include <dev/aic7xxx/aic7xxx_inline.h>
52132497Stjr#include <dev/aic7xxx/aicasm/aicasm_insformat.h>
53132497Stjr#endif
54132497Stjr
55132497Stjr/****************************** Softc Data ************************************/
56132497Stjrstruct ahc_softc_tailq ahc_tailq = TAILQ_HEAD_INITIALIZER(ahc_tailq);
57132497Stjr
58132497Stjr/***************************** Lookup Tables **********************************/
59132497Stjrchar *ahc_chip_names[] =
60132497Stjr{
61132497Stjr	"NONE",
62132497Stjr	"aic7770",
63132497Stjr	"aic7850",
64132497Stjr	"aic7855",
65132497Stjr	"aic7859",
66132497Stjr	"aic7860",
67132497Stjr	"aic7870",
68132497Stjr	"aic7880",
69132497Stjr	"aic7895",
70132497Stjr	"aic7895C",
71132497Stjr	"aic7890/91",
72132497Stjr	"aic7896/97",
73132497Stjr	"aic7892",
74132497Stjr	"aic7899"
75132497Stjr};
76132497Stjrstatic const u_int num_chip_names = NUM_ELEMENTS(ahc_chip_names);
77132497Stjr
78132497Stjr/*
79132497Stjr * Hardware error codes.
80132497Stjr */
81132497Stjrstruct ahc_hard_error_entry {
82132497Stjr        uint8_t errno;
83132497Stjr	char *errmesg;
84132497Stjr};
85132497Stjr
86132497Stjrstatic struct ahc_hard_error_entry ahc_hard_errors[] = {
87132497Stjr	{ ILLHADDR,	"Illegal Host Access" },
88132497Stjr	{ ILLSADDR,	"Illegal Sequencer Address referrenced" },
89132497Stjr	{ ILLOPCODE,	"Illegal Opcode in sequencer program" },
90132497Stjr	{ SQPARERR,	"Sequencer Parity Error" },
91132497Stjr	{ DPARERR,	"Data-path Parity Error" },
92132497Stjr	{ MPARERR,	"Scratch or SCB Memory Parity Error" },
93132497Stjr	{ PCIERRSTAT,	"PCI Error detected" },
94132497Stjr	{ CIOPARERR,	"CIOBUS Parity Error" },
95132497Stjr};
96132497Stjrstatic const u_int num_errors = NUM_ELEMENTS(ahc_hard_errors);
97132497Stjr
98132497Stjrstatic struct ahc_phase_table_entry ahc_phase_table[] =
99132497Stjr{
100132497Stjr	{ P_DATAOUT,	MSG_NOOP,		"in Data-out phase"	},
101132497Stjr	{ P_DATAIN,	MSG_INITIATOR_DET_ERR,	"in Data-in phase"	},
102132497Stjr	{ P_DATAOUT_DT,	MSG_NOOP,		"in DT Data-out phase"	},
103132497Stjr	{ P_DATAIN_DT,	MSG_INITIATOR_DET_ERR,	"in DT Data-in phase"	},
104132497Stjr	{ P_COMMAND,	MSG_NOOP,		"in Command phase"	},
105132497Stjr	{ P_MESGOUT,	MSG_NOOP,		"in Message-out phase"	},
106132497Stjr	{ P_STATUS,	MSG_INITIATOR_DET_ERR,	"in Status phase"	},
107132497Stjr	{ P_MESGIN,	MSG_PARITY_ERROR,	"in Message-in phase"	},
108132497Stjr	{ P_BUSFREE,	MSG_NOOP,		"while idle"		},
109132497Stjr	{ 0,		MSG_NOOP,		"in unknown phase"	}
110132497Stjr};
111132497Stjr
112/*
113 * In most cases we only wish to itterate over real phases, so
114 * exclude the last element from the count.
115 */
116static const u_int num_phases = NUM_ELEMENTS(ahc_phase_table) - 1;
117
118/*
119 * Valid SCSIRATE values.  (p. 3-17)
120 * Provides a mapping of tranfer periods in ns to the proper value to
121 * stick in the scsixfer reg.
122 */
123static struct ahc_syncrate ahc_syncrates[] =
124{
125      /* ultra2    fast/ultra  period     rate */
126	{ 0x42,      0x000,      9,      "80.0" },
127	{ 0x03,      0x000,     10,      "40.0" },
128	{ 0x04,      0x000,     11,      "33.0" },
129	{ 0x05,      0x100,     12,      "20.0" },
130	{ 0x06,      0x110,     15,      "16.0" },
131	{ 0x07,      0x120,     18,      "13.4" },
132	{ 0x08,      0x000,     25,      "10.0" },
133	{ 0x19,      0x010,     31,      "8.0"  },
134	{ 0x1a,      0x020,     37,      "6.67" },
135	{ 0x1b,      0x030,     43,      "5.7"  },
136	{ 0x1c,      0x040,     50,      "5.0"  },
137	{ 0x00,      0x050,     56,      "4.4"  },
138	{ 0x00,      0x060,     62,      "4.0"  },
139	{ 0x00,      0x070,     68,      "3.6"  },
140	{ 0x00,      0x000,      0,      NULL   }
141};
142
143/* Our Sequencer Program */
144#include "aic7xxx_seq.h"
145
146/**************************** Function Declarations ***************************/
147static void		ahc_force_renegotiation(struct ahc_softc *ahc);
148static struct ahc_tmode_tstate*
149			ahc_alloc_tstate(struct ahc_softc *ahc,
150					 u_int scsi_id, char channel);
151#ifdef AHC_TARGET_MODE
152static void		ahc_free_tstate(struct ahc_softc *ahc,
153					u_int scsi_id, char channel, int force);
154#endif
155static struct ahc_syncrate*
156			ahc_devlimited_syncrate(struct ahc_softc *ahc,
157					        struct ahc_initiator_tinfo *,
158						u_int *period,
159						u_int *ppr_options,
160						role_t role);
161static void		ahc_update_pending_scbs(struct ahc_softc *ahc);
162static void		ahc_fetch_devinfo(struct ahc_softc *ahc,
163					  struct ahc_devinfo *devinfo);
164static void		ahc_print_devinfo(struct ahc_softc *ahc,
165					  struct ahc_devinfo *devinfo);
166static void		ahc_scb_devinfo(struct ahc_softc *ahc,
167					struct ahc_devinfo *devinfo,
168					struct scb *scb);
169static void		ahc_assert_atn(struct ahc_softc *ahc);
170static void		ahc_setup_initiator_msgout(struct ahc_softc *ahc,
171						   struct ahc_devinfo *devinfo,
172						   struct scb *scb);
173static void		ahc_build_transfer_msg(struct ahc_softc *ahc,
174					       struct ahc_devinfo *devinfo);
175static void		ahc_construct_sdtr(struct ahc_softc *ahc,
176					   struct ahc_devinfo *devinfo,
177					   u_int period, u_int offset);
178static void		ahc_construct_wdtr(struct ahc_softc *ahc,
179					   struct ahc_devinfo *devinfo,
180					   u_int bus_width);
181static void		ahc_construct_ppr(struct ahc_softc *ahc,
182					  struct ahc_devinfo *devinfo,
183					  u_int period, u_int offset,
184					  u_int bus_width, u_int ppr_options);
185static void		ahc_clear_msg_state(struct ahc_softc *ahc);
186static void		ahc_handle_message_phase(struct ahc_softc *ahc);
187typedef enum {
188	AHCMSG_1B,
189	AHCMSG_2B,
190	AHCMSG_EXT
191} ahc_msgtype;
192static int		ahc_sent_msg(struct ahc_softc *ahc, ahc_msgtype type,
193				     u_int msgval, int full);
194static int		ahc_parse_msg(struct ahc_softc *ahc,
195				      struct ahc_devinfo *devinfo);
196static int		ahc_handle_msg_reject(struct ahc_softc *ahc,
197					      struct ahc_devinfo *devinfo);
198static void		ahc_handle_ign_wide_residue(struct ahc_softc *ahc,
199						struct ahc_devinfo *devinfo);
200static void		ahc_reinitialize_dataptrs(struct ahc_softc *ahc);
201static void		ahc_handle_devreset(struct ahc_softc *ahc,
202					    struct ahc_devinfo *devinfo,
203					    cam_status status, char *message,
204					    int verbose_level);
205#if AHC_TARGET_MODE
206static void		ahc_setup_target_msgin(struct ahc_softc *ahc,
207					       struct ahc_devinfo *devinfo,
208					       struct scb *scb);
209#endif
210
211static bus_dmamap_callback_t	ahc_dmamap_cb;
212static void			ahc_build_free_scb_list(struct ahc_softc *ahc);
213static int			ahc_init_scbdata(struct ahc_softc *ahc);
214static void			ahc_fini_scbdata(struct ahc_softc *ahc);
215static void		ahc_qinfifo_requeue(struct ahc_softc *ahc,
216					    struct scb *prev_scb,
217					    struct scb *scb);
218static int		ahc_qinfifo_count(struct ahc_softc *ahc);
219static u_int		ahc_rem_scb_from_disc_list(struct ahc_softc *ahc,
220						   u_int prev, u_int scbptr);
221static void		ahc_add_curscb_to_free_list(struct ahc_softc *ahc);
222static u_int		ahc_rem_wscb(struct ahc_softc *ahc,
223				     u_int scbpos, u_int prev);
224static void		ahc_reset_current_bus(struct ahc_softc *ahc);
225#ifdef AHC_DUMP_SEQ
226static void		ahc_dumpseq(struct ahc_softc *ahc);
227#endif
228static void		ahc_loadseq(struct ahc_softc *ahc);
229static int		ahc_check_patch(struct ahc_softc *ahc,
230					struct patch **start_patch,
231					u_int start_instr, u_int *skip_addr);
232static void		ahc_download_instr(struct ahc_softc *ahc,
233					   u_int instrptr, uint8_t *dconsts);
234#ifdef AHC_TARGET_MODE
235static void		ahc_queue_lstate_event(struct ahc_softc *ahc,
236					       struct ahc_tmode_lstate *lstate,
237					       u_int initiator_id,
238					       u_int event_type,
239					       u_int event_arg);
240static void		ahc_update_scsiid(struct ahc_softc *ahc,
241					  u_int targid_mask);
242static int		ahc_handle_target_cmd(struct ahc_softc *ahc,
243					      struct target_cmd *cmd);
244#endif
245/************************* Sequencer Execution Control ************************/
246/*
247 * Restart the sequencer program from address zero
248 */
249void
250ahc_restart(struct ahc_softc *ahc)
251{
252
253	ahc_pause(ahc);
254
255	/* No more pending messages. */
256	ahc_clear_msg_state(ahc);
257
258	ahc_outb(ahc, SCSISIGO, 0);		/* De-assert BSY */
259	ahc_outb(ahc, MSG_OUT, MSG_NOOP);	/* No message to send */
260	ahc_outb(ahc, SXFRCTL1, ahc_inb(ahc, SXFRCTL1) & ~BITBUCKET);
261	ahc_outb(ahc, LASTPHASE, P_BUSFREE);
262	ahc_outb(ahc, SAVED_SCSIID, 0xFF);
263	ahc_outb(ahc, SAVED_LUN, 0xFF);
264
265	/*
266	 * Ensure that the sequencer's idea of TQINPOS
267	 * matches our own.  The sequencer increments TQINPOS
268	 * only after it sees a DMA complete and a reset could
269	 * occur before the increment leaving the kernel to believe
270	 * the command arrived but the sequencer to not.
271	 */
272	ahc_outb(ahc, TQINPOS, ahc->tqinfifonext);
273
274	/* Always allow reselection */
275	ahc_outb(ahc, SCSISEQ,
276		 ahc_inb(ahc, SCSISEQ_TEMPLATE) & (ENSELI|ENRSELI|ENAUTOATNP));
277	if ((ahc->features & AHC_CMD_CHAN) != 0) {
278		/* Ensure that no DMA operations are in progress */
279		ahc_outb(ahc, CCSCBCNT, 0);
280		ahc_outb(ahc, CCSGCTL, 0);
281		ahc_outb(ahc, CCSCBCTL, 0);
282	}
283	/*
284	 * If we were in the process of DMA'ing SCB data into
285	 * an SCB, replace that SCB on the free list.  This prevents
286	 * an SCB leak.
287	 */
288	if ((ahc_inb(ahc, SEQ_FLAGS2) & SCB_DMA) != 0) {
289		ahc_add_curscb_to_free_list(ahc);
290		ahc_outb(ahc, SEQ_FLAGS2,
291			 ahc_inb(ahc, SEQ_FLAGS2) & ~SCB_DMA);
292	}
293	ahc_outb(ahc, MWI_RESIDUAL, 0);
294	ahc_outb(ahc, SEQCTL, FASTMODE);
295	ahc_outb(ahc, SEQADDR0, 0);
296	ahc_outb(ahc, SEQADDR1, 0);
297	ahc_unpause(ahc);
298}
299
300/************************* Input/Output Queues ********************************/
301void
302ahc_run_qoutfifo(struct ahc_softc *ahc)
303{
304	struct scb *scb;
305	u_int  scb_index;
306
307	ahc_sync_qoutfifo(ahc, BUS_DMASYNC_POSTREAD);
308	while (ahc->qoutfifo[ahc->qoutfifonext] != SCB_LIST_NULL) {
309
310		scb_index = ahc->qoutfifo[ahc->qoutfifonext];
311		if ((ahc->qoutfifonext & 0x03) == 0x03) {
312			u_int modnext;
313
314			/*
315			 * Clear 32bits of QOUTFIFO at a time
316			 * so that we don't clobber an incoming
317			 * byte DMA to the array on architectures
318			 * that only support 32bit load and store
319			 * operations.
320			 */
321			modnext = ahc->qoutfifonext & ~0x3;
322			*((uint32_t *)(&ahc->qoutfifo[modnext])) = 0xFFFFFFFFUL;
323			ahc_dmamap_sync(ahc, ahc->shared_data_dmat,
324					ahc->shared_data_dmamap,
325					/*offset*/modnext, /*len*/4,
326					BUS_DMASYNC_PREREAD);
327		}
328		ahc->qoutfifonext++;
329
330		scb = ahc_lookup_scb(ahc, scb_index);
331		if (scb == NULL) {
332			printf("%s: WARNING no command for scb %d "
333			       "(cmdcmplt)\nQOUTPOS = %d\n",
334			       ahc_name(ahc), scb_index,
335			       (ahc->qoutfifonext - 1) & 0xFF);
336			continue;
337		}
338
339		/*
340		 * Save off the residual
341		 * if there is one.
342		 */
343		ahc_update_residual(ahc, scb);
344		ahc_done(ahc, scb);
345	}
346}
347
348void
349ahc_run_untagged_queues(struct ahc_softc *ahc)
350{
351	int i;
352
353	for (i = 0; i < 16; i++)
354		ahc_run_untagged_queue(ahc, &ahc->untagged_queues[i]);
355}
356
357void
358ahc_run_untagged_queue(struct ahc_softc *ahc, struct scb_tailq *queue)
359{
360	struct scb *scb;
361
362	if (ahc->untagged_queue_lock != 0)
363		return;
364
365	if ((scb = TAILQ_FIRST(queue)) != NULL
366	 && (scb->flags & SCB_ACTIVE) == 0) {
367		scb->flags |= SCB_ACTIVE;
368		ahc_queue_scb(ahc, scb);
369	}
370}
371
372/************************* Interrupt Handling *********************************/
373void
374ahc_handle_brkadrint(struct ahc_softc *ahc)
375{
376	/*
377	 * We upset the sequencer :-(
378	 * Lookup the error message
379	 */
380	int i;
381	int error;
382
383	error = ahc_inb(ahc, ERROR);
384	for (i = 0; error != 1 && i < num_errors; i++)
385		error >>= 1;
386	printf("%s: brkadrint, %s at seqaddr = 0x%x\n",
387	       ahc_name(ahc), ahc_hard_errors[i].errmesg,
388	       ahc_inb(ahc, SEQADDR0) |
389	       (ahc_inb(ahc, SEQADDR1) << 8));
390
391	ahc_dump_card_state(ahc);
392
393	/* Tell everyone that this HBA is no longer availible */
394	ahc_abort_scbs(ahc, CAM_TARGET_WILDCARD, ALL_CHANNELS,
395		       CAM_LUN_WILDCARD, SCB_LIST_NULL, ROLE_UNKNOWN,
396		       CAM_NO_HBA);
397
398	/* Disable all interrupt sources by resetting the controller */
399	ahc_shutdown(ahc);
400}
401
402void
403ahc_handle_seqint(struct ahc_softc *ahc, u_int intstat)
404{
405	struct scb *scb;
406	struct ahc_devinfo devinfo;
407
408	ahc_fetch_devinfo(ahc, &devinfo);
409
410	/*
411	 * Clear the upper byte that holds SEQINT status
412	 * codes and clear the SEQINT bit. We will unpause
413	 * the sequencer, if appropriate, after servicing
414	 * the request.
415	 */
416	ahc_outb(ahc, CLRINT, CLRSEQINT);
417	switch (intstat & SEQINT_MASK) {
418	case BAD_STATUS:
419	{
420		u_int  scb_index;
421		struct hardware_scb *hscb;
422
423		/*
424		 * Set the default return value to 0 (don't
425		 * send sense).  The sense code will change
426		 * this if needed.
427		 */
428		ahc_outb(ahc, RETURN_1, 0);
429
430		/*
431		 * The sequencer will notify us when a command
432		 * has an error that would be of interest to
433		 * the kernel.  This allows us to leave the sequencer
434		 * running in the common case of command completes
435		 * without error.  The sequencer will already have
436		 * dma'd the SCB back up to us, so we can reference
437		 * the in kernel copy directly.
438		 */
439		scb_index = ahc_inb(ahc, SCB_TAG);
440		scb = ahc_lookup_scb(ahc, scb_index);
441		if (scb == NULL) {
442			ahc_print_devinfo(ahc, &devinfo);
443			printf("ahc_intr - referenced scb "
444			       "not valid during seqint 0x%x scb(%d)\n",
445			       intstat, scb_index);
446			ahc_dump_card_state(ahc);
447			panic("for safety");
448			goto unpause;
449		}
450
451		hscb = scb->hscb;
452
453		/* Don't want to clobber the original sense code */
454		if ((scb->flags & SCB_SENSE) != 0) {
455			/*
456			 * Clear the SCB_SENSE Flag and have
457			 * the sequencer do a normal command
458			 * complete.
459			 */
460			scb->flags &= ~SCB_SENSE;
461			ahc_set_transaction_status(scb, CAM_AUTOSENSE_FAIL);
462			break;
463		}
464		ahc_set_transaction_status(scb, CAM_SCSI_STATUS_ERROR);
465		/* Freeze the queue until the client sees the error. */
466		ahc_freeze_devq(ahc, scb);
467		ahc_freeze_scb(scb);
468		ahc_set_scsi_status(scb, hscb->shared_data.status.scsi_status);
469		switch (hscb->shared_data.status.scsi_status) {
470		case SCSI_STATUS_OK:
471			printf("%s: Interrupted for staus of 0???\n",
472			       ahc_name(ahc));
473			break;
474		case SCSI_STATUS_CMD_TERMINATED:
475		case SCSI_STATUS_CHECK_COND:
476		{
477			struct ahc_dma_seg *sg;
478			struct scsi_sense *sc;
479			struct ahc_initiator_tinfo *targ_info;
480			struct ahc_tmode_tstate *tstate;
481			struct ahc_transinfo *tinfo;
482#ifdef AHC_DEBUG
483			if (ahc_debug & AHC_SHOW_SENSE) {
484				ahc_print_path(ahc, scb);
485				printf("SCB %d: requests Check Status\n",
486				       scb->hscb->tag);
487			}
488#endif
489
490			if (ahc_perform_autosense(scb) == 0)
491				break;
492
493			targ_info = ahc_fetch_transinfo(ahc,
494							devinfo.channel,
495							devinfo.our_scsiid,
496							devinfo.target,
497							&tstate);
498			tinfo = &targ_info->curr;
499			sg = scb->sg_list;
500			sc = (struct scsi_sense *)(&hscb->shared_data.cdb);
501			/*
502			 * Save off the residual if there is one.
503			 */
504			ahc_update_residual(ahc, scb);
505#ifdef AHC_DEBUG
506			if (ahc_debug & AHC_SHOW_SENSE) {
507				ahc_print_path(ahc, scb);
508				printf("Sending Sense\n");
509			}
510#endif
511			sg->addr = ahc_get_sense_bufaddr(ahc, scb);
512			sg->len = ahc_get_sense_bufsize(ahc, scb);
513			sg->len |= AHC_DMA_LAST_SEG;
514
515			/* Fixup byte order */
516			sg->addr = ahc_htole32(sg->addr);
517			sg->len = ahc_htole32(sg->len);
518
519			sc->opcode = REQUEST_SENSE;
520			sc->byte2 = 0;
521			if (tinfo->protocol_version <= SCSI_REV_2
522			 && SCB_GET_LUN(scb) < 8)
523				sc->byte2 = SCB_GET_LUN(scb) << 5;
524			sc->unused[0] = 0;
525			sc->unused[1] = 0;
526			sc->length = sg->len;
527			sc->control = 0;
528
529			/*
530			 * We can't allow the target to disconnect.
531			 * This will be an untagged transaction and
532			 * having the target disconnect will make this
533			 * transaction indestinguishable from outstanding
534			 * tagged transactions.
535			 */
536			hscb->control = 0;
537
538			/*
539			 * This request sense could be because the
540			 * the device lost power or in some other
541			 * way has lost our transfer negotiations.
542			 * Renegotiate if appropriate.  Unit attention
543			 * errors will be reported before any data
544			 * phases occur.
545			 */
546			if (ahc_get_residual(scb)
547			 == ahc_get_transfer_length(scb)) {
548				ahc_update_neg_request(ahc, &devinfo,
549						       tstate, targ_info,
550						       /*force*/TRUE);
551			}
552			if (tstate->auto_negotiate & devinfo.target_mask) {
553				hscb->control |= MK_MESSAGE;
554				scb->flags &= ~SCB_NEGOTIATE;
555				scb->flags |= SCB_AUTO_NEGOTIATE;
556			}
557			hscb->cdb_len = sizeof(*sc);
558			hscb->dataptr = sg->addr;
559			hscb->datacnt = sg->len;
560			hscb->sgptr = scb->sg_list_phys | SG_FULL_RESID;
561			hscb->sgptr = ahc_htole32(hscb->sgptr);
562			scb->sg_count = 1;
563			scb->flags |= SCB_SENSE;
564			ahc_qinfifo_requeue_tail(ahc, scb);
565			ahc_outb(ahc, RETURN_1, SEND_SENSE);
566#ifdef __FreeBSD__
567			/*
568			 * Ensure we have enough time to actually
569			 * retrieve the sense.
570			 */
571			untimeout(ahc_timeout, (caddr_t)scb,
572				  scb->io_ctx->ccb_h.timeout_ch);
573			scb->io_ctx->ccb_h.timeout_ch =
574			    timeout(ahc_timeout, (caddr_t)scb, 5 * hz);
575#endif
576			break;
577		}
578		default:
579			break;
580		}
581		break;
582	}
583	case NO_MATCH:
584	{
585		/* Ensure we don't leave the selection hardware on */
586		ahc_outb(ahc, SCSISEQ,
587			 ahc_inb(ahc, SCSISEQ) & (ENSELI|ENRSELI|ENAUTOATNP));
588
589		printf("%s:%c:%d: no active SCB for reconnecting "
590		       "target - issuing BUS DEVICE RESET\n",
591		       ahc_name(ahc), devinfo.channel, devinfo.target);
592		printf("SAVED_SCSIID == 0x%x, SAVED_LUN == 0x%x, "
593		       "ARG_1 == 0x%x ACCUM = 0x%x\n",
594		       ahc_inb(ahc, SAVED_SCSIID), ahc_inb(ahc, SAVED_LUN),
595		       ahc_inb(ahc, ARG_1), ahc_inb(ahc, ACCUM));
596		printf("SEQ_FLAGS == 0x%x, SCBPTR == 0x%x, BTT == 0x%x, "
597		       "SINDEX == 0x%x\n",
598		       ahc_inb(ahc, SEQ_FLAGS), ahc_inb(ahc, SCBPTR),
599		       ahc_index_busy_tcl(ahc,
600			    BUILD_TCL(ahc_inb(ahc, SAVED_SCSIID),
601				      ahc_inb(ahc, SAVED_LUN))),
602		       ahc_inb(ahc, SINDEX));
603		printf("SCSIID == 0x%x, SCB_SCSIID == 0x%x, SCB_LUN == 0x%x, "
604		       "SCB_TAG == 0x%x, SCB_CONTROL == 0x%x\n",
605		       ahc_inb(ahc, SCSIID), ahc_inb(ahc, SCB_SCSIID),
606		       ahc_inb(ahc, SCB_LUN), ahc_inb(ahc, SCB_TAG),
607		       ahc_inb(ahc, SCB_CONTROL));
608		printf("SCSIBUSL == 0x%x, SCSISIGI == 0x%x\n",
609		       ahc_inb(ahc, SCSIBUSL), ahc_inb(ahc, SCSISIGI));
610		printf("SXFRCTL0 == 0x%x\n", ahc_inb(ahc, SXFRCTL0));
611		printf("SEQCTL == 0x%x\n", ahc_inb(ahc, SEQCTL));
612		ahc_dump_card_state(ahc);
613		ahc->msgout_buf[0] = MSG_BUS_DEV_RESET;
614		ahc->msgout_len = 1;
615		ahc->msgout_index = 0;
616		ahc->msg_type = MSG_TYPE_INITIATOR_MSGOUT;
617		ahc_outb(ahc, MSG_OUT, HOST_MSG);
618		ahc_assert_atn(ahc);
619		break;
620	}
621	case SEND_REJECT:
622	{
623		u_int rejbyte = ahc_inb(ahc, ACCUM);
624		printf("%s:%c:%d: Warning - unknown message received from "
625		       "target (0x%x).  Rejecting\n",
626		       ahc_name(ahc), devinfo.channel, devinfo.target, rejbyte);
627		break;
628	}
629	case NO_IDENT:
630	{
631		/*
632		 * The reconnecting target either did not send an identify
633		 * message, or did, but we didn't find an SCB to match and
634		 * before it could respond to our ATN/abort, it hit a dataphase.
635		 * The only safe thing to do is to blow it away with a bus
636		 * reset.
637		 */
638		int found;
639
640		printf("%s:%c:%d: Target did not send an IDENTIFY message. "
641		       "LASTPHASE = 0x%x, SAVED_SCSIID == 0x%x\n",
642		       ahc_name(ahc), devinfo.channel, devinfo.target,
643		       ahc_inb(ahc, LASTPHASE), ahc_inb(ahc, SAVED_SCSIID));
644		found = ahc_reset_channel(ahc, devinfo.channel,
645					  /*initiate reset*/TRUE);
646		printf("%s: Issued Channel %c Bus Reset. "
647		       "%d SCBs aborted\n", ahc_name(ahc), devinfo.channel,
648		       found);
649		return;
650	}
651	case IGN_WIDE_RES:
652		ahc_handle_ign_wide_residue(ahc, &devinfo);
653		break;
654	case PDATA_REINIT:
655		ahc_reinitialize_dataptrs(ahc);
656		break;
657	case BAD_PHASE:
658	{
659		u_int lastphase;
660
661		lastphase = ahc_inb(ahc, LASTPHASE);
662		printf("%s:%c:%d: unknown scsi bus phase %x, "
663		       "lastphase = 0x%x.  Attempting to continue\n",
664		       ahc_name(ahc), devinfo.channel, devinfo.target,
665		       lastphase, ahc_inb(ahc, SCSISIGI));
666		break;
667	}
668	case MISSED_BUSFREE:
669	{
670		u_int lastphase;
671
672		lastphase = ahc_inb(ahc, LASTPHASE);
673		printf("%s:%c:%d: Missed busfree. "
674		       "Lastphase = 0x%x, Curphase = 0x%x\n",
675		       ahc_name(ahc), devinfo.channel, devinfo.target,
676		       lastphase, ahc_inb(ahc, SCSISIGI));
677		ahc_restart(ahc);
678		return;
679	}
680	case HOST_MSG_LOOP:
681	{
682		/*
683		 * The sequencer has encountered a message phase
684		 * that requires host assistance for completion.
685		 * While handling the message phase(s), we will be
686		 * notified by the sequencer after each byte is
687		 * transfered so we can track bus phase changes.
688		 *
689		 * If this is the first time we've seen a HOST_MSG_LOOP
690		 * interrupt, initialize the state of the host message
691		 * loop.
692		 */
693		if (ahc->msg_type == MSG_TYPE_NONE) {
694			struct scb *scb;
695			u_int scb_index;
696			u_int bus_phase;
697
698			bus_phase = ahc_inb(ahc, SCSISIGI) & PHASE_MASK;
699			if (bus_phase != P_MESGIN
700			 && bus_phase != P_MESGOUT) {
701				printf("ahc_intr: HOST_MSG_LOOP bad "
702				       "phase 0x%x\n",
703				      bus_phase);
704				/*
705				 * Probably transitioned to bus free before
706				 * we got here.  Just punt the message.
707				 */
708				ahc_clear_intstat(ahc);
709				ahc_restart(ahc);
710				return;
711			}
712
713			scb_index = ahc_inb(ahc, SCB_TAG);
714			scb = ahc_lookup_scb(ahc, scb_index);
715			if (devinfo.role == ROLE_INITIATOR) {
716				if (scb == NULL)
717					panic("HOST_MSG_LOOP with "
718					      "invalid SCB %x\n", scb_index);
719
720				if (bus_phase == P_MESGOUT)
721					ahc_setup_initiator_msgout(ahc,
722								   &devinfo,
723								   scb);
724				else {
725					ahc->msg_type =
726					    MSG_TYPE_INITIATOR_MSGIN;
727					ahc->msgin_index = 0;
728				}
729			}
730#if AHC_TARGET_MODE
731			else {
732				if (bus_phase == P_MESGOUT) {
733					ahc->msg_type =
734					    MSG_TYPE_TARGET_MSGOUT;
735					ahc->msgin_index = 0;
736				}
737				else
738					ahc_setup_target_msgin(ahc,
739							       &devinfo,
740							       scb);
741			}
742#endif
743		}
744
745		ahc_handle_message_phase(ahc);
746		break;
747	}
748	case PERR_DETECTED:
749	{
750		/*
751		 * If we've cleared the parity error interrupt
752		 * but the sequencer still believes that SCSIPERR
753		 * is true, it must be that the parity error is
754		 * for the currently presented byte on the bus,
755		 * and we are not in a phase (data-in) where we will
756		 * eventually ack this byte.  Ack the byte and
757		 * throw it away in the hope that the target will
758		 * take us to message out to deliver the appropriate
759		 * error message.
760		 */
761		if ((intstat & SCSIINT) == 0
762		 && (ahc_inb(ahc, SSTAT1) & SCSIPERR) != 0) {
763
764			if ((ahc->features & AHC_DT) == 0) {
765				u_int curphase;
766
767				/*
768				 * The hardware will only let you ack bytes
769				 * if the expected phase in SCSISIGO matches
770				 * the current phase.  Make sure this is
771				 * currently the case.
772				 */
773				curphase = ahc_inb(ahc, SCSISIGI) & PHASE_MASK;
774				ahc_outb(ahc, LASTPHASE, curphase);
775				ahc_outb(ahc, SCSISIGO, curphase);
776			}
777			ahc_inb(ahc, SCSIDATL);
778		}
779		break;
780	}
781	case DATA_OVERRUN:
782	{
783		/*
784		 * When the sequencer detects an overrun, it
785		 * places the controller in "BITBUCKET" mode
786		 * and allows the target to complete its transfer.
787		 * Unfortunately, none of the counters get updated
788		 * when the controller is in this mode, so we have
789		 * no way of knowing how large the overrun was.
790		 */
791		u_int scbindex = ahc_inb(ahc, SCB_TAG);
792		u_int lastphase = ahc_inb(ahc, LASTPHASE);
793		u_int i;
794
795		scb = ahc_lookup_scb(ahc, scbindex);
796		for (i = 0; i < num_phases; i++) {
797			if (lastphase == ahc_phase_table[i].phase)
798				break;
799		}
800		ahc_print_path(ahc, scb);
801		printf("data overrun detected %s."
802		       "  Tag == 0x%x.\n",
803		       ahc_phase_table[i].phasemsg,
804  		       scb->hscb->tag);
805		ahc_print_path(ahc, scb);
806		printf("%s seen Data Phase.  Length = %ld.  NumSGs = %d.\n",
807		       ahc_inb(ahc, SEQ_FLAGS) & DPHASE ? "Have" : "Haven't",
808		       ahc_get_transfer_length(scb), scb->sg_count);
809		if (scb->sg_count > 0) {
810			for (i = 0; i < scb->sg_count; i++) {
811
812				printf("sg[%d] - Addr 0x%x%x : Length %d\n",
813				       i,
814				       (ahc_le32toh(scb->sg_list[i].len) >> 24
815				        & SG_HIGH_ADDR_BITS),
816				       ahc_le32toh(scb->sg_list[i].addr),
817				       ahc_le32toh(scb->sg_list[i].len)
818				       & AHC_SG_LEN_MASK);
819			}
820		}
821		/*
822		 * Set this and it will take effect when the
823		 * target does a command complete.
824		 */
825		ahc_freeze_devq(ahc, scb);
826		if ((scb->flags & SCB_SENSE) == 0) {
827			ahc_set_transaction_status(scb, CAM_DATA_RUN_ERR);
828		} else {
829			scb->flags &= ~SCB_SENSE;
830			ahc_set_transaction_status(scb, CAM_AUTOSENSE_FAIL);
831		}
832		ahc_freeze_scb(scb);
833
834		if ((ahc->features & AHC_ULTRA2) != 0) {
835			/*
836			 * Clear the channel in case we return
837			 * to data phase later.
838			 */
839			ahc_outb(ahc, SXFRCTL0,
840				 ahc_inb(ahc, SXFRCTL0) | CLRSTCNT|CLRCHN);
841			ahc_outb(ahc, SXFRCTL0,
842				 ahc_inb(ahc, SXFRCTL0) | CLRSTCNT|CLRCHN);
843		}
844		if ((ahc->flags & AHC_39BIT_ADDRESSING) != 0) {
845			u_int dscommand1;
846
847			/* Ensure HHADDR is 0 for future DMA operations. */
848			dscommand1 = ahc_inb(ahc, DSCOMMAND1);
849			ahc_outb(ahc, DSCOMMAND1, dscommand1 | HADDLDSEL0);
850			ahc_outb(ahc, HADDR, 0);
851			ahc_outb(ahc, DSCOMMAND1, dscommand1);
852		}
853		break;
854	}
855	case MKMSG_FAILED:
856	{
857		u_int scbindex;
858
859		printf("%s:%c:%d:%d: Attempt to issue message failed\n",
860		       ahc_name(ahc), devinfo.channel, devinfo.target,
861		       devinfo.lun);
862		scbindex = ahc_inb(ahc, SCB_TAG);
863		scb = ahc_lookup_scb(ahc, scbindex);
864		if (scb != NULL
865		 && (scb->flags & SCB_RECOVERY_SCB) != 0)
866			/*
867			 * Ensure that we didn't put a second instance of this
868			 * SCB into the QINFIFO.
869			 */
870			ahc_search_qinfifo(ahc, SCB_GET_TARGET(ahc, scb),
871					   SCB_GET_CHANNEL(ahc, scb),
872					   SCB_GET_LUN(scb), scb->hscb->tag,
873					   ROLE_INITIATOR, /*status*/0,
874					   SEARCH_REMOVE);
875		break;
876	}
877	case NO_FREE_SCB:
878	{
879		printf("%s: No free or disconnected SCBs\n", ahc_name(ahc));
880		ahc_dump_card_state(ahc);
881		panic("for safety");
882		break;
883	}
884	case SCB_MISMATCH:
885	{
886		u_int scbptr;
887
888		scbptr = ahc_inb(ahc, SCBPTR);
889		printf("Bogus TAG after DMA.  SCBPTR %d, tag %d, our tag %d\n",
890		       scbptr, ahc_inb(ahc, ARG_1),
891		       ahc->scb_data->hscbs[scbptr].tag);
892		ahc_dump_card_state(ahc);
893		panic("for saftey");
894		break;
895	}
896	case OUT_OF_RANGE:
897	{
898		printf("%s: BTT calculation out of range\n", ahc_name(ahc));
899		printf("SAVED_SCSIID == 0x%x, SAVED_LUN == 0x%x, "
900		       "ARG_1 == 0x%x ACCUM = 0x%x\n",
901		       ahc_inb(ahc, SAVED_SCSIID), ahc_inb(ahc, SAVED_LUN),
902		       ahc_inb(ahc, ARG_1), ahc_inb(ahc, ACCUM));
903		printf("SEQ_FLAGS == 0x%x, SCBPTR == 0x%x, BTT == 0x%x, "
904		       "SINDEX == 0x%x\n, A == 0x%x\n",
905		       ahc_inb(ahc, SEQ_FLAGS), ahc_inb(ahc, SCBPTR),
906		       ahc_index_busy_tcl(ahc,
907			    BUILD_TCL(ahc_inb(ahc, SAVED_SCSIID),
908				      ahc_inb(ahc, SAVED_LUN))),
909		       ahc_inb(ahc, SINDEX),
910		       ahc_inb(ahc, ACCUM));
911		printf("SCSIID == 0x%x, SCB_SCSIID == 0x%x, SCB_LUN == 0x%x, "
912		       "SCB_TAG == 0x%x, SCB_CONTROL == 0x%x\n",
913		       ahc_inb(ahc, SCSIID), ahc_inb(ahc, SCB_SCSIID),
914		       ahc_inb(ahc, SCB_LUN), ahc_inb(ahc, SCB_TAG),
915		       ahc_inb(ahc, SCB_CONTROL));
916		printf("SCSIBUSL == 0x%x, SCSISIGI == 0x%x\n",
917		       ahc_inb(ahc, SCSIBUSL), ahc_inb(ahc, SCSISIGI));
918		ahc_dump_card_state(ahc);
919		panic("for safety");
920		break;
921	}
922	default:
923		printf("ahc_intr: seqint, "
924		       "intstat == 0x%x, scsisigi = 0x%x\n",
925		       intstat, ahc_inb(ahc, SCSISIGI));
926		break;
927	}
928unpause:
929	/*
930	 *  The sequencer is paused immediately on
931	 *  a SEQINT, so we should restart it when
932	 *  we're done.
933	 */
934	ahc_unpause(ahc);
935}
936
937void
938ahc_handle_scsiint(struct ahc_softc *ahc, u_int intstat)
939{
940	u_int	scb_index;
941	u_int	status0;
942	u_int	status;
943	struct	scb *scb;
944	char	cur_channel;
945	char	intr_channel;
946
947	/* Make sure the sequencer is in a safe location. */
948	ahc_clear_critical_section(ahc);
949
950	if ((ahc->features & AHC_TWIN) != 0
951	 && ((ahc_inb(ahc, SBLKCTL) & SELBUSB) != 0))
952		cur_channel = 'B';
953	else
954		cur_channel = 'A';
955	intr_channel = cur_channel;
956
957	if ((ahc->features & AHC_ULTRA2) != 0)
958		status0 = ahc_inb(ahc, SSTAT0) & IOERR;
959	else
960		status0 = 0;
961	status = ahc_inb(ahc, SSTAT1) & (SELTO|SCSIRSTI|BUSFREE|SCSIPERR);
962	if (status == 0 && status0 == 0) {
963		if ((ahc->features & AHC_TWIN) != 0) {
964			/* Try the other channel */
965		 	ahc_outb(ahc, SBLKCTL, ahc_inb(ahc, SBLKCTL) ^ SELBUSB);
966			status = ahc_inb(ahc, SSTAT1)
967			       & (SELTO|SCSIRSTI|BUSFREE|SCSIPERR);
968			intr_channel = (cur_channel == 'A') ? 'B' : 'A';
969		}
970		if (status == 0) {
971			printf("%s: Spurious SCSI interrupt\n", ahc_name(ahc));
972			ahc_outb(ahc, CLRINT, CLRSCSIINT);
973			ahc_unpause(ahc);
974			return;
975		}
976	}
977
978	scb_index = ahc_inb(ahc, SCB_TAG);
979	scb = ahc_lookup_scb(ahc, scb_index);
980	if (scb != NULL
981	 && (ahc_inb(ahc, SEQ_FLAGS) & IDENTIFY_SEEN) == 0)
982		scb = NULL;
983
984	if ((ahc->features & AHC_ULTRA2) != 0
985	 && (status0 & IOERR) != 0) {
986		int now_lvd;
987
988		now_lvd = ahc_inb(ahc, SBLKCTL) & ENAB40;
989		printf("%s: Transceiver State Has Changed to %s mode\n",
990		       ahc_name(ahc), now_lvd ? "LVD" : "SE");
991		ahc_outb(ahc, CLRSINT0, CLRIOERR);
992		/*
993		 * When transitioning to SE mode, the reset line
994		 * glitches, triggering an arbitration bug in some
995		 * Ultra2 controllers.  This bug is cleared when we
996		 * assert the reset line.  Since a reset glitch has
997		 * already occurred with this transition and a
998		 * transceiver state change is handled just like
999		 * a bus reset anyway, asserting the reset line
1000		 * ourselves is safe.
1001		 */
1002		ahc_reset_channel(ahc, intr_channel,
1003				 /*Initiate Reset*/now_lvd == 0);
1004	} else if ((status & SCSIRSTI) != 0) {
1005		printf("%s: Someone reset channel %c\n",
1006			ahc_name(ahc), intr_channel);
1007		if (intr_channel != cur_channel)
1008		 	ahc_outb(ahc, SBLKCTL, ahc_inb(ahc, SBLKCTL) ^ SELBUSB);
1009		ahc_reset_channel(ahc, intr_channel, /*Initiate Reset*/FALSE);
1010	} else if ((status & SCSIPERR) != 0) {
1011		/*
1012		 * Determine the bus phase and queue an appropriate message.
1013		 * SCSIPERR is latched true as soon as a parity error
1014		 * occurs.  If the sequencer acked the transfer that
1015		 * caused the parity error and the currently presented
1016		 * transfer on the bus has correct parity, SCSIPERR will
1017		 * be cleared by CLRSCSIPERR.  Use this to determine if
1018		 * we should look at the last phase the sequencer recorded,
1019		 * or the current phase presented on the bus.
1020		 */
1021		u_int mesg_out;
1022		u_int curphase;
1023		u_int errorphase;
1024		u_int lastphase;
1025		u_int scsirate;
1026		u_int i;
1027		u_int sstat2;
1028
1029		lastphase = ahc_inb(ahc, LASTPHASE);
1030		curphase = ahc_inb(ahc, SCSISIGI) & PHASE_MASK;
1031		sstat2 = ahc_inb(ahc, SSTAT2);
1032		ahc_outb(ahc, CLRSINT1, CLRSCSIPERR);
1033		/*
1034		 * For all phases save DATA, the sequencer won't
1035		 * automatically ack a byte that has a parity error
1036		 * in it.  So the only way that the current phase
1037		 * could be 'data-in' is if the parity error is for
1038		 * an already acked byte in the data phase.  During
1039		 * synchronous data-in transfers, we may actually
1040		 * ack bytes before latching the current phase in
1041		 * LASTPHASE, leading to the discrepancy between
1042		 * curphase and lastphase.
1043		 */
1044		if ((ahc_inb(ahc, SSTAT1) & SCSIPERR) != 0
1045		 || curphase == P_DATAIN || curphase == P_DATAIN_DT)
1046			errorphase = curphase;
1047		else
1048			errorphase = lastphase;
1049
1050		for (i = 0; i < num_phases; i++) {
1051			if (errorphase == ahc_phase_table[i].phase)
1052				break;
1053		}
1054		mesg_out = ahc_phase_table[i].mesg_out;
1055		if (scb != NULL)
1056			ahc_print_path(ahc, scb);
1057		else
1058			printf("%s:%c:%d: ", ahc_name(ahc), intr_channel,
1059			       SCSIID_TARGET(ahc, ahc_inb(ahc, SAVED_SCSIID)));
1060		scsirate = ahc_inb(ahc, SCSIRATE);
1061		printf("parity error detected %s. "
1062		       "SEQADDR(0x%x) SCSIRATE(0x%x)\n",
1063		       ahc_phase_table[i].phasemsg,
1064		       ahc_inb(ahc, SEQADDR0) | (ahc_inb(ahc, SEQADDR1) << 8),
1065		       scsirate);
1066
1067		if ((ahc->features & AHC_DT) != 0) {
1068
1069			if ((sstat2 & CRCVALERR) != 0)
1070				printf("\tCRC Value Mismatch\n");
1071			if ((sstat2 & CRCENDERR) != 0)
1072				printf("\tNo terminal CRC packet recevied\n");
1073			if ((sstat2 & CRCREQERR) != 0)
1074				printf("\tIllegal CRC packet request\n");
1075			if ((sstat2 & DUAL_EDGE_ERR) != 0) {
1076				printf("\tUnexpected %sDT Data Phase\n",
1077				       (scsirate & SINGLE_EDGE) ? "" : "non-");
1078				/*
1079				 * This error applies regardless of
1080				 * data direction, so ignore the value
1081				 * in the phase table.
1082				 */
1083				mesg_out = MSG_INITIATOR_DET_ERR;
1084			}
1085		}
1086
1087		/*
1088		 * We've set the hardware to assert ATN if we
1089		 * get a parity error on "in" phases, so all we
1090		 * need to do is stuff the message buffer with
1091		 * the appropriate message.  "In" phases have set
1092		 * mesg_out to something other than MSG_NOP.
1093		 */
1094		if (mesg_out != MSG_NOOP) {
1095			if (ahc->msg_type != MSG_TYPE_NONE)
1096				ahc->send_msg_perror = TRUE;
1097			else
1098				ahc_outb(ahc, MSG_OUT, mesg_out);
1099		}
1100		/*
1101		 * Force a renegotiation with this target just in
1102		 * case we are out of sync for some external reason
1103		 * unknown (or unreported) by the target.
1104		 */
1105		ahc_force_renegotiation(ahc);
1106		ahc_outb(ahc, CLRINT, CLRSCSIINT);
1107		ahc_unpause(ahc);
1108	} else if ((status & SELTO) != 0) {
1109		u_int	scbptr;
1110
1111		/* Stop the selection */
1112		ahc_outb(ahc, SCSISEQ, 0);
1113
1114		/* No more pending messages */
1115		ahc_clear_msg_state(ahc);
1116
1117		/* Clear interrupt state */
1118		ahc_outb(ahc, SIMODE1, ahc_inb(ahc, SIMODE1) & ~ENBUSFREE);
1119		ahc_outb(ahc, CLRSINT1, CLRSELTIMEO|CLRBUSFREE|CLRSCSIPERR);
1120
1121		/*
1122		 * Although the driver does not care about the
1123		 * 'Selection in Progress' status bit, the busy
1124		 * LED does.  SELINGO is only cleared by a sucessfull
1125		 * selection, so we must manually clear it to insure
1126		 * the LED turns off just incase no future successful
1127		 * selections occur (e.g. no devices on the bus).
1128		 */
1129		ahc_outb(ahc, CLRSINT0, CLRSELINGO);
1130
1131		scbptr = ahc_inb(ahc, WAITING_SCBH);
1132		ahc_outb(ahc, SCBPTR, scbptr);
1133		scb_index = ahc_inb(ahc, SCB_TAG);
1134
1135		scb = ahc_lookup_scb(ahc, scb_index);
1136		if (scb == NULL) {
1137			printf("%s: ahc_intr - referenced scb not "
1138			       "valid during SELTO scb(%d, %d)\n",
1139			       ahc_name(ahc), scbptr, scb_index);
1140		} else {
1141			ahc_set_transaction_status(scb, CAM_SEL_TIMEOUT);
1142			ahc_freeze_devq(ahc, scb);
1143#ifdef AHC_DEBUG
1144			if ((ahc_debug & AHC_SHOW_SELTO) != 0) {
1145				ahc_print_path(ahc, scb);
1146				printf("Saw Selection Timeout for SCB 0x%x\n",
1147				       scb_index);
1148			}
1149#endif
1150		}
1151		ahc_outb(ahc, CLRINT, CLRSCSIINT);
1152		/*
1153		 * Force a renegotiation with this target just in
1154		 * case the cable was pulled and will later be
1155		 * re-attached.  The target may forget its negotiation
1156		 * settings with us should it attempt to reselect
1157		 * during the interruption.  The target will not issue
1158		 * a unit attention in this case, so we must always
1159		 * renegotiate.
1160		 */
1161		ahc_force_renegotiation(ahc);
1162		ahc_restart(ahc);
1163	} else if ((status & BUSFREE) != 0
1164		&& (ahc_inb(ahc, SIMODE1) & ENBUSFREE) != 0) {
1165		u_int lastphase;
1166		u_int saved_scsiid;
1167		u_int saved_lun;
1168		u_int target;
1169		u_int initiator_role_id;
1170		char channel;
1171		int printerror;
1172
1173		/*
1174		 * Clear our selection hardware as soon as possible.
1175		 * We may have an entry in the waiting Q for this target,
1176		 * that is affected by this busfree and we don't want to
1177		 * go about selecting the target while we handle the event.
1178		 */
1179		ahc_outb(ahc, SCSISEQ,
1180			 ahc_inb(ahc, SCSISEQ) & (ENSELI|ENRSELI|ENAUTOATNP));
1181
1182		/*
1183		 * Disable busfree interrupts and clear the busfree
1184		 * interrupt status.  We do this here so that several
1185		 * bus transactions occur prior to clearing the SCSIINT
1186		 * latch.  It can take a bit for the clearing to take effect.
1187		 */
1188		ahc_outb(ahc, SIMODE1, ahc_inb(ahc, SIMODE1) & ~ENBUSFREE);
1189		ahc_outb(ahc, CLRSINT1, CLRBUSFREE|CLRSCSIPERR);
1190
1191		/*
1192		 * Look at what phase we were last in.
1193		 * If its message out, chances are pretty good
1194		 * that the busfree was in response to one of
1195		 * our abort requests.
1196		 */
1197		lastphase = ahc_inb(ahc, LASTPHASE);
1198		saved_scsiid = ahc_inb(ahc, SAVED_SCSIID);
1199		saved_lun = ahc_inb(ahc, SAVED_LUN);
1200		target = SCSIID_TARGET(ahc, saved_scsiid);
1201		initiator_role_id = SCSIID_OUR_ID(saved_scsiid);
1202		channel = SCSIID_CHANNEL(ahc, saved_scsiid);
1203		printerror = 1;
1204
1205		if (lastphase == P_MESGOUT) {
1206			struct ahc_devinfo devinfo;
1207			u_int tag;
1208
1209			ahc_fetch_devinfo(ahc, &devinfo);
1210			tag = SCB_LIST_NULL;
1211			if (ahc_sent_msg(ahc, AHCMSG_1B, MSG_ABORT_TAG, TRUE)
1212			 || ahc_sent_msg(ahc, AHCMSG_1B, MSG_ABORT, TRUE)) {
1213				if (ahc->msgout_buf[ahc->msgout_index - 1]
1214				 == MSG_ABORT_TAG)
1215					tag = scb->hscb->tag;
1216				ahc_print_path(ahc, scb);
1217				printf("SCB %d - Abort%s Completed.\n",
1218				       scb->hscb->tag, tag == SCB_LIST_NULL ?
1219				       "" : " Tag");
1220				ahc_abort_scbs(ahc, target, channel,
1221					       saved_lun, tag,
1222					       ROLE_INITIATOR,
1223					       CAM_REQ_ABORTED);
1224				printerror = 0;
1225			} else if (ahc_sent_msg(ahc, AHCMSG_1B,
1226						MSG_BUS_DEV_RESET, TRUE)) {
1227#ifdef __FreeBSD__
1228				/*
1229				 * Don't mark the user's request for this BDR
1230				 * as completing with CAM_BDR_SENT.  CAM3
1231				 * specifies CAM_REQ_CMP.
1232				 */
1233				if (scb != NULL
1234				 && scb->io_ctx->ccb_h.func_code== XPT_RESET_DEV
1235				 && ahc_match_scb(ahc, scb, target, channel,
1236						  CAM_LUN_WILDCARD,
1237						  SCB_LIST_NULL,
1238						  ROLE_INITIATOR)) {
1239					ahc_set_transaction_status(scb, CAM_REQ_CMP);
1240				}
1241#endif
1242				ahc_compile_devinfo(&devinfo,
1243						    initiator_role_id,
1244						    target,
1245						    CAM_LUN_WILDCARD,
1246						    channel,
1247						    ROLE_INITIATOR);
1248				ahc_handle_devreset(ahc, &devinfo,
1249						    CAM_BDR_SENT,
1250						    "Bus Device Reset",
1251						    /*verbose_level*/0);
1252				printerror = 0;
1253			} else if (ahc_sent_msg(ahc, AHCMSG_EXT,
1254						MSG_EXT_PPR, FALSE)) {
1255				struct ahc_initiator_tinfo *tinfo;
1256				struct ahc_tmode_tstate *tstate;
1257
1258				/*
1259				 * PPR Rejected.  Try non-ppr negotiation
1260				 * and retry command.
1261				 */
1262				tinfo = ahc_fetch_transinfo(ahc,
1263							    devinfo.channel,
1264							    devinfo.our_scsiid,
1265							    devinfo.target,
1266							    &tstate);
1267				tinfo->curr.transport_version = 2;
1268				tinfo->goal.transport_version = 2;
1269				tinfo->goal.ppr_options = 0;
1270				ahc_qinfifo_requeue_tail(ahc, scb);
1271				printerror = 0;
1272			} else if (ahc_sent_msg(ahc, AHCMSG_EXT,
1273						MSG_EXT_WDTR, FALSE)
1274				|| ahc_sent_msg(ahc, AHCMSG_EXT,
1275						MSG_EXT_SDTR, FALSE)) {
1276				/*
1277				 * Negotiation Rejected.  Go-async and
1278				 * retry command.
1279				 */
1280				ahc_set_width(ahc, &devinfo,
1281					      MSG_EXT_WDTR_BUS_8_BIT,
1282					      AHC_TRANS_CUR|AHC_TRANS_GOAL,
1283					      /*paused*/TRUE);
1284				ahc_set_syncrate(ahc, &devinfo,
1285						/*syncrate*/NULL,
1286						/*period*/0, /*offset*/0,
1287						/*ppr_options*/0,
1288						AHC_TRANS_CUR|AHC_TRANS_GOAL,
1289						/*paused*/TRUE);
1290				ahc_qinfifo_requeue_tail(ahc, scb);
1291				printerror = 0;
1292			}
1293		}
1294		if (printerror != 0) {
1295			u_int i;
1296
1297			if (scb != NULL) {
1298				u_int tag;
1299
1300				if ((scb->hscb->control & TAG_ENB) != 0)
1301					tag = scb->hscb->tag;
1302				else
1303					tag = SCB_LIST_NULL;
1304				ahc_print_path(ahc, scb);
1305				ahc_abort_scbs(ahc, target, channel,
1306					       SCB_GET_LUN(scb), tag,
1307					       ROLE_INITIATOR,
1308					       CAM_UNEXP_BUSFREE);
1309			} else {
1310				/*
1311				 * We had not fully identified this connection,
1312				 * so we cannot abort anything.
1313				 */
1314				printf("%s: ", ahc_name(ahc));
1315			}
1316			for (i = 0; i < num_phases; i++) {
1317				if (lastphase == ahc_phase_table[i].phase)
1318					break;
1319			}
1320			/*
1321			 * Renegotiate with this device at the
1322			 * next oportunity just in case this busfree
1323			 * is due to a negotiation mismatch with the
1324			 * device.
1325			 */
1326			ahc_force_renegotiation(ahc);
1327			printf("Unexpected busfree %s\n"
1328			       "SEQADDR == 0x%x\n",
1329			       ahc_phase_table[i].phasemsg,
1330			       ahc_inb(ahc, SEQADDR0)
1331				| (ahc_inb(ahc, SEQADDR1) << 8));
1332		}
1333		ahc_outb(ahc, CLRINT, CLRSCSIINT);
1334		ahc_restart(ahc);
1335	} else {
1336		printf("%s: Missing case in ahc_handle_scsiint. status = %x\n",
1337		       ahc_name(ahc), status);
1338		ahc_outb(ahc, CLRINT, CLRSCSIINT);
1339	}
1340}
1341
1342/*
1343 * Force renegotiation to occur the next time we initiate
1344 * a command to the current device.
1345 */
1346static void
1347ahc_force_renegotiation(struct ahc_softc *ahc)
1348{
1349	struct	ahc_devinfo devinfo;
1350	struct	ahc_initiator_tinfo *targ_info;
1351	struct	ahc_tmode_tstate *tstate;
1352
1353	ahc_fetch_devinfo(ahc, &devinfo);
1354	targ_info = ahc_fetch_transinfo(ahc,
1355					devinfo.channel,
1356					devinfo.our_scsiid,
1357					devinfo.target,
1358					&tstate);
1359	ahc_update_neg_request(ahc, &devinfo, tstate,
1360			       targ_info, /*force*/TRUE);
1361}
1362
1363#define AHC_MAX_STEPS 2000
1364void
1365ahc_clear_critical_section(struct ahc_softc *ahc)
1366{
1367	int	stepping;
1368	int	steps;
1369	u_int	simode0;
1370	u_int	simode1;
1371
1372	if (ahc->num_critical_sections == 0)
1373		return;
1374
1375	stepping = FALSE;
1376	steps = 0;
1377	simode0 = 0;
1378	simode1 = 0;
1379	for (;;) {
1380		struct	cs *cs;
1381		u_int	seqaddr;
1382		u_int	i;
1383
1384		seqaddr = ahc_inb(ahc, SEQADDR0)
1385			| (ahc_inb(ahc, SEQADDR1) << 8);
1386
1387		/*
1388		 * Seqaddr represents the next instruction to execute,
1389		 * so we are really executing the instruction just
1390		 * before it.
1391		 */
1392		if (seqaddr != 0)
1393			seqaddr -= 1;
1394		cs = ahc->critical_sections;
1395		for (i = 0; i < ahc->num_critical_sections; i++, cs++) {
1396
1397			if (cs->begin < seqaddr && cs->end >= seqaddr)
1398				break;
1399		}
1400
1401		if (i == ahc->num_critical_sections)
1402			break;
1403
1404		if (steps > AHC_MAX_STEPS) {
1405			printf("%s: Infinite loop in critical section\n",
1406			       ahc_name(ahc));
1407			ahc_dump_card_state(ahc);
1408			panic("critical section loop");
1409		}
1410
1411		steps++;
1412		if (stepping == FALSE) {
1413
1414			/*
1415			 * Disable all interrupt sources so that the
1416			 * sequencer will not be stuck by a pausing
1417			 * interrupt condition while we attempt to
1418			 * leave a critical section.
1419			 */
1420			simode0 = ahc_inb(ahc, SIMODE0);
1421			ahc_outb(ahc, SIMODE0, 0);
1422			simode1 = ahc_inb(ahc, SIMODE1);
1423			ahc_outb(ahc, SIMODE1, 0);
1424			ahc_outb(ahc, CLRINT, CLRSCSIINT);
1425			ahc_outb(ahc, SEQCTL, ahc_inb(ahc, SEQCTL) | STEP);
1426			stepping = TRUE;
1427		}
1428		ahc_outb(ahc, HCNTRL, ahc->unpause);
1429		while (!ahc_is_paused(ahc))
1430			ahc_delay(200);
1431	}
1432	if (stepping) {
1433		ahc_outb(ahc, SIMODE0, simode0);
1434		ahc_outb(ahc, SIMODE1, simode1);
1435		ahc_outb(ahc, SEQCTL, ahc_inb(ahc, SEQCTL) & ~STEP);
1436	}
1437}
1438
1439/*
1440 * Clear any pending interrupt status.
1441 */
1442void
1443ahc_clear_intstat(struct ahc_softc *ahc)
1444{
1445	/* Clear any interrupt conditions this may have caused */
1446	ahc_outb(ahc, CLRSINT1, CLRSELTIMEO|CLRATNO|CLRSCSIRSTI
1447				|CLRBUSFREE|CLRSCSIPERR|CLRPHASECHG|
1448				CLRREQINIT);
1449	ahc_flush_device_writes(ahc);
1450	ahc_outb(ahc, CLRSINT0, CLRSELDO|CLRSELDI|CLRSELINGO);
1451 	ahc_flush_device_writes(ahc);
1452	ahc_outb(ahc, CLRINT, CLRSCSIINT);
1453	ahc_flush_device_writes(ahc);
1454}
1455
1456/**************************** Debugging Routines ******************************/
1457#ifdef AHC_DEBUG
1458uint32_t ahc_debug = AHC_DEBUG_OPTS;
1459#endif
1460
1461void
1462ahc_print_scb(struct scb *scb)
1463{
1464	int i;
1465
1466	struct hardware_scb *hscb = scb->hscb;
1467
1468	printf("scb:%p control:0x%x scsiid:0x%x lun:%d cdb_len:%d\n",
1469	       (void *)scb,
1470	       hscb->control,
1471	       hscb->scsiid,
1472	       hscb->lun,
1473	       hscb->cdb_len);
1474	printf("Shared Data: ");
1475	for (i = 0; i < sizeof(hscb->shared_data.cdb); i++)
1476		printf("%#02x", hscb->shared_data.cdb[i]);
1477	printf("        dataptr:%#x datacnt:%#x sgptr:%#x tag:%#x\n",
1478		ahc_le32toh(hscb->dataptr),
1479		ahc_le32toh(hscb->datacnt),
1480		ahc_le32toh(hscb->sgptr),
1481		hscb->tag);
1482	if (scb->sg_count > 0) {
1483		for (i = 0; i < scb->sg_count; i++) {
1484			printf("sg[%d] - Addr 0x%x%x : Length %d\n",
1485			       i,
1486			       (ahc_le32toh(scb->sg_list[i].len) >> 24
1487			        & SG_HIGH_ADDR_BITS),
1488			       ahc_le32toh(scb->sg_list[i].addr),
1489			       ahc_le32toh(scb->sg_list[i].len));
1490		}
1491	}
1492}
1493
1494/************************* Transfer Negotiation *******************************/
1495/*
1496 * Allocate per target mode instance (ID we respond to as a target)
1497 * transfer negotiation data structures.
1498 */
1499static struct ahc_tmode_tstate *
1500ahc_alloc_tstate(struct ahc_softc *ahc, u_int scsi_id, char channel)
1501{
1502	struct ahc_tmode_tstate *master_tstate;
1503	struct ahc_tmode_tstate *tstate;
1504	int i;
1505
1506	master_tstate = ahc->enabled_targets[ahc->our_id];
1507	if (channel == 'B') {
1508		scsi_id += 8;
1509		master_tstate = ahc->enabled_targets[ahc->our_id_b + 8];
1510	}
1511	if (ahc->enabled_targets[scsi_id] != NULL
1512	 && ahc->enabled_targets[scsi_id] != master_tstate)
1513		panic("%s: ahc_alloc_tstate - Target already allocated",
1514		      ahc_name(ahc));
1515	tstate = (struct ahc_tmode_tstate*)malloc(sizeof(*tstate),
1516						   M_DEVBUF, M_NOWAIT);
1517	if (tstate == NULL)
1518		return (NULL);
1519
1520	/*
1521	 * If we have allocated a master tstate, copy user settings from
1522	 * the master tstate (taken from SRAM or the EEPROM) for this
1523	 * channel, but reset our current and goal settings to async/narrow
1524	 * until an initiator talks to us.
1525	 */
1526	if (master_tstate != NULL) {
1527		memcpy(tstate, master_tstate, sizeof(*tstate));
1528		memset(tstate->enabled_luns, 0, sizeof(tstate->enabled_luns));
1529		tstate->ultraenb = 0;
1530		for (i = 0; i < AHC_NUM_TARGETS; i++) {
1531			memset(&tstate->transinfo[i].curr, 0,
1532			      sizeof(tstate->transinfo[i].curr));
1533			memset(&tstate->transinfo[i].goal, 0,
1534			      sizeof(tstate->transinfo[i].goal));
1535		}
1536	} else
1537		memset(tstate, 0, sizeof(*tstate));
1538	ahc->enabled_targets[scsi_id] = tstate;
1539	return (tstate);
1540}
1541
1542#ifdef AHC_TARGET_MODE
1543/*
1544 * Free per target mode instance (ID we respond to as a target)
1545 * transfer negotiation data structures.
1546 */
1547static void
1548ahc_free_tstate(struct ahc_softc *ahc, u_int scsi_id, char channel, int force)
1549{
1550	struct ahc_tmode_tstate *tstate;
1551
1552	/*
1553	 * Don't clean up our "master" tstate.
1554	 * It has our default user settings.
1555	 */
1556	if (((channel == 'B' && scsi_id == ahc->our_id_b)
1557	  || (channel == 'A' && scsi_id == ahc->our_id))
1558	 && force == FALSE)
1559		return;
1560
1561	if (channel == 'B')
1562		scsi_id += 8;
1563	tstate = ahc->enabled_targets[scsi_id];
1564	if (tstate != NULL)
1565		free(tstate, M_DEVBUF);
1566	ahc->enabled_targets[scsi_id] = NULL;
1567}
1568#endif
1569
1570/*
1571 * Called when we have an active connection to a target on the bus,
1572 * this function finds the nearest syncrate to the input period limited
1573 * by the capabilities of the bus connectivity of and sync settings for
1574 * the target.
1575 */
1576struct ahc_syncrate *
1577ahc_devlimited_syncrate(struct ahc_softc *ahc,
1578			struct ahc_initiator_tinfo *tinfo,
1579			u_int *period, u_int *ppr_options, role_t role)
1580{
1581	struct	ahc_transinfo *transinfo;
1582	u_int	maxsync;
1583
1584	if ((ahc->features & AHC_ULTRA2) != 0) {
1585		if ((ahc_inb(ahc, SBLKCTL) & ENAB40) != 0
1586		 && (ahc_inb(ahc, SSTAT2) & EXP_ACTIVE) == 0) {
1587			maxsync = AHC_SYNCRATE_DT;
1588		} else {
1589			maxsync = AHC_SYNCRATE_ULTRA;
1590			/* Can't do DT on an SE bus */
1591			*ppr_options &= ~MSG_EXT_PPR_DT_REQ;
1592		}
1593	} else if ((ahc->features & AHC_ULTRA) != 0) {
1594		maxsync = AHC_SYNCRATE_ULTRA;
1595	} else {
1596		maxsync = AHC_SYNCRATE_FAST;
1597	}
1598	/*
1599	 * Never allow a value higher than our current goal
1600	 * period otherwise we may allow a target initiated
1601	 * negotiation to go above the limit as set by the
1602	 * user.  In the case of an initiator initiated
1603	 * sync negotiation, we limit based on the user
1604	 * setting.  This allows the system to still accept
1605	 * incoming negotiations even if target initiated
1606	 * negotiation is not performed.
1607	 */
1608	if (role == ROLE_TARGET)
1609		transinfo = &tinfo->user;
1610	else
1611		transinfo = &tinfo->goal;
1612	*ppr_options &= transinfo->ppr_options;
1613	if (transinfo->period == 0) {
1614		*period = 0;
1615		*ppr_options = 0;
1616		return (NULL);
1617	}
1618	*period = MAX(*period, transinfo->period);
1619	return (ahc_find_syncrate(ahc, period, ppr_options, maxsync));
1620}
1621
1622/*
1623 * Look up the valid period to SCSIRATE conversion in our table.
1624 * Return the period and offset that should be sent to the target
1625 * if this was the beginning of an SDTR.
1626 */
1627struct ahc_syncrate *
1628ahc_find_syncrate(struct ahc_softc *ahc, u_int *period,
1629		  u_int *ppr_options, u_int maxsync)
1630{
1631	struct ahc_syncrate *syncrate;
1632
1633	if ((ahc->features & AHC_DT) == 0)
1634		*ppr_options &= ~MSG_EXT_PPR_DT_REQ;
1635
1636	/* Skip all DT only entries if DT is not available */
1637	if ((*ppr_options & MSG_EXT_PPR_DT_REQ) == 0
1638	 && maxsync < AHC_SYNCRATE_ULTRA2)
1639		maxsync = AHC_SYNCRATE_ULTRA2;
1640
1641	for (syncrate = &ahc_syncrates[maxsync];
1642	     syncrate->rate != NULL;
1643	     syncrate++) {
1644
1645		/*
1646		 * The Ultra2 table doesn't go as low
1647		 * as for the Fast/Ultra cards.
1648		 */
1649		if ((ahc->features & AHC_ULTRA2) != 0
1650		 && (syncrate->sxfr_u2 == 0))
1651			break;
1652
1653		if (*period <= syncrate->period) {
1654			/*
1655			 * When responding to a target that requests
1656			 * sync, the requested rate may fall between
1657			 * two rates that we can output, but still be
1658			 * a rate that we can receive.  Because of this,
1659			 * we want to respond to the target with
1660			 * the same rate that it sent to us even
1661			 * if the period we use to send data to it
1662			 * is lower.  Only lower the response period
1663			 * if we must.
1664			 */
1665			if (syncrate == &ahc_syncrates[maxsync])
1666				*period = syncrate->period;
1667
1668			/*
1669			 * At some speeds, we only support
1670			 * ST transfers.
1671			 */
1672		 	if ((syncrate->sxfr_u2 & ST_SXFR) != 0)
1673				*ppr_options &= ~MSG_EXT_PPR_DT_REQ;
1674			break;
1675		}
1676	}
1677
1678	if ((*period == 0)
1679	 || (syncrate->rate == NULL)
1680	 || ((ahc->features & AHC_ULTRA2) != 0
1681	  && (syncrate->sxfr_u2 == 0))) {
1682		/* Use asynchronous transfers. */
1683		*period = 0;
1684		syncrate = NULL;
1685		*ppr_options &= ~MSG_EXT_PPR_DT_REQ;
1686	}
1687	return (syncrate);
1688}
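
/*
 * Worked example (period factors per the SCSI spec; the exact table
 * contents are an assumption here): entries run from fastest to slowest,
 * so a requested factor of 0x0b lands on the first entry whose period
 * is >= 0x0b, e.g. 0x0c (50ns, 20MHz).  Per the comment in the loop,
 * *period is only rounded up when the match is the fastest entry we are
 * allowed to use (maxsync); otherwise the requester's own factor is
 * echoed back even though we will clock our output at the slower table
 * rate.  A request that falls off the end of the table, a period of 0,
 * or an Ultra2-only hole in the table all result in NULL, i.e. async.
 */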
1689
1690/*
1691 * Convert from an entry in our syncrate table to the SCSI equivalent
1692 * sync "period" factor.
1693 */
1694u_int
1695ahc_find_period(struct ahc_softc *ahc, u_int scsirate, u_int maxsync)
1696{
1697	struct ahc_syncrate *syncrate;
1698
1699	if ((ahc->features & AHC_ULTRA2) != 0)
1700		scsirate &= SXFR_ULTRA2;
1701	else
1702		scsirate &= SXFR;
1703
1704	syncrate = &ahc_syncrates[maxsync];
1705	while (syncrate->rate != NULL) {
1706
1707		if ((ahc->features & AHC_ULTRA2) != 0) {
1708			if (syncrate->sxfr_u2 == 0)
1709				break;
1710			else if (scsirate == (syncrate->sxfr_u2 & SXFR_ULTRA2))
1711				return (syncrate->period);
1712		} else if (scsirate == (syncrate->sxfr & SXFR)) {
1713				return (syncrate->period);
1714		}
1715		syncrate++;
1716	}
1717	return (0); /* async */
1718}
1719
1720/*
1721 * Truncate the given synchronous offset to a value the
1722 * current adapter type and syncrate are capable of.
1723 */
1724void
1725ahc_validate_offset(struct ahc_softc *ahc,
1726		    struct ahc_initiator_tinfo *tinfo,
1727		    struct ahc_syncrate *syncrate,
1728		    u_int *offset, int wide, role_t role)
1729{
1730	u_int maxoffset;
1731
1732	/* Limit offset to what we can do */
1733	if (syncrate == NULL) {
1734		maxoffset = 0;
1735	} else if ((ahc->features & AHC_ULTRA2) != 0) {
1736		maxoffset = MAX_OFFSET_ULTRA2;
1737	} else {
1738		if (wide)
1739			maxoffset = MAX_OFFSET_16BIT;
1740		else
1741			maxoffset = MAX_OFFSET_8BIT;
1742	}
1743	*offset = MIN(*offset, maxoffset);
1744	if (tinfo != NULL) {
1745		if (role == ROLE_TARGET)
1746			*offset = MIN(*offset, tinfo->user.offset);
1747		else
1748			*offset = MIN(*offset, tinfo->goal.offset);
1749	}
1750}
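
/*
 * Order of the clamps above, illustrated: a NULL syncrate forces an
 * offset of 0 (async); otherwise the offset is first limited by the
 * adapter type (the MAX_OFFSET_* constants from the register
 * definitions, with Ultra2 parts allowing the largest value) and then
 * by the user (target role) or goal (initiator role) setting.  So, for
 * example, a target offering a very deep offset during a renegotiation
 * can never push us past what the user configured.
 */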
1751
1752/*
1753 * Truncate the given transfer width parameter to a value the
1754 * current adapter type is capable of.
1755 */
1756void
1757ahc_validate_width(struct ahc_softc *ahc, struct ahc_initiator_tinfo *tinfo,
1758		   u_int *bus_width, role_t role)
1759{
1760	switch (*bus_width) {
1761	default:
1762		if (ahc->features & AHC_WIDE) {
1763			/* Respond Wide */
1764			*bus_width = MSG_EXT_WDTR_BUS_16_BIT;
1765			break;
1766		}
1767		/* FALLTHROUGH */
1768	case MSG_EXT_WDTR_BUS_8_BIT:
1769		*bus_width = MSG_EXT_WDTR_BUS_8_BIT;
1770		break;
1771	}
1772	if (tinfo != NULL) {
1773		if (role == ROLE_TARGET)
1774			*bus_width = MIN(tinfo->user.width, *bus_width);
1775		else
1776			*bus_width = MIN(tinfo->goal.width, *bus_width);
1777	}
1778}
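
/*
 * The switch above intentionally treats any unrecognized width request
 * as a request for the widest transfer we support; e.g. a 32-bit
 * request (WDTR exponent 2, per the SCSI spec) degrades to 16-bit on a
 * wide chip and to 8-bit otherwise, and is then further limited by the
 * user/goal width just as the offset is.
 */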
1779
1780/*
1781 * Update the bitmask of targets with which the controller should
1782 * negotiate at the next convenient opportunity.  This currently
1783 * means the next time we send the initial identify messages for
1784 * a new transaction.
1785 */
1786int
1787ahc_update_neg_request(struct ahc_softc *ahc, struct ahc_devinfo *devinfo,
1788		       struct ahc_tmode_tstate *tstate,
1789		       struct ahc_initiator_tinfo *tinfo, int force)
1790{
1791	u_int auto_negotiate_orig;
1792
1793	auto_negotiate_orig = tstate->auto_negotiate;
1794	if (tinfo->curr.period != tinfo->goal.period
1795	 || tinfo->curr.width != tinfo->goal.width
1796	 || tinfo->curr.offset != tinfo->goal.offset
1797	 || tinfo->curr.ppr_options != tinfo->goal.ppr_options
1798	 || (force
1799	  && (tinfo->goal.offset != 0
1800	   || tinfo->goal.width != MSG_EXT_WDTR_BUS_8_BIT
1801	   || tinfo->goal.ppr_options != 0)))
1802		tstate->auto_negotiate |= devinfo->target_mask;
1803	else
1804		tstate->auto_negotiate &= ~devinfo->target_mask;
1805
1806	return (auto_negotiate_orig != tstate->auto_negotiate);
1807}
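
/*
 * Summary of the test above: renegotiation is flagged whenever the
 * current and goal settings disagree, or, when "force" is non-zero,
 * whenever the goal is anything other than the async/narrow/no-PPR
 * default.  The forced case lets callers request a renegotiation even
 * though curr already matches goal (for instance after an event that
 * may have silently reset the target's side of the agreement).
 */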
1808
1809/*
1810 * Update the user/goal/curr tables of synchronous negotiation
1811 * parameters as well as, in the case of a current or active update,
1812 * any data structures on the host controller.  In the case of an
1813 * active update, the specified target is currently talking to us on
1814 * the bus, so the transfer parameter update must take effect
1815 * immediately.
1816 */
1817void
1818ahc_set_syncrate(struct ahc_softc *ahc, struct ahc_devinfo *devinfo,
1819		 struct ahc_syncrate *syncrate, u_int period,
1820		 u_int offset, u_int ppr_options, u_int type, int paused)
1821{
1822	struct	ahc_initiator_tinfo *tinfo;
1823	struct	ahc_tmode_tstate *tstate;
1824	u_int	old_period;
1825	u_int	old_offset;
1826	u_int	old_ppr;
1827	int	active;
1828	int	update_needed;
1829
1830	active = (type & AHC_TRANS_ACTIVE) == AHC_TRANS_ACTIVE;
1831	update_needed = 0;
1832
1833	if (syncrate == NULL) {
1834		period = 0;
1835		offset = 0;
1836	}
1837
1838	tinfo = ahc_fetch_transinfo(ahc, devinfo->channel, devinfo->our_scsiid,
1839				    devinfo->target, &tstate);
1840
1841	if ((type & AHC_TRANS_USER) != 0) {
1842		tinfo->user.period = period;
1843		tinfo->user.offset = offset;
1844		tinfo->user.ppr_options = ppr_options;
1845	}
1846
1847	if ((type & AHC_TRANS_GOAL) != 0) {
1848		tinfo->goal.period = period;
1849		tinfo->goal.offset = offset;
1850		tinfo->goal.ppr_options = ppr_options;
1851	}
1852
1853	old_period = tinfo->curr.period;
1854	old_offset = tinfo->curr.offset;
1855	old_ppr	   = tinfo->curr.ppr_options;
1856
1857	if ((type & AHC_TRANS_CUR) != 0
1858	 && (old_period != period
1859	  || old_offset != offset
1860	  || old_ppr != ppr_options)) {
1861		u_int	scsirate;
1862
1863		update_needed++;
1864		scsirate = tinfo->scsirate;
1865		if ((ahc->features & AHC_ULTRA2) != 0) {
1866
1867			scsirate &= ~(SXFR_ULTRA2|SINGLE_EDGE|ENABLE_CRC);
1868			if (syncrate != NULL) {
1869				scsirate |= syncrate->sxfr_u2;
1870				if ((ppr_options & MSG_EXT_PPR_DT_REQ) != 0)
1871					scsirate |= ENABLE_CRC;
1872				else
1873					scsirate |= SINGLE_EDGE;
1874			}
1875		} else {
1876
1877			scsirate &= ~(SXFR|SOFS);
1878			/*
1879			 * Ensure Ultra mode is set properly for
1880			 * this target.
1881			 */
1882			tstate->ultraenb &= ~devinfo->target_mask;
1883			if (syncrate != NULL) {
1884				if (syncrate->sxfr & ULTRA_SXFR) {
1885					tstate->ultraenb |=
1886						devinfo->target_mask;
1887				}
1888				scsirate |= syncrate->sxfr & SXFR;
1889				scsirate |= offset & SOFS;
1890			}
1891			if (active) {
1892				u_int sxfrctl0;
1893
1894				sxfrctl0 = ahc_inb(ahc, SXFRCTL0);
1895				sxfrctl0 &= ~FAST20;
1896				if (tstate->ultraenb & devinfo->target_mask)
1897					sxfrctl0 |= FAST20;
1898				ahc_outb(ahc, SXFRCTL0, sxfrctl0);
1899			}
1900		}
1901		if (active) {
1902			ahc_outb(ahc, SCSIRATE, scsirate);
1903			if ((ahc->features & AHC_ULTRA2) != 0)
1904				ahc_outb(ahc, SCSIOFFSET, offset);
1905		}
1906
1907		tinfo->scsirate = scsirate;
1908		tinfo->curr.period = period;
1909		tinfo->curr.offset = offset;
1910		tinfo->curr.ppr_options = ppr_options;
1911
1912		ahc_send_async(ahc, devinfo->channel, devinfo->target,
1913			       CAM_LUN_WILDCARD, AC_TRANSFER_NEG, NULL);
1914		if (bootverbose) {
1915			if (offset != 0) {
1916				printf("%s: target %d synchronous at %sMHz%s, "
1917				       "offset = 0x%x\n", ahc_name(ahc),
1918				       devinfo->target, syncrate->rate,
1919				       (ppr_options & MSG_EXT_PPR_DT_REQ)
1920				       ? " DT" : "", offset);
1921			} else {
1922				printf("%s: target %d using "
1923				       "asynchronous transfers\n",
1924				       ahc_name(ahc), devinfo->target);
1925			}
1926		}
1927	}
1928
1929	update_needed += ahc_update_neg_request(ahc, devinfo, tstate,
1930						tinfo, /*force*/FALSE);
1931
1932	if (update_needed)
1933		ahc_update_pending_scbs(ahc);
1934}
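
/*
 * Typical invocation patterns from elsewhere in this file (shown only
 * as a reading aid): the message parser applies a freshly negotiated
 * agreement with
 *
 *	ahc_set_syncrate(ahc, devinfo, syncrate, period, offset,
 *			 ppr_options, AHC_TRANS_ACTIVE|AHC_TRANS_GOAL,
 *			 TRUE);
 *
 * while bus device reset handling drops a device back to async with a
 * NULL syncrate and AHC_TRANS_CUR, updating the current settings (and
 * any pending SCBs) without disturbing the user/goal tables.
 */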
1935
1936/*
1937 * Update the user/goal/curr tables of wide negotiation
1938 * parameters as well as, in the case of a current or active update,
1939 * any data structures on the host controller.  In the case of an
1940 * active update, the specified target is currently talking to us on
1941 * the bus, so the transfer parameter update must take effect
1942 * immediately.
1943 */
1944void
1945ahc_set_width(struct ahc_softc *ahc, struct ahc_devinfo *devinfo,
1946	      u_int width, u_int type, int paused)
1947{
1948	struct	ahc_initiator_tinfo *tinfo;
1949	struct	ahc_tmode_tstate *tstate;
1950	u_int	oldwidth;
1951	int	active;
1952	int	update_needed;
1953
1954	active = (type & AHC_TRANS_ACTIVE) == AHC_TRANS_ACTIVE;
1955	update_needed = 0;
1956	tinfo = ahc_fetch_transinfo(ahc, devinfo->channel, devinfo->our_scsiid,
1957				    devinfo->target, &tstate);
1958
1959	if ((type & AHC_TRANS_USER) != 0)
1960		tinfo->user.width = width;
1961
1962	if ((type & AHC_TRANS_GOAL) != 0)
1963		tinfo->goal.width = width;
1964
1965	oldwidth = tinfo->curr.width;
1966	if ((type & AHC_TRANS_CUR) != 0 && oldwidth != width) {
1967		u_int	scsirate;
1968
1969		update_needed++;
1970		scsirate =  tinfo->scsirate;
1971		scsirate &= ~WIDEXFER;
1972		if (width == MSG_EXT_WDTR_BUS_16_BIT)
1973			scsirate |= WIDEXFER;
1974
1975		tinfo->scsirate = scsirate;
1976
1977		if (active)
1978			ahc_outb(ahc, SCSIRATE, scsirate);
1979
1980		tinfo->curr.width = width;
1981
1982		ahc_send_async(ahc, devinfo->channel, devinfo->target,
1983			       CAM_LUN_WILDCARD, AC_TRANSFER_NEG, NULL);
1984		if (bootverbose) {
1985			printf("%s: target %d using %dbit transfers\n",
1986			       ahc_name(ahc), devinfo->target,
1987			       8 * (0x01 << width));
1988		}
1989	}
1990
1991	update_needed += ahc_update_neg_request(ahc, devinfo, tstate,
1992						tinfo, /*force*/FALSE);
1993	if (update_needed)
1994		ahc_update_pending_scbs(ahc);
1995}
1996
1997/*
1998 * Update the current state of tagged queuing for a given target.
1999 */
2000void
2001ahc_set_tags(struct ahc_softc *ahc, struct ahc_devinfo *devinfo,
2002	     ahc_queue_alg alg)
2003{
2004 	ahc_platform_set_tags(ahc, devinfo, alg);
2005 	ahc_send_async(ahc, devinfo->channel, devinfo->target,
2006 		       devinfo->lun, AC_TRANSFER_NEG, &alg);
2007}
2008
2009/*
2010 * When the transfer settings for a connection change, update any
2011 * in-transit SCBs to contain the new data so the hardware will
2012 * be set correctly during future (re)selections.
2013 */
2014static void
2015ahc_update_pending_scbs(struct ahc_softc *ahc)
2016{
2017	struct	scb *pending_scb;
2018	int	pending_scb_count;
2019	int	i;
2020	int	paused;
2021	u_int	saved_scbptr;
2022
2023	/*
2024	 * Traverse the pending SCB list and ensure that all of the
2025	 * SCBs there have the proper settings.
2026	 */
2027	pending_scb_count = 0;
2028	LIST_FOREACH(pending_scb, &ahc->pending_scbs, pending_links) {
2029		struct ahc_devinfo devinfo;
2030		struct hardware_scb *pending_hscb;
2031		struct ahc_initiator_tinfo *tinfo;
2032		struct ahc_tmode_tstate *tstate;
2033
2034		ahc_scb_devinfo(ahc, &devinfo, pending_scb);
2035		tinfo = ahc_fetch_transinfo(ahc, devinfo.channel,
2036					    devinfo.our_scsiid,
2037					    devinfo.target, &tstate);
2038		pending_hscb = pending_scb->hscb;
2039		pending_hscb->control &= ~ULTRAENB;
2040		if ((tstate->ultraenb & devinfo.target_mask) != 0)
2041			pending_hscb->control |= ULTRAENB;
2042		pending_hscb->scsirate = tinfo->scsirate;
2043		pending_hscb->scsioffset = tinfo->curr.offset;
2044		if ((tstate->auto_negotiate & devinfo.target_mask) == 0
2045		 && (pending_scb->flags & SCB_AUTO_NEGOTIATE) != 0) {
2046			pending_scb->flags &= ~SCB_AUTO_NEGOTIATE;
2047			pending_hscb->control &= ~MK_MESSAGE;
2048		}
2049		ahc_sync_scb(ahc, pending_scb,
2050			     BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
2051		pending_scb_count++;
2052	}
2053
2054	if (pending_scb_count == 0)
2055		return;
2056
2057	if (ahc_is_paused(ahc)) {
2058		paused = 1;
2059	} else {
2060		paused = 0;
2061		ahc_pause(ahc);
2062	}
2063
2064	saved_scbptr = ahc_inb(ahc, SCBPTR);
2065	/* Ensure that the hscbs down on the card match the new information */
2066	for (i = 0; i < ahc->scb_data->maxhscbs; i++) {
2067		struct	hardware_scb *pending_hscb;
2068		u_int	control;
2069		u_int	scb_tag;
2070
2071		ahc_outb(ahc, SCBPTR, i);
2072		scb_tag = ahc_inb(ahc, SCB_TAG);
2073		pending_scb = ahc_lookup_scb(ahc, scb_tag);
2074		if (pending_scb == NULL)
2075			continue;
2076
2077		pending_hscb = pending_scb->hscb;
2078		control = ahc_inb(ahc, SCB_CONTROL);
2079		control &= ~(ULTRAENB|MK_MESSAGE);
2080		control |= pending_hscb->control & (ULTRAENB|MK_MESSAGE);
2081		ahc_outb(ahc, SCB_CONTROL, control);
2082		ahc_outb(ahc, SCB_SCSIRATE, pending_hscb->scsirate);
2083		ahc_outb(ahc, SCB_SCSIOFFSET, pending_hscb->scsioffset);
2084	}
2085	ahc_outb(ahc, SCBPTR, saved_scbptr);
2086
2087	if (paused == 0)
2088		ahc_unpause(ahc);
2089}
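
/*
 * Note on the two passes above: the first walk fixes up the host copies
 * of every pending hardware SCB; the second (with the chip paused)
 * walks the card's SCB array by SCBPTR so that any SCB the sequencer
 * has already downloaded picks up the same ULTRAENB, MK_MESSAGE,
 * scsirate and offset values.  A rough sketch of the second pass, with
 * the bookkeeping stripped out:
 *
 *	saved = ahc_inb(ahc, SCBPTR);
 *	for (i = 0; i < ahc->scb_data->maxhscbs; i++) {
 *		ahc_outb(ahc, SCBPTR, i);
 *		... copy control/scsirate/scsioffset from the
 *		    matching host SCB, if any ...
 *	}
 *	ahc_outb(ahc, SCBPTR, saved);
 */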
2090
2091/**************************** Pathing Information *****************************/
2092static void
2093ahc_fetch_devinfo(struct ahc_softc *ahc, struct ahc_devinfo *devinfo)
2094{
2095	u_int	saved_scsiid;
2096	role_t	role;
2097	int	our_id;
2098
2099	if (ahc_inb(ahc, SSTAT0) & TARGET)
2100		role = ROLE_TARGET;
2101	else
2102		role = ROLE_INITIATOR;
2103
2104	if (role == ROLE_TARGET
2105	 && (ahc->features & AHC_MULTI_TID) != 0
2106	 && (ahc_inb(ahc, SEQ_FLAGS)
2107 	   & (CMDPHASE_PENDING|TARG_CMD_PENDING|NO_DISCONNECT)) != 0) {
2108		/* We were selected, so pull our id from TARGIDIN */
2109		our_id = ahc_inb(ahc, TARGIDIN) & OID;
2110	} else if ((ahc->features & AHC_ULTRA2) != 0)
2111		our_id = ahc_inb(ahc, SCSIID_ULTRA2) & OID;
2112	else
2113		our_id = ahc_inb(ahc, SCSIID) & OID;
2114
2115	saved_scsiid = ahc_inb(ahc, SAVED_SCSIID);
2116	ahc_compile_devinfo(devinfo,
2117			    our_id,
2118			    SCSIID_TARGET(ahc, saved_scsiid),
2119			    ahc_inb(ahc, SAVED_LUN),
2120			    SCSIID_CHANNEL(ahc, saved_scsiid),
2121			    role);
2122}
2123
2124struct ahc_phase_table_entry*
2125ahc_lookup_phase_entry(int phase)
2126{
2127	struct ahc_phase_table_entry *entry;
2128	struct ahc_phase_table_entry *last_entry;
2129
2130	/*
2131	 * num_phases doesn't include the default entry which
2132	 * will be returned if the phase doesn't match.
2133	 */
2134	last_entry = &ahc_phase_table[num_phases];
2135	for (entry = ahc_phase_table; entry < last_entry; entry++) {
2136		if (phase == entry->phase)
2137			break;
2138	}
2139	return (entry);
2140}
2141
2142void
2143ahc_compile_devinfo(struct ahc_devinfo *devinfo, u_int our_id, u_int target,
2144		    u_int lun, char channel, role_t role)
2145{
2146	devinfo->our_scsiid = our_id;
2147	devinfo->target = target;
2148	devinfo->lun = lun;
2149	devinfo->target_offset = target;
2150	devinfo->channel = channel;
2151	devinfo->role = role;
2152	if (channel == 'B')
2153		devinfo->target_offset += 8;
2154	devinfo->target_mask = (0x01 << devinfo->target_offset);
2155}
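
/*
 * Example: target 3 on channel 'B' of a twin channel adapter gets
 * target_offset 11 and target_mask 0x0800, the bit used in per-target
 * bitfields such as tstate->ultraenb and tstate->auto_negotiate
 * throughout this file.
 */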
2156
2157static void
2158ahc_print_devinfo(struct ahc_softc *ahc, struct ahc_devinfo *devinfo)
2159{
2160	printf("%s:%c:%d:%d:", ahc_name(ahc), devinfo->channel,
2161	       devinfo->target, devinfo->lun);
2162}
2163
2164static void
2165ahc_scb_devinfo(struct ahc_softc *ahc, struct ahc_devinfo *devinfo,
2166		struct scb *scb)
2167{
2168	role_t	role;
2169	int	our_id;
2170
2171	our_id = SCSIID_OUR_ID(scb->hscb->scsiid);
2172	role = ROLE_INITIATOR;
2173	if ((scb->hscb->control & TARGET_SCB) != 0)
2174		role = ROLE_TARGET;
2175	ahc_compile_devinfo(devinfo, our_id, SCB_GET_TARGET(ahc, scb),
2176			    SCB_GET_LUN(scb), SCB_GET_CHANNEL(ahc, scb), role);
2177}
2178
2179
2180/************************ Message Phase Processing ****************************/
2181static void
2182ahc_assert_atn(struct ahc_softc *ahc)
2183{
2184	u_int scsisigo;
2185
2186	scsisigo = ATNO;
2187	if ((ahc->features & AHC_DT) == 0)
2188		scsisigo |= ahc_inb(ahc, SCSISIGI);
2189	ahc_outb(ahc, SCSISIGO, scsisigo);
2190}
2191
2192/*
2193 * When an initiator transaction with the MK_MESSAGE flag either reconnects
2194 * or enters the initial message out phase, we are interrupted.  Fill our
2195 * outgoing message buffer with the appropriate message and begin handling
2196 * the message phase(s) manually.
2197 */
2198static void
2199ahc_setup_initiator_msgout(struct ahc_softc *ahc, struct ahc_devinfo *devinfo,
2200			   struct scb *scb)
2201{
2202	/*
2203	 * To facilitate adding multiple messages together,
2204	 * each routine should increment the index and len
2205	 * variables instead of setting them explicitly.
2206	 */
2207	ahc->msgout_index = 0;
2208	ahc->msgout_len = 0;
2209
2210	if ((scb->flags & SCB_DEVICE_RESET) == 0
2211	 && ahc_inb(ahc, MSG_OUT) == MSG_IDENTIFYFLAG) {
2212		u_int identify_msg;
2213
2214		identify_msg = MSG_IDENTIFYFLAG | SCB_GET_LUN(scb);
2215		if ((scb->hscb->control & DISCENB) != 0)
2216			identify_msg |= MSG_IDENTIFY_DISCFLAG;
2217		ahc->msgout_buf[ahc->msgout_index++] = identify_msg;
2218		ahc->msgout_len++;
2219
2220		if ((scb->hscb->control & TAG_ENB) != 0) {
2221			ahc->msgout_buf[ahc->msgout_index++] =
2222			    scb->hscb->control & (TAG_ENB|SCB_TAG_TYPE);
2223			ahc->msgout_buf[ahc->msgout_index++] = scb->hscb->tag;
2224			ahc->msgout_len += 2;
2225		}
2226	}
2227
2228	if (scb->flags & SCB_DEVICE_RESET) {
2229		ahc->msgout_buf[ahc->msgout_index++] = MSG_BUS_DEV_RESET;
2230		ahc->msgout_len++;
2231		ahc_print_path(ahc, scb);
2232		printf("Bus Device Reset Message Sent\n");
2233		/*
2234		 * Clear our selection hardware in advance of
2235		 * the busfree.  We may have an entry in the waiting
2236		 * Q for this target, and we don't want to go about
2237		 * selecting while we handle the busfree and blow it
2238		 * away.
2239		 */
2240		ahc_outb(ahc, SCSISEQ, (ahc_inb(ahc, SCSISEQ) & ~ENSELO));
2241	} else if ((scb->flags & SCB_ABORT) != 0) {
2242		if ((scb->hscb->control & TAG_ENB) != 0)
2243			ahc->msgout_buf[ahc->msgout_index++] = MSG_ABORT_TAG;
2244		else
2245			ahc->msgout_buf[ahc->msgout_index++] = MSG_ABORT;
2246		ahc->msgout_len++;
2247		ahc_print_path(ahc, scb);
2248		printf("Abort%s Message Sent\n",
2249		       (scb->hscb->control & TAG_ENB) != 0 ? " Tag" : "");
2250		/*
2251		 * Clear our selection hardware in advance of
2252		 * the busfree.  We may have an entry in the waiting
2253		 * Q for this target, and we don't want to go about
2254		 * selecting while we handle the busfree and blow it
2255		 * away.
2256		 */
2257		ahc_outb(ahc, SCSISEQ, (ahc_inb(ahc, SCSISEQ) & ~ENSELO));
2258	} else if ((scb->flags & (SCB_AUTO_NEGOTIATE|SCB_NEGOTIATE)) != 0) {
2259		ahc_build_transfer_msg(ahc, devinfo);
2260	} else {
2261		printf("ahc_intr: AWAITING_MSG for an SCB that "
2262		       "does not have a waiting message\n");
2263		printf("SCSIID = %x, target_mask = %x\n", scb->hscb->scsiid,
2264		       devinfo->target_mask);
2265		panic("SCB = %d, SCB Control = %x, MSG_OUT = %x "
2266		      "SCB flags = %x", scb->hscb->tag, scb->hscb->control,
2267		      ahc_inb(ahc, MSG_OUT), scb->flags);
2268	}
2269
2270	/*
2271	 * Clear the MK_MESSAGE flag from the SCB so we aren't
2272	 * asked to send this message again.
2273	 */
2274	ahc_outb(ahc, SCB_CONTROL, ahc_inb(ahc, SCB_CONTROL) & ~MK_MESSAGE);
2275	scb->hscb->control &= ~MK_MESSAGE;
2276	ahc->msgout_index = 0;
2277	ahc->msg_type = MSG_TYPE_INITIATOR_MSGOUT;
2278}
2279
2280/*
2281 * Build an appropriate transfer negotiation message for the
2282 * currently active target.
2283 */
2284static void
2285ahc_build_transfer_msg(struct ahc_softc *ahc, struct ahc_devinfo *devinfo)
2286{
2287	/*
2288	 * We need to initiate transfer negotiations.
2289	 * If our current and goal settings are identical,
2290	 * we want to renegotiate due to a check condition.
2291	 */
2292	struct	ahc_initiator_tinfo *tinfo;
2293	struct	ahc_tmode_tstate *tstate;
2294	struct	ahc_syncrate *rate;
2295	int	dowide;
2296	int	dosync;
2297	int	doppr;
2298	int	use_ppr;
2299	u_int	period;
2300	u_int	ppr_options;
2301	u_int	offset;
2302
2303	tinfo = ahc_fetch_transinfo(ahc, devinfo->channel, devinfo->our_scsiid,
2304				    devinfo->target, &tstate);
2305	/*
2306	 * Filter our period based on the current connection.
2307	 * If we can't perform DT transfers on this segment (not in LVD
2308	 * mode for instance), then our decision to issue a PPR message
2309	 * may change.
2310	 */
2311	period = tinfo->goal.period;
2312	ppr_options = tinfo->goal.ppr_options;
2313	/* Target initiated PPR is not allowed in the SCSI spec */
2314	if (devinfo->role == ROLE_TARGET)
2315		ppr_options = 0;
2316	rate = ahc_devlimited_syncrate(ahc, tinfo, &period,
2317				       &ppr_options, devinfo->role);
2318	dowide = tinfo->curr.width != tinfo->goal.width;
2319	dosync = tinfo->curr.period != period;
2320	doppr = tinfo->curr.ppr_options != ppr_options;
2321
2322	if (!dowide && !dosync && !doppr) {
2323		dowide = tinfo->goal.width != MSG_EXT_WDTR_BUS_8_BIT;
2324		dosync = tinfo->goal.offset != 0;
2325		doppr = tinfo->goal.ppr_options != 0;
2326	}
2327
2328	if (!dowide && !dosync && !doppr) {
2329		panic("ahc_intr: AWAITING_MSG for negotiation, "
2330		      "but no negotiation needed\n");
2331	}
2332
2333	use_ppr = (tinfo->curr.transport_version >= 3) || doppr;
2334	/* Target initiated PPR is not allowed in the SCSI spec */
2335	if (devinfo->role == ROLE_TARGET)
2336		use_ppr = 0;
2337
2338	/*
2339	 * Both the PPR message and SDTR message require the
2340	 * goal syncrate to be limited to what the target device
2341	 * is capable of handling (based on whether an LVD->SE
2342	 * expander is on the bus), so combine these two cases.
2343	 * Regardless, guarantee that if we are using WDTR and SDTR
2344	 * messages that WDTR comes first.
2345	 */
2346	if (use_ppr || (dosync && !dowide)) {
2347
2348		offset = tinfo->goal.offset;
2349		ahc_validate_offset(ahc, tinfo, rate, &offset,
2350				    use_ppr ? tinfo->goal.width
2351					    : tinfo->curr.width,
2352				    devinfo->role);
2353		if (use_ppr) {
2354			ahc_construct_ppr(ahc, devinfo, period, offset,
2355					  tinfo->goal.width, ppr_options);
2356		} else {
2357			ahc_construct_sdtr(ahc, devinfo, period, offset);
2358		}
2359	} else {
2360		ahc_construct_wdtr(ahc, devinfo, tinfo->goal.width);
2361	}
2362}
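
/*
 * Consequence of the use_ppr test above: once a device has negotiated
 * with transport_version >= 3 (i.e. it has accepted a PPR), all further
 * negotiation -- even a simple width or period change -- is carried in
 * PPR messages; otherwise we fall back to the SPI-2 WDTR/SDTR pair,
 * always sending WDTR first since a wide agreement resets any sync
 * agreement ("after a wide message, we are async").
 */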
2363
2364/*
2365 * Build a synchronous negotiation message in our message
2366 * buffer based on the input parameters.
2367 */
2368static void
2369ahc_construct_sdtr(struct ahc_softc *ahc, struct ahc_devinfo *devinfo,
2370		   u_int period, u_int offset)
2371{
2372	ahc->msgout_buf[ahc->msgout_index++] = MSG_EXTENDED;
2373	ahc->msgout_buf[ahc->msgout_index++] = MSG_EXT_SDTR_LEN;
2374	ahc->msgout_buf[ahc->msgout_index++] = MSG_EXT_SDTR;
2375	ahc->msgout_buf[ahc->msgout_index++] = period;
2376	ahc->msgout_buf[ahc->msgout_index++] = offset;
2377	ahc->msgout_len += 5;
2378	if (bootverbose) {
2379		printf("(%s:%c:%d:%d): Sending SDTR period %x, offset %x\n",
2380		       ahc_name(ahc), devinfo->channel, devinfo->target,
2381		       devinfo->lun, period, offset);
2382	}
2383}
2384
2385/*
2386 * Build a wide negotiation message in our message
2387 * buffer based on the input parameters.
2388 */
2389static void
2390ahc_construct_wdtr(struct ahc_softc *ahc, struct ahc_devinfo *devinfo,
2391		   u_int bus_width)
2392{
2393	ahc->msgout_buf[ahc->msgout_index++] = MSG_EXTENDED;
2394	ahc->msgout_buf[ahc->msgout_index++] = MSG_EXT_WDTR_LEN;
2395	ahc->msgout_buf[ahc->msgout_index++] = MSG_EXT_WDTR;
2396	ahc->msgout_buf[ahc->msgout_index++] = bus_width;
2397	ahc->msgout_len += 4;
2398	if (bootverbose) {
2399		printf("(%s:%c:%d:%d): Sending WDTR %x\n",
2400		       ahc_name(ahc), devinfo->channel, devinfo->target,
2401		       devinfo->lun, bus_width);
2402	}
2403}
2404
2405/*
2406 * Build a parallel protocol request message in our message
2407 * buffer based on the input parameters.
2408 */
2409static void
2410ahc_construct_ppr(struct ahc_softc *ahc, struct ahc_devinfo *devinfo,
2411		  u_int period, u_int offset, u_int bus_width,
2412		  u_int ppr_options)
2413{
2414	ahc->msgout_buf[ahc->msgout_index++] = MSG_EXTENDED;
2415	ahc->msgout_buf[ahc->msgout_index++] = MSG_EXT_PPR_LEN;
2416	ahc->msgout_buf[ahc->msgout_index++] = MSG_EXT_PPR;
2417	ahc->msgout_buf[ahc->msgout_index++] = period;
2418	ahc->msgout_buf[ahc->msgout_index++] = 0;
2419	ahc->msgout_buf[ahc->msgout_index++] = offset;
2420	ahc->msgout_buf[ahc->msgout_index++] = bus_width;
2421	ahc->msgout_buf[ahc->msgout_index++] = ppr_options;
2422	ahc->msgout_len += 8;
2423	if (bootverbose) {
2424		printf("(%s:%c:%d:%d): Sending PPR bus_width %x, period %x, "
2425		       "offset %x, ppr_options %x\n", ahc_name(ahc),
2426		       devinfo->channel, devinfo->target, devinfo->lun,
2427		       bus_width, period, offset, ppr_options);
2428	}
2429}
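
/*
 * For reference, the three negotiation messages built above have the
 * following on-the-wire layouts (byte values per the SCSI/SPI
 * specifications):
 *
 *	SDTR: 0x01, 0x03, 0x01, <period factor>, <REQ/ACK offset>
 *	WDTR: 0x01, 0x02, 0x03, <width exponent: 0 = 8bit, 1 = 16bit>
 *	PPR:  0x01, 0x06, 0x04, <period>, 0x00 (reserved), <offset>,
 *	      <width exponent>, <protocol options>
 *
 * which is why msgout_len grows by 5, 4 and 8 bytes respectively.
 */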
2430
2431/*
2432 * Clear any active message state.
2433 */
2434static void
2435ahc_clear_msg_state(struct ahc_softc *ahc)
2436{
2437	ahc->msgout_len = 0;
2438	ahc->msgin_index = 0;
2439	ahc->msg_type = MSG_TYPE_NONE;
2440	if ((ahc_inb(ahc, SCSISIGI) & ATNI) != 0) {
2441		/*
2442		 * The target didn't care to respond to our
2443		 * message request, so clear ATN.
2444		 */
2445		ahc_outb(ahc, CLRSINT1, CLRATNO);
2446	}
2447	ahc_outb(ahc, MSG_OUT, MSG_NOOP);
2448	ahc_outb(ahc, SEQ_FLAGS2,
2449		 ahc_inb(ahc, SEQ_FLAGS2) & ~TARGET_MSG_PENDING);
2450}
2451
2452/*
2453 * Manual message loop handler.
2454 */
2455static void
2456ahc_handle_message_phase(struct ahc_softc *ahc)
2457{
2458	struct	ahc_devinfo devinfo;
2459	u_int	bus_phase;
2460	int	end_session;
2461
2462	ahc_fetch_devinfo(ahc, &devinfo);
2463	end_session = FALSE;
2464	bus_phase = ahc_inb(ahc, SCSISIGI) & PHASE_MASK;
2465
2466reswitch:
2467	switch (ahc->msg_type) {
2468	case MSG_TYPE_INITIATOR_MSGOUT:
2469	{
2470		int lastbyte;
2471		int phasemis;
2472		int msgdone;
2473
2474		if (ahc->msgout_len == 0)
2475			panic("HOST_MSG_LOOP interrupt with no active message");
2476
2477#ifdef AHC_DEBUG
2478		if ((ahc_debug & AHC_SHOW_MESSAGES) != 0) {
2479			ahc_print_devinfo(ahc, &devinfo);
2480			printf("INITIATOR_MSG_OUT");
		}
2481#endif
2482		phasemis = bus_phase != P_MESGOUT;
2483		if (phasemis) {
2484#ifdef AHC_DEBUG
2485			if ((ahc_debug & AHC_SHOW_MESSAGES) != 0) {
2486				printf(" PHASEMIS %s\n",
2487				       ahc_lookup_phase_entry(bus_phase)
2488							     ->phasemsg);
2489			}
2490#endif
2491			if (bus_phase == P_MESGIN) {
2492				/*
2493				 * Change gears and see if
2494				 * this message is of interest to
2495				 * us or should be passed back to
2496				 * the sequencer.
2497				 */
2498				ahc_outb(ahc, CLRSINT1, CLRATNO);
2499				ahc->send_msg_perror = FALSE;
2500				ahc->msg_type = MSG_TYPE_INITIATOR_MSGIN;
2501				ahc->msgin_index = 0;
2502				goto reswitch;
2503			}
2504			end_session = TRUE;
2505			break;
2506		}
2507
2508		if (ahc->send_msg_perror) {
2509			ahc_outb(ahc, CLRSINT1, CLRATNO);
2510			ahc_outb(ahc, CLRSINT1, CLRREQINIT);
2511#ifdef AHC_DEBUG
2512			if ((ahc_debug & AHC_SHOW_MESSAGES) != 0)
2513				printf(" byte 0x%x\n", ahc->send_msg_perror);
2514#endif
2515			ahc_outb(ahc, SCSIDATL, MSG_PARITY_ERROR);
2516			break;
2517		}
2518
2519		msgdone	= ahc->msgout_index == ahc->msgout_len;
2520		if (msgdone) {
2521			/*
2522			 * The target has requested a retry.
2523			 * Re-assert ATN, reset our message index to
2524			 * 0, and try again.
2525			 */
2526			ahc->msgout_index = 0;
2527			ahc_assert_atn(ahc);
2528		}
2529
2530		lastbyte = ahc->msgout_index == (ahc->msgout_len - 1);
2531		if (lastbyte) {
2532			/* Last byte is signified by dropping ATN */
2533			ahc_outb(ahc, CLRSINT1, CLRATNO);
2534		}
2535
2536		/*
2537		 * Clear our interrupt status and present
2538		 * the next byte on the bus.
2539		 */
2540		ahc_outb(ahc, CLRSINT1, CLRREQINIT);
2541#ifdef AHC_DEBUG
2542		if ((ahc_debug & AHC_SHOW_MESSAGES) != 0)
2543			printf(" byte 0x%x\n",
2544			       ahc->msgout_buf[ahc->msgout_index]);
2545#endif
2546		ahc_outb(ahc, SCSIDATL, ahc->msgout_buf[ahc->msgout_index++]);
2547		break;
2548	}
2549	case MSG_TYPE_INITIATOR_MSGIN:
2550	{
2551		int phasemis;
2552		int message_done;
2553
2554#ifdef AHC_DEBUG
2555		if ((ahc_debug & AHC_SHOW_MESSAGES) != 0) {
2556			ahc_print_devinfo(ahc, &devinfo);
2557			printf("INITIATOR_MSG_IN");
2558		}
2559#endif
2560		phasemis = bus_phase != P_MESGIN;
2561		if (phasemis) {
2562#ifdef AHC_DEBUG
2563			if ((ahc_debug & AHC_SHOW_MESSAGES) != 0) {
2564				printf(" PHASEMIS %s\n",
2565				       ahc_lookup_phase_entry(bus_phase)
2566							     ->phasemsg);
2567			}
2568#endif
2569			ahc->msgin_index = 0;
2570			if (bus_phase == P_MESGOUT
2571			 && (ahc->send_msg_perror == TRUE
2572			  || (ahc->msgout_len != 0
2573			   && ahc->msgout_index == 0))) {
2574				ahc->msg_type = MSG_TYPE_INITIATOR_MSGOUT;
2575				goto reswitch;
2576			}
2577			end_session = TRUE;
2578			break;
2579		}
2580
2581		/* Pull the byte in without acking it */
2582		ahc->msgin_buf[ahc->msgin_index] = ahc_inb(ahc, SCSIBUSL);
2583#ifdef AHC_DEBUG
2584		if ((ahc_debug & AHC_SHOW_MESSAGES) != 0)
2585			printf(" byte 0x%x\n",
2586			       ahc->msgin_buf[ahc->msgin_index]);
2587#endif
2588
2589		message_done = ahc_parse_msg(ahc, &devinfo);
2590
2591		if (message_done) {
2592			/*
2593			 * Clear our incoming message buffer in case there
2594			 * is another message following this one.
2595			 */
2596			ahc->msgin_index = 0;
2597
2598			/*
2599			 * If this message elicited a response,
2600			 * assert ATN so the target takes us to the
2601			 * message out phase.
2602			 */
2603			if (ahc->msgout_len != 0)
2604				ahc_assert_atn(ahc);
2605		} else
2606			ahc->msgin_index++;
2607
2608		if (message_done == MSGLOOP_TERMINATED) {
2609			end_session = TRUE;
2610		} else {
2611			/* Ack the byte */
2612			ahc_outb(ahc, CLRSINT1, CLRREQINIT);
2613			ahc_inb(ahc, SCSIDATL);
2614		}
2615		break;
2616	}
2617	case MSG_TYPE_TARGET_MSGIN:
2618	{
2619		int msgdone;
2620		int msgout_request;
2621
2622		if (ahc->msgout_len == 0)
2623			panic("Target MSGIN with no active message");
2624
2625		/*
2626		 * If we interrupted a mesgout session, the initiator
2627		 * will not know this until our first REQ.  So, we
2628		 * only honor mesgout requests after we've sent our
2629		 * first byte.
2630		 */
2631		if ((ahc_inb(ahc, SCSISIGI) & ATNI) != 0
2632		 && ahc->msgout_index > 0)
2633			msgout_request = TRUE;
2634		else
2635			msgout_request = FALSE;
2636
2637		if (msgout_request) {
2638
2639			/*
2640			 * Change gears and see if
2641			 * this message is of interest to
2642			 * us or should be passed back to
2643			 * the sequencer.
2644			 */
2645			ahc->msg_type = MSG_TYPE_TARGET_MSGOUT;
2646			ahc_outb(ahc, SCSISIGO, P_MESGOUT | BSYO);
2647			ahc->msgin_index = 0;
2648			/* Dummy read to REQ for first byte */
2649			ahc_inb(ahc, SCSIDATL);
2650			ahc_outb(ahc, SXFRCTL0,
2651				 ahc_inb(ahc, SXFRCTL0) | SPIOEN);
2652			break;
2653		}
2654
2655		msgdone = ahc->msgout_index == ahc->msgout_len;
2656		if (msgdone) {
2657			ahc_outb(ahc, SXFRCTL0,
2658				 ahc_inb(ahc, SXFRCTL0) & ~SPIOEN);
2659			end_session = TRUE;
2660			break;
2661		}
2662
2663		/*
2664		 * Present the next byte on the bus.
2665		 */
2666		ahc_outb(ahc, SXFRCTL0, ahc_inb(ahc, SXFRCTL0) | SPIOEN);
2667		ahc_outb(ahc, SCSIDATL, ahc->msgout_buf[ahc->msgout_index++]);
2668		break;
2669	}
2670	case MSG_TYPE_TARGET_MSGOUT:
2671	{
2672		int lastbyte;
2673		int msgdone;
2674
2675		/*
2676		 * The initiator signals that this is
2677		 * the last byte by dropping ATN.
2678		 */
2679		lastbyte = (ahc_inb(ahc, SCSISIGI) & ATNI) == 0;
2680
2681		/*
2682		 * Read the latched byte, but turn off SPIOEN first
2683		 * so that we don't inadvertently cause a REQ for the
2684		 * next byte.
2685		 */
2686		ahc_outb(ahc, SXFRCTL0, ahc_inb(ahc, SXFRCTL0) & ~SPIOEN);
2687		ahc->msgin_buf[ahc->msgin_index] = ahc_inb(ahc, SCSIDATL);
2688		msgdone = ahc_parse_msg(ahc, &devinfo);
2689		if (msgdone == MSGLOOP_TERMINATED) {
2690			/*
2691			 * The message is *really* done in that it caused
2692			 * us to go to bus free.  The sequencer has already
2693			 * been reset at this point, so pull the ejection
2694			 * handle.
2695			 */
2696			return;
2697		}
2698
2699		ahc->msgin_index++;
2700
2701		/*
2702		 * XXX Read spec about initiator dropping ATN too soon
2703		 *     and use msgdone to detect it.
2704		 */
2705		if (msgdone == MSGLOOP_MSGCOMPLETE) {
2706			ahc->msgin_index = 0;
2707
2708			/*
2709			 * If this message elicited a response, transition
2710			 * to the Message in phase and send it.
2711			 */
2712			if (ahc->msgout_len != 0) {
2713				ahc_outb(ahc, SCSISIGO, P_MESGIN | BSYO);
2714				ahc_outb(ahc, SXFRCTL0,
2715					 ahc_inb(ahc, SXFRCTL0) | SPIOEN);
2716				ahc->msg_type = MSG_TYPE_TARGET_MSGIN;
2717				ahc->msgin_index = 0;
2718				break;
2719			}
2720		}
2721
2722		if (lastbyte)
2723			end_session = TRUE;
2724		else {
2725			/* Ask for the next byte. */
2726			ahc_outb(ahc, SXFRCTL0,
2727				 ahc_inb(ahc, SXFRCTL0) | SPIOEN);
2728		}
2729
2730		break;
2731	}
2732	default:
2733		panic("Unknown REQINIT message type");
2734	}
2735
2736	if (end_session) {
2737		ahc_clear_msg_state(ahc);
2738		ahc_outb(ahc, RETURN_1, EXIT_MSG_LOOP);
2739	} else
2740		ahc_outb(ahc, RETURN_1, CONT_MSG_LOOP);
2741}
2742
2743/*
2744 * See if we sent a particular extended message to the target.
2745 * If "full" is true, return true only if the target saw the full
2746 * message.  If "full" is false, return true if the target saw at
2747 * least the first byte of the message.
2748 */
2749static int
2750ahc_sent_msg(struct ahc_softc *ahc, ahc_msgtype type, u_int msgval, int full)
2751{
2752	int found;
2753	u_int index;
2754
2755	found = FALSE;
2756	index = 0;
2757
2758	while (index < ahc->msgout_len) {
2759		if (ahc->msgout_buf[index] == MSG_EXTENDED) {
2760			u_int end_index;
2761
2762			end_index = index + 1 + ahc->msgout_buf[index + 1];
2763			if (ahc->msgout_buf[index+2] == msgval
2764			 && type == AHCMSG_EXT) {
2765
2766				if (full) {
2767					if (ahc->msgout_index > end_index)
2768						found = TRUE;
2769				} else if (ahc->msgout_index > index)
2770					found = TRUE;
2771			}
2772			index = end_index;
2773		} else if (ahc->msgout_buf[index] >= MSG_SIMPLE_TASK
2774			&& ahc->msgout_buf[index] <= MSG_IGN_WIDE_RESIDUE) {
2775
2776			/* Skip tag type and tag id or residue param */
2777			index += 2;
2778		} else {
2779			/* Single byte message */
2780			if (type == AHCMSG_1B
2781			 && ahc->msgout_buf[index] == msgval
2782			 && ahc->msgout_index > index)
2783				found = TRUE;
2784			index++;
2785		}
2786
2787		if (found)
2788			break;
2789	}
2790	return (found);
2791}
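
/*
 * Usage sketch: the negotiation handlers call this to distinguish a
 * reply to our own request from a target-initiated exchange, e.g.
 *
 *	if (ahc_sent_msg(ahc, AHCMSG_EXT, MSG_EXT_SDTR, TRUE)) {
 *		... we initiated SDTR; a mismatch means reject ...
 *	} else {
 *		... target initiated; construct our own SDTR reply ...
 *	}
 *
 * "full" is TRUE when the caller needs to know that every byte of the
 * extended message made it onto the bus (a completed exchange), and
 * FALSE when merely having started the message is enough, as in
 * ahc_handle_msg_reject() below.
 */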
2792
2793/*
2794 * Wait for a complete incoming message, parse it, and respond accordingly.
2795 */
2796static int
2797ahc_parse_msg(struct ahc_softc *ahc, struct ahc_devinfo *devinfo)
2798{
2799	struct	ahc_initiator_tinfo *tinfo;
2800	struct	ahc_tmode_tstate *tstate;
2801	int	reject;
2802	int	done;
2803	int	response;
2804	u_int	targ_scsirate;
2805
2806	done = MSGLOOP_IN_PROG;
2807	response = FALSE;
2808	reject = FALSE;
2809	tinfo = ahc_fetch_transinfo(ahc, devinfo->channel, devinfo->our_scsiid,
2810				    devinfo->target, &tstate);
2811	targ_scsirate = tinfo->scsirate;
2812
2813	/*
2814	 * Parse as much of the message as is available,
2815	 * rejecting it if we don't support it.  When
2816	 * the entire message is available and has been
2817	 * handled, return MSGLOOP_MSGCOMPLETE, indicating
2818	 * that we have parsed an entire message.
2819	 *
2820	 * In the case of extended messages, we accept the length
2821	 * byte outright and perform more checking once we know the
2822	 * extended message type.
2823	 */
2824	switch (ahc->msgin_buf[0]) {
2825	case MSG_DISCONNECT:
2826	case MSG_SAVEDATAPOINTER:
2827	case MSG_CMDCOMPLETE:
2828	case MSG_RESTOREPOINTERS:
2829	case MSG_IGN_WIDE_RESIDUE:
2830		/*
2831		 * End our message loop as these are messages
2832		 * the sequencer handles on its own.
2833		 */
2834		done = MSGLOOP_TERMINATED;
2835		break;
2836	case MSG_MESSAGE_REJECT:
2837		response = ahc_handle_msg_reject(ahc, devinfo);
2838		/* FALLTHROUGH */
2839	case MSG_NOOP:
2840		done = MSGLOOP_MSGCOMPLETE;
2841		break;
2842	case MSG_EXTENDED:
2843	{
2844		/* Wait for enough of the message to begin validation */
2845		if (ahc->msgin_index < 2)
2846			break;
2847		switch (ahc->msgin_buf[2]) {
2848		case MSG_EXT_SDTR:
2849		{
2850			struct	 ahc_syncrate *syncrate;
2851			u_int	 period;
2852			u_int	 ppr_options;
2853			u_int	 offset;
2854			u_int	 saved_offset;
2855
2856			if (ahc->msgin_buf[1] != MSG_EXT_SDTR_LEN) {
2857				reject = TRUE;
2858				break;
2859			}
2860
2861			/*
2862			 * Wait until we have both args before validating
2863			 * and acting on this message.
2864			 *
2865			 * Add one to MSG_EXT_SDTR_LEN to account for
2866			 * the extended message preamble.
2867			 */
2868			if (ahc->msgin_index < (MSG_EXT_SDTR_LEN + 1))
2869				break;
2870
2871			period = ahc->msgin_buf[3];
2872			ppr_options = 0;
2873			saved_offset = offset = ahc->msgin_buf[4];
2874			syncrate = ahc_devlimited_syncrate(ahc, tinfo, &period,
2875							   &ppr_options,
2876							   devinfo->role);
2877			ahc_validate_offset(ahc, tinfo, syncrate, &offset,
2878					    targ_scsirate & WIDEXFER,
2879					    devinfo->role);
2880			if (bootverbose) {
2881				printf("(%s:%c:%d:%d): Received "
2882				       "SDTR period %x, offset %x\n\t"
2883				       "Filtered to period %x, offset %x\n",
2884				       ahc_name(ahc), devinfo->channel,
2885				       devinfo->target, devinfo->lun,
2886				       ahc->msgin_buf[3], saved_offset,
2887				       period, offset);
2888			}
2889			ahc_set_syncrate(ahc, devinfo,
2890					 syncrate, period,
2891					 offset, ppr_options,
2892					 AHC_TRANS_ACTIVE|AHC_TRANS_GOAL,
2893					 /*paused*/TRUE);
2894
2895			/*
2896			 * See if we initiated Sync Negotiation
2897			 * and didn't have to fall down to async
2898			 * transfers.
2899			 */
2900			if (ahc_sent_msg(ahc, AHCMSG_EXT, MSG_EXT_SDTR, TRUE)) {
2901				/* We started it */
2902				if (saved_offset != offset) {
2903					/* Went too low - force async */
2904					reject = TRUE;
2905				}
2906			} else {
2907				/*
2908				 * Send our own SDTR in reply
2909				 */
2910				if (bootverbose
2911				 && devinfo->role == ROLE_INITIATOR) {
2912					printf("(%s:%c:%d:%d): Target "
2913					       "Initiated SDTR\n",
2914					       ahc_name(ahc), devinfo->channel,
2915					       devinfo->target, devinfo->lun);
2916				}
2917				ahc->msgout_index = 0;
2918				ahc->msgout_len = 0;
2919				ahc_construct_sdtr(ahc, devinfo,
2920						   period, offset);
2921				ahc->msgout_index = 0;
2922				response = TRUE;
2923			}
2924			done = MSGLOOP_MSGCOMPLETE;
2925			break;
2926		}
2927		case MSG_EXT_WDTR:
2928		{
2929			u_int bus_width;
2930			u_int saved_width;
2931			u_int sending_reply;
2932
2933			sending_reply = FALSE;
2934			if (ahc->msgin_buf[1] != MSG_EXT_WDTR_LEN) {
2935				reject = TRUE;
2936				break;
2937			}
2938
2939			/*
2940			 * Wait until we have our arg before validating
2941			 * and acting on this message.
2942			 *
2943			 * Add one to MSG_EXT_WDTR_LEN to account for
2944			 * the extended message preamble.
2945			 */
2946			if (ahc->msgin_index < (MSG_EXT_WDTR_LEN + 1))
2947				break;
2948
2949			bus_width = ahc->msgin_buf[3];
2950			saved_width = bus_width;
2951			ahc_validate_width(ahc, tinfo, &bus_width,
2952					   devinfo->role);
2953			if (bootverbose) {
2954				printf("(%s:%c:%d:%d): Received WDTR "
2955				       "%x filtered to %x\n",
2956				       ahc_name(ahc), devinfo->channel,
2957				       devinfo->target, devinfo->lun,
2958				       saved_width, bus_width);
2959			}
2960
2961			if (ahc_sent_msg(ahc, AHCMSG_EXT, MSG_EXT_WDTR, TRUE)) {
2962				/*
2963				 * Don't send a WDTR back to the
2964				 * target, since we asked first.
2965				 * If the width went higher than our
2966				 * request, reject it.
2967				 */
2968				if (saved_width > bus_width) {
2969					reject = TRUE;
2970					printf("(%s:%c:%d:%d): requested %dBit "
2971					       "transfers.  Rejecting...\n",
2972					       ahc_name(ahc), devinfo->channel,
2973					       devinfo->target, devinfo->lun,
2974					       8 * (0x01 << bus_width));
2975					bus_width = 0;
2976				}
2977			} else {
2978				/*
2979				 * Send our own WDTR in reply
2980				 */
2981				if (bootverbose
2982				 && devinfo->role == ROLE_INITIATOR) {
2983					printf("(%s:%c:%d:%d): Target "
2984					       "Initiated WDTR\n",
2985					       ahc_name(ahc), devinfo->channel,
2986					       devinfo->target, devinfo->lun);
2987				}
2988				ahc->msgout_index = 0;
2989				ahc->msgout_len = 0;
2990				ahc_construct_wdtr(ahc, devinfo, bus_width);
2991				ahc->msgout_index = 0;
2992				response = TRUE;
2993				sending_reply = TRUE;
2994			}
2995			ahc_set_width(ahc, devinfo, bus_width,
2996				      AHC_TRANS_ACTIVE|AHC_TRANS_GOAL,
2997				      /*paused*/TRUE);
2998			/* After a wide message, we are async */
2999			ahc_set_syncrate(ahc, devinfo,
3000					 /*syncrate*/NULL, /*period*/0,
3001					 /*offset*/0, /*ppr_options*/0,
3002					 AHC_TRANS_ACTIVE, /*paused*/TRUE);
3003			if (sending_reply == FALSE && reject == FALSE) {
3004
3005				if (tinfo->goal.offset) {
3006					ahc->msgout_index = 0;
3007					ahc->msgout_len = 0;
3008					ahc_build_transfer_msg(ahc, devinfo);
3009					ahc->msgout_index = 0;
3010					response = TRUE;
3011				}
3012			}
3013			done = MSGLOOP_MSGCOMPLETE;
3014			break;
3015		}
3016		case MSG_EXT_PPR:
3017		{
3018			struct	ahc_syncrate *syncrate;
3019			u_int	period;
3020			u_int	offset;
3021			u_int	bus_width;
3022			u_int	ppr_options;
3023			u_int	saved_width;
3024			u_int	saved_offset;
3025			u_int	saved_ppr_options;
3026
3027			if (ahc->msgin_buf[1] != MSG_EXT_PPR_LEN) {
3028				reject = TRUE;
3029				break;
3030			}
3031
3032			/*
3033			 * Wait until we have all args before validating
3034			 * and acting on this message.
3035			 *
3036			 * Add one to MSG_EXT_PPR_LEN to account for
3037			 * the extended message preamble.
3038			 */
3039			if (ahc->msgin_index < (MSG_EXT_PPR_LEN + 1))
3040				break;
3041
3042			period = ahc->msgin_buf[3];
3043			offset = ahc->msgin_buf[5];
3044			bus_width = ahc->msgin_buf[6];
3045			saved_width = bus_width;
3046			ppr_options = ahc->msgin_buf[7];
3047			/*
3048			 * According to the spec, a DT-only
3049			 * period factor with no DT option
3050			 * set implies async.
3051			 */
3052			if ((ppr_options & MSG_EXT_PPR_DT_REQ) == 0
3053			 && period == 9)
3054				offset = 0;
3055			saved_ppr_options = ppr_options;
3056			saved_offset = offset;
3057
3058			/*
3059			 * Mask out any options we don't support
3060			 * on any controller.  Transfer options are
3061			 * only available if we are negotiating wide.
3062			 */
3063			ppr_options &= MSG_EXT_PPR_DT_REQ;
3064			if (bus_width == 0)
3065				ppr_options = 0;
3066
3067			ahc_validate_width(ahc, tinfo, &bus_width,
3068					   devinfo->role);
3069			syncrate = ahc_devlimited_syncrate(ahc, tinfo, &period,
3070							   &ppr_options,
3071							   devinfo->role);
3072			ahc_validate_offset(ahc, tinfo, syncrate,
3073					    &offset, bus_width,
3074					    devinfo->role);
3075
3076			if (ahc_sent_msg(ahc, AHCMSG_EXT, MSG_EXT_PPR, TRUE)) {
3077				/*
3078				 * If we are unable to do any of the
3079				 * requested options (we went too low),
3080				 * then we'll have to reject the message.
3081				 */
3082				if (saved_width > bus_width
3083				 || saved_offset != offset
3084				 || saved_ppr_options != ppr_options) {
3085					reject = TRUE;
3086					period = 0;
3087					offset = 0;
3088					bus_width = 0;
3089					ppr_options = 0;
3090					syncrate = NULL;
3091				}
3092			} else {
3093				if (devinfo->role != ROLE_TARGET)
3094					printf("(%s:%c:%d:%d): Target "
3095					       "Initiated PPR\n",
3096					       ahc_name(ahc), devinfo->channel,
3097					       devinfo->target, devinfo->lun);
3098				else
3099					printf("(%s:%c:%d:%d): Initiator "
3100					       "Initiated PPR\n",
3101					       ahc_name(ahc), devinfo->channel,
3102					       devinfo->target, devinfo->lun);
3103				ahc->msgout_index = 0;
3104				ahc->msgout_len = 0;
3105				ahc_construct_ppr(ahc, devinfo, period, offset,
3106						  bus_width, ppr_options);
3107				ahc->msgout_index = 0;
3108				response = TRUE;
3109			}
3110			if (bootverbose) {
3111				printf("(%s:%c:%d:%d): Received PPR width %x, "
3112				       "period %x, offset %x, options %x\n"
3113				       "\tFiltered to width %x, period %x, "
3114				       "offset %x, options %x\n",
3115				       ahc_name(ahc), devinfo->channel,
3116				       devinfo->target, devinfo->lun,
3117				       saved_width, ahc->msgin_buf[3],
3118				       saved_offset, saved_ppr_options,
3119				       bus_width, period, offset, ppr_options);
3120			}
3121			ahc_set_width(ahc, devinfo, bus_width,
3122				      AHC_TRANS_ACTIVE|AHC_TRANS_GOAL,
3123				      /*paused*/TRUE);
3124			ahc_set_syncrate(ahc, devinfo,
3125					 syncrate, period,
3126					 offset, ppr_options,
3127					 AHC_TRANS_ACTIVE|AHC_TRANS_GOAL,
3128					 /*paused*/TRUE);
3129			done = MSGLOOP_MSGCOMPLETE;
3130			break;
3131		}
3132		default:
3133			/* Unknown extended message.  Reject it. */
3134			reject = TRUE;
3135			break;
3136		}
3137		break;
3138	}
3139#ifdef AHC_TARGET_MODE
3140	case MSG_BUS_DEV_RESET:
3141		ahc_handle_devreset(ahc, devinfo,
3142				    CAM_BDR_SENT,
3143				    "Bus Device Reset Received",
3144				    /*verbose_level*/0);
3145		ahc_restart(ahc);
3146		done = MSGLOOP_TERMINATED;
3147		break;
3148	case MSG_ABORT_TAG:
3149	case MSG_ABORT:
3150	case MSG_CLEAR_QUEUE:
3151	{
3152		int tag;
3153
3154		/* Target mode messages */
3155		if (devinfo->role != ROLE_TARGET) {
3156			reject = TRUE;
3157			break;
3158		}
3159		tag = SCB_LIST_NULL;
3160		if (ahc->msgin_buf[0] == MSG_ABORT_TAG)
3161			tag = ahc_inb(ahc, INITIATOR_TAG);
3162		ahc_abort_scbs(ahc, devinfo->target, devinfo->channel,
3163			       devinfo->lun, tag, ROLE_TARGET,
3164			       CAM_REQ_ABORTED);
3165
3166		tstate = ahc->enabled_targets[devinfo->our_scsiid];
3167		if (tstate != NULL) {
3168			struct ahc_tmode_lstate* lstate;
3169
3170			lstate = tstate->enabled_luns[devinfo->lun];
3171			if (lstate != NULL) {
3172				ahc_queue_lstate_event(ahc, lstate,
3173						       devinfo->our_scsiid,
3174						       ahc->msgin_buf[0],
3175						       /*arg*/tag);
3176				ahc_send_lstate_events(ahc, lstate);
3177			}
3178		}
3179		ahc_restart(ahc);
3180		done = MSGLOOP_TERMINATED;
3181		break;
3182	}
3183#endif
3184	case MSG_TERM_IO_PROC:
3185	default:
3186		reject = TRUE;
3187		break;
3188	}
3189
3190	if (reject) {
3191		/*
3192		 * Setup to reject the message.
3193		 */
3194		ahc->msgout_index = 0;
3195		ahc->msgout_len = 1;
3196		ahc->msgout_buf[0] = MSG_MESSAGE_REJECT;
3197		done = MSGLOOP_MSGCOMPLETE;
3198		response = TRUE;
3199	}
3200
3201	if (done != MSGLOOP_IN_PROG && !response)
3202		/* Clear the outgoing message buffer */
3203		ahc->msgout_len = 0;
3204
3205	return (done);
3206}
3207
3208/*
3209 * Process a message reject message.
3210 */
3211static int
3212ahc_handle_msg_reject(struct ahc_softc *ahc, struct ahc_devinfo *devinfo)
3213{
3214	/*
3215	 * What we care about here is if we had an
3216	 * outstanding SDTR or WDTR message for this
3217	 * target.  If we did, this is a signal that
3218	 * the target is refusing negotiation.
3219	 */
3220	struct scb *scb;
3221	struct ahc_initiator_tinfo *tinfo;
3222	struct ahc_tmode_tstate *tstate;
3223	u_int scb_index;
3224	u_int last_msg;
3225	int   response = 0;
3226
3227	scb_index = ahc_inb(ahc, SCB_TAG);
3228	scb = ahc_lookup_scb(ahc, scb_index);
3229	tinfo = ahc_fetch_transinfo(ahc, devinfo->channel,
3230				    devinfo->our_scsiid,
3231				    devinfo->target, &tstate);
3232	/* Might be necessary */
3233	last_msg = ahc_inb(ahc, LAST_MSG);
3234
3235	if (ahc_sent_msg(ahc, AHCMSG_EXT, MSG_EXT_PPR, /*full*/FALSE)) {
3236		/*
3237		 * Target does not support the PPR message.
3238		 * Attempt to negotiate SPI-2 style.
3239		 */
3240		if (bootverbose) {
3241			printf("(%s:%c:%d:%d): PPR Rejected. "
3242			       "Trying WDTR/SDTR\n",
3243			       ahc_name(ahc), devinfo->channel,
3244			       devinfo->target, devinfo->lun);
3245		}
3246		tinfo->goal.ppr_options = 0;
3247		tinfo->curr.transport_version = 2;
3248		tinfo->goal.transport_version = 2;
3249		ahc->msgout_index = 0;
3250		ahc->msgout_len = 0;
3251		ahc_build_transfer_msg(ahc, devinfo);
3252		ahc->msgout_index = 0;
3253		response = 1;
3254	} else if (ahc_sent_msg(ahc, AHCMSG_EXT, MSG_EXT_WDTR, /*full*/FALSE)) {
3255
3256		/* note 8bit xfers */
3257		printf("(%s:%c:%d:%d): refuses WIDE negotiation.  Using "
3258		       "8bit transfers\n", ahc_name(ahc),
3259		       devinfo->channel, devinfo->target, devinfo->lun);
3260		ahc_set_width(ahc, devinfo, MSG_EXT_WDTR_BUS_8_BIT,
3261			      AHC_TRANS_ACTIVE|AHC_TRANS_GOAL,
3262			      /*paused*/TRUE);
3263		/*
3264		 * No need to clear the sync rate.  If the target
3265		 * did not accept the command, our syncrate is
3266		 * unaffected.  If the target started the negotiation,
3267		 * but rejected our response, we already cleared the
3268		 * sync rate before sending our WDTR.
3269		 */
3270		if (tinfo->goal.offset) {
3271
3272			/* Start the sync negotiation */
3273			ahc->msgout_index = 0;
3274			ahc->msgout_len = 0;
3275			ahc_build_transfer_msg(ahc, devinfo);
3276			ahc->msgout_index = 0;
3277			response = 1;
3278		}
3279	} else if (ahc_sent_msg(ahc, AHCMSG_EXT, MSG_EXT_SDTR, /*full*/FALSE)) {
3280		/* note asynch xfers and clear flag */
3281		ahc_set_syncrate(ahc, devinfo, /*syncrate*/NULL, /*period*/0,
3282				 /*offset*/0, /*ppr_options*/0,
3283				 AHC_TRANS_ACTIVE|AHC_TRANS_GOAL,
3284				 /*paused*/TRUE);
3285		printf("(%s:%c:%d:%d): refuses synchronous negotiation. "
3286		       "Using asynchronous transfers\n",
3287		       ahc_name(ahc), devinfo->channel,
3288		       devinfo->target, devinfo->lun);
3289	} else if ((scb->hscb->control & MSG_SIMPLE_TASK) != 0) {
3290		int tag_type;
3291		int mask;
3292
3293		tag_type = (scb->hscb->control & MSG_SIMPLE_TASK);
3294
3295		if (tag_type == MSG_SIMPLE_TASK) {
3296			printf("(%s:%c:%d:%d): refuses tagged commands.  "
3297			       "Performing non-tagged I/O\n", ahc_name(ahc),
3298			       devinfo->channel, devinfo->target, devinfo->lun);
3299			ahc_set_tags(ahc, devinfo, AHC_QUEUE_NONE);
3300			mask = ~0x23;
3301		} else {
3302			printf("(%s:%c:%d:%d): refuses %s tagged commands.  "
3303			       "Performing simple queue tagged I/O only\n",
3304			       ahc_name(ahc), devinfo->channel, devinfo->target,
3305			       devinfo->lun, tag_type == MSG_ORDERED_TASK
3306			       ? "ordered" : "head of queue");
3307			ahc_set_tags(ahc, devinfo, AHC_QUEUE_BASIC);
3308			mask = ~0x03;
3309		}
3310
3311		/*
3312		 * Resend the identify for this CCB as the target
3313		 * may believe that the selection is invalid otherwise.
3314		 */
3315		ahc_outb(ahc, SCB_CONTROL,
3316			 ahc_inb(ahc, SCB_CONTROL) & mask);
3317	 	scb->hscb->control &= mask;
3318		ahc_set_transaction_tag(scb, /*enabled*/FALSE,
3319					/*type*/MSG_SIMPLE_TASK);
3320		ahc_outb(ahc, MSG_OUT, MSG_IDENTIFYFLAG);
3321		ahc_assert_atn(ahc);
3322
3323		/*
3324		 * This transaction is now at the head of
3325		 * the untagged queue for this target.
3326		 */
3327		if ((ahc->flags & AHC_SCB_BTT) == 0) {
3328			struct scb_tailq *untagged_q;
3329
3330			untagged_q =
3331			    &(ahc->untagged_queues[devinfo->target_offset]);
3332			TAILQ_INSERT_HEAD(untagged_q, scb, links.tqe);
3333			scb->flags |= SCB_UNTAGGEDQ;
3334		}
3335		ahc_busy_tcl(ahc, BUILD_TCL(scb->hscb->scsiid, devinfo->lun),
3336			     scb->hscb->tag);
3337
3338		/*
3339		 * Requeue all tagged commands for this target
3340		 * currently in our possession so they can be
3341		 * converted to untagged commands.
3342		 */
3343		ahc_search_qinfifo(ahc, SCB_GET_TARGET(ahc, scb),
3344				   SCB_GET_CHANNEL(ahc, scb),
3345				   SCB_GET_LUN(scb), /*tag*/SCB_LIST_NULL,
3346				   ROLE_INITIATOR, CAM_REQUEUE_REQ,
3347				   SEARCH_COMPLETE);
3348	} else {
3349		/*
3350		 * Otherwise, we ignore it.
3351		 */
3352		printf("%s:%c:%d: Message reject for %x -- ignored\n",
3353		       ahc_name(ahc), devinfo->channel, devinfo->target,
3354		       last_msg);
3355	}
3356	return (response);
3357}
3358
3359/*
3360 * Process an ignore wide residue message.
3361 */
3362static void
3363ahc_handle_ign_wide_residue(struct ahc_softc *ahc, struct ahc_devinfo *devinfo)
3364{
3365	u_int scb_index;
3366	struct scb *scb;
3367
3368	scb_index = ahc_inb(ahc, SCB_TAG);
3369	scb = ahc_lookup_scb(ahc, scb_index);
3370	/*
3371	 * XXX Actually check data direction in the sequencer?
3372	 * Perhaps add datadir to some spare bits in the hscb?
3373	 */
3374	if ((ahc_inb(ahc, SEQ_FLAGS) & DPHASE) == 0
3375	 || ahc_get_transfer_dir(scb) != CAM_DIR_IN) {
3376		/*
3377		 * Ignore the message if we haven't
3378		 * seen an appropriate data phase yet.
3379		 */
3380	} else {
3381		/*
3382		 * If the residual occurred on the last
3383		 * transfer and the transfer request was
3384		 * expected to end on an odd count, do
3385		 * nothing.  Otherwise, subtract a byte
3386		 * and update the residual count accordingly.
3387		 */
3388		uint32_t sgptr;
3389
3390		sgptr = ahc_inb(ahc, SCB_RESIDUAL_SGPTR);
3391		if ((sgptr & SG_LIST_NULL) != 0
3392		 && ahc_inb(ahc, DATA_COUNT_ODD) == 1) {
3393			/*
3394			 * If the residual occurred on the last
3395			 * transfer and the transfer request was
3396			 * expected to end on an odd count, do
3397			 * nothing.
3398			 */
3399		} else {
3400			struct ahc_dma_seg *sg;
3401			uint32_t data_cnt;
3402			uint32_t data_addr;
3403			uint32_t sglen;
3404
3405			/* Pull in the rest of the sgptr */
3406			sgptr |= (ahc_inb(ahc, SCB_RESIDUAL_SGPTR + 3) << 24)
3407			      | (ahc_inb(ahc, SCB_RESIDUAL_SGPTR + 2) << 16)
3408			      | (ahc_inb(ahc, SCB_RESIDUAL_SGPTR + 1) << 8);
3409			sgptr &= SG_PTR_MASK;
3410			data_cnt = (ahc_inb(ahc, SCB_RESIDUAL_DATACNT+3) << 24)
3411				 | (ahc_inb(ahc, SCB_RESIDUAL_DATACNT+2) << 16)
3412				 | (ahc_inb(ahc, SCB_RESIDUAL_DATACNT+1) << 8)
3413				 | (ahc_inb(ahc, SCB_RESIDUAL_DATACNT));
3414
3415			data_addr = (ahc_inb(ahc, SHADDR + 3) << 24)
3416				  | (ahc_inb(ahc, SHADDR + 2) << 16)
3417				  | (ahc_inb(ahc, SHADDR + 1) << 8)
3418				  | (ahc_inb(ahc, SHADDR));
3419
3420			data_cnt += 1;
3421			data_addr -= 1;
3422
3423			sg = ahc_sg_bus_to_virt(scb, sgptr);
3424			/*
3425			 * The residual sg ptr points to the next S/G
3426			 * to load so we must go back one.
3427			 */
3428			sg--;
3429			sglen = ahc_le32toh(sg->len) & AHC_SG_LEN_MASK;
3430			if (sg != scb->sg_list
3431			 && sglen < (data_cnt & AHC_SG_LEN_MASK)) {
3432
3433				sg--;
3434				sglen = ahc_le32toh(sg->len);
3435				/*
3436				 * Preserve High Address and SG_LIST bits
3437				 * while setting the count to 1.
3438				 */
3439				data_cnt = 1 | (sglen & (~AHC_SG_LEN_MASK));
3440				data_addr = ahc_le32toh(sg->addr)
3441					  + (sglen & AHC_SG_LEN_MASK) - 1;
3442
3443				/*
3444				 * Increment sg so it points to the
3445				 * "next" sg.
3446				 */
3447				sg++;
3448				sgptr = ahc_sg_virt_to_bus(scb, sg);
3449				ahc_outb(ahc, SCB_RESIDUAL_SGPTR + 3,
3450					 sgptr >> 24);
3451				ahc_outb(ahc, SCB_RESIDUAL_SGPTR + 2,
3452					 sgptr >> 16);
3453				ahc_outb(ahc, SCB_RESIDUAL_SGPTR + 1,
3454					 sgptr >> 8);
3455				ahc_outb(ahc, SCB_RESIDUAL_SGPTR, sgptr);
3456			}
3457
3458			ahc_outb(ahc, SCB_RESIDUAL_DATACNT + 3, data_cnt >> 24);
3459			ahc_outb(ahc, SCB_RESIDUAL_DATACNT + 2, data_cnt >> 16);
3460			ahc_outb(ahc, SCB_RESIDUAL_DATACNT + 1, data_cnt >> 8);
3461			ahc_outb(ahc, SCB_RESIDUAL_DATACNT, data_cnt);
3462		}
3463	}
3464}
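
/*
 * In other words: the target has told us that the final byte of the
 * last wide (two byte) data phase was a pad, so one byte that the
 * hardware counted as transferred must be given back -- the residual
 * count grows by one and the shadow data address shrinks by one.  When
 * that byte turns out to lie in the previous S/G element (the current
 * element's length is smaller than the adjusted residual), the residual
 * is rebuilt to cover just the last byte of that previous element and
 * the residual S/G pointer is advanced so that it again names the
 * "next" element to load.
 */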
3465
3466
3467/*
3468 * Reinitialize the data pointers for the active transfer
3469 * based on its current residual.
3470 */
3471static void
3472ahc_reinitialize_dataptrs(struct ahc_softc *ahc)
3473{
3474	struct	 scb *scb;
3475	struct	 ahc_dma_seg *sg;
3476	u_int	 scb_index;
3477	uint32_t sgptr;
3478	uint32_t resid;
3479	uint32_t dataptr;
3480
3481	scb_index = ahc_inb(ahc, SCB_TAG);
3482	scb = ahc_lookup_scb(ahc, scb_index);
3483	sgptr = (ahc_inb(ahc, SCB_RESIDUAL_SGPTR + 3) << 24)
3484	      | (ahc_inb(ahc, SCB_RESIDUAL_SGPTR + 2) << 16)
3485	      | (ahc_inb(ahc, SCB_RESIDUAL_SGPTR + 1) << 8)
3486	      |	ahc_inb(ahc, SCB_RESIDUAL_SGPTR);
3487
3488	sgptr &= SG_PTR_MASK;
3489	sg = ahc_sg_bus_to_virt(scb, sgptr);
3490
3491	/* The residual sg_ptr always points to the next sg */
3492	sg--;
3493
3494	resid = (ahc_inb(ahc, SCB_RESIDUAL_DATACNT + 2) << 16)
3495	      | (ahc_inb(ahc, SCB_RESIDUAL_DATACNT + 1) << 8)
3496	      | ahc_inb(ahc, SCB_RESIDUAL_DATACNT);
3497
3498	dataptr = ahc_le32toh(sg->addr)
3499		+ (ahc_le32toh(sg->len) & AHC_SG_LEN_MASK)
3500		- resid;
3501	if ((ahc->flags & AHC_39BIT_ADDRESSING) != 0) {
3502		u_int dscommand1;
3503
3504		dscommand1 = ahc_inb(ahc, DSCOMMAND1);
3505		ahc_outb(ahc, DSCOMMAND1, dscommand1 | HADDLDSEL0);
3506		ahc_outb(ahc, HADDR,
3507			 (ahc_le32toh(sg->len) >> 24) & SG_HIGH_ADDR_BITS);
3508		ahc_outb(ahc, DSCOMMAND1, dscommand1);
3509	}
3510	ahc_outb(ahc, HADDR + 3, dataptr >> 24);
3511	ahc_outb(ahc, HADDR + 2, dataptr >> 16);
3512	ahc_outb(ahc, HADDR + 1, dataptr >> 8);
3513	ahc_outb(ahc, HADDR, dataptr);
3514	ahc_outb(ahc, HCNT + 2, resid >> 16);
3515	ahc_outb(ahc, HCNT + 1, resid >> 8);
3516	ahc_outb(ahc, HCNT, resid);
3517	if ((ahc->features & AHC_ULTRA2) == 0) {
3518		ahc_outb(ahc, STCNT + 2, resid >> 16);
3519		ahc_outb(ahc, STCNT + 1, resid >> 8);
3520		ahc_outb(ahc, STCNT, resid);
3521	}
3522}
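/*
 * Worked example for the arithmetic above (illustrative numbers only):
 * if the current S/G element describes a 64KB segment at bus address
 * 0x00100000 and the sequencer reports a residual of 0x2000 bytes,
 * then
 *
 *	dataptr = 0x00100000 + 0x10000 - 0x2000 = 0x0010E000
 *
 * i.e. HADDR is reloaded to point at the first untransferred byte while
 * HCNT (and STCNT on pre-Ultra2 parts) is reloaded with the 0x2000 byte
 * residual.
 */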
3523
3524/*
3525 * Handle the effects of issuing a bus device reset message.
3526 */
3527static void
3528ahc_handle_devreset(struct ahc_softc *ahc, struct ahc_devinfo *devinfo,
3529		    cam_status status, char *message, int verbose_level)
3530{
3531#ifdef AHC_TARGET_MODE
3532	struct ahc_tmode_tstate* tstate;
3533	u_int lun;
3534#endif
3535	int found;
3536
3537	found = ahc_abort_scbs(ahc, devinfo->target, devinfo->channel,
3538			       CAM_LUN_WILDCARD, SCB_LIST_NULL, devinfo->role,
3539			       status);
3540
3541#ifdef AHC_TARGET_MODE
3542	/*
3543	 * Send an immediate notify ccb to all target mode peripheral
3544	 * drivers affected by this action.
3545	 */
3546	tstate = ahc->enabled_targets[devinfo->our_scsiid];
3547	if (tstate != NULL) {
3548		for (lun = 0; lun < AHC_NUM_LUNS; lun++) {
3549			struct ahc_tmode_lstate* lstate;
3550
3551			lstate = tstate->enabled_luns[lun];
3552			if (lstate == NULL)
3553				continue;
3554
3555			ahc_queue_lstate_event(ahc, lstate, devinfo->our_scsiid,
3556					       MSG_BUS_DEV_RESET, /*arg*/0);
3557			ahc_send_lstate_events(ahc, lstate);
3558		}
3559	}
3560#endif
3561
3562	/*
3563	 * Go back to async/narrow transfers and renegotiate.
3564	 */
3565	ahc_set_width(ahc, devinfo, MSG_EXT_WDTR_BUS_8_BIT,
3566		      AHC_TRANS_CUR, /*paused*/TRUE);
3567	ahc_set_syncrate(ahc, devinfo, /*syncrate*/NULL,
3568			 /*period*/0, /*offset*/0, /*ppr_options*/0,
3569			 AHC_TRANS_CUR, /*paused*/TRUE);
3570
3571	ahc_send_async(ahc, devinfo->channel, devinfo->target,
3572		       CAM_LUN_WILDCARD, AC_SENT_BDR, NULL);
3573
3574	if (message != NULL
3575	 && (verbose_level <= bootverbose))
3576		printf("%s: %s on %c:%d. %d SCBs aborted\n", ahc_name(ahc),
3577		       message, devinfo->channel, devinfo->target, found);
3578}
3579
3580#ifdef AHC_TARGET_MODE
3581static void
3582ahc_setup_target_msgin(struct ahc_softc *ahc, struct ahc_devinfo *devinfo,
3583		       struct scb *scb)
3584{
3585
3586	/*
3587	 * To facilitate adding multiple messages together,
3588	 * each routine should increment the index and len
3589	 * variables instead of setting them explicitly.
3590	 */
3591	ahc->msgout_index = 0;
3592	ahc->msgout_len = 0;
3593
3594	if (scb != NULL && (scb->flags & SCB_AUTO_NEGOTIATE) != 0)
3595		ahc_build_transfer_msg(ahc, devinfo);
3596	else
3597		panic("ahc_intr: AWAITING target message with no message");
3598
3599	ahc->msgout_index = 0;
3600	ahc->msg_type = MSG_TYPE_TARGET_MSGIN;
3601}
3602#endif
3603/**************************** Initialization **********************************/
3604/*
3605 * Allocate a controller structure for a new device
3606 * and perform initial initialization.
3607 */
3608struct ahc_softc *
3609ahc_alloc(void *platform_arg, char *name)
3610{
3611	struct  ahc_softc *ahc;
3612	int	i;
3613
3614#ifndef	__FreeBSD__
3615	ahc = malloc(sizeof(*ahc), M_DEVBUF, M_NOWAIT);
3616	if (!ahc) {
3617		printf("aic7xxx: cannot malloc softc!\n");
3618		free(name, M_DEVBUF);
3619		return NULL;
3620	}
3621#else
3622	ahc = device_get_softc((device_t)platform_arg);
3623#endif
3624	memset(ahc, 0, sizeof(*ahc));
3625	ahc->seep_config = malloc(sizeof(*ahc->seep_config),
3626				  M_DEVBUF, M_NOWAIT);
3627	if (ahc->seep_config == NULL) {
3628#ifndef	__FreeBSD__
3629		free(ahc, M_DEVBUF);
3630#endif
3631		free(name, M_DEVBUF);
3632		return (NULL);
3633	}
3634	LIST_INIT(&ahc->pending_scbs);
3635	/* We don't know our unit number until the OSM sets it */
3636	ahc->name = name;
3637	ahc->unit = -1;
3638	ahc->description = NULL;
3639	ahc->channel = 'A';
3640	ahc->channel_b = 'B';
3641	ahc->chip = AHC_NONE;
3642	ahc->features = AHC_FENONE;
3643	ahc->bugs = AHC_BUGNONE;
3644	ahc->flags = AHC_FNONE;
3645
3646	for (i = 0; i < AHC_NUM_TARGETS; i++)
3647		TAILQ_INIT(&ahc->untagged_queues[i]);
3648	if (ahc_platform_alloc(ahc, platform_arg) != 0) {
3649		ahc_free(ahc);
3650		ahc = NULL;
3651	}
3652	return (ahc);
3653}
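/*
 * Illustrative sketch (not part of the driver): a platform front end
 * would typically drive the core entry points in roughly the order
 * below.  The wrapper name, the "dev" handle, and the error handling
 * are hypothetical; the real probe, register mapping, and chip
 * identification steps live in the OSM and bus front ends.
 */
#if 0
static int
example_attach(void *dev, char *name, int unit)
{
	struct ahc_softc *ahc;

	ahc = ahc_alloc(dev, name);
	if (ahc == NULL)
		return (ENOMEM);
	/* ... bus specific setup fills in chip, features, and bugs ... */
	if (ahc_softc_init(ahc) != 0
	 || ahc_init(ahc) != 0) {
		ahc_free(ahc);
		return (ENXIO);
	}
	ahc_set_unit(ahc, unit);
	ahc_intr_enable(ahc, TRUE);
	ahc_softc_insert(ahc);
	return (0);
}
#endif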
3654
3655int
3656ahc_softc_init(struct ahc_softc *ahc)
3657{
3658
3659	/* The IRQMS bit is only valid on VL and EISA chips */
3660	if ((ahc->chip & AHC_PCI) == 0)
3661		ahc->unpause = ahc_inb(ahc, HCNTRL) & IRQMS;
3662	else
3663		ahc->unpause = 0;
3664	ahc->pause = ahc->unpause | PAUSE;
3665	/* XXX The shared scb data stuff should be deprecated */
3666	if (ahc->scb_data == NULL) {
3667		ahc->scb_data = malloc(sizeof(*ahc->scb_data),
3668				       M_DEVBUF, M_NOWAIT);
3669		if (ahc->scb_data == NULL)
3670			return (ENOMEM);
3671		memset(ahc->scb_data, 0, sizeof(*ahc->scb_data));
3672	}
3673
3674	return (0);
3675}
3676
3677void
3678ahc_softc_insert(struct ahc_softc *ahc)
3679{
3680	struct ahc_softc *list_ahc;
3681
3682#if AHC_PCI_CONFIG > 0
3683	/*
3684	 * Second Function PCI devices need to inherit some
3685	 * settings from function 0.
3686	 */
3687	if ((ahc->chip & AHC_BUS_MASK) == AHC_PCI
3688	 && (ahc->features & AHC_MULTI_FUNC) != 0) {
3689		TAILQ_FOREACH(list_ahc, &ahc_tailq, links) {
3690			ahc_dev_softc_t list_pci;
3691			ahc_dev_softc_t pci;
3692
3693			list_pci = list_ahc->dev_softc;
3694			pci = ahc->dev_softc;
3695			if (ahc_get_pci_slot(list_pci) == ahc_get_pci_slot(pci)
3696			 && ahc_get_pci_bus(list_pci) == ahc_get_pci_bus(pci)) {
3697				struct ahc_softc *master;
3698				struct ahc_softc *slave;
3699
3700				if (ahc_get_pci_function(list_pci) == 0) {
3701					master = list_ahc;
3702					slave = ahc;
3703				} else {
3704					master = ahc;
3705					slave = list_ahc;
3706				}
3707				slave->flags &= ~AHC_BIOS_ENABLED;
3708				slave->flags |=
3709				    master->flags & AHC_BIOS_ENABLED;
3710				slave->flags &= ~AHC_PRIMARY_CHANNEL;
3711				slave->flags |=
3712				    master->flags & AHC_PRIMARY_CHANNEL;
3713				break;
3714			}
3715		}
3716	}
3717#endif
3718
3719	/*
3720	 * Insertion sort into our list of softcs.
3721	 */
3722	list_ahc = TAILQ_FIRST(&ahc_tailq);
3723	while (list_ahc != NULL
3724	    && ahc_softc_comp(list_ahc, ahc) <= 0)
3725		list_ahc = TAILQ_NEXT(list_ahc, links);
3726	if (list_ahc != NULL)
3727		TAILQ_INSERT_BEFORE(list_ahc, ahc, links);
3728	else
3729		TAILQ_INSERT_TAIL(&ahc_tailq, ahc, links);
3730	ahc->init_level++;
3731}
3732
3733/*
3734 * Verify that the passed in softc pointer is for a
3735 * controller that is still configured.
3736 */
3737struct ahc_softc *
3738ahc_find_softc(struct ahc_softc *ahc)
3739{
3740	struct ahc_softc *list_ahc;
3741
3742	TAILQ_FOREACH(list_ahc, &ahc_tailq, links) {
3743		if (list_ahc == ahc)
3744			return (ahc);
3745	}
3746	return (NULL);
3747}
3748
3749void
3750ahc_set_unit(struct ahc_softc *ahc, int unit)
3751{
3752	ahc->unit = unit;
3753}
3754
3755void
3756ahc_set_name(struct ahc_softc *ahc, char *name)
3757{
3758	if (ahc->name != NULL)
3759		free(ahc->name, M_DEVBUF);
3760	ahc->name = name;
3761}
3762
3763void
3764ahc_free(struct ahc_softc *ahc)
3765{
3766	int i;
3767
3768	ahc_fini_scbdata(ahc);
3769	switch (ahc->init_level) {
3770	default:
3771	case 5:
3772		ahc_shutdown(ahc);
3773		TAILQ_REMOVE(&ahc_tailq, ahc, links);
3774		/* FALLTHROUGH */
3775	case 4:
3776		ahc_dmamap_unload(ahc, ahc->shared_data_dmat,
3777				  ahc->shared_data_dmamap);
3778		/* FALLTHROUGH */
3779	case 3:
3780		ahc_dmamem_free(ahc, ahc->shared_data_dmat, ahc->qoutfifo,
3781				ahc->shared_data_dmamap);
3782		ahc_dmamap_destroy(ahc, ahc->shared_data_dmat,
3783				   ahc->shared_data_dmamap);
3784		/* FALLTHROUGH */
3785	case 2:
3786		ahc_dma_tag_destroy(ahc, ahc->shared_data_dmat);
3787	case 1:
3788#ifndef __linux__
3789		ahc_dma_tag_destroy(ahc, ahc->buffer_dmat);
3790#endif
3791		break;
3792	case 0:
3793		break;
3794	}
3795
3796#ifndef __linux__
3797	ahc_dma_tag_destroy(ahc, ahc->parent_dmat);
3798#endif
3799	ahc_platform_free(ahc);
3800	for (i = 0; i < AHC_NUM_TARGETS; i++) {
3801		struct ahc_tmode_tstate *tstate;
3802
3803		tstate = ahc->enabled_targets[i];
3804		if (tstate != NULL) {
3805#ifdef AHC_TARGET_MODE
3806			int j;
3807
3808			for (j = 0; j < AHC_NUM_LUNS; j++) {
3809				struct ahc_tmode_lstate *lstate;
3810
3811				lstate = tstate->enabled_luns[j];
3812				if (lstate != NULL) {
3813					xpt_free_path(lstate->path);
3814					free(lstate, M_DEVBUF);
3815				}
3816			}
3817#endif
3818			free(tstate, M_DEVBUF);
3819		}
3820	}
3821#ifdef AHC_TARGET_MODE
3822	if (ahc->black_hole != NULL) {
3823		xpt_free_path(ahc->black_hole->path);
3824		free(ahc->black_hole, M_DEVBUF);
3825	}
3826#endif
3827	if (ahc->name != NULL)
3828		free(ahc->name, M_DEVBUF);
3829	if (ahc->seep_config != NULL)
3830		free(ahc->seep_config, M_DEVBUF);
3831#ifndef __FreeBSD__
3832	free(ahc, M_DEVBUF);
3833#endif
3834	return;
3835}
3836
3837void
3838ahc_shutdown(void *arg)
3839{
3840	struct	ahc_softc *ahc;
3841	int	i;
3842
3843	ahc = (struct ahc_softc *)arg;
3844
3845	/* This will reset most registers to 0, but not all */
3846	ahc_reset(ahc);
3847	ahc_outb(ahc, SCSISEQ, 0);
3848	ahc_outb(ahc, SXFRCTL0, 0);
3849	ahc_outb(ahc, DSPCISTATUS, 0);
3850
3851	for (i = TARG_SCSIRATE; i < SCSICONF; i++)
3852		ahc_outb(ahc, i, 0);
3853}
3854
3855/*
3856 * Reset the controller and record some information about it
3857 * that is only available just after a reset.
3858 */
3859int
3860ahc_reset(struct ahc_softc *ahc)
3861{
3862	u_int	sblkctl;
3863	u_int	sxfrctl1_a, sxfrctl1_b;
3864	int	wait;
3865
3866	/*
3867	 * Preserve the value of the SXFRCTL1 register for all channels.
3868	 * It contains settings that affect termination and we don't want
3869	 * to disturb the integrity of the bus.
3870	 */
3871	ahc_pause(ahc);
3872	sxfrctl1_b = 0;
3873	if ((ahc->chip & AHC_CHIPID_MASK) == AHC_AIC7770) {
3874		u_int sblkctl;
3875
3876		/*
3877		 * Save channel B's settings in case this chip
3878		 * is set up for TWIN channel operation.
3879		 */
3880		sblkctl = ahc_inb(ahc, SBLKCTL);
3881		ahc_outb(ahc, SBLKCTL, sblkctl | SELBUSB);
3882		sxfrctl1_b = ahc_inb(ahc, SXFRCTL1);
3883		ahc_outb(ahc, SBLKCTL, sblkctl & ~SELBUSB);
3884	}
3885	sxfrctl1_a = ahc_inb(ahc, SXFRCTL1);
3886
3887	ahc_outb(ahc, HCNTRL, CHIPRST | ahc->pause);
3888
3889	/*
3890	 * Ensure that the reset has finished.  We delay 1000us
3891	 * prior to reading the register to make sure the chip
3892	 * has sufficiently completed its reset to handle register
3893	 * accesses.
3894	 */
3895	wait = 1000;
3896	do {
3897		ahc_delay(1000);
3898	} while (--wait && !(ahc_inb(ahc, HCNTRL) & CHIPRSTACK));
3899
3900	if (wait == 0) {
3901		printf("%s: WARNING - Failed chip reset!  "
3902		       "Trying to initialize anyway.\n", ahc_name(ahc));
3903	}
3904	ahc_outb(ahc, HCNTRL, ahc->pause);
3905
3906	/* Determine channel configuration */
3907	sblkctl = ahc_inb(ahc, SBLKCTL) & (SELBUSB|SELWIDE);
3908	/* No Twin Channel PCI cards */
3909	if ((ahc->chip & AHC_PCI) != 0)
3910		sblkctl &= ~SELBUSB;
3911	switch (sblkctl) {
3912	case 0:
3913		/* Single Narrow Channel */
3914		break;
3915	case 2:
3916		/* Wide Channel */
3917		ahc->features |= AHC_WIDE;
3918		break;
3919	case 8:
3920		/* Twin Channel */
3921		ahc->features |= AHC_TWIN;
3922		break;
3923	default:
3924		printf(" Unsupported adapter type.  Ignoring\n");
3925		return(-1);
3926	}
3927
3928	/*
3929	 * Reload sxfrctl1.
3930	 *
3931	 * We must always initialize STPWEN to 1 before we
3932	 * restore the saved values.  STPWEN is initialized
3933	 * to a tri-state condition which can only be cleared
3934	 * by turning it on.
3935	 */
3936	if ((ahc->features & AHC_TWIN) != 0) {
3937		u_int sblkctl;
3938
3939		sblkctl = ahc_inb(ahc, SBLKCTL);
3940		ahc_outb(ahc, SBLKCTL, sblkctl | SELBUSB);
3941		ahc_outb(ahc, SXFRCTL1, sxfrctl1_b);
3942		ahc_outb(ahc, SBLKCTL, sblkctl & ~SELBUSB);
3943	}
3944	ahc_outb(ahc, SXFRCTL1, sxfrctl1_a);
3945
3946#ifdef AHC_DUMP_SEQ
3947	if (ahc->init_level == 0)
3948		ahc_dumpseq(ahc);
3949#endif
3950
3951	return (0);
3952}
3953
3954/*
3955 * Determine the number of SCBs available on the controller
3956 */
3957int
3958ahc_probe_scbs(struct ahc_softc *ahc) {
3959	int i;
3960
3961	for (i = 0; i < AHC_SCB_MAX; i++) {
3962
3963		ahc_outb(ahc, SCBPTR, i);
3964		ahc_outb(ahc, SCB_BASE, i);
3965		if (ahc_inb(ahc, SCB_BASE) != i)
3966			break;
3967		ahc_outb(ahc, SCBPTR, 0);
3968		if (ahc_inb(ahc, SCB_BASE) != 0)
3969			break;
3970	}
3971	return (i);
3972}
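/*
 * Usage note (illustrative): the probe works by writing each candidate
 * index into its own SCB and verifying that the write neither fails nor
 * aliases back onto SCB 0.  ahc_init_scbdata() below consumes the
 * result and treats zero as fatal:
 *
 *	scb_data->maxhscbs = ahc_probe_scbs(ahc);
 *	if (scb_data->maxhscbs == 0)
 *		return (ENXIO);
 */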
3973
3974static void
3975ahc_dmamap_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error)
3976{
3977	bus_addr_t *baddr;
3978
3979	baddr = (bus_addr_t *)arg;
3980	*baddr = segs->ds_addr;
3981}
3982
3983static void
3984ahc_build_free_scb_list(struct ahc_softc *ahc)
3985{
3986	int scbsize;
3987	int i;
3988
3989	scbsize = 32;
3990	if ((ahc->flags & AHC_LSCBS_ENABLED) != 0)
3991		scbsize = 64;
3992
3993	for (i = 0; i < ahc->scb_data->maxhscbs; i++) {
3994		int j;
3995
3996		ahc_outb(ahc, SCBPTR, i);
3997
3998		/*
3999		 * Touch all SCB bytes to avoid parity errors
4000		 * should one of our debugging routines read
4001		 * an otherwise uninitialized byte.
4002		 */
4003		for (j = 0; j < scbsize; j++)
4004			ahc_outb(ahc, SCB_BASE+j, 0xFF);
4005
4006		/* Clear the control byte. */
4007		ahc_outb(ahc, SCB_CONTROL, 0);
4008
4009		/* Set the next pointer */
4010		if ((ahc->flags & AHC_PAGESCBS) != 0)
4011			ahc_outb(ahc, SCB_NEXT, i+1);
4012		else
4013			ahc_outb(ahc, SCB_NEXT, SCB_LIST_NULL);
4014
4015		/* Make the tag number, SCSIID, and lun invalid */
4016		ahc_outb(ahc, SCB_TAG, SCB_LIST_NULL);
4017		ahc_outb(ahc, SCB_SCSIID, 0xFF);
4018		ahc_outb(ahc, SCB_LUN, 0xFF);
4019	}
4020
4021	/* Make sure that the last SCB terminates the free list */
4022	ahc_outb(ahc, SCBPTR, i-1);
4023	ahc_outb(ahc, SCB_NEXT, SCB_LIST_NULL);
4024}
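/*
 * Example of the resulting layout (illustrative): with SCB paging
 * enabled and, say, 16 hardware SCBs, the SCB_NEXT bytes form the chain
 *
 *	0 -> 1 -> 2 -> ... -> 15 -> SCB_LIST_NULL
 *
 * (FREE_SCBH, written by ahc_init_scbdata(), points at SCB 0), while
 * every SCB_TAG is parked at SCB_LIST_NULL so a stale entry can never
 * match a live transaction.
 */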
4025
4026static int
4027ahc_init_scbdata(struct ahc_softc *ahc)
4028{
4029	struct scb_data *scb_data;
4030
4031	scb_data = ahc->scb_data;
4032	SLIST_INIT(&scb_data->free_scbs);
4033	SLIST_INIT(&scb_data->sg_maps);
4034
4035	/* Allocate SCB resources */
4036	scb_data->scbarray =
4037	    (struct scb *)malloc(sizeof(struct scb) * AHC_SCB_MAX_ALLOC,
4038				 M_DEVBUF, M_NOWAIT);
4039	if (scb_data->scbarray == NULL)
4040		return (ENOMEM);
4041	memset(scb_data->scbarray, 0, sizeof(struct scb) * AHC_SCB_MAX_ALLOC);
4042
4043	/* Determine the number of hardware SCBs and initialize them */
4044
4045	scb_data->maxhscbs = ahc_probe_scbs(ahc);
4046	if ((ahc->flags & AHC_PAGESCBS) != 0) {
4047		/* SCB 0 heads the free list */
4048		ahc_outb(ahc, FREE_SCBH, 0);
4049	} else {
4050		ahc_outb(ahc, FREE_SCBH, SCB_LIST_NULL);
4051	}
4052
4053	if (ahc->scb_data->maxhscbs == 0) {
4054		printf("%s: No SCB space found\n", ahc_name(ahc));
4055		return (ENXIO);
4056	}
4057
4058	ahc_build_free_scb_list(ahc);
4059
4060	/*
4061	 * Create our DMA tags.  These tags define the kinds of device
4062	 * accessible memory allocations and memory mappings we will
4063	 * need to perform during normal operation.
4064	 *
4065	 * Unless we need to further restrict the allocation, we rely
4066	 * on the restrictions of the parent dmat, hence the common
4067	 * use of MAXADDR and MAXSIZE.
4068	 */
4069
4070	/* DMA tag for our hardware scb structures */
4071	if (ahc_dma_tag_create(ahc, ahc->parent_dmat, /*alignment*/1,
4072			       /*boundary*/BUS_SPACE_MAXADDR_32BIT + 1,
4073			       /*lowaddr*/BUS_SPACE_MAXADDR_32BIT,
4074			       /*highaddr*/BUS_SPACE_MAXADDR,
4075			       /*filter*/NULL, /*filterarg*/NULL,
4076			       AHC_SCB_MAX_ALLOC * sizeof(struct hardware_scb),
4077			       /*nsegments*/1,
4078			       /*maxsegsz*/BUS_SPACE_MAXSIZE_32BIT,
4079			       /*flags*/0, &scb_data->hscb_dmat) != 0) {
4080		goto error_exit;
4081	}
4082
4083	scb_data->init_level++;
4084
4085	/* Allocation for our hscbs */
4086	if (ahc_dmamem_alloc(ahc, scb_data->hscb_dmat,
4087			     (void **)&scb_data->hscbs,
4088			     BUS_DMA_NOWAIT, &scb_data->hscb_dmamap) != 0) {
4089		goto error_exit;
4090	}
4091
4092	scb_data->init_level++;
4093
4094	/* And permanently map them */
4095	ahc_dmamap_load(ahc, scb_data->hscb_dmat, scb_data->hscb_dmamap,
4096			scb_data->hscbs,
4097			AHC_SCB_MAX_ALLOC * sizeof(struct hardware_scb),
4098			ahc_dmamap_cb, &scb_data->hscb_busaddr, /*flags*/0);
4099
4100	scb_data->init_level++;
4101
4102	/* DMA tag for our sense buffers */
4103	if (ahc_dma_tag_create(ahc, ahc->parent_dmat, /*alignment*/1,
4104			       /*boundary*/BUS_SPACE_MAXADDR_32BIT + 1,
4105			       /*lowaddr*/BUS_SPACE_MAXADDR_32BIT,
4106			       /*highaddr*/BUS_SPACE_MAXADDR,
4107			       /*filter*/NULL, /*filterarg*/NULL,
4108			       AHC_SCB_MAX_ALLOC * sizeof(struct scsi_sense_data),
4109			       /*nsegments*/1,
4110			       /*maxsegsz*/BUS_SPACE_MAXSIZE_32BIT,
4111			       /*flags*/0, &scb_data->sense_dmat) != 0) {
4112		goto error_exit;
4113	}
4114
4115	scb_data->init_level++;
4116
4117	/* Allocate them */
4118	if (ahc_dmamem_alloc(ahc, scb_data->sense_dmat,
4119			     (void **)&scb_data->sense,
4120			     BUS_DMA_NOWAIT, &scb_data->sense_dmamap) != 0) {
4121		goto error_exit;
4122	}
4123
4124	scb_data->init_level++;
4125
4126	/* And permanently map them */
4127	ahc_dmamap_load(ahc, scb_data->sense_dmat, scb_data->sense_dmamap,
4128			scb_data->sense,
4129			AHC_SCB_MAX_ALLOC * sizeof(struct scsi_sense_data),
4130			ahc_dmamap_cb, &scb_data->sense_busaddr, /*flags*/0);
4131
4132	scb_data->init_level++;
4133
4134	/* DMA tag for our S/G structures.  We allocate in page sized chunks */
4135	if (ahc_dma_tag_create(ahc, ahc->parent_dmat, /*alignment*/8,
4136			       /*boundary*/BUS_SPACE_MAXADDR_32BIT + 1,
4137			       /*lowaddr*/BUS_SPACE_MAXADDR_32BIT,
4138			       /*highaddr*/BUS_SPACE_MAXADDR,
4139			       /*filter*/NULL, /*filterarg*/NULL,
4140			       PAGE_SIZE, /*nsegments*/1,
4141			       /*maxsegsz*/BUS_SPACE_MAXSIZE_32BIT,
4142			       /*flags*/0, &scb_data->sg_dmat) != 0) {
4143		goto error_exit;
4144	}
4145
4146	scb_data->init_level++;
4147
4148	/* Perform initial CCB allocation */
4149	memset(scb_data->hscbs, 0,
4150	       AHC_SCB_MAX_ALLOC * sizeof(struct hardware_scb));
4151	ahc_alloc_scbs(ahc);
4152
4153	if (scb_data->numscbs == 0) {
4154		printf("%s: ahc_init_scbdata - "
4155		       "Unable to allocate initial scbs\n",
4156		       ahc_name(ahc));
4157		goto error_exit;
4158	}
4159
4160	/*
4161	 * Tell the sequencer which SCB will be the next one it receives.
4162	 */
4163	ahc->next_queued_scb = ahc_get_scb(ahc);
4164	ahc_outb(ahc, NEXT_QUEUED_SCB, ahc->next_queued_scb->hscb->tag);
4165
4166	/*
4167	 * Note that we were successful
4168	 */
4169	return (0);
4170
4171error_exit:
4172
4173	return (ENOMEM);
4174}
4175
4176static void
4177ahc_fini_scbdata(struct ahc_softc *ahc)
4178{
4179	struct scb_data *scb_data;
4180
4181	scb_data = ahc->scb_data;
4182	if (scb_data == NULL)
4183		return;
4184
4185	switch (scb_data->init_level) {
4186	default:
4187	case 7:
4188	{
4189		struct sg_map_node *sg_map;
4190
4191		while ((sg_map = SLIST_FIRST(&scb_data->sg_maps))!= NULL) {
4192			SLIST_REMOVE_HEAD(&scb_data->sg_maps, links);
4193			ahc_dmamap_unload(ahc, scb_data->sg_dmat,
4194					  sg_map->sg_dmamap);
4195			ahc_dmamem_free(ahc, scb_data->sg_dmat,
4196					sg_map->sg_vaddr,
4197					sg_map->sg_dmamap);
4198			free(sg_map, M_DEVBUF);
4199		}
4200		ahc_dma_tag_destroy(ahc, scb_data->sg_dmat);
4201	}
4202	case 6:
4203		ahc_dmamap_unload(ahc, scb_data->sense_dmat,
4204				  scb_data->sense_dmamap);
4205	case 5:
4206		ahc_dmamem_free(ahc, scb_data->sense_dmat, scb_data->sense,
4207				scb_data->sense_dmamap);
4208		ahc_dmamap_destroy(ahc, scb_data->sense_dmat,
4209				   scb_data->sense_dmamap);
4210	case 4:
4211		ahc_dma_tag_destroy(ahc, scb_data->sense_dmat);
4212	case 3:
4213		ahc_dmamap_unload(ahc, scb_data->hscb_dmat,
4214				  scb_data->hscb_dmamap);
4215	case 2:
4216		ahc_dmamem_free(ahc, scb_data->hscb_dmat, scb_data->hscbs,
4217				scb_data->hscb_dmamap);
4218		ahc_dmamap_destroy(ahc, scb_data->hscb_dmat,
4219				   scb_data->hscb_dmamap);
4220	case 1:
4221		ahc_dma_tag_destroy(ahc, scb_data->hscb_dmat);
4222		break;
4223	case 0:
4224		break;
4225	}
4226	if (scb_data->scbarray != NULL)
4227		free(scb_data->scbarray, M_DEVBUF);
4228}
4229
4230void
4231ahc_alloc_scbs(struct ahc_softc *ahc)
4232{
4233	struct scb_data *scb_data;
4234	struct scb *next_scb;
4235	struct sg_map_node *sg_map;
4236	bus_addr_t physaddr;
4237	struct ahc_dma_seg *segs;
4238	int newcount;
4239	int i;
4240
4241	scb_data = ahc->scb_data;
4242	if (scb_data->numscbs >= AHC_SCB_MAX_ALLOC)
4243		/* Can't allocate any more */
4244		return;
4245
4246	next_scb = &scb_data->scbarray[scb_data->numscbs];
4247
4248	sg_map = malloc(sizeof(*sg_map), M_DEVBUF, M_NOWAIT);
4249
4250	if (sg_map == NULL)
4251		return;
4252
4253	/* Allocate S/G space for the next batch of SCBS */
4254	if (ahc_dmamem_alloc(ahc, scb_data->sg_dmat,
4255			     (void **)&sg_map->sg_vaddr,
4256			     BUS_DMA_NOWAIT, &sg_map->sg_dmamap) != 0) {
4257		free(sg_map, M_DEVBUF);
4258		return;
4259	}
4260
4261	SLIST_INSERT_HEAD(&scb_data->sg_maps, sg_map, links);
4262
4263	ahc_dmamap_load(ahc, scb_data->sg_dmat, sg_map->sg_dmamap,
4264			sg_map->sg_vaddr, PAGE_SIZE, ahc_dmamap_cb,
4265			&sg_map->sg_physaddr, /*flags*/0);
4266
4267	segs = sg_map->sg_vaddr;
4268	physaddr = sg_map->sg_physaddr;
4269
4270	newcount = (PAGE_SIZE / (AHC_NSEG * sizeof(struct ahc_dma_seg)));
4271	newcount = MIN(newcount, (AHC_SCB_MAX_ALLOC - scb_data->numscbs));
4272	for (i = 0; i < newcount; i++) {
4273		struct scb_platform_data *pdata;
4274#ifndef __linux__
4275		int error;
4276#endif
4277		pdata = (struct scb_platform_data *)malloc(sizeof(*pdata),
4278							   M_DEVBUF, M_NOWAIT);
4279		if (pdata == NULL)
4280			break;
4281		next_scb->platform_data = pdata;
4282		next_scb->sg_map = sg_map;
4283		next_scb->sg_list = segs;
4284		/*
4285		 * The sequencer always starts with the second entry.
4286		 * The first entry is embedded in the scb.
4287		 */
4288		next_scb->sg_list_phys = physaddr + sizeof(struct ahc_dma_seg);
4289		next_scb->ahc_softc = ahc;
4290		next_scb->flags = SCB_FREE;
4291#ifndef __linux__
4292		error = ahc_dmamap_create(ahc, ahc->buffer_dmat, /*flags*/0,
4293					  &next_scb->dmamap);
4294		if (error != 0)
4295			break;
4296#endif
4297		next_scb->hscb = &scb_data->hscbs[scb_data->numscbs];
4298		next_scb->hscb->tag = ahc->scb_data->numscbs;
4299		SLIST_INSERT_HEAD(&ahc->scb_data->free_scbs,
4300				  next_scb, links.sle);
4301		segs += AHC_NSEG;
4302		physaddr += (AHC_NSEG * sizeof(struct ahc_dma_seg));
4303		next_scb++;
4304		ahc->scb_data->numscbs++;
4305	}
4306}
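/*
 * Worked example for the batch size above (illustrative values): with a
 * 4096 byte page and, say, AHC_NSEG == 32 segments of 8 bytes each
 * (struct ahc_dma_seg is a 32-bit address/length pair),
 *
 *	newcount = 4096 / (32 * 8) = 16
 *
 * so each call carves out S/G space for up to 16 additional SCBs,
 * further clipped so numscbs never exceeds AHC_SCB_MAX_ALLOC.
 */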
4307
4308void
4309ahc_controller_info(struct ahc_softc *ahc, char *buf)
4310{
4311	int len;
4312
4313	len = sprintf(buf, "%s: ", ahc_chip_names[ahc->chip & AHC_CHIPID_MASK]);
4314	buf += len;
4315	if ((ahc->features & AHC_TWIN) != 0)
4316 		len = sprintf(buf, "Twin Channel, A SCSI Id=%d, "
4317			      "B SCSI Id=%d, primary %c, ",
4318			      ahc->our_id, ahc->our_id_b,
4319			      (ahc->flags & AHC_PRIMARY_CHANNEL) + 'A');
4320	else {
4321		const char *speed;
4322		const char *type;
4323
4324		speed = "";
4325		if ((ahc->features & AHC_ULTRA) != 0) {
4326			speed = "Ultra ";
4327		} else if ((ahc->features & AHC_DT) != 0) {
4328			speed = "Ultra160 ";
4329		} else if ((ahc->features & AHC_ULTRA2) != 0) {
4330			speed = "Ultra2 ";
4331		}
4332		if ((ahc->features & AHC_WIDE) != 0) {
4333			type = "Wide";
4334		} else {
4335			type = "Single";
4336		}
4337		len = sprintf(buf, "%s%s Channel %c, SCSI Id=%d, ",
4338			      speed, type, ahc->channel, ahc->our_id);
4339	}
4340	buf += len;
4341
4342	if ((ahc->flags & AHC_PAGESCBS) != 0)
4343		sprintf(buf, "%d/%d SCBs",
4344			ahc->scb_data->maxhscbs, AHC_MAX_QUEUE);
4345	else
4346		sprintf(buf, "%d SCBs", ahc->scb_data->maxhscbs);
4347}
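/*
 * Usage sketch (illustrative): callers pass a buffer large enough for
 * the longest description.  The 256 byte size, the wrapper name, and
 * the sample output below are assumptions, not values taken from this
 * file.
 */
#if 0
static void
example_announce(struct ahc_softc *ahc)
{
	char buf[256];

	ahc_controller_info(ahc, buf);
	/* e.g. "aic7880: Ultra Wide Channel A, SCSI Id=7, ..." */
	printf("%s: %s\n", ahc_name(ahc), buf);
}
#endif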
4348
4349/*
4350 * Start the board, ready for normal operation
4351 */
4352int
4353ahc_init(struct ahc_softc *ahc)
4354{
4355	int	 max_targ;
4356	int	 i;
4357	int	 term;
4358	u_int	 scsi_conf;
4359	u_int	 scsiseq_template;
4360	u_int	 ultraenb;
4361	u_int	 discenable;
4362	u_int	 tagenable;
4363	size_t	 driver_data_size;
4364	uint32_t physaddr;
4365
4366#ifdef AHC_DEBUG_SEQUENCER
4367	ahc->flags |= AHC_SEQUENCER_DEBUG;
4368#endif
4369
4370#ifdef AHC_PRINT_SRAM
4371	printf("Scratch Ram:");
4372	for (i = 0x20; i < 0x5f; i++) {
4373		if (((i % 8) == 0) && (i != 0)) {
4374			printf ("\n              ");
4375		}
4376		printf (" 0x%x", ahc_inb(ahc, i));
4377	}
4378	if ((ahc->features & AHC_MORE_SRAM) != 0) {
4379		for (i = 0x70; i < 0x7f; i++) {
4380			if (((i % 8) == 0) && (i != 0)) {
4381				printf ("\n              ");
4382			}
4383			printf (" 0x%x", ahc_inb(ahc, i));
4384		}
4385	}
4386	printf ("\n");
4387	/*
4388	 * Reading uninitialized scratch ram may
4389	 * generate parity errors.
4390	 */
4391	ahc_outb(ahc, CLRINT, CLRPARERR);
4392	ahc_outb(ahc, CLRINT, CLRBRKADRINT);
4393#endif
4394	max_targ = 15;
4395
4396	/*
4397	 * Assume we have a board at this stage and it has been reset.
4398	 */
4399	if ((ahc->flags & AHC_USEDEFAULTS) != 0)
4400		ahc->our_id = ahc->our_id_b = 7;
4401
4402	/*
4403	 * Default to allowing initiator operations.
4404	 */
4405	ahc->flags |= AHC_INITIATORROLE;
4406
4407	/*
4408	 * Only allow target mode features if this unit has them enabled.
4409	 */
4410	if ((AHC_TMODE_ENABLE & (0x1 << ahc->unit)) == 0)
4411		ahc->features &= ~AHC_TARGETMODE;
4412
4413#ifndef __linux__
4414	/* DMA tag for mapping buffers into device visible space. */
4415	if (ahc_dma_tag_create(ahc, ahc->parent_dmat, /*alignment*/1,
4416			       /*boundary*/BUS_SPACE_MAXADDR_32BIT + 1,
4417			       /*lowaddr*/BUS_SPACE_MAXADDR,
4418			       /*highaddr*/BUS_SPACE_MAXADDR,
4419			       /*filter*/NULL, /*filterarg*/NULL,
4420			       /*maxsize*/MAXBSIZE, /*nsegments*/AHC_NSEG,
4421			       /*maxsegsz*/AHC_MAXTRANSFER_SIZE,
4422			       /*flags*/BUS_DMA_ALLOCNOW,
4423			       &ahc->buffer_dmat) != 0) {
4424		return (ENOMEM);
4425	}
4426#endif
4427
4428	ahc->init_level++;
4429
4430	/*
4431	 * DMA tag for our command fifos and other data in system memory
4432	 * the card's sequencer must be able to access.  For initiator
4433	 * roles, we need to allocate space for the qinfifo and qoutfifo.
4434	 * The qinfifo and qoutfifo are composed of 256 1 byte elements.
4435	 * When providing for the target mode role, we must additionally
4436	 * provide space for the incoming target command fifo and an extra
4437	 * byte to deal with a dma bug in some chip versions.
4438	 */
4439	driver_data_size = 2 * 256 * sizeof(uint8_t);
4440	if ((ahc->features & AHC_TARGETMODE) != 0)
4441		driver_data_size += AHC_TMODE_CMDS * sizeof(struct target_cmd)
4442				 + /*DMA WideOdd Bug Buffer*/1;
4443	if (ahc_dma_tag_create(ahc, ahc->parent_dmat, /*alignment*/1,
4444			       /*boundary*/BUS_SPACE_MAXADDR_32BIT + 1,
4445			       /*lowaddr*/BUS_SPACE_MAXADDR_32BIT,
4446			       /*highaddr*/BUS_SPACE_MAXADDR,
4447			       /*filter*/NULL, /*filterarg*/NULL,
4448			       driver_data_size,
4449			       /*nsegments*/1,
4450			       /*maxsegsz*/BUS_SPACE_MAXSIZE_32BIT,
4451			       /*flags*/0, &ahc->shared_data_dmat) != 0) {
4452		return (ENOMEM);
4453	}
4454
4455	ahc->init_level++;
4456
4457	/* Allocation of driver data */
4458	if (ahc_dmamem_alloc(ahc, ahc->shared_data_dmat,
4459			     (void **)&ahc->qoutfifo,
4460			     BUS_DMA_NOWAIT, &ahc->shared_data_dmamap) != 0) {
4461		return (ENOMEM);
4462	}
4463
4464	ahc->init_level++;
4465
4466	/* And permanently map it in */
4467	ahc_dmamap_load(ahc, ahc->shared_data_dmat, ahc->shared_data_dmamap,
4468			ahc->qoutfifo, driver_data_size, ahc_dmamap_cb,
4469			&ahc->shared_data_busaddr, /*flags*/0);
4470
4471	if ((ahc->features & AHC_TARGETMODE) != 0) {
4472		ahc->targetcmds = (struct target_cmd *)ahc->qoutfifo;
4473		ahc->qoutfifo = (uint8_t *)&ahc->targetcmds[AHC_TMODE_CMDS];
4474		ahc->dma_bug_buf = ahc->shared_data_busaddr
4475				 + driver_data_size - 1;
4476		/* All target command blocks start out invalid. */
4477		for (i = 0; i < AHC_TMODE_CMDS; i++)
4478			ahc->targetcmds[i].cmd_valid = 0;
4479		ahc_sync_tqinfifo(ahc, BUS_DMASYNC_PREREAD);
4480		ahc->tqinfifonext = 1;
4481		ahc_outb(ahc, KERNEL_TQINPOS, ahc->tqinfifonext - 1);
4482		ahc_outb(ahc, TQINPOS, ahc->tqinfifonext);
4483		ahc->qoutfifo = (uint8_t *)&ahc->targetcmds[256];
4484	}
4485	ahc->qinfifo = &ahc->qoutfifo[256];
4486
4487	ahc->init_level++;
4488
4489	/* Allocate SCB data now that buffer_dmat is initialized */
4490	if (ahc->scb_data->maxhscbs == 0)
4491		if (ahc_init_scbdata(ahc) != 0)
4492			return (ENOMEM);
4493
4494	/*
4495	 * Allocate a tstate to house information for our
4496	 * initiator presence on the bus as well as the user
4497	 * data for any target mode initiator.
4498	 */
4499	if (ahc_alloc_tstate(ahc, ahc->our_id, 'A') == NULL) {
4500		printf("%s: unable to allocate ahc_tmode_tstate.  "
4501		       "Failing attach\n", ahc_name(ahc));
4502		return (ENOMEM);
4503	}
4504
4505	if ((ahc->features & AHC_TWIN) != 0) {
4506		if (ahc_alloc_tstate(ahc, ahc->our_id_b, 'B') == NULL) {
4507			printf("%s: unable to allocate ahc_tmode_tstate.  "
4508			       "Failing attach\n", ahc_name(ahc));
4509			return (ENOMEM);
4510		}
4511	}
4512
4513	ahc_outb(ahc, SEQ_FLAGS, 0);
4514	ahc_outb(ahc, SEQ_FLAGS2, 0);
4515
4516	if (ahc->scb_data->maxhscbs < AHC_SCB_MAX_ALLOC) {
4517		ahc->flags |= AHC_PAGESCBS;
4518	} else {
4519		ahc->flags &= ~AHC_PAGESCBS;
4520	}
4521
4522#ifdef AHC_DEBUG
4523	if (ahc_debug & AHC_SHOW_MISC) {
4524		printf("%s: hardware scb %d bytes; kernel scb %d bytes; "
4525		       "ahc_dma %d bytes\n",
4526			ahc_name(ahc),
4527			sizeof(struct hardware_scb),
4528			sizeof(struct scb),
4529			sizeof(struct ahc_dma_seg));
4530	}
4531#endif /* AHC_DEBUG */
4532
4533	/* Set the SCSI Id, SXFRCTL0, SXFRCTL1, and SIMODE1, for both channels*/
4534	if (ahc->features & AHC_TWIN) {
4535
4536		/*
4537		 * The device is gated to channel B after a chip reset,
4538		 * so set those values first
4539		 */
4540		ahc_outb(ahc, SBLKCTL, ahc_inb(ahc, SBLKCTL) | SELBUSB);
4541		term = (ahc->flags & AHC_TERM_ENB_B) != 0 ? STPWEN : 0;
4542		ahc_outb(ahc, SCSIID, ahc->our_id_b);
4543		scsi_conf = ahc_inb(ahc, SCSICONF + 1);
4544		ahc_outb(ahc, SXFRCTL1, (scsi_conf & (ENSPCHK|STIMESEL))
4545					|term|ahc->seltime_b|ENSTIMER|ACTNEGEN);
4546		if ((ahc->features & AHC_ULTRA2) != 0)
4547			ahc_outb(ahc, SIMODE0, ahc_inb(ahc, SIMODE0)|ENIOERR);
4548		ahc_outb(ahc, SIMODE1, ENSELTIMO|ENSCSIRST|ENSCSIPERR);
4549		ahc_outb(ahc, SXFRCTL0, DFON|SPIOEN);
4550
4551		if ((scsi_conf & RESET_SCSI) != 0
4552		 && (ahc->flags & AHC_INITIATORROLE) != 0)
4553			ahc->flags |= AHC_RESET_BUS_B;
4554
4555		/* Select Channel A */
4556		ahc_outb(ahc, SBLKCTL, ahc_inb(ahc, SBLKCTL) & ~SELBUSB);
4557	}
4558	term = (ahc->flags & AHC_TERM_ENB_A) != 0 ? STPWEN : 0;
4559	if ((ahc->features & AHC_ULTRA2) != 0)
4560		ahc_outb(ahc, SCSIID_ULTRA2, ahc->our_id);
4561	else
4562		ahc_outb(ahc, SCSIID, ahc->our_id);
4563	scsi_conf = ahc_inb(ahc, SCSICONF);
4564	ahc_outb(ahc, SXFRCTL1, (scsi_conf & (ENSPCHK|STIMESEL))
4565				|term|ahc->seltime
4566				|ENSTIMER|ACTNEGEN);
4567	if ((ahc->features & AHC_ULTRA2) != 0)
4568		ahc_outb(ahc, SIMODE0, ahc_inb(ahc, SIMODE0)|ENIOERR);
4569	ahc_outb(ahc, SIMODE1, ENSELTIMO|ENSCSIRST|ENSCSIPERR);
4570	ahc_outb(ahc, SXFRCTL0, DFON|SPIOEN);
4571
4572	if ((scsi_conf & RESET_SCSI) != 0
4573	 && (ahc->flags & AHC_INITIATORROLE) != 0)
4574		ahc->flags |= AHC_RESET_BUS_A;
4575
4576	/*
4577	 * Look at the information that board initialization or
4578	 * the board bios has left us.
4579	 */
4580	ultraenb = 0;
4581	tagenable = ALL_TARGETS_MASK;
4582
4583	/* Grab the disconnection disable table and invert it for our needs */
4584	if ((ahc->flags & AHC_USEDEFAULTS) != 0) {
4585		printf("%s: Host Adapter Bios disabled.  Using default SCSI "
4586			"device parameters\n", ahc_name(ahc));
4587		ahc->flags |= AHC_EXTENDED_TRANS_A|AHC_EXTENDED_TRANS_B|
4588			      AHC_TERM_ENB_A|AHC_TERM_ENB_B;
4589		discenable = ALL_TARGETS_MASK;
4590		if ((ahc->features & AHC_ULTRA) != 0)
4591			ultraenb = ALL_TARGETS_MASK;
4592	} else {
4593		discenable = ~((ahc_inb(ahc, DISC_DSB + 1) << 8)
4594			   | ahc_inb(ahc, DISC_DSB));
4595		if ((ahc->features & (AHC_ULTRA|AHC_ULTRA2)) != 0)
4596			ultraenb = (ahc_inb(ahc, ULTRA_ENB + 1) << 8)
4597				      | ahc_inb(ahc, ULTRA_ENB);
4598	}
4599
4600	if ((ahc->features & (AHC_WIDE|AHC_TWIN)) == 0)
4601		max_targ = 7;
4602
4603	for (i = 0; i <= max_targ; i++) {
4604		struct ahc_initiator_tinfo *tinfo;
4605		struct ahc_tmode_tstate *tstate;
4606		u_int our_id;
4607		u_int target_id;
4608		char channel;
4609
4610		channel = 'A';
4611		our_id = ahc->our_id;
4612		target_id = i;
4613		if (i > 7 && (ahc->features & AHC_TWIN) != 0) {
4614			channel = 'B';
4615			our_id = ahc->our_id_b;
4616			target_id = i % 8;
4617		}
4618		tinfo = ahc_fetch_transinfo(ahc, channel, our_id,
4619					    target_id, &tstate);
4620		/* Default to async narrow across the board */
4621		memset(tinfo, 0, sizeof(*tinfo));
4622		if (ahc->flags & AHC_USEDEFAULTS) {
4623			if ((ahc->features & AHC_WIDE) != 0)
4624				tinfo->user.width = MSG_EXT_WDTR_BUS_16_BIT;
4625
4626			/*
4627			 * These will be truncated when we determine the
4628			 * connection type we have with the target.
4629			 */
4630			tinfo->user.period = ahc_syncrates->period;
4631			tinfo->user.offset = ~0;
4632		} else {
4633			u_int scsirate;
4634			uint16_t mask;
4635
4636			/* Take the settings leftover in scratch RAM. */
4637			scsirate = ahc_inb(ahc, TARG_SCSIRATE + i);
4638			mask = (0x01 << i);
4639			if ((ahc->features & AHC_ULTRA2) != 0) {
4640				u_int offset;
4641				u_int maxsync;
4642
4643				if ((scsirate & SOFS) == 0x0F) {
4644					/*
4645					 * Haven't negotiated yet,
4646					 * so the format is different.
4647					 */
4648				scsirate = (scsirate & SXFR) >> 4
4649					 | ((ultraenb & mask)
4650					  ? 0x08 : 0x0)
4651					 | (scsirate & WIDEXFER);
4652					offset = MAX_OFFSET_ULTRA2;
4653				} else
4654					offset = ahc_inb(ahc, TARG_OFFSET + i);
4655				if ((scsirate & ~WIDEXFER) == 0 && offset != 0)
4656					/* Set to the lowest sync rate, 5MHz */
4657					scsirate |= 0x1c;
4658				maxsync = AHC_SYNCRATE_ULTRA2;
4659				if ((ahc->features & AHC_DT) != 0)
4660					maxsync = AHC_SYNCRATE_DT;
4661				tinfo->user.period =
4662				    ahc_find_period(ahc, scsirate, maxsync);
4663				if (offset == 0)
4664					tinfo->user.period = 0;
4665				else
4666					tinfo->user.offset = ~0;
4667				if ((scsirate & SXFR_ULTRA2) <= 8/*10MHz*/
4668				 && (ahc->features & AHC_DT) != 0)
4669					tinfo->user.ppr_options =
4670					    MSG_EXT_PPR_DT_REQ;
4671			} else if ((scsirate & SOFS) != 0) {
4672				if ((scsirate & SXFR) == 0x40
4673				 && (ultraenb & mask) != 0) {
4674					/* Treat 10MHz as a non-ultra speed */
4675					scsirate &= ~SXFR;
4676				 	ultraenb &= ~mask;
4677				}
4678				tinfo->user.period =
4679				    ahc_find_period(ahc, scsirate,
4680						    (ultraenb & mask)
4681						   ? AHC_SYNCRATE_ULTRA
4682						   : AHC_SYNCRATE_FAST);
4683				if (tinfo->user.period != 0)
4684					tinfo->user.offset = ~0;
4685			}
4686			if (tinfo->user.period == 0)
4687				tinfo->user.offset = 0;
4688			if ((scsirate & WIDEXFER) != 0
4689			 && (ahc->features & AHC_WIDE) != 0)
4690				tinfo->user.width = MSG_EXT_WDTR_BUS_16_BIT;
4691			tinfo->user.protocol_version = 4;
4692			if ((ahc->features & AHC_DT) != 0)
4693				tinfo->user.transport_version = 3;
4694			else
4695				tinfo->user.transport_version = 2;
4696			tinfo->goal.protocol_version = 2;
4697			tinfo->goal.transport_version = 2;
4698			tinfo->curr.protocol_version = 2;
4699			tinfo->curr.transport_version = 2;
4700		}
4701		tstate->ultraenb = ultraenb;
4702	}
4703	ahc->user_discenable = discenable;
4704	ahc->user_tagenable = tagenable;
4705
4706	/* There are no untagged SCBs active yet. */
4707	for (i = 0; i < 16; i++) {
4708		ahc_unbusy_tcl(ahc, BUILD_TCL(i << 4, 0));
4709		if ((ahc->flags & AHC_SCB_BTT) != 0) {
4710			int lun;
4711
4712			/*
4713			 * The SCB based BTT allows an entry per
4714			 * target and lun pair.
4715			 */
4716			for (lun = 1; lun < AHC_NUM_LUNS; lun++)
4717				ahc_unbusy_tcl(ahc, BUILD_TCL(i << 4, lun));
4718		}
4719	}
4720
4721	/* All of our queues are empty */
4722	for (i = 0; i < 256; i++)
4723		ahc->qoutfifo[i] = SCB_LIST_NULL;
4724	ahc_sync_qoutfifo(ahc, BUS_DMASYNC_PREREAD);
4725
4726	for (i = 0; i < 256; i++)
4727		ahc->qinfifo[i] = SCB_LIST_NULL;
4728
4729	if ((ahc->features & AHC_MULTI_TID) != 0) {
4730		ahc_outb(ahc, TARGID, 0);
4731		ahc_outb(ahc, TARGID + 1, 0);
4732	}
4733
4734	/*
4735	 * Tell the sequencer where it can find our arrays in memory.
4736	 */
4737	physaddr = ahc->scb_data->hscb_busaddr;
4738	ahc_outb(ahc, HSCB_ADDR, physaddr & 0xFF);
4739	ahc_outb(ahc, HSCB_ADDR + 1, (physaddr >> 8) & 0xFF);
4740	ahc_outb(ahc, HSCB_ADDR + 2, (physaddr >> 16) & 0xFF);
4741	ahc_outb(ahc, HSCB_ADDR + 3, (physaddr >> 24) & 0xFF);
4742
4743	physaddr = ahc->shared_data_busaddr;
4744	ahc_outb(ahc, SHARED_DATA_ADDR, physaddr & 0xFF);
4745	ahc_outb(ahc, SHARED_DATA_ADDR + 1, (physaddr >> 8) & 0xFF);
4746	ahc_outb(ahc, SHARED_DATA_ADDR + 2, (physaddr >> 16) & 0xFF);
4747	ahc_outb(ahc, SHARED_DATA_ADDR + 3, (physaddr >> 24) & 0xFF);
4748
4749	/*
4750	 * Initialize the group code to command length table.
4751	 * This overrides the values in TARG_SCSIRATE, so only
4752	 * set up the table after we have processed that information.
4753	 */
4754	ahc_outb(ahc, CMDSIZE_TABLE, 5);
4755	ahc_outb(ahc, CMDSIZE_TABLE + 1, 9);
4756	ahc_outb(ahc, CMDSIZE_TABLE + 2, 9);
4757	ahc_outb(ahc, CMDSIZE_TABLE + 3, 0);
4758	ahc_outb(ahc, CMDSIZE_TABLE + 4, 15);
4759	ahc_outb(ahc, CMDSIZE_TABLE + 5, 11);
4760	ahc_outb(ahc, CMDSIZE_TABLE + 6, 0);
4761	ahc_outb(ahc, CMDSIZE_TABLE + 7, 0);
4762
4763	/* Tell the sequencer of our initial queue positions */
4764	ahc_outb(ahc, KERNEL_QINPOS, 0);
4765	ahc_outb(ahc, QINPOS, 0);
4766	ahc_outb(ahc, QOUTPOS, 0);
4767
4768	/*
4769	 * Use the built in queue management registers
4770	 * if they are available.
4771	 */
4772	if ((ahc->features & AHC_QUEUE_REGS) != 0) {
4773		ahc_outb(ahc, QOFF_CTLSTA, SCB_QSIZE_256);
4774		ahc_outb(ahc, SDSCB_QOFF, 0);
4775		ahc_outb(ahc, SNSCB_QOFF, 0);
4776		ahc_outb(ahc, HNSCB_QOFF, 0);
4777	}
4778
4779
4780	/* We don't have any waiting selections */
4781	ahc_outb(ahc, WAITING_SCBH, SCB_LIST_NULL);
4782
4783	/* Our disconnection list is empty too */
4784	ahc_outb(ahc, DISCONNECTED_SCBH, SCB_LIST_NULL);
4785
4786	/* Message out buffer starts empty */
4787	ahc_outb(ahc, MSG_OUT, MSG_NOOP);
4788
4789	/*
4790	 * Setup the allowed SCSI Sequences based on operational mode.
4791	 * If we are a target, we'll enable select-in operations once
4792	 * we've had a lun enabled.
4793	 */
4794	scsiseq_template = ENSELO|ENAUTOATNO|ENAUTOATNP;
4795	if ((ahc->flags & AHC_INITIATORROLE) != 0)
4796		scsiseq_template |= ENRSELI;
4797	ahc_outb(ahc, SCSISEQ_TEMPLATE, scsiseq_template);
4798
4799	/*
4800	 * Load the Sequencer program and Enable the adapter
4801	 * in "fast" mode.
4802	 */
4803	if (bootverbose)
4804		printf("%s: Downloading Sequencer Program...",
4805		       ahc_name(ahc));
4806
4807	ahc_loadseq(ahc);
4808
4809	if ((ahc->features & AHC_ULTRA2) != 0) {
4810		int wait;
4811
4812		/*
4813		 * Wait for up to 500ms for our transceivers
4814		 * to settle.  If the adapter does not have
4815		 * a cable attached, the transceivers may
4816		 * never settle, so don't complain if we
4817		 * fail here.
4818		 */
4819		ahc_pause(ahc);
4820		for (wait = 5000;
4821		     (ahc_inb(ahc, SBLKCTL) & (ENAB40|ENAB20)) == 0 && wait;
4822		     wait--)
4823			ahc_delay(100);
4824		ahc_unpause(ahc);
4825	}
4826	return (0);
4827}
4828
4829void
4830ahc_intr_enable(struct ahc_softc *ahc, int enable)
4831{
4832	u_int hcntrl;
4833
4834	hcntrl = ahc_inb(ahc, HCNTRL);
4835	hcntrl &= ~INTEN;
4836	ahc->pause &= ~INTEN;
4837	ahc->unpause &= ~INTEN;
4838	if (enable) {
4839		hcntrl |= INTEN;
4840		ahc->pause |= INTEN;
4841		ahc->unpause |= INTEN;
4842	}
4843	ahc_outb(ahc, HCNTRL, hcntrl);
4844}
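/*
 * Note (illustrative): the pause and unpause shadow values are updated
 * above so that a later ahc_pause()/ahc_unpause() pair cannot silently
 * re-enable or mask interrupts behind the caller's back.  Front ends
 * normally call ahc_intr_enable(ahc, TRUE) only after ahc_init() has
 * succeeded and their interrupt handler has been registered.
 */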
4845
4846/*
4847 * Ensure that the card is paused in a location
4848 * outside of all critical sections and that all
4849 * pending work is completed prior to returning.
4850 * This routine should only be called from outside
4851 * an interrupt context.
4852 */
4853void
4854ahc_pause_and_flushwork(struct ahc_softc *ahc)
4855{
4856	int intstat;
4857	int maxloops;
4858	int paused;
4859
4860	maxloops = 1000;
4861	ahc->flags |= AHC_ALL_INTERRUPTS;
4862	intstat = 0;
4863	paused = FALSE;
4864	do {
4865		if (paused)
4866			ahc_unpause(ahc);
4867		ahc_intr(ahc);
4868		ahc_pause(ahc);
4869		paused = TRUE;
4870		ahc_outb(ahc, SCSISEQ, ahc_inb(ahc, SCSISEQ) & ~ENSELO);
4871		ahc_clear_critical_section(ahc);
4872		if (intstat == 0xFF && (ahc->features & AHC_REMOVABLE) != 0)
4873			break;
4874	} while (--maxloops
4875	      && (((intstat = ahc_inb(ahc, INTSTAT)) & INT_PEND) != 0
4876	       || (ahc_inb(ahc, SSTAT0) & (SELDO|SELINGO))));
4877	if (maxloops == 0) {
4878		printf("Infinite interrupt loop, INTSTAT = %x\n",
4879		       ahc_inb(ahc, INTSTAT));
4880	}
4881	ahc_platform_flushwork(ahc);
4882	ahc->flags &= ~AHC_ALL_INTERRUPTS;
4883}
4884
4885int
4886ahc_suspend(struct ahc_softc *ahc)
4887{
4888	uint8_t *ptr;
4889	int	 i;
4890
4891	ahc_pause_and_flushwork(ahc);
4892
4893	if (LIST_FIRST(&ahc->pending_scbs) != NULL)
4894		return (EBUSY);
4895
4896#ifdef AHC_TARGET_MODE
4897	/*
4898	 * XXX What about ATIOs that have not yet been serviced?
4899	 * Perhaps we should just refuse to be suspended if we
4900	 * are acting in a target role.
4901	 */
4902	if (ahc->pending_device != NULL)
4903		return (EBUSY);
4904#endif
4905
4906	/* Save volatile registers */
4907	if ((ahc->features & AHC_TWIN) != 0) {
4908		ahc_outb(ahc, SBLKCTL, ahc_inb(ahc, SBLKCTL) | SELBUSB);
4909		ahc->suspend_state.channel[1].scsiseq = ahc_inb(ahc, SCSISEQ);
4910		ahc->suspend_state.channel[1].sxfrctl0 = ahc_inb(ahc, SXFRCTL0);
4911		ahc->suspend_state.channel[1].sxfrctl1 = ahc_inb(ahc, SXFRCTL1);
4912		ahc->suspend_state.channel[1].simode0 = ahc_inb(ahc, SIMODE0);
4913		ahc->suspend_state.channel[1].simode1 = ahc_inb(ahc, SIMODE1);
4914		ahc->suspend_state.channel[1].seltimer = ahc_inb(ahc, SELTIMER);
4915		ahc->suspend_state.channel[1].seqctl = ahc_inb(ahc, SEQCTL);
4916		ahc_outb(ahc, SBLKCTL, ahc_inb(ahc, SBLKCTL) & ~SELBUSB);
4917	}
4918	ahc->suspend_state.channel[0].scsiseq = ahc_inb(ahc, SCSISEQ);
4919	ahc->suspend_state.channel[0].sxfrctl0 = ahc_inb(ahc, SXFRCTL0);
4920	ahc->suspend_state.channel[0].sxfrctl1 = ahc_inb(ahc, SXFRCTL1);
4921	ahc->suspend_state.channel[0].simode0 = ahc_inb(ahc, SIMODE0);
4922	ahc->suspend_state.channel[0].simode1 = ahc_inb(ahc, SIMODE1);
4923	ahc->suspend_state.channel[0].seltimer = ahc_inb(ahc, SELTIMER);
4924	ahc->suspend_state.channel[0].seqctl = ahc_inb(ahc, SEQCTL);
4925
4926	if ((ahc->chip & AHC_PCI) != 0) {
4927		ahc->suspend_state.dscommand0 = ahc_inb(ahc, DSCOMMAND0);
4928		ahc->suspend_state.dspcistatus = ahc_inb(ahc, DSPCISTATUS);
4929	}
4930
4931	if ((ahc->features & AHC_DT) != 0) {
4932		u_int sfunct;
4933
4934		sfunct = ahc_inb(ahc, SFUNCT) & ~ALT_MODE;
4935		ahc_outb(ahc, SFUNCT, sfunct | ALT_MODE);
4936		ahc->suspend_state.optionmode = ahc_inb(ahc, OPTIONMODE);
4937		ahc_outb(ahc, SFUNCT, sfunct);
4938		ahc->suspend_state.crccontrol1 = ahc_inb(ahc, CRCCONTROL1);
4939	}
4940
4941	if ((ahc->features & AHC_MULTI_FUNC) != 0)
4942		ahc->suspend_state.scbbaddr = ahc_inb(ahc, SCBBADDR);
4943
4944	if ((ahc->features & AHC_ULTRA2) != 0)
4945		ahc->suspend_state.dff_thrsh = ahc_inb(ahc, DFF_THRSH);
4946
4947	ptr = ahc->suspend_state.scratch_ram;
4948	for (i = 0; i < 64; i++)
4949		*ptr++ = ahc_inb(ahc, SRAM_BASE + i);
4950
4951	if ((ahc->features & AHC_MORE_SRAM) != 0) {
4952		for (i = 0; i < 16; i++)
4953			*ptr++ = ahc_inb(ahc, TARG_OFFSET + i);
4954	}
4955
4956	ptr = ahc->suspend_state.btt;
4957	if ((ahc->flags & AHC_SCB_BTT) != 0) {
4958		for (i = 0;i < AHC_NUM_TARGETS; i++) {
4959			int j;
4960
4961			for (j = 0;j < AHC_NUM_LUNS; j++) {
4962				u_int tcl;
4963
4964				tcl = BUILD_TCL(i << 4, j);
4965				*ptr++ = ahc_index_busy_tcl(ahc, tcl);
4966			}
4967		}
4968	}
4969	ahc_shutdown(ahc);
4970	return (0);
4971}
4972
4973int
4974ahc_resume(struct ahc_softc *ahc)
4975{
4976	uint8_t *ptr;
4977	int	 i;
4978
4979	ahc_reset(ahc);
4980
4981	ahc_build_free_scb_list(ahc);
4982
4983	/* Restore volatile registers */
4984	if ((ahc->features & AHC_TWIN) != 0) {
4985		ahc_outb(ahc, SBLKCTL, ahc_inb(ahc, SBLKCTL) | SELBUSB);
4986		ahc_outb(ahc, SCSIID, ahc->our_id);
4987		ahc_outb(ahc, SCSISEQ, ahc->suspend_state.channel[1].scsiseq);
4988		ahc_outb(ahc, SXFRCTL0, ahc->suspend_state.channel[1].sxfrctl0);
4989		ahc_outb(ahc, SXFRCTL1, ahc->suspend_state.channel[1].sxfrctl1);
4990		ahc_outb(ahc, SIMODE0, ahc->suspend_state.channel[1].simode0);
4991		ahc_outb(ahc, SIMODE1, ahc->suspend_state.channel[1].simode1);
4992		ahc_outb(ahc, SELTIMER, ahc->suspend_state.channel[1].seltimer);
4993		ahc_outb(ahc, SEQCTL, ahc->suspend_state.channel[1].seqctl);
4994		ahc_outb(ahc, SBLKCTL, ahc_inb(ahc, SBLKCTL) & ~SELBUSB);
4995	}
4996	ahc_outb(ahc, SCSISEQ, ahc->suspend_state.channel[0].scsiseq);
4997	ahc_outb(ahc, SXFRCTL0, ahc->suspend_state.channel[0].sxfrctl0);
4998	ahc_outb(ahc, SXFRCTL1, ahc->suspend_state.channel[0].sxfrctl1);
4999	ahc_outb(ahc, SIMODE0, ahc->suspend_state.channel[0].simode0);
5000	ahc_outb(ahc, SIMODE1, ahc->suspend_state.channel[0].simode1);
5001	ahc_outb(ahc, SELTIMER, ahc->suspend_state.channel[0].seltimer);
5002	ahc_outb(ahc, SEQCTL, ahc->suspend_state.channel[0].seqctl);
5003	if ((ahc->features & AHC_ULTRA2) != 0)
5004		ahc_outb(ahc, SCSIID_ULTRA2, ahc->our_id);
5005	else
5006		ahc_outb(ahc, SCSIID, ahc->our_id);
5007
5008	if ((ahc->chip & AHC_PCI) != 0) {
5009		ahc_outb(ahc, DSCOMMAND0, ahc->suspend_state.dscommand0);
5010		ahc_outb(ahc, DSPCISTATUS, ahc->suspend_state.dspcistatus);
5011	}
5012
5013	if ((ahc->features & AHC_DT) != 0) {
5014		u_int sfunct;
5015
5016		sfunct = ahc_inb(ahc, SFUNCT) & ~ALT_MODE;
5017		ahc_outb(ahc, SFUNCT, sfunct | ALT_MODE);
5018		ahc_outb(ahc, OPTIONMODE, ahc->suspend_state.optionmode);
5019		ahc_outb(ahc, SFUNCT, sfunct);
5020		ahc_outb(ahc, CRCCONTROL1, ahc->suspend_state.crccontrol1);
5021	}
5022
5023	if ((ahc->features & AHC_MULTI_FUNC) != 0)
5024		ahc_outb(ahc, SCBBADDR, ahc->suspend_state.scbbaddr);
5025
5026	if ((ahc->features & AHC_ULTRA2) != 0)
5027		ahc_outb(ahc, DFF_THRSH, ahc->suspend_state.dff_thrsh);
5028
5029	ptr = ahc->suspend_state.scratch_ram;
5030	for (i = 0; i < 64; i++)
5031		ahc_outb(ahc, SRAM_BASE + i, *ptr++);
5032
5033	if ((ahc->features & AHC_MORE_SRAM) != 0) {
5034		for (i = 0; i < 16; i++)
5035			ahc_outb(ahc, TARG_OFFSET + i, *ptr++);
5036	}
5037
5038	ptr = ahc->suspend_state.btt;
5039	if ((ahc->flags & AHC_SCB_BTT) != 0) {
5040		for (i = 0;i < AHC_NUM_TARGETS; i++) {
5041			int j;
5042
5043			for (j = 0;j < AHC_NUM_LUNS; j++) {
5044				u_int tcl;
5045
5046				tcl = BUILD_TCL(i << 4, j);
5047				ahc_busy_tcl(ahc, tcl, *ptr++);
5048			}
5049		}
5050	}
5051	return (0);
5052}
5053
5054/************************** Busy Target Table *********************************/
5055/*
5056 * Return the untagged transaction id for a given target/channel lun.
5057 * Optionally, clear the entry.
5058 */
5059u_int
5060ahc_index_busy_tcl(struct ahc_softc *ahc, u_int tcl)
5061{
5062	u_int scbid;
5063	u_int target_offset;
5064
5065	if ((ahc->flags & AHC_SCB_BTT) != 0) {
5066		u_int saved_scbptr;
5067
5068		saved_scbptr = ahc_inb(ahc, SCBPTR);
5069		ahc_outb(ahc, SCBPTR, TCL_LUN(tcl));
5070		scbid = ahc_inb(ahc, SCB_64_BTT + TCL_TARGET_OFFSET(tcl));
5071		ahc_outb(ahc, SCBPTR, saved_scbptr);
5072	} else {
5073		target_offset = TCL_TARGET_OFFSET(tcl);
5074		scbid = ahc_inb(ahc, BUSY_TARGETS + target_offset);
5075	}
5076
5077	return (scbid);
5078}
5079
5080void
5081ahc_unbusy_tcl(struct ahc_softc *ahc, u_int tcl)
5082{
5083	u_int target_offset;
5084
5085	if ((ahc->flags & AHC_SCB_BTT) != 0) {
5086		u_int saved_scbptr;
5087
5088		saved_scbptr = ahc_inb(ahc, SCBPTR);
5089		ahc_outb(ahc, SCBPTR, TCL_LUN(tcl));
5090		ahc_outb(ahc, SCB_64_BTT+TCL_TARGET_OFFSET(tcl), SCB_LIST_NULL);
5091		ahc_outb(ahc, SCBPTR, saved_scbptr);
5092	} else {
5093		target_offset = TCL_TARGET_OFFSET(tcl);
5094		ahc_outb(ahc, BUSY_TARGETS + target_offset, SCB_LIST_NULL);
5095	}
5096}
5097
5098void
5099ahc_busy_tcl(struct ahc_softc *ahc, u_int tcl, u_int scbid)
5100{
5101	u_int target_offset;
5102
5103	if ((ahc->flags & AHC_SCB_BTT) != 0) {
5104		u_int saved_scbptr;
5105
5106		saved_scbptr = ahc_inb(ahc, SCBPTR);
5107		ahc_outb(ahc, SCBPTR, TCL_LUN(tcl));
5108		ahc_outb(ahc, SCB_64_BTT + TCL_TARGET_OFFSET(tcl), scbid);
5109		ahc_outb(ahc, SCBPTR, saved_scbptr);
5110	} else {
5111		target_offset = TCL_TARGET_OFFSET(tcl);
5112		ahc_outb(ahc, BUSY_TARGETS + target_offset, scbid);
5113	}
5114}
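/*
 * Usage sketch (illustrative, local names are placeholders): the
 * untagged bookkeeping elsewhere in this file follows this pattern -
 * build a TCL from the target's SCSIID nibble and the lun, then index,
 * claim, or clear the busy entry:
 *
 *	tcl = BUILD_TCL(target << 4, lun);
 *	if (ahc_index_busy_tcl(ahc, tcl) == SCB_LIST_NULL)
 *		ahc_busy_tcl(ahc, tcl, scb->hscb->tag);
 *	...
 *	ahc_unbusy_tcl(ahc, tcl);
 */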
5115
5116/************************** SCB and SCB queue management **********************/
5117int
5118ahc_match_scb(struct ahc_softc *ahc, struct scb *scb, int target,
5119	      char channel, int lun, u_int tag, role_t role)
5120{
5121	int targ = SCB_GET_TARGET(ahc, scb);
5122	char chan = SCB_GET_CHANNEL(ahc, scb);
5123	int slun = SCB_GET_LUN(scb);
5124	int match;
5125
5126	match = ((chan == channel) || (channel == ALL_CHANNELS));
5127	if (match != 0)
5128		match = ((targ == target) || (target == CAM_TARGET_WILDCARD));
5129	if (match != 0)
5130		match = ((lun == slun) || (lun == CAM_LUN_WILDCARD));
5131	if (match != 0) {
5132#ifdef AHC_TARGET_MODE
5133		int group;
5134
5135		group = XPT_FC_GROUP(scb->io_ctx->ccb_h.func_code);
5136		if (role == ROLE_INITIATOR) {
5137			match = (group != XPT_FC_GROUP_TMODE)
5138			      && ((tag == scb->hscb->tag)
5139			       || (tag == SCB_LIST_NULL));
5140		} else if (role == ROLE_TARGET) {
5141			match = (group == XPT_FC_GROUP_TMODE)
5142			      && ((tag == scb->io_ctx->csio.tag_id)
5143			       || (tag == SCB_LIST_NULL));
5144		}
5145#else /* !AHC_TARGET_MODE */
5146		match = ((tag == scb->hscb->tag) || (tag == SCB_LIST_NULL));
5147#endif /* AHC_TARGET_MODE */
5148	}
5149
5150	return match;
5151}
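/*
 * Example (illustrative): ahc_freeze_devq() below and the queue scans
 * later in this file pass wildcards to widen the match, e.g. every SCB
 * on channel A regardless of target, lun, or tag:
 *
 *	ahc_match_scb(ahc, scb, CAM_TARGET_WILDCARD, 'A',
 *		      CAM_LUN_WILDCARD, SCB_LIST_NULL, ROLE_UNKNOWN);
 */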
5152
5153void
5154ahc_freeze_devq(struct ahc_softc *ahc, struct scb *scb)
5155{
5156	int	target;
5157	char	channel;
5158	int	lun;
5159
5160	target = SCB_GET_TARGET(ahc, scb);
5161	lun = SCB_GET_LUN(scb);
5162	channel = SCB_GET_CHANNEL(ahc, scb);
5163
5164	ahc_search_qinfifo(ahc, target, channel, lun,
5165			   /*tag*/SCB_LIST_NULL, ROLE_UNKNOWN,
5166			   CAM_REQUEUE_REQ, SEARCH_COMPLETE);
5167
5168	ahc_platform_freeze_devq(ahc, scb);
5169}
5170
5171void
5172ahc_qinfifo_requeue_tail(struct ahc_softc *ahc, struct scb *scb)
5173{
5174	struct scb *prev_scb;
5175
5176	prev_scb = NULL;
5177	if (ahc_qinfifo_count(ahc) != 0) {
5178		u_int prev_tag;
5179		uint8_t prev_pos;
5180
5181		prev_pos = ahc->qinfifonext - 1;
5182		prev_tag = ahc->qinfifo[prev_pos];
5183		prev_scb = ahc_lookup_scb(ahc, prev_tag);
5184	}
5185	ahc_qinfifo_requeue(ahc, prev_scb, scb);
5186	if ((ahc->features & AHC_QUEUE_REGS) != 0) {
5187		ahc_outb(ahc, HNSCB_QOFF, ahc->qinfifonext);
5188	} else {
5189		ahc_outb(ahc, KERNEL_QINPOS, ahc->qinfifonext);
5190	}
5191}
5192
5193static void
5194ahc_qinfifo_requeue(struct ahc_softc *ahc, struct scb *prev_scb,
5195		    struct scb *scb)
5196{
5197	if (prev_scb == NULL) {
5198		ahc_outb(ahc, NEXT_QUEUED_SCB, scb->hscb->tag);
5199	} else {
5200		prev_scb->hscb->next = scb->hscb->tag;
5201		ahc_sync_scb(ahc, prev_scb,
5202			     BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
5203	}
5204	ahc->qinfifo[ahc->qinfifonext++] = scb->hscb->tag;
5205	scb->hscb->next = ahc->next_queued_scb->hscb->tag;
5206	ahc_sync_scb(ahc, scb, BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
5207}
5208
5209static int
5210ahc_qinfifo_count(struct ahc_softc *ahc)
5211{
5212	uint8_t qinpos;
5213	uint8_t diff;
5214
5215	if ((ahc->features & AHC_QUEUE_REGS) != 0) {
5216		qinpos = ahc_inb(ahc, SNSCB_QOFF);
5217		ahc_outb(ahc, SNSCB_QOFF, qinpos);
5218	} else
5219		qinpos = ahc_inb(ahc, QINPOS);
5220	diff = ahc->qinfifonext - qinpos;
5221	return (diff);
5222}
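/*
 * Worked example (illustrative): both cursors are uint8_t, so the
 * subtraction above is implicitly modulo 256 and survives wrap-around
 * of the 256 entry qinfifo.  For instance, with qinfifonext == 5 and
 * qinpos == 250,
 *
 *	diff = (uint8_t)(5 - 250) = 11
 *
 * i.e. eleven SCBs are still waiting to be fetched by the sequencer.
 */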
5223
5224int
5225ahc_search_qinfifo(struct ahc_softc *ahc, int target, char channel,
5226		   int lun, u_int tag, role_t role, uint32_t status,
5227		   ahc_search_action action)
5228{
5229	struct	scb *scb;
5230	struct	scb *prev_scb;
5231	uint8_t qinstart;
5232	uint8_t qinpos;
5233	uint8_t qintail;
5234	uint8_t next;
5235	uint8_t prev;
5236	uint8_t curscbptr;
5237	int	found;
5238	int	have_qregs;
5239
5240	qintail = ahc->qinfifonext;
5241	have_qregs = (ahc->features & AHC_QUEUE_REGS) != 0;
5242	if (have_qregs) {
5243		qinstart = ahc_inb(ahc, SNSCB_QOFF);
5244		ahc_outb(ahc, SNSCB_QOFF, qinstart);
5245	} else
5246		qinstart = ahc_inb(ahc, QINPOS);
5247	qinpos = qinstart;
5248	found = 0;
5249	prev_scb = NULL;
5250
5251	if (action == SEARCH_COMPLETE) {
5252		/*
5253		 * Don't attempt to run any queued untagged transactions
5254		 * until we are done with the abort process.
5255		 */
5256		ahc_freeze_untagged_queues(ahc);
5257	}
5258
5259	/*
5260	 * Start with an empty queue.  Entries that are not chosen
5261	 * for removal will be re-added to the queue as we go.
5262	 */
5263	ahc->qinfifonext = qinpos;
5264	ahc_outb(ahc, NEXT_QUEUED_SCB, ahc->next_queued_scb->hscb->tag);
5265
5266	while (qinpos != qintail) {
5267		scb = ahc_lookup_scb(ahc, ahc->qinfifo[qinpos]);
5268		if (scb == NULL) {
5269			printf("qinpos = %d, SCB index = %d\n",
5270				qinpos, ahc->qinfifo[qinpos]);
5271			panic("Loop 1\n");
5272		}
5273
5274		if (ahc_match_scb(ahc, scb, target, channel, lun, tag, role)) {
5275			/*
5276			 * We found an scb that needs to be acted on.
5277			 */
5278			found++;
5279			switch (action) {
5280			case SEARCH_COMPLETE:
5281			{
5282				cam_status ostat;
5283				cam_status cstat;
5284
5285				ostat = ahc_get_transaction_status(scb);
5286				if (ostat == CAM_REQ_INPROG)
5287					ahc_set_transaction_status(scb, status);
5288				cstat = ahc_get_transaction_status(scb);
5289				if (cstat != CAM_REQ_CMP)
5290					ahc_freeze_scb(scb);
5291				if ((scb->flags & SCB_ACTIVE) == 0)
5292					printf("Inactive SCB in qinfifo\n");
5293				ahc_done(ahc, scb);
5294
5295				/* FALLTHROUGH */
5296			}
5297			case SEARCH_REMOVE:
5298				break;
5299			case SEARCH_COUNT:
5300				ahc_qinfifo_requeue(ahc, prev_scb, scb);
5301				prev_scb = scb;
5302				break;
5303			}
5304		} else {
5305			ahc_qinfifo_requeue(ahc, prev_scb, scb);
5306			prev_scb = scb;
5307		}
5308		qinpos++;
5309	}
5310
5311	if ((ahc->features & AHC_QUEUE_REGS) != 0) {
5312		ahc_outb(ahc, HNSCB_QOFF, ahc->qinfifonext);
5313	} else {
5314		ahc_outb(ahc, KERNEL_QINPOS, ahc->qinfifonext);
5315	}
5316
5317	if (action != SEARCH_COUNT
5318	 && (found != 0)
5319	 && (qinstart != ahc->qinfifonext)) {
5320		/*
5321		 * The sequencer may be in the process of dmaing
5322		 * down the SCB at the beginning of the queue.
5323		 * This could be problematic if either the first,
5324		 * or the second SCB is removed from the queue
5325		 * (the first SCB includes a pointer to the "next"
5326		 * SCB to dma). If we have removed any entries, swap
5327		 * the first element in the queue with the next HSCB
5328		 * so the sequencer will notice that NEXT_QUEUED_SCB
5329		 * has changed during its dma attempt and will retry
5330		 * the DMA.
5331		 */
5332		scb = ahc_lookup_scb(ahc, ahc->qinfifo[qinstart]);
5333
5334		if (scb == NULL) {
5335			printf("found = %d, qinstart = %d, qinfifonext = %d\n",
5336				found, qinstart, ahc->qinfifonext);
5337			panic("First/Second Qinfifo fixup\n");
5338		}
5339		/*
5340		 * ahc_swap_with_next_hscb forces our next pointer to
5341		 * point to the reserved SCB for future commands.  Save
5342		 * and restore our original next pointer to maintain
5343		 * queue integrity.
5344		 */
5345		next = scb->hscb->next;
5346		ahc->scb_data->scbindex[scb->hscb->tag] = NULL;
5347		ahc_swap_with_next_hscb(ahc, scb);
5348		scb->hscb->next = next;
5349		ahc->qinfifo[qinstart] = scb->hscb->tag;
5350
5351		/* Tell the card about the new head of the qinfifo. */
5352		ahc_outb(ahc, NEXT_QUEUED_SCB, scb->hscb->tag);
5353
5354		/* Fixup the tail "next" pointer. */
5355		qintail = ahc->qinfifonext - 1;
5356		scb = ahc_lookup_scb(ahc, ahc->qinfifo[qintail]);
5357		scb->hscb->next = ahc->next_queued_scb->hscb->tag;
5358	}
5359
5360	/*
5361	 * Search the waiting-for-selection list.
5362	 */
5363	curscbptr = ahc_inb(ahc, SCBPTR);
5364	next = ahc_inb(ahc, WAITING_SCBH);  /* Start at head of list. */
5365	prev = SCB_LIST_NULL;
5366
5367	while (next != SCB_LIST_NULL) {
5368		uint8_t scb_index;
5369
5370		ahc_outb(ahc, SCBPTR, next);
5371		scb_index = ahc_inb(ahc, SCB_TAG);
5372		if (scb_index >= ahc->scb_data->numscbs) {
5373			printf("Waiting List inconsistency. "
5374			       "SCB index == %d, yet numscbs == %d.",
5375			       scb_index, ahc->scb_data->numscbs);
5376			ahc_dump_card_state(ahc);
5377			panic("for safety");
5378		}
5379		scb = ahc_lookup_scb(ahc, scb_index);
5380		if (scb == NULL) {
5381			printf("scb_index = %d, next = %d\n",
5382				scb_index, next);
5383			panic("Waiting List traversal\n");
5384		}
5385		if (ahc_match_scb(ahc, scb, target, channel,
5386				  lun, SCB_LIST_NULL, role)) {
5387			/*
5388			 * We found an scb that needs to be acted on.
5389			 */
5390			found++;
5391			switch (action) {
5392			case SEARCH_COMPLETE:
5393			{
5394				cam_status ostat;
5395				cam_status cstat;
5396
5397				ostat = ahc_get_transaction_status(scb);
5398				if (ostat == CAM_REQ_INPROG)
5399					ahc_set_transaction_status(scb,
5400								   status);
5401				cstat = ahc_get_transaction_status(scb);
5402				if (cstat != CAM_REQ_CMP)
5403					ahc_freeze_scb(scb);
5404				if ((scb->flags & SCB_ACTIVE) == 0)
5405					printf("Inactive SCB in Waiting List\n");
5406				ahc_done(ahc, scb);
5407				/* FALLTHROUGH */
5408			}
5409			case SEARCH_REMOVE:
5410				next = ahc_rem_wscb(ahc, next, prev);
5411				break;
5412			case SEARCH_COUNT:
5413				prev = next;
5414				next = ahc_inb(ahc, SCB_NEXT);
5415				break;
5416			}
5417		} else {
5418
5419			prev = next;
5420			next = ahc_inb(ahc, SCB_NEXT);
5421		}
5422	}
5423	ahc_outb(ahc, SCBPTR, curscbptr);
5424
5425	found += ahc_search_untagged_queues(ahc, /*ahc_io_ctx_t*/NULL, target,
5426					    channel, lun, status, action);
5427
5428	if (action == SEARCH_COMPLETE)
5429		ahc_release_untagged_queues(ahc);
5430	return (found);
5431}
5432
5433int
5434ahc_search_untagged_queues(struct ahc_softc *ahc, ahc_io_ctx_t ctx,
5435			   int target, char channel, int lun, uint32_t status,
5436			   ahc_search_action action)
5437{
5438	struct	scb *scb;
5439	int	maxtarget;
5440	int	found;
5441	int	i;
5442
5443	if (action == SEARCH_COMPLETE) {
5444		/*
5445		 * Don't attempt to run any queued untagged transactions
5446		 * until we are done with the abort process.
5447		 */
5448		ahc_freeze_untagged_queues(ahc);
5449	}
5450
5451	found = 0;
5452	i = 0;
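	/*
	 * Untagged transactions are queued per target: indices 0-7
	 * cover channel A and 8-15 channel B.  When SCB-based busy
	 * target tables are in use, maxtarget is left at zero and the
	 * scan below is skipped entirely.
	 */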
5453	if ((ahc->flags & AHC_SCB_BTT) == 0) {
5454
5455		maxtarget = 16;
5456		if (target != CAM_TARGET_WILDCARD) {
5457
5458			i = target;
5459			if (channel == 'B')
5460				i += 8;
5461			maxtarget = i + 1;
5462		}
5463	} else {
5464		maxtarget = 0;
5465	}
5466
5467	for (; i < maxtarget; i++) {
5468		struct scb_tailq *untagged_q;
5469		struct scb *next_scb;
5470
5471		untagged_q = &(ahc->untagged_queues[i]);
5472		next_scb = TAILQ_FIRST(untagged_q);
5473		while (next_scb != NULL) {
5474
5475			scb = next_scb;
5476			next_scb = TAILQ_NEXT(scb, links.tqe);
5477
5478			/*
5479			 * The head of the list may be the currently
5480			 * active untagged command for a device.
5481			 * We're only searching for commands that
5482			 * have not been started.  A transaction
5483			 * marked active but still in the qinfifo
5484			 * is removed by the qinfifo scanning code
5485			 * above.
5486			 */
5487			if ((scb->flags & SCB_ACTIVE) != 0)
5488				continue;
5489
5490			if (ahc_match_scb(ahc, scb, target, channel, lun,
5491					  SCB_LIST_NULL, ROLE_INITIATOR) == 0
5492			 || (ctx != NULL && ctx != scb->io_ctx))
5493				continue;
5494
5495			/*
5496			 * We found an scb that needs to be acted on.
5497			 */
5498			found++;
5499			switch (action) {
5500			case SEARCH_COMPLETE:
5501			{
5502				cam_status ostat;
5503				cam_status cstat;
5504
5505				ostat = ahc_get_transaction_status(scb);
5506				if (ostat == CAM_REQ_INPROG)
5507					ahc_set_transaction_status(scb, status);
5508				cstat = ahc_get_transaction_status(scb);
5509				if (cstat != CAM_REQ_CMP)
5510					ahc_freeze_scb(scb);
5511				if ((scb->flags & SCB_ACTIVE) == 0)
5512					printf("Inactive SCB in untaggedQ\n");
5513				ahc_done(ahc, scb);
5514				break;
5515			}
5516			case SEARCH_REMOVE:
5517				scb->flags &= ~SCB_UNTAGGEDQ;
5518				TAILQ_REMOVE(untagged_q, scb, links.tqe);
5519				break;
5520			case SEARCH_COUNT:
5521				break;
5522			}
5523		}
5524	}
5525
5526	if (action == SEARCH_COMPLETE)
5527		ahc_release_untagged_queues(ahc);
5528	return (found);
5529}
5530
5531int
5532ahc_search_disc_list(struct ahc_softc *ahc, int target, char channel,
5533		     int lun, u_int tag, int stop_on_first, int remove,
5534		     int save_state)
5535{
5536	struct	scb *scbp;
5537	u_int	next;
5538	u_int	prev;
5539	u_int	count;
5540	u_int	active_scb;
5541
5542	count = 0;
5543	next = ahc_inb(ahc, DISCONNECTED_SCBH);
5544	prev = SCB_LIST_NULL;
5545
5546	if (save_state) {
5547		/* restore this when we're done */
5548		active_scb = ahc_inb(ahc, SCBPTR);
5549	} else
5550		/* Silence compiler */
5551		active_scb = SCB_LIST_NULL;
5552
5553	while (next != SCB_LIST_NULL) {
5554		u_int scb_index;
5555
5556		ahc_outb(ahc, SCBPTR, next);
5557		scb_index = ahc_inb(ahc, SCB_TAG);
5558		if (scb_index >= ahc->scb_data->numscbs) {
5559			printf("Disconnected List inconsistency. "
5560			       "SCB index == %d, yet numscbs == %d.",
5561			       scb_index, ahc->scb_data->numscbs);
5562			ahc_dump_card_state(ahc);
5563			panic("for safety");
5564		}
5565
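		/*
		 * A next pointer that refers back to the element we
		 * just left indicates a corrupted, looping list.
		 */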
5566		if (next == prev) {
5567			panic("Disconnected List Loop. "
5568			      "cur SCBPTR == %x, prev SCBPTR == %x.",
5569			      next, prev);
5570		}
5571		scbp = ahc_lookup_scb(ahc, scb_index);
5572		if (ahc_match_scb(ahc, scbp, target, channel, lun,
5573				  tag, ROLE_INITIATOR)) {
5574			count++;
5575			if (remove) {
5576				next =
5577				    ahc_rem_scb_from_disc_list(ahc, prev, next);
5578			} else {
5579				prev = next;
5580				next = ahc_inb(ahc, SCB_NEXT);
5581			}
5582			if (stop_on_first)
5583				break;
5584		} else {
5585			prev = next;
5586			next = ahc_inb(ahc, SCB_NEXT);
5587		}
5588	}
5589	if (save_state)
5590		ahc_outb(ahc, SCBPTR, active_scb);
5591	return (count);
5592}
5593
5594/*
5595 * Remove an SCB from the on chip list of disconnected transactions.
5596 * This list is empty/unused if we are not performing SCB paging.
5597 */
5598static u_int
5599ahc_rem_scb_from_disc_list(struct ahc_softc *ahc, u_int prev, u_int scbptr)
5600{
5601	u_int next;
5602
5603	ahc_outb(ahc, SCBPTR, scbptr);
5604	next = ahc_inb(ahc, SCB_NEXT);
5605
5606	ahc_outb(ahc, SCB_CONTROL, 0);
5607
5608	ahc_add_curscb_to_free_list(ahc);
5609
5610	if (prev != SCB_LIST_NULL) {
5611		ahc_outb(ahc, SCBPTR, prev);
5612		ahc_outb(ahc, SCB_NEXT, next);
5613	} else
5614		ahc_outb(ahc, DISCONNECTED_SCBH, next);
5615
5616	return (next);
5617}
5618
5619/*
5620 * Add the SCB as selected by SCBPTR onto the on chip list of
5621 * free hardware SCBs.  This list is empty/unused if we are not
5622 * performing SCB paging.
5623 */
5624static void
5625ahc_add_curscb_to_free_list(struct ahc_softc *ahc)
5626{
5627	/*
5628	 * Invalidate the tag so that our abort
5629	 * routines don't think it's active.
5630	 */
5631	ahc_outb(ahc, SCB_TAG, SCB_LIST_NULL);
5632
5633	if ((ahc->flags & AHC_PAGESCBS) != 0) {
5634		ahc_outb(ahc, SCB_NEXT, ahc_inb(ahc, FREE_SCBH));
5635		ahc_outb(ahc, FREE_SCBH, ahc_inb(ahc, SCBPTR));
5636	}
5637}
5638
5639/*
5640 * Manipulate the waiting for selection list and return the
5641 * scb that follows the one that we remove.
5642 */
5643static u_int
5644ahc_rem_wscb(struct ahc_softc *ahc, u_int scbpos, u_int prev)
5645{
5646	u_int curscb, next;
5647
5648	/*
5649	 * Select the SCB we want to abort and
5650	 * pull the next pointer out of it.
5651	 */
5652	curscb = ahc_inb(ahc, SCBPTR);
5653	ahc_outb(ahc, SCBPTR, scbpos);
5654	next = ahc_inb(ahc, SCB_NEXT);
5655
5656	/* Clear the necessary fields */
5657	ahc_outb(ahc, SCB_CONTROL, 0);
5658
5659	ahc_add_curscb_to_free_list(ahc);
5660
5661	/* update the waiting list */
5662	if (prev == SCB_LIST_NULL) {
5663		/* First in the list */
5664		ahc_outb(ahc, WAITING_SCBH, next);
5665
5666		/*
5667		 * Ensure we aren't attempting to perform
5668		 * selection for this entry.
5669		 */
5670		ahc_outb(ahc, SCSISEQ, (ahc_inb(ahc, SCSISEQ) & ~ENSELO));
5671	} else {
5672		/*
5673		 * Select the scb that pointed to us
5674		 * and update its next pointer.
5675		 */
5676		ahc_outb(ahc, SCBPTR, prev);
5677		ahc_outb(ahc, SCB_NEXT, next);
5678	}
5679
5680	/*
5681	 * Point us back at the original scb position.
5682	 */
5683	ahc_outb(ahc, SCBPTR, curscb);
5684	return next;
5685}
5686
5687/******************************** Error Handling ******************************/
5688/*
5689 * Abort all SCBs that match the given description (target/channel/lun/tag),
5690 * setting their status to the passed in status if the status has not already
5691 * been modified from CAM_REQ_INPROG.  This routine assumes that the sequencer
5692 * is paused before it is called.
5693 */
5694int
5695ahc_abort_scbs(struct ahc_softc *ahc, int target, char channel,
5696	       int lun, u_int tag, role_t role, uint32_t status)
5697{
5698	struct	scb *scbp;
5699	struct	scb *scbp_next;
5700	u_int	active_scb;
5701	int	i, j;
5702	int	maxtarget;
5703	int	minlun;
5704	int	maxlun;
5705
5706	int	found;
5707
5708	/*
5709	 * Don't attempt to run any queued untagged transactions
5710	 * until we are done with the abort process.
5711	 */
5712	ahc_freeze_untagged_queues(ahc);
5713
5714	/* restore this when we're done */
5715	active_scb = ahc_inb(ahc, SCBPTR);
5716
5717	found = ahc_search_qinfifo(ahc, target, channel, lun, SCB_LIST_NULL,
5718				   role, CAM_REQUEUE_REQ, SEARCH_COMPLETE);
5719
5720	/*
5721	 * Clean out the busy target table for any untagged commands.
5722	 */
5723	i = 0;
5724	maxtarget = 16;
5725	if (target != CAM_TARGET_WILDCARD) {
5726		i = target;
5727		if (channel == 'B')
5728			i += 8;
5729		maxtarget = i + 1;
5730	}
5731
5732	if (lun == CAM_LUN_WILDCARD) {
5733
5734		/*
5735		 * Unless we are using an SCB based
5736		 * busy targets table, there is only
5737		 * one table entry for all luns of
5738		 * a target.
5739		 */
5740		minlun = 0;
5741		maxlun = 1;
5742		if ((ahc->flags & AHC_SCB_BTT) != 0)
5743			maxlun = AHC_NUM_LUNS;
5744	} else {
5745		minlun = lun;
5746		maxlun = lun + 1;
5747	}
5748
5749	if (role != ROLE_TARGET) {
5750		for (; i < maxtarget; i++) {
5751			for (j = minlun; j < maxlun; j++) {
5752				u_int scbid;
5753				u_int tcl;
5754
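				/*
				 * The busy target table is indexed by
				 * target (upper nibble of the SCSIID
				 * byte, hence the i << 4) and lun.
				 * Clear any entry that maps to an SCB
				 * we are aborting.
				 */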
5755				tcl = BUILD_TCL(i << 4, j);
5756				scbid = ahc_index_busy_tcl(ahc, tcl);
5757				scbp = ahc_lookup_scb(ahc, scbid);
5758				if (scbp == NULL
5759				 || ahc_match_scb(ahc, scbp, target, channel,
5760						  lun, tag, role) == 0)
5761					continue;
5762				ahc_unbusy_tcl(ahc, BUILD_TCL(i << 4, j));
5763			}
5764		}
5765
5766		/*
5767		 * Go through the disconnected list and remove any entries we
5768		 * have queued for completion, 0'ing their control byte too.
5769		 * We save the active SCB and restore it ourselves, so there
5770		 * is no reason for this search to restore it too.
5771		 */
5772		ahc_search_disc_list(ahc, target, channel, lun, tag,
5773				     /*stop_on_first*/FALSE, /*remove*/TRUE,
5774				     /*save_state*/FALSE);
5775	}
5776
5777	/*
5778	 * Go through the hardware SCB array looking for commands that
5779	 * were active but not on any list.  In some cases, these remnants
5780	 * might not still have mappings in the scbindex array (e.g. unexpected
5781	 * bus free with the same scb queued for an abort).  Don't hold this
5782	 * against them.
5783	 */
5784	for (i = 0; i < ahc->scb_data->maxhscbs; i++) {
5785		u_int scbid;
5786
5787		ahc_outb(ahc, SCBPTR, i);
5788		scbid = ahc_inb(ahc, SCB_TAG);
5789		scbp = ahc_lookup_scb(ahc, scbid);
5790		if ((scbp == NULL && scbid != SCB_LIST_NULL)
5791		 || (scbp != NULL
5792		  && ahc_match_scb(ahc, scbp, target, channel, lun, tag, role)))
5793			ahc_add_curscb_to_free_list(ahc);
5794	}
5795
5796	/*
5797	 * Go through the pending CCB list and look for
5798	 * commands for this target that are still active.
5799	 * These are other tagged commands that were
5800	 * disconnected when the reset occurred.
5801	 */
5802	scbp_next = LIST_FIRST(&ahc->pending_scbs);
5803	while (scbp_next != NULL) {
5804		scbp = scbp_next;
5805		scbp_next = LIST_NEXT(scbp, pending_links);
5806		if (ahc_match_scb(ahc, scbp, target, channel, lun, tag, role)) {
5807			cam_status ostat;
5808
5809			ostat = ahc_get_transaction_status(scbp);
5810			if (ostat == CAM_REQ_INPROG)
5811				ahc_set_transaction_status(scbp, status);
5812			if (ahc_get_transaction_status(scbp) != CAM_REQ_CMP)
5813				ahc_freeze_scb(scbp);
5814			if ((scbp->flags & SCB_ACTIVE) == 0)
5815				printf("Inactive SCB on pending list\n");
5816			ahc_done(ahc, scbp);
5817			found++;
5818		}
5819	}
5820	ahc_outb(ahc, SCBPTR, active_scb);
5821	ahc_platform_abort_scbs(ahc, target, channel, lun, tag, role, status);
5822	ahc_release_untagged_queues(ahc);
5823	return found;
5824}
5825
5826static void
5827ahc_reset_current_bus(struct ahc_softc *ahc)
5828{
5829	uint8_t scsiseq;
5830
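	/*
	 * Mask the SCSI reset interrupt while we drive SCSIRSTO
	 * ourselves so that we do not take an interrupt for our own
	 * bus reset; it is re-enabled below once the reset completes.
	 */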
5831	ahc_outb(ahc, SIMODE1, ahc_inb(ahc, SIMODE1) & ~ENSCSIRST);
5832	scsiseq = ahc_inb(ahc, SCSISEQ);
5833	ahc_outb(ahc, SCSISEQ, scsiseq | SCSIRSTO);
5834	ahc_flush_device_writes(ahc);
5835	ahc_delay(AHC_BUSRESET_DELAY);
5836	/* Turn off the bus reset */
5837	ahc_outb(ahc, SCSISEQ, scsiseq & ~SCSIRSTO);
5838
5839	ahc_clear_intstat(ahc);
5840
5841	/* Re-enable reset interrupts */
5842	ahc_outb(ahc, SIMODE1, ahc_inb(ahc, SIMODE1) | ENSCSIRST);
5843}
5844
5845int
5846ahc_reset_channel(struct ahc_softc *ahc, char channel, int initiate_reset)
5847{
5848	struct	ahc_devinfo devinfo;
5849	u_int	initiator, target, max_scsiid;
5850	u_int	sblkctl;
5851	u_int	scsiseq;
5852	u_int	simode1;
5853	int	found;
5854	int	restart_needed;
5855	char	cur_channel;
5856
5857	ahc->pending_device = NULL;
5858
5859	ahc_compile_devinfo(&devinfo,
5860			    CAM_TARGET_WILDCARD,
5861			    CAM_TARGET_WILDCARD,
5862			    CAM_LUN_WILDCARD,
5863			    channel, ROLE_UNKNOWN);
5864	ahc_pause(ahc);
5865
5866	/* Make sure the sequencer is in a safe location. */
5867	ahc_clear_critical_section(ahc);
5868
5869	/*
5870	 * Run our command complete fifos to ensure that we perform
5871	 * completion processing on any commands that 'completed'
5872	 * before the reset occurred.
5873	 */
5874	ahc_run_qoutfifo(ahc);
5875#ifdef AHC_TARGET_MODE
5876	/*
5877	 * XXX - In Twin mode, the tqinfifo may have commands
5878	 *	 for an unaffected channel in it.  However, if
5879	 *	 we have run out of ATIO resources to drain that
5880	 *	 queue, we may not get them all out here.  Further,
5881	 *	 the blocked transactions for the reset channel
5882	 *	 should just be killed off, irrespective of whether
5883	 *	 we are blocked on ATIO resources.  Write a routine
5884	 *	 to compact the tqinfifo appropriately.
5885	 */
5886	if ((ahc->flags & AHC_TARGETROLE) != 0) {
5887		ahc_run_tqinfifo(ahc, /*paused*/TRUE);
5888	}
5889#endif
5890
5891	/*
5892	 * Reset the bus if we are initiating this reset
5893	 */
5894	sblkctl = ahc_inb(ahc, SBLKCTL);
5895	cur_channel = 'A';
5896	if ((ahc->features & AHC_TWIN) != 0
5897	 && ((sblkctl & SELBUSB) != 0))
5898	    cur_channel = 'B';
5899	scsiseq = ahc_inb(ahc, SCSISEQ_TEMPLATE);
5900	if (cur_channel != channel) {
5901		/* Case 1: Command for another bus is active
5902		 * Stealthily reset the other bus without
5903		 * upsetting the current bus.
5904		 */
5905		ahc_outb(ahc, SBLKCTL, sblkctl ^ SELBUSB);
5906		simode1 = ahc_inb(ahc, SIMODE1) & ~(ENBUSFREE|ENSCSIRST);
5907#ifdef AHC_TARGET_MODE
5908		/*
5909		 * Bus resets clear ENSELI, so we cannot
5910		 * defer re-enabling bus reset interrupts
5911		 * if we are in target mode.
5912		 */
5913		if ((ahc->flags & AHC_TARGETROLE) != 0)
5914			simode1 |= ENSCSIRST;
5915#endif
5916		ahc_outb(ahc, SIMODE1, simode1);
5917		if (initiate_reset)
5918			ahc_reset_current_bus(ahc);
5919		ahc_clear_intstat(ahc);
5920		ahc_outb(ahc, SCSISEQ, scsiseq & (ENSELI|ENRSELI|ENAUTOATNP));
5921		ahc_outb(ahc, SBLKCTL, sblkctl);
5922		restart_needed = FALSE;
5923	} else {
5924		/* Case 2: A command from this bus is active or we're idle */
5925		simode1 = ahc_inb(ahc, SIMODE1) & ~(ENBUSFREE|ENSCSIRST);
5926#ifdef AHC_TARGET_MODE
5927		/*
5928		 * Bus resets clear ENSELI, so we cannot
5929		 * defer re-enabling bus reset interrupts
5930		 * if we are in target mode.
5931		 */
5932		if ((ahc->flags & AHC_TARGETROLE) != 0)
5933			simode1 |= ENSCSIRST;
5934#endif
5935		ahc_outb(ahc, SIMODE1, simode1);
5936		if (initiate_reset)
5937			ahc_reset_current_bus(ahc);
5938		ahc_clear_intstat(ahc);
5939		ahc_outb(ahc, SCSISEQ, scsiseq & (ENSELI|ENRSELI|ENAUTOATNP));
5940		restart_needed = TRUE;
5941	}
5942
5943	/*
5944	 * Clean up all the state information for the
5945	 * pending transactions on this bus.
5946	 */
5947	found = ahc_abort_scbs(ahc, CAM_TARGET_WILDCARD, channel,
5948			       CAM_LUN_WILDCARD, SCB_LIST_NULL,
5949			       ROLE_UNKNOWN, CAM_SCSI_BUS_RESET);
5950
5951	max_scsiid = (ahc->features & AHC_WIDE) ? 15 : 7;
5952
5953#ifdef AHC_TARGET_MODE
5954	/*
5955	 * Send an immediate notify ccb to all target mode peripheral
5956	 * drivers affected by this action.
5957	 */
5958	for (target = 0; target <= max_scsiid; target++) {
5959		struct ahc_tmode_tstate* tstate;
5960		u_int lun;
5961
5962		tstate = ahc->enabled_targets[target];
5963		if (tstate == NULL)
5964			continue;
5965		for (lun = 0; lun < AHC_NUM_LUNS; lun++) {
5966			struct ahc_tmode_lstate* lstate;
5967
5968			lstate = tstate->enabled_luns[lun];
5969			if (lstate == NULL)
5970				continue;
5971
5972			ahc_queue_lstate_event(ahc, lstate, CAM_TARGET_WILDCARD,
5973					       EVENT_TYPE_BUS_RESET, /*arg*/0);
5974			ahc_send_lstate_events(ahc, lstate);
5975		}
5976	}
5977#endif
5978	/* Notify the XPT that a bus reset occurred */
5979	ahc_send_async(ahc, devinfo.channel, CAM_TARGET_WILDCARD,
5980		       CAM_LUN_WILDCARD, AC_BUS_RESET, NULL);
5981
5982	/*
5983	 * Revert to async/narrow transfers until we renegotiate.
5984	 */
5985	for (target = 0; target <= max_scsiid; target++) {
5986
5987		if (ahc->enabled_targets[target] == NULL)
5988			continue;
5989		for (initiator = 0; initiator <= max_scsiid; initiator++) {
5990			struct ahc_devinfo devinfo;
5991
5992			ahc_compile_devinfo(&devinfo, target, initiator,
5993					    CAM_LUN_WILDCARD,
5994					    channel, ROLE_UNKNOWN);
5995			ahc_set_width(ahc, &devinfo, MSG_EXT_WDTR_BUS_8_BIT,
5996				      AHC_TRANS_CUR, /*paused*/TRUE);
5997			ahc_set_syncrate(ahc, &devinfo, /*syncrate*/NULL,
5998					 /*period*/0, /*offset*/0,
5999					 /*ppr_options*/0, AHC_TRANS_CUR,
6000					 /*paused*/TRUE);
6001		}
6002	}
6003
6004	if (restart_needed)
6005		ahc_restart(ahc);
6006	else
6007		ahc_unpause(ahc);
6008	return found;
6009}
6010
6011
6012/***************************** Residual Processing ****************************/
6013/*
6014 * Calculate the residual for a just completed SCB.
6015 */
6016void
6017ahc_calc_residual(struct ahc_softc *ahc, struct scb *scb)
6018{
6019	struct hardware_scb *hscb;
6020	struct status_pkt *spkt;
6021	uint32_t sgptr;
6022	uint32_t resid_sgptr;
6023	uint32_t resid;
6024
6025	/*
6026	 * 5 cases.
6027	 * 1) No residual.
6028	 *    SG_RESID_VALID clear in sgptr.
6029	 * 2) Transferless command
6030	 * 3) Never performed any transfers.
6031	 *    sgptr has SG_FULL_RESID set.
6032	 * 4) No residual but target did not
6033	 *    save data pointers after the
6034	 *    last transfer, so sgptr was
6035	 *    never updated.
6036	 * 5) We have a partial residual.
6037	 *    Use residual_sgptr to determine
6038	 *    where we are.
6039	 */
6040
6041	hscb = scb->hscb;
6042	sgptr = ahc_le32toh(hscb->sgptr);
6043	if ((sgptr & SG_RESID_VALID) == 0)
6044		/* Case 1 */
6045		return;
6046	sgptr &= ~SG_RESID_VALID;
6047
6048	if ((sgptr & SG_LIST_NULL) != 0)
6049		/* Case 2 */
6050		return;
6051
6052	spkt = &hscb->shared_data.status;
6053	resid_sgptr = ahc_le32toh(spkt->residual_sg_ptr);
6054	if ((sgptr & SG_FULL_RESID) != 0) {
6055		/* Case 3 */
6056		resid = ahc_get_transfer_length(scb);
6057	} else if ((resid_sgptr & SG_LIST_NULL) != 0) {
6058		/* Case 4 */
6059		return;
6060	} else if ((resid_sgptr & ~SG_PTR_MASK) != 0) {
6061		panic("Bogus resid sgptr value 0x%x\n", resid_sgptr);
6062	} else {
6063		struct ahc_dma_seg *sg;
6064
6065		/*
6066		 * Remainder of the SG where the transfer
6067		 * stopped.
6068		 */
6069		resid = ahc_le32toh(spkt->residual_datacnt) & AHC_SG_LEN_MASK;
6070		sg = ahc_sg_bus_to_virt(scb, resid_sgptr & SG_PTR_MASK);
6071
6072		/* The residual sg_ptr always points to the next sg */
6073		sg--;
6074
6075		/*
6076		 * Add up the contents of all residual
6077		 * SG segments that are after the SG where
6078		 * the transfer stopped.
6079		 */
6080		while ((ahc_le32toh(sg->len) & AHC_DMA_LAST_SEG) == 0) {
6081			sg++;
6082			resid += ahc_le32toh(sg->len) & AHC_SG_LEN_MASK;
6083		}
6084	}
6085	if ((scb->flags & SCB_SENSE) == 0)
6086		ahc_set_residual(scb, resid);
6087	else
6088		ahc_set_sense_residual(scb, resid);
6089
6090#ifdef AHC_DEBUG
6091	if ((ahc_debug & AHC_SHOW_MISC) != 0) {
6092		ahc_print_path(ahc, scb);
6093		printf("Handled Residual of %d bytes\n", resid);
6094	}
6095#endif
6096}
6097
6098/******************************* Target Mode **********************************/
6099#ifdef AHC_TARGET_MODE
6100/*
6101 * Add a target mode event to this lun's queue
6102 */
6103static void
6104ahc_queue_lstate_event(struct ahc_softc *ahc, struct ahc_tmode_lstate *lstate,
6105		       u_int initiator_id, u_int event_type, u_int event_arg)
6106{
6107	struct ahc_tmode_event *event;
6108	int pending;
6109
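	/*
	 * The event buffer is a small circular queue indexed by
	 * event_r_idx and event_w_idx.  Compute how many entries are
	 * pending so device queue freezes can be balanced against the
	 * releases performed below.
	 */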
6110	xpt_freeze_devq(lstate->path, /*count*/1);
6111	if (lstate->event_w_idx >= lstate->event_r_idx)
6112		pending = lstate->event_w_idx - lstate->event_r_idx;
6113	else
6114		pending = AHC_TMODE_EVENT_BUFFER_SIZE + 1
6115			- (lstate->event_r_idx - lstate->event_w_idx);
6116
6117	if (event_type == EVENT_TYPE_BUS_RESET
6118	 || event_type == MSG_BUS_DEV_RESET) {
6119		/*
6120		 * Any earlier events are irrelevant, so reset our buffer.
6121		 * This has the effect of allowing us to deal with reset
6122		 * floods (an external device holding down the reset line)
6123		 * without losing the event that is really interesting.
6124		 */
6125		lstate->event_r_idx = 0;
6126		lstate->event_w_idx = 0;
6127		xpt_release_devq(lstate->path, pending, /*runqueue*/FALSE);
6128	}
6129
6130	if (pending == AHC_TMODE_EVENT_BUFFER_SIZE) {
6131		xpt_print_path(lstate->path);
6132		printf("immediate event %x:%x lost\n",
6133		       lstate->event_buffer[lstate->event_r_idx].event_type,
6134		       lstate->event_buffer[lstate->event_r_idx].event_arg);
6135		lstate->event_r_idx++;
6136		if (lstate->event_r_idx == AHC_TMODE_EVENT_BUFFER_SIZE)
6137			lstate->event_r_idx = 0;
6138		xpt_release_devq(lstate->path, /*count*/1, /*runqueue*/FALSE);
6139	}
6140
6141	event = &lstate->event_buffer[lstate->event_w_idx];
6142	event->initiator_id = initiator_id;
6143	event->event_type = event_type;
6144	event->event_arg = event_arg;
6145	lstate->event_w_idx++;
6146	if (lstate->event_w_idx == AHC_TMODE_EVENT_BUFFER_SIZE)
6147		lstate->event_w_idx = 0;
6148}
6149
6150/*
6151 * Send any target mode events queued up waiting
6152 * for immediate notify resources.
6153 */
6154void
6155ahc_send_lstate_events(struct ahc_softc *ahc, struct ahc_tmode_lstate *lstate)
6156{
6157	struct ccb_hdr *ccbh;
6158	struct ccb_immed_notify *inot;
6159
6160	while (lstate->event_r_idx != lstate->event_w_idx
6161	    && (ccbh = SLIST_FIRST(&lstate->immed_notifies)) != NULL) {
6162		struct ahc_tmode_event *event;
6163
6164		event = &lstate->event_buffer[lstate->event_r_idx];
6165		SLIST_REMOVE_HEAD(&lstate->immed_notifies, sim_links.sle);
6166		inot = (struct ccb_immed_notify *)ccbh;
6167		switch (event->event_type) {
6168		case EVENT_TYPE_BUS_RESET:
6169			ccbh->status = CAM_SCSI_BUS_RESET|CAM_DEV_QFRZN;
6170			break;
6171		default:
6172			ccbh->status = CAM_MESSAGE_RECV|CAM_DEV_QFRZN;
6173			inot->message_args[0] = event->event_type;
6174			inot->message_args[1] = event->event_arg;
6175			break;
6176		}
6177		inot->initiator_id = event->initiator_id;
6178		inot->sense_len = 0;
6179		xpt_done((union ccb *)inot);
6180		lstate->event_r_idx++;
6181		if (lstate->event_r_idx == AHC_TMODE_EVENT_BUFFER_SIZE)
6182			lstate->event_r_idx = 0;
6183	}
6184}
6185#endif
6186
6187/******************** Sequencer Program Patching/Download *********************/
6188
6189#ifdef AHC_DUMP_SEQ
6190void
6191ahc_dumpseq(struct ahc_softc* ahc)
6192{
6193	int i;
6194	int max_prog;
6195
6196	if ((ahc->chip & AHC_BUS_MASK) < AHC_PCI)
6197		max_prog = 448;
6198	else if ((ahc->features & AHC_ULTRA2) != 0)
6199		max_prog = 768;
6200	else
6201		max_prog = 512;
6202
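	/*
	 * With LOADRAM set, sequencer program memory is visible
	 * through the SEQRAM port.  SEQADDR is pointed at the start of
	 * the program and advances as each four-byte instruction is
	 * read out below.
	 */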
6203	ahc_outb(ahc, SEQCTL, PERRORDIS|FAILDIS|FASTMODE|LOADRAM);
6204	ahc_outb(ahc, SEQADDR0, 0);
6205	ahc_outb(ahc, SEQADDR1, 0);
6206	for (i = 0; i < max_prog; i++) {
6207		uint8_t ins_bytes[4];
6208
6209		ahc_insb(ahc, SEQRAM, ins_bytes, 4);
6210		printf("0x%08x\n", ins_bytes[0] << 24
6211				 | ins_bytes[1] << 16
6212				 | ins_bytes[2] << 8
6213				 | ins_bytes[3]);
6214	}
6215}
6216#endif
6217
6218static void
6219ahc_loadseq(struct ahc_softc *ahc)
6220{
6221	struct	cs cs_table[num_critical_sections];
6222	u_int	begin_set[num_critical_sections];
6223	u_int	end_set[num_critical_sections];
6224	struct	patch *cur_patch;
6225	u_int	cs_count;
6226	u_int	cur_cs;
6227	u_int	i;
6228	int	downloaded;
6229	u_int	skip_addr;
6230	u_int	sg_prefetch_cnt;
6231	uint8_t	download_consts[7];
6232
6233	/*
6234	 * Start out with 0 critical sections
6235	 * that apply to this firmware load.
6236	 */
6237	cs_count = 0;
6238	cur_cs = 0;
6239	memset(begin_set, 0, sizeof(begin_set));
6240	memset(end_set, 0, sizeof(end_set));
6241
6242	/* Set up the downloadable constant table. */
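	/*
	 * These constants are patched into any downloaded instruction
	 * whose "parity" bit is set in the firmware image (see
	 * ahc_download_instr); they describe the shared queue layout
	 * and the host bridge cache line size.
	 */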
6243	download_consts[QOUTFIFO_OFFSET] = 0;
6244	if (ahc->targetcmds != NULL)
6245		download_consts[QOUTFIFO_OFFSET] += 32;
6246	download_consts[QINFIFO_OFFSET] = download_consts[QOUTFIFO_OFFSET] + 1;
6247	download_consts[CACHESIZE_MASK] = ahc->pci_cachesize - 1;
6248	download_consts[INVERTED_CACHESIZE_MASK] = ~(ahc->pci_cachesize - 1);
6249	sg_prefetch_cnt = ahc->pci_cachesize;
6250	if (sg_prefetch_cnt < (2 * sizeof(struct ahc_dma_seg)))
6251		sg_prefetch_cnt = 2 * sizeof(struct ahc_dma_seg);
6252	download_consts[SG_PREFETCH_CNT] = sg_prefetch_cnt;
6253	download_consts[SG_PREFETCH_ALIGN_MASK] = ~(sg_prefetch_cnt - 1);
6254	download_consts[SG_PREFETCH_ADDR_MASK] = (sg_prefetch_cnt - 1);
6255
6256	cur_patch = patches;
6257	downloaded = 0;
6258	skip_addr = 0;
6259	ahc_outb(ahc, SEQCTL, PERRORDIS|FAILDIS|FASTMODE|LOADRAM);
6260	ahc_outb(ahc, SEQADDR0, 0);
6261	ahc_outb(ahc, SEQADDR1, 0);
6262
6263	for (i = 0; i < sizeof(seqprog)/4; i++) {
6264		if (ahc_check_patch(ahc, &cur_patch, i, &skip_addr) == 0) {
6265			/*
6266			 * Don't download this instruction as it
6267			 * is in a patch that was removed.
6268			 */
6269			continue;
6270		}
6271		/*
6272		 * Move through the CS table until we find a CS
6273		 * that might apply to this instruction.
6274		 */
6275		for (; cur_cs < num_critical_sections; cur_cs++) {
6276			if (critical_sections[cur_cs].end <= i) {
6277				if (begin_set[cs_count] == TRUE
6278				 && end_set[cs_count] == FALSE) {
6279					cs_table[cs_count].end = downloaded;
6280				 	end_set[cs_count] = TRUE;
6281					cs_count++;
6282				}
6283				continue;
6284			}
6285			if (critical_sections[cur_cs].begin <= i
6286			 && begin_set[cs_count] == FALSE) {
6287				cs_table[cs_count].begin = downloaded;
6288				begin_set[cs_count] = TRUE;
6289			}
6290			break;
6291		}
6292		ahc_download_instr(ahc, i, download_consts);
6293		downloaded++;
6294	}
6295
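	/*
	 * Save the critical section table, with begin/end expressed as
	 * downloaded instruction addresses, so the run-time code can
	 * tell when the sequencer is paused inside one.
	 */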
6296	ahc->num_critical_sections = cs_count;
6297	if (cs_count != 0) {
6298
6299		cs_count *= sizeof(struct cs);
6300		ahc->critical_sections = malloc(cs_count, M_DEVBUF, M_NOWAIT);
6301		if (ahc->critical_sections == NULL)
6302			panic("ahc_loadseq: Could not malloc");
6303		memcpy(ahc->critical_sections, cs_table, cs_count);
6304	}
6305	ahc_outb(ahc, SEQCTL, PERRORDIS|FAILDIS|FASTMODE);
6306	ahc_restart(ahc);
6307
6308	if (bootverbose)
6309		printf(" %d instructions downloaded\n", downloaded);
6310}
6311
6312static int
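/*
 * Decide whether the instruction at start_instr belongs to the firmware
 * image we are downloading.  Patch predicates that begin at this
 * instruction are evaluated; rejected patches set *skip_addr so the
 * instructions they cover are omitted.  Returns non-zero if the
 * instruction should be downloaded.
 */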
6313ahc_check_patch(struct ahc_softc *ahc, struct patch **start_patch,
6314		u_int start_instr, u_int *skip_addr)
6315{
6316	struct	patch *cur_patch;
6317	struct	patch *last_patch;
6318	u_int	num_patches;
6319
6320	num_patches = sizeof(patches)/sizeof(struct patch);
6321	last_patch = &patches[num_patches];
6322	cur_patch = *start_patch;
6323
6324	while (cur_patch < last_patch && start_instr == cur_patch->begin) {
6325
6326		if (cur_patch->patch_func(ahc) == 0) {
6327
6328			/* Start rejecting code */
6329			*skip_addr = start_instr + cur_patch->skip_instr;
6330			cur_patch += cur_patch->skip_patch;
6331		} else {
6332			/* Accepted this patch.  Advance to the next
6333			 * one and wait for our instruction pointer to
6334			 * hit this point.
6335			 */
6336			cur_patch++;
6337		}
6338	}
6339
6340	*start_patch = cur_patch;
6341	if (start_instr < *skip_addr)
6342		/* Still skipping */
6343		return (0);
6344
6345	return (1);
6346}
6347
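/*
 * Download a single sequencer instruction, substituting downloadable
 * constants, relocating branch targets to account for removed patches,
 * and either computing parity (Ultra2 parts) or compressing the
 * encoding for older sequencers.
 */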
6348static void
6349ahc_download_instr(struct ahc_softc *ahc, u_int instrptr, uint8_t *dconsts)
6350{
6351	union	ins_formats instr;
6352	struct	ins_format1 *fmt1_ins;
6353	struct	ins_format3 *fmt3_ins;
6354	u_int	opcode;
6355
6356	/*
6357	 * The firmware is always compiled into a little endian format.
6358	 */
6359	instr.integer = ahc_le32toh(*(uint32_t*)&seqprog[instrptr * 4]);
6360
6361	fmt1_ins = &instr.format1;
6362	fmt3_ins = NULL;
6363
6364	/* Pull the opcode */
6365	opcode = instr.format1.opcode;
6366	switch (opcode) {
6367	case AIC_OP_JMP:
6368	case AIC_OP_JC:
6369	case AIC_OP_JNC:
6370	case AIC_OP_CALL:
6371	case AIC_OP_JNE:
6372	case AIC_OP_JNZ:
6373	case AIC_OP_JE:
6374	case AIC_OP_JZ:
6375	{
6376		struct patch *cur_patch;
6377		int address_offset;
6378		u_int address;
6379		u_int skip_addr;
6380		u_int i;
6381
6382		fmt3_ins = &instr.format3;
6383		address_offset = 0;
6384		address = fmt3_ins->address;
6385		cur_patch = patches;
6386		skip_addr = 0;
6387
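		/*
		 * Branch targets in the firmware image assume that no
		 * patches were removed.  Count how many instructions
		 * before the target address were skipped and subtract
		 * that from the address so the branch lands on the
		 * relocated instruction.
		 */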
6388		for (i = 0; i < address;) {
6389
6390			ahc_check_patch(ahc, &cur_patch, i, &skip_addr);
6391
6392			if (skip_addr > i) {
6393				int end_addr;
6394
6395				end_addr = MIN(address, skip_addr);
6396				address_offset += end_addr - i;
6397				i = skip_addr;
6398			} else {
6399				i++;
6400			}
6401		}
6402		address -= address_offset;
6403		fmt3_ins->address = address;
6404		/* FALLTHROUGH */
6405	}
6406	case AIC_OP_OR:
6407	case AIC_OP_AND:
6408	case AIC_OP_XOR:
6409	case AIC_OP_ADD:
6410	case AIC_OP_ADC:
6411	case AIC_OP_BMOV:
6412		if (fmt1_ins->parity != 0) {
6413			fmt1_ins->immediate = dconsts[fmt1_ins->immediate];
6414		}
6415		fmt1_ins->parity = 0;
6416		if ((ahc->features & AHC_CMD_CHAN) == 0
6417		 && opcode == AIC_OP_BMOV) {
6418			/*
6419			 * Block move was added at the same time
6420			 * as the command channel.  Verify that
6421			 * this is only a move of a single element
6422			 * and convert the BMOV to a MOV
6423			 * (AND with an immediate of FF).
6424			 */
6425			if (fmt1_ins->immediate != 1)
6426				panic("%s: BMOV not supported\n",
6427				      ahc_name(ahc));
6428			fmt1_ins->opcode = AIC_OP_AND;
6429			fmt1_ins->immediate = 0xff;
6430		}
6431		/* FALLTHROUGH */
6432	case AIC_OP_ROL:
6433		if ((ahc->features & AHC_ULTRA2) != 0) {
6434			int i, count;
6435
6436			/* Calculate odd parity for the instruction */
6437			for (i = 0, count = 0; i < 31; i++) {
6438				uint32_t mask;
6439
6440				mask = 0x01 << i;
6441				if ((instr.integer & mask) != 0)
6442					count++;
6443			}
6444			if ((count & 0x01) == 0)
6445				instr.format1.parity = 1;
6446		} else {
6447			/* Compress the instruction for older sequencers */
6448			if (fmt3_ins != NULL) {
6449				instr.integer =
6450					fmt3_ins->immediate
6451				      | (fmt3_ins->source << 8)
6452				      | (fmt3_ins->address << 16)
6453				      |	(fmt3_ins->opcode << 25);
6454			} else {
6455				instr.integer =
6456					fmt1_ins->immediate
6457				      | (fmt1_ins->source << 8)
6458				      | (fmt1_ins->destination << 16)
6459				      |	(fmt1_ins->ret << 24)
6460				      |	(fmt1_ins->opcode << 25);
6461			}
6462		}
6463		/* The sequencer is a little endian cpu */
6464		instr.integer = ahc_htole32(instr.integer);
6465		ahc_outsb(ahc, SEQRAM, instr.bytes, 4);
6466		break;
6467	default:
6468		panic("Unknown opcode encountered in seq program");
6469		break;
6470	}
6471}
6472
6473int
6474ahc_print_register(ahc_reg_parse_entry_t *table, u_int num_entries,
6475		   const char *name, u_int address, u_int value,
6476		   u_int *cur_column, u_int wrap_point)
6477{
6478	int	printed;
6479	u_int	printed_mask;
6480
6481	if (*cur_column >= wrap_point) {
6482		printf("\n");
6483		*cur_column = 0;
6484	}
6485	printed = printf("%s[0x%x]", name, value);
6486	if (table == NULL) {
6487		printed += printf(" ");
6488		*cur_column += printed;
6489		return (printed);
6490	}
6491	printed_mask = 0;
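	/*
	 * Print the symbolic name of each field in the table that
	 * matches the value, tracking which bits have already been
	 * decoded so no field is printed twice.
	 */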
6492	while (printed_mask != 0xFF) {
6493		int entry;
6494
6495		for (entry = 0; entry < num_entries; entry++) {
6496			if (((value & table[entry].mask)
6497			  != table[entry].value)
6498			 || ((printed_mask & table[entry].mask)
6499			  == table[entry].mask))
6500				continue;
6501
6502			printed += printf("%s%s",
6503					  printed_mask == 0 ? ":(" : "|",
6504					  table[entry].name);
6505			printed_mask |= table[entry].mask;
6506
6507			break;
6508		}
6509		if (entry >= num_entries)
6510			break;
6511	}
6512	if (printed_mask != 0)
6513		printed += printf(") ");
6514	else
6515		printed += printf(" ");
6516	*cur_column += printed;
6517	return (printed);
6518}
6519
6520void
6521ahc_dump_card_state(struct ahc_softc *ahc)
6522{
6523	struct scb *scb;
6524	struct scb_tailq *untagged_q;
6525	int target;
6526	int maxtarget;
6527	int i;
6528	uint8_t last_phase;
6529	uint8_t qinpos;
6530	uint8_t qintail;
6531	uint8_t qoutpos;
6532	uint8_t scb_index;
6533	uint8_t saved_scbptr;
6534
6535	saved_scbptr = ahc_inb(ahc, SCBPTR);
6536
6537	last_phase = ahc_inb(ahc, LASTPHASE);
6538	printf("%s: Dumping Card State %s, at SEQADDR 0x%x\n",
6539	       ahc_name(ahc), ahc_lookup_phase_entry(last_phase)->phasemsg,
6540	       ahc_inb(ahc, SEQADDR0) | (ahc_inb(ahc, SEQADDR1) << 8));
6541	printf("ACCUM = 0x%x, SINDEX = 0x%x, DINDEX = 0x%x, ARG_2 = 0x%x\n",
6542	       ahc_inb(ahc, ACCUM), ahc_inb(ahc, SINDEX), ahc_inb(ahc, DINDEX),
6543	       ahc_inb(ahc, ARG_2));
6544	printf("HCNT = 0x%x SCBPTR = 0x%x\n", ahc_inb(ahc, HCNT),
6545	       ahc_inb(ahc, SCBPTR));
6546	printf("SCSISEQ = 0x%x, SBLKCTL = 0x%x\n",
6547	       ahc_inb(ahc, SCSISEQ), ahc_inb(ahc, SBLKCTL));
6548	printf(" DFCNTRL = 0x%x, DFSTATUS = 0x%x\n",
6549	       ahc_inb(ahc, DFCNTRL), ahc_inb(ahc, DFSTATUS));
6550	printf("LASTPHASE = 0x%x, SCSISIGI = 0x%x, SXFRCTL0 = 0x%x\n",
6551	       last_phase, ahc_inb(ahc, SCSISIGI), ahc_inb(ahc, SXFRCTL0));
6552	printf("SSTAT0 = 0x%x, SSTAT1 = 0x%x\n",
6553	       ahc_inb(ahc, SSTAT0), ahc_inb(ahc, SSTAT1));
6554	if ((ahc->features & AHC_DT) != 0)
6555		printf("SCSIPHASE = 0x%x\n", ahc_inb(ahc, SCSIPHASE));
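	/*
	 * Successive reads of STACK return successive bytes of the
	 * sequencer's return stack; each pair of reads below is
	 * combined into one 16-bit address.
	 */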
6556	printf("STACK == 0x%x, 0x%x, 0x%x, 0x%x\n",
6557		ahc_inb(ahc, STACK) | (ahc_inb(ahc, STACK) << 8),
6558		ahc_inb(ahc, STACK) | (ahc_inb(ahc, STACK) << 8),
6559		ahc_inb(ahc, STACK) | (ahc_inb(ahc, STACK) << 8),
6560		ahc_inb(ahc, STACK) | (ahc_inb(ahc, STACK) << 8));
6561	printf("SCB count = %d\n", ahc->scb_data->numscbs);
6562	printf("Kernel NEXTQSCB = %d\n", ahc->next_queued_scb->hscb->tag);
6563	printf("Card NEXTQSCB = %d\n", ahc_inb(ahc, NEXT_QUEUED_SCB));
6564	/* QINFIFO */
6565	printf("QINFIFO entries: ");
6566	if ((ahc->features & AHC_QUEUE_REGS) != 0) {
6567		qinpos = ahc_inb(ahc, SNSCB_QOFF);
6568		ahc_outb(ahc, SNSCB_QOFF, qinpos);
6569	} else
6570		qinpos = ahc_inb(ahc, QINPOS);
6571	qintail = ahc->qinfifonext;
6572	while (qinpos != qintail) {
6573		printf("%d ", ahc->qinfifo[qinpos]);
6574		qinpos++;
6575	}
6576	printf("\n");
6577
6578	printf("Waiting Queue entries: ");
6579	scb_index = ahc_inb(ahc, WAITING_SCBH);
6580	i = 0;
6581	while (scb_index != SCB_LIST_NULL && i++ < 256) {
6582		ahc_outb(ahc, SCBPTR, scb_index);
6583		printf("%d:%d ", scb_index, ahc_inb(ahc, SCB_TAG));
6584		scb_index = ahc_inb(ahc, SCB_NEXT);
6585	}
6586	printf("\n");
6587
6588	printf("Disconnected Queue entries: ");
6589	scb_index = ahc_inb(ahc, DISCONNECTED_SCBH);
6590	i = 0;
6591	while (scb_index != SCB_LIST_NULL && i++ < 256) {
6592		ahc_outb(ahc, SCBPTR, scb_index);
6593		printf("%d:%d ", scb_index, ahc_inb(ahc, SCB_TAG));
6594		scb_index = ahc_inb(ahc, SCB_NEXT);
6595	}
6596	printf("\n");
6597
6598	ahc_sync_qoutfifo(ahc, BUS_DMASYNC_POSTREAD);
6599	printf("QOUTFIFO entries: ");
6600	qoutpos = ahc->qoutfifonext;
6601	i = 0;
6602	while (ahc->qoutfifo[qoutpos] != SCB_LIST_NULL && i++ < 256) {
6603		printf("%d ", ahc->qoutfifo[qoutpos]);
6604		qoutpos++;
6605	}
6606	printf("\n");
6607
6608	printf("Sequencer Free SCB List: ");
6609	scb_index = ahc_inb(ahc, FREE_SCBH);
6610	i = 0;
6611	while (scb_index != SCB_LIST_NULL && i++ < 256) {
6612		ahc_outb(ahc, SCBPTR, scb_index);
6613		printf("%d ", scb_index);
6614		scb_index = ahc_inb(ahc, SCB_NEXT);
6615	}
6616	printf("\n");
6617
6618	printf("Sequencer SCB Info: ");
6619	for (i = 0; i < ahc->scb_data->maxhscbs; i++) {
6620		ahc_outb(ahc, SCBPTR, i);
6621		printf("%d(c 0x%x, s 0x%x, l %d, t 0x%x) ",
6622		       i, ahc_inb(ahc, SCB_CONTROL),
6623		       ahc_inb(ahc, SCB_SCSIID),
6624		       ahc_inb(ahc, SCB_LUN),
6625		       ahc_inb(ahc, SCB_TAG));
6626	}
6627	printf("\n");
6628
6629	printf("Pending list: ");
6630	i = 0;
6631	LIST_FOREACH(scb, &ahc->pending_scbs, pending_links) {
6632		if (i++ > 256)
6633			break;
6634		if (scb != LIST_FIRST(&ahc->pending_scbs))
6635			printf(", ");
6636		printf("%d(c 0x%x, s 0x%x, l %d)", scb->hscb->tag,
6637		       scb->hscb->control, scb->hscb->scsiid, scb->hscb->lun);
6638		if ((ahc->flags & AHC_PAGESCBS) == 0) {
6639			ahc_outb(ahc, SCBPTR, scb->hscb->tag);
6640			printf("(0x%x, 0x%x)", ahc_inb(ahc, SCB_CONTROL),
6641			       ahc_inb(ahc, SCB_TAG));
6642		}
6643	}
6644	printf("\n");
6645
6646	printf("Kernel Free SCB list: ");
6647	i = 0;
6648	SLIST_FOREACH(scb, &ahc->scb_data->free_scbs, links.sle) {
6649		if (i++ > 256)
6650			break;
6651		printf("%d ", scb->hscb->tag);
6652	}
6653	printf("\n");
6654
6655	maxtarget = (ahc->features & (AHC_WIDE|AHC_TWIN)) ? 15 : 7;
6656	for (target = 0; target <= maxtarget; target++) {
6657		untagged_q = &ahc->untagged_queues[target];
6658		if (TAILQ_FIRST(untagged_q) == NULL)
6659			continue;
6660		printf("Untagged Q(%d): ", target);
6661		i = 0;
6662		TAILQ_FOREACH(scb, untagged_q, links.tqe) {
6663			if (i++ > 256)
6664				break;
6665			printf("%d ", scb->hscb->tag);
6666		}
6667		printf("\n");
6668	}
6669
6670	ahc_platform_dump_card_state(ahc);
6671	ahc_outb(ahc, SCBPTR, saved_scbptr);
6672}
6673
6674/************************* Target Mode ****************************************/
6675#ifdef AHC_TARGET_MODE
6676cam_status
6677ahc_find_tmode_devs(struct ahc_softc *ahc, struct cam_sim *sim, union ccb *ccb,
6678		    struct ahc_tmode_tstate **tstate,
6679		    struct ahc_tmode_lstate **lstate,
6680		    int notfound_failure)
6681{
6682
6683	if ((ahc->features & AHC_TARGETMODE) == 0)
6684		return (CAM_REQ_INVALID);
6685
6686	/*
6687	 * Handle the 'black hole' device that sucks up
6688	 * requests to unattached luns on enabled targets.
6689	 */
6690	if (ccb->ccb_h.target_id == CAM_TARGET_WILDCARD
6691	 && ccb->ccb_h.target_lun == CAM_LUN_WILDCARD) {
6692		*tstate = NULL;
6693		*lstate = ahc->black_hole;
6694	} else {
6695		u_int max_id;
6696
6697		max_id = (ahc->features & AHC_WIDE) ? 15 : 7;
6698		if (ccb->ccb_h.target_id > max_id)
6699			return (CAM_TID_INVALID);
6700
6701		if (ccb->ccb_h.target_lun >= AHC_NUM_LUNS)
6702			return (CAM_LUN_INVALID);
6703
6704		*tstate = ahc->enabled_targets[ccb->ccb_h.target_id];
6705		*lstate = NULL;
6706		if (*tstate != NULL)
6707			*lstate =
6708			    (*tstate)->enabled_luns[ccb->ccb_h.target_lun];
6709	}
6710
6711	if (notfound_failure != 0 && *lstate == NULL)
6712		return (CAM_PATH_INVALID);
6713
6714	return (CAM_REQ_CMP);
6715}
6716
6717void
6718ahc_handle_en_lun(struct ahc_softc *ahc, struct cam_sim *sim, union ccb *ccb)
6719{
6720	struct	   ahc_tmode_tstate *tstate;
6721	struct	   ahc_tmode_lstate *lstate;
6722	struct	   ccb_en_lun *cel;
6723	cam_status status;
6724	u_int	   target;
6725	u_int	   lun;
6726	u_int	   target_mask;
6727	u_int	   our_id;
6728	u_long	   s;
6729	char	   channel;
6730
6731	status = ahc_find_tmode_devs(ahc, sim, ccb, &tstate, &lstate,
6732				     /*notfound_failure*/FALSE);
6733
6734	if (status != CAM_REQ_CMP) {
6735		ccb->ccb_h.status = status;
6736		return;
6737	}
6738
6739	if (cam_sim_bus(sim) == 0)
6740		our_id = ahc->our_id;
6741	else
6742		our_id = ahc->our_id_b;
6743
6744	if (ccb->ccb_h.target_id != our_id) {
6745		/*
6746		 * our_id represents our initiator ID, or
6747		 * the ID of the first target to have an
6748		 * enabled lun in target mode.  There are
6749		 * two cases that may preclude enabling a
6750		 * target id other than our_id.
6751		 *
6752		 *   o our_id is for an active initiator role.
6753		 *     Since the hardware does not support
6754		 *     reselections to the initiator role at
6755		 *     anything other than our_id, and our_id
6756		 *     is used by the hardware to indicate the
6757		 *     ID to use for both select-out and
6758		 *     reselect-out operations, the only target
6759		 *     ID we can support in this mode is our_id.
6760		 *
6761		 *   o The MULTARGID feature is not available and
6762		 *     a previous target mode ID has been enabled.
6763		 */
6764		if ((ahc->features & AHC_MULTIROLE) != 0) {
6765
6766			if ((ahc->features & AHC_MULTI_TID) != 0
6767		   	 && (ahc->flags & AHC_INITIATORROLE) != 0) {
6768				/*
6769				 * Only allow additional targets if
6770				 * the initiator role is disabled.
6771				 * The hardware cannot handle a re-select-in
6772				 * on the initiator id during a re-select-out
6773				 * on a different target id.
6774				 */
6775				status = CAM_TID_INVALID;
6776			} else if ((ahc->flags & AHC_INITIATORROLE) != 0
6777				|| ahc->enabled_luns > 0) {
6778				/*
6779				 * Only allow our target id to change
6780				 * if the initiator role is not configured
6781				 * and there are no enabled luns which
6782				 * are attached to the currently registered
6783				 * scsi id.
6784				 */
6785				status = CAM_TID_INVALID;
6786			}
6787		} else if ((ahc->features & AHC_MULTI_TID) == 0
6788			&& ahc->enabled_luns > 0) {
6789
6790			status = CAM_TID_INVALID;
6791		}
6792	}
6793
6794	if (status != CAM_REQ_CMP) {
6795		ccb->ccb_h.status = status;
6796		return;
6797	}
6798
6799	/*
6800	 * We now have an id that is valid.
6801	 * If we aren't in target mode, switch modes.
6802	 */
6803	if ((ahc->flags & AHC_TARGETROLE) == 0
6804	 && ccb->ccb_h.target_id != CAM_TARGET_WILDCARD) {
6805		u_long	s;
6806
6807		printf("Configuring Target Mode\n");
6808		ahc_lock(ahc, &s);
6809		if (LIST_FIRST(&ahc->pending_scbs) != NULL) {
6810			ccb->ccb_h.status = CAM_BUSY;
6811			ahc_unlock(ahc, &s);
6812			return;
6813		}
6814		ahc->flags |= AHC_TARGETROLE;
6815		if ((ahc->features & AHC_MULTIROLE) == 0)
6816			ahc->flags &= ~AHC_INITIATORROLE;
6817		ahc_pause(ahc);
6818		ahc_loadseq(ahc);
6819		ahc_unlock(ahc, &s);
6820	}
6821	cel = &ccb->cel;
6822	target = ccb->ccb_h.target_id;
6823	lun = ccb->ccb_h.target_lun;
6824	channel = SIM_CHANNEL(ahc, sim);
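	/*
	 * Build this target's bit in the 16-bit TARGID mask; channel B
	 * targets occupy the upper eight bits.
	 */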
6825	target_mask = 0x01 << target;
6826	if (channel == 'B')
6827		target_mask <<= 8;
6828
6829	if (cel->enable != 0) {
6830		u_int scsiseq;
6831
6832		/* Are we already enabled?? */
6833		if (lstate != NULL) {
6834			xpt_print_path(ccb->ccb_h.path);
6835			printf("Lun already enabled\n");
6836			ccb->ccb_h.status = CAM_LUN_ALRDY_ENA;
6837			return;
6838		}
6839
6840		if (cel->grp6_len != 0
6841		 || cel->grp7_len != 0) {
6842			/*
6843			 * Don't (yet?) support vendor
6844			 * specific commands.
6845			 */
6846			ccb->ccb_h.status = CAM_REQ_INVALID;
6847			printf("Non-zero Group Codes\n");
6848			return;
6849		}
6850
6851		/*
6852		 * Seems to be okay.
6853	 * Set up our data structures.
6854		 */
6855		if (target != CAM_TARGET_WILDCARD && tstate == NULL) {
6856			tstate = ahc_alloc_tstate(ahc, target, channel);
6857			if (tstate == NULL) {
6858				xpt_print_path(ccb->ccb_h.path);
6859				printf("Couldn't allocate tstate\n");
6860				ccb->ccb_h.status = CAM_RESRC_UNAVAIL;
6861				return;
6862			}
6863		}
6864		lstate = malloc(sizeof(*lstate), M_DEVBUF, M_NOWAIT);
6865		if (lstate == NULL) {
6866			xpt_print_path(ccb->ccb_h.path);
6867			printf("Couldn't allocate lstate\n");
6868			ccb->ccb_h.status = CAM_RESRC_UNAVAIL;
6869			return;
6870		}
6871		memset(lstate, 0, sizeof(*lstate));
6872		status = xpt_create_path(&lstate->path, /*periph*/NULL,
6873					 xpt_path_path_id(ccb->ccb_h.path),
6874					 xpt_path_target_id(ccb->ccb_h.path),
6875					 xpt_path_lun_id(ccb->ccb_h.path));
6876		if (status != CAM_REQ_CMP) {
6877			free(lstate, M_DEVBUF);
6878			xpt_print_path(ccb->ccb_h.path);
6879			printf("Couldn't allocate path\n");
6880			ccb->ccb_h.status = CAM_RESRC_UNAVAIL;
6881			return;
6882		}
6883		SLIST_INIT(&lstate->accept_tios);
6884		SLIST_INIT(&lstate->immed_notifies);
6885		ahc_lock(ahc, &s);
6886		ahc_pause(ahc);
6887		if (target != CAM_TARGET_WILDCARD) {
6888			tstate->enabled_luns[lun] = lstate;
6889			ahc->enabled_luns++;
6890
6891			if ((ahc->features & AHC_MULTI_TID) != 0) {
6892				u_int targid_mask;
6893
6894				targid_mask = ahc_inb(ahc, TARGID)
6895					    | (ahc_inb(ahc, TARGID + 1) << 8);
6896
6897				targid_mask |= target_mask;
6898				ahc_outb(ahc, TARGID, targid_mask);
6899				ahc_outb(ahc, TARGID+1, (targid_mask >> 8));
6900
6901				ahc_update_scsiid(ahc, targid_mask);
6902			} else {
6903				u_int our_id;
6904				char  channel;
6905
6906				channel = SIM_CHANNEL(ahc, sim);
6907				our_id = SIM_SCSI_ID(ahc, sim);
6908
6909				/*
6910				 * This can only happen if selections
6911				 * are not enabled
6912				 */
6913				if (target != our_id) {
6914					u_int sblkctl;
6915					char  cur_channel;
6916					int   swap;
6917
6918					sblkctl = ahc_inb(ahc, SBLKCTL);
6919					cur_channel = (sblkctl & SELBUSB)
6920						    ? 'B' : 'A';
6921					if ((ahc->features & AHC_TWIN) == 0)
6922						cur_channel = 'A';
6923					swap = cur_channel != channel;
6924					if (channel == 'A')
6925						ahc->our_id = target;
6926					else
6927						ahc->our_id_b = target;
6928
6929					if (swap)
6930						ahc_outb(ahc, SBLKCTL,
6931							 sblkctl ^ SELBUSB);
6932
6933					ahc_outb(ahc, SCSIID, target);
6934
6935					if (swap)
6936						ahc_outb(ahc, SBLKCTL, sblkctl);
6937				}
6938			}
6939		} else
6940			ahc->black_hole = lstate;
6941		/* Allow select-in operations */
6942		if (ahc->black_hole != NULL && ahc->enabled_luns > 0) {
6943			scsiseq = ahc_inb(ahc, SCSISEQ_TEMPLATE);
6944			scsiseq |= ENSELI;
6945			ahc_outb(ahc, SCSISEQ_TEMPLATE, scsiseq);
6946			scsiseq = ahc_inb(ahc, SCSISEQ);
6947			scsiseq |= ENSELI;
6948			ahc_outb(ahc, SCSISEQ, scsiseq);
6949		}
6950		ahc_unpause(ahc);
6951		ahc_unlock(ahc, &s);
6952		ccb->ccb_h.status = CAM_REQ_CMP;
6953		xpt_print_path(ccb->ccb_h.path);
6954		printf("Lun now enabled for target mode\n");
6955	} else {
6956		struct scb *scb;
6957		int i, empty;
6958
6959		if (lstate == NULL) {
6960			ccb->ccb_h.status = CAM_LUN_INVALID;
6961			return;
6962		}
6963
6964		ahc_lock(ahc, &s);
6965
6966		ccb->ccb_h.status = CAM_REQ_CMP;
6967		LIST_FOREACH(scb, &ahc->pending_scbs, pending_links) {
6968			struct ccb_hdr *ccbh;
6969
6970			ccbh = &scb->io_ctx->ccb_h;
6971			if (ccbh->func_code == XPT_CONT_TARGET_IO
6972			 && !xpt_path_comp(ccbh->path, ccb->ccb_h.path)){
6973				printf("CTIO pending\n");
6974				ccb->ccb_h.status = CAM_REQ_INVALID;
6975				ahc_unlock(ahc, &s);
6976				return;
6977			}
6978		}
6979
6980		if (SLIST_FIRST(&lstate->accept_tios) != NULL) {
6981			printf("ATIOs pending\n");
6982			ccb->ccb_h.status = CAM_REQ_INVALID;
6983		}
6984
6985		if (SLIST_FIRST(&lstate->immed_notifies) != NULL) {
6986			printf("INOTs pending\n");
6987			ccb->ccb_h.status = CAM_REQ_INVALID;
6988		}
6989
6990		if (ccb->ccb_h.status != CAM_REQ_CMP) {
6991			ahc_unlock(ahc, &s);
6992			return;
6993		}
6994
6995		xpt_print_path(ccb->ccb_h.path);
6996		printf("Target mode disabled\n");
6997		xpt_free_path(lstate->path);
6998		free(lstate, M_DEVBUF);
6999
7000		ahc_pause(ahc);
7001		/* Can we clean up the target too? */
7002		if (target != CAM_TARGET_WILDCARD) {
7003			tstate->enabled_luns[lun] = NULL;
7004			ahc->enabled_luns--;
7005			for (empty = 1, i = 0; i < 8; i++)
7006				if (tstate->enabled_luns[i] != NULL) {
7007					empty = 0;
7008					break;
7009				}
7010
7011			if (empty) {
7012				ahc_free_tstate(ahc, target, channel,
7013						/*force*/FALSE);
7014				if (ahc->features & AHC_MULTI_TID) {
7015					u_int targid_mask;
7016
7017					targid_mask = ahc_inb(ahc, TARGID)
7018						    | (ahc_inb(ahc, TARGID + 1)
7019						       << 8);
7020
7021					targid_mask &= ~target_mask;
7022					ahc_outb(ahc, TARGID, targid_mask);
7023					ahc_outb(ahc, TARGID+1,
7024					 	 (targid_mask >> 8));
7025					ahc_update_scsiid(ahc, targid_mask);
7026				}
7027			}
7028		} else {
7029
7030			ahc->black_hole = NULL;
7031
7032			/*
7033			 * We can't allow selections without
7034			 * our black hole device.
7035			 */
7036			empty = TRUE;
7037		}
7038		if (ahc->enabled_luns == 0) {
7039			/* Disallow select-in */
7040			u_int scsiseq;
7041
7042			scsiseq = ahc_inb(ahc, SCSISEQ_TEMPLATE);
7043			scsiseq &= ~ENSELI;
7044			ahc_outb(ahc, SCSISEQ_TEMPLATE, scsiseq);
7045			scsiseq = ahc_inb(ahc, SCSISEQ);
7046			scsiseq &= ~ENSELI;
7047			ahc_outb(ahc, SCSISEQ, scsiseq);
7048
7049			if ((ahc->features & AHC_MULTIROLE) == 0) {
7050				printf("Configuring Initiator Mode\n");
7051				ahc->flags &= ~AHC_TARGETROLE;
7052				ahc->flags |= AHC_INITIATORROLE;
7053				ahc_pause(ahc);
7054				ahc_loadseq(ahc);
7055			}
7056		}
7057		ahc_unpause(ahc);
7058		ahc_unlock(ahc, &s);
7059	}
7060}
7061
7062static void
7063ahc_update_scsiid(struct ahc_softc *ahc, u_int targid_mask)
7064{
7065	u_int scsiid_mask;
7066	u_int scsiid;
7067
7068	if ((ahc->features & AHC_MULTI_TID) == 0)
7069		panic("ahc_update_scsiid called on non-multitid unit\n");
7070
7071	/*
7072	 * Since we will rely on the TARGID mask
7073	 * for selection enables, ensure that OID
7074	 * in SCSIID is not set to some other ID
7075	 * that we don't want to allow selections on.
7076	 */
7077	if ((ahc->features & AHC_ULTRA2) != 0)
7078		scsiid = ahc_inb(ahc, SCSIID_ULTRA2);
7079	else
7080		scsiid = ahc_inb(ahc, SCSIID);
7081	scsiid_mask = 0x1 << (scsiid & OID);
7082	if ((targid_mask & scsiid_mask) == 0) {
7083		u_int our_id;
7084
7085		/* ffs counts from 1 */
7086		our_id = ffs(targid_mask);
7087		if (our_id == 0)
7088			our_id = ahc->our_id;
7089		else
7090			our_id--;
7091		scsiid &= TID;
7092		scsiid |= our_id;
7093	}
7094	if ((ahc->features & AHC_ULTRA2) != 0)
7095		ahc_outb(ahc, SCSIID_ULTRA2, scsiid);
7096	else
7097		ahc_outb(ahc, SCSIID, scsiid);
7098}
7099
7100void
7101ahc_run_tqinfifo(struct ahc_softc *ahc, int paused)
7102{
7103	struct target_cmd *cmd;
7104
7105	/*
7106	 * If the card supports auto-access pause,
7107	 * we can access the card directly regardless
7108	 * of whether it is paused or not.
7109	 */
7110	if ((ahc->features & AHC_AUTOPAUSE) != 0)
7111		paused = TRUE;
7112
7113	ahc_sync_tqinfifo(ahc, BUS_DMASYNC_POSTREAD);
7114	while ((cmd = &ahc->targetcmds[ahc->tqinfifonext])->cmd_valid != 0) {
7115
7116		/*
7117		 * Only advance through the queue if we
7118		 * have the resources to process the command.
7119		 */
7120		if (ahc_handle_target_cmd(ahc, cmd) != 0)
7121			break;
7122
7123		cmd->cmd_valid = 0;
7124		ahc_dmamap_sync(ahc, ahc->shared_data_dmat,
7125				ahc->shared_data_dmamap,
7126				ahc_targetcmd_offset(ahc, ahc->tqinfifonext),
7127				sizeof(struct target_cmd),
7128				BUS_DMASYNC_PREREAD);
7129		ahc->tqinfifonext++;
7130
7131		/*
7132		 * Lazily update our position in the target mode incoming
7133		 * command queue as seen by the sequencer.
7134		 */
7135		if ((ahc->tqinfifonext & (HOST_TQINPOS - 1)) == 1) {
7136			if ((ahc->features & AHC_HS_MAILBOX) != 0) {
7137				u_int hs_mailbox;
7138
7139				hs_mailbox = ahc_inb(ahc, HS_MAILBOX);
7140				hs_mailbox &= ~HOST_TQINPOS;
7141				hs_mailbox |= ahc->tqinfifonext & HOST_TQINPOS;
7142				ahc_outb(ahc, HS_MAILBOX, hs_mailbox);
7143			} else {
7144				if (!paused)
7145					ahc_pause(ahc);
7146				ahc_outb(ahc, KERNEL_TQINPOS,
7147					 ahc->tqinfifonext & HOST_TQINPOS);
7148				if (!paused)
7149					ahc_unpause(ahc);
7150			}
7151		}
7152	}
7153}
7154
7155static int
7156ahc_handle_target_cmd(struct ahc_softc *ahc, struct target_cmd *cmd)
7157{
7158	struct	  ahc_tmode_tstate *tstate;
7159	struct	  ahc_tmode_lstate *lstate;
7160	struct	  ccb_accept_tio *atio;
7161	uint8_t *byte;
7162	int	  initiator;
7163	int	  target;
7164	int	  lun;
7165
7166	initiator = SCSIID_TARGET(ahc, cmd->scsiid);
7167	target = SCSIID_OUR_ID(cmd->scsiid);
7168	lun    = (cmd->identify & MSG_IDENTIFY_LUNMASK);
7169
7170	byte = cmd->bytes;
7171	tstate = ahc->enabled_targets[target];
7172	lstate = NULL;
7173	if (tstate != NULL)
7174		lstate = tstate->enabled_luns[lun];
7175
7176	/*
7177	 * Commands for disabled luns go to the black hole driver.
7178	 */
7179	if (lstate == NULL)
7180		lstate = ahc->black_hole;
7181
7182	atio = (struct ccb_accept_tio*)SLIST_FIRST(&lstate->accept_tios);
7183	if (atio == NULL) {
7184		ahc->flags |= AHC_TQINFIFO_BLOCKED;
7185		/*
7186		 * Wait for more ATIOs from the peripheral driver for this lun.
7187		 */
7188		if (bootverbose)
7189			printf("%s: ATIOs exhausted\n", ahc_name(ahc));
7190		return (1);
7191	} else
7192		ahc->flags &= ~AHC_TQINFIFO_BLOCKED;
7193#if 0
7194	printf("Incoming command from %d for %d:%d%s\n",
7195	       initiator, target, lun,
7196	       lstate == ahc->black_hole ? "(Black Holed)" : "");
7197#endif
7198	SLIST_REMOVE_HEAD(&lstate->accept_tios, sim_links.sle);
7199
7200	if (lstate == ahc->black_hole) {
7201		/* Fill in the wildcards */
7202		atio->ccb_h.target_id = target;
7203		atio->ccb_h.target_lun = lun;
7204	}
7205
7206	/*
7207	 * Package it up and send it off to
7208	 * whoever has this lun enabled.
7209	 */
7210	atio->sense_len = 0;
7211	atio->init_id = initiator;
7212	if (byte[0] != 0xFF) {
7213		/* Tag was included */
7214		atio->tag_action = *byte++;
7215		atio->tag_id = *byte++;
7216		atio->ccb_h.flags = CAM_TAG_ACTION_VALID;
7217	} else {
7218		atio->ccb_h.flags = 0;
7219	}
7220	byte++;
7221
7222	/* Okay.  Now determine the cdb size based on the command code */
7223	switch (*byte >> CMD_GROUP_CODE_SHIFT) {
7224	case 0:
7225		atio->cdb_len = 6;
7226		break;
7227	case 1:
7228	case 2:
7229		atio->cdb_len = 10;
7230		break;
7231	case 4:
7232		atio->cdb_len = 16;
7233		break;
7234	case 5:
7235		atio->cdb_len = 12;
7236		break;
7237	case 3:
7238	default:
7239		/* Only copy the opcode. */
7240		atio->cdb_len = 1;
7241		printf("Reserved or VU command code type encountered\n");
7242		break;
7243	}
7244
7245	memcpy(atio->cdb_io.cdb_bytes, byte, atio->cdb_len);
7246
7247	atio->ccb_h.status |= CAM_CDB_RECVD;
7248
7249	if ((cmd->identify & MSG_IDENTIFY_DISCFLAG) == 0) {
7250		/*
7251		 * We weren't allowed to disconnect.
7252		 * We're hanging on the bus until a
7253		 * continue target I/O comes in response
7254		 * to this accept tio.
7255		 */
7256#if 0
7257		printf("Received Immediate Command %d:%d:%d - %p\n",
7258		       initiator, target, lun, ahc->pending_device);
7259#endif
7260		ahc->pending_device = lstate;
7261		ahc_freeze_ccb((union ccb *)atio);
7262		atio->ccb_h.flags |= CAM_DIS_DISCONNECT;
7263	}
7264	xpt_done((union ccb*)atio);
7265	return (0);
7266}
7267
7268#endif
7269